Columns (name: dtype, observed range of values or string lengths):

complexity: int64, 1 to 139
fun_name: string, lengths 1 to 80
code: string, lengths 101 to 62.2k
commit_id: string, lengths 40 to 40
ast_errors: string, lengths 0 to 3.11k
ast_levels: int64, 6 to 36
file_name: string, lengths 5 to 79
n_ast_nodes: int64, 17 to 19.2k
commit_message: string, lengths 3 to 15.3k
d_id: int64, 12 to 121k
n_ast_errors: int64, 0 to 9
n_whitespaces: int64, 4 to 10.8k
token_counts: int64, 5 to 3.06k
vocab_size: int64, 4 to 1.11k
id: int64, 20 to 338k
n_words: int64, 4 to 4.82k
repo: string, lengths 3 to 22
n_identifiers: int64, 2 to 176
path: string, lengths 7 to 134
language: string, 1 class
nloc: int64, 1 to 413
documentation: dict
url: string, lengths 31 to 59
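For orientation, a dataset with this schema can be loaded and inspected with the Hugging Face datasets library. The snippet below is a minimal sketch under assumptions: the repository identifier "owner/dataset-name" and the split name are placeholders, since the actual dataset id is not given here.

from datasets import load_dataset

# "owner/dataset-name" is a placeholder for the real dataset repository id (assumption).
ds = load_dataset("owner/dataset-name", split="train")

# Each record pairs a Python function's source with its docstring and static code metrics.
row = ds[0]
print(row["fun_name"], row["repo"], row["path"], row["nloc"])
print(row["documentation"]["docstring"])

The sample rows that follow list their field values in the same column order as the schema above; empty ast_errors values are omitted.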
2
strategy_supports_loss_scaling
def strategy_supports_loss_scaling():
    if not tf.distribute.has_strategy():
        return True
    strategy = tf.distribute.get_strategy()
    # Strategies are supported if either there is only one replica or if variables
    # are replicated per device. Otherwise, the current model.fit() implementation
    # and most custom training loops incorrectly unscale the gradients. Currently,
    # gradients are unscaled once per compute replica, but they should be unscaled
    # once per variable replica. When there is one variable replica for each
    # compute replica, this works fine, but otherwise issues will occur.
    # TODO(reedwm): Support all strategies.
    return isinstance(
        strategy,
        (
            tf.distribute.MultiWorkerMirroredStrategy,
            tf.compat.v1.distribute.experimental.MultiWorkerMirroredStrategy,
            tf.distribute.OneDeviceStrategy,
            tf.compat.v1.distribute.OneDeviceStrategy,
            tf.distribute.MirroredStrategy,
            tf.compat.v1.distribute.MirroredStrategy,
        ),
    )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
13
loss_scale_optimizer.py
136
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,301
0
229
85
75
275,052
99
keras
13
keras/mixed_precision/loss_scale_optimizer.py
Python
15
{ "docstring": "Returns True if the current Strategy supports loss scaling.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/keras-team/keras.git
3
stream_csv
def stream_csv(self, queryset):
    writer = csv.DictWriter(Echo(), fieldnames=self.list_export)
    yield writer.writerow(
        {field: self.get_heading(queryset, field) for field in self.list_export}
    )
    for item in queryset:
        yield self.write_csv_row(writer, self.to_row_dict(item))
d10f15e55806c6944827d801cd9c2d53f5da4186
12
mixins.py
105
Reformat with black
15,895
0
81
67
21
72,433
24
wagtail
15
wagtail/admin/views/mixins.py
Python
7
{ "docstring": "Generate a csv file line by line from queryset, to be used in a StreamingHTTPResponse", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 13 }
https://github.com/wagtail/wagtail.git
5
_agg
def _agg(self, agg, axis=0, level=None, **kwargs):
    if level is not None or axis != 0:
        raise NotImplementedError(
            "HDK's aggregation functions does not support 'level' and 'axis' parameters."
        )
    # TODO: Do filtering on numeric columns if `numeric_only=True`
    if not kwargs.get("skipna", True) or kwargs.get("numeric_only"):
        raise NotImplementedError(
            "HDK's aggregation functions does not support 'skipna' and 'numeric_only' parameters."
        )
    # Processed above, so can be omitted
    kwargs.pop("skipna", None)
    kwargs.pop("numeric_only", None)
    new_frame = self._modin_frame.agg(agg)
    new_frame = new_frame._set_index(
        pandas.Index.__new__(
            pandas.Index, data=[MODIN_UNNAMED_SERIES_LABEL], dtype="O"
        )
    )
    return self.__constructor__(new_frame, shape_hint="row")
e5b1888cd932909e49194d58035da34b210b91c4
12
query_compiler.py
202
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Andrey Pavlenko <[email protected]>
36,114
0
270
121
60
154,619
82
modin
20
modin/experimental/core/storage_formats/hdk/query_compiler.py
Python
18
{ "docstring": "\n Perform specified aggregation along rows/columns.\n\n Parameters\n ----------\n agg : str\n Name of the aggregation function to perform.\n axis : {0, 1}, default: 0\n Axis to perform aggregation along. 0 is to apply function against each column,\n all the columns will be reduced into a single scalar. 1 is to aggregate\n across rows.\n *Note:* HDK storage format supports aggregation for 0 axis only, aggregation\n along rows will be defaulted to pandas.\n level : None, default: None\n Serves the compatibility purpose, always have to be None.\n **kwargs : dict\n Additional parameters to pass to the aggregation function.\n\n Returns\n -------\n DFAlgQueryCompiler\n New single-column (``axis=1``) or single-row (``axis=0``) query compiler containing\n the result of aggregation.\n ", "language": "en", "n_whitespaces": 299, "n_words": 111, "vocab_size": 81 }
https://github.com/modin-project/modin.git
1
run_once
async def run_once(self) -> None:
    raise NotImplementedError("LoopService subclasses must implement this method.")
3fd5aef3a1b94ac270e7325aa75d534e4f412d5c
8
loop_service.py
27
Clean up loop service methods
11,732
0
26
13
12
58,169
12
prefect
3
src/prefect/orion/services/loop_service.py
Python
11
{ "docstring": "\n Represents one loop of the service.\n\n Users should override this method.\n\n To actually run the service once, call `LoopService().start(loops=1)`\n instead of `LoopService().run_once()`, because this method will not invoke setup\n and teardown methods properly.\n ", "language": "en", "n_whitespaces": 76, "n_words": 33, "vocab_size": 30 }
https://github.com/PrefectHQ/prefect.git
1
test_no_auth
def test_no_auth(self) -> None:
    channel = self.make_request("GET", self.url, {})
    self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
    self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
7a1cefc6e37aa583647f2804c9d9c9765712c59a
9
test_user.py
90
Add admin API to get users' account data (#11664) Co-authored-by: reivilibre <[email protected]>
70,945
0
42
56
14
246,015
14
synapse
13
tests/rest/admin/test_user.py
Python
5
{ "docstring": "Try to get information of a user without authentication.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/matrix-org/synapse.git
8
_is_extrinsic
def _is_extrinsic(seq):
    if len(seq) != 3:
        raise ValueError("Expected 3 axes, got `{}`.".format(seq))
    if type(seq) != str:
        raise ValueError('Expected seq to be a string.')
    intrinsic = seq.isupper()
    extrinsic = seq.islower()
    if not (intrinsic or extrinsic):
        raise ValueError("seq must either be fully uppercase (for extrinsic "
                         "rotations), or fully lowercase, for intrinsic "
                         "rotations).")
    i, j, k = seq.lower()
    if (i == j) or (j == k):
        raise ValueError("Consecutive axes must be different")
    bad = set(seq) - set('xyzXYZ')
    if bad:
        raise ValueError("Expected axes from `seq` to be from "
                         "['x', 'y', 'z'] or ['X', 'Y', 'Z'], "
                         "got {}".format(''.join(bad)))
    return extrinsic
1823aa534e379b9123f70389f5818ac4d24015a0
14
quaternion.py
231
Changed _check_sequence to _is_extrinsic As suggested by @smichr Co-authored-by: Christopher Smith <[email protected]>
49,735
0
263
128
68
200,621
99
sympy
18
sympy/algebras/quaternion.py
Python
20
{ "docstring": "validate seq and return True if seq is lowercase and False if uppercase", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 10 }
https://github.com/sympy/sympy.git
3
sep_token
def sep_token(self) -> str:
    if self._sep_token is None:
        if self.verbose:
            logger.error("Using sep_token, but it is not set yet.")
        return None
    return str(self._sep_token)
3eed5530ec74bb60ad9f8f612717d0f6ccf820f2
12
tokenization_utils_base.py
61
Fix properties of unset special tokens in non verbose mode (#17797) Co-authored-by: SaulLu <[email protected]>
5,763
0
80
35
19
31,488
22
transformers
7
src/transformers/tokenization_utils_base.py
Python
10
{ "docstring": "\n `str`: Separation token, to separate context and query in an input sequence. Log an error if used while not\n having been set.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 21 }
https://github.com/huggingface/transformers.git
4
get_year
def get_year(self):
    year = self.year
    if year is None:
        try:
            year = self.kwargs["year"]
        except KeyError:
            try:
                year = self.request.GET["year"]
            except KeyError:
                raise Http404(_("No year specified"))
    return year
9c19aff7c7561e3a82978a272ecdaad40dda5c00
18
dates.py
96
Refs #33476 -- Reformatted code with Black.
51,751
0
160
54
17
206,849
27
django
9
django/views/generic/dates.py
Python
11
{ "docstring": "Return the year for which this view should display data.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
18
_validate_result_metrics
def _validate_result_metrics(self, result):
    if int(os.environ.get("TUNE_DISABLE_STRICT_METRIC_CHECKING", 0)) != 1 and (
        len({k for k in result if k not in list(DEBUG_METRICS) + [DONE]}) > 1
    ):
        base_metric = self._metric if self._metric != DEFAULT_METRIC else None
        scheduler_metric = (
            self._scheduler_alg.metric
            if self._scheduler_alg.metric != DEFAULT_METRIC
            else None
        )
        search_metrics = (
            self._search_alg.metric
            if self._search_alg.metric != DEFAULT_METRIC
            else None
        )
        if isinstance(search_metrics, str):
            search_metrics = [search_metrics]
        if base_metric and base_metric not in result:
            report_metric = base_metric
            location = "tune.TuneConfig()"
        elif scheduler_metric and scheduler_metric not in result:
            report_metric = scheduler_metric
            location = type(self._scheduler_alg).__name__
        elif search_metrics and any(
            search_metric not in result for search_metric in search_metrics
        ):
            report_metric = list(
                filter(
                    lambda search_metric: search_metric not in result,
                    search_metrics,
                )
            )
            if len(report_metric) == 1:
                report_metric = report_metric[0]
            location = type(self._search_alg).__name__
        else:
            report_metric = None
            location = None
        if report_metric:
            raise ValueError(
                "Trial returned a result which did not include the "
                "specified metric(s) `{}` that `{}` expects. "
                "Make sure your calls to `tune.report()` include the "
                "metric, or set the "
                "TUNE_DISABLE_STRICT_METRIC_CHECKING "
                "environment variable to 1. Result: {}".format(
                    report_metric, location, result
                )
            )
de7bd015a4867317569cb0ad775015f6f35fdd1e
17
trial_runner.py
379
[air/tune/docs] Change Tuner() occurences in rest of ray/tune (#26961)
27,954
0
902
238
88
125,703
179
ray
31
python/ray/tune/execution/trial_runner.py
Python
49
{ "docstring": "\n Check if any of the required metrics was not reported\n in the last result. If the only items are ``done`` or any of\n DEBUG_METRICS, this means that no result was ever received and\n the trial just returned. This is also okay and will not raise\n an error.\n\n This will ignore checking for the DEFAULT_METRIC.\n ", "language": "en", "n_whitespaces": 104, "n_words": 54, "vocab_size": 43 }
https://github.com/ray-project/ray.git
6
save
def save(self, must_create=False):
    if self.session_key is None:
        return self.create()
    data = self._get_session(no_load=must_create)
    obj = self.create_model_instance(data)
    using = router.db_for_write(self.model, instance=obj)
    try:
        with transaction.atomic(using=using):
            obj.save(
                force_insert=must_create, force_update=not must_create, using=using
            )
    except IntegrityError:
        if must_create:
            raise CreateError
        raise
    except DatabaseError:
        if not must_create:
            raise UpdateError
        raise
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
db.py
168
Refs #33476 -- Reformatted code with Black.
50,688
0
244
103
34
204,301
43
django
23
django/contrib/sessions/backends/db.py
Python
19
{ "docstring": "\n Save the current session data to the database. If 'must_create' is\n True, raise a database error if the saving operation doesn't create a\n new entry (as opposed to possibly updating an existing entry).\n ", "language": "en", "n_whitespaces": 62, "n_words": 33, "vocab_size": 29 }
https://github.com/django/django.git
4
dag_maker
def dag_maker(request):
    import lazy_object_proxy

    # IMPORTANT: Delay _all_ imports from `airflow.*` to _inside a method_.
    # This fixture is "called" early on in the pytest collection process, and
    # if we import airflow.* here the wrong (non-test) config will be loaded
    # and "baked" in to various constants
    want_serialized = False

    # Allow changing default serialized behaviour with `@pytest.mark.need_serialized_dag` or
    # `@pytest.mark.need_serialized_dag(False)`
    serialized_marker = request.node.get_closest_marker("need_serialized_dag")
    if serialized_marker:
        (want_serialized,) = serialized_marker.args or (True,)

    from airflow.utils.log.logging_mixin import LoggingMixin
d48a3a357fd89ec805d086d5b6c1f1d4daf77b9a
10
conftest.py
83
Add TaskMap and TaskInstance.map_id (#20286) Co-authored-by: Ash Berlin-Taylor <[email protected]>
8,096
0
119
100
60
43,921
76
airflow
13
tests/conftest.py
Python
25
{ "docstring": "\n The dag_maker helps us to create DAG, DagModel, and SerializedDAG automatically.\n\n You have to use the dag_maker as a context manager and it takes\n the same argument as DAG::\n\n with dag_maker(dag_id=\"mydag\") as dag:\n task1 = DummyOperator(task_id='mytask')\n task2 = DummyOperator(task_id='mytask2')\n\n If the DagModel you want to use needs different parameters than the one\n automatically created by the dag_maker, you have to update the DagModel as below::\n\n dag_maker.dag_model.is_active = False\n session.merge(dag_maker.dag_model)\n session.commit()\n\n For any test you use the dag_maker, make sure to create a DagRun::\n\n dag_maker.create_dagrun()\n\n The dag_maker.create_dagrun takes the same arguments as dag.create_dagrun\n\n If you want to operate on serialized DAGs, then either pass ``serialized=True` to the ``dag_maker()``\n call, or you can mark your test/class/file with ``@pytest.mark.need_serialized_dag(True)``. In both of\n these cases the ``dag`` returned by the context manager will be a lazily-evaluated proxy object to the\n SerializedDAG.\n ", "language": "en", "n_whitespaces": 231, "n_words": 137, "vocab_size": 90 }
https://github.com/apache/airflow.git
11
rot90
def rot90(m, k=1, axes=(0, 1)):
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("len(axes) must be 2.")
    m = asanyarray(m)
    if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
        raise ValueError("Axes must be different.")
    if (axes[0] >= m.ndim or axes[0] < -m.ndim
            or axes[1] >= m.ndim or axes[1] < -m.ndim):
        raise ValueError("Axes={} out of range for array of ndim={}."
                         .format(axes, m.ndim))
    k %= 4
    if k == 0:
        return m[:]
    if k == 2:
        return flip(flip(m, axes[0]), axes[1])
    axes_list = arange(0, m.ndim)
    (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]])
    if k == 1:
        return transpose(flip(m, axes[1]), axes_list)
    else:
        # k == 3
        return flip(transpose(m, axes_list), axes[1])
f404e9e92e87a3990712d723d5c562a89300ac01
12
function_base.py
377
Add space after argument name
38,560
0
265
250
68
160,188
105
numpy
15
numpy/lib/function_base.py
Python
79
{ "docstring": "\n Rotate an array by 90 degrees in the plane specified by axes.\n\n Rotation direction is from the first towards the second axis.\n\n Parameters\n ----------\n m : array_like\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes : (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n .. versionadded:: 1.12.0\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n See Also\n --------\n flip : Reverse the order of elements in an array along the given axis.\n fliplr : Flip an array horizontally.\n flipud : Flip an array vertically.\n\n Notes\n -----\n ``rot90(m, k=1, axes=(1,0))`` is the reverse of\n ``rot90(m, k=1, axes=(0,1))``\n\n ``rot90(m, k=1, axes=(1,0))`` is equivalent to\n ``rot90(m, k=-1, axes=(0,1))``\n\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], int)\n >>> m\n array([[1, 2],\n [3, 4]])\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]])\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]])\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1, 3],\n [0, 2]],\n [[5, 7],\n [4, 6]]])\n\n ", "language": "en", "n_whitespaces": 378, "n_words": 170, "vocab_size": 108 }
https://github.com/numpy/numpy.git
7
aug_test_rpn
def aug_test_rpn(self, feats, img_metas):
    samples_per_gpu = len(img_metas[0])
    aug_proposals = [[] for _ in range(samples_per_gpu)]
    for x, img_meta in zip(feats, img_metas):
        results_list = self.simple_test_rpn(x, img_meta)
        for i, results in enumerate(results_list):
            proposals = torch.cat(
                [results.bboxes, results.scores[:, None]], dim=-1)
            aug_proposals[i].append(proposals)
    # reorganize the order of 'img_metas' to match the dimensions
    # of 'aug_proposals'
    aug_img_metas = []
    for i in range(samples_per_gpu):
        aug_img_meta = []
        for j in range(len(img_metas)):
            aug_img_meta.append(img_metas[j][i])
        aug_img_metas.append(aug_img_meta)
    # after merging, proposals will be rescaled to the original image size
    merged_proposals = []
    for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas):
        merged_proposal = merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
        results = InstanceData()
        results.bboxes = merged_proposal[:, :4]
        results.scores = merged_proposal[:, 4]
        merged_proposals.append(results)
    return merged_proposals

if sys.version_info >= (3, 7):
9a3bf7660e6ced54672741095f96df07919f9ba7
15
dense_test_mixins.py
335
[Refactor] Refactor dense head outputs to InstanceResults.
70,342
0
421
206
77
244,350
111
mmdetection
34
mmdet/models/dense_heads/dense_test_mixins.py
Python
24
{ "docstring": "Test with augmentation for only for ``RPNHead`` and its variants,\n e.g., ``GARPNHead``, etc.\n\n Args:\n feats (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n img_metas (list[dict]): Meta info of each image.\n\n Returns:\n list[Tensor]: Proposals of each image, each item has shape (n, 5),\n where 5 represent (tl_x, tl_y, br_x, br_y, score).\n ", "language": "en", "n_whitespaces": 151, "n_words": 52, "vocab_size": 47 }
https://github.com/open-mmlab/mmdetection.git
5
show_job_status
def show_job_status(failed_history, deserialized_data, to_doctype):
    if not failed_history:
        frappe.msgprint(
            _("Creation of {0} successful").format(to_doctype),
            title="Successful",
            indicator="green",
        )
    if len(failed_history) != 0 and len(failed_history) < len(deserialized_data):
        frappe.msgprint(
            _().format(
                to_doctype
            ),
            title="Partially successful",
            indicator="orange",
        )
    if len(failed_history) == len(deserialized_data):
        frappe.msgprint(
            _().format(
                to_doctype
            ),
            title="Failed",
            indicator="red",
        )
a3e69cf75d27198132d05c7c10475a0297b1e190
14
bulk_transaction.py
187
feat: Bulk Transaction Processing (#28580) * feat: Bulk Transaction Processing * fix: add flags to ignore validations and exception handling correction * fix: remove duplicate code, added logger functionality and improved notifications * fix: linting and sider issues * test: added tests * fix: linter issues * fix: failing test case * fix: sider issues and test cases * refactor: mapping function calls to create order/invoice * fix: added more test cases to increase coverage * fix: test cases * fix: sider issue * fix: rename doctype, improve formatting and minor refactor * fix: update doctype name in hooks and sider issues * fix: entry log test case * fix: typos, translations and company name in tests * fix: linter issues and translations * fix: linter issue * fix: split into separate function for marking failed transaction * fix: typos, retry failed transaction logic and make log read only * fix: hide retry button when no failed transactions and remove test cases not rrelevant * fix: sider issues and indentation to tabs Co-authored-by: Ankush Menat <[email protected]>
13,603
0
19
111
30
64,314
42
erpnext
11
erpnext/utilities/bulk_transaction.py
Python
25
{ "docstring": "Creation of {0} partially successful.\n\t\t\t\tCheck <b><a href=\"/app/bulk-transaction-log\">Bulk Transaction Log</a></b>Creation of {0} failed.\n\t\t\t\tCheck <b><a href=\"/app/bulk-transaction-log\">Bulk Transaction Log</a></b>", "language": "en", "n_whitespaces": 15, "n_words": 18, "vocab_size": 12 }
https://github.com/frappe/erpnext.git
2
_make_prop_dict
def _make_prop_dict(self) -> pd.DataFrame:
    pd = import_required("pandas", "Structure graphs require Pandas (http://pandas.pydata.org) to be installed")
    df = pd.DataFrame()
    for x in self._graph.nodes(data=True):
        M = self._model.select_one(dict(id=x[0]))
        Z = pd.DataFrame(self._obj_props_to_df2(M))
        Z["id"] = x[0]
        Z["model"] = str(M)
        Z["values"] = Z["values"].map(lambda x: str(x))
        Z["types"] = Z["types"].map(lambda x: str(x))
        df = pd.concat([df, Z])
    return df
560a57e166a1f54319df57127502d48ee4ecc72e
14
structure.py
239
Generalize filtering on CDS views (#12054) * Generalize filtering on CDS views * Add type information to us_{counties,states} * Add plotting/file/filtering example * Add a migration note * Update models.util.structure and tests * Fix a pandas' deprecation warning * Update CDSView.{filters->filter} * Update documentation * Add more unit tests for BitSet (Indices) * Add unit tests * Add CDSView.filters back-compat to bokehjs
53,207
0
162
142
38
212,228
50
bokeh
20
bokeh/models/util/structure.py
Python
16
{ "docstring": " Returns a dataframe containing all the properties of all the submodels of the model being\n analyzed. Used as datasource to show attributes.\n\n ", "language": "en", "n_whitespaces": 37, "n_words": 22, "vocab_size": 18 }
https://github.com/bokeh/bokeh.git
3
encode_to_file
def encode_to_file(self, fh, bufsize):
    errcode = 0
    while errcode == 0:
        status, errcode, buf = self.encode(bufsize)
        if status > 0:
            fh.write(buf[status:])
    return errcode
a0e1fde1eddf45f26653e2ff6080d31e177adbec
13
ImageFile.py
75
Added PyEncoder
69,858
0
88
47
19
242,436
23
Pillow
9
src/PIL/ImageFile.py
Python
7
{ "docstring": "\n :param fh: File handle.\n :param bufsize: Buffer size.\n\n :returns: If finished successfully, return 0.\n Otherwise, return an error code. Err codes are from\n :data:`.ImageFile.ERRORS`.\n ", "language": "en", "n_whitespaces": 75, "n_words": 24, "vocab_size": 22 }
https://github.com/python-pillow/Pillow.git
9
count_params
def count_params(weights):
    unique_weights = {id(w): w for w in weights}.values()
    # Ignore TrackableWeightHandlers, which will not have a shape defined.
    unique_weights = [w for w in unique_weights if hasattr(w, "shape")]
    weight_shapes = [w.shape.as_list() for w in unique_weights]
    standardized_weight_shapes = [
        [0 if w_i is None else w_i for w_i in w] for w in weight_shapes
    ]
    return int(sum(np.prod(p) for p in standardized_weight_shapes))
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
layer_utils.py
145
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,800
0
93
93
39
276,953
62
keras
17
keras/utils/layer_utils.py
Python
8
{ "docstring": "Count the total number of scalars composing the weights.\n\n Args:\n weights: An iterable containing the weights on which to compute params\n\n Returns:\n The total number of scalars composing the weights\n ", "language": "en", "n_whitespaces": 53, "n_words": 30, "vocab_size": 21 }
https://github.com/keras-team/keras.git
3
render
def render(self, template_name, extra_context=None):
    if extra_context is None:
        extra_context = {}
    elif not isinstance(extra_context, dict):
        raise TypeError("extra_context must be a dictionary")
    return get_template(template_name).render({**self.context, **extra_context})
e7f54c5867cf49126bbf95e28633e4283c2bbcb2
11
templates.py
87
Reorganize plugin resources
78,268
0
74
53
23
266,015
24
netbox
9
netbox/extras/plugins/templates.py
Python
6
{ "docstring": "\n Convenience method for rendering the specified Django template using the default context data. An additional\n context dictionary may be passed as `extra_context`.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 20 }
https://github.com/netbox-community/netbox.git
7
deal_bb
def deal_bb(result_token):
    # find out <thead></thead> parts.
    thead_pattern = '<thead>(.*?)</thead>'
    if re.search(thead_pattern, result_token) is None:
        return result_token
    thead_part = re.search(thead_pattern, result_token).group()
    origin_thead_part = copy.deepcopy(thead_part)
    # check "rowspan" or "colspan" occur in <thead></thead> parts or not .
    span_pattern = "<td rowspan=\"(\d)+\" colspan=\"(\d)+\">|<td colspan=\"(\d)+\" rowspan=\"(\d)+\">|<td rowspan=\"(\d)+\">|<td colspan=\"(\d)+\">"
    span_iter = re.finditer(span_pattern, thead_part)
    span_list = [s.group() for s in span_iter]
    has_span_in_head = True if len(span_list) > 0 else False
    if not has_span_in_head:
        # <thead></thead> not include "rowspan" or "colspan" branch 1.
        # 1. replace <td> to <td><b>, and </td> to </b></td>
        # 2. it is possible to predict text include <b> or </b> by Text-line recognition,
        # so we replace <b><b> to <b>, and </b></b> to </b>
        thead_part = thead_part.replace('<td>', '<td><b>')\
            .replace('</td>', '</b></td>')\
            .replace('<b><b>', '<b>')\
            .replace('</b></b>', '</b>')
    else:
        # <thead></thead> include "rowspan" or "colspan" branch 2.
        # Firstly, we deal rowspan or colspan cases.
        # 1. replace > to ><b>
        # 2. replace </td> to </b></td>
        # 3. it is possible to predict text include <b> or </b> by Text-line recognition,
        # so we replace <b><b> to <b>, and </b><b> to </b>
        # Secondly, deal ordinary cases like branch 1
        # replace ">" to "<b>"
        replaced_span_list = []
        for sp in span_list:
            replaced_span_list.append(sp.replace('>', '><b>'))
        for sp, rsp in zip(span_list, replaced_span_list):
            thead_part = thead_part.replace(sp, rsp)
        # replace "</td>" to "</b></td>"
        thead_part = thead_part.replace('</td>', '</b></td>')
        # remove duplicated <b> by re.sub
        mb_pattern = "(<b>)+"
        single_b_string = "<b>"
        thead_part = re.sub(mb_pattern, single_b_string, thead_part)
        mgb_pattern = "(</b>)+"
        single_gb_string = "</b>"
        thead_part = re.sub(mgb_pattern, single_gb_string, thead_part)
        # ordinary cases like branch 1
        thead_part = thead_part.replace('<td>', '<td><b>').replace('<b><b>', '<b>')
    # convert <tb><b></b></tb> back to <tb></tb>, empty cell has no <b></b>.
    # but space cell(<tb> </tb>) is suitable for <td><b> </b></td>
    thead_part = thead_part.replace('<td><b></b></td>', '<td></td>')
    # deal with duplicated <b></b>
    thead_part = deal_duplicate_bb(thead_part)
    # deal with isolate span tokens, which causes by wrong predict by structure prediction.
    # eg.PMC5994107_011_00.png
    thead_part = deal_isolate_span(thead_part)
    # replace original result with new thead part.
    result_token = result_token.replace(origin_thead_part, thead_part)
    return result_token
ddaa2c2552e19635cd6cdf38619f1f176c358f89
17
table_master_match.py
484
add SLANet
4,740
0
720
264
170
24,488
324
PaddleOCR
30
ppstructure/table/table_master_match.py
Python
35
{ "docstring": "\n In our opinion, <b></b> always occurs in <thead></thead> text's context.\n This function will find out all tokens in <thead></thead> and insert <b></b> by manual.\n :param result_token:\n :return:\n ", "language": "en", "n_whitespaces": 43, "n_words": 27, "vocab_size": 24 }
https://github.com/PaddlePaddle/PaddleOCR.git
7
_rm_rs_action
def _rm_rs_action(self) -> HVACAction | None:
    if (running_state := self._thrm.running_state) is None:
        return None
    if running_state & (
        T.RunningState.Heat_State_On | T.RunningState.Heat_2nd_Stage_On
    ):
        return HVACAction.HEATING
    if running_state & (
        T.RunningState.Cool_State_On | T.RunningState.Cool_2nd_Stage_On
    ):
        return HVACAction.COOLING
    if running_state & (
        T.RunningState.Fan_State_On
        | T.RunningState.Fan_2nd_Stage_On
        | T.RunningState.Fan_3rd_Stage_On
    ):
        return HVACAction.FAN
    if running_state & T.RunningState.Idle:
        return HVACAction.IDLE
    if self.hvac_mode != HVACMode.OFF:
        return HVACAction.IDLE
    return HVACAction.OFF
8745401af59da209e6304911f81e5416d4f18bd7
11
climate.py
193
Use climate enums in zha (#70754)
97,918
0
258
124
33
298,979
60
core
22
homeassistant/components/zha/climate.py
Python
23
{ "docstring": "Return the current HVAC action based on running mode and running state.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/home-assistant/core.git
2
mousePressEvent
def mousePressEvent(self, e):
    if e.button() == Qt.MouseButton.LeftButton:
        e.accept()
        self.toggle()
    else:
        super().mousePressEvent(e)
0877fb0d78635692e481c8bde224fac5ad0dd430
12
miscwidgets.py
72
Run scripts/dev/rewrite_enums.py
117,657
0
65
41
11
321,317
11
qutebrowser
10
qutebrowser/misc/miscwidgets.py
Python
6
{ "docstring": "Toggle the fold if the widget was pressed.\n\n Args:\n e: The QMouseEvent.\n ", "language": "en", "n_whitespaces": 37, "n_words": 12, "vocab_size": 11 }
https://github.com/qutebrowser/qutebrowser.git
2
get_text_size
def get_text_size(self, text):
    font = self.fonts['NORMAL']
    if hasattr(font, 'getbbox'):  # Pillow >= 9.2.0
        return font.getbbox(text)[2:4]
    else:
        return font.getsize(text)
99eab68bf959e4c71c2688e4b1675ce9147ee785
10
img.py
77
Upgrade pygments to 2.13.0
41,534
0
69
45
17
174,994
18
pip
8
src/pip/_vendor/pygments/formatters/img.py
Python
6
{ "docstring": "\n Get the text size (width, height).\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/pypa/pip.git
1
test_power_levels
def test_power_levels(self):
    # The other user doesn't have the proper power level.
    channel = self._upgrade_room(self.other_token)
    self.assertEqual(403, channel.code, channel.result)

    # Increase the power levels so that this user can upgrade.
    power_levels = self.helper.get_state(
        self.room_id,
        "m.room.power_levels",
        tok=self.creator_token,
    )
    power_levels["users"][self.other] = 100
    self.helper.send_state(
        self.room_id,
        "m.room.power_levels",
        body=power_levels,
        tok=self.creator_token,
    )

    # The upgrade should succeed!
    channel = self._upgrade_room(self.other_token)
    self.assertEqual(200, channel.code, channel.result)
02d708568b476f2f7716000b35c0adfa4cbd31b3
10
test_upgrade_room.py
172
Replace assertEquals and friends with non-deprecated versions. (#12092)
71,409
0
224
109
39
246,925
56
synapse
17
tests/rest/client/test_upgrade_room.py
Python
17
{ "docstring": "\n Another user can upgrade the room if their power level is increased.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/matrix-org/synapse.git
5
wheel_graph
def wheel_graph(n, create_using=None):
    _, nodes = n
    G = empty_graph(nodes, create_using)
    if G.is_directed():
        raise NetworkXError("Directed Graph not supported")
    if len(nodes) > 1:
        hub, *rim = nodes
        G.add_edges_from((hub, node) for node in rim)
        if len(rim) > 1:
            G.add_edges_from(pairwise(rim, cyclic=True))
    return G
de1d00f20e0bc14f1cc911b3486e50225a8fa168
14
classic.py
139
Adjust the usage of nodes_or_number decorator (#5599) * recorrect typo in decorators.py * Update tests to show troubles in current code * fix troubles with usage of nodes_or_number * fix typo * remove nodes_or_number where that makes sense * Reinclude nodes_or_numbers and add some tests for nonstandard usage * fix typowq * hopefully final tweaks (no behavior changes * Update test_classic.py Co-authored-by: Jarrod Millman <[email protected]>
42,010
0
97
86
32
176,628
40
networkx
16
networkx/generators/classic.py
Python
11
{ "docstring": "Return the wheel graph\n\n The wheel graph consists of a hub node connected to a cycle of (n-1) nodes.\n\n Parameters\n ----------\n n : int or iterable\n If an integer, node labels are 0 to n with center 0.\n If an iterable of nodes, the center is the first.\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Node labels are the integers 0 to n - 1.\n ", "language": "en", "n_whitespaces": 117, "n_words": 76, "vocab_size": 51 }
https://github.com/networkx/networkx.git
1
test_checkpointing_by_steps
def test_checkpointing_by_steps(self):
    testargs = f.split()
    _ = subprocess.run(self._launch_args + testargs, stdout=subprocess.PIPE, env=os.environ)
    self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_4")))
23c0341262bd396a3ba9265614b3818d6e08a6c1
12
test_examples.py
104
Refactor tests to use accelerate launch (#373) Co-authored-by: Sylvain Gugger <[email protected]>
121,081
0
34
61
13
337,582
14
accelerate
18
tests/test_examples.py
Python
8
{ "docstring": "\n examples/by_feature/checkpointing.py\n --checkpointing_steps 2\n --output_dir {self.tmpdir}\n ", "language": "en", "n_whitespaces": 34, "n_words": 5, "vocab_size": 5 }
https://github.com/huggingface/accelerate.git
1
test_exception
async def test_exception(hass):
    flow = config_flow.SomaFlowHandler()
    flow.hass = hass
    with patch.object(SomaApi, "list_devices", side_effect=RequestException()):
        result = await flow.async_step_import({"host": MOCK_HOST, "port": MOCK_PORT})
    assert result["type"] == data_entry_flow.FlowResultType.ABORT
    assert result["reason"] == "connection_error"
7cd68381f1d4f58930ffd631dfbfc7159d459832
14
test_config_flow.py
124
Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)
114,853
0
53
69
24
316,275
28
core
17
tests/components/soma/test_config_flow.py
Python
7
{ "docstring": "Test if RequestException fires when no connection can be made.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
test_cf_data
def test_cf_data(self):
    site = Site(name='Test Site', slug='test-site')

    # Check custom field data on new instance
    site.custom_field_data['foo'] = 'abc'
    self.assertEqual(site.cf['foo'], 'abc')

    # Check custom field data from database
    site.save()
    site = Site.objects.get(name='Test Site')
    self.assertEqual(site.cf['foo'], 'abc')
ea6d86e6c4bb6037465410db6205a7471bc81a6c
10
test_customfields.py
129
Closes #10052: The cf attribute now returns deserialized custom field data
78,273
0
97
69
24
266,036
34
netbox
12
netbox/extras/tests/test_customfields.py
Python
7
{ "docstring": "\n Check that custom field data is present on the instance immediately after being set and after being fetched\n from the database.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 18 }
https://github.com/netbox-community/netbox.git
8
tokenize
def tokenize(self, text, never_split=None):
    # union() returns a new set by concatenating the two sets.
    never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
    text = self._clean_text(text)

    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia.).
    if self.tokenize_chinese_chars:
        text = self._tokenize_chinese_chars(text)
    orig_tokens = whitespace_tokenize(text)
    split_tokens = []
    for token in orig_tokens:
        if token not in never_split:
            if self.do_lower_case:
                token = token.lower()
                if self.strip_accents is not False:
                    token = self._run_strip_accents(token)
            elif self.strip_accents:
                token = self._run_strip_accents(token)
        split_tokens.extend(self._run_split_on_punc(token, never_split))

    output_tokens = whitespace_tokenize(" ".join(split_tokens))
    return output_tokens
841d979190319098adc8101f9820a02ee3be4c8b
16
tokenization_realm.py
232
Add FastTokenizer to REALM (#15211) * Remove BertTokenizer abstraction * Add FastTokenizer to REALM * Fix config archive map * Fix copies * Update realm.mdx * Apply suggestions from code review
6,238
0
396
140
89
34,277
141
transformers
21
src/transformers/models/realm/tokenization_realm.py
Python
18
{ "docstring": "\n Basic Tokenization of a piece of text. Split on \"white spaces\" only, for sub-word tokenization, see\n WordPieceTokenizer.\n\n Args:\n never_split (`List[str]`, *optional*)\n Kept for backward compatibility purposes. Now implemented directly at the base class level (see\n [`PreTrainedTokenizer.tokenize`]) List of token not to split.\n ", "language": "en", "n_whitespaces": 112, "n_words": 42, "vocab_size": 39 }
https://github.com/huggingface/transformers.git
2
async_mqtt_connect
def async_mqtt_connect(self) -> None:
    if not self.hass.is_stopping:
        self.async_write_ha_state()
2f1138562720cd50343d2fedd4981913a9ef6bd9
9
mixins.py
38
Add typing hints for MQTT mixins (#80702) * Add typing hints for MQTT mixins * Follow up comments * config_entry is always set * typing discovery_data - substate None assignment * Rename `config[CONF_DEVICE]` -> specifications
88,869
0
33
21
8
289,733
8
core
5
homeassistant/components/mqtt/mixins.py
Python
4
{ "docstring": "Update state on connection/disconnection to MQTT broker.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/home-assistant/core.git
1
test_count_characters
def test_count_characters(self):
    validator = RichTextMaxLengthValidator(50)
    self.assertEqual(validator.clean("<p>Plain text</p>"), 10)
    # HTML entities should be un-escaped.
    self.assertEqual(validator.clean("<p>There&#x27;s quote</p>"), 13)
    # BR should be ignored.
    self.assertEqual(validator.clean("<p>Line<br/>break</p>"), 9)
    # Content over multiple blocks should be treated as a single line of text with no joiner.
    self.assertEqual(validator.clean("<p>Multi</p><p>blocks</p>"), 11)
    # Empty blocks should be ignored.
    self.assertEqual(validator.clean("<p>Empty</p><p></p><p>blocks</p>"), 11)
    # HR should be ignored.
    self.assertEqual(validator.clean("<p>With</p><hr/><p>HR</p>"), 6)
    # Embed blocks should be ignored.
    self.assertEqual(validator.clean("<p>With</p><embed/><p>embed</p>"), 9)
    # Counts symbols with multiple code units (heart unicode + variation selector).
    self.assertEqual(validator.clean("<p>U+2764 U+FE0F ❤️</p>"), 16)
    # Counts symbols with zero-width joiners.
    self.assertEqual(validator.clean("<p>👨‍👨‍👧</p>"), 5)
8a7e0884d789449ddbbd08ddae48374d92a14d11
10
test_rich_text.py
229
Finish implementing rich text max length with identical client & server count
16,821
0
223
129
61
78,814
90
wagtail
6
wagtail/tests/test_rich_text.py
Python
11
{ "docstring": "Keep those tests up-to-date with MaxLength tests client-side.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
https://github.com/wagtail/wagtail.git
3
test_already_created_plus_written_results
def test_already_created_plus_written_results(indexer, indexer_cache) -> None:
    org_id = 1234
    raw_indexer = indexer
    indexer = CachingIndexer(indexer_cache, indexer)

    v0 = raw_indexer.record(use_case_id, org_id, "v1.2.0")
    v1 = raw_indexer.record(use_case_id, org_id, "v1.2.1")
    v2 = raw_indexer.record(use_case_id, org_id, "v1.2.2")

    expected_mapping = {"v1.2.0": v0, "v1.2.1": v1, "v1.2.2": v2}

    results = indexer.bulk_record(
        use_case_id=use_case_id, org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2"}}
    )
    assert len(results[org_id]) == len(expected_mapping) == 3

    for string, id in results[org_id].items():
        assert expected_mapping[string] == id

    results = indexer.bulk_record(
        use_case_id=use_case_id,
        org_strings={org_id: {"v1.2.0", "v1.2.1", "v1.2.2", "v1.2.3"}},
    )
    v3 = raw_indexer.resolve(use_case_id, org_id, "v1.2.3")
    expected_mapping["v1.2.3"] = v3
    assert len(results[org_id]) == len(expected_mapping) == 4

    for string, id in results[org_id].items():
        assert expected_mapping[string] == id

    fetch_meta = results.get_fetch_metadata()
    assert_fetch_type_for_tag_string_set(
        fetch_meta[org_id], FetchType.CACHE_HIT, {"v1.2.0", "v1.2.1", "v1.2.2"}
    )
    assert_fetch_type_for_tag_string_set(fetch_meta[org_id], FetchType.FIRST_SEEN, {"v1.2.3"})
7bbb85a0d95d23620228a02bb4401fc09658f5f1
13
test_all_indexers.py
411
ref(metrics): Split caching out of indexers, random test refactoring [sns-1606] (#37714)
19,097
0
216
257
62
94,500
108
sentry
27
tests/sentry/sentry_metrics/test_all_indexers.py
Python
32
{ "docstring": "\n Test that we correctly combine db read results with db write results\n for the same organization.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
https://github.com/getsentry/sentry.git
1
url_to_file_path
def url_to_file_path(url, filecache):
    key = CacheController.cache_url(url)
    return filecache._fn(key)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
8
file_cache.py
39
upd; format
12,586
0
17
23
8
61,474
8
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
Python
3
{ "docstring": "Return the file cache path based on the URL.\n\n This does not ensure the file exists!\n ", "language": "en", "n_whitespaces": 22, "n_words": 16, "vocab_size": 13 }
https://github.com/jindongwang/transferlearning.git
7
validate_multi_return
def validate_multi_return(outputs_list, output_value, callback_id):
    if not isinstance(output_value, (list, tuple)):
        raise exceptions.InvalidCallbackReturnValue(
            dedent(
                f
            )
        )
    if len(output_value) != len(outputs_list):
        raise exceptions.InvalidCallbackReturnValue(
            f
        )
    for i, outi in enumerate(outputs_list):
        if isinstance(outi, list):
            vi = output_value[i]
            if not isinstance(vi, (list, tuple)):
                raise exceptions.InvalidCallbackReturnValue(
                    dedent(
                        f
                    )
                )
            if len(vi) != len(outi):
                raise exceptions.InvalidCallbackReturnValue(
                    dedent(
                        f
                    )
                )
c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c
19
_validate.py
275
f-strings everywhere! fffff
7,339
0
361
122
30
40,175
55
dash
15
dash/_validate.py
Python
43
{ "docstring": "\n The callback {callback_id} is a multi-output.\n Expected the output type to be a list or tuple but got:\n {output_value!r}.\n \n Invalid number of output values for {callback_id}.\n Expected {len(outputs_list)}, got {len(output_value)}\n \n The callback {callback_id} output {i} is a wildcard multi-output.\n Expected the output type to be a list or tuple but got:\n {vi!r}.\n output spec: {outi!r}\n \n Invalid number of output values for {callback_id} item {i}.\n Expected {len(vi)}, got {len(outi)}\n output spec: {outi!r}\n output value: {vi!r}\n ", "language": "en", "n_whitespaces": 401, "n_words": 74, "vocab_size": 38 }
https://github.com/plotly/dash.git
3
test_pipe_leakage
def test_pipe_leakage():
    from psutil import Process
    parent = Process()

    # Get this platform's *count open handles* method.
    open_fds = parent.num_handles if os.name == "nt" else parent.num_fds
    old = open_fds()

    # Creating an isolated.Python() does nothing.
    child = isolated.Python()
    assert open_fds() == old

    # Entering its context creates the child process and 4 handles for sending/receiving to/from it. Then on Windows,
    # opening the parent's ends of the two pipes creates another two handles and starting the subprocess adds another
    # two (although I don't know what for).
    EXPECTED_INCREASE_IN_FDS = (4 if os.name != "nt" else 8)
    with child:
        assert open_fds() == old + EXPECTED_INCREASE_IN_FDS
    # Exiting must close them all immediately. No implicit closure by garbage collect.
    assert open_fds() == old

    # Do it again just to be sure that the context manager properly restarts.
    with child:
        assert open_fds() == old + EXPECTED_INCREASE_IN_FDS
    assert open_fds() == old
1a7d704ffbabb433007e3ba04750c2f13ade48e5
10
test_isolation.py
171
Fix typos (#6782) [skip ci]
77,412
0
218
94
96
262,862
147
pyinstaller
14
tests/unit/test_isolation.py
Python
14
{ "docstring": "\n There is a finite number of open pipes/file handles/file descriptors allowed per process. Ensure that all\n opened handles eventually get closed to prevent such *leakages* causing crashes in very long processes (such as\n the rest of our test suite).\n ", "language": "en", "n_whitespaces": 52, "n_words": 39, "vocab_size": 38 }
https://github.com/pyinstaller/pyinstaller.git
1
test_visit_collection_with_private_pydantic
async def test_visit_collection_with_private_pydantic(self):
    input = PrivatePydantic(x=1)
    input._y = 2
    input._z = 4

    result = await visit_collection(
        input, visit_fn=visit_even_numbers, return_data=False
    )
    assert result is None
    assert EVEN == {2, 4}

    result = await visit_collection(
        input, visit_fn=negative_even_numbers, return_data=True
    )
    assert result == input
    assert result.__private_attributes__ == input.__private_attributes__
    breakpoint()
    assert result._y == -2
    assert result._z == -4
c33f87fc7e0b6fb4714a88b492e7545f4dbd821f
10
test_collections.py
150
get private attrs working
11,499
0
181
95
33
56,293
54
prefect
16
tests/utilities/test_collections.py
Python
17
{ "docstring": "Check that we successfully capture private pydantic fields", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/PrefectHQ/prefect.git
28
search_packages_info
def search_packages_info(query):
    # type: (List[str]) -> Iterator[Dict[str, str]]
    installed = {}
    for p in pkg_resources.working_set:
        installed[canonicalize_name(p.project_name)] = p
    query_names = [canonicalize_name(name) for name in query]
    missing = sorted(
        [name for name, pkg in zip(query, query_names) if pkg not in installed]
    )
    if missing:
        logger.warning('Package(s) not found: %s', ', '.join(missing))
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
12
show.py
130
upd; format
12,227
0
94
483
37
60,651
49
transferlearning
17
.venv/lib/python3.8/site-packages/pip/_internal/commands/show.py
Python
58
{ "docstring": "\n Gather details from installed distributions. Print distribution name,\n version, location, and installed files. Installed files requires a\n pip generated 'installed-files.txt' in the distributions '.egg-info'\n directory.\n ", "language": "en", "n_whitespaces": 41, "n_words": 25, "vocab_size": 24 }
https://github.com/jindongwang/transferlearning.git
1
get_series_to_preserve
def get_series_to_preserve(doctype):
    series_to_preserve = frappe.db.sql_list(
        .format(
            doctype=doctype
        )
    )
    series_to_preserve.sort()
    return series_to_preserve
494bd9ef78313436f0424b918f200dab8fc7c20b
11
refactor_naming_series.py
50
style: format code with black
14,213
0
4
29
10
66,552
12
erpnext
8
erpnext/patches/v11_0/refactor_naming_series.py
Python
8
{ "docstring": "select distinct naming_series from `tab{doctype}` where ifnull(naming_series, '') != ''", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/frappe/erpnext.git
7
extractRegexResult
def extractRegexResult(regex, content, flags=0):
    retVal = None
    if regex and content and "?P<result>" in regex:
        if isinstance(content, six.binary_type) and isinstance(regex, six.text_type):
            regex = getBytes(regex)
        match = re.search(regex, content, flags)
        if match:
            retVal = match.group("result")
    return retVal
63977ebdffb9e54978da337a7ec3ce8200723162
13
common.py
119
Minor update
27,333
0
91
74
25
123,306
36
sqlmap
14
lib/core/common.py
Python
9
{ "docstring": "\n Returns 'result' group value from a possible match with regex on a given\n content\n\n >>> extractRegexResult(r'a(?P<result>[^g]+)g', 'abcdefg')\n 'bcdef'\n >>> extractRegexResult(r'a(?P<result>[^g]+)g', 'ABCDEFG', re.I)\n 'BCDEF'\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 20 }
https://github.com/sqlmapproject/sqlmap.git
2
loss_cardinality
def loss_cardinality(self, outputs, targets, indices, num_boxes):
    logits = outputs["logits"]
    device = logits.device
    target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
    # Count the number of predictions that are NOT "no-object" (which is the last class)
    card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
    card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
    losses = {"cardinality_error": card_err}
    return losses
59407bbeb31fff8340938768051c9daabd38d7a7
13
modeling_deformable_detr.py
167
Add Deformable DETR (#17281) * First draft * More improvements * Improve model, add custom CUDA code * Import torch before * Add script that imports custom layer * Add everything in new ops directory * Import custom layer in modeling file * Fix ARCHIVE_MAP typo * Creating the custom kernel on the fly. * Import custom layer in modeling file * More improvements * Fix CUDA loading * More improvements * Improve conversion script * Improve conversion script * Make it work until encoder_outputs * Make forward pass work * More improvements * Make logits match original implementation * Make implementation also support single_scale model * Add support for single_scale and dilation checkpoint * Add support for with_box_refine model * Support also two stage model * Improve tests * Fix more tests * Make more tests pass * Upload all models to the hub * Clean up some code * Improve decoder outputs * Rename intermediate hidden states and reference points * Improve model outputs * Move tests to dedicated folder * Improve model outputs * Fix retain_grad test * Improve docs * Clean up and make test_initialization pass * Improve variable names * Add copied from statements * Improve docs * Fix style * Improve docs * Improve docs, move tests to model folder * Fix rebase * Remove DetrForSegmentation from auto mapping * Apply suggestions from code review * Improve variable names and docstrings * Apply some more suggestions from code review * Apply suggestion from code review * better docs and variables names * hint to num_queries and two_stage confusion * remove asserts and code refactor * add exception if two_stage is True and with_box_refine is False * use f-strings * Improve docs and variable names * Fix code quality * Fix rebase * Add require_torch_gpu decorator * Add pip install ninja to CI jobs * Apply suggestion of @sgugger * Remove DeformableDetrForObjectDetection from auto mapping * Remove DeformableDetrModel from auto mapping * Add model to toctree * Add model back to mappings, skip model in pipeline tests * Apply @sgugger's suggestion * Fix imports in the init * Fix copies * Add CPU implementation * Comment out GPU function * Undo previous change * Apply more suggestions * Remove require_torch_gpu annotator * Fix quality * Add logger.info * Fix logger * Fix variable names * Fix initializaztion * Add missing initialization * Update checkpoint name * Add model to doc tests * Add CPU/GPU equivalence test * Add Deformable DETR to pipeline tests * Skip model for object detection pipeline Co-authored-by: Nicolas Patry <[email protected]> Co-authored-by: Nouamane Tazi <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
6,125
0
115
104
45
33,630
52
transformers
23
src/transformers/models/deformable_detr/modeling_deformable_detr.py
Python
8
{ "docstring": "\n Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.\n\n This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients.\n ", "language": "en", "n_whitespaces": 54, "n_words": 32, "vocab_size": 29 }
https://github.com/huggingface/transformers.git
1
test_duplicated_txn_id_one_call
def test_duplicated_txn_id_one_call(self):
    txn_id = "something_else_suitably_random"

    # Create two duplicate events to persist at the same time
    event1, context1 = self._create_duplicate_event(txn_id)
    event2, context2 = self._create_duplicate_event(txn_id)

    # Ensure their event IDs are different to start with
    self.assertNotEqual(event1.event_id, event2.event_id)

    events, _ = self.get_success(
        self._persist_event_storage_controller.persist_events(
            [(event1, context1), (event2, context2)]
        )
    )

    # Check that we've deduplicated the events.
    self.assertEqual(len(events), 2)
    self.assertEqual(events[0].event_id, events[1].event_id)
1e453053cb12ff084fdcdc2f75c08ced274dff21
12
test_message.py
158
Rename storage classes (#12913)
72,277
0
179
99
49
248,428
58
synapse
17
tests/handlers/test_message.py
Python
12
{ "docstring": "Test that we correctly handle duplicates that we try and persist at\n the same time.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 13 }
https://github.com/matrix-org/synapse.git
1
test_tutte_polynomial_disjoint_K1
def test_tutte_polynomial_disjoint_K1():
    g = nx.complete_graph(1)
    t_g = nx.tutte_polynomial(g)
    h = nx.disjoint_union(g, g)
    t_h = nx.tutte_polynomial(h)
    assert sympy.simplify(t_g * t_g).equals(t_h)
f11068c0115ede0c7b631f771c10be7efd0b950b
10
test_polynomials.py
88
Add Tutte polynomial (#5265) Add a new polynomial module to algorithms for characteristic polynomials. Adds the Tutte polynomial, which is computed and ultimate represented as a sympy expression. Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
41,896
0
37
53
16
176,433
19
networkx
12
networkx/algorithms/tests/test_polynomials.py
Python
6
{ "docstring": "Tutte polynomial factors into the Tutte polynomials of its components.\n Verify this property with the disjoint union of two copies of `K_1`.\n ", "language": "en", "n_whitespaces": 28, "n_words": 22, "vocab_size": 18 }
https://github.com/networkx/networkx.git
1
perspective
def perspective(aspect_ratio, fov_y, near_clip, far_clip):
    focal_lengths_y = 1.0 / np.tan(fov_y * (math.pi / 360.0))
    depth_range = far_clip - near_clip
    p_22 = -(far_clip + near_clip) / depth_range
    p_23 = -2.0 * (far_clip * near_clip / depth_range)
    zeros = np.zeros_like(p_23, dtype=np.float32)
    # pyformat: disable
    perspective_transform = np.concatenate(
        [
            focal_lengths_y / aspect_ratio, zeros, zeros, zeros,
            zeros, focal_lengths_y, zeros, zeros,
            zeros, zeros, p_22, p_23,
            zeros, zeros, -np.ones_like(p_23, dtype=np.float32), zeros
        ], axis=0)
    # pyformat: enable
    perspective_transform = np.reshape(perspective_transform, [4, 4, -1])
    return np.transpose(perspective_transform, [2, 0, 1])
7375ee364e0df2a417f92593e09557f1b2a3575a
14
ganfit_camera.py
229
initialize ostec
1,634
0
160
167
53
9,559
81
insightface
23
reconstruction/ostec/utils/ganfit_camera.py
Python
15
{ "docstring": "Computes perspective transformation matrices.\n Functionality mimes gluPerspective (third_party/GL/glu/include/GLU/glu.h).\n Args:\n aspect_ratio: float value specifying the image aspect ratio (width/height).\n fov_y: 1-D float32 Tensor with shape [batch_size] specifying output vertical\n field of views in degrees.\n near_clip: 1-D float32 Tensor with shape [batch_size] specifying near\n clipping plane distance.\n far_clip: 1-D float32 Tensor with shape [batch_size] specifying far clipping\n plane distance.\n Returns:\n A [batch_size, 4, 4] float tensor that maps from right-handed points in eye\n space to left-handed points in clip space.\n ", "language": "en", "n_whitespaces": 147, "n_words": 78, "vocab_size": 56 }
https://github.com/deepinsight/insightface.git
2
_test_tkinter
def _test_tkinter(cls) -> None:
    try:
        import tkinter  # noqa pylint: disable=unused-import,import-outside-toplevel
    except ImportError as err:
        logger.error("It looks like TkInter isn't installed for your OS, so the GUI has been "
                     "disabled. To enable the GUI please install the TkInter application. You "
                     "can try:")
        logger.info("Anaconda: conda install tk")
        logger.info("Windows/macOS: Install ActiveTcl Community Edition from "
                    "http://www.activestate.com")
        logger.info("Ubuntu/Mint/Debian: sudo apt install python3-tk")
        logger.info("Arch: sudo pacman -S tk")
        logger.info("CentOS/Redhat: sudo yum install tkinter")
        logger.info("Fedora: sudo dnf install python3-tkinter")
        raise FaceswapError("TkInter not found") from err
26e26c628803e592ce876e101a45033c87a5a97b
12
launcher.py
141
Update TF to 2.9 - Update TF to 2.7 to 2.9 - Bump dependencies - Remove decode from pynvml calls - force keras predict functions to non-verbose - update tests - update Tensorboard logging - Update docs
20,568
0
274
69
67
101,138
82
faceswap
9
lib/cli/launcher.py
Python
26
{ "docstring": " If the user is running the GUI, test whether the tkinter app is available on their\n machine. If not exit gracefully.\n\n This avoids having to import every tkinter function within the GUI in a wrapper and\n potentially spamming traceback errors to console.\n\n Raises\n ------\n FaceswapError\n If tkinter cannot be imported\n ", "language": "en", "n_whitespaces": 111, "n_words": 50, "vocab_size": 41 }
https://github.com/deepfakes/faceswap.git
1
is_monotonic_increasing
def is_monotonic_increasing(self) -> bool:
    from pandas import Index

    return Index(self).is_monotonic_increasing
acd7218f67fbe31308db7482e11fb9c8f30b51a8
8
base.py
34
DEPR: Index.is_monotonic for Index.is_monotonic_increasing (#45422)
39,484
0
31
19
10
163,673
10
pandas
5
pandas/core/base.py
Python
11
{ "docstring": "\n Return boolean if values in the object are\n monotonic_increasing.\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 55, "n_words": 12, "vocab_size": 12 }
https://github.com/pandas-dev/pandas.git
3
check_yaml_c_exts
def check_yaml_c_exts():
    if testutils.ON_CI and sys.version_info[:2] != (3, 10):
        from yaml import CLoader  # pylint: disable=unused-import


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
9c4169c7b7d96a10012a72c70fc38c6154f7481f
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
9
conftest.py
68
tests: Remove some unused imports
117,234
1
31
28
18
320,623
18
qutebrowser
11
tests/conftest.py
Python
3
{ "docstring": "Make sure PyYAML C extensions are available on CI.\n\n Not available yet with a nightly Python, see:\n https://github.com/yaml/pyyaml/issues/416\n ", "language": "en", "n_whitespaces": 27, "n_words": 18, "vocab_size": 17 }
https://github.com/qutebrowser/qutebrowser.git
3
custom_generator_multi_io_temporal
def custom_generator_multi_io_temporal(self, sample_weights=None):
    batch_size = 3
    num_samples = 3
    iteration = 0
    while True:
        batch_index = iteration * batch_size % num_samples
        iteration += 1
        start = batch_index
        end = start + batch_size
        x = [self.x[start:end], self.x[start:end]]
        y = [self.y1[start:end], self.y2[start:end]]
        if sample_weights:
            sw = tf.nest.map_structure(
                lambda w: w[start:end], sample_weights
            )
        else:
            sw = None
        yield x, y, sw
84afc5193d38057e2e2badf9c889ea87d80d8fbf
15
temporal_sample_weights_correctness_test.py
180
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,689
0
256
116
39
276,584
58
keras
18
keras/tests/temporal_sample_weights_correctness_test.py
Python
18
{ "docstring": "Generator for getting data for temporal multi io model.\n\n Args:\n sample_weights: List of sample_weights.\n\n Yields:\n Tuple of inputs, label, sample weights data.\n ", "language": "en", "n_whitespaces": 61, "n_words": 22, "vocab_size": 20 }
https://github.com/keras-team/keras.git
1
ping_google
def ping_google(sitemap_url=None, ping_url=PING_URL, sitemap_uses_https=True):
    sitemap_full_url = _get_sitemap_full_url(sitemap_url, sitemap_uses_https)
    params = urlencode({"sitemap": sitemap_full_url})
    urlopen("%s?%s" % (ping_url, params))
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
__init__.py
73
Refs #33476 -- Reformatted code with Black.
50,695
0
28
44
15
204,321
16
django
10
django/contrib/sitemaps/__init__.py
Python
4
{ "docstring": "\n Alert Google that the sitemap for the current site has been updated.\n If sitemap_url is provided, it should be an absolute path to the sitemap\n for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this\n function will attempt to deduce it by using urls.reverse().\n ", "language": "en", "n_whitespaces": 62, "n_words": 46, "vocab_size": 34 }
https://github.com/django/django.git
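A short usage sketch for the helper above, assuming django.contrib.sites is configured so the relative path can be resolved to an absolute URL; the sitemap path is illustrative:

from django.contrib.sitemaps import ping_google

# Tell Google that the sitemap for the current Site was updated.
ping_google(sitemap_url="/sitemap.xml")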
1
test_sensors_no_data
async def test_sensors_no_data(self):
    # Setup platform with default responses
    await self._setup_platform(use_default_responses=True)

    # Change mock responses to empty data and refresh the coordinator
    self.picnic_mock().get_user.return_value = {}
    self.picnic_mock().get_cart.return_value = None
    self.picnic_mock().get_deliveries.return_value = None
    self.picnic_mock().get_delivery_position.side_effect = ValueError
    await self._coordinator.async_refresh()

    # Assert all default-enabled sensors have STATE_UNAVAILABLE because the last update failed
    assert self._coordinator.last_update_success is False
    self._assert_sensor("sensor.picnic_cart_total_price", STATE_UNAVAILABLE)
    self._assert_sensor("sensor.picnic_selected_slot_start", STATE_UNAVAILABLE)
    self._assert_sensor("sensor.picnic_selected_slot_end", STATE_UNAVAILABLE)
    self._assert_sensor("sensor.picnic_selected_slot_max_order_time", STATE_UNAVAILABLE)
    self._assert_sensor("sensor.picnic_selected_slot_min_order_value", STATE_UNAVAILABLE)
    self._assert_sensor("sensor.picnic_last_order_max_order_time", STATE_UNAVAILABLE)
    self._assert_sensor("sensor.picnic_last_order_delivery_time", STATE_UNAVAILABLE)
    self._assert_sensor("sensor.picnic_next_delivery_eta_start", STATE_UNAVAILABLE)
    self._assert_sensor("sensor.picnic_next_delivery_eta_end", STATE_UNAVAILABLE)
137793c06748b3914ae4906c9d11599dbd83d1fd
10
test_sensor.py
258
Add sensors for next Picnic deliveries (#66474)
91,520
0
270
148
55
292,434
76
core
17
tests/components/picnic/test_sensor.py
Python
23
{ "docstring": "Test sensor states when the api only returns empty objects.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
2
get_rollback
def get_rollback(self):
    if not self.in_atomic_block:
        raise TransactionManagementError(
            "The rollback flag doesn't work outside of an 'atomic' block."
        )
    return self.needs_rollback
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
base.py
39
Refs #33476 -- Reformatted code with Black.
50,899
0
78
21
20
204,814
20
django
5
django/db/backends/base/base.py
Python
6
{ "docstring": "Get the \"needs rollback\" flag -- for *advanced use* only.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
2
get_file_breaks
def get_file_breaks(self, filename):
    filename = self.canonic(filename)
    if filename in self.breaks:
        return self.breaks[filename]
    else:
        return []
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
bdb.py
58
add python 3.10.4 for windows
56,206
0
65
35
13
221,103
15
XX-Net
5
python3.10.4/Lib/bdb.py
Python
6
{ "docstring": "Return all lines with breakpoints for filename.\n\n If no breakpoints are set, return an empty list.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 15 }
https://github.com/XX-net/XX-Net.git
1
get
def get(self, request, **kwargs):
    instance = self.get_object(**kwargs)

    return render(request, self.get_template_name(), {
        'object': instance,
        **self.get_extra_context(request, instance),
    })
54834c47f8870e7faabcd847c3270da0bd3d2884
11
object_views.py
76
Refactor generic views; add plugins dev documentation
77,673
0
66
48
16
264,301
16
netbox
9
netbox/netbox/views/generic/object_views.py
Python
6
{ "docstring": "\n GET request handler. `*args` and `**kwargs` are passed to identify the object being queried.\n\n Args:\n request: The current request\n ", "language": "en", "n_whitespaces": 52, "n_words": 19, "vocab_size": 18 }
https://github.com/netbox-community/netbox.git
3
dup_eval
def dup_eval(f, a, K):
    result = K.zero

    for c in f:
        result *= a
        result += c

    return result
c52d6ce2c8d5eecc5b891d05b1c32a456c4cd308
8
densetools.py
45
fixed error in primitive_element for some case when ext=False
48,415
0
45
40
15
197,261
19
sympy
7
sympy/polys/densetools.py
Python
8
{ "docstring": "\n Evaluate a polynomial at ``x = a`` in ``K[x]`` using Horner scheme.\n\n Examples\n ========\n\n >>> from sympy.polys import ring, ZZ\n >>> R, x = ring(\"x\", ZZ)\n\n >>> R.dup_eval(x**2 + 2*x + 3, 2)\n 11\n\n ", "language": "en", "n_whitespaces": 59, "n_words": 34, "vocab_size": 30 }
https://github.com/sympy/sympy.git
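The loop above is plain Horner evaluation; a self-contained sketch over ordinary Python integers (no domain object K, an assumption made only to keep the example standalone) mirrors the same accumulation:

def horner_eval(coeffs, a):
    # Coefficients run from highest to lowest degree, as in the dense
    # representation used by dup_eval: [1, 2, 3] means x**2 + 2*x + 3.
    result = 0
    for c in coeffs:
        result = result * a + c
    return result

assert horner_eval([1, 2, 3], 2) == 11  # matches the docstring example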
1
map_relation
def map_relation(self, w):
    array = w.array_form
    s1 = array[0][0]
    s2 = array[1][0]
    key = ((s2, -1), (s1, 1), (s2, 1))
    key = self.free_group.dtype(key)
    return self.pc_presentation[key]
498015021131af4dbb07eb110e5badaba8250c7b
9
pc_groups.py
104
Updated import locations
47,616
0
74
70
20
196,116
25
sympy
11
sympy/combinatorics/pc_groups.py
Python
7
{ "docstring": "\n Return a conjugate relation.\n\n Explanation\n ===========\n\n Given a word formed by two free group elements, the\n corresponding conjugate relation with those free\n group elements is formed and mapped with the collected\n word in the polycyclic presentation.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.named_groups import SymmetricGroup\n >>> from sympy.combinatorics import free_group\n >>> G = SymmetricGroup(3)\n >>> PcGroup = G.polycyclic_group()\n >>> collector = PcGroup.collector\n >>> F, x0, x1 = free_group(\"x0, x1\")\n >>> w = x1*x0\n >>> collector.map_relation(w)\n x1**2\n\n See Also\n ========\n\n pc_presentation\n\n ", "language": "en", "n_whitespaces": 233, "n_words": 78, "vocab_size": 55 }
https://github.com/sympy/sympy.git
7
applied_filters
def applied_filters(context, model, form, query_params):
    user = context['request'].user
    form.is_valid()  # Ensure cleaned_data has been set

    applied_filters = []
    for filter_name in form.changed_data:
        if filter_name not in form.cleaned_data:
            continue

        querydict = query_params.copy()
        if filter_name not in querydict:
            continue

        bound_field = form.fields[filter_name].get_bound_field(form, filter_name)
        querydict.pop(filter_name)
        display_value = ', '.join([str(v) for v in get_selected_values(form, filter_name)])

        applied_filters.append({
            'name': filter_name,
            'value': form.cleaned_data[filter_name],
            'link_url': f'?{querydict.urlencode()}',
            'link_text': f'{bound_field.label}: {display_value}',
        })

    save_link = None
    if user.has_perm('extras.add_savedfilter') and 'filter' not in context['request'].GET:
        content_type = ContentType.objects.get_for_model(model).pk
        parameters = context['request'].GET.urlencode()
        url = reverse('extras:savedfilter_add')
        save_link = f"{url}?content_types={content_type}&parameters={quote(parameters)}"

    return {
        'applied_filters': applied_filters,
        'save_link': save_link,
    }
484efdaf75f267a43f9321b938fda1bc967b9e53
15
helpers.py
370
Closes #9623: Implement saved filters (#10801) * Initial work on saved filters * Return only enabled/shared filters * Add tests * Clean up filtering of usable SavedFilters
78,255
0
283
198
69
265,988
91
netbox
36
netbox/utilities/templatetags/helpers.py
Python
29
{ "docstring": "\n Display the active filters for a given filter form.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/netbox-community/netbox.git
4
text
def text(self) -> str:
    control = {
        Qt.Key_Space: ' ',
        Qt.Key_Tab: '\t',
        Qt.Key_Backspace: '\b',
        Qt.Key_Return: '\r',
        Qt.Key_Enter: '\r',
        Qt.Key_Escape: '\x1b',
    }
    if self.key in control:
        return control[self.key]
    elif not _is_printable(self.key):
        return ''

    text = QKeySequence(self.key).toString()
    if not self.modifiers & Qt.ShiftModifier:
        text = text.lower()
    return text
a20bb67a878b2e68abf8268c1b0a27f018d01352
11
keyutils.py
180
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
117,350
0
200
104
36
320,783
45
qutebrowser
18
qutebrowser/keyinput/keyutils.py
Python
18
{ "docstring": "Get the text which would be displayed when pressing this key.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/qutebrowser/qutebrowser.git
5
train_one_step
def train_one_step(trainer, train_batch, policies_to_train=None) -> Dict:
    config = trainer.config
    workers = trainer.workers
    local_worker = workers.local_worker()
    num_sgd_iter = config.get("num_sgd_iter", 1)
    sgd_minibatch_size = config.get("sgd_minibatch_size", 0)

    learn_timer = trainer._timers[LEARN_ON_BATCH_TIMER]
    with learn_timer:
        # Subsample minibatches (size=`sgd_minibatch_size`) from the
        # train batch and loop through train batch `num_sgd_iter` times.
        if num_sgd_iter > 1 or sgd_minibatch_size > 0:
            info = do_minibatch_sgd(
                train_batch,
                {
                    pid: local_worker.get_policy(pid)
                    for pid in policies_to_train
                    or local_worker.get_policies_to_train(train_batch)
                },
                local_worker,
                num_sgd_iter,
                sgd_minibatch_size,
                [],
            )
        # Single update step using train batch.
        else:
            info = local_worker.learn_on_batch(train_batch)

    learn_timer.push_units_processed(train_batch.count)
    trainer._counters[NUM_ENV_STEPS_TRAINED] += train_batch.count
    trainer._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()

    return info


@DeveloperAPI
00922817b66ee14ba215972a98f416f3d6fef1ba
@DeveloperAPI
17
train_ops.py
245
[RLlib] Rewrite PPO to use training_iteration + enable DD-PPO for Win32. (#23673)
34,101
1
357
151
72
147,843
92
ray
27
rllib/execution/train_ops.py
Python
40
{ "docstring": "Function that improves the all policies in `train_batch` on the local worker.\n\n Examples:\n >>> from ray.rllib.execution.rollout_ops import synchronous_parallel_sample\n >>> trainer = [...] # doctest: +SKIP\n >>> train_batch = synchronous_parallel_sample(trainer.workers) # doctest: +SKIP\n >>> # This trains the policy on one batch.\n >>> results = train_one_step(trainer, train_batch)) # doctest: +SKIP\n {\"default_policy\": ...}\n\n Updates the NUM_ENV_STEPS_TRAINED and NUM_AGENT_STEPS_TRAINED counters as well as\n the LEARN_ON_BATCH_TIMER timer of the `trainer` object.\n ", "language": "en", "n_whitespaces": 121, "n_words": 67, "vocab_size": 47 }
https://github.com/ray-project/ray.git
11
_preprocess
def _preprocess(expr, func=None, hint='_Integral'):
    if isinstance(expr, Pow):
        # if f(x)**p=0 then f(x)=0 (p>0)
        if (expr.exp).is_positive:
            expr = expr.base
    derivs = expr.atoms(Derivative)
    if not func:
        funcs = set().union(*[d.atoms(AppliedUndef) for d in derivs])
        if len(funcs) != 1:
            raise ValueError('The function cannot be '
                             'automatically detected for %s.' % expr)
        func = funcs.pop()
    fvars = set(func.args)
    if hint is None:
        return expr, func
    reps = [(d, d.doit()) for d in derivs
            if not hint.endswith('_Integral') or d.has(func) or set(d.variables) & fvars]
    eq = expr.subs(reps)
    return eq, func
65be461082dda54c8748922f9c29a19af1279fe1
14
deutils.py
268
Remove abbreviations in documentation
48,503
0
200
163
61
197,360
83
sympy
29
sympy/solvers/deutils.py
Python
18
{ "docstring": "Prepare expr for solving by making sure that differentiation\n is done so that only func remains in unevaluated derivatives and\n (if hint does not end with _Integral) that doit is applied to all\n other derivatives. If hint is None, do not do any differentiation.\n (Currently this may cause some simple differential equations to\n fail.)\n\n In case func is None, an attempt will be made to autodetect the\n function to be solved for.\n\n >>> from sympy.solvers.deutils import _preprocess\n >>> from sympy import Derivative, Function\n >>> from sympy.abc import x, y, z\n >>> f, g = map(Function, 'fg')\n\n If f(x)**p == 0 and p>0 then we can solve for f(x)=0\n >>> _preprocess((f(x).diff(x)-4)**5, f(x))\n (Derivative(f(x), x) - 4, f(x))\n\n Apply doit to derivatives that contain more than the function\n of interest:\n\n >>> _preprocess(Derivative(f(x) + x, x))\n (Derivative(f(x), x) + 1, f(x))\n\n Do others if the differentiation variable(s) intersect with those\n of the function of interest or contain the function of interest:\n\n >>> _preprocess(Derivative(g(x), y, z), f(y))\n (0, f(y))\n >>> _preprocess(Derivative(f(y), z), f(y))\n (0, f(y))\n\n Do others if the hint does not end in '_Integral' (the default\n assumes that it does):\n\n >>> _preprocess(Derivative(g(x), y), f(x))\n (Derivative(g(x), y), f(x))\n >>> _preprocess(Derivative(f(x), y), f(x), hint='')\n (0, f(x))\n\n Do not do any derivatives if hint is None:\n\n >>> eq = Derivative(f(x) + 1, x) + Derivative(f(x), y)\n >>> _preprocess(eq, f(x), hint=None)\n (Derivative(f(x) + 1, x) + Derivative(f(x), y), f(x))\n\n If it's not clear what the function of interest is, it must be given:\n\n >>> eq = Derivative(f(x) + g(x), x)\n >>> _preprocess(eq, g(x))\n (Derivative(f(x), x) + Derivative(g(x), x), g(x))\n >>> try: _preprocess(eq)\n ... except ValueError: print(\"A ValueError was raised.\")\n A ValueError was raised.\n\n ", "language": "en", "n_whitespaces": 402, "n_words": 276, "vocab_size": 153 }
https://github.com/sympy/sympy.git
5
_clean_yaml
def _clean_yaml(self, data):
    records = []
    try:
        for data in yaml.load_all(data, Loader=yaml.SafeLoader):
            if type(data) == list:
                records.extend(data)
            elif type(data) == dict:
                records.append(data)
            else:
                raise forms.ValidationError({
                    self.data_field: _(
                        "Invalid YAML data. Data must be in the form of multiple documents, or a single document "
                        "comprising a list of dictionaries."
                    )
                })
    except yaml.error.YAMLError as err:
        raise forms.ValidationError({
            self.data_field: f"Invalid YAML data: {err}"
        })
    return records
80ced6b782e15179e7f35f0ef6737a65ddd60f92
20
forms.py
174
Closes #11163: Auto-detect data format during bulk import
78,362
0
369
102
54
266,301
65
netbox
20
netbox/utilities/forms/forms.py
Python
20
{ "docstring": "\n Clean YAML-formatted data. Data must be either\n a) A single document comprising a list of dictionaries (each representing an object), or\n b) Multiple documents, separated with the '---' token\n ", "language": "en", "n_whitespaces": 62, "n_words": 29, "vocab_size": 29 }
https://github.com/netbox-community/netbox.git
1
test_requester_is_no_admin
def test_requester_is_no_admin(self):
    url = self.url_prefix % "@bob:test"

    channel = self.make_request(
        "GET",
        url,
        access_token=self.other_user_token,
    )

    self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
    self.assertEqual("You are not a server admin", channel.json_body["error"])

    channel = self.make_request(
        "PUT",
        url,
        access_token=self.other_user_token,
        content=b"{}",
    )

    self.assertEqual(HTTPStatus.FORBIDDEN, channel.code, msg=channel.json_body)
    self.assertEqual("You are not a server admin", channel.json_body["error"])
18862f20b5495bdc556c54e92fd4b1efdc718ba7
10
test_user.py
183
Remove the 'password_hash' from the Users Admin API endpoint response dictionary (#11576)
70,969
0
189
114
25
246,053
42
synapse
15
tests/rest/admin/test_user.py
Python
17
{ "docstring": "\n If the user is not a server admin, an error is returned.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
2
axis0_safe_slice
def axis0_safe_slice(X, mask, len_mask):
    if len_mask != 0:
        return X[safe_mask(X, mask), :]
    return np.zeros(shape=(0, X.shape[1]))
537c325f2927895449ce418b3a77750135c0ba7b
11
__init__.py
68
DOC Ensure that sklearn.utils.axis0_safe_slice passes numpydoc (#24561)
76,691
0
31
45
14
261,218
15
scikit-learn
8
sklearn/utils/__init__.py
Python
4
{ "docstring": "Return a mask which is safer to use on X than safe_mask.\n\n This mask is safer than safe_mask since it returns an\n empty array, when a sparse matrix is sliced with a boolean mask\n with all False, instead of raising an unhelpful error in older\n versions of SciPy.\n\n See: https://github.com/scipy/scipy/issues/5361\n\n Also note that we can avoid doing the dot product by checking if\n the len_mask is not zero in _huber_loss_and_gradient but this\n is not going to be the bottleneck, since the number of outliers\n and non_outliers are typically non-zero and it makes the code\n tougher to follow.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}\n Data on which to apply mask.\n\n mask : ndarray\n Mask to be used on X.\n\n len_mask : int\n The length of the mask.\n\n Returns\n -------\n mask : ndarray\n Array that is safe to use on X.\n ", "language": "en", "n_whitespaces": 225, "n_words": 140, "vocab_size": 91 }
https://github.com/scikit-learn/scikit-learn.git
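A small usage sketch with a dense array and hypothetical data; the helper exists mainly for sparse matrices, where an all-False boolean mask could otherwise raise in older SciPy versions:

import numpy as np
from sklearn.utils import axis0_safe_slice

X = np.arange(12).reshape(4, 3)
mask = np.array([True, False, True, False])

rows = axis0_safe_slice(X, mask, int(mask.sum()))        # the two selected rows
empty = axis0_safe_slice(X, np.zeros(4, dtype=bool), 0)  # shape (0, 3), no error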
1
test_binary_serialization
def test_binary_serialization() -> None:
    uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))
    obj = SpecificLocation(id=uid, name="Test")
    assert sy.serialize(obj, to_bytes=True) == blob_bytes
b61c1fc4b83fc740d3d9d0d84d0ca6022a3c49bb
12
specific_test.py
78
Refactored store interface to eliminate confusion with __getitem__ - Fixed serde tests effected by protobuf magic bytes
118
0
28
47
15
780
16
PySyft
15
packages/syft/tests/syft/core/io/location/specific_test.py
Python
5
{ "docstring": "Tests that binary SpecificLocation serializes as expected", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/OpenMined/PySyft.git
2
col_op
def col_op(self, j, f):
    for i in range(self.rows):
        self[i, j] = f(self[i, j], i)
59d22b6bb7287613d598611027f640d068ca5748
11
repmatrix.py
56
Moved imports to higher level
47,900
0
39
38
14
196,400
14
sympy
7
sympy/matrices/repmatrix.py
Python
3
{ "docstring": "In-place operation on col j using two-arg functor whose args are\n interpreted as (self[i, j], i).\n\n Examples\n ========\n\n >>> from sympy import eye\n >>> M = eye(3)\n >>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M\n Matrix([\n [1, 2, 0],\n [0, 1, 0],\n [0, 0, 1]])\n\n See Also\n ========\n col\n row_op\n ", "language": "en", "n_whitespaces": 157, "n_words": 52, "vocab_size": 45 }
https://github.com/sympy/sympy.git
1
update_sub
def update_sub(x, decrement):
    return tf.compat.v1.assign_sub(x, decrement)


@keras_export("keras.backend.moving_average_update")
@doc_controls.do_not_generate_docs
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.moving_average_update") @doc_controls.do_not_generate_docs
9
backend.py
51
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,190
1
12
21
8
269,566
8
keras
10
keras/backend.py
Python
2
{ "docstring": "Update the value of `x` by subtracting `decrement`.\n\n Args:\n x: A Variable.\n decrement: A tensor of same shape as `x`.\n\n Returns:\n The variable `x` updated.\n ", "language": "en", "n_whitespaces": 55, "n_words": 25, "vocab_size": 22 }
https://github.com/keras-team/keras.git
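A usage sketch of the backend helper above through the public tf.keras.backend namespace; the variable values are illustrative:

import tensorflow as tf
from tensorflow.keras import backend as K

v = tf.Variable(10.0)
K.update_sub(v, 3.0)   # in-place subtraction, v now holds 7.0
print(v.numpy())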
11
_insert_layers
def _insert_layers(self, layers, relevant_nodes=None):
    layers = tf.nest.flatten(layers)
    tf_utils.assert_no_legacy_layers(layers)
    node_to_depth = {}
    for depth, nodes in self._nodes_by_depth.items():
        node_to_depth.update({node: depth for node in nodes})
    # The nodes of these Layers that are relevant to this Network. If not
    # provided, assume all Nodes are relevant
    if not relevant_nodes:
        relevant_nodes = tf.nest.flatten(
            [layer._inbound_nodes for layer in layers]
        )
    network_nodes = set(relevant_nodes + list(node_to_depth.keys()))
84afc5193d38057e2e2badf9c889ea87d80d8fbf
13
functional.py
155
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,733
0
171
275
48
271,241
60
keras
22
keras/engine/functional.py
Python
44
{ "docstring": "Inserts Layers into the Network after Network creation.\n\n This is only valid for Keras Graph Networks. Layers added via this function\n will be included in the `call` computation and `get_config` of this Network.\n They will not be added to the Network's outputs.\n\n Args:\n layers: Arbitrary nested structure of Layers. Layers must be reachable\n from one or more of the `keras.Input` Tensors that correspond to this\n Network's inputs.\n relevant_nodes: Nodes from the Layers that should be considered part of\n this Network. If `None`, all Nodes will be considered part of this\n Network.\n\n Raises:\n ValueError: If the layers depend on `Input`s not found in this Model.\n ", "language": "en", "n_whitespaces": 218, "n_words": 104, "vocab_size": 67 }
https://github.com/keras-team/keras.git
5
test_previewing_multiple_kubernetes_deployments_from_python
def test_previewing_multiple_kubernetes_deployments_from_python():
    result = invoke_and_assert(
        [
            "deployment",
            "preview",
            "./tests/deployment_test_files/multiple_kubernetes_deployments.py",
        ],
        expected_output_contains="kind: Job",
    )
    assert result.stdout.endswith("\n")

    previews = [p.strip() for p in re.split("Preview for .+:", result.stdout) if p]
    assert len(previews) == 4  # there should be 3 K8s and 1 non-K8s in the file

    # spot-check a few attributes of the first one
    manifest = yaml.load(previews[0], yaml.SafeLoader)
    assert manifest["apiVersion"] == "batch/v1"
    assert manifest["kind"] == "Job"
    assert manifest["metadata"]["generateName"] == "cool-name"
    container = manifest["spec"]["template"]["spec"]["containers"][0]
    assert "PREFECT_TEST_MODE" in [variable["name"] for variable in container["env"]]

    # spot-check a few attributes of the third one, which is customized
    manifest = yaml.load(previews[2], yaml.SafeLoader)
    assert manifest["apiVersion"] == "batch/v1"
    assert manifest["kind"] == "Job"
    assert manifest["metadata"]["generateName"] == "cool-name"
    container = manifest["spec"]["template"]["spec"]["containers"][0]
    assert "MY_ENV_VAR" in [variable["name"] for variable in container["env"]]
5afded9fe6724d9e336f59792ee1d60656a2d94d
12
test_deployment_preview.py
375
Add a CLI command to preview how a FlowRun will appear in any FlowRunner's execution environment (PrefectHQ/orion#1971) Co-authored-by: Terrence Dorsey <[email protected]> Co-authored-by: Michael Adkins <[email protected]>
11,494
0
233
209
68
56,283
118
prefect
18
tests/cli/test_deployment_preview.py
Python
24
{ "docstring": "`prefect deployment preview my-flow-file.py` should render multiple\n Kubernetes Jobs from a deployment file", "language": "en", "n_whitespaces": 15, "n_words": 13, "vocab_size": 12 }
https://github.com/PrefectHQ/prefect.git
2
restore
def restore(self, parameters):
    for c_param, param in zip(self.collected_params, parameters):
        param.data.copy_(c_param.data)
ca86da3a30c4e080d4db8c25fca73de843663cb4
10
ema.py
51
release more models
36,941
0
35
32
9
157,475
10
stablediffusion
9
ldm/modules/ema.py
Python
3
{ "docstring": "\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n ", "language": "en", "n_whitespaces": 125, "n_words": 55, "vocab_size": 36 }
https://github.com/Stability-AI/stablediffusion.git
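A sketch of the store / copy_to / restore round trip the docstring describes, assuming the LitEma wrapper defined in the same module (ldm/modules/ema.py, per this record's path) and a toy torch module:

import torch
from ldm.modules.ema import LitEma  # module path taken from this record

model = torch.nn.Linear(4, 2)
ema = LitEma(model)
# ... during training, calling ema(model) would update the running averages ...

params = list(model.parameters())
ema.store(params)     # stash the current training weights
ema.copy_to(model)    # evaluate or checkpoint with the averaged weights
ema.restore(params)   # put the original training weights back afterwards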
1
test_lassolarsic_alpha_selection
def test_lassolarsic_alpha_selection(criterion):
    model = make_pipeline(StandardScaler(), LassoLarsIC(criterion=criterion))
    model.fit(X, y)

    best_alpha_selected = np.argmin(model[-1].criterion_)
    assert best_alpha_selected == 7


@pytest.mark.parametrize("fit_intercept", [True, False])
e41753ebd57c44ae91b389f190c43ddc0b384a75
@pytest.mark.parametrize("fit_intercept", [True, False])
11
test_least_angle.py
101
MAINT Clean deprecation for 1.2: normalize in linear models (#24391)
76,733
1
32
47
16
261,292
18
scikit-learn
16
sklearn/linear_model/tests/test_least_angle.py
Python
5
{ "docstring": "Check that we properly compute the AIC and BIC score.\n\n In this test, we reproduce the example of the Fig. 2 of Zou et al.\n (reference [1] in LassoLarsIC) In this example, only 7 features should be\n selected.\n ", "language": "en", "n_whitespaces": 50, "n_words": 38, "vocab_size": 32 }
https://github.com/scikit-learn/scikit-learn.git
5
equals
def equals(self, *args, **kwargs):
    args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
    kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs}
    return self.table.equals(*args, **kwargs)
e35be138148333078284b942ccc9ed7b1d826f97
11
table.py
105
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
21,851
0
61
70
25
104,415
33
datasets
11
src/datasets/table.py
Python
4
{ "docstring": "\n Check if contents of two tables are equal.\n\n Args:\n other (:class:`datasets.table.Table`):\n Table to compare against.\n check_metadata (:obj:`bool`, defaults to :obj:`False`):\n Whether schema metadata equality should be checked as well.\n\n Returns:\n :obj:`bool`\n ", "language": "en", "n_whitespaces": 123, "n_words": 31, "vocab_size": 30 }
https://github.com/huggingface/datasets.git
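A usage sketch assuming the InMemoryTable subclass from the same module, which wraps a pyarrow table; the delegation above unwraps wrapped arguments before comparing:

import pyarrow as pa
from datasets.table import InMemoryTable

a = InMemoryTable(pa.table({"x": [1, 2, 3]}))
b = InMemoryTable(pa.table({"x": [1, 2, 3]}))
c = InMemoryTable(pa.table({"x": [1, 2, 4]}))

print(a.equals(b))   # True: the underlying pyarrow tables match
print(a.equals(c))   # False: differing values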
6
makelink
def makelink(self, tarinfo, targetpath):
    try:
        # For systems that support symbolic and hard links.
        if tarinfo.issym():
            os.symlink(tarinfo.linkname, targetpath)
        else:
            # See extract().
            if os.path.exists(tarinfo._link_target):
                os.link(tarinfo._link_target, targetpath)
            else:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
    except symlink_exception:
        if tarinfo.issym():
            linkpath = os.path.join(os.path.dirname(tarinfo.name),
                                    tarinfo.linkname)
        else:
            linkpath = tarinfo.linkname
    else:
        try:
            self._extract_member(self._find_link_target(tarinfo),
                                 targetpath)
        except KeyError:
            raise ExtractError("unable to resolve link inside archive")
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
17
tarfile.py
219
Vendor in pip 22.1.2
3,804
0
432
133
39
21,391
54
pipenv
21
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
Python
22
{ "docstring": "Make a (symbolic) link called targetpath. If it cannot be created\n (platform limitation), we try to make a copy of the referenced file\n instead of a link.\n ", "language": "en", "n_whitespaces": 52, "n_words": 27, "vocab_size": 24 }
https://github.com/pypa/pipenv.git
2
getincrementaldecoder
def getincrementaldecoder(encoding):
    decoder = lookup(encoding).incrementaldecoder
    if decoder is None:
        raise LookupError(encoding)
    return decoder
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
codecs.py
45
add python 3.10.4 for windows
56,378
0
32
26
11
221,364
13
XX-Net
6
python3.10.4/Lib/codecs.py
Python
5
{ "docstring": " Lookup up the codec for the given encoding and return\n its IncrementalDecoder class or factory function.\n\n Raises a LookupError in case the encoding cannot be found\n or the codecs doesn't provide an incremental decoder.\n\n ", "language": "en", "n_whitespaces": 59, "n_words": 34, "vocab_size": 29 }
https://github.com/XX-net/XX-Net.git
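A usage sketch through the public codecs module: the incremental decoder buffers a multi-byte sequence split across chunks instead of failing on the partial bytes:

import codecs

decoder = codecs.getincrementaldecoder("utf-8")()
part1 = decoder.decode(b"\xe2\x82")          # incomplete EURO SIGN, buffered
part2 = decoder.decode(b"\xac", final=True)  # completes the character
print(part1 + part2)                         # "€"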
4
_check_expected_dtype
def _check_expected_dtype(self, obj, label):
    if isinstance(obj, Index):
        assert obj.dtype == label
    elif isinstance(obj, Series):
        if label.startswith("period"):
            assert obj.dtype == "Period[M]"
        else:
            assert obj.dtype == label
    else:
        raise ValueError
ee6b0a09fff7789879c3322edffe9f84d10acee3
13
test_append_common.py
96
ENH: Index[bool] (#45061)
39,575
0
130
58
18
164,583
28
pandas
10
pandas/tests/reshape/concat/test_append_common.py
Python
10
{ "docstring": "\n Check whether obj has expected dtype depending on label\n considering not-supported dtypes\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
https://github.com/pandas-dev/pandas.git
1
test_no_duplicates_for_m2m_in_list_filter
def test_no_duplicates_for_m2m_in_list_filter(self):
    blues = Genre.objects.create(name="Blues")
    band = Band.objects.create(name="B.B. King Review", nr_of_members=11)
    band.genres.add(blues)
    band.genres.add(blues)
    m = BandAdmin(Band, custom_site)
    request = self.factory.get("/band/", data={"genres": blues.pk})
    request.user = self.superuser

    cl = m.get_changelist_instance(request)
    cl.get_results(request)

    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
    # Queryset must be deletable.
    self.assertIs(cl.queryset.query.distinct, False)
    cl.queryset.delete()
    self.assertEqual(cl.queryset.count(), 0)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
tests.py
238
Refs #33476 -- Reformatted code with Black.
51,826
0
159
144
40
206,991
47
django
33
tests/admin_changelist/tests.py
Python
14
{ "docstring": "\n Regression test for #13902: When using a ManyToMany in list_filter,\n results shouldn't appear more than once. Basic ManyToMany.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 18 }
https://github.com/django/django.git
14
_handle_transforms
def _handle_transforms(self, element, mobject):
    if element.hasAttribute("x") and element.hasAttribute("y"):
        x = self._attribute_to_float(element.getAttribute("x"))
        # Flip y
        y = -self._attribute_to_float(element.getAttribute("y"))
        mobject.shift(x * RIGHT + y * UP)

    transform_attr_value = element.getAttribute("transform")

    # parse the various transforms in the attribute value
    transform_names = ["matrix", "translate", "scale", "rotate", "skewX", "skewY"]

    # Borrowed/Inspired from:
    # https://github.com/cjlano/svg/blob/3ea3384457c9780fa7d67837c9c5fd4ebc42cb3b/svg/svg.py#L75

    # match any SVG transformation with its parameter (until final parenthesis)
    # [^)]* == anything but a closing parenthesis
    # '|'.join == OR-list of SVG transformations
    transform_regex = "|".join([x + r"[^)]*\)" for x in transform_names])
    transforms = re.findall(transform_regex, transform_attr_value)

    number_regex = r"[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?"

    for t in transforms:
        op_name, op_args = t.split("(")
        op_name = op_name.strip()
        op_args = [float(x) for x in re.findall(number_regex, op_args)]

        if op_name == "matrix":
            transform_args = np.array(op_args).reshape([3, 2])
            x = transform_args[2][0]
            y = -transform_args[2][1]
            matrix = np.identity(self.dim)
            matrix[:2, :2] = transform_args[:2, :]
            matrix[1] *= -1
            matrix[:, 1] *= -1

            for mob in mobject.family_members_with_points():
                if config["renderer"] == "opengl":
                    mob.points = np.dot(mob.points, matrix)
                else:
                    mob.points = np.dot(mob.points, matrix)
            mobject.shift(x * RIGHT + y * UP)
        elif op_name == "scale":
            scale_values = op_args
            if len(scale_values) == 2:
                scale_x, scale_y = scale_values
                mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)
            elif len(scale_values) == 1:
                scale = scale_values[0]
                mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
        elif op_name == "translate":
            if len(op_args) == 2:
                x, y = op_args
            else:
                x = op_args
                y = 0
            mobject.shift(x * RIGHT + y * DOWN)
        else:
            # TODO: handle rotate, skewX and skewY
            # for now adding a warning message
            logger.warning(
                "Handling of %s transform is not supported yet!",
                op_name,
            )
902e7eb4f0147b5882a613b67467e38a1d47f01e
18
svg_mobject.py
706
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
46,070
0
1,007
429
143
189,462
245
manim
48
manim/mobject/svg/svg_mobject.py
Python
48
{ "docstring": "Applies the SVG transform to the specified mobject. Transforms include:\n ``matrix``, ``translate``, and ``scale``.\n\n Parameters\n ----------\n element : :class:`minidom.Element`\n The transform command to perform\n\n mobject : :class:`Mobject`\n The Mobject to transform.\n ", "language": "en", "n_whitespaces": 95, "n_words": 31, "vocab_size": 25 }
https://github.com/ManimCommunity/manim.git
1
fit_predict
def fit_predict(self, features, target, sample_weight=None, groups=None):
    self.fit(features, target, sample_weight=sample_weight, groups=groups)

    return self.predict(features)
388616b6247ca4ea8de4e2f340d6206aee523541
8
base.py
60
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,611
0
33
41
11
181,835
12
tpot
8
tpot/base.py
Python
3
{ "docstring": "Call fit and predict in sequence.\n\n Parameters\n ----------\n features: array-like {n_samples, n_features}\n Feature matrix\n target: array-like {n_samples}\n List of class labels for prediction\n sample_weight: array-like {n_samples}, optional\n Per-sample weights. Higher weights force TPOT to put more emphasis on those points\n groups: array-like, with shape {n_samples, }, optional\n Group labels for the samples used when performing cross-validation.\n This parameter should only be used in conjunction with sklearn's Group cross-validation\n functions, such as sklearn.model_selection.GroupKFold\n\n Returns\n ----------\n array-like: {n_samples}\n Predicted target for the provided features\n\n ", "language": "en", "n_whitespaces": 229, "n_words": 82, "vocab_size": 68 }
https://github.com/EpistasisLab/tpot.git
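A usage sketch with deliberately tiny search settings so it is quick to run; real searches use larger generation and population budgets:

from tpot import TPOTClassifier
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
tpot = TPOTClassifier(generations=1, population_size=5, random_state=0)
preds = tpot.fit_predict(X, y)   # runs the pipeline search, then predicts on X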
1
test_thermostat_missing_set_point
async def test_thermostat_missing_set_point(hass):
    await setup_climate(
        hass,
        {
            "sdm.devices.traits.ThermostatHvac": {"status": "OFF"},
            "sdm.devices.traits.ThermostatMode": {
                "availableModes": ["HEAT", "COOL", "HEATCOOL", "OFF"],
                "mode": "HEATCOOL",
            },
        },
    )

    assert len(hass.states.async_all()) == 1
    thermostat = hass.states.get("climate.my_thermostat")
    assert thermostat is not None
    assert thermostat.state == HVAC_MODE_HEAT_COOL
    assert thermostat.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
    assert thermostat.attributes[ATTR_CURRENT_TEMPERATURE] is None
    assert set(thermostat.attributes[ATTR_HVAC_MODES]) == {
        HVAC_MODE_HEAT,
        HVAC_MODE_COOL,
        HVAC_MODE_HEAT_COOL,
        HVAC_MODE_OFF,
    }
    assert thermostat.attributes[ATTR_TEMPERATURE] is None
    assert thermostat.attributes[ATTR_TARGET_TEMP_LOW] is None
    assert thermostat.attributes[ATTR_TARGET_TEMP_HIGH] is None
    assert ATTR_PRESET_MODE not in thermostat.attributes
    assert ATTR_PRESET_MODES not in thermostat.attributes
    assert ATTR_FAN_MODE not in thermostat.attributes
    assert ATTR_FAN_MODES not in thermostat.attributes
0bcad5579b806284ae0c565bb27ca59ea061b4a1
14
test_climate_sdm.py
275
Set nest climate hvac_action to report idle when hvac mode is not off (#62811)
107,367
0
253
172
49
308,619
87
core
26
tests/components/nest/test_climate_sdm.py
Python
30
{ "docstring": "Test a thermostat missing many thermostat traits in api response.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
setacl
def setacl(self, mailbox, who, what):
    return self._simple_command('SETACL', mailbox, who, what)
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
imaplib.py
38
add python 3.10.4 for windows
55,029
0
24
25
8
217,944
10
XX-Net
6
python3.10.4/Lib/imaplib.py
Python
2
{ "docstring": "Set a mailbox acl.\n\n (typ, [data]) = <instance>.setacl(mailbox, who, what)\n ", "language": "en", "n_whitespaces": 24, "n_words": 10, "vocab_size": 10 }
https://github.com/XX-net/XX-Net.git
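A usage sketch with a hypothetical host, credentials, and mailbox; SETACL is the RFC 4314 ACL extension, so the server must advertise the ACL capability for the call to succeed:

import imaplib

with imaplib.IMAP4_SSL("imap.example.com") as conn:
    conn.login("alice", "app-password")
    typ, data = conn.setacl("Shared/Reports", "bob", "lrs")  # grant read-only rights
    print(typ, data)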
2
_disable_autolayout
def _disable_autolayout():
    # This is a workaround for an issue in matplotlib, for details see
    # https://github.com/mwaskom/seaborn/issues/2914
    # The only affect of this rcParam is to set the default value for
    # layout= in plt.figure, so we could just do that instead.
    # But then we would need to own the complexity of the transition
    # from tight_layout=True -> layout="tight". This seems easier,
    # but can be removed when (if) that is simpler on the matplotlib side,
    # or if the layout algorithms are improved to handle figure legends.
    orig_val = mpl.rcParams["figure.autolayout"]
    try:
        mpl.rcParams["figure.autolayout"] = False
        yield
    finally:
        mpl.rcParams["figure.autolayout"] = orig_val
6460a21555ba6557e1f6f06f4d677d9c19148169
11
utils.py
73
Workaround for matplotlib rc_context issue (#2925) * Workaround for matplotlib rc_context issue Fixes #2914 * Add some additional comments about this workaround
7,478
0
158
34
74
42,078
101
seaborn
4
seaborn/utils.py
Python
7
{ "docstring": "Context manager for preventing rc-controlled auto-layout behavior.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/mwaskom/seaborn.git
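The body above is a generator that saves, overrides, and always restores an rcParam, which suggests it is wrapped with contextlib.contextmanager in the module; a standalone sketch of the same pattern (names are illustrative, not seaborn's own):

import contextlib
import matplotlib as mpl
from matplotlib.figure import Figure

@contextlib.contextmanager
def disable_autolayout():
    # Save, override, and unconditionally restore the rcParam.
    orig_val = mpl.rcParams["figure.autolayout"]
    try:
        mpl.rcParams["figure.autolayout"] = False
        yield
    finally:
        mpl.rcParams["figure.autolayout"] = orig_val

with disable_autolayout():
    fig = Figure()   # created while the auto-layout default is off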
9
_get_frame_ranges
def _get_frame_ranges(self) -> Optional[List[Tuple[int, int]]]:
    if not self._args.frame_ranges:
        logger.debug("No frame range set")
        return None

    minframe, maxframe = None, None
    if self._images.is_video:
        minframe, maxframe = 1, self._images.count
    else:
        indices = [int(self._imageidxre.findall(os.path.basename(filename))[0])
                   for filename in self._images.file_list]
        if indices:
            minframe, maxframe = min(indices), max(indices)
    logger.debug("minframe: %s, maxframe: %s", minframe, maxframe)

    if minframe is None or maxframe is None:
        raise FaceswapError("Frame Ranges specified, but could not determine frame numbering "
                            "from filenames")

    retval = []
    for rng in self._args.frame_ranges:
        if "-" not in rng:
            raise FaceswapError("Frame Ranges not specified in the correct format")
        start, end = rng.split("-")
        retval.append((max(int(start), minframe), min(int(end), maxframe)))
    logger.debug("frame ranges: %s", retval)
    return retval
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
18
convert.py
336
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,778
0
362
206
70
101,363
103
faceswap
32
scripts/convert.py
Python
34
{ "docstring": " Obtain the frame ranges that are to be converted.\n\n If frame ranges have been specified, then split the command line formatted arguments into\n ranges that can be used.\n\n Returns\n list or ``None``\n A list of frames to be processed, or ``None`` if the command line argument was not\n used\n ", "language": "en", "n_whitespaces": 108, "n_words": 49, "vocab_size": 35 }
https://github.com/deepfakes/faceswap.git
1
test_run_translation_no_trainer
def test_run_translation_no_trainer(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
    testargs = f.split()

    run_command(self._launch_args + testargs)
    result = get_results(tmp_dir)
    self.assertGreaterEqual(result["eval_bleu"], 30)
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
    self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
99eb9b523f9b9ea6096323ce5610ce6633acc88a
12
test_accelerate_examples.py
159
Fix `no_trainer` CI (#18242) * Fix all tests
5,908
0
68
89
17
32,334
20
transformers
17
examples/pytorch/test_accelerate_examples.py
Python
25
{ "docstring": "\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ", "language": "en", "n_whitespaces": 209, "n_words": 25, "vocab_size": 22 }
https://github.com/huggingface/transformers.git
3
convert_example
def convert_example(example, tokenizer, is_test=False, language='en'):
    if language == 'ch':
        q_name = "query"
        t_name = "title"
        label = "label"
    else:
        q_name = "sentence1"
        t_name = "sentence2"
        label = "labels"

    query, title = example[q_name], example[t_name]
    query_ids = np.array(tokenizer.encode(query), dtype="int64")
    query_seq_len = np.array(len(query_ids), dtype="int64")
    title_ids = np.array(tokenizer.encode(title), dtype="int64")
    title_seq_len = np.array(len(title_ids), dtype="int64")
    result = [query_ids, title_ids, query_seq_len, title_seq_len]
    if not is_test:
        label = np.array(example[label], dtype="int64")
        result.append(label)
    return result
93cae49c0c572b5c1ac972759140fbe924b0374d
12
utils.py
266
Add NLP model interpretation (#1752) * upload NLP interpretation * fix problems and relocate project * remove abandoned picture * remove abandoned picture * fix dead link in README * fix dead link in README * fix code style problems * fix CR round 1 * remove .gitkeep files * fix code style * fix file encoding problem * fix code style * delete duplicated files due to directory rebuild * fix CR round 2 * fix code style * fix ernie tokenizer * fix code style * fix problem from CR round 1 * fix bugs * fix README * remove duplicated files * deal with diff of old and new tokenizer results * fix CR round 4 * fix code style * add missing dependence * fix broken import path * move some data file to cloud * MRC upper case to lower case Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: binlinquge <xxx> Co-authored-by: Guo Sheng <[email protected]>
118,323
0
154
156
43
322,992
65
PaddleNLP
21
examples/model_interpretation/task/similarity/simnet/utils.py
Python
19
{ "docstring": "\n Builds model inputs from a sequence for sequence classification tasks. \n It use `jieba.cut` to tokenize text.\n\n Args:\n example(obj:`list[str]`): List of input data, containing text and label if it have label.\n tokenizer(obj: paddlenlp.data.JiebaTokenizer): It use jieba to cut the chinese string.\n is_test(obj:`False`, defaults to `False`): Whether the example contains label or not.\n\n Returns:\n query_ids(obj:`list[int]`): The list of query ids.\n title_ids(obj:`list[int]`): The list of title ids.\n query_seq_len(obj:`int`): The input sequence query length.\n title_seq_len(obj:`int`): The input sequence title length.\n label(obj:`numpy.array`, data type of int64, optional): The input label if not is_test.\n ", "language": "en", "n_whitespaces": 161, "n_words": 88, "vocab_size": 62 }
https://github.com/PaddlePaddle/PaddleNLP.git
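A minimal usage sketch for the convert_example entry above. The tokenizer here is a hypothetical stand-in with an encode method that maps words to ids (the real code uses paddlenlp's JiebaTokenizer, which is not assumed here), and convert_example itself is assumed to be in scope as defined in the record.

import numpy as np

# Hypothetical toy tokenizer: any object with an `encode` method works.
class ToyTokenizer:
    def __init__(self):
        self.vocab = {}

    def encode(self, text):
        return [self.vocab.setdefault(w, len(self.vocab) + 1) for w in text.split()]

tokenizer = ToyTokenizer()
example = {"sentence1": "a man is eating", "sentence2": "a person eats", "labels": 1}

# With language="en" the function reads sentence1/sentence2/labels and returns
# ids, ids, lengths, and (since is_test=False) the label as int64 arrays.
query_ids, title_ids, query_len, title_len, label = convert_example(
    example, tokenizer, is_test=False, language="en"
)
print(query_ids, title_ids, query_len, title_len, label)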
9
_load_included_file
def _load_included_file(self, included_file, iterator, is_handler=False):
    display.debug("loading included file: %s" % included_file._filename)
    try:
        data = self._loader.load_from_file(included_file._filename)
        if data is None:
            return []
        elif not isinstance(data, list):
            raise AnsibleError("included task files must contain a list of tasks")

        ti_copy = self._copy_included_file(included_file)

        block_list = load_list_of_blocks(
            data,
            play=iterator._play,
            parent_block=ti_copy.build_parent_block(),
            role=included_file._task._role,
            use_handlers=is_handler,
            loader=self._loader,
            variable_manager=self._variable_manager,
        )

        # since we skip incrementing the stats when the task result is
        # first processed, we do so now for each host in the list
        for host in included_file._hosts:
            self._tqm._stats.increment('ok', host.name)
    except AnsibleParserError:
        raise
    except AnsibleError as e:
        if isinstance(e, AnsibleFileNotFound):
            reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
        else:
            reason = to_text(e)

        for r in included_file._results:
            r._result['failed'] = True

        for host in included_file._hosts:
            tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
            self._tqm._stats.increment('failures', host.name)
            self._tqm.send_callback('v2_runner_on_failed', tr)
        raise AnsibleError(reason) from e

    # finally, send the callback and return the list of blocks loaded
    self._tqm.send_callback('v2_playbook_on_include', included_file)
    display.debug("done processing included file")
    return block_list
42d8a9daa89907545ebd208f4fd0a9192738c6a6
17
__init__.py
427
Prevent double failing hosts for includes in loops (#76928) Fixes #23161
79,472
0
621
267
112
268,291
153
ansible
51
lib/ansible/plugins/strategy/__init__.py
Python
37
{ "docstring": "\n Loads an included YAML file of tasks, applying the optional set of variables.\n\n Raises AnsibleError exception in case of a failure during including a file,\n in such case the caller is responsible for marking the host(s) as failed\n using PlayIterator.mark_host_failed().\n ", "language": "en", "n_whitespaces": 76, "n_words": 40, "vocab_size": 33 }
https://github.com/ansible/ansible.git
1
test_named_group_field_choices_change_list
def test_named_group_field_choices_change_list(self):
    link1 = reverse("admin:admin_views_fabric_change", args=(self.fab1.pk,))
    link2 = reverse("admin:admin_views_fabric_change", args=(self.fab2.pk,))
    response = self.client.get(reverse("admin:admin_views_fabric_changelist"))
    fail_msg = (
        "Changelist table isn't showing the right human-readable values "
        "set by a model field 'choices' option named group."
    )
    self.assertContains(
        response,
        '<a href="%s">Horizontal</a>' % link1,
        msg_prefix=fail_msg,
        html=True,
    )
    self.assertContains(
        response,
        '<a href="%s">Vertical</a>' % link2,
        msg_prefix=fail_msg,
        html=True,
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
tests.py
160
Refs #33476 -- Reformatted code with Black.
52,047
0
233
97
41
207,677
53
django
16
tests/admin_views/tests.py
Python
20
{ "docstring": "\n Ensures the admin changelist shows correct values in the relevant column\n for rows corresponding to instances of a model in which a named group\n has been used in the choices option of a field.\n ", "language": "en", "n_whitespaces": 63, "n_words": 34, "vocab_size": 27 }
https://github.com/django/django.git
1
test_nonconflicting_mixed_basenames
def test_nonconflicting_mixed_basenames(self):
    self.router.register(r'notes', NoteViewSet)
    self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='routertestmodel_kwduplicate')
    self.router.register(r'notes_duplicate', NoteViewSet, basename='routertestmodel_duplicate')
48a21aa0eb3a95d32456c2a927eff9552a04231e
9
test_routers.py
78
raise ImproperlyConfigured exception if `basename` is not unique (#8438) * raise ImproperlyConfigured if basename already exists * rename already_registered function; return True/False * additional basename tests * additional basename tests * Update rest_framework/routers.py Co-authored-by: David Graves <[email protected]> Co-authored-by: Asif Saif Uddin <[email protected]>
9,586
0
38
47
10
48,735
10
django-rest-framework
7
tests/test_routers.py
Python
4
{ "docstring": "\n Ensure 2 routers with the same model, and a distinct basename\n specified on the second router does not fail\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
https://github.com/encode/django-rest-framework.git
3
find_most_preferred_tag
def find_most_preferred_tag(self, tags, tag_to_priority):
    # type: (List[Tag], Dict[Tag, int]) -> int
    return min(
        tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
    )
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
9
wheel.py
44
upd; format
12,306
0
62
28
21
60,859
23
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_internal/models/wheel.py
Python
4
{ "docstring": "Return the priority of the most preferred tag that one of the wheel's file\n tag combinations acheives in the given list of supported tags using the given\n tag_to_priority mapping, where lower priorities are more-preferred.\n\n This is used in place of support_index_min in some cases in order to avoid\n an expensive linear scan of a large list of tags.\n\n :param tags: the PEP 425 tags to check the wheel against.\n :param tag_to_priority: a mapping from tag to priority of that tag, where\n lower is more preferred.\n\n :raises ValueError: If none of the wheel's file tags match one of\n the supported tags.\n ", "language": "en", "n_whitespaces": 178, "n_words": 100, "vocab_size": 61 }
https://github.com/jindongwang/transferlearning.git
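A small sketch of the priority lookup that find_most_preferred_tag performs, using plain strings instead of pip's internal Wheel and Tag objects (those imports are not assumed here); lower numbers mean more preferred.

# Tag combinations a hypothetical wheel declares in its filename.
file_tags = {"cp38-cp38-manylinux1_x86_64", "py3-none-any"}

# Mapping from supported tag to priority; lower is more preferred.
tag_to_priority = {
    "cp38-cp38-manylinux1_x86_64": 0,
    "cp38-abi3-manylinux1_x86_64": 1,
    "py3-none-any": 2,
}

# Same expression as the method body: the best (lowest) priority that any
# of the wheel's file tags achieves among the supported tags.
best = min(tag_to_priority[tag] for tag in file_tags if tag in tag_to_priority)
print(best)  # 0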
7
_finalize_config_layers
def _finalize_config_layers(layers):
  for layer in layers:
    # It is assumed that layers define their unconditional losses after being
    # recreated from the config and built. The exceptions to this
    # are Functional and Sequential models, which only store conditional losses
    # (losses dependent on the inputs) in the config. Unconditional losses like
    # weight regularization must be revived from the SavedModel.
    if _is_graph_network(layer):
      _restore_layer_unconditional_losses(layer)

    # Some layers, like Dense, record their activation loss function in the
    # config. However, not all layers do this, so the activation loss may be
    # missing when restored from the config/hdf5.
    # TODO(kathywu): Investigate ways to improve the config to ensure consistent
    # loading behavior between HDF5 and SavedModel.
    _restore_layer_activation_loss(layer)

    # Restore metrics list.
    _restore_layer_metrics(layer)

    # Restore RNN layer states.
    if (isinstance(layer, base_rnn.RNN) and
        layer.stateful and
        hasattr(_get_keras_attr(layer), 'states')):
      layer.states = getattr(_get_keras_attr(layer), 'states', None)
      for variable in tf.nest.flatten(layer.states):
        backend.track_variable(variable)

    # Perform any layer defined finalization of the layer state.
    layer.finalize_state()
01c906c4178db5ae03b7eb2d298a052c952a0667
14
load.py
169
Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory. PiperOrigin-RevId: 428841673
79,804
0
247
95
107
268,984
155
keras
22
keras/saving/saved_model/load.py
Python
13
{ "docstring": "Runs the final steps of loading Keras Layers from config.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/keras-team/keras.git
7
local_node_connectivity
def local_node_connectivity(G, source, target, cutoff=None):
    if target == source:
        raise nx.NetworkXError("source and target have to be different nodes.")

    # Maximum possible node independent paths
    if G.is_directed():
        possible = min(G.out_degree(source), G.in_degree(target))
    else:
        possible = min(G.degree(source), G.degree(target))

    K = 0
    if not possible:
        return K

    if cutoff is None:
        cutoff = float("inf")

    exclude = set()
    for i in range(min(possible, cutoff)):
        try:
            path = _bidirectional_shortest_path(G, source, target, exclude)
            exclude.update(set(path))
            K += 1
        except nx.NetworkXNoPath:
            break

    return K
cc1db275efc709cb964ce88abbfa877798d58c10
13
connectivity.py
232
Minor improvements from general code readthrough (#5414) * Add deprecated directive to reversed docstring. * Add missing dep directives to shpfiles. * Remove defn of INF sentinel. * typo. * str -> comment in forloop. * STY: appropriate casing for var name.
41,908
0
200
143
56
176,447
74
networkx
23
networkx/algorithms/approximation/connectivity.py
Python
21
{ "docstring": "Compute node connectivity between source and target.\n\n Pairwise or local node connectivity between two distinct and nonadjacent\n nodes is the minimum number of nodes that must be removed (minimum\n separating cutset) to disconnect them. By Menger's theorem, this is equal\n to the number of node independent paths (paths that share no nodes other\n than source and target). Which is what we compute in this function.\n\n This algorithm is a fast approximation that gives an strict lower\n bound on the actual number of node independent paths between two nodes [1]_.\n It works for both directed and undirected graphs.\n\n Parameters\n ----------\n\n G : NetworkX graph\n\n source : node\n Starting node for node connectivity\n\n target : node\n Ending node for node connectivity\n\n cutoff : integer\n Maximum node connectivity to consider. If None, the minimum degree\n of source or target is used as a cutoff. Default value None.\n\n Returns\n -------\n k: integer\n pairwise node connectivity\n\n Examples\n --------\n >>> # Platonic octahedral graph has node connectivity 4\n >>> # for each non adjacent node pair\n >>> from networkx.algorithms import approximation as approx\n >>> G = nx.octahedral_graph()\n >>> approx.local_node_connectivity(G, 0, 5)\n 4\n\n Notes\n -----\n This algorithm [1]_ finds node independents paths between two nodes by\n computing their shortest path using BFS, marking the nodes of the path\n found as 'used' and then searching other shortest paths excluding the\n nodes marked as used until no more paths exist. It is not exact because\n a shortest path could use nodes that, if the path were longer, may belong\n to two different node independent paths. Thus it only guarantees an\n strict lower bound on node connectivity.\n\n Note that the authors propose a further refinement, losing accuracy and\n gaining speed, which is not implemented yet.\n\n See also\n --------\n all_pairs_node_connectivity\n node_connectivity\n\n References\n ----------\n .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for\n Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035\n http://eclectic.ss.uci.edu/~drwhite/working.pdf\n\n ", "language": "en", "n_whitespaces": 494, "n_words": 314, "vocab_size": 192 }
https://github.com/networkx/networkx.git
1
easy_print
def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None),
               font=None, no_titlebar=False, no_button=False, grab_anywhere=False, keep_on_top=None,
               do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None,
               colors=None, c=None, erase_all=False, resizable=True, blocking=None):
    if _DebugWin.debug_window is None:
        _DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location,
                                           font=font, no_titlebar=no_titlebar, no_button=no_button,
                                           grab_anywhere=grab_anywhere, keep_on_top=keep_on_top,
                                           do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout,
                                           resizable=resizable, blocking=blocking)
    txt_color, bg_color = _parse_colors_parm(c or colors)
    _DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color,
                                 background_color=background_color or bg_color, erase_all=erase_all,
                                 font=font, blocking=blocking)
935e430420f5ac18df67233040ba86359d98a579
11
PySimpleGUI.py
279
Addition of blocking parameter to debug printing. IF True, then execution of your code is stopped until the "Quit" button / "X" is clicked on the Debug Window.
53,492
0
223
94
51
212,886
60
PySimpleGUI
27
PySimpleGUI.py
Python
3
{ "docstring": "\n Works like a \"print\" statement but with windowing options. Routes output to the \"Debug Window\"\n\n In addition to the normal text and background colors, you can use a \"colors\" tuple/string\n The \"colors\" or \"c\" parameter defines both the text and background in a single parm.\n It can be a tuple or a single single. Both text and background colors need to be specified\n colors -(str, str) or str. A combined text/background color definition in a single parameter\n c - (str, str) - Colors tuple has format (foreground, backgrouned)\n c - str - can also be a string of the format \"foreground on background\" (\"white on red\")\n\n :param *args: stuff to output\n :type *args: (Any)\n :param size: (w,h) w=characters-wide, h=rows-high\n :type size: (int, int)\n :param end: end character\n :type end: (str)\n :param sep: separator character\n :type sep: (str)\n :param location: Location of upper left corner of the window\n :type location: (int, int)\n :param relative_location: (x,y) location relative to the default location of the window, in pixels. Normally the window centers. This location is relative to the location the window would be created. Note they can be negative.\n :type relative_location: (int, int)\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike\n :type font: (str or (str, int[, str]) or None)\n :param no_titlebar: If True no titlebar will be shown\n :type no_titlebar: (bool)\n :param no_button: don't show button\n :type no_button: (bool)\n :param grab_anywhere: If True: can grab anywhere to move the window (Default = False)\n :type grab_anywhere: (bool)\n :param background_color: color of background\n :type background_color: (str)\n :param text_color: color of the text\n :type text_color: (str)\n :param keep_on_top: If True the window will remain above all current windows\n :type keep_on_top: (bool)\n :param location: Location of upper left corner of the window\n :type location: (int, int)\n :param do_not_reroute_stdout: do not reroute stdout and stderr. If False, both stdout and stderr will reroute to here\n :type do_not_reroute_stdout: (bool)\n :param echo_stdout: If True stdout is sent to both the console and the debug window\n :type echo_stdout: (bool)\n :param colors: Either a tuple or a string that has both the text and background colors\n :type colors: (str) or (str, str)\n :param c: Either a tuple or a string that has both the text and background colors\n :type c: (str) or (str, str)\n :param resizable: if True, the user can resize the debug window. Default is True\n :type resizable: (bool)\n :param erase_all: If True when erase the output before printing\n :type erase_all: (bool)\n :param blocking: if True, makes the window block instead of returning immediately. The \"Quit\" button changers to \"More\"\n :type blocking: (bool | None)\n :return:\n :rtype:\n ", "language": "en", "n_whitespaces": 1135, "n_words": 444, "vocab_size": 200 }
https://github.com/PySimpleGUI/PySimpleGUI.git
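A hedged usage sketch of the helper above (PySimpleGUI also exposes it under the sg.Print alias); the messages and color values are arbitrary illustration values and a display is required for the Debug Window to open.

import PySimpleGUI as sg

# Route some diagnostics to the Debug Window instead of the console.
# The combined colors parameter takes "foreground on background".
sg.easy_print("step 1 finished", c="white on red")
sg.easy_print("step 2 finished", colors=("yellow", "black"))

# blocking=True keeps execution paused until the user dismisses the window.
sg.easy_print("done", blocking=True)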
2
load_version_info_from_text_file
def load_version_info_from_text_file(filename):
    # Read and parse the version file. It may have a byte order marker or encoding cookie - respect it if it does.
    import PyInstaller.utils.misc as miscutils
    with open(filename, 'rb') as fp:
        text = miscutils.decode(fp.read())

    # Deserialize via eval()
    try:
        info = eval(text)
    except Exception as e:
        raise ValueError("Failed to deserialize VSVersionInfo from text-based representation!") from e

    # Sanity check
    assert isinstance(info, VSVersionInfo), \
        f"Loaded incompatible structure type! Expected VSVersionInfo, got: {type(info)!r}"

    return info
f57e15ae14d2370cba7a14cfae97d2c29b5c8154
12
versioninfo.py
129
building: EXE: load version info structure before comparing guts Load the version information structure in `EXE` constructor, so that the comparison against cached state is done with the structure instead of the filen name. This way, changing the contents of the version information file triggers rebuild of the EXE. Split and clean-up related functions in the `utils.win32.versioninfo` module as well as in `pyi-grab_version` and `pyi-set_version` utility scripts.
77,609
0
134
69
68
264,108
76
pyinstaller
19
PyInstaller/utils/win32/versioninfo.py
Python
11
{ "docstring": "\n Load the `VSVersionInfo` structure from its string-based (`VSVersionInfo.__str__`) serialization by reading the\n text from the file and running it through `eval()`.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 18 }
https://github.com/pyinstaller/pyinstaller.git
3
intercept
def intercept(self):
    if self.intercepted:
        return
    self.intercepted = True
    if self._resume_event is not None:
        self._resume_event.clear()
ede269fce40ec4000a4717d5f5aec7835d9931c2
10
flow.py
55
Flow.intercept: use an Event instead of the reply system This is patch 3/4 of the reply-ectomy.
73,511
0
64
32
13
250,580
14
mitmproxy
5
mitmproxy/flow.py
Python
6
{ "docstring": "\n Intercept this Flow. Processing will stop until resume is\n called.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
https://github.com/mitmproxy/mitmproxy.git
7
_autodetect_num_gpus
def _autodetect_num_gpus():
    result = 0
    if sys.platform.startswith("linux"):
        if importlib.util.find_spec("GPUtil"):
            gpu_list = GPUtil.getGPUs()
            result = len(gpu_list)
        else:
            proc_gpus_path = "/proc/driver/nvidia/gpus"
            if os.path.isdir(proc_gpus_path):
                result = len(os.listdir(proc_gpus_path))
    elif sys.platform == "win32":
        props = "AdapterCompatibility"
        cmdargs = ["WMIC", "PATH", "Win32_VideoController", "GET", props]
        lines = subprocess.check_output(cmdargs).splitlines()[1:]
        result = len([x.rstrip() for x in lines if x.startswith(b"NVIDIA")])
    return result
6f68c74a5dbfbb936cb675781acb2a36eae10984
17
resource_spec.py
231
Use GPUtil for gpu detection when available (#18938) In Envs with K8S and enabled SELinux there is a bug: "/proc/nvidia/" is not allowed to mount in container So, i made a rework for GPU detection based on GPutil package. ## Checks - [x] I've run `scripts/format.sh` to lint the changes in this PR. - [x] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [x] Release tests Co-authored-by: Mopga <[email protected]> Co-authored-by: Julius <[email protected]>
33,474
0
168
132
36
145,520
52
ray
25
python/ray/_private/resource_spec.py
Python
16
{ "docstring": "Attempt to detect the number of GPUs on this machine.\n\n TODO(rkn): This currently assumes NVIDIA GPUs on Linux.\n TODO(mehrdadn): Use a better mechanism for Windows.\n\n Returns:\n The number of GPUs if any were detected, otherwise 0.\n ", "language": "en", "n_whitespaces": 55, "n_words": 36, "vocab_size": 31 }
https://github.com/ray-project/ray.git
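A self-contained sketch of the same Linux detection order (GPUtil if importable, otherwise the NVIDIA proc directory); treat it as an illustration of the logic above rather than a drop-in replacement, and note it returns 0 on machines without NVIDIA GPUs.

import importlib.util
import os

def count_nvidia_gpus_linux():
    # Prefer GPUtil when the package is importable.
    if importlib.util.find_spec("GPUtil"):
        import GPUtil
        return len(GPUtil.getGPUs())
    # Fall back to counting entries under the NVIDIA driver's proc directory.
    proc_gpus_path = "/proc/driver/nvidia/gpus"
    if os.path.isdir(proc_gpus_path):
        return len(os.listdir(proc_gpus_path))
    return 0

print(count_nvidia_gpus_linux())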
3
execute
def execute():
    work_order = frappe.qb.DocType("Work Order")
    query = (
        frappe.qb.from_(work_order)
        .select(
            work_order.name,
            work_order.produced_qty,
            work_order.material_request,
            work_order.material_request_item,
            work_order.sales_order
        ).where(
            (work_order.material_request.isnotnull())
            & (work_order.material_request_item.isnotnull())
            & (work_order.sales_order.isnotnull())
            & (work_order.docstatus == 1)
            & (work_order.produced_qty > 0)
        )
    )
    results = query.run(as_dict=True)

    for row in results:
        so_item = frappe.get_value(
            "Material Request Item", row.material_request_item, "sales_order_item"
        )
        frappe.db.set_value("Work Order", row.name, "sales_order_item", so_item)

        if so_item:
            wo = frappe.get_doc("Work Order", row.name)
            wo.update_work_order_qty_in_so()
0ca58d762715fd10c751c4497f3037908f4dfb20
17
set_work_order_qty_in_so_from_mr.py
264
chore: Patch to update SO work_order_qty and Linter fix
13,623
0
267
164
51
64,391
61
erpnext
27
erpnext/patches/v14_0/set_work_order_qty_in_so_from_mr.py
Python
26
{ "docstring": "\n 1. Get submitted Work Orders with MR, MR Item and SO set\n 2. Get SO Item detail from MR Item detail in WO, and set in WO\n 3. Update work_order_qty in SO\n ", "language": "en", "n_whitespaces": 45, "n_words": 32, "vocab_size": 21 }
https://github.com/frappe/erpnext.git
11
_get_packed_offsets
def _get_packed_offsets(widths, total, sep, mode="fixed"):
    _api.check_in_list(["fixed", "expand", "equal"], mode=mode)

    if mode == "fixed":
        offsets_ = np.cumsum([0] + [w + sep for w in widths])
        offsets = offsets_[:-1]
        if total is None:
            total = offsets_[-1] - sep
        return total, offsets
    elif mode == "expand":
        # This is a bit of a hack to avoid a TypeError when *total*
        # is None and used in conjugation with tight layout.
        if total is None:
            total = 1
        if len(widths) > 1:
            sep = (total - sum(widths)) / (len(widths) - 1)
        else:
            sep = 0
        offsets_ = np.cumsum([0] + [w + sep for w in widths])
        offsets = offsets_[:-1]
        return total, offsets
    elif mode == "equal":
        maxh = max(widths)
        if total is None:
            if sep is None:
                raise ValueError("total and sep cannot both be None when "
                                 "using layout mode 'equal'")
            total = (maxh + sep) * len(widths)
        else:
            sep = total / len(widths) - maxh
        offsets = (maxh + sep) * np.arange(len(widths))
        return total, offsets
8ef4e017f8a95db8704728a5fffd2c0384afc525
16
offsetbox.py
381
Don't pass unused xdescent to _get_packed_offsets. Instead of passing a list of (widths, xdescents) where xdescent is unused, just pass a list of widths. This helper is private so we just need to adjust the call sites and tests with no deprecation. This patch is preliminary work for some further cleanup on the offsetbox module.
23,960
0
420
231
79
110,170
163
matplotlib
18
lib/matplotlib/offsetbox.py
Python
74
{ "docstring": "\n Pack boxes specified by their *widths*.\n\n For simplicity of the description, the terminology used here assumes a\n horizontal layout, but the function works equally for a vertical layout.\n\n There are three packing *mode*\\s:\n\n - 'fixed': The elements are packed tight to the left with a spacing of\n *sep* in between. If *total* is *None* the returned total will be the\n right edge of the last box. A non-*None* total will be passed unchecked\n to the output. In particular this means that right edge of the last\n box may be further to the right than the returned total.\n\n - 'expand': Distribute the boxes with equal spacing so that the left edge\n of the first box is at 0, and the right edge of the last box is at\n *total*. The parameter *sep* is ignored in this mode. A total of *None*\n is accepted and considered equal to 1. The total is returned unchanged\n (except for the conversion *None* to 1). If the total is smaller than\n the sum of the widths, the laid out boxes will overlap.\n\n - 'equal': If *total* is given, the total space is divided in N equal\n ranges and each box is left-aligned within its subspace.\n Otherwise (*total* is *None*), *sep* must be provided and each box is\n left-aligned in its subspace of width ``(max(widths) + sep)``. The\n total width is then calculated to be ``N * (max(widths) + sep)``.\n\n Parameters\n ----------\n widths : list of float\n Widths of boxes to be packed.\n total : float or None\n Intended total length. *None* if not used.\n sep : float\n Spacing between boxes.\n mode : {'fixed', 'expand', 'equal'}\n The packing mode.\n\n Returns\n -------\n total : float\n The total width needed to accommodate the laid out boxes.\n offsets : array of float\n The left offsets of the boxes.\n ", "language": "en", "n_whitespaces": 460, "n_words": 298, "vocab_size": 150 }
https://github.com/matplotlib/matplotlib.git
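A worked numeric illustration of the 'fixed' and 'expand' layouts described in the docstring above, computed directly with NumPy on hypothetical widths rather than by calling the private matplotlib helper.

import numpy as np

widths, sep = [3.0, 1.0, 2.0], 0.5

# 'fixed': pack tight to the left with `sep` between boxes.
edges = np.cumsum([0] + [w + sep for w in widths])
fixed_offsets = edges[:-1]          # array([0. , 3.5, 5. ])
fixed_total = edges[-1] - sep       # 7.0, the right edge of the last box (5.0 + 2.0)

# 'expand': distribute so the first box starts at 0 and the last ends at `total`.
total = 10.0
sep_expand = (total - sum(widths)) / (len(widths) - 1)   # 2.0
edges = np.cumsum([0] + [w + sep_expand for w in widths])
expand_offsets = edges[:-1]         # array([0., 5., 8.]); last box ends at 8 + 2 = 10

print(fixed_total, fixed_offsets, expand_offsets)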
7
del_tracking
def del_tracking(self):
    # Now that the node object has been fully loaded, and the checkpoint has
    # been restored, the object no longer needs to track objects added from
    # SerializedAttributes. (Note that saving a training checkpoint still
    # functions correctly, because layers and variables are tracked
    # separately by the Layer object.)
    # TODO(kathywu): Instead of outright deleting these nodes (which would
    # make restoring from a different checkpoint tricky), mark them as extra
    # dependencies that are OK to overwrite.
    for node in self.loaded_nodes.values():
        node = node[0]
        if not isinstance(node, base_layer.Layer):
            # Loaded nodes can contain other trackable objects created when
            # loading layers from the config, such as variables.
            continue
        for name in PUBLIC_ATTRIBUTES:
            node._delete_tracking(name)  # pylint: disable=protected-access

        if isinstance(node, functional_lib.Functional):
            # Delete the temporary layer dependencies, which were used to
            # restore the checkpointed values. When the model is live, the
            # user can delete or add layers to the model at any time, so
            # these layer dependencies may be obsolete.
            dependencies = list(
                node._self_unconditional_dependency_names
            )  # pylint: disable=protected-access
            for name in dependencies:
                if (
                    re.match(r"^layer(_with_weights)?-[\d+]", name) is not None
                ):
                    node._delete_tracking(
                        name
                    )  # pylint: disable=protected-access
b0ffc0031e9c1964e7398ca47c6666bbfc0d5086
16
load.py
161
resolve line-too-long in saving
82,463
0
650
91
122
278,305
192
keras
18
keras/saving/saved_model/load.py
Python
19
{ "docstring": "Removes tracked references that are only used when loading the\n model.", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
https://github.com/keras-team/keras.git
3
all_data
def all_data(request, data, data_missing):
    if request.param == "data":
        return data
    elif request.param == "data_missing":
        return data_missing
89be1f053b695c4ce1c0569f737caf3f03c12128
8
conftest.py
49
DOC: Added docstrings to fixtures defined in array module (#47211)
39,882
0
39
28
13
166,950
16
pandas
5
pandas/tests/arrays/floating/conftest.py
Python
5
{ "docstring": "Parametrized fixture returning 'data' or 'data_missing' float arrays.\n\n Used to test dtype conversion with and without missing values.\n ", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 18 }
https://github.com/pandas-dev/pandas.git
1
iteritems
def iteritems(self) -> Iterable[tuple[Hashable, Any]]:
    warnings.warn(
        "iteritems is deprecated and will be removed in a future version. "
        "Use .items instead.",
        FutureWarning,
        stacklevel=find_stack_level(inspect.currentframe()),
    )
    return self.items()

# ----------------------------------------------------------------------
# Misc public methods
f77dbfb5af93faf9425c5d717a93ea7f6f26b3fd
12
series.py
74
DOC: Add deprecation marks to deprecated functions (#48183) * DOC: Add deprecation marks to deprecated functions * Address docs * docstrings
40,335
0
110
43
31
168,829
32
pandas
14
pandas/core/series.py
Python
30
{ "docstring": "\n Lazily iterate over (index, value) tuples.\n\n .. deprecated:: 1.5.0\n iteritems is deprecated and will be removed in a future version.\n Use .items instead.\n\n This method returns an iterable tuple (index, value). This is\n convenient if you want to create a lazy iterator.\n\n Returns\n -------\n iterable\n Iterable of tuples containing the (index, value) pairs from a\n Series.\n\n See Also\n --------\n Series.items : Recommended alternative.\n DataFrame.items : Iterate over (column name, Series) pairs.\n DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs.\n ", "language": "en", "n_whitespaces": 217, "n_words": 81, "vocab_size": 65 }
https://github.com/pandas-dev/pandas.git
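A short sketch of the recommended replacement named in the deprecation message above: iterating with Series.items() yields the same (index, value) pairs without the warning. The sample data is arbitrary.

import pandas as pd

s = pd.Series([0.25, 0.5, 0.75], index=["a", "b", "c"])

# Series.items() is the non-deprecated spelling of the same lazy iteration.
for idx, value in s.items():
    print(idx, value)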
4
checkAndReturnConsistentLogs
def checkAndReturnConsistentLogs(self, results, sleep_per_iter=None):
    class_results = copy.deepcopy(results)
    function_results = copy.deepcopy(results)
    class_output = []
    function_output = []
    scheduler_notif = []
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
8
test_api.py
67
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,747
0
61
306
12
132,382
19
ray
11
python/ray/tune/tests/test_api.py
Python
63
{ "docstring": "Checks logging is the same between APIs.\n\n Ignore \"DONE\" for logging but checks that the\n scheduler is notified properly with the last result.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 19 }
https://github.com/ray-project/ray.git
1
test_tags_help_text_spaces_allowed
def test_tags_help_text_spaces_allowed(self):
    widget = widgets.AdminTagWidget()
    help_text = widget.get_context(None, None, {})["widget"]["help_text"]

    html = widget.render("tags", None, {})
    help_text_html_element = self.get_help_text_html_element(html)

    self.assertEqual(
        help_text,
        'Multi-word tags with spaces will automatically be enclosed in double quotes (").',
    )

    self.assertHTMLEqual(
        help_text_html_element,
        % help_text,
    )
1822d7eee23cf5fceff8b1f58f3ca2f0a32c6e34
11
test_widgets.py
123
display help text message for tag field - resolves #1874 - ensure message is dynamic based on the setting TAG_SPACES_ALLOWED - Update wagtail/admin/templates/wagtailadmin/widgets/tag_widget.html
16,620
0
146
73
32
77,078
38
wagtail
13
wagtail/admin/tests/test_widgets.py
Python
13
{ "docstring": "Checks that the tags help text html element content is correct when TAG_SPACES_ALLOWED is True<p class=\"help\">%s</p>", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 15 }
https://github.com/wagtail/wagtail.git
3
clean_pipeline_string
def clean_pipeline_string(self, individual):
    dirty_string = str(individual)
    # There are many parameter prefixes in the pipeline strings, used solely for
    # making the terminal name unique, eg. LinearSVC__.
    parameter_prefixes = [
        (m.start(), m.end()) for m in re.finditer(", [\w]+__", dirty_string)
    ]
    # We handle them in reverse so we do not mess up indices
    pretty = dirty_string
    for (start, end) in reversed(parameter_prefixes):
        pretty = pretty[: start + 2] + pretty[end:]

    return pretty
388616b6247ca4ea8de4e2f340d6206aee523541
12
base.py
120
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,614
0
162
74
55
181,840
70
tpot
13
tpot/base.py
Python
9
{ "docstring": "Provide a string of the individual without the parameter prefixes.\n\n Parameters\n ----------\n individual: individual\n Individual which should be represented by a pretty string\n\n Returns\n -------\n A string like str(individual), but with parameter prefixes removed.\n\n ", "language": "en", "n_whitespaces": 94, "n_words": 34, "vocab_size": 28 }
https://github.com/EpistasisLab/tpot.git
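A standalone illustration of the prefix-stripping regex shown above on a hypothetical pipeline string; the real method receives a DEAP individual and calls str() on it first.

import re

# Hypothetical flattened pipeline string with parameter prefixes such as "LinearSVC__".
dirty = "LinearSVC(input_matrix, LinearSVC__C=1.0, LinearSVC__penalty=l2)"

# Locate every ", <prefix>__" occurrence...
spans = [(m.start(), m.end()) for m in re.finditer(r", [\w]+__", dirty)]

# ...and splice each one out in reverse so earlier indices stay valid.
pretty = dirty
for start, end in reversed(spans):
    pretty = pretty[: start + 2] + pretty[end:]

print(pretty)  # LinearSVC(input_matrix, C=1.0, penalty=l2)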
2
put_timestamp
def put_timestamp(self, feed_id, timestamp):
    self._fetch_data()
    with self._lock, open(self._data_file, "wb") as myfile:
        self._data.update({feed_id: timestamp})
        _LOGGER.debug(
            "Overwriting feed %s timestamp in storage file %s",
            feed_id,
            self._data_file,
        )
        try:
            pickle.dump(self._data, myfile)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.error("Error saving pickled data to %s", self._data_file)
    self._cache_outdated = True
62a5854e40cb554fecb1eec897d7bcb4c94628fe
13
__init__.py
144
Fix bare except (#72906)
100,706
0
202
86
41
301,868
43
core
18
homeassistant/components/feedreader/__init__.py
Python
14
{ "docstring": "Update timestamp for given feed id (usually the url).", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
test_deploy_with_transient_constructor_failure
def test_deploy_with_transient_constructor_failure(mock_deployment_state):
    deployment_state, timer, goal_manager = mock_deployment_state

    b_info_1, b_version_1 = deployment_info(num_replicas=2)
    create_goal, updating = deployment_state.deploy(b_info_1)
    goal_obj = goal_manager.get_goal(create_goal)

    # Burn 4 retries from both replicas.
    deleted = _constructor_failure_loop_two_replica(deployment_state, 2)
    assert not deleted

    # Let both replicas succeed in last try.
    deployment_state.update()
    check_counts(deployment_state, total=2, by_state=[(ReplicaState.STARTING, 2)])
    assert deployment_state._replica_constructor_retry_counter == 4

    replica_1 = deployment_state._replicas.get()[0]
    replica_2 = deployment_state._replicas.get()[1]

    replica_1._actor.set_ready()
    replica_2._actor.set_ready()
    deployment_state.update()
    check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 2)])

    assert deployment_state._replica_constructor_retry_counter == 4
    assert goal_manager.check_complete(create_goal)
    assert goal_obj.exception is None


@pytest.fixture
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
@pytest.fixture
11
test_deployment_state.py
268
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,444
1
135
165
52
130,998
73
ray
34
python/ray/serve/tests/test_deployment_state.py
Python
19
{ "docstring": "\n Test deploy() multiple replicas with transient constructor failure.\n Ensures:\n 1) Async goal manager can correctly recognize deployment goal as\n successful\n 2) There should be expected # of RUNNING replicas eventually that\n matches user intent\n 3) Replica counter set as -1 to stop tracking current goal as it's\n already completed\n\n Same testing for same test case in test_deploy.py.\n ", "language": "en", "n_whitespaces": 124, "n_words": 57, "vocab_size": 52 }
https://github.com/ray-project/ray.git