Dataset schema (min/max shown per column; for string columns the numbers are string lengths). Each row below lists one value per line in this field order; empty string fields (observed for ast_errors) are simply omitted from a row.

field            dtype    min   max
complexity       int64    1     56
n_identifiers    int64    1     114
code             string   19    12.7k
path             string   8     134
n_ast_nodes      int64    12    2.35k
ast_errors       string   0     4.01k
repo             string   3     28
documentation    dict     -     -
n_words          int64    2     866
language         string   (1 class: Python)
vocab_size       int64    2     323
commit_id        string   40    40
file_name        string   5     79
id               int64    243   338k
nloc             int64    1     228
token_counts     int64    5     1.4k
fun_name         string   1     77
url              string   31    60
commit_message   string   3     15.3k
n_whitespaces    int64    1     3.23k
n_ast_errors     int64    0     20
d_id             int64    74    121k
ast_levels       int64    4     29
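If this dump comes from a dataset hosted on the Hugging Face Hub (an assumption: the preview itself does not name the dataset or its host), the rows below can be inspected programmatically with the `datasets` library rather than read from the flattened dump. A minimal sketch; the dataset ID "org/code-dataset" is a hypothetical placeholder.

    # Minimal sketch, assuming a Hugging Face-hosted dataset with the schema above.
    # "org/code-dataset" is a hypothetical placeholder; substitute the real dataset ID.
    from datasets import load_dataset

    ds = load_dataset("org/code-dataset", split="train")
    row = ds[0]
    print(row["repo"], row["path"], row["fun_name"])
    print(row["code"])            # one function's source per row
    print(row["documentation"])   # dict: docstring plus language/word/vocab stats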
2
11
def faces_count(self) -> int:
    retval = sum(len(val["faces"]) for val in self._data.values())
    logger.trace(retval)  # type:ignore
    return retval
lib/align/alignments.py
66
faceswap
{ "docstring": " int: The total number of faces that appear in the alignments :attr:`data`. ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 12 }
16
Python
15
e5356a417e7c2124e75c4a2994ed604fc0a3cc74
alignments.py
101,699
5
38
faces_count
https://github.com/deepfakes/faceswap.git
Alignments update: - Store face embeddings in PNG header when sorting - typing + refactor - Update alignments keys for 'identity' and 'video_meta' + bump to v2.3 - General typing fixes
45
0
21,103
12
1
15
async def test_loading_race_condition(hass):
    store = auth_store.AuthStore(hass)
    with patch(
        "homeassistant.helpers.entity_registry.async_get"
    ) as mock_ent_registry, patch(
        "homeassistant.helpers.device_registry.async_get"
    ) as mock_dev_registry, patch(
        "homeassistant.helpers.storage.Store.async_load", return_value=None
    ) as mock_load:
        results = await asyncio.gather(store.async_get_users(), store.async_get_users())

        mock_ent_registry.assert_called_once_with(hass)
        mock_dev_registry.assert_called_once_with(hass)
        mock_load.assert_called_once_with()
        assert results[0] == results[1]
tests/auth/test_auth_store.py
152
core
{ "docstring": "Test only one storage load called when concurrent loading occurred .", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
35
Python
28
69cc6ab5f1d58adc586c3b300a4f7f0cde2cd0c2
test_auth_store.py
300,810
14
86
test_loading_race_condition
https://github.com/home-assistant/core.git
Clean up accessing entity_registry.async_get_registry helper via hass (#72005)
109
0
99,666
13
2
15
def test_get_resource_deprecation_warning(self):
    hook = AwsBaseHook(aws_conn_id='aws_default', resource_type='dynamodb')
    warning_message = "resource_type is deprecated. Set resource_type from class attribute."
    with pytest.warns(DeprecationWarning) as warnings:
        hook.get_resource_type('dynamodb')
        assert warning_message in [str(w.message) for w in warnings]
tests/providers/amazon/aws/hooks/test_base_aws.py
91
airflow
{ "docstring": "resource_type is deprecated. Set resource_type from class attribute.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
21
Python
18
774ca085d2d2d58d4292b43c3511a145cc07154b
test_base_aws.py
46,201
6
51
test_get_resource_deprecation_warning
https://github.com/apache/airflow.git
Enable JSON serialization for connections (#19857) Previously in general we could only store connections in the Airflow URI format. With this change we can serialize as JSON. The Airflow URI format can be very tricky to work with and although we have for some time had a convenience method Connection.get_uri, using JSON is just simpler.
64
0
8,811
12
2
17
def pip_install(self, reqs):
    if not reqs:
        return
    log.info('Calling pip to install %s', reqs)
    cmd = [
        sys.executable, '-m', 'pip', 'install', '--ignore-installed',
        '--prefix', self.path] + list(reqs)
    check_call(
        cmd,
        stdout=LoggerWrapper(log, logging.INFO),
        stderr=LoggerWrapper(log, logging.ERROR),
    )
.venv/lib/python3.8/site-packages/pip/_vendor/pep517/envbuild.py
120
transferlearning
{ "docstring": "Install dependencies into this env by calling pip in a subprocess", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
32
Python
32
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
envbuild.py
62,982
12
74
pip_install
https://github.com/jindongwang/transferlearning.git
upd; format
140
0
13,085
11
3
8
def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):
    import sklearn

    if not _is_plotting_supported():
        return []
mlflow/sklearn/utils.py
41
mlflow
{ "docstring": "\n Draw and record various common artifacts for classifier\n\n For all classifiers, we always log:\n (1) confusion matrix:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html\n\n For only binary classifiers, we will log:\n (2) precision recall curve:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html\n (3) roc curve:\n https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n\n Steps:\n 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets.\n 2. If the sample_weight argument exists in fit_func (accuracy_score by default\n has sample_weight), extract it from fit_args or fit_kwargs as\n (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)\n 3. return a list of artifacts path to be logged\n\n :param fitted_estimator: The already fitted regressor\n :param fit_args: Positional arguments given to fit_func.\n :param fit_kwargs: Keyword arguments given to fit_func.\n :return: List of artifacts to be logged\n ", "language": "en", "n_whitespaces": 178, "n_words": 117, "vocab_size": 91 }
13
Python
13
847eb6b22d03f0cffef945996cf835272870435a
utils.py
19,198
48
187
_get_classifier_artifacts
https://github.com/mlflow/mlflow.git
Improve confusion matrix plot (#5273) * update Signed-off-by: Weichen Xu <[email protected]> * fix Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]>
29
0
2,909
8
1
21
def mock_stretch() -> Generator[None, MagicMock, None]:
    chosen_env = "stretch_v31"
    with patch(
        "homeassistant.components.plugwise.gateway.Smile", autospec=True
    ) as smile_mock:
        smile = smile_mock.return_value
        smile.gateway_id = "259882df3c05415b99c2d962534ce820"
        smile.heater_id = None
        smile.smile_version = "3.1.11"
        smile.smile_type = "stretch"
        smile.smile_hostname = "stretch98765"
        smile.smile_model = "Gateway"
        smile.smile_name = "Stretch"
        smile.connect.return_value = True
        smile.async_update.return_value = _read_json(chosen_env, "all_data")
        yield smile


@pytest.fixture
tests/components/plugwise/conftest.py
171
@pytest.fixture
core
{ "docstring": "Create a Mock Stretch environment for testing exceptions.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
50
Python
39
6abf677092e2d45d39c515c8d4fa7e1787394766
conftest.py
288,912
17
90
mock_stretch
https://github.com/home-assistant/core.git
Bump plugwise to v0.25.0 and adapt relevant plugwise code (#80129)
145
1
88,061
11
1
8
def _model_version(self) -> int:
    retval = int(self._model_full_name[self._model_full_name.rfind("_") + 2:])
    self.logger.trace(retval)  # type: ignore
    return retval
lib/utils.py
68
faceswap
{ "docstring": " int: The model's version number from the model full name. ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
15
Python
14
91fecc47b2157d684ab9c219a860df51543222a3
utils.py
101,008
5
39
_model_version
https://github.com/deepfakes/faceswap.git
lib.Utils - add DPI detector
44
0
20,451
14
3
29
def rolling_mean_by_h(x, h, w, name):
    # Aggregate over h
    df = pd.DataFrame({'x': x, 'h': h})
    df2 = (
        df.groupby('h').agg(['sum', 'count']).reset_index().sort_values('h')
    )
    xs = df2['x']['sum'].values
    ns = df2['x']['count'].values
    hs = df2.h.values

    trailing_i = len(df2) - 1
    x_sum = 0
    n_sum = 0
    # We don't know output size but it is bounded by len(df2)
    res_x = np.empty(len(df2))

    # Start from the right and work backwards
    for i in range(len(df2) - 1, -1, -1):
        x_sum += xs[i]
        n_sum += ns[i]
        while n_sum >= w:
            # Include points from the previous horizon. All of them if still
            # less than w, otherwise weight the mean by the difference
            excess_n = n_sum - w
            excess_x = excess_n * xs[i] / ns[i]
            res_x[trailing_i] = (x_sum - excess_x) / w
            x_sum -= xs[trailing_i]
            n_sum -= ns[trailing_i]
            trailing_i -= 1

    res_h = hs[(trailing_i + 1):]
    res_x = res_x[(trailing_i + 1):]

    return pd.DataFrame({'horizon': res_h, name: res_x})
python/prophet/diagnostics.py
376
prophet
{ "docstring": "Compute a rolling mean of x, after first aggregating by h.\n\n Right-aligned. Computes a single mean for each unique value of h. Each\n mean is over at least w samples.\n\n Parameters\n ----------\n x: Array.\n h: Array of horizon for each value in x.\n w: Integer window size (number of elements).\n name: Name for metric in result dataframe\n\n Returns\n -------\n Dataframe with columns horizon and name, the rolling mean of x.\n ", "language": "en", "n_whitespaces": 106, "n_words": 70, "vocab_size": 53 }
147
Python
101
e665430adcd7690a1ea7565803f34043596045fe
diagnostics.py
3,288
25
228
rolling_mean_by_h
https://github.com/facebook/prophet.git
Improved execution time of rolling_mean_by_h (#2142)
321
0
430
16
1
5
def as_dict(self) -> dict[str, Any]:
homeassistant/helpers/restore_state.py
22
core
{ "docstring": "Return a dict representation of the extra data.\n\n Must be serializable by Home Assistant's JSONEncoder.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 15 }
5
Python
5
009b31941a45c3d880b69dcf91d14edeb61a78a7
restore_state.py
312,884
5
13
as_dict
https://github.com/home-assistant/core.git
Support restoring SensorEntity native_value (#66068)
12
0
111,515
6
1
27
def test_consume_barcode_unsupported_jpg_file(self, m):
    test_file = os.path.join(
        os.path.dirname(__file__),
        "samples",
        "simple.jpg",
    )
    dst = os.path.join(settings.SCRATCH_DIR, "simple.jpg")
    shutil.copy(test_file, dst)
    with self.assertLogs("paperless.tasks", level="WARNING") as cm:
        self.assertIn("Success", tasks.consume_file(dst))
    self.assertEqual(
        cm.output,
        [
            "WARNING:paperless.tasks:Unsupported file format for barcode reader: .jpg",
        ],
    )
    m.assert_called_once()

    args, kwargs = m.call_args
    self.assertIsNone(kwargs["override_filename"])
    self.assertIsNone(kwargs["override_title"])
    self.assertIsNone(kwargs["override_correspondent_id"])
    self.assertIsNone(kwargs["override_document_type_id"])
    self.assertIsNone(kwargs["override_tag_ids"])
src/documents/tests/test_tasks.py
258
paperless-ngx
{ "docstring": "\n This test assumes barcode and TIFF support are enabled and\n the user uploads an unsupported image file (e.g. jpg)\n\n The function shouldn't try to scan for separating barcodes\n and continue archiving the file as is.\n ", "language": "en", "n_whitespaces": 71, "n_words": 35, "vocab_size": 31 }
45
Python
42
6d0fdc751027809a13e0430c16b8f248b3eb03e8
test_tasks.py
319,336
23
150
test_consume_barcode_unsupported_jpg_file
https://github.com/paperless-ngx/paperless-ngx.git
add tests fix indention add more documentation Signed-off-by: Florian Brandes <[email protected]>
242
0
116,934
12
1
10
def test_get_pdu_returns_nothing_when_event_does_not_exist(self):
    remote_pdu = self.get_success(
        self.hs.get_federation_client().get_pdu(
            ["yet.another.server"],
            "event_should_not_exist",
            RoomVersions.V9,
        )
    )
    self.assertEqual(remote_pdu, None)
tests/federation/test_federation_client.py
71
synapse
{ "docstring": "No event should be returned when the event does not exist", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
13
Python
12
0f971ca68e808dd16f53f5594a6b33b7bddcc9a9
test_federation_client.py
248,920
9
42
test_get_pdu_returns_nothing_when_event_does_not_exist
https://github.com/matrix-org/synapse.git
Update `get_pdu` to return the original, pristine `EventBase` (#13320) Update `get_pdu` to return the untouched, pristine `EventBase` as it was originally seen over federation (no metadata added). Previously, we returned the same `event` reference that we stored in the cache which downstream code modified in place and added metadata like setting it as an `outlier` and essentially poisoned our cache. Now we always return a copy of the `event` so the original can stay pristine in our cache and re-used for the next cache call. Split out from https://github.com/matrix-org/synapse/pull/13205 As discussed at: - https://github.com/matrix-org/synapse/pull/13205#discussion_r918365746 - https://github.com/matrix-org/synapse/pull/13205#discussion_r918366125 Related to https://github.com/matrix-org/synapse/issues/12584. This PR doesn't fix that issue because it hits [`get_event` which exists from the local database before it tries to `get_pdu`](https://github.com/matrix-org/synapse/blob/7864f33e286dec22368dc0b11c06eebb1462a51e/synapse/federation/federation_client.py#L581-L594).
108
0
72,511
13
1
6
def config_dicts(self) -> Dict[str, Any]:
    return self._config_dicts
tools/preview/preview.py
28
faceswap
{ "docstring": " dict: The convert configuration options in dictionary form.", "language": "en", "n_whitespaces": 8, "n_words": 8, "vocab_size": 8 }
7
Python
7
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
preview.py
101,425
3
17
config_dicts
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
21
0
20,839
6
1
22
def test_prune_gap_if_old(self):
    # Advance the clock for many days to make the old extremity "old". We
    # also set the depth to "lots".
    self.reactor.advance(7 * 24 * 60 * 60)

    # Fudge a second event which points to an event we don't have. This is a
    # state event so that the state changes (otherwise we won't prune the
    # extremity as they'll have the same state group).
    remote_event_2 = event_from_pdu_json(
        {
            "type": EventTypes.Member,
            "state_key": "@user:other2",
            "content": {"membership": Membership.JOIN},
            "room_id": self.room_id,
            "sender": "@user:other2",
            "depth": 10000,
            "prev_events": ["$some_unknown_message"],
            "auth_events": [],
            "origin_server_ts": self.clock.time_msec(),
        },
        RoomVersions.V6,
    )

    state_before_gap = self.get_success(
        self.state.get_current_state_ids(self.room_id)
    )

    self.persist_event(remote_event_2, state=state_before_gap)

    # Check the new extremity is just the new remote event.
    self.assert_extremities([remote_event_2.event_id])
tests/storage/test_events.py
218
synapse
{ "docstring": "Test that we drop extremities after a gap when the previous extremity\n is \"old\"\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
113
Python
84
b83bc5fab57b37f75a79d02213d6032c586fd36e
test_events.py
248,370
21
124
test_prune_gap_if_old
https://github.com/matrix-org/synapse.git
Pull out less state when handling gaps mk2 (#12852)
390
0
72,247
13
1
6
def tree(self) -> Tree:
    highlighter = ReprHighlighter()
    tree = Tree(highlighter(repr(self)))
src/textual/dom.py
45
textual
{ "docstring": "Get a Rich tree object which will recursively render the structure of the node tree.\n\n Returns:\n Tree: A Rich object which may be printed.\n ", "language": "en", "n_whitespaces": 49, "n_words": 24, "vocab_size": 20 }
10
Python
9
2635f58e7c3d10b161ee69a15ebfe6499ac26daa
dom.py
181,943
11
35
tree
https://github.com/Textualize/textual.git
docstrings and tidy
31
0
43,682
12
1
6
def synchronized_update_sequences(self) -> tuple[str, str]:
    return (
        self._synchronized_update_start_sequence(),
        self._synchronized_update_end_sequence(),
    )
src/textual/_terminal_features.py
45
textual
{ "docstring": "\n Returns the ANSI sequence that we should send to the terminal to tell it that\n it should buffer the content we're about to send, as well as the ANIS sequence to end the buffering.\n If the terminal doesn't seem to support synchronised updates both strings will be empty.\n\n Returns:\n tuple[str, str]: the start and end ANSI sequences, respectively. They will both be empty strings\n if the terminal emulator doesn't seem to support the \"synchronised updates\" mode.\n ", "language": "en", "n_whitespaces": 138, "n_words": 76, "vocab_size": 47 }
10
Python
10
d14659c1a3760eade2dd3479b66eb8b2e7711db0
_terminal_features.py
183,562
14
28
synchronized_update_sequences
https://github.com/Textualize/textual.git
[terminal buffering] Add support for the "mode 2026" That task is definitely way more complicated that it seemed to be 😅
53
0
44,251
8
5
19
def _filter_args(args):
    distrib_args = distrib_run.get_args_parser()
    known_args, _ = distrib_args.parse_known_args()
    for arg in list(vars(args).keys()):
        if arg not in vars(known_args).keys():
            delattr(args, arg)

    distrib_args = distrib_run.parse_args(vars(args))
    for key, value in vars(args).items():
        setattr(distrib_args, key, value)
    if is_torch_version("<", "1.9.0"):
        setattr(distrib_args, "use_env", True)
    return distrib_args
src/accelerate/utils/launch.py
177
accelerate
{ "docstring": "\n Filters out all `accelerate` specific args\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
39
Python
28
9fd08d79f9a72973073d2cdef6bf23f367b75d6f
launch.py
338,065
12
107
_filter_args
https://github.com/huggingface/accelerate.git
Fully remove `subprocess` from the multi-gpu launcher (#623) * Remove one of the subprocesses!
95
0
121,158
12
5
7
def create_endpoint(endpoint_type=0):
    if endpoint_type == 1:
        try:
            endpoint = f"http://api.openweathermap.org/data/2.5/weather?zip={APP_DATA['Postal']},{APP_DATA['Country']}&appid={API_KEY}&units={APP_DATA['Units']}"
            return endpoint
        except ConnectionError:
            return
    elif endpoint_type == 2:
        try:
            # endpoint = f"http://api.openweathermap.org/data/2.5/weather?q={APP_DATA['City'].replace(' ', '%20')},us&APPID={API_KEY}&units={APP_DATA['Units']}"
            endpoint = f"http://api.openweathermap.org/data/2.5/weather?q={APP_DATA['City'].replace(' ', '%20')},{APP_DATA['Country']}&APPID={API_KEY}&units={APP_DATA['Units']}"
            return endpoint
        except ConnectionError:
            return
    else:
        return
DemoPrograms/Demo_Desktop_Widget_Weather.py
163
PySimpleGUI
{ "docstring": " Create the api request endpoint\n {0: default, 1: zipcode, 2: city_name}", "language": "en", "n_whitespaces": 14, "n_words": 11, "vocab_size": 11 }
38
Python
21
186b16e77ac2b54eb966bafc0e5f092e028e7ed8
Demo_Desktop_Widget_Weather.py
212,784
15
45
create_endpoint
https://github.com/PySimpleGUI/PySimpleGUI.git
Addition of county to the Weather Desktop Widget
162
0
53,400
17
6
34
def install(collection, path, artifacts_manager):  # FIXME: mv to dataclasses?
    # type: (Candidate, str, ConcreteArtifactsManager) -> None
    b_artifact_path = (
        artifacts_manager.get_artifact_path if collection.is_concrete_artifact
        else artifacts_manager.get_galaxy_artifact_path
    )(collection)

    collection_path = os.path.join(path, collection.namespace, collection.name)
    b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')

    display.display(
        u"Installing '{coll!s}' to '{path!s}'".
        format(coll=to_text(collection), path=collection_path),
    )

    if os.path.exists(b_collection_path):
        shutil.rmtree(b_collection_path)

    if collection.is_dir:
        install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)
    else:
        install_artifact(
            b_artifact_path,
            b_collection_path,
            artifacts_manager._b_working_directory,
            collection.signatures,
            artifacts_manager.keyring
        )
        if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):
            write_source_metadata(
                collection,
                b_collection_path,
                artifacts_manager
            )

    display.display(
        '{coll!s} was installed successfully'.
        format(coll=to_text(collection)),
    )
lib/ansible/galaxy/collection/__init__.py
262
ansible
{ "docstring": "Install a collection under a given path.\n\n :param collection: Collection to be installed.\n :param path: Collection dirs layout path.\n :param artifacts_manager: Artifacts manager.\n ", "language": "en", "n_whitespaces": 35, "n_words": 23, "vocab_size": 18 }
76
Python
62
43e55db20821a1341d21ffa1e4e7e6185b244105
__init__.py
266,595
33
170
install
https://github.com/ansible/ansible.git
ansible-galaxy - add signature verification of the MANIFEST.json (#76681) * ansible-galaxy collection install|verify: - Support verifying the origin of the MANIFEST.json when the Galaxy server has provided signatures. - Allow supplemental signatures to use during verification on the CLI/requirements file. * ansible-galaxy collection install: - Support disabling signature verification. This silences the warning provided by ansible-galaxy if the Galaxy server provided signatures it cannot use because no keyring is configured. - Store Galaxy server metadata alongside installed collections for provenance. This is used by 'ansible-galaxy collection verify --offline'. * Add unit tests for method that gets signatures from a Galaxy server * Add integration tests for user-provided signature sources - Test CLI option combinations - Test installing collections with valid/invalid signature sources - Test disabling GPG verification when installing collections - Test verifying collections with valid/invalid signature sources * Make signature verification advisory-by-default if signatures are provided by the Galaxy server - Make the default keyring None - Warn if the keyring is None but the Galaxy server provided signatures - Error if the keyring is None but the user supplied signatures - Error if the keyring is not None but is invalid * changelog * add ansible-galaxy user documentation for new options Co-authored-by: Matt Martz <[email protected]> Co-authored-by: Sviatoslav Sydorenko <[email protected]> Co-authored-by: Martin Krizek <[email protected]> Co-authored-by: Sandra McCann <[email protected]> Co-authored-by: Andy Mott <[email protected]> Co-authored-by: John R Barker <[email protected]>
315
0
78,490
13
1
2
def validate(self) -> None:
src/sentry/utils/services.py
16
sentry
{ "docstring": "\n Validates the settings for this backend (i.e. such as proper connection\n info).\n\n Raise ``InvalidConfiguration`` if there is a configuration error.\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 20 }
4
Python
4
fbe987561d2b9df070f34652785294cc430b41e4
services.py
96,505
7
8
validate
https://github.com/getsentry/sentry.git
typing: Add type hints to sentry/utils/services.py (#31984) We use these service backends in a lot of places that are typed. Adding typing here adds a lot of value since we can now properly introspect things that inherit from `Service` and related.
11
0
19,322
6
13
35
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
    if not self.streamlined:
        self.streamline()
    for e in self.ignoreExprs:
        e.streamline()

    if not self.keepTabs:
        instring = _ustr(instring).expandtabs()
    instrlen = len(instring)
    loc = 0
    preparseFn = self.preParse
    parseFn = self._parse
    ParserElement.resetCache()
    matches = 0
    try:
        while loc <= instrlen and matches < maxMatches:
            try:
                preloc = preparseFn(instring, loc)
                nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
            except ParseException:
                loc = preloc + 1
            else:
                if nextLoc > loc:
                    matches += 1
                    yield tokens, preloc, nextLoc
                    if overlap:
                        nextloc = preparseFn(instring, loc)
                        if nextloc > loc:
                            loc = nextLoc
                        else:
                            loc += 1
                    else:
                        loc = nextLoc
                else:
                    loc = preloc + 1
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clearing out pyparsing internal stack trace
            if getattr(exc, '__traceback__', None) is not None:
                exc.__traceback__ = self._trim_traceback(exc.__traceback__)
            raise exc
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
354
transferlearning
{ "docstring": "\n Scan the input string for expression matches. Each match will return the\n matching tokens, start location, and end location. May be called with optional\n ``maxMatches`` argument, to clip scanning after 'n' matches are found. If\n ``overlap`` is specified, then overlapping matches will be reported.\n\n Note that the start and end locations are reported relative to the string\n being parsed. See :class:`parseString` for more information on parsing\n strings with embedded tabs.\n\n Example::\n\n source = \"sldjf123lsdjjkf345sldkjf879lkjsfd987\"\n print(source)\n for tokens, start, end in Word(alphas).scanString(source):\n print(' '*start + '^'*(end-start))\n print(' '*start + tokens[0])\n\n prints::\n\n sldjf123lsdjjkf345sldkjf879lkjsfd987\n ^^^^^\n sldjf\n ^^^^^^^\n lsdjjkf\n ^^^^^^\n sldkjf\n ^^^^^^\n lkjsfd\n ", "language": "en", "n_whitespaces": 442, "n_words": 99, "vocab_size": 78 }
135
Python
80
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,394
41
217
scanString
https://github.com/jindongwang/transferlearning.git
upd; format
789
0
13,285
20
2
14
def _get_related_events(self) -> List[str]:
    # Request the relations of the event.
    channel = self.make_request(
        "GET",
        f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}",
        access_token=self.user_token,
    )
    self.assertEquals(200, channel.code, channel.json_body)
    return [ev["event_id"] for ev in channel.json_body["chunk"]]
tests/rest/client/test_relations.py
109
synapse
{ "docstring": "\n Requests /relations on the parent ID and returns a list of event IDs.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
27
Python
26
1da0f79d5455b594f2aa989106a672786f5b990f
test_relations.py
247,707
11
59
_get_related_events
https://github.com/matrix-org/synapse.git
Refactor relations tests (#12232) * Moves the relation pagination tests to a separate class. * Move the assertion of the response code into the `_send_relation` helper. * Moves some helpers into the base-class.
102
0
71,860
11
3
13
def _express_axis(self, axis, frame):
    try:
        ax_mat = axis.to_matrix(self.parent_interframe)
    except ValueError:
        ax_mat = axis.to_matrix(self.child_interframe)
    try:
        self.parent_interframe.dcm(frame)  # Check if connected
        int_frame = self.parent_interframe
    except ValueError:
        int_frame = self.child_interframe
    return self._to_vector(ax_mat, int_frame).express(frame)
sympy/physics/mechanics/joint.py
118
sympy
{ "docstring": "Helper function to get an axis expressed in a specified frame.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
30
Python
22
7c199e306648513c13c9b5c5b5fad06e5f1c3020
joint.py
199,355
11
72
_express_axis
https://github.com/sympy/sympy.git
Fix failing test and simplify joint orient methods
128
0
49,253
12
1
3
def limits(self):
    return self.args[1]
sympy/geometry/curve.py
23
sympy
{ "docstring": "The limits for the curve.\n\n Returns\n =======\n\n limits : tuple\n Contains parameter and lower and upper limits.\n\n Examples\n ========\n\n >>> from sympy.abc import t\n >>> from sympy import Curve\n >>> C = Curve([t, t**3], (t, -2, 2))\n >>> C.limits\n (t, -2, 2)\n\n See Also\n ========\n\n plot_interval\n\n ", "language": "en", "n_whitespaces": 155, "n_words": 46, "vocab_size": 36 }
4
Python
4
498015021131af4dbb07eb110e5badaba8250c7b
curve.py
196,265
2
13
limits
https://github.com/sympy/sympy.git
Updated import locations
18
0
47,765
7
8
24
def extract(self, member, path="", set_attrs=True):
    self._check("r")

    if isinstance(member, str):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member

    # Prepare the link target for makelink().
    if tarinfo.islnk():
        tarinfo._link_target = os.path.join(path, tarinfo.linkname)

    try:
        self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
                             set_attrs=set_attrs)
    except EnvironmentError as e:
        if self.errorlevel > 0:
            raise
        else:
            if e.filename is None:
                self._dbg(1, "tarfile: %s" % e.strerror)
            else:
                self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
    except ExtractError as e:
        if self.errorlevel > 1:
            raise
        else:
            self._dbg(1, "tarfile: %s" % e)
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
279
pipenv
{ "docstring": "Extract a member from the archive to the current working directory,\n using its full name. Its file information is extracted as accurately\n as possible. `member' may be a filename or a TarInfo object. You can\n specify a different directory using `path'. File attributes (owner,\n mtime, mode) are set unless `set_attrs' is False.\n ", "language": "en", "n_whitespaces": 99, "n_words": 52, "vocab_size": 45 }
76
Python
52
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
tarfile.py
21,475
24
170
extract
https://github.com/pypa/pipenv.git
Vendor in pip 22.1.2
372
0
3,863
19
2
8
def formatmonthname(self, theyear, themonth, withyear=True):
    if withyear:
        s = '%s %s' % (month_name[themonth], theyear)
    else:
        s = '%s' % month_name[themonth]
    return '<tr><th colspan="7" class="%s">%s</th></tr>' % (
        self.cssclass_month_head, s)
python3.10.4/Lib/calendar.py
80
XX-Net
{ "docstring": "\n Return a month name as a table row.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 7 }
28
Python
24
8198943edd73a363c266633e1aa5b2a9e9c9f526
calendar.py
221,244
7
49
formatmonthname
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
89
0
56,291
11
2
6
def unicode_is_ascii(u_string):
    assert isinstance(u_string, str)
    try:
        u_string.encode("ascii")
        return True
    except UnicodeEncodeError:
        return False
pipenv/patched/pip/_vendor/requests/_internal_utils.py
50
pipenv
{ "docstring": "Determine if unicode string only contains ASCII characters.\n\n :param str u_string: unicode string to check. Must be unicode\n and not Python 2 `str`.\n :rtype: bool\n ", "language": "en", "n_whitespaces": 41, "n_words": 25, "vocab_size": 22 }
13
Python
12
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
_internal_utils.py
22,039
7
28
unicode_is_ascii
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
46
0
4,128
10
1
8
def test_do_nothing_if_converted(self, run_convert_mock):
    stdout, _ = self.call_command()
    run_convert_mock.assert_not_called()
    self.assertIn("Converting all PNG thumbnails to WebP", stdout)
src/documents/tests/test_management_convert_thumbnail.py
53
paperless-ngx
{ "docstring": "\n GIVEN:\n - Document exists with default WebP thumbnail path\n WHEN:\n - Thumbnail conversion is attempted\n THEN:\n - Nothing is converted\n ", "language": "en", "n_whitespaces": 82, "n_words": 20, "vocab_size": 17 }
15
Python
15
08c3d6e84b17da2acfb10250438fe357398e5e0e
test_management_convert_thumbnail.py
319,727
4
30
test_do_nothing_if_converted
https://github.com/paperless-ngx/paperless-ngx.git
Fixes existing testing, adds test coverage of new command
43
0
116,987
8
2
6
def set_rollback(self, rollback):
    if not self.in_atomic_block:
        raise TransactionManagementError(
            "The rollback flag doesn't work outside of an 'atomic' block."
        )
    self.needs_rollback = rollback
django/db/backends/base/base.py
43
django
{ "docstring": "\n Set or unset the \"needs rollback\" flag -- for *advanced use* only.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
22
Python
21
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
204,826
6
24
set_rollback
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
80
0
50,909
10
1
12
def test_warning_does_not_halt(self):
    self.write_settings(
        "settings.py",
        apps=[
            "admin_scripts.app_raising_warning",
            "django.contrib.auth",
            "django.contrib.contenttypes",
        ],
        sdict={"DEBUG": True},
    )
    args = ["check"]
    out, err = self.run_manage(args)
    expected_err = (
        "System check identified some issues:\n"  # No "CommandError: " part
        "\n"
        "WARNINGS:\n"
        "?: A warning\n"
        "\n"
        "System check identified 1 issue (0 silenced).\n"
    )
    self.assertEqual(err, expected_err)
    self.assertNoOutput(out)
tests/admin_scripts/tests.py
139
django
{ "docstring": "\n When there are only warnings or less serious messages, then Django\n should not prevent user from launching their project, so `check`\n command should not raise `CommandError` exception.\n\n In this test we also test output format.\n ", "language": "en", "n_whitespaces": 71, "n_words": 35, "vocab_size": 32 }
49
Python
42
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,400
22
71
test_warning_does_not_halt
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
268
0
51,951
11
1
15
def test_from(self) -> None:
    self._create_users_with_media(20, 2)

    channel = self.make_request(
        "GET",
        self.url + "?from=5",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["total"], 20)
    self.assertEqual(len(channel.json_body["users"]), 15)
    self.assertNotIn("next_token", channel.json_body)
    self._check_fields(channel.json_body["users"])
tests/rest/admin/test_statistics.py
168
synapse
{ "docstring": "\n Testing list of media with a defined starting point (from)\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
25
Python
25
c97042f7eef3748e17c90e48a4122389a89c4735
test_statistics.py
249,167
15
103
test_from
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13469)
121
0
72,674
11
1
2
def period(self):
    return self["period"]
packages/python/plotly/plotly/graph_objs/layout/polar/_angularaxis.py
22
plotly.py
{ "docstring": "\n Set the angular period. Has an effect only when\n `angularaxis.type` is \"category\".\n\n The 'period' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 95, "n_words": 36, "vocab_size": 34 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_angularaxis.py
232,103
2
11
period
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,547
7
6
21
def operand_to_template_torchscript(op_id, oper):
    shape_parts = ["("]
    for d, s in enumerate(oper.shape):
        if s > 0:
            # Fixed shape dimension: just add the value.
            shape_parts.append(str(s))
        else:
            # Flexible shape dimension: it should have been computed in a variable.
            shape_parts.append(flex_name(op_id, d))
        shape_parts.append(",")
    shape_parts.append(")")
    shape_code = "".join(shape_parts)
    if oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32:
        return f"torch.zeros({shape_code}, dtype=torch.float32)"
    elif oper.op_type == NNAPI_OperandCode.TENSOR_INT32:
        return f"torch.zeros({shape_code}, dtype=torch.int32)"
    elif oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM:
        return (
            f"torch.quantize_per_tensor("
            f"torch.zeros(1), scale={oper.scale}, zero_point={oper.zero_point}, dtype=torch.quint8)"
            f".expand({shape_code}).contiguous()"
        )
    raise Exception(f"Unsupported output operand type: {oper.op_type}")
torch/backends/_nnapi/serializer.py
238
pytorch
{ "docstring": "Return a TorchScript expression to build a template for a given operand.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
77
Python
62
d9106116aa5e399f7d63feeb7fc77f92a076dd93
serializer.py
102,109
21
120
operand_to_template_torchscript
https://github.com/pytorch/pytorch.git
nnapi: Add int32 type torchscript expressions (#70197) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/70197 Test Plan: * `pytest test/test_nnapi.py` * Testing via ops following this commit Reviewed By: anshuljain1, dreiss Differential Revision: D33237917 fbshipit-source-id: f0493620f28a62ad9fe0b97b67d1e25059d50c24
322
0
21,468
14
1
14
def test_ignore_model_permissions_with_unauthenticated_user(self):
    request = factory.get('/', format='json')
    request.resolver_match = ResolverMatch('get', (), {})
    response = ignored_get_queryset_list_view(request)
    self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
tests/test_permissions.py
86
django-rest-framework
{ "docstring": "\n We check that the ``_ignore_model_permissions`` attribute\n doesn't ignore the authentication.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 9 }
16
Python
14
c0d95cb9678b1693f8f1a8658d4665c51de87ddf
test_permissions.py
48,708
5
50
test_ignore_model_permissions_with_unauthenticated_user
https://github.com/encode/django-rest-framework.git
Fix #8771 - Checking for authentication even if `_ignore_model_permissions = True` (#8772)
51
0
9,572
10
2
8
def skipUnlessAnyDBFeature(*features):
    return _deferredSkip(
        lambda: not any(
            getattr(connection.features, feature, False) for feature in features
        ),
        "Database doesn't support any of the feature(s): %s" % ", ".join(features),
        "skipUnlessAnyDBFeature",
    )
django/test/testcases.py
72
django
{ "docstring": "Skip a test unless a database has any of the named features.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
28
Python
28
9c19aff7c7561e3a82978a272ecdaad40dda5c00
testcases.py
206,470
8
43
skipUnlessAnyDBFeature
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
76
0
51,534
14
3
10
def installed_models(self, tables):
    tables = set(map(self.identifier_converter, tables))
    return {
        m
        for m in self.get_migratable_models()
        if self.identifier_converter(m._meta.db_table) in tables
    }
django/db/backends/base/introspection.py
73
django
{ "docstring": "\n Return a set of all models represented by the provided list of table\n names.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
19
Python
16
9c19aff7c7561e3a82978a272ecdaad40dda5c00
introspection.py
204,850
7
46
installed_models
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
80
0
50,928
12
16
32
def customize_compiler(compiler):
    if compiler.compiler_type == "unix":
        if sys.platform == "darwin":
            # Perform first-time customization of compiler-related
            # config vars on OS X now that we know we need a compiler.
            # This is primarily to support Pythons from binary
            # installers.  The kind and paths to build tools on
            # the user system may vary significantly from the system
            # that Python itself was built on.  Also the user OS
            # version and build tools may not support the same set
            # of CPU architectures for universal builds.
            if not _config_vars.get('CUSTOMIZED_OSX_COMPILER'):
                import _osx_support
                _osx_support.customize_compiler(_config_vars)
                _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'

        (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
            get_config_vars('CC', 'CXX', 'CFLAGS', 'CCSHARED', 'LDSHARED',
                            'SHLIB_SUFFIX', 'AR', 'ARFLAGS')

        if 'CC' in os.environ:
            newcc = os.environ['CC']
            if (sys.platform == 'darwin'
                    and 'LDSHARED' not in os.environ
                    and ldshared.startswith(cc)):
                # On OS X, if CC is overridden, use that as the default
                # command for LDSHARED as well
                ldshared = newcc + ldshared[len(cc):]
            cc = newcc
        if 'CXX' in os.environ:
            cxx = os.environ['CXX']
        if 'LDSHARED' in os.environ:
            ldshared = os.environ['LDSHARED']
        if 'CPP' in os.environ:
            cpp = os.environ['CPP']
        else:
            cpp = cc + " -E"           # not always
        if 'LDFLAGS' in os.environ:
            ldshared = ldshared + ' ' + os.environ['LDFLAGS']
        if 'CFLAGS' in os.environ:
            cflags = cflags + ' ' + os.environ['CFLAGS']
            ldshared = ldshared + ' ' + os.environ['CFLAGS']
        if 'CPPFLAGS' in os.environ:
            cpp = cpp + ' ' + os.environ['CPPFLAGS']
            cflags = cflags + ' ' + os.environ['CPPFLAGS']
            ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
        if 'AR' in os.environ:
            ar = os.environ['AR']
        if 'ARFLAGS' in os.environ:
            archiver = ar + ' ' + os.environ['ARFLAGS']
        else:
            archiver = ar + ' ' + ar_flags

        cc_cmd = cc + ' ' + cflags
        compiler.set_executables(
            preprocessor=cpp,
            compiler=cc_cmd,
            compiler_so=cc_cmd + ' ' + ccshared,
            compiler_cxx=cxx,
            linker_so=ldshared,
            linker_exe=cc,
            archiver=archiver)

        compiler.shared_lib_extension = shlib_suffix
python3.10.4/Lib/distutils/sysconfig.py
658
XX-Net
{ "docstring": "Do any platform-specific customization of a CCompiler instance.\n\n Mainly needed on Unix, so we can plug in the information that\n varies across Unices and is stored in Python's Makefile.\n ", "language": "en", "n_whitespaces": 38, "n_words": 29, "vocab_size": 28 }
303
Python
147
8198943edd73a363c266633e1aa5b2a9e9c9f526
sysconfig.py
223,004
50
369
customize_compiler
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
957
0
56,846
17
1
9
def activate_previous_tab(self) -> None:
    current_tab_index = self.find_tab_by_name(self._active_tab_name)
    previous_tab_index = current_tab_index - 1
    previous_tab_name = self.tabs[previous_tab_index].name
    self._active_tab_name = previous_tab_name
src/textual/widgets/tabs.py
63
textual
{ "docstring": "Activate the tab to the left of the currently active tab", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 8 }
18
Python
13
18a3fb4b576f06e1e82e8a030d95bea2df1836b1
tabs.py
182,359
6
38
activate_previous_tab
https://github.com/Textualize/textual.git
Tidying Tabs, adding docstrings
53
0
43,804
9
10
34
def transform(self, node, results):
    head = results["head"]
    method = results["method"][0]  # Extract node for method name
    tail = results["tail"]
    syms = self.syms
    method_name = method.value
    isiter = method_name.startswith("iter")
    isview = method_name.startswith("view")
    if isiter or isview:
        method_name = method_name[4:]
    assert method_name in ("keys", "items", "values"), repr(method)
    head = [n.clone() for n in head]
    tail = [n.clone() for n in tail]
    special = not tail and self.in_special_context(node, isiter)
    args = head + [pytree.Node(syms.trailer,
                               [Dot(),
                                Name(method_name,
                                     prefix=method.prefix)]),
                   results["parens"].clone()]
    new = pytree.Node(syms.power, args)
    if not (special or isview):
        new.prefix = ""
        new = Call(Name("iter" if isiter else "list"), [new])
    if tail:
        new = pytree.Node(syms.power, [new] + tail)
    new.prefix = node.prefix
    return new

P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)

P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
        | comp_for< 'for' any 'in' node=any any* >
     """
p2 = patcomp.compile_pattern(P2)
python3.10.4/Lib/lib2to3/fixes/fix_dict.py
416
XX-Net
{ "docstring": "for_stmt< 'for' any 'in' node=any ':' any* >\n | comp_for< 'for' any 'in' node=any any* >\n ", "language": "en", "n_whitespaces": 35, "n_words": 16, "vocab_size": 10 }
127
Python
81
8198943edd73a363c266633e1aa5b2a9e9c9f526
fix_dict.py
218,645
27
232
transform
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
440
0
55,443
15
1
5
def __lt__(self, other):
    return self.sort_key() < sympify(other).sort_key()
sympy/combinatorics/partitions.py
40
sympy
{ "docstring": "\n Checks if a partition is less than the other.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Partition\n >>> a = Partition([1, 2], [3, 4, 5])\n >>> b = Partition([1], [2, 3], [4], [5])\n >>> a.rank, b.rank\n (9, 34)\n >>> a < b\n True\n ", "language": "en", "n_whitespaces": 120, "n_words": 42, "vocab_size": 34 }
7
Python
7
498015021131af4dbb07eb110e5badaba8250c7b
partitions.py
196,112
2
23
__lt__
https://github.com/sympy/sympy.git
Updated import locations
21
0
47,612
10
2
8
def get_key(self):
    return (
        RayWrapper.materialize(self.key)
        if isinstance(self.key, ray.ObjectRef)
        else self.key
    )
modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition.py
51
modin
{ "docstring": "\n Get integer key of this partition in dict-storage of `self.gpu_manager`.\n\n Returns\n -------\n int\n ", "language": "en", "n_whitespaces": 49, "n_words": 13, "vocab_size": 12 }
11
Python
11
1dc16415333bf2428ee2b1f4d31ff94e66b9a0a6
partition.py
154,522
6
32
get_key
https://github.com/modin-project/modin.git
REFACTOR-#5009: use RayWrapper.materialize instead of ray.get (#5010) Signed-off-by: Myachev <[email protected]>
65
0
36,045
10
18
26
def find_common_type(types):
    if not types:
        raise ValueError("no types given")

    first = types[0]

    # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
    # => object
    if lib.dtypes_all_equal(list(types)):
        return first

    # get unique types (dict.fromkeys is used as order-preserving set())
    types = list(dict.fromkeys(types).keys())

    if any(isinstance(t, ExtensionDtype) for t in types):
        for t in types:
            if isinstance(t, ExtensionDtype):
                res = t._get_common_dtype(types)
                if res is not None:
                    return res
        return np.dtype("object")

    # take lowest unit
    if all(is_datetime64_dtype(t) for t in types):
        return np.dtype("datetime64[ns]")
    if all(is_timedelta64_dtype(t) for t in types):
        return np.dtype("timedelta64[ns]")

    # don't mix bool / int or float or complex
    # this is different from numpy, which casts bool with float/int as int
    has_bools = any(is_bool_dtype(t) for t in types)
    if has_bools:
        for t in types:
            if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):
                return np.dtype("object")

    return np.find_common_type(types, [])
pandas/core/dtypes/cast.py
322
pandas
{ "docstring": "\n Find a common data type among the given dtypes.\n\n Parameters\n ----------\n types : list of dtypes\n\n Returns\n -------\n pandas extension or numpy dtype\n\n See Also\n --------\n numpy.find_common_type\n\n ", "language": "en", "n_whitespaces": 61, "n_words": 27, "vocab_size": 27 }
131
Python
74
aa889881863d2f47edd4580f128be4e138ae1e80
cast.py
163,074
24
194
find_common_type
https://github.com/pandas-dev/pandas.git
CLN: assorted follow-ups (#45184)
317
0
39,366
14
2
4
def _get_text_color(self, style):
    if style['color'] is not None:
        fill = '#' + style['color']
    else:
        fill = '#000'
    return fill
pipenv/patched/notpip/_vendor/pygments/formatters/img.py
60
pipenv
{ "docstring": "\n Get the correct color for the token from the style.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 8 }
19
Python
15
f3166e673fe8d40277b804d35d77dcdb760fc3b3
img.py
20,339
6
32
_get_text_color
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
69
0
3,330
11
2
14
async def async_added_to_hass(self) -> None:
    self.async_on_remove(
        async_dispatcher_connect(
            self.hass,
            SIA_EVENT.format(self.port, self.account),
            self.async_handle_event,
        )
    )
    self.handle_last_state(await self.async_get_last_state())
    if self._attr_available:
        self.async_create_post_interval_update_cb()
homeassistant/components/sia/sia_entity_base.py
95
core
{ "docstring": "Run when entity about to be added to hass.\n\n Overridden from Entity.\n\n 1. register the dispatcher and add the callback to on_remove\n 2. get previous state from storage and pass to entity specific function\n 3. if available: create availability cb\n ", "language": "en", "n_whitespaces": 75, "n_words": 40, "vocab_size": 33 }
18
Python
17
af4e37339a39badd5596e8bc9ba86d6c1994aa1b
sia_entity_base.py
291,940
19
58
async_added_to_hass
https://github.com/home-assistant/core.git
Add Connectivity sensor to SIA (#64305) * implemented connectivity sensor * further cleanup off update code * cleanup and tighter behaviour for attributes * added seperate connectivity class to binary sensor * callbacks and keys * redid name and unique_id logic, non-breaking result * using entry more in inits * Fix import * fix ping_interval in sia_entity_base * added ping_interval default to next * fixed next Co-authored-by: Martin Hjelmare <[email protected]>
131
0
91,043
12
1
16
def test_metadata(self):
    checkpoint = self._prepare_fs_checkpoint()

    # Convert into dict checkpoint
    data_dict = checkpoint.to_dict()
    self.assertIsInstance(data_dict, dict)

    data_dict["my_marker"] = "marked"

    # Create from dict
    checkpoint = Checkpoint.from_dict(data_dict)
    self.assertTrue(checkpoint._data_dict)

    self._assert_fs_checkpoint(checkpoint)

    # Convert back to dict
    data_dict_2 = Checkpoint.from_directory(checkpoint.to_directory()).to_dict()
    assert data_dict_2["my_marker"] == "marked"
python/ray/air/tests/test_checkpoints.py
142
ray
{ "docstring": "Test conversion with metadata involved.\n\n a. from fs to dict checkpoint;\n b. drop some marker to dict checkpoint;\n c. convert back to fs checkpoint;\n d. convert back to dict checkpoint.\n\n Assert that the marker should still be there.", "language": "en", "n_whitespaces": 72, "n_words": 38, "vocab_size": 27 }
39
Python
27
cc53a1e28bdb0dc7121f4378c651e6290b7bc84d
test_checkpoints.py
141,909
10
79
test_metadata
https://github.com/ray-project/ray.git
[air] update checkpoint.py to deal with metadata in conversion. (#25727) This is carved out from https://github.com/ray-project/ray/pull/25558. tlrd: checkpoint.py current doesn't support the following ``` a. from fs to dict checkpoint; b. drop some marker to dict checkpoint; c. convert back to fs checkpoint; d. convert back to dict checkpoint. Assert that the marker should still be there ```
130
0
32,525
12
3
13
def get_reverse_charge_recoverable_total(filters):
    query_filters = get_filters(filters)
    query_filters.append(["reverse_charge", "=", "Y"])
    query_filters.append(["recoverable_reverse_charge", ">", "0"])
    query_filters.append(["docstatus", "=", 1])
    try:
        return (
            frappe.db.get_all(
                "Purchase Invoice", filters=query_filters, fields=["sum(total)"], as_list=True, limit=1
            )[0][0]
            or 0
        )
    except (IndexError, TypeError):
        return 0
erpnext/regional/report/uae_vat_201/uae_vat_201.py
162
erpnext
{ "docstring": "Returns the sum of the total of each Purchase invoice made with recoverable reverse charge.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 13 }
33
Python
30
494bd9ef78313436f0424b918f200dab8fc7c20b
uae_vat_201.py
67,268
14
96
get_reverse_charge_recoverable_total
https://github.com/frappe/erpnext.git
style: format code with black
19
0
14,462
16
1
4
def find_distributions(self, context=Context()):
python3.10.4/Lib/importlib/metadata/__init__.py
22
XX-Net
{ "docstring": "\n Find distributions.\n\n Return an iterable of all Distribution instances capable of\n loading the metadata for packages matching the ``context``,\n a DistributionFinder.Context instance.\n ", "language": "en", "n_whitespaces": 58, "n_words": 22, "vocab_size": 20 }
3
Python
3
8198943edd73a363c266633e1aa5b2a9e9c9f526
__init__.py
218,263
8
12
find_distributions
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
10
0
55,232
7
1
22
def test_multiple_doc_paths(self):
    doc_a = Document.objects.create(
        title="does not matter",
        created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),
        mime_type="application/pdf",
        pk=2,
        checksum="2",
        archive_serial_number=4,
        storage_path=StoragePath.objects.create(
            name="sp1",
            path="ThisIsAFolder/{asn}/{created}",
        ),
    )
    doc_b = Document.objects.create(
        title="does not matter",
        created=timezone.make_aware(datetime.datetime(2020, 7, 25, 7, 36, 51, 153)),
        mime_type="application/pdf",
        pk=5,
        checksum="abcde",
        storage_path=StoragePath.objects.create(
            name="sp2",
            path="SomeImportantNone/{created}",
        ),
    )

    self.assertEqual(generate_filename(doc_a), "ThisIsAFolder/4/2020-06-25.pdf")
    self.assertEqual(generate_filename(doc_b), "SomeImportantNone/2020-07-25.pdf")
src/documents/tests/test_file_handling.py
262
paperless-ngx
{ "docstring": "\n GIVEN:\n - Two documents, each with different storage paths\n WHEN:\n - the filename is generated for the documents\n THEN:\n - Each document generated filename uses its storage path\n ", "language": "en", "n_whitespaces": 90, "n_words": 28, "vocab_size": 22 }
49
Python
33
69ef26dab04d51e7e102dcb33cd98ddc6ad975fd
test_file_handling.py
319,621
26
170
test_multiple_doc_paths
https://github.com/paperless-ngx/paperless-ngx.git
Feature: Dynamic document storage pathes (#916) * Added devcontainer * Add feature storage pathes * Exclude tests and add versioning * Check escaping * Check escaping * Check quoting * Echo * Escape * Escape : * Double escape \ * Escaping * Remove if * Escape colon * Missing \ * Esacpe : * Escape all * test * Remove sed * Fix exclude * Remove SED command * Add LD_LIBRARY_PATH * Adjusted to v1.7 * Updated test-cases * Remove devcontainer * Removed internal build-file * Run pre-commit * Corrected flak8 error * Adjusted to v1.7 * Updated test-cases * Corrected flak8 error * Adjusted to new plural translations * Small adjustments due to code-review backend * Adjusted line-break * Removed PAPERLESS prefix from settings variables * Corrected style change due to search+replace * First documentation draft * Revert changes to Pipfile * Add sphinx-autobuild with keep-outdated * Revert merge error that results in wrong storage path is evaluated * Adjust styles of generated files ... * Adds additional testing to cover dynamic storage path functionality * Remove unnecessary condition * Add hint to edit storage path dialog * Correct spelling of pathes to paths * Minor documentation tweaks * Minor typo * improving wrapping of filter editor buttons with new storage path button * Update .gitignore * Fix select border radius in non input-groups * Better storage path edit hint * Add note to edit storage path dialog re document_renamer * Add note to bulk edit storage path re document_renamer * Rename FILTER_STORAGE_DIRECTORY to PATH * Fix broken filter rule parsing * Show default storage if unspecified * Remove note re storage path on bulk edit * Add basic validation of filename variables Co-authored-by: Markus Kling <[email protected]> Co-authored-by: Trenton Holmes <[email protected]> Co-authored-by: Michael Shamoon <[email protected]> Co-authored-by: Quinn Casey <[email protected]>
323
0
116,978
13
9
3
def download(self, url):
django/core/management/templates.py
15
django
{ "docstring": "\n Download the given URL and return the file name.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
3
Python
3
9c19aff7c7561e3a82978a272ecdaad40dda5c00
templates.py
204,713
37
261
download
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
10
0
50,847
6
3
8
def media_album_name(self) -> str | None:
    if self._playing and self._is_feature_available(FeatureName.Album):
        return self._playing.album
    return None
homeassistant/components/apple_tv/media_player.py
53
core
{ "docstring": "Album name of current playing media, music track only.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
14
Python
13
5276d849ec497ccd0cecf3cb6a8dacae4fa6f845
media_player.py
306,846
5
32
media_album_name
https://github.com/home-assistant/core.git
Improve type hints in apple_tv media player (#77940)
46
0
105,629
9
3
17
def get_model_params(model_name, override_params):
    if model_name.startswith('efficientnet'):
        w, d, _, p = efficientnet_params(model_name)
        blocks_args, global_params = efficientnet(width_coefficient=w, depth_coefficient=d, dropout_rate=p)
    else:
        raise NotImplementedError('model name is not pre-defined: %s' % model_name)
    if override_params:
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/efficientnet.py
116
PaddleHub
{ "docstring": " Get the block args and global params for a given model ", "language": "en", "n_whitespaces": 12, "n_words": 11, "vocab_size": 11 }
35
Python
29
ffcde21305c61d950a9f93e57e6180c9a9665b87
efficientnet.py
50,243
9
71
get_model_params
https://github.com/PaddlePaddle/PaddleHub.git
add disco_diffusion_ernievil_base
78
0
10,062
12
3
11
def load(self, path):
    path = os.path.normpath(path)
    mtime = os.stat(path).st_mtime

    if path not in self or self[path].mtime != mtime:
        manifest = self.build(path)
        self[path] = self.manifest_mod(manifest, mtime)

    return self[path].manifest
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
111
transferlearning
{ "docstring": "\n Load a manifest at path or return a suitable manifest already loaded.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 10 }
27
Python
23
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
__init__.py
63,184
7
71
load
https://github.com/jindongwang/transferlearning.git
upd; format
84
0
13,190
10
1
6
async def test_default_discovery_in_progress(hass, manager):
    mock_integration(hass, MockModule("comp"))
    mock_entity_platform(hass, "config_flow.comp", None)
tests/test_config_entries.py
45
core
{ "docstring": "Test that a flow using default discovery can only be triggered once.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
9
Python
9
7cd68381f1d4f58930ffd631dfbfc7159d459832
test_config_entries.py
316,440
21
165
test_default_discovery_in_progress
https://github.com/home-assistant/core.git
Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)
18
0
115,018
10
1
14
def test_override(self) -> None:
    self.get_success(
        self.store.register_user(
            self.user_id,
            self.pwhash,
            approved=True,
        )
    )

    user = self.get_success(self.store.get_user_by_id(self.user_id))
    self.assertIsNotNone(user)
    assert user is not None
    self.assertEqual(user["approved"], 1)

    approved = self.get_success(self.store.is_user_approved(self.user_id))
    self.assertTrue(approved)
tests/storage/test_registration.py
150
synapse
{ "docstring": "Tests that if we require approval for new accounts, but we explicitly say the\n new user should be considered approved, they're marked as approved.\n ", "language": "en", "n_whitespaces": 38, "n_words": 24, "vocab_size": 22 }
26
Python
23
be76cd8200b18f3c68b895f85ac7ef5b0ddc2466
test_registration.py
249,580
17
94
test_override
https://github.com/matrix-org/synapse.git
Allow admins to require a manual approval process before new accounts can be used (using MSC3866) (#13556)
156
0
73,002
11
1
9
def test_action_column_class(self):
    response = self.client.get(reverse("admin:admin_views_subscriber_changelist"))
    self.assertIsNotNone(response.context["action_form"])
    self.assertContains(response, "action-checkbox-column")
tests/admin_views/test_actions.py
69
django
{ "docstring": "The checkbox column class is present in the response.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
8
Python
8
9c19aff7c7561e3a82978a272ecdaad40dda5c00
test_actions.py
207,495
4
38
test_action_column_class
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
36
0
51,985
11
5
27
async def async_load_history_from_system() -> dict[str, BluetoothServiceInfoBleak]:
    if platform.system() != "Linux":
        return {}
    from bluetooth_adapters import (  # pylint: disable=import-outside-toplevel
        BlueZDBusObjects,
    )

    bluez_dbus = BlueZDBusObjects()
    await bluez_dbus.load()
    now = monotonic_time_coarse()
    return {
        address: BluetoothServiceInfoBleak(
            name=history.advertisement_data.local_name
            or history.device.name
            or history.device.address,
            address=history.device.address,
            rssi=history.device.rssi,
            manufacturer_data=history.advertisement_data.manufacturer_data,
            service_data=history.advertisement_data.service_data,
            service_uuids=history.advertisement_data.service_uuids,
            source=history.source,
            device=history.device,
            advertisement=history.advertisement_data,
            connectable=False,
            time=now,
        )
        for address, history in bluez_dbus.history.items()
    }
homeassistant/components/bluetooth/util.py
233
core
{ "docstring": "Load the device and advertisement_data history if available on the current system.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
55
Python
51
1589c06203c0bc9f87adcc97fe34d5c52aaf403a
util.py
290,029
28
153
async_load_history_from_system
https://github.com/home-assistant/core.git
Significantly reduce clock_gettime syscalls on platforms with broken vdso (#81257)
261
0
89,155
14
1
7
def open(self, filename):
    self.filename = filename
    self.file = io.open(self.filename, 'r', errors=self.errors)
    self.current_line = 0
python3.10.4/Lib/distutils/text_file.py
62
XX-Net
{ "docstring": "Open a new file named 'filename'. This overrides both the\n 'filename' and 'file' arguments to the constructor.", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 16 }
14
Python
12
8198943edd73a363c266633e1aa5b2a9e9c9f526
text_file.py
223,376
4
38
open
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
42
0
56,871
10
7
23
def execute(self, sql, params=()):
    # Don't perform the transactional DDL check if SQL is being collected
    # as it's not going to be executed anyway.
    if (
        not self.collect_sql
        and self.connection.in_atomic_block
        and not self.connection.features.can_rollback_ddl
    ):
        raise TransactionManagementError(
            "Executing DDL statements while in a transaction on databases "
            "that can't perform a rollback is prohibited."
        )
    # Account for non-string statement objects.
    sql = str(sql)
    # Log the command we're running, then run it
    logger.debug(
        "%s; (params %r)", sql, params, extra={"params": params, "sql": sql}
    )
    if self.collect_sql:
        ending = "" if sql.rstrip().endswith(";") else ";"
        if params is not None:
            self.collected_sql.append(
                (sql % tuple(map(self.quote_value, params))) + ending
            )
        else:
            self.collected_sql.append(sql + ending)
    else:
        with self.connection.cursor() as cursor:
            cursor.execute(sql, params)
django/db/backends/base/schema.py
264
django
{ "docstring": "Execute the given SQL statement, with optional parameters.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
117
Python
91
9c19aff7c7561e3a82978a272ecdaad40dda5c00
schema.py
204,916
26
154
execute
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
420
0
50,973
19
1
3
def disconnect(self):
    self.is_connected = False
    return
mindsdb/integrations/handlers/pinot_handler/pinot_handler.py
23
mindsdb
{ "docstring": " Close any existing connections\r\n\r\n Should switch self.is_connected.\r\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
6
Python
6
b6a81acd4cff972d66bffa341782e76f2897e134
pinot_handler.py
115,957
3
12
disconnect
https://github.com/mindsdb/mindsdb.git
updated the logic of the disconnect() method
27
0
25,613
7
1
8
def test_rl_backward_kill_word(lineedit, text, deleted, rest):
    _validate_deletion(lineedit, readlinecommands.rl_backward_kill_word, [],
                       text, deleted, rest)
tests/unit/components/test_readlinecommands.py
41
qutebrowser
{ "docstring": "Delete to word beginning and see if it comes back with yank.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
11
Python
9
ab65c542a0551abf105eeb58803cd08bd040753b
test_readlinecommands.py
320,679
4
29
test_rl_backward_kill_word
https://github.com/qutebrowser/qutebrowser.git
Add :rl-rubout and :rl-filename-rubout Closes #4561
61
0
117,273
8
1
8
def test_upload_ban(self) -> None:
    data = b"Some evil data"

    self.helper.upload_media(
        self.upload_resource, data, tok=self.tok, expect_code=400
    )
tests/rest/media/v1/test_media_storage.py
54
synapse
{ "docstring": "Attempt to upload some data that includes bytes \"evil\", which should\n get rejected by the spam checker.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 17 }
15
Python
15
32c828d0f760492711a98b11376e229d795fd1b3
test_media_storage.py
247,517
8
34
test_upload_ban
https://github.com/matrix-org/synapse.git
Add type hints to `tests/rest`. (#12208) Co-authored-by: Patrick Cloke <[email protected]>
54
0
71,710
9
1
7
def ismethod(obj):
    return _inspect.ismethod(tf.__internal__.decorator.unwrap(obj)[1])
keras/utils/tf_inspect.py
42
keras
{ "docstring": "TFDecorator-aware replacement for inspect.ismethod.", "language": "en", "n_whitespaces": 3, "n_words": 4, "vocab_size": 4 }
4
Python
4
84afc5193d38057e2e2badf9c889ea87d80d8fbf
tf_inspect.py
277,059
2
25
ismethod
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
10
0
81,836
12
3
13
def get_sales_orders(quotations):
    if not quotations:
        return []

    quotation_names = [q.name for q in quotations]

    return frappe.db.sql(
        .format(", ".join(["%s"] * len(quotation_names))),
        tuple(quotation_names),
        as_dict=1,
    )  # nosec
erpnext/selling/report/territory_wise_sales/territory_wise_sales.py
98
erpnext
{ "docstring": "\n\tSELECT so.`name`, so.`base_grand_total`, soi.prevdoc_docname as quotation\n\tFROM `tabSales Order` so, `tabSales Order Item` soi\n\tWHERE so.docstatus=1 AND so.name = soi.parent AND soi.prevdoc_docname in ({0})\n\t", "language": "en", "n_whitespaces": 21, "n_words": 24, "vocab_size": 21 }
27
Python
26
494bd9ef78313436f0424b918f200dab8fc7c20b
territory_wise_sales.py
67,455
15
59
get_sales_orders
https://github.com/frappe/erpnext.git
style: format code with black
17
0
14,529
14
1
25
def test_bug_13636():
    pi, ki, pf = tensor_heads("pi, ki, pf", [LorentzIndex])
    i0, i1, i2, i3, i4 = tensor_indices("i0:5", LorentzIndex)
    x = Symbol("x")
    pis = pi(i2) * G(-i2)
    kis = ki(i3) * G(-i3)
    pfs = pf(i4) * G(-i4)

    a = pfs * G(i0) * kis * G(i1) * pis * G(-i1) * kis * G(-i0)
    b = pfs * G(i0) * kis * G(i1) * pis * x * G(-i0) * pi(-i1)
    ta = gamma_trace(a)
    tb = gamma_trace(b)
    t_a_plus_b = gamma_trace(a + b)
    assert ta.equals(
        -16 * ki(i0) * ki(-i0) * pf(i1) * pi(-i1)
        + 32 * ki(i0) * ki(i1) * pf(-i0) * pi(-i1)
    )
    assert tb.equals(-8 * x * ki(i0) * pf(-i0) * pi(i1) * pi(-i1))
    assert t_a_plus_b.equals(ta + tb)
sympy/physics/hep/tests/test_gamma_matrices.py
416
sympy
{ "docstring": "Test issue 13636 regarding handling traces of sums of products \n of GammaMatrix mixed with other factors.", "language": "en", "n_whitespaces": 19, "n_words": 16, "vocab_size": 14 }
119
Python
58
83a11729046d30d413b4a8b82512ff765f11e0b5
test_gamma_matrices.py
198,989
18
254
test_bug_13636
https://github.com/sympy/sympy.git
Fixes issue #13636 regarding handling traces of sums of products of GammaMatrix mixed with other factors.
181
0
49,072
15
2
7
def SyncBatchNorm(*args, **kwargs):
    if paddle.get_device() == 'cpu':
        return nn.BatchNorm2D(*args, **kwargs)
    else:
        return nn.SyncBatchNorm(*args, **kwargs)
modules/image/semantic_segmentation/ann_resnet50_cityscapes/layers.py
74
PaddleHub
{ "docstring": "In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
14
Python
12
1e69d066aa7f762a4b242c0519818577b7222e4c
layers.py
48,758
5
43
SyncBatchNorm
https://github.com/PaddlePaddle/PaddleHub.git
add 10 segmentation model
37
0
9,594
11
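A short sketch of the dispatch behaviour above: on a CPU-only device the factory falls back to BatchNorm2D, since SyncBatchNorm has no CPU kernel. The channel count is illustrative.

import paddle
import paddle.nn as nn  # used inside the SyncBatchNorm factory above

norm = SyncBatchNorm(64)  # 64 channels, illustrative
# On 'cpu' this is an nn.BatchNorm2D instance; on a GPU device it is an
# nn.SyncBatchNorm instance.
print(paddle.get_device(), type(norm).__name__)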
4
13
def bellman_ford_path_length(G, source, target, weight="weight"):
    if source == target:
        if source not in G:
            raise nx.NodeNotFound(f"Node {source} not found in graph")
        return 0

    weight = _weight_function(G, weight)

    length = _bellman_ford(G, [source], weight, target=target)

    try:
        return length[target]
    except KeyError as err:
        raise nx.NetworkXNoPath(f"node {target} not reachable from {source}") from err
networkx/algorithms/shortest_paths/weighted.py
138
networkx
{ "docstring": "Returns the shortest path length from source to target\n in a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n starting node for path\n\n target : node label\n ending node for path\n\n weight : string or function (default=\"weight\")\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n Returns\n -------\n length : number\n Shortest path length.\n\n Raises\n ------\n NodeNotFound\n If `source` is not in `G`.\n\n NetworkXNoPath\n If no path exists between source and target.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> nx.bellman_ford_path_length(G, 0, 4)\n 4\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n See Also\n --------\n dijkstra_path_length, bellman_ford_path\n ", "language": "en", "n_whitespaces": 381, "n_words": 195, "vocab_size": 116 }
49
Python
40
b5d41847b8db0c82372faf69cd3a339d11da7ef0
weighted.py
176,294
11
81
bellman_ford_path_length
https://github.com/networkx/networkx.git
DOC: Update documentation to include callables for weight argument (#5307) Update docs to include functions as valid input for weight argument.
106
0
41,811
13
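The record's docstring already carries a worked example; spelled out as a runnable snippet:

import networkx as nx

G = nx.path_graph(5)  # 0 - 1 - 2 - 3 - 4, unit edge weights
print(nx.bellman_ford_path_length(G, 0, 4))  # -> 4, as in the docstring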
1
23
def test_in_flight_requests_stop_being_in_flight(self) -> None:
    req1 = ensureDeferred(
        self.state_datastore._get_state_for_group_using_inflight_cache(
            42, StateFilter.all()
        )
    )
    self.pump(by=0.1)

    # This should have gone to the database
    self.assertEqual(len(self.get_state_group_calls), 1)
    self.assertFalse(req1.called)

    # Complete the request right away.
    self._complete_request_fake(*self.get_state_group_calls[0])
    self.assertTrue(req1.called)

    # Send off another request
    req2 = ensureDeferred(
        self.state_datastore._get_state_for_group_using_inflight_cache(
            42, StateFilter.all()
        )
    )
    self.pump(by=0.1)

    # It should have gone to the database again, because the previous request
    # isn't in-flight and therefore isn't available for deduplication.
    self.assertEqual(len(self.get_state_group_calls), 2)
    self.assertFalse(req2.called)

    # Complete the request right away.
    self._complete_request_fake(*self.get_state_group_calls[1])
    self.assertTrue(req2.called)

    groups, sf, d = self.get_state_group_calls[0]
    self.assertEqual(self.get_success(req1), FAKE_STATE)
    self.assertEqual(self.get_success(req2), FAKE_STATE)
tests/storage/databases/test_state_store.py
295
synapse
{ "docstring": "\n Tests that in-flight request deduplication doesn't somehow 'hold on'\n to completed requests: once they're done, they're taken out of the\n in-flight cache.\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 20 }
88
Python
55
546b9c9e648f5e2b25bb7c8350570787ff9befae
test_state_store.py
246,370
29
186
test_in_flight_requests_stop_being_in_flight
https://github.com/matrix-org/synapse.git
Add more tests for in-flight state query duplication. (#12033)
330
0
71,182
12
1
5
def wait_for_handle(self, handle, timeout=None):
    return self._wait_for_handle(handle, timeout, False)
python3.10.4/Lib/asyncio/windows_events.py
35
XX-Net
{ "docstring": "Wait for a handle.\n\n Return a Future object. The result of the future is True if the wait\n completed, or False if the wait did not complete (on timeout).\n ", "language": "en", "n_whitespaces": 50, "n_words": 29, "vocab_size": 24 }
8
Python
8
8198943edd73a363c266633e1aa5b2a9e9c9f526
windows_events.py
220,981
2
23
wait_for_handle
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
22
0
56,179
7
2
9
def make_dist(name, version, **kwargs):
    summary = kwargs.pop('summary', 'Placeholder for summary')
    md = Metadata(**kwargs)
    md.name = name
    md.version = version
    md.summary = summary or 'Placeholder for summary'
    return Distribution(md)
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
87
transferlearning
{ "docstring": "\n A convenience method for making a dist given just a name and version.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
28
Python
21
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
database.py
61,935
7
50
make_dist
https://github.com/jindongwang/transferlearning.git
upd; format
49
0
12,759
9
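An illustrative call of the make_dist convenience helper above; the name and version values are made up, and the attribute access assumes distlib's Distribution exposes the wrapped Metadata object as built in the record.

dist = make_dist('example-package', '1.0.0')  # hypothetical name/version
print(dist.metadata.summary)  # -> 'Placeholder for summary' (the default)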
6
44
def create_posix_inventory(args, path, target_hosts, needs_ssh=False):  # type: (EnvironmentConfig, str, t.List[HostProfile], bool) -> None
    target_hosts = t.cast(t.List[SshTargetHostProfile], target_hosts)

    if len(target_hosts) != 1:
        raise Exception()

    target_host = target_hosts[0]

    if isinstance(target_host, ControllerProfile) and not needs_ssh:
        inventory = Inventory(
            host_groups=dict(
                testgroup=dict(
                    testhost=dict(
                        ansible_connection='local',
                        ansible_pipelining='yes',
                        ansible_python_interpreter=target_host.python.path,
                    ),
                ),
            ),
        )
    else:
        connections = target_host.get_controller_target_connections()

        if len(connections) != 1:
            raise Exception()

        ssh = connections[0]

        testhost = dict(
            ansible_connection='ssh',
            ansible_pipelining='yes',
            ansible_python_interpreter=ssh.settings.python_interpreter,
            ansible_host=ssh.settings.host,
            ansible_port=ssh.settings.port,
            ansible_user=ssh.settings.user,
            ansible_ssh_private_key_file=ssh.settings.identity_file,
        )  # type: t.Dict[str, t.Optional[t.Union[str, int]]]

        if ssh.become:
            testhost.update(
                ansible_become='yes',
                ansible_become_method=ssh.become.method,
            )

        testhost = exclude_none_values(testhost)

        inventory = Inventory(
            host_groups=dict(
                testgroup=dict(
                    testhost=testhost,
                ),
            ),
        )

    inventory.write(args, path)
test/lib/ansible_test/_internal/inventory.py
365
ansible
{ "docstring": "Create and return inventory for use in POSIX integration tests.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
94
Python
65
a06fa496d3f837cca3c437ab6e9858525633d147
inventory.py
266,784
45
234
create_posix_inventory
https://github.com/ansible/ansible.git
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
571
0
78,587
22
11
35
def smart_resize(x, size, interpolation='bilinear'):
    if len(size) != 2:
        raise ValueError('Expected `size` to be a tuple of 2 integers, '
                         f'but got: {size}.')
    img = tf.convert_to_tensor(x)
    if img.shape.rank is not None:
        if img.shape.rank < 3 or img.shape.rank > 4:
            raise ValueError(
                'Expected an image array with shape `(height, width, channels)`, '
                'or `(batch_size, height, width, channels)`, but '
                f'got input with incorrect rank, of shape {img.shape}.')

    shape = tf.shape(img)
    height, width = shape[-3], shape[-2]
    target_height, target_width = size
    if img.shape.rank is not None:
        static_num_channels = img.shape[-1]
    else:
        static_num_channels = None

    crop_height = tf.cast(
        tf.cast(width * target_height, 'float32') / target_width, 'int32')
    crop_width = tf.cast(
        tf.cast(height * target_width, 'float32') / target_height, 'int32')

    # Set back to input height / width if crop_height / crop_width is not smaller.
    crop_height = tf.minimum(height, crop_height)
    crop_width = tf.minimum(width, crop_width)

    crop_box_hstart = tf.cast(
        tf.cast(height - crop_height, 'float32') / 2, 'int32')
    crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32')

    if img.shape.rank == 4:
        crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])
        crop_box_size = tf.stack([-1, crop_height, crop_width, -1])
    else:
        crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])
        crop_box_size = tf.stack([crop_height, crop_width, -1])

    img = tf.slice(img, crop_box_start, crop_box_size)
    img = tf.image.resize(images=img, size=size, method=interpolation)
    # Apparent bug in resize_images_v2 may cause shape to be lost
    if img.shape.rank is not None:
        if img.shape.rank == 4:
            img.set_shape((None, None, None, static_num_channels))
        if img.shape.rank == 3:
            img.set_shape((None, None, static_num_channels))
    if isinstance(x, np.ndarray):
        return img.numpy()
    return img
keras/utils/image_utils.py
646
keras
{ "docstring": "Resize images to a target size without aspect ratio distortion.\n\n Warning: `tf.keras.preprocessing.image.smart_resize` is not recommended for\n new code. Prefer `tf.keras.layers.Resizing`, which provides the same\n functionality as a preprocessing layer and adds `tf.RaggedTensor` support. See\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers)\n for an overview of preprocessing layers.\n\n TensorFlow image datasets typically yield images that have each a different\n size. However, these images need to be batched before they can be\n processed by Keras layers. To be batched, images need to share the same height\n and width.\n\n You could simply do:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: tf.image.resize(img, size))\n ```\n\n However, if you do this, you distort the aspect ratio of your images, since\n in general they do not all have the same aspect ratio as `size`. This is\n fine in many cases, but not always (e.g. for GANs this can be a problem).\n\n Note that passing the argument `preserve_aspect_ratio=True` to `resize`\n will preserve the aspect ratio, but at the cost of no longer respecting the\n provided target size. Because `tf.image.resize` doesn't crop images,\n your output images will still have different sizes.\n\n This calls for:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: smart_resize(img, size))\n ```\n\n Your output images will actually be `(200, 200)`, and will not be distorted.\n Instead, the parts of the image that do not fit within the target size\n get cropped out.\n\n The resizing process is:\n\n 1. Take the largest centered crop of the image that has the same aspect ratio\n as the target size. For instance, if `size=(200, 200)` and the input image has\n size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.\n 2. Resize the cropped image to the target size. In the example above,\n we resize the `(340, 340)` crop to `(200, 200)`.\n\n Args:\n x: Input image or batch of images (as a tensor or NumPy array). Must be in\n format `(height, width, channels)` or `(batch_size, height, width,\n channels)`.\n size: Tuple of `(height, width)` integer. Target size.\n interpolation: String, interpolation to use for resizing. Defaults to\n `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`,\n `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.\n\n Returns:\n Array with shape `(size[0], size[1], channels)`. If the input image was a\n NumPy array, the output is a NumPy array, and if it was a TF tensor,\n the output is a TF tensor.\n ", "language": "en", "n_whitespaces": 460, "n_words": 383, "vocab_size": 215 }
226
Python
122
9dc9a78cc6502226775a99725c654fab3298aa5f
image_utils.py
269,108
43
404
smart_resize
https://github.com/keras-team/keras.git
Expose all utilities in `keras.utils.__init__.py`, and resolve the hourglass import issues that led to the creation of an extraneous `all_utils.py` file / library. PiperOrigin-RevId: 435725558
360
0
79,905
15
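A shape-level sketch of the crop-then-resize behaviour described in the record's docstring; the (340, 500) input size mirrors the docstring's own example.

import numpy as np

img = np.zeros((340, 500, 3), dtype=np.uint8)
out = smart_resize(img, (200, 200))
# A centered (340, 340) crop is taken first (matching the target aspect
# ratio), then resized to (200, 200); a NumPy array in gives one back.
print(out.shape)  # -> (200, 200, 3)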
1
2
def test_get_api_representation_calls_same_method_on_fields_with_context(self):
wagtail/core/tests/test_blocks.py
13
wagtail
{ "docstring": "\n The get_api_representation method of a StructBlock should invoke\n the block's get_api_representation method on each field and the\n context should be passed on.\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 18 }
2
Python
2
d10f15e55806c6944827d801cd9c2d53f5da4186
test_blocks.py
74,165
17
85
test_get_api_representation_calls_same_method_on_fields_with_context
https://github.com/wagtail/wagtail.git
Reformat with black
9
0
16,227
6
4
11
def device_class(self) -> BinarySensorDeviceClass | None:
    if self._coordinator is None:
        return None
    if self._sensor_type is PlaatoKeg.Pins.LEAK_DETECTION:
        return BinarySensorDeviceClass.PROBLEM
    if self._sensor_type is PlaatoKeg.Pins.POURING:
        return BinarySensorDeviceClass.OPENING
    return None
homeassistant/components/plaato/binary_sensor.py
82
core
{ "docstring": "Return the class of this device, from BinarySensorDeviceClass.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
26
Python
16
3393b78e080f2e456337205d5bd1c9d2cd810625
binary_sensor.py
310,232
9
51
device_class
https://github.com/home-assistant/core.git
Remove plaato from mypy ignore list (#64516) Co-authored-by: epenet <[email protected]>
94
0
108,919
8
1
13
def binary_mask_dice_loss(self, mask_preds, gt_masks):
    mask_preds = mask_preds.flatten(1)
    gt_masks = gt_masks.flatten(1).float()
    numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks)
    denominator = mask_preds.sum(-1)[:, None] + gt_masks.sum(-1)[None, :]
    loss = 1 - (numerator + self.eps) / (denominator + self.eps)
    return loss
mmdet/core/bbox/match_costs/match_cost.py
146
mmdetection
{ "docstring": "\n Args:\n mask_preds (Tensor): Mask prediction in shape (num_query, *).\n gt_masks (Tensor): Ground truth in shape (num_gt, *)\n store 0 or 1, 0 for negative class and 1 for\n positive class.\n\n Returns:\n Tensor: Dice cost matrix in shape (num_query, num_gt).\n ", "language": "en", "n_whitespaces": 124, "n_words": 39, "vocab_size": 31 }
37
Python
28
cac356380d505bf15587f07c0529218cc36b9652
match_cost.py
244,031
7
92
binary_mask_dice_loss
https://github.com/open-mmlab/mmdetection.git
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
86
0
70,203
11
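A shape sketch for the pairwise dice cost above: 2 predicted masks scored against 3 ground-truth masks on a 4x4 grid yield a (2, 3) cost matrix. The SimpleNamespace stands in for self (only the eps attribute is needed); this is an illustration, not how mmdet invokes the method.

import torch
from types import SimpleNamespace

self_like = SimpleNamespace(eps=1e-3)       # stand-in for the match-cost object
mask_preds = torch.rand(2, 4, 4)            # num_query=2 soft predictions
gt_masks = torch.randint(0, 2, (3, 4, 4))   # num_gt=3 binary masks
cost = binary_mask_dice_loss(self_like, mask_preds, gt_masks)
print(cost.shape)  # -> torch.Size([2, 3])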
1
2
def tab_clickable_evt() -> str:
    return
openbb_terminal/jupyter/widget_helpers.py
20
OpenBBTerminal
{ "docstring": "Adds javascript code within HTML at the bottom that allows the interactivity with tabs.\n\n Returns\n -------\n str\n javascript code in HTML to process interactive tabs\n \n <script>\n function menu(evt, menu_name) {\n var i, tabcontent, tablinks;\n tabcontent = document.getElementsByClassName(\"tabcontent\");\n for (i = 0; i < tabcontent.length; i++) {\n tabcontent[i].style.display = \"none\";\n }\n tablinks = document.getElementsByClassName(\"tablinks\");\n for (i = 0; i < tablinks.length; i++) {\n tablinks[i].className = tablinks[i].className.replace(\" active\", \"\");\n tablinks[i].style.backgroundColor = \"white\";\n tablinks[i].style.color = \"black\";\n }\n document.getElementById(menu_name).style.display = \"block\";\n\n evt.currentTarget.className += \" active\";\n evt.currentTarget.style.backgroundColor = \"black\";\n evt.currentTarget.style.color = \"white\";\n }\n\n window.onload=function(){\n menu(event, 'SUMMARY');\n };\n </script>", "language": "en", "n_whitespaces": 308, "n_words": 93, "vocab_size": 67 }
5
Python
5
f47e10918a3193c5e0c85981fb769e1b680f5f9d
widget_helpers.py
284,435
33
9
tab_clickable_evt
https://github.com/OpenBB-finance/OpenBBTerminal.git
Simplify way to generate report templates (#1788) * simplify way to generate report templates * add documentation Co-authored-by: James Maslek <[email protected]>
12
0
84,718
6
1
8
def register_search(cls):
    model = cls.model
    label = f'{model._meta.app_label}.{model._meta.model_name}'
    registry['search'][label] = cls

    return cls
netbox/netbox/search/__init__.py
66
netbox
{ "docstring": "\n Decorator for registering a SearchIndex class.\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
13
Python
10
9628dead07ccef9608b32906aa8194bc948e5a09
__init__.py
265,888
5
26
register_search
https://github.com/netbox-community/netbox.git
Closes #10560: New global search (#10676) * Initial work on new search backend * Clean up search backends * Return only the most relevant result per object * Clear any pre-existing cached entries on cache() * #6003: Implement global search functionality for custom field values * Tweak field weights & document guidance * Extend search() to accept a lookup type * Move get_registry() out of SearchBackend * Enforce object permissions when returning search results * Add indexers for remaining models * Avoid calling remove() on non-cacheable objects * Use new search backend by default * Extend search backend to filter by object type * Clean up search view form * Enable specifying lookup logic * Add indexes for value field * Remove object type selector from search bar * Introduce SearchTable and enable HTMX for results * Enable pagination * Remove legacy search backend * Cleanup * Use a UUID for CachedValue primary key * Refactoring search methods * Define max search results limit * Extend reindex command to support specifying particular models * Add clear() and size to SearchBackend * Optimize bulk caching performance * Highlight matched portion of field value * Performance improvements for reindexing * Started on search tests * Cleanup & docs * Documentation updates * Clean up SearchIndex * Flatten search registry to register by app_label.model_name * Clean up search backend classes * Clean up RestrictedGenericForeignKey and RestrictedPrefetch * Resolve migrations conflict
28
0
78,228
10
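A hedged sketch of how the decorator above might be applied; DeviceIndex and Device are illustrative names, not taken from the netbox source.

@register_search
class DeviceIndex(SearchIndex):   # SearchIndex per the record's module
    model = Device                # hypothetical model class

# Afterwards the class is reachable under its "app_label.model_name" key,
# e.g. registry['search']['dcim.device'] for a model in a dcim app.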
1
5
def increment_iterations(self) -> None:
    self._iterations += 1
    self._sessions[self._session_id]["iterations"] += 1
plugins/train/model/_base/model.py
45
faceswap
{ "docstring": " Increment :attr:`iterations` and session iterations by 1. ", "language": "en", "n_whitespaces": 8, "n_words": 7, "vocab_size": 7 }
10
Python
8
ff6b0209dd5ad57b81b0aca570df7f39a7119bfb
model.py
100,841
4
26
increment_iterations
https://github.com/deepfakes/faceswap.git
Refactoring and TravisCI to Github Actions (#1239) * refactor training * travis to actions
31
0
20,292
9
1
3
def slice_tensors(data, tensor_slice):
src/accelerate/utils.py
15
accelerate
{ "docstring": "\n Recursively takes a slice in a nested list/tuple/dictionary of tensors.\n\n Args:\n data (nested list/tuple/dictionary of `torch.Tensor`):\n The data to slice.\n tensor_slice (`slice`):\n The slice to take.\n\n Returns:\n The same data structure as `data` with all the tensors slices.\n ", "language": "en", "n_whitespaces": 94, "n_words": 38, "vocab_size": 29 }
3
Python
3
fb5ed62c102c0323486b89805e1888495de3db15
utils.py
337,294
3
19
slice_tensors
https://github.com/huggingface/accelerate.git
Convert documentation to the new front (#271) * Main conversion * Doc styling * Style * New front deploy * Fixes * Fixes * Fix new docstrings * Style
6
0
121,008
6
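The record keeps only the signature of slice_tensors; its docstring describes a recursive slice over nested containers of tensors. A minimal sketch under that description (not accelerate's actual implementation, which likely routes through a generic recursive-apply helper):

import torch

def slice_tensors_sketch(data, tensor_slice):
    # Recurse through lists/tuples/dicts and slice each tensor leaf.
    if isinstance(data, (list, tuple)):
        return type(data)(slice_tensors_sketch(t, tensor_slice) for t in data)
    if isinstance(data, dict):
        return {k: slice_tensors_sketch(v, tensor_slice) for k, v in data.items()}
    return data[tensor_slice]

batch = {"x": torch.arange(10), "y": [torch.arange(10), torch.arange(10)]}
print(slice_tensors_sketch(batch, slice(0, 4))["x"].shape)  # -> torch.Size([4])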
1
7
def get_metric(self):
    metrics = combine_results(self.results, rec_flag=False)
    self.reset()
    return metrics
ppocr/metrics/ct_metric.py
44
PaddleOCR
{ "docstring": "\n Input format: y0,x0, ..... yn,xn. Each detection is separated by the end of line token ('\\n')'\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 16 }
9
Python
8
b5268dc3a0847dce2668265e07ff50d54265b2d8
ct_metric.py
25,044
4
25
get_metric
https://github.com/PaddlePaddle/PaddleOCR.git
add centripetal text model
37
0
4,849
9
1
7
def database_forwards(self, app_label, schema_editor, from_state, to_state):
    raise NotImplementedError(
        "subclasses of Operation must provide a database_forwards() method"
    )
django/db/migrations/operations/base.py
31
django
{ "docstring": "\n Perform the mutation on the database schema in the normal\n (forwards) direction.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 10 }
17
Python
17
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
205,316
4
19
database_forwards
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
49
0
51,091
8
3
6
def asList(self):
    return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist]
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
46
transferlearning
{ "docstring": "\n Returns the parse results as a nested list of matching tokens, all converted to strings.\n\n Example::\n\n patt = OneOrMore(Word(alphas))\n result = patt.parseString(\"sldkj lsdkj sldkj\")\n # even though the result prints in string-like form, it is actually a pyparsing ParseResults\n print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']\n\n # Use asList() to create an actual list\n result_list = result.asList()\n print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']\n ", "language": "en", "n_whitespaces": 167, "n_words": 68, "vocab_size": 53 }
13
Python
12
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,303
2
29
asList
https://github.com/jindongwang/transferlearning.git
upd; format
27
0
13,240
9
2
20
def _load(self, config_changeable_items):
    logger.debug("Loading State")
    if not os.path.exists(self._filename):
        logger.info("No existing state file found. Generating.")
        return
    state = self._serializer.load(self._filename)
    self._name = state.get("name", self._name)
    self._sessions = state.get("sessions", {})
    self._lowest_avg_loss = state.get("lowest_avg_loss", {})
    self._iterations = state.get("iterations", 0)
    self._config = state.get("config", {})
    logger.debug("Loaded state: %s", state)
    self._replace_config(config_changeable_items)
plugins/train/model/_base.py
214
faceswap
{ "docstring": " Load a state file and set the serialized values to the class instance.\n\n Updates the model's config with the values stored in the state file.\n\n Parameters\n ----------\n config_changeable_items: dict\n Configuration options that can be altered when resuming a model, and their current\n values\n ", "language": "en", "n_whitespaces": 101, "n_words": 43, "vocab_size": 34 }
43
Python
35
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
_base.py
100,383
13
125
_load
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
142
0
19,869
10
1
18
def _roots_with_zeros(p, num_leading_zeros):
    # Avoid lapack errors when p is all zero
    p = _where(len(p) == num_leading_zeros, 1.0, p)
    # Roll any leading zeros to the end & compute the roots
    roots = _roots_no_zeros(roll(p, -num_leading_zeros))
    # Sort zero roots to the end.
    roots = lax.sort_key_val(roots == 0, roots)[1]
    # Set roots associated with num_leading_zeros to NaN
    return _where(arange(roots.size) < roots.size - num_leading_zeros, roots,
                  complex(np.nan, np.nan))


@_wraps(np.roots, lax_description=, extra_params=)
jax/_src/numpy/polynomial.py
147
@_wraps(np.roots, lax_description="""\ Unlike the numpy version of this function, the JAX version returns the roots in a complex array regardless of the values of the roots. Additionally, the jax version of this function adds the ``strip_zeros`` function which must be set to False for the function to be compatible with JIT and other JAX transformations. With ``strip_zeros=False``, if your coefficients have leading zeros, the roots will be padded with NaN values: >>> coeffs = jnp.array([0, 1, 2]) # The default behavior matches numpy and strips leading zeros: >>> jnp.roots(coeffs) DeviceArray([-2.+0.j], dtype=complex64) # With strip_zeros=False, extra roots are set to NaN: >>> jnp.roots(coeffs, strip_zeros=False) DeviceArray([-2. +0.j, nan+nanj], dtype=complex64) """, extra_params=""" strip_zeros : bool, default=True If set to True, then leading zeros in the coefficients will be stripped, similar to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and undefined roots will be represented by NaN values in the function output. ``strip_zeros`` must be set to ``False`` for the function to be compatible with :func:`jax.jit` and other JAX transformations. """)
jax
{ "docstring": "\\\nUnlike the numpy version of this function, the JAX version returns the roots in\na complex array regardless of the values of the roots. Additionally, the jax\nversion of this function adds the ``strip_zeros`` function which must be set to\nFalse for the function to be compatible with JIT and other JAX transformations.\nWith ``strip_zeros=False``, if your coefficients have leading zeros, the\nroots will be padded with NaN values:\n\n>>> coeffs = jnp.array([0, 1, 2])\n\n# The default behavior matches numpy and strips leading zeros:\n>>> jnp.roots(coeffs)\nDeviceArray([-2.+0.j], dtype=complex64)\n\n# With strip_zeros=False, extra roots are set to NaN:\n>>> jnp.roots(coeffs, strip_zeros=False)\nDeviceArray([-2. +0.j, nan+nanj], dtype=complex64)\n\nstrip_zeros : bool, default=True\n If set to True, then leading zeros in the coefficients will be stripped, similar\n to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and\n undefined roots will be represented by NaN values in the function output.\n ``strip_zeros`` must be set to ``False`` for the function to be compatible with\n :func:`jax.jit` and other JAX transformations.\n", "language": "en", "n_whitespaces": 167, "n_words": 167, "vocab_size": 92 }
68
Python
51
f6476f7a03f8390627c1a8e2a2ec8702d8a320e5
polynomial.py
121,009
5
80
_roots_with_zeros
https://github.com/google/jax.git
jnp.roots: better support for computation under JIT
73
1
27,011
11
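The lax_description stored in the record's ast_errors field carries its own doctest; as a runnable snippet:

import jax.numpy as jnp

coeffs = jnp.array([0, 1, 2])
print(jnp.roots(coeffs))                     # [-2.+0.j] - leading zero stripped
print(jnp.roots(coeffs, strip_zeros=False))  # [-2.+0.j, nan+nanj] - JIT-compatible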
1
11
def test_config_overrides_environ(self):
    config = {"caches": {"per_cache_factors": {"foo": 2, "bar": 3}}}
    self.config._environ = {
        "SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER": "2",
        "SYNAPSE_CACHE_FACTOR_FOO": 1,
    }
    self.config.read_config(config, config_dir_path="", data_dir_path="")
    self.config.resize_all_caches()

    self.assertEqual(
        dict(self.config.cache_factors),
        {"foo": 1.0, "bar": 3.0, "something_or_other": 2.0},
    )
tests/config/test_cache.py
163
synapse
{ "docstring": "\n Individual cache factors defined in the environment will take precedence\n over those in the config.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 13 }
31
Python
28
d38d242411b8910dfacde1e61fd3a0ec5cbcaa66
test_cache.py
248,246
12
98
test_config_overrides_environ
https://github.com/matrix-org/synapse.git
Reload cache factors from disk on SIGHUP (#12673)
131
0
72,177
13
1
3
def test_issue_5131(monkeypatch, tmpdir):
tests/functional/test_regression.py
15
pyinstaller
{ "docstring": "\n While fixing the endless recursion when the package's __init__ module is an extension (see\n tests/unit/test_modulegraph_more.py::package_init_is_extension_*), another error occurred:\n PyInstaller.building._utils._load_code() tried to complete the source code for extension module - triggered by\n PYZ.assemble(), which is collecting all source files - caused by this being marked as \"PYMODULE\" in the TOC.\n ", "language": "en", "n_whitespaces": 65, "n_words": 49, "vocab_size": 40 }
3
Python
3
1a7d704ffbabb433007e3ba04750c2f13ade48e5
test_regression.py
262,858
25
194
test_issue_5131
https://github.com/pyinstaller/pyinstaller.git
Fix typos (#6782) [skip ci]
6
0
77,409
6
11
22
def update_billed_amount_based_on_so(so_detail, update_modified=True):
    # Billed against Sales Order directly
    billed_against_so = frappe.db.sql(, so_detail)
    billed_against_so = billed_against_so and billed_against_so[0][0] or 0

    # Get all Delivery Note Item rows against the Sales Order Item row
    dn_details = frappe.db.sql(, so_detail, as_dict=1)

    updated_dn = []
    for dnd in dn_details:
        billed_amt_agianst_dn = 0

        # If delivered against Sales Invoice
        if dnd.si_detail:
            billed_amt_agianst_dn = flt(dnd.amount)
            billed_against_so -= billed_amt_agianst_dn
        else:
            # Get billed amount directly against Delivery Note
            billed_amt_agianst_dn = frappe.db.sql(, dnd.name)
            billed_amt_agianst_dn = billed_amt_agianst_dn and billed_amt_agianst_dn[0][0] or 0

        # Distribute billed amount directly against SO between DNs based on FIFO
        if billed_against_so and billed_amt_agianst_dn < dnd.amount:
            if dnd.returned_qty:
                pending_to_bill = flt(dnd.amount) * (dnd.stock_qty - dnd.returned_qty) / dnd.stock_qty
            else:
                pending_to_bill = flt(dnd.amount)
            pending_to_bill -= billed_amt_agianst_dn
            if pending_to_bill <= billed_against_so:
                billed_amt_agianst_dn += pending_to_bill
                billed_against_so -= pending_to_bill
            else:
                billed_amt_agianst_dn += billed_against_so
                billed_against_so = 0

        frappe.db.set_value("Delivery Note Item", dnd.name, "billed_amt",
                            billed_amt_agianst_dn, update_modified=update_modified)

        updated_dn.append(dnd.parent)

    return updated_dn
erpnext/stock/doctype/delivery_note/delivery_note.py
331
erpnext
{ "docstring": "select sum(si_item.amount)\n\t\tfrom `tabSales Invoice Item` si_item, `tabSales Invoice` si\n\t\twhere\n\t\t\tsi_item.parent = si.name\n\t\t\tand si_item.so_detail=%s\n\t\t\tand (si_item.dn_detail is null or si_item.dn_detail = '')\n\t\t\tand si_item.docstatus=1\n\t\t\tand si.update_stock = 0\n\t\tselect dn_item.name, dn_item.amount, dn_item.si_detail, dn_item.parent, dn_item.stock_qty, dn_item.returned_qty\n\t\tfrom `tabDelivery Note Item` dn_item, `tabDelivery Note` dn\n\t\twhere\n\t\t\tdn.name = dn_item.parent\n\t\t\tand dn_item.so_detail=%s\n\t\t\tand dn.docstatus=1\n\t\t\tand dn.is_return = 0\n\t\torder by dn.posting_date asc, dn.posting_time asc, dn.name ascselect sum(amount) from `tabSales Invoice Item`\n\t\t\t\twhere dn_detail=%s and docstatus=1", "language": "en", "n_whitespaces": 57, "n_words": 74, "vocab_size": 49 }
145
Python
75
b50036c04a116b2a3aa1784daf161a2f618765a8
delivery_note.py
64,240
44
205
update_billed_amount_based_on_so
https://github.com/frappe/erpnext.git
fix: consider returned_qty while updating billed_amt (cherry picked from commit 63aaa1e357280b24c537a502a479f7bb7a6654e4)
112
0
13,584
17
5
16
def parse_apps_and_model_labels(labels):
    apps = set()
    models = set()

    for label in labels:
        if "." in label:
            try:
                model = installed_apps.get_model(label)
            except LookupError:
                raise CommandError("Unknown model: %s" % label)
            models.add(model)
        else:
            try:
                app_config = installed_apps.get_app_config(label)
            except LookupError as e:
                raise CommandError(str(e))
            apps.add(app_config)

    return models, apps
django/core/management/utils.py
152
django
{ "docstring": "\n Parse a list of \"app_label.ModelName\" or \"app_label\" strings into actual\n objects and return a two-element tuple:\n (set of model classes, set of app_configs).\n Raise a CommandError if some specified models or apps don't exist.\n ", "language": "en", "n_whitespaces": 54, "n_words": 34, "vocab_size": 29 }
44
Python
35
9c19aff7c7561e3a82978a272ecdaad40dda5c00
utils.py
204,726
17
87
parse_apps_and_model_labels
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
199
0
50,857
17
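An illustrative call, assuming it runs inside a configured Django project with django.contrib.auth and contenttypes installed:

models, apps = parse_apps_and_model_labels(["auth.User", "contenttypes"])
# models -> {<class 'django.contrib.auth.models.User'>}
# apps   -> {the AppConfig for the contenttypes app}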
1
20
def test_double_stamping(self, subtests):
    self.app.conf.task_always_eager = True
    self.app.conf.task_store_eager_result = True
    self.app.conf.result_extended = True

    sig_1 = self.add.s(2, 2)
    sig_1.stamp(stamp1="stamp1")
    sig_1.stamp(stamp2="stamp2")
    sig_1_res = sig_1.freeze()
    sig_1.apply()

    with subtests.test("sig_1_res is stamped with stamp1", stamp1=["stamp1"]):
        assert sig_1_res._get_task_meta()["stamp1"] == ["stamp1"]

    with subtests.test("sig_1_res is stamped with stamp2", stamp2=["stamp2"]):
        assert sig_1_res._get_task_meta()["stamp2"] == ["stamp2"]

    with subtests.test("sig_1_res is stamped twice", stamped_headers=["stamp2", "stamp1"]):
        assert sig_1_res._get_task_meta()["stamped_headers"] == ["stamp2", "stamp1", "groups"]
t/unit/tasks/test_canvas.py
291
celery
{ "docstring": "\n Test manual signature stamping with two different stamps.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
57
Python
37
1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc
test_canvas.py
208,123
15
162
test_double_stamping
https://github.com/celery/celery.git
Canvas Header Stamping (#7384) * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Redo header stamping (#7341) * _freeze_gid dict merge fixed * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. 
* Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Omer Katz <[email protected]> * Added stamping mechanism * Manual stamping improved * flake8 fixed * Added subtests * Add comma. * Moved groups to stamps * Fixed chord and added test for that * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * Added test for simple test for chord and fixed chord implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * Fixed lint and elements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * type -> isinstance * Added stamping mechanism * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Manual stamping improved * fail_ci_if_error uncommented * flake8 fixed * Added subtests * Changes * Add comma. * Fixed chord and added test for that * canvas.py fixed * Test chord.py fixed * Fixed stamped_headers * collections import fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * collections import fixed * Update celery/backends/base.py Co-authored-by: Omer Katz <[email protected]> * ampq.py fixed * Refrain from using deprecated import path. * Fix test_complex_chain regression. Whenever we stamp a group we need to freeze it first if it wasn't already frozen. Somewhere along the line, the group id changed because we were freezing twice. This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow. We don't know why yet. 
* Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed issues with maybe_list. Add documentation * Fixed potential issue with integration tests * Fixed issues with _regen * Fixed issues with _regen * Fixed test_generator issues * Fixed _regen stamping * Fixed _regen stamping * Fixed TimeOut issue * Fixed TimeOut issue * Fixed TimeOut issue * Update docs/userguide/canvas.rst Co-authored-by: Omer Katz <[email protected]> * Fixed Couchbase * Better stamping intro * New GroupVisitor example * Adjust documentation. Co-authored-by: Naomi Elstein <[email protected]> Co-authored-by: Omer Katz <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Asif Saif Uddin <[email protected]> Co-authored-by: Omer Katz <[email protected]>
174
0
52,214
12
1
13
def deform_sampling(self, feat, offset):
    # it is an equivalent implementation of bilinear interpolation
    b, c, h, w = feat.shape
    weight = feat.new_ones(c, 1, 1, 1)
    y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c)
    return y
mmdet/models/dense_heads/tood_head.py
79
mmdetection
{ "docstring": "Sampling the feature x according to offset.\n\n Args:\n feat (Tensor): Feature\n offset (Tensor): Spatial offset for feature sampling\n ", "language": "en", "n_whitespaces": 54, "n_words": 18, "vocab_size": 15 }
37
Python
30
b403751bd409795cf63fcc6aa7ee280326358bac
tood_head.py
244,173
5
57
deform_sampling
https://github.com/open-mmlab/mmdetection.git
[Fix] Avoid invalid bbox after deform_sampling (#7567) * Avoid invalid bbox after deform_sampling * replace in-place opertion with torch.where, update docstring * Update
79
0
70,270
8
1
8
def test_session_lifetime_must_not_be_exceeded_by_smaller_lifetimes(self):
    config_dict = default_config("test")

    # First test all the error conditions
    with self.assertRaises(ConfigError):
        HomeServerConfig().parse_config_dict(
            {
                "session_lifetime": "30m",
                "nonrefreshable_access_token_lifetime": "31m",
                **config_dict,
            },
            "",
            "",
        )

    with self.assertRaises(ConfigError):
        HomeServerConfig().parse_config_dict(
            {
                "session_lifetime": "30m",
                "refreshable_access_token_lifetime": "31m",
                **config_dict,
            },
            "",
            "",
        )

    with self.assertRaises(ConfigError):
        HomeServerConfig().parse_config_dict(
            {
                "session_lifetime": "30m",
                "refresh_token_lifetime": "31m",
                **config_dict,
            },
            "",
            "",
        )

    # Then test all the fine conditions
    HomeServerConfig().parse_config_dict(
        {
            "session_lifetime": "31m",
            "nonrefreshable_access_token_lifetime": "31m",
            **config_dict,
        },
        "",
        "",
    )

    HomeServerConfig().parse_config_dict(
        {
            "session_lifetime": "31m",
            "refreshable_access_token_lifetime": "31m",
            **config_dict,
        },
        "",
        "",
    )

    HomeServerConfig().parse_config_dict(
        {"session_lifetime": "31m", "refresh_token_lifetime": "31m", **config_dict},
        "",
        "",
    )
tests/config/test_registration_config.py
353
synapse
{ "docstring": "\n session_lifetime should logically be larger than, or at least as large as,\n all the different token lifetimes.\n Test that the user is faced with configuration errors if they make it\n smaller, as that configuration doesn't make sense.\n ", "language": "en", "n_whitespaces": 73, "n_words": 37, "vocab_size": 32 }
89
Python
30
4586119f0b0901be64f08655d3aaaef289a51bde
test_registration_config.py
248,006
55
185
test_session_lifetime_must_not_be_exceeded_by_smaller_lifetimes
https://github.com/matrix-org/synapse.git
Add missing type hints to config classes. (#12402)
808
0
72,044
12
3
13
def getlist(self, key, default=__marker):
    try:
        vals = self._container[key.lower()]
    except KeyError:
        if default is self.__marker:
            return []
        return default
    else:
        return vals[1:]

# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist

# Backwards compatibility for http.cookiejar
get_all = getlist
.venv/lib/python3.8/site-packages/pip/_vendor/urllib3/_collections.py
102
transferlearning
{ "docstring": "Returns a list of all the values for the named field. Returns an\n empty list if the key doesn't exist.", "language": "en", "n_whitespaces": 26, "n_words": 20, "vocab_size": 16 }
43
Python
29
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
_collections.py
63,909
9
49
getlist
https://github.com/jindongwang/transferlearning.git
upd; format
148
0
13,528
11
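A usage sketch for the multi-valued header container above, importing the class from the vendored path shown in the record:

from pip._vendor.urllib3._collections import HTTPHeaderDict

headers = HTTPHeaderDict()
headers.add("Set-Cookie", "a=1")
headers.add("Set-Cookie", "b=2")
print(headers.getlist("Set-Cookie"))  # -> ['a=1', 'b=2']
print(headers.getlist("X-Missing"))   # -> [] rather than a KeyError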
4
18
def _set_train_queue(self):
    current_pairlist = self.config.get("exchange", {}).get("pair_whitelist")
    if not self.dd.pair_dict:
        logger.info('Set fresh train queue from whitelist.')
        return deque(current_pairlist)

    best_queue = deque()

    pair_dict_sorted = sorted(self.dd.pair_dict.items(),
                              key=lambda k: k[1]['trained_timestamp'])
    for pair in pair_dict_sorted:
        if pair[0] in current_pairlist:
            best_queue.appendleft(pair[0])

    logger.info('Set existing queue from trained timestamps.')
    return best_queue

# Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModel.py for an example.
freqtrade/freqai/freqai_interface.py
180
freqtrade
{ "docstring": "\n Sets train queue from existing train timestamps if they exist\n otherwise it sets the train queue based on the provided whitelist.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 17 }
60
Python
49
eaa43337d2d7c13eeeb8c809d212e047f5935470
freqai_interface.py
151,215
13
105
_set_train_queue
https://github.com/freqtrade/freqtrade.git
improve train queue system, ensure crash resilience in train queue.
203
0
34,978
12
4
9
def duplicates_removed(it, already_seen=()):
    lst = []
    seen = set()
    for i in it:
        if i in seen or i in already_seen:
            continue
        lst.append(i)
        seen.add(i)
    return lst
pipenv/patched/notpip/_vendor/pygments/util.py
82
pipenv
{ "docstring": "\n Returns a list with duplicates removed from the iterable `it`.\n\n Order is preserved.\n ", "language": "en", "n_whitespaces": 23, "n_words": 13, "vocab_size": 13 }
26
Python
19
f3166e673fe8d40277b804d35d77dcdb760fc3b3
util.py
20,522
14
49
duplicates_removed
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
73
0
3,409
9
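A direct illustration of the order-preserving dedup above:

print(duplicates_removed([3, 1, 3, 2, 1]))                 # -> [3, 1, 2]
print(duplicates_removed([3, 1, 2], already_seen=(1, 2)))  # -> [3]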
1
12
def test_success_urlencoded(self) -> None:
    url = "/_synapse/admin/v1/auth_providers/another-auth-provider/users/a%3Acomplex%40external%2Fid"

    channel = self.make_request(
        "GET",
        url,
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(
        {"user_id": self.other_user},
        channel.json_body,
    )
tests/rest/admin/test_user.py
99
synapse
{ "docstring": "Tests a successful external ID lookup with an url-encoded ID", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
22
Python
20
74f60cec92c5aff87d6e74d177e95ec5f1a69f2b
test_user.py
249,499
13
61
test_success_urlencoded
https://github.com/matrix-org/synapse.git
Add an admin API endpoint to find a user based on its external ID in an auth provider. (#13810)
126
0
72,954
10
1
12
def get_base_context(self) -> MutableMapping[str, Any]:
    return {
        "data": self.activity.data,
        "author": self.activity.user,
        "title": self.title,
        "project": self.project,
        "project_link": self.get_project_link(),
        **super().get_base_context(),
    }
src/sentry/notifications/notifications/activity/base.py
102
sentry
{ "docstring": "The most basic context shared by every notification type.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
19
Python
19
25469e1e30a717637456d6ab470bdd28864be301
base.py
99,100
10
61
get_base_context
https://github.com/getsentry/sentry.git
ref(notifications): Remove `get_activity_name()` (#34061)
106
0
19,615
11
2
10
def tracing_client_interceptor(self) -> Optional['OpenTelemetryClientInterceptor']:
    if self.tracing:
        from opentelemetry.instrumentation.grpc import (
            client_interceptor as grpc_client_interceptor,
        )

        return grpc_client_interceptor(self.tracer_provider)
    else:
        return None
jina/serve/instrumentation/__init__.py
65
jina
{ "docstring": "\n :returns: a gRPC client interceptor with the global tracing provider.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
19
Python
18
107631e955b21db8a4ddb3bee02130de3650d032
__init__.py
13,242
11
40
tracing_client_interceptor
https://github.com/jina-ai/jina.git
feat(instrumentation): add OpenTelemetry tracing and metrics with basic configurations (#5175)
99
0
2,578
10
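A trimmed sketch of the same lazy-import pattern as a free function; it assumes opentelemetry-instrumentation-grpc is installed, and the flag/provider arguments stand in for the instance attributes:

from typing import Any, Optional

def make_client_interceptor(tracing: bool, tracer_provider: Any) -> Optional[Any]:
    if not tracing:
        return None
    # Imported lazily so OpenTelemetry stays an optional dependency.
    from opentelemetry.instrumentation.grpc import client_interceptor
    return client_interceptor(tracer_provider)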
1
17
def test_readonly_manytomany_backwards_ref(self):
    topping = Topping.objects.create(name="Salami")
    pizza = Pizza.objects.create(name="Americano")
    pizza.toppings.add(topping)
    response = self.client.get(reverse("admin:admin_views_topping_add"))
    self.assertEqual(response.status_code, 200)
tests/admin_views/tests.py
106
django
{ "docstring": "\n Regression test for #16433 - backwards references for related objects\n broke if the related field is read-only due to the help_text attribute\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 19 }
14
Python
12
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,855
6
61
test_readonly_manytomany_backwards_ref
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
56
0
52,136
11
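At the ORM level, the backwards reference the test exercises looks like this (model definitions assumed from Django's admin_views test app, with the default reverse accessor name):

topping = Topping.objects.create(name="Salami")
pizza = Pizza.objects.create(name="Americano")
pizza.toppings.add(topping)

# Reverse ManyToMany access from the Topping side; it is this kind of
# backwards reference that used to break when rendered read-only.
assert list(topping.pizza_set.all()) == [pizza]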
16
37
def transform(self) -> DataFrame | Series:
    obj = self.obj
    func = self.orig_f
    axis = self.axis
    args = self.args
    kwargs = self.kwargs

    is_series = obj.ndim == 1

    if obj._get_axis_number(axis) == 1:
        assert not is_series
        return obj.T.transform(func, 0, *args, **kwargs).T

    if is_list_like(func) and not is_dict_like(func):
        func = cast(List[AggFuncTypeBase], func)
        # Convert func equivalent dict
        if is_series:
            func = {com.get_callable_name(v) or v: v for v in func}
        else:
            func = {col: func for col in obj}

    if is_dict_like(func):
        func = cast(AggFuncTypeDict, func)
        return self.transform_dict_like(func)

    # func is either str or callable
    func = cast(AggFuncTypeBase, func)
    try:
        result = self.transform_str_or_callable(func)
    except TypeError:
        raise
    except Exception as err:
        raise ValueError("Transform function failed") from err

    # Functions that transform may return empty Series/DataFrame
    # when the dtype is not appropriate
    if (
        isinstance(result, (ABCSeries, ABCDataFrame))
        and result.empty
        and not obj.empty
    ):
        raise ValueError("Transform function failed")
    # error: Argument 1 to "__get__" of "AxisProperty" has incompatible type
    # "Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,
    # DataFrameGroupBy, BaseWindow, Resampler]"; expected "Union[DataFrame,
    # Series]"
    if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
        obj.index  # type:ignore[arg-type]
    ):
        raise ValueError("Function did not transform")

    return result
pandas/core/apply.py
400
pandas
{ "docstring": "\n Transform a DataFrame or Series.\n\n Returns\n -------\n DataFrame or Series\n Result of applying ``func`` along the given axis of the\n Series or DataFrame.\n\n Raises\n ------\n ValueError\n If the transform function fails or does not transform.\n ", "language": "en", "n_whitespaces": 125, "n_words": 35, "vocab_size": 27 }
182
Python
115
7440fe27eef0aab0f217ca9fb434f1e3ac74836e
apply.py
167,120
51
246
transform
https://github.com/pandas-dev/pandas.git
TYP: setter for index/columns property-like (AxisProperty) (#46565) Co-authored-by: Matthew Roeschke <[email protected]>
582
0
39,941
15
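Hedged usage examples covering the three dispatch paths in transform (exact error messages may vary by pandas version):

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 4, 9], "b": [1, 8, 27]})

df.transform(np.sqrt)         # str-or-callable path
df.transform([np.sqrt])       # list-like, converted to dict-like
df.transform({"a": np.sqrt})  # dict-like path

# An aggregating function does not preserve the index and raises:
try:
    df.transform(lambda s: s.sum())
except ValueError as err:
    print(err)                # Function did not transform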
9
17
async def acquire(self):
    if (not self._locked and (self._waiters is None or
            all(w.cancelled() for w in self._waiters))):
        self._locked = True
        return True

    if self._waiters is None:
        self._waiters = collections.deque()
    fut = self._get_loop().create_future()
    self._waiters.append(fut)

    # Finally block should be called before the CancelledError
    # handling as we don't want CancelledError to call
    # _wake_up_first() and attempt to wake up itself.
    try:
        try:
            await fut
        finally:
            self._waiters.remove(fut)
    except exceptions.CancelledError:
        if not self._locked:
            self._wake_up_first()
        raise

    self._locked = True
    return True
python3.10.4/Lib/asyncio/locks.py
203
XX-Net
{ "docstring": "Acquire a lock.\n\n This method blocks until the lock is unlocked, then sets it to\n locked and returns True.\n ", "language": "en", "n_whitespaces": 40, "n_words": 19, "vocab_size": 19 }
76
Python
56
8198943edd73a363c266633e1aa5b2a9e9c9f526
locks.py
220,550
20
119
acquire
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
297
0
56,049
13
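acquire() is normally reached through the async context manager; a minimal runnable example:

import asyncio

async def worker(name: str, lock: asyncio.Lock) -> None:
    async with lock:          # awaits acquire(), releases on exit
        print(f"{name} holds the lock")
        await asyncio.sleep(0.1)

async def main() -> None:
    lock = asyncio.Lock()
    await asyncio.gather(worker("a", lock), worker("b", lock))

asyncio.run(main())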
35
69
def canonicalize(g, dummies, msym, *v):
    from sympy.combinatorics.testutil import canonicalize_naive
    if not isinstance(msym, list):
        if msym not in (0, 1, None):
            raise ValueError('msym must be 0, 1 or None')
        num_types = 1
    else:
        num_types = len(msym)
        if not all(msymx in (0, 1, None) for msymx in msym):
            raise ValueError('msym entries must be 0, 1 or None')
        if len(dummies) != num_types:
            raise ValueError(
                'dummies and msym must have the same number of elements')
    size = g.size
    num_tensors = 0
    v1 = []
    for base_i, gens_i, n_i, sym_i in v:
        # check that the BSGS is minimal;
        # this property is used in double_coset_can_rep;
        # if it is not minimal use canonicalize_naive
        if not _is_minimal_bsgs(base_i, gens_i):
            mbsgs = get_minimal_bsgs(base_i, gens_i)
            if not mbsgs:
                can = canonicalize_naive(g, dummies, msym, *v)
                return can
            base_i, gens_i = mbsgs
        v1.append((base_i, gens_i, [[]] * n_i, sym_i))
        num_tensors += n_i

    if num_types == 1 and not isinstance(msym, list):
        dummies = [dummies]
        msym = [msym]
    flat_dummies = []
    for dumx in dummies:
        flat_dummies.extend(dumx)

    if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)):
        raise ValueError('dummies is not valid')

    # slot symmetry of the tensor
    size1, sbase, sgens = gens_products(*v1)
    if size != size1:
        raise ValueError(
            'g has size %d, generators have size %d' % (size, size1))
    free = [i for i in range(size - 2) if i not in flat_dummies]
    num_free = len(free)

    # g1 minimal tensor under slot symmetry
    g1 = canonical_free(sbase, sgens, g, num_free)
    if not flat_dummies:
        return g1
    # save the sign of g1
    sign = 0 if g1[-1] == size - 1 else 1

    # the free indices are kept fixed.
    # Determine free_i, the list of slots of tensors which are fixed
    # since they are occupied by free indices, which are fixed.
    start = 0
    for i, (base_i, gens_i, n_i, sym_i) in enumerate(v):
        free_i = []
        len_tens = gens_i[0].size - 2
        # for each component tensor get a list of fixed slots
        for j in range(n_i):
            # get the elements corresponding to the component tensor
            h = g1[start:(start + len_tens)]
            fr = []
            # get the positions of the fixed elements in h
            for k in free:
                if k in h:
                    fr.append(h.index(k))
            free_i.append(fr)
            start += len_tens
        v1[i] = (base_i, gens_i, free_i, sym_i)
    # BSGS of the tensor with fixed free indices
    # if tensor_gens fails in gens_product, use canonicalize_naive
    size, sbase, sgens = gens_products(*v1)

    # reduce the permutations getting rid of the free indices
    pos_free = [g1.index(x) for x in range(num_free)]
    size_red = size - num_free
    g1_red = [x - num_free for x in g1 if x in flat_dummies]
    if sign:
        g1_red.extend([size_red - 1, size_red - 2])
    else:
        g1_red.extend([size_red - 2, size_red - 1])
    map_slots = _get_map_slots(size, pos_free)
    sbase_red = [map_slots[i] for i in sbase if i not in pos_free]
    sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free])
                 for y in sgens]
    dummies_red = [[x - num_free for x in y] for y in dummies]
    transv_red = get_transversals(sbase_red, sgens_red)
    g1_red = _af_new(g1_red)
    g2 = double_coset_can_rep(
        dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red)
    if g2 == 0:
        return 0
    # lift to the case with the free indices
    g3 = _lift_sgens(size, pos_free, free, g2)
    return g3
sympy/combinatorics/tensor_can.py
980
sympy
{ "docstring": "\n canonicalize tensor formed by tensors\n\n Parameters\n ==========\n\n g : permutation representing the tensor\n\n dummies : list representing the dummy indices\n it can be a list of dummy indices of the same type\n or a list of lists of dummy indices, one list for each\n type of index;\n the dummy indices must come after the free indices,\n and put in order contravariant, covariant\n [d0, -d0, d1,-d1,...]\n\n msym : symmetry of the metric(s)\n it can be an integer or a list;\n in the first case it is the symmetry of the dummy index metric;\n in the second case it is the list of the symmetries of the\n index metric for each type\n\n v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i`\n\n base_i, gens_i : BSGS for tensors of this type.\n The BSGS should have minimal base under lexicographic ordering;\n if not, an attempt is made do get the minimal BSGS;\n in case of failure,\n canonicalize_naive is used, which is much slower.\n\n n_i : number of tensors of type `i`.\n\n sym_i : symmetry under exchange of component tensors of type `i`.\n\n Both for msym and sym_i the cases are\n * None no symmetry\n * 0 commuting\n * 1 anticommuting\n\n Returns\n =======\n\n 0 if the tensor is zero, else return the array form of\n the permutation representing the canonical form of the tensor.\n\n Algorithm\n =========\n\n First one uses canonical_free to get the minimum tensor under\n lexicographic order, using only the slot symmetries.\n If the component tensors have not minimal BSGS, it is attempted\n to find it; if the attempt fails canonicalize_naive\n is used instead.\n\n Compute the residual slot symmetry keeping fixed the free indices\n using tensor_gens(base, gens, list_free_indices, sym).\n\n Reduce the problem eliminating the free indices.\n\n Then use double_coset_can_rep and lift back the result reintroducing\n the free indices.\n\n Examples\n ========\n\n one type of index with commuting metric;\n\n `A_{a b}` and `B_{a b}` antisymmetric and commuting\n\n `T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}`\n\n `ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices\n\n g = [1, 3, 0, 5, 4, 2, 6, 7]\n\n `T_c = 0`\n\n >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product\n >>> from sympy.combinatorics import Permutation\n >>> base2a, gens2a = get_symmetric_group_sgs(2, 1)\n >>> t0 = (base2a, gens2a, 1, 0)\n >>> t1 = (base2a, gens2a, 2, 0)\n >>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])\n >>> canonicalize(g, range(6), 0, t0, t1)\n 0\n\n same as above, but with `B_{a b}` anticommuting\n\n `T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}`\n\n can = [0,2,1,4,3,5,7,6]\n\n >>> t1 = (base2a, gens2a, 2, 1)\n >>> canonicalize(g, range(6), 0, t0, t1)\n [0, 2, 1, 4, 3, 5, 7, 6]\n\n two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order,\n both with commuting metric\n\n `f^{a b c}` antisymmetric, commuting\n\n `A_{m a}` no symmetry, commuting\n\n `T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}`\n\n ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n]\n\n g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15]\n\n The canonical tensor is\n `T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}`\n\n can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14]\n\n >>> base_f, gens_f = get_symmetric_group_sgs(3, 1)\n >>> base1, gens1 = get_symmetric_group_sgs(1)\n >>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)\n >>> t0 = (base_f, gens_f, 2, 0)\n >>> t1 = (base_A, gens_A, 4, 0)\n >>> dummies = [range(2, 10), 
range(10, 14)]\n >>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15])\n >>> canonicalize(g, dummies, [0, 0], t0, t1)\n [0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14]\n ", "language": "en", "n_whitespaces": 946, "n_words": 601, "vocab_size": 295 }
523
Python
243
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
tensor_can.py
198,366
76
638
canonicalize
https://github.com/sympy/sympy.git
Cleanup loops and ranges
1,079
0
48,885
17
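The first doctest from the docstring, runnable as-is (two tensor types, both antisymmetric and commuting; this tensor canonicalizes to zero):

from sympy.combinatorics import Permutation
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize

base2a, gens2a = get_symmetric_group_sgs(2, 1)
t0 = (base2a, gens2a, 1, 0)   # one A_{a b}: antisymmetric, commuting
t1 = (base2a, gens2a, 2, 0)   # two B_{a b}: antisymmetric, commuting
g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])
print(canonicalize(g, range(6), 0, t0, t1))  # 0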
1
7
def test_set_presence_with_status_msg_none(self) -> None:
    user_id = "@test:server"
    status_msg = "I'm here!"

    # Mark user as online
    self._set_presencestate_with_status_msg(
        user_id, PresenceState.ONLINE, status_msg
    )

    # Mark user as online and `status_msg = None`
    self._set_presencestate_with_status_msg(user_id, PresenceState.ONLINE, None)
tests/handlers/test_presence.py
66
synapse
{ "docstring": "Test that if a user set again the presence manually\n and status is `None`, that `status_msg` is `None`.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 16 }
33
Python
24
652d1669c5a103b1c20478770c4aaf18849c09a3
test_presence.py
250,369
10
38
test_set_presence_with_status_msg_none
https://github.com/matrix-org/synapse.git
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
100
0
73,405
8
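A framework-free sketch of the behaviour under test, with invented data shapes: a manual presence update carrying status_msg=None clears the previous message rather than keeping it.

from typing import Optional

presence = {"state": "offline", "status_msg": None}

def set_presence(state: str, status_msg: Optional[str]) -> None:
    presence["state"] = state
    presence["status_msg"] = status_msg  # None overwrites any old message

set_presence("online", "I'm here!")
set_presence("online", None)
assert presence == {"state": "online", "status_msg": None}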