Dataset features and observed ranges (value range for int64 columns, string-length range for string columns):

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| id | int64 | 20 | 338k |
| vocab_size | int64 | 2 | 671 |
| ast_levels | int64 | 4 | 32 |
| nloc | int64 | 1 | 451 |
| n_ast_nodes | int64 | 12 | 5.6k |
| n_identifiers | int64 | 1 | 186 |
| n_ast_errors | int64 | 0 | 10 |
| n_words | int64 | 2 | 2.17k |
| n_whitespaces | int64 | 2 | 13.8k |
| fun_name | stringlengths | 2 | 73 |
| commit_message | stringlengths | 51 | 15.3k |
| url | stringlengths | 31 | 59 |
| code | stringlengths | 51 | 31k |
| ast_errors | stringlengths | 0 | 1.46k |
| token_counts | int64 | 6 | 3.32k |
| file_name | stringlengths | 5 | 56 |
| language | stringclasses | 1 value (Python) | |
| path | stringlengths | 7 | 134 |
| commit_id | stringlengths | 40 | 40 |
| repo | stringlengths | 3 | 28 |
| complexity | int64 | 1 | 153 |
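The feature table above can be inspected programmatically with the `datasets` library. The snippet below is a minimal sketch, not part of this card: the Hub repository ID is a placeholder (the dataset's actual name is not given in this excerpt), and it assumes a `train` split exists.

```python
from datasets import load_dataset

# Placeholder Hub ID -- substitute the dataset's real repository name.
ds = load_dataset("someuser/python-functions-with-ast-metrics", split="train")

print(ds.column_names)   # should match the feature table above
row = ds[0]              # a single record as a dict
print(row["fun_name"], row["repo"], row["complexity"])
print(row["code"][:200]) # first 200 characters of the stored function source
```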
Sample records follow, one per block. Numeric fields are labeled below; the remaining unlabeled lines in each record are, in schema order, fun_name, commit_message, url, and code (with ast_errors shown where present).

id: 176,400 | vocab_size: 40 | ast_levels: 16 | nloc: 31 | n_ast_nodes: 173 | n_identifiers: 18 | n_ast_errors: 0 | n_words: 57 | n_whitespaces: 117
_add_edge_keys
Fix missing backticks (#5381) * Fix missing backticks * one more backticks
https://github.com/networkx/networkx.git
def _add_edge_keys(G, betweenness, weight=None): r _weight = _weight_function(G, weight) edge_bc = dict.fromkeys(G.edges, 0.0) for u, v in betweenness: d = G[u][v] wt = _weight(u, v, d) keys = [k for k in d if _weight(u, v, {k: d[k]}) == wt] bc = betweenness[(u, v)] / len(keys) for k in keys: edge_bc[(u, v, k)] = bc return edge_bc
token_counts: 122 | file_name: betweenness.py | language: Python | path: networkx/algorithms/centrality/betweenness.py | commit_id: 0ce72858168a8ece6b55f695677f4be80f144aff | repo: networkx | complexity: 5
id: 130,235 | vocab_size: 21 | ast_levels: 12 | nloc: 7 | n_ast_nodes: 72 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 23 | n_whitespaces: 60
get_other_nodes
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def get_other_nodes(cluster, exclude_head=False): return [ node for node in cluster.list_all_nodes() if node._raylet_socket_name != ray.worker._global_node._raylet_socket_name and (exclude_head is False or node.head is False) ]
token_counts: 46 | file_name: test_utils.py | language: Python | path: python/ray/_private/test_utils.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 5
id: 118,639 | vocab_size: 41 | ast_levels: 14 | nloc: 22 | n_ast_nodes: 276 | n_identifiers: 24 | n_ast_errors: 0 | n_words: 60 | n_whitespaces: 353
test_multiple_connections
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
https://github.com/streamlit/streamlit.git
def test_multiple_connections(self): with patch( "streamlit.server.server.LocalSourcesWatcher" ), self._patch_app_session(): yield self.start_server_loop() self.assertFalse(self.server.browser_is_connected) # Open a websocket connection ws_client1 = yield self.ws_connect() self.assertTrue(self.server.browser_is_connected) # Open another ws_client2 = yield self.ws_connect() self.assertTrue(self.server.browser_is_connected) # Assert that our session_infos are sane session_infos = list(self.server._session_info_by_id.values()) self.assertEqual(2, len(session_infos)) self.assertNotEqual( session_infos[0].session.id, session_infos[1].session.id, ) # Close the first ws_client1.close() yield gen.sleep(0.1) self.assertTrue(self.server.browser_is_connected) # Close the second ws_client2.close() yield gen.sleep(0.1) self.assertFalse(self.server.browser_is_connected)
token_counts: 166 | file_name: server_test.py | language: Python | path: lib/tests/streamlit/server_test.py | commit_id: 704eab3478cf69847825b23dabf15813a8ac9fa2 | repo: streamlit | complexity: 1
id: 43,677 | vocab_size: 6 | ast_levels: 6 | nloc: 3 | n_ast_nodes: 28 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
leaves
Map and Partial DAG authoring interface for Dynamic Task Mapping (#19965) * Make DAGNode a proper Abstract Base Class * Prevent mapping an already mapped Task/TaskGroup Also prevent calls like .partial(...).partial(...). It is uncertain whether these kinds of repeated partial/map calls have utility, so let's disable them entirely for now to simplify implementation. We can always add them if they are proven useful. Co-authored-by: Tzu-ping Chung <[email protected]>
https://github.com/apache/airflow.git
def leaves(self) -> List["MappedOperator"]: return [self]
token_counts: 15 | file_name: baseoperator.py | language: Python | path: airflow/models/baseoperator.py | commit_id: e9226139c2727a4754d734f19ec625c4d23028b3 | repo: airflow | complexity: 1
id: 215,150 | vocab_size: 16 | ast_levels: 13 | nloc: 5 | n_ast_nodes: 117 | n_identifiers: 15 | n_ast_errors: 1 | n_words: 16 | n_whitespaces: 42
free_port
adding functional tests for use_etag parameter in file.managed state
https://github.com/saltstack/salt.git
def free_port(): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: s.bind(("", 0)) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) return s.getsockname()[1] @pytest.fixture(autouse=True, scope="session")
ast_errors: @pytest.fixture(autouse=True, scope="session")
token_counts: 57 | file_name: test_file.py | language: Python | path: tests/pytests/functional/states/test_file.py | commit_id: e535e1cbc2a56154fc77efa26957e1c076125911 | repo: salt | complexity: 1
id: 271,440 | vocab_size: 4 | ast_levels: 7 | nloc: 2 | n_ast_nodes: 22 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 4 | n_whitespaces: 18
shape
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def shape(self): return self._type_spec.shape
token_counts: 12 | file_name: keras_tensor.py | language: Python | path: keras/engine/keras_tensor.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 1
id: 69,313 | vocab_size: 73 | ast_levels: 20 | nloc: 41 | n_ast_nodes: 446 | n_identifiers: 21 | n_ast_errors: 0 | n_words: 150 | n_whitespaces: 110
get_conditions
fix: typo in sales_register's filter mode_of_payment (#32371) * fix: typo in sales_register's filter mode_of_payment
https://github.com/frappe/erpnext.git
def get_conditions(filters): conditions = "" accounting_dimensions = get_accounting_dimensions(as_list=False) or [] accounting_dimensions_list = [d.fieldname for d in accounting_dimensions] if filters.get("company"): conditions += " and company=%(company)s" if filters.get("customer") and "customer" not in accounting_dimensions_list: conditions += " and customer = %(customer)s" if filters.get("from_date"): conditions += " and posting_date >= %(from_date)s" if filters.get("to_date"): conditions += " and posting_date <= %(to_date)s" if filters.get("owner"): conditions += " and owner = %(owner)s" def get_sales_invoice_item_field_condition(field, table="Sales Invoice Item") -> str: if not filters.get(field) or field in accounting_dimensions_list: return "" return f conditions += get_sales_invoice_item_field_condition("mode_of_payment", "Sales Invoice Payment") conditions += get_sales_invoice_item_field_condition("cost_center") conditions += get_sales_invoice_item_field_condition("warehouse") conditions += get_sales_invoice_item_field_condition("brand") conditions += get_sales_invoice_item_field_condition("item_group") if accounting_dimensions: common_condition = for dimension in accounting_dimensions: if filters.get(dimension.fieldname): if frappe.get_cached_value("DocType", dimension.document_type, "is_tree"): filters[dimension.fieldname] = get_dimension_with_children( dimension.document_type, filters.get(dimension.fieldname) ) conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) else: conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) return conditions
token_counts: 213 | file_name: sales_register.py | language: Python | path: erpnext/accounts/report/sales_register/sales_register.py | commit_id: 62c5b286906a594e5ea58e3412e3d5fb4eb5add7 | repo: erpnext | complexity: 13
id: 259,467 | vocab_size: 38 | ast_levels: 12 | nloc: 5 | n_ast_nodes: 139 | n_identifiers: 23 | n_ast_errors: 1 | n_words: 44 | n_whitespaces: 98
_assert_predictor_equal
MNT Update to black 22.3.0 to resolve click error (#22983) * MNT Update to black 22.3.0 to resolve click error * STY Update for new black version
https://github.com/scikit-learn/scikit-learn.git
def _assert_predictor_equal(gb_1, gb_2, X): # Check identical nodes for each tree for pred_ith_1, pred_ith_2 in zip(gb_1._predictors, gb_2._predictors): for predictor_1, predictor_2 in zip(pred_ith_1, pred_ith_2): assert_array_equal(predictor_1.nodes, predictor_2.nodes) # Check identical predictions assert_allclose(gb_1.predict(X), gb_2.predict(X)) @pytest.mark.parametrize( "GradientBoosting, X, y", [ (HistGradientBoostingClassifier, X_classification, y_classification), (HistGradientBoostingRegressor, X_regression, y_regression), ], )
ast_errors: @pytest.mark.parametrize( "GradientBoosting, X, y", [ (HistGradientBoostingClassifier, X_classification, y_classification), (HistGradientBoostingRegressor, X_regression, y_regression), ], )
token_counts: 64 | file_name: test_warm_start.py | language: Python | path: sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py | commit_id: d4aad64b1eb2e42e76f49db2ccfbe4b4660d092b | repo: scikit-learn | complexity: 3
id: 267,881 | vocab_size: 43 | ast_levels: 14 | nloc: 24 | n_ast_nodes: 176 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 51 | n_whitespaces: 260
_setup_dynamic
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def _setup_dynamic(self) -> None: port = 8443 ports = [ port, ] cmd = ['start', 'master', '--listen', 'https://0.0.0.0:%d' % port] descriptor = run_support_container( self.args, self.platform, self.image, self.DOCKER_CONTAINER_NAME, ports, allow_existing=True, cleanup=CleanupMode.YES, cmd=cmd, ) if not descriptor: return if self.args.explain: config = '# Unknown' else: config = self._get_config(self.DOCKER_CONTAINER_NAME, 'https://%s:%s/' % (self.DOCKER_CONTAINER_NAME, port)) self._write_config(config)
token_counts: 110 | file_name: openshift.py | language: Python | path: test/lib/ansible_test/_internal/commands/integration/cloud/openshift.py | commit_id: 3eb0485dd92c88cc92152d3656d94492db44b183 | repo: ansible | complexity: 3
id: 287,676 | vocab_size: 54 | ast_levels: 10 | nloc: 44 | n_ast_nodes: 287 | n_identifiers: 18 | n_ast_errors: 0 | n_words: 106 | n_whitespaces: 305
test_switching_adapters_based_on_zero_rssi
Handle default RSSI values from bleak in bluetooth (#78908)
https://github.com/home-assistant/core.git
async def test_switching_adapters_based_on_zero_rssi(hass, enable_bluetooth): address = "44:44:33:11:23:45" switchbot_device_no_rssi = BLEDevice(address, "wohand_poor_signal", rssi=0) switchbot_adv_no_rssi = AdvertisementData( local_name="wohand_no_rssi", service_uuids=[] ) inject_advertisement_with_source( hass, switchbot_device_no_rssi, switchbot_adv_no_rssi, "hci0" ) assert ( bluetooth.async_ble_device_from_address(hass, address) is switchbot_device_no_rssi ) switchbot_device_good_signal = BLEDevice(address, "wohand_good_signal", rssi=-60) switchbot_adv_good_signal = AdvertisementData( local_name="wohand_good_signal", service_uuids=[] ) inject_advertisement_with_source( hass, switchbot_device_good_signal, switchbot_adv_good_signal, "hci1" ) assert ( bluetooth.async_ble_device_from_address(hass, address) is switchbot_device_good_signal ) inject_advertisement_with_source( hass, switchbot_device_good_signal, switchbot_adv_no_rssi, "hci0" ) assert ( bluetooth.async_ble_device_from_address(hass, address) is switchbot_device_good_signal ) # We should not switch adapters unless the signal hits the threshold switchbot_device_similar_signal = BLEDevice( address, "wohand_similar_signal", rssi=-62 ) switchbot_adv_similar_signal = AdvertisementData( local_name="wohand_similar_signal", service_uuids=[] ) inject_advertisement_with_source( hass, switchbot_device_similar_signal, switchbot_adv_similar_signal, "hci0" ) assert ( bluetooth.async_ble_device_from_address(hass, address) is switchbot_device_good_signal )
token_counts: 180 | file_name: test_manager.py | language: Python | path: tests/components/bluetooth/test_manager.py | commit_id: 5c294550e8c96d636ff22f4206c23de05b13bdb2 | repo: core | complexity: 1
id: 294,075 | vocab_size: 13 | ast_levels: 10 | nloc: 7 | n_ast_nodes: 73 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 17 | n_whitespaces: 67
entity_picture
Add update platform to the Supervisor integration (#68475)
https://github.com/home-assistant/core.git
def entity_picture(self) -> str | None: if not self.available: return None if self.coordinator.data[DATA_KEY_ADDONS][self._addon_slug][ATTR_ICON]: return f"/api/hassio/addons/{self._addon_slug}/icon" return None
token_counts: 41 | file_name: update.py | language: Python | path: homeassistant/components/hassio/update.py | commit_id: d17f8e9ed6cd8b4e3e44e404b639fd58d595a3ac | repo: core | complexity: 3
id: 337,974 | vocab_size: 15 | ast_levels: 13 | nloc: 8 | n_ast_nodes: 94 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 17 | n_whitespaces: 44
test_load_states_by_epoch
Speed up main CI (#571) * Speed up ci by reducing training epochs
https://github.com/huggingface/accelerate.git
def test_load_states_by_epoch(self): testargs = f.split() output = run_command(self._launch_args + testargs, return_stdout=True) self.assertNotIn("epoch 0:", output) self.assertIn("epoch 1:", output)
token_counts: 43 | file_name: test_examples.py | language: Python | path: tests/test_examples.py | commit_id: 7a49418e51a460fbd5229e065041d1ff0749e3c8 | repo: accelerate | complexity: 1
id: 215,142 | vocab_size: 23 | ast_levels: 12 | nloc: 10 | n_ast_nodes: 119 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 32 | n_whitespaces: 94
wait_until
Fix the check in the hetzner cloud show_instance function to be an action.
https://github.com/saltstack/salt.git
def wait_until(name, state, timeout=300): start_time = time.time() node = show_instance(name, call="action") while True: if node["state"] == state: return True time.sleep(1) if time.time() - start_time > timeout: return False node = show_instance(name, call="action")
token_counts: 71 | file_name: hetzner.py | language: Python | path: salt/cloud/clouds/hetzner.py | commit_id: 4b878dfc1c12034ac1deacaa9ebb1401971ce38c | repo: salt | complexity: 4
id: 291,738 | vocab_size: 15 | ast_levels: 11 | nloc: 10 | n_ast_nodes: 83 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 19 | n_whitespaces: 73
test_track_task_functions
Upgrade pytest-aiohttp (#82475) * Upgrade pytest-aiohttp * Make sure executors, tasks and timers are closed Some test will trigger warnings on garbage collect, these warnings spills over into next test. Some test trigger tasks that raise errors on shutdown, these spill over into next test. This is to mimic older pytest-aiohttp and it's behaviour on test cleanup. Discussions on similar changes for pytest-aiohttp are here: https://github.com/pytest-dev/pytest-asyncio/pull/309 * Replace loop with event_loop * Make sure time is frozen for tests * Make sure the ConditionType is not async /home-assistant/homeassistant/helpers/template.py:2082: RuntimeWarning: coroutine 'AsyncMockMixin._execute_mock_call' was never awaited def wrapper(*args, **kwargs): Enable tracemalloc to get traceback where the object was allocated. See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info. * Increase litejet press tests with a factor 10 The times are simulated anyway, and we can't stop the normal event from occuring. * Use async handlers for aiohttp tests/components/motioneye/test_camera.py::test_get_still_image_from_camera tests/components/motioneye/test_camera.py::test_get_still_image_from_camera tests/components/motioneye/test_camera.py::test_get_stream_from_camera tests/components/motioneye/test_camera.py::test_get_stream_from_camera tests/components/motioneye/test_camera.py::test_camera_option_stream_url_template tests/components/motioneye/test_camera.py::test_camera_option_stream_url_template /Users/joakim/src/hass/home-assistant/venv/lib/python3.9/site-packages/aiohttp/web_urldispatcher.py:189: DeprecationWarning: Bare functions are deprecated, use async ones warnings.warn( * Switch to freezegun in modbus tests The tests allowed clock to tick in between steps * Make sure skybell object are fully mocked Old tests would trigger attempts to post to could services: ``` DEBUG:aioskybell:HTTP post https://cloud.myskybell.com/api/v3/login/ Request with headers: {'content-type': 'application/json', 'accept': '*/*', 'x-skybell-app-id': 'd2b542c7-a7e4-4e1e-b77d-2b76911c7c46', 'x-skybell-client-id': '1f36a3c0-6dee-4997-a6db-4e1c67338e57'} ``` * Fix sorting that broke after rebase
https://github.com/home-assistant/core.git
async def test_track_task_functions(event_loop): hass = ha.HomeAssistant() try: assert hass._track_task hass.async_stop_track_tasks() assert not hass._track_task hass.async_track_tasks() assert hass._track_task finally: await hass.async_stop()
token_counts: 46 | file_name: test_core.py | language: Python | path: tests/test_core.py | commit_id: c576a68d336bc91fd82c299d9b3e5dfdc1c14960 | repo: core | complexity: 2
id: 296,853 | vocab_size: 31 | ast_levels: 10 | nloc: 5 | n_ast_nodes: 75 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 31 | n_whitespaces: 85
handle_template_exception
Refactor history_stats to minimize database access (part 2) (#70255)
https://github.com/home-assistant/core.git
def handle_template_exception(ex, field): if ex.args and ex.args[0].startswith("UndefinedError: 'None' has no attribute"): # Common during HA startup - so just a warning _LOGGER.warning(ex) return _LOGGER.error("Error parsing template for field %s", field, exc_info=ex)
token_counts: 44 | file_name: helpers.py | language: Python | path: homeassistant/components/history_stats/helpers.py | commit_id: 73a368c24246b081cdb98923ca3180937d436c3b | repo: core | complexity: 3
id: 169,036 | vocab_size: 4 | ast_levels: 6 | nloc: 8 | n_ast_nodes: 16 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 4 | n_whitespaces: 11
_get_column_format_based_on_dtypes
TYP: Autotyping (#48191) * annotate-magics * annotate-imprecise-magics * none-return * scalar-return * pyi files * ignore vendored file * manual changes * ignore pyright in pickle_compat (these errors would be legit if the current __new__ methods were called but I think these pickle tests call older __new__ methods which allowed providing multiple positional arguments) * run autotyping in pre-commit * remove final and expand safe (and add annotate-imprecise-magics)
https://github.com/pandas-dev/pandas.git
def _get_column_format_based_on_dtypes(self) -> str:
token_counts: 31 | file_name: latex.py | language: Python | path: pandas/io/formats/latex.py | commit_id: 54347fe684e0f7844bf407b1fb958a5269646825 | repo: pandas | complexity: 1
id: 246,115 | vocab_size: 20 | ast_levels: 12 | nloc: 268 | n_ast_nodes: 72 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 25 | n_whitespaces: 123
generate_config_section
Add a config flag to inhibit `M_USER_IN_USE` during registration (#11743) This is mostly motivated by the tchap use case, where usernames are automatically generated from the user's email address (in a way that allows figuring out the email address from the username). Therefore, it's an issue if we respond to requests on /register and /register/available with M_USER_IN_USE, because it can potentially leak email addresses (which include the user's real name and place of work). This commit adds a flag to inhibit the M_USER_IN_USE errors that are raised both by /register/available, and when providing a username early into the registration process. This error will still be raised if the user completes the registration process but the username conflicts. This is particularly useful when using modules (https://github.com/matrix-org/synapse/pull/11790 adds a module callback to set the username of users at registration) or SSO, since they can ensure the username is unique. More context is available in the PR that introduced this behaviour to synapse-dinsic: matrix-org/synapse-dinsic#48 - as well as the issue in the matrix-dinsic repo: matrix-org/matrix-dinsic#476
https://github.com/matrix-org/synapse.git
def generate_config_section(self, generate_secrets=False, **kwargs): if generate_secrets: registration_shared_secret = 'registration_shared_secret: "%s"' % ( random_string_with_symbols(50), ) else: registration_shared_secret = "#registration_shared_secret: <PRIVATE STRING>" return ( % locals() )
token_counts: 39 | file_name: registration.py | language: Python | path: synapse/config/registration.py | commit_id: 95b3f952fa43e51feae166fa1678761c5e32d900 | repo: synapse | complexity: 2
id: 258,534 | vocab_size: 29 | ast_levels: 13 | nloc: 13 | n_ast_nodes: 152 | n_identifiers: 25 | n_ast_errors: 0 | n_words: 36 | n_whitespaces: 83
test_graph_feature_names_out
ENH Adds get_feature_names_out to neighbors module (#22212) Co-authored-by: Olivier Grisel <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_graph_feature_names_out(Klass): n_samples_fit = 20 n_features = 10 rng = np.random.RandomState(42) X = rng.randn(n_samples_fit, n_features) est = Klass().fit(X) names_out = est.get_feature_names_out() class_name_lower = Klass.__name__.lower() expected_names_out = np.array( [f"{class_name_lower}{i}" for i in range(est.n_samples_fit_)], dtype=object, ) assert_array_equal(names_out, expected_names_out)
token_counts: 89 | file_name: test_graph.py | language: Python | path: sklearn/neighbors/tests/test_graph.py | commit_id: 330881a21ca48c543cc8a67aa0d4e4c1dc1001ab | repo: scikit-learn | complexity: 2
id: 107,233 | vocab_size: 4 | ast_levels: 8 | nloc: 2 | n_ast_nodes: 27 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 4 | n_whitespaces: 18
verts
Jointly track x and y in PolygonSelector. It's easier to track them in a single list. Also init _selection_artist and _polygon_handles with empty arrays, as there's no reason to pretend that they start with 0, 0. On the other hand, _xys does need to start as a non-empty array as the last point gets updated as being the cursor position.
https://github.com/matplotlib/matplotlib.git
def verts(self): return self._xys[:-1]
token_counts: 15 | file_name: widgets.py | language: Python | path: lib/matplotlib/widgets.py | commit_id: 7f4eb87ef290ef9911d2febb7e8c60fcd5c3266e | repo: matplotlib | complexity: 1
id: 130,249 | vocab_size: 25 | ast_levels: 12 | nloc: 7 | n_ast_nodes: 109 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 27 | n_whitespaces: 84
match_entries
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def match_entries(self, entries, separators=None): if not util._is_iterable(entries): raise TypeError("entries:{!r} is not an iterable.".format(entries)) entry_map = util._normalize_entries(entries, separators=separators) match_paths = util.match_files(self.patterns, iterkeys(entry_map)) for path in match_paths: yield entry_map[path]
token_counts: 68 | file_name: pathspec.py | language: Python | path: python/ray/_private/thirdparty/pathspec/pathspec.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 3
id: 209,841 | vocab_size: 38 | ast_levels: 8 | nloc: 21 | n_ast_nodes: 111 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 58 | n_whitespaces: 152
get_service_status
[Hinty] Core typing: windows (#3684) * Core typing: windows Co-authored-by: Pierre <[email protected]>
https://github.com/secdev/scapy.git
def get_service_status(service): # type: (str) -> Dict[str, int] SERVICE_QUERY_STATUS = 0x0004 schSCManager = OpenSCManagerW( None, # Local machine None, # SERVICES_ACTIVE_DATABASE SERVICE_QUERY_STATUS ) service = OpenServiceW( schSCManager, service, SERVICE_QUERY_STATUS ) status = SERVICE_STATUS() QueryServiceStatus( service, status ) result = _struct_to_dict(status) CloseServiceHandle(service) CloseServiceHandle(schSCManager) return result ############################## ###### Define IPHLPAPI ###### ############################## iphlpapi = ctypes.windll.iphlpapi ############################## ########### Common ########### ##############################
token_counts: 56 | file_name: structures.py | language: Python | path: scapy/arch/windows/structures.py | commit_id: a2b7a28faff1db058dd22ce097a268e0ad5d1d33 | repo: scapy | complexity: 1
id: 153,096 | vocab_size: 13 | ast_levels: 8 | nloc: 4 | n_ast_nodes: 38 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 13 | n_whitespaces: 27
inplace_applyier_builder
FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373) Signed-off-by: Dmitry Chigarev <[email protected]>
https://github.com/modin-project/modin.git
def inplace_applyier_builder(cls, key, func=None): inplace_args = [] if func is None else [func]
token_counts: 28 | file_name: groupby.py | language: Python | path: modin/core/dataframe/algebra/default2pandas/groupby.py | commit_id: 1e65a4afd191cf61ba05b80545d23f9b88962f41 | repo: modin | complexity: 2
id: 179,428 | vocab_size: 46 | ast_levels: 15 | nloc: 25 | n_ast_nodes: 245 | n_identifiers: 24 | n_ast_errors: 0 | n_words: 67 | n_whitespaces: 322
preprocess
Svelte migration (WIP) (#448) * first migration commit * style comment * first mvp working with calculator * ali components * carousel * more changes * changes * add examples * examples support * more changes * interpretation * interpretation * submission state * first migration commit * style comment * first mvp working with calculator * ali components * carousel * more changes * changes * add examples * examples support * more changes * interpretation * interpretation * submission state * base image cropper * add image editor * css tweaks * remove dead code * finalise sketch tools * add webcam snapshot source * tweak config * tweak config * tweak config * tweaks * reset egg files * lockfile v2 * image tweaks * record audio from mic * add audio input components * audio tweaks * editable table * more table tweaks * sort columns * add row/col to table * add output table * fix broken paths * fix svelte build destination * fix svelte build destination again * fix gitignore * fix css * add themes * add all themes * snake core classnames * actually fix themes this time * merge changes Co-authored-by: Ali Abid <[email protected]> Co-authored-by: Ali Abid <[email protected]> Co-authored-by: pngwn <[email protected]>
https://github.com/gradio-app/gradio.git
def preprocess(self, x): if x is None: return x file_name, file_data, is_example = ( x["name"], x["data"], x.get("is_example", False), ) if is_example: file = processing_utils.create_tmp_copy_of_file(file_name) else: file = processing_utils.decode_base64_to_file( file_data, file_path=file_name ) file_name = file.name uploaded_format = file_name.split(".")[-1].lower() if self.type is not None and uploaded_format != self.type: output_file_name = file_name[0: file_name.rindex( ".") + 1] + self.type ff = FFmpeg(inputs={file_name: None}, outputs={output_file_name: None}) ff.run() return output_file_name else: return file_name
token_counts: 152 | file_name: inputs.py | language: Python | path: gradio/inputs.py | commit_id: d6b1247e2198acf7b30f9e90a4c4c3b94bc72107 | repo: gradio | complexity: 5
id: 215,950 | vocab_size: 42 | ast_levels: 15 | nloc: 17 | n_ast_nodes: 198 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 52 | n_whitespaces: 211
_get_disk_size
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <[email protected]>
https://github.com/saltstack/salt.git
def _get_disk_size(self, device): out = __salt__["cmd.run_all"]("df {}".format(device)) if out["retcode"]: msg = "Disk size info error: {}".format(out["stderr"]) log.error(msg) raise SIException(msg) devpath, blocks, used, available, used_p, mountpoint = ( elm for elm in out["stdout"].split(os.linesep)[-1].split(" ") if elm ) return { "device": devpath, "blocks": blocks, "used": used, "available": available, "used (%)": used_p, "mounted": mountpoint, }
token_counts: 117 | file_name: query.py | language: Python | path: salt/modules/inspectlib/query.py | commit_id: f2a783643de61cac1ff3288b40241e5ce6e1ddc8 | repo: salt | complexity: 4
id: 87,074 | vocab_size: 24 | ast_levels: 13 | nloc: 12 | n_ast_nodes: 116 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 26 | n_whitespaces: 158
test_sessions_metrics_with_metrics_only_field
fix(sessions): Handle edge case in case of wrong duplexer dispatch to `SessionsReleaseHealthBackend` [TET-481] (#40243)
https://github.com/getsentry/sentry.git
def test_sessions_metrics_with_metrics_only_field(self): response = self.do_request( { "organization_slug": [self.organization1], "project": [self.project1.id], "field": ["crash_free_rate(session)"], "groupBy": [], "interval": "1d", } ) assert len(response.data["groups"]) == 0 assert response.status_code == 200
token_counts: 66 | file_name: test_metrics_sessions_v2.py | language: Python | path: tests/sentry/release_health/test_metrics_sessions_v2.py | commit_id: 89d7aaa5a23f4d4ff962ad12c3be23651ace5c29 | repo: sentry | complexity: 1
id: 208,027 | vocab_size: 61 | ast_levels: 20 | nloc: 20 | n_ast_nodes: 185 | n_identifiers: 18 | n_ast_errors: 0 | n_words: 84 | n_whitespaces: 432
find_module
Minor refactors, found by static analysis (#7587) * Remove deprecated methods in `celery.local.Proxy` * Collapse conditionals for readability * Remove unused parameter `uuid` * Remove unused import `ClusterOptions` * Remove dangerous mutable default argument Continues work from #5478 * Remove always `None` and unused global variable * Remove unreachable `elif` block * Consolidate import statements * Add missing parameter to `os._exit()` * Add missing assert statement * Remove unused global `WindowsError` * Use `mkstemp` instead of deprecated `mktemp` * No need for `for..else` constructs in loops that don't break In these cases where the loop returns or raises instead of breaking, it is simpler to just put the code that runs after the loop completes right after the loop instead. * Use the previously unused parameter `compat_modules` Previously this parameter was always overwritten by the value of `COMPAT_MODULES.get(name, ())`, which was very likely unintentional. * Remove unused local variable `tz` * Make `assert_received` actually check for `is_received` Previously, it called `is_accepted`, which was likely a copy-paste mistake from the `assert_accepted` method. * Use previously unused `args` and `kwargs` params Unlike other backends' `__reduce__` methods, the one from `RedisBackend` simply overwrites `args` and `kwargs` instead of adding to them. This change makes it more in line with other backends. * Update celery/backends/filesystem.py Co-authored-by: Gabriel Soldani <[email protected]> Co-authored-by: Asif Saif Uddin <[email protected]>
https://github.com/celery/celery.git
def find_module(module, path=None, imp=None): if imp is None: imp = import_module with cwd_in_path(): try: return imp(module) except ImportError: # Raise a more specific error if the problem is that one of the # dot-separated segments of the module name is not a package. if '.' in module: parts = module.split('.') for i, part in enumerate(parts[:-1]): package = '.'.join(parts[:i + 1]) try: mpart = imp(package) except ImportError: # Break out and re-raise the original ImportError # instead. break try: mpart.__path__ except AttributeError: raise NotAPackage(package) raise
token_counts: 105 | file_name: imports.py | language: Python | path: celery/utils/imports.py | commit_id: 59263b0409e3f02dc16ca8a3bd1e42b5a3eba36d | repo: celery | complexity: 7
id: 58,910 | vocab_size: 18 | ast_levels: 12 | nloc: 14 | n_ast_nodes: 126 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 21 | n_whitespaces: 81
_generate_code_example
Adds default code example for blocks (#6755) * Adds method to generate a default code example for block subclasses * Adds test for code example to block standard test suite * Updates test case where no example is configured * Addresses review comments
https://github.com/PrefectHQ/prefect.git
def _generate_code_example(cls) -> str: qualified_name = to_qualified_name(cls) module_str = ".".join(qualified_name.split(".")[:-1]) class_name = cls.__name__ block_variable_name = f'{cls.get_block_type_slug().replace("-", "_")}_block' return dedent( f )
token_counts: 47 | file_name: core.py | language: Python | path: src/prefect/blocks/core.py | commit_id: d68e5c0d8f0e29810b9b75ed554a4c549fa18f2c | repo: prefect | complexity: 1
id: 188,928 | vocab_size: 53 | ast_levels: 14 | nloc: 54 | n_ast_nodes: 334 | n_identifiers: 33 | n_ast_errors: 0 | n_words: 87 | n_whitespaces: 251
generate_css
Automated upgrade of code to python 3.7+ Done by https://github.com/asottile/pyupgrade Consists mainly of moving string formatting to f-strings and removing encoding declarations
https://github.com/kovidgoyal/calibre.git
def generate_css(self, dest_dir, docx, notes_nopb, nosupsub): ef = self.fonts.embed_fonts(dest_dir, docx) s = if not notes_nopb: s += s = s + if nosupsub: s = s + body_color = '' if self.body_color.lower() not in ('currentcolor', 'inherit'): body_color = f'color: {self.body_color};' prefix = textwrap.dedent(s) % (self.body_font_family, self.body_font_size, body_color) if ef: prefix = ef + '\n' + prefix ans = [] for (cls, css) in sorted(itervalues(self.classes), key=lambda x:x[0]): b = (f'\t{k}: {v};' for k, v in iteritems(css)) b = '\n'.join(b) ans.append('.{} {{\n{}\n}}\n'.format(cls, b.rstrip(';'))) return prefix + '\n' + '\n'.join(ans)
token_counts: 184 | file_name: styles.py | language: Python | path: src/calibre/ebooks/docx/styles.py | commit_id: eb78a761a99ac20a6364f85e12059fec6517d890 | repo: calibre | complexity: 7
id: 133,418 | vocab_size: 52 | ast_levels: 12 | nloc: 9 | n_ast_nodes: 90 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 64 | n_whitespaces: 195
get_serialization_context
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def get_serialization_context(self, job_id=None): # This function needs to be protected by a lock, because it will be # called by`register_class_for_serialization`, as well as the import # thread, from different threads. Also, this function will recursively # call itself, so we use RLock here. if job_id is None: job_id = self.current_job_id with self.lock: if job_id not in self.serialization_context_map: self.serialization_context_map[ job_id ] = serialization.SerializationContext(self) return self.serialization_context_map[job_id]
token_counts: 53 | file_name: worker.py | language: Python | path: python/ray/worker.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 3
id: 294,222 | vocab_size: 99 | ast_levels: 13 | nloc: 65 | n_ast_nodes: 648 | n_identifiers: 47 | n_ast_errors: 0 | n_words: 155 | n_whitespaces: 493
test_sync_request
Exclude hidden entities from google_assistant (#68554)
https://github.com/home-assistant/core.git
async def test_sync_request(hass_fixture, assistant_client, auth_header): entity_registry = mock_registry(hass_fixture) entity_entry1 = entity_registry.async_get_or_create( "switch", "test", "switch_config_id", suggested_object_id="config_switch", entity_category="config", ) entity_entry2 = entity_registry.async_get_or_create( "switch", "test", "switch_diagnostic_id", suggested_object_id="diagnostic_switch", entity_category="diagnostic", ) entity_entry3 = entity_registry.async_get_or_create( "switch", "test", "switch_system_id", suggested_object_id="system_switch", entity_category="system", ) entity_entry4 = entity_registry.async_get_or_create( "switch", "test", "switch_hidden_integration_id", suggested_object_id="hidden_integration_switch", hidden_by=er.RegistryEntryHider.INTEGRATION, ) entity_entry5 = entity_registry.async_get_or_create( "switch", "test", "switch_hidden_user_id", suggested_object_id="hidden_user_switch", hidden_by=er.RegistryEntryHider.USER, ) # These should not show up in the sync request hass_fixture.states.async_set(entity_entry1.entity_id, "on") hass_fixture.states.async_set(entity_entry2.entity_id, "something_else") hass_fixture.states.async_set(entity_entry3.entity_id, "blah") hass_fixture.states.async_set(entity_entry4.entity_id, "foo") hass_fixture.states.async_set(entity_entry5.entity_id, "bar") reqid = "5711642932632160983" data = {"requestId": reqid, "inputs": [{"intent": "action.devices.SYNC"}]} result = await assistant_client.post( ga.const.GOOGLE_ASSISTANT_API_ENDPOINT, data=json.dumps(data), headers=auth_header, ) assert result.status == HTTPStatus.OK body = await result.json() assert body.get("requestId") == reqid devices = body["payload"]["devices"] assert sorted(dev["id"] for dev in devices) == sorted( dev["id"] for dev in DEMO_DEVICES ) for dev in devices: assert dev["id"] not in CLOUD_NEVER_EXPOSED_ENTITIES for dev, demo in zip( sorted(devices, key=lambda d: d["id"]), sorted(DEMO_DEVICES, key=lambda d: d["id"]), ): assert dev["name"] == demo["name"] assert set(dev["traits"]) == set(demo["traits"]) assert dev["type"] == demo["type"]
token_counts: 382 | file_name: test_google_assistant.py | language: Python | path: tests/components/google_assistant/test_google_assistant.py | commit_id: dc0c3a4d2dde52c4bb485e8b9758d517e1141703 | repo: core | complexity: 5
id: 68,771 | vocab_size: 82 | ast_levels: 26 | nloc: 29 | n_ast_nodes: 500 | n_identifiers: 40 | n_ast_errors: 0 | n_words: 110 | n_whitespaces: 80
get_valuation_rate
chore: `get_valuation_rate` sider fixes - Use qb instead of db.sql - Don't use `args` as argument for function - Cleaner variable names
https://github.com/frappe/erpnext.git
def get_valuation_rate(data): from frappe.query_builder.functions import Sum item_code, company = data.get("item_code"), data.get("company") valuation_rate = 0.0 bin_table = frappe.qb.DocType("Bin") wh_table = frappe.qb.DocType("Warehouse") item_valuation = ( frappe.qb.from_(bin_table) .join(wh_table) .on(bin_table.warehouse == wh_table.name) .select((Sum(bin_table.stock_value) / Sum(bin_table.actual_qty)).as_("valuation_rate")) .where((bin_table.item_code == item_code) & (wh_table.company == company)) ).run(as_dict=True)[0] valuation_rate = item_valuation.get("valuation_rate") if (valuation_rate is not None) and valuation_rate <= 0: # Explicit null value check. If None, Bins don't exist, neither does SLE sle = frappe.qb.DocType("Stock Ledger Entry") last_val_rate = ( frappe.qb.from_(sle) .select(sle.valuation_rate) .where((sle.item_code == item_code) & (sle.valuation_rate > 0) & (sle.is_cancelled == 0)) .orderby(sle.posting_date, order=frappe.qb.desc) .orderby(sle.posting_time, order=frappe.qb.desc) .orderby(sle.creation, order=frappe.qb.desc) .limit(1) ).run(as_dict=True) valuation_rate = flt(last_val_rate[0].get("valuation_rate")) if last_val_rate else 0 if not valuation_rate: valuation_rate = frappe.db.get_value("Item", item_code, "valuation_rate") return flt(valuation_rate)
token_counts: 311 | file_name: bom.py | language: Python | path: erpnext/manufacturing/doctype/bom/bom.py | commit_id: 7e41d84a116f2acd03984c98ec4eaa8e50ddc1d3 | repo: erpnext | complexity: 5
id: 246,344 | vocab_size: 34 | ast_levels: 7 | nloc: 3 | n_ast_nodes: 39 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 40 | n_whitespaces: 96
test_push_unread_count_message_count
Prevent duplicate push notifications for room reads (#11835)
https://github.com/matrix-org/synapse.git
def test_push_unread_count_message_count(self): # Carry out common push count tests and setup self._test_push_unread_count() # Carry out our option-value specific test # # We're counting every unread message, so there should now be 3 since the # last read receipt self._check_push_attempt(6, 3)
token_counts: 19 | file_name: test_http.py | language: Python | path: tests/push/test_http.py | commit_id: 40771773909cb03d9296e3f0505e4e32372f10aa | repo: synapse | complexity: 1
id: 5,889 | vocab_size: 37 | ast_levels: 11 | nloc: 19 | n_ast_nodes: 173 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 50 | n_whitespaces: 151
run_experiment_with_visualization
Use tempfile to automatically garbage collect data and modeling artifacts in ludwig integration tests. (#1642) * Use tmpdir to automatically garbage collect data and modeling artifacts in ludwig integration tests.
https://github.com/ludwig-ai/ludwig.git
def run_experiment_with_visualization(input_features, output_features, dataset): output_directory = os.path.dirname(dataset) config = { "input_features": input_features, "output_features": output_features, "combiner": {"type": "concat", "fc_size": 14}, "training": {"epochs": 2}, } args = { "config": config, "skip_save_processed_input": False, "skip_save_progress": False, "skip_save_unprocessed_output": False, "skip_save_eval_stats": False, "dataset": dataset, "output_directory": output_directory, } _, _, _, _, experiment_dir = experiment_cli(**args) return experiment_dir
token_counts: 101 | file_name: test_visualization.py | language: Python | path: tests/integration_tests/test_visualization.py | commit_id: 4fb8f63181f5153b4f6778c6ef8dad61022c4f3f | repo: ludwig | complexity: 1
id: 292,953 | vocab_size: 30 | ast_levels: 12 | nloc: 18 | n_ast_nodes: 194 | n_identifiers: 21 | n_ast_errors: 0 | n_words: 42 | n_whitespaces: 136
_mock_powerwall_with_fixtures
Add sensor to expose Powerwall backup reserve percentage (#66393)
https://github.com/home-assistant/core.git
async def _mock_powerwall_with_fixtures(hass): meters = await _async_load_json_fixture(hass, "meters.json") sitemaster = await _async_load_json_fixture(hass, "sitemaster.json") site_info = await _async_load_json_fixture(hass, "site_info.json") status = await _async_load_json_fixture(hass, "status.json") device_type = await _async_load_json_fixture(hass, "device_type.json") return _mock_powerwall_return_value( site_info=SiteInfo(site_info), charge=47.34587394586, sitemaster=SiteMaster(sitemaster), meters=MetersAggregates(meters), grid_services_active=True, grid_status=GridStatus.CONNECTED, status=PowerwallStatus(status), device_type=DeviceType(device_type["device_type"]), serial_numbers=["TG0123456789AB", "TG9876543210BA"], backup_reserve_percentage=15.0, )
token_counts: 123 | file_name: mocks.py | language: Python | path: tests/components/powerwall/mocks.py | commit_id: d077c3b8d106e7e102a5a58a8a07ed381ff06567 | repo: core | complexity: 1
id: 1,078 | vocab_size: 31 | ast_levels: 14 | nloc: 16 | n_ast_nodes: 137 | n_identifiers: 16 | n_ast_errors: 1 | n_words: 38 | n_whitespaces: 192
to_local_object_without_private_data_child
Renamed entities -> data subject, NDEPT -> phi tensor
https://github.com/OpenMined/PySyft.git
def to_local_object_without_private_data_child(self) -> PhiTensor: # relative from ..tensor import Tensor public_shape = getattr(self, "public_shape", None) public_dtype = getattr(self, "public_dtype", None) return Tensor( child=PhiTensor( child=FixedPrecisionTensor(value=None), data_subjects=self.data_subjects, min_vals=self.min_vals, # type: ignore max_vals=self.max_vals, # type: ignore ), public_shape=public_shape, public_dtype=public_dtype, ) @serializable(capnp_bytes=True)
ast_errors: @serializable(capnp_bytes=True)
token_counts: 79 | file_name: phi_tensor.py | language: Python | path: packages/syft/src/syft/core/tensor/autodp/phi_tensor.py | commit_id: 44fa2242416c7131fef4f00db19c5ca36af031dc | repo: PySyft | complexity: 1
id: 130,326 | vocab_size: 96 | ast_levels: 18 | nloc: 65 | n_ast_nodes: 577 | n_identifiers: 41 | n_ast_errors: 0 | n_words: 189 | n_whitespaces: 1,269
terminate_node
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def terminate_node(self, node_id): resource_group = self.provider_config["resource_group"] try: # get metadata for node metadata = self._get_node(node_id) except KeyError: # node no longer exists return if self.cache_stopped_nodes: try: # stop machine and leave all resources logger.info( f"Stopping instance {node_id}" "(to fully terminate instead, " "set `cache_stopped_nodes: False` " "under `provider` in the cluster configuration)" ) stop = get_azure_sdk_function( client=self.compute_client.virtual_machines, function_name="deallocate", ) stop(resource_group_name=resource_group, vm_name=node_id) except Exception as e: logger.warning("Failed to stop VM: {}".format(e)) else: vm = self.compute_client.virtual_machines.get( resource_group_name=resource_group, vm_name=node_id ) disks = {d.name for d in vm.storage_profile.data_disks} disks.add(vm.storage_profile.os_disk.name) try: # delete machine, must wait for this to complete delete = get_azure_sdk_function( client=self.compute_client.virtual_machines, function_name="delete" ) delete(resource_group_name=resource_group, vm_name=node_id).wait() except Exception as e: logger.warning("Failed to delete VM: {}".format(e)) try: # delete nic delete = get_azure_sdk_function( client=self.network_client.network_interfaces, function_name="delete", ) delete( resource_group_name=resource_group, network_interface_name=metadata["nic_name"], ) except Exception as e: logger.warning("Failed to delete nic: {}".format(e)) # delete ip address if "public_ip_name" in metadata: try: delete = get_azure_sdk_function( client=self.network_client.public_ip_addresses, function_name="delete", ) delete( resource_group_name=resource_group, public_ip_address_name=metadata["public_ip_name"], ) except Exception as e: logger.warning("Failed to delete public ip: {}".format(e)) # delete disks for disk in disks: try: delete = get_azure_sdk_function( client=self.compute_client.disks, function_name="delete" ) delete(resource_group_name=resource_group, disk_name=disk) except Exception as e: logger.warning("Failed to delete disk: {}".format(e))
token_counts: 337 | file_name: node_provider.py | language: Python | path: python/ray/autoscaler/_private/_azure/node_provider.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 11
id: 299,235 | vocab_size: 19 | ast_levels: 11 | nloc: 6 | n_ast_nodes: 79 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 19 | n_whitespaces: 69
async_added_to_hass
Restore ONVIF sensors (#70393) Co-authored-by: Paulus Schoutsen <[email protected]>
https://github.com/home-assistant/core.git
async def async_added_to_hass(self): self.async_on_remove( self.device.events.async_add_listener(self.async_write_ha_state) ) if (last_state := await self.async_get_last_state()) is not None: self._attr_is_on = last_state.state == STATE_ON
token_counts: 47 | file_name: binary_sensor.py | language: Python | path: homeassistant/components/onvif/binary_sensor.py | commit_id: 29a2df3dfcf3b5d1fb6cf20b413e024eb0ebf597 | repo: core | complexity: 2
id: 110,576 | vocab_size: 11 | ast_levels: 8 | nloc: 3 | n_ast_nodes: 54 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 32
get_extent
Reparametrize offsetbox calculations in terms of bboxes. Passing a single bbox instead of (xdescent, ydescent, width, height) separately is easier to follow (see e.g. the changes in VPacker and HPacker, which no longer have to repeatedly pack/unpack whd_list), and avoids having to figure out e.g. the sign of the descents and whether width/height includes the descents, for example. Currently get_offset keeps a back compatible signature (we *could* consider killing the old signature but let's not do that for now), and _get_bbox_and_child_offsets is private because I *may* want to later also change the convention to make offsets relative to the bbox (0, 0) point rather than the bbox lower-left corner.
https://github.com/matplotlib/matplotlib.git
def get_extent(self, renderer): bbox = self.get_bbox(renderer) return bbox.width, bbox.height, -bbox.x0, -bbox.y0
token_counts: 34 | file_name: offsetbox.py | language: Python | path: lib/matplotlib/offsetbox.py | commit_id: de2192589f8ea50c9dc90be87b649399ff623feb | repo: matplotlib | complexity: 1
id: 100,972 | vocab_size: 30 | ast_levels: 13 | nloc: 12 | n_ast_nodes: 186 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 46 | n_whitespaces: 162
_output_startup_info
Training: Add setting option to save optimizer weights
https://github.com/deepfakes/faceswap.git
def _output_startup_info(self): logger.debug("Launching Monitor") logger.info("===================================================") logger.info(" Starting") if self._args.preview: logger.info(" Using live preview") if sys.stdout.isatty(): logger.info(" Press '%s' to save and quit", "Stop" if self._args.redirect_gui or self._args.colab else "ENTER") if not self._args.redirect_gui and not self._args.colab and sys.stdout.isatty(): logger.info(" Press 'S' to save model weights immediately") logger.info("===================================================")
token_counts: 103 | file_name: train.py | language: Python | path: scripts/train.py | commit_id: 06468c97d475c0125375e77aad3f4fc1a87e8fe6 | repo: faceswap | complexity: 8
id: 290,581 | vocab_size: 32 | ast_levels: 13 | nloc: 14 | n_ast_nodes: 132 | n_identifiers: 19 | n_ast_errors: 1 | n_words: 41 | n_whitespaces: 154
async_cluster_exists
Fix ZHA configuration APIs (#81874) * Fix ZHA configuration loading and saving issues * add tests
https://github.com/home-assistant/core.git
def async_cluster_exists(hass, cluster_id, skip_coordinator=True): zha_gateway = hass.data[DATA_ZHA][DATA_ZHA_GATEWAY] zha_devices = zha_gateway.devices.values() for zha_device in zha_devices: if skip_coordinator and zha_device.is_coordinator: continue clusters_by_endpoint = zha_device.async_get_clusters() for clusters in clusters_by_endpoint.values(): if ( cluster_id in clusters[CLUSTER_TYPE_IN] or cluster_id in clusters[CLUSTER_TYPE_OUT] ): return True return False @callback
ast_errors: @callback
token_counts: 82 | file_name: helpers.py | language: Python | path: homeassistant/components/zha/core/helpers.py | commit_id: ebffe0f33b61e87c348bb7c99714c1d551623f9c | repo: core | complexity: 7
id: 134,156 | vocab_size: 12 | ast_levels: 11 | nloc: 7 | n_ast_nodes: 64 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 12 | n_whitespaces: 49
_resize_image
Benchmarking Ray Data bulk ingest as input file size changes. (#29296) This PR adds a benchmark which takes work from https://github.com/anyscale/air-benchmarks and makes it run as a release test. Full metrics are stored in Databricks. Signed-off-by: Cade Daniel <[email protected]>
https://github.com/ray-project/ray.git
def _resize_image(image, height, width): return tf.compat.v1.image.resize( image, [height, width], method=tf.image.ResizeMethod.BILINEAR, align_corners=False, )
token_counts: 44 | file_name: tf_utils.py | language: Python | path: release/air_tests/air_benchmarks/mlperf-train/tf_utils.py | commit_id: 02f911ce78137cb63ecb685a8ef8e56dcb60062c | repo: ray | complexity: 1
id: 286,857 | vocab_size: 12 | ast_levels: 11 | nloc: 11 | n_ast_nodes: 71 | n_identifiers: 12 | n_ast_errors: 1 | n_words: 14 | n_whitespaces: 25
get_all_holiday_exchange_short_names
Addition of exchange holiday functionality under stocks/th (#3486) * Addition of exchange holiday calendars using PandasMarketCalendar * website update for holidays functionality * Disable pylint too many attributes * Changes to not show index for dataframe and include metavar * Setting of default value for holidays * Merge + black linter * test fix Co-authored-by: james <[email protected]> Co-authored-by: Jeroen Bouma <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def get_all_holiday_exchange_short_names() -> pd.DataFrame: exchange_short_names = mcal.calendar_registry.get_calendar_names() df = pd.DataFrame(exchange_short_names, columns=["short_name"]) return df @log_start_end(log=logger)
ast_errors: @log_start_end(log=logger)
token_counts: 34 | file_name: pandas_market_cal_model.py | language: Python | path: openbb_terminal/stocks/tradinghours/pandas_market_cal_model.py | commit_id: 7e4a657333c8b7bb1ebdcb7a4c8f06e8dc0d66f6 | repo: OpenBBTerminal | complexity: 1
id: 125,579 | vocab_size: 4 | ast_levels: 8 | nloc: 48 | n_ast_nodes: 32 | n_identifiers: 5 | n_ast_errors: 2 | n_words: 4 | n_whitespaces: 11
test_local_clusters
[core] ray.init defaults to an existing Ray instance if there is one (#26678) ray.init() will currently start a new Ray instance even if one is already existing, which is very confusing if you are a new user trying to go from local development to a cluster. This PR changes it so that, when no address is specified, we first try to find an existing Ray cluster that was created through `ray start`. If none is found, we will start a new one. This makes two changes to the ray.init() resolution order: 1. When `ray start` is called, the started cluster address was already written to a file called `/tmp/ray/ray_current_cluster`. For ray.init() and ray.init(address="auto"), we will first check this local file for an existing cluster address. The file is deleted on `ray stop`. If the file is empty, autodetect any running cluster (legacy behavior) if address="auto", or we will start a new local Ray instance if address=None. 2. When ray.init(address="local") is called, we will create a new local Ray instance, even if one is already existing. This behavior seems to be necessary mainly for `ray.client` use cases. This also surfaces the logs about which Ray instance we are connecting to. Previously these were hidden because we didn't set up the log until after connecting to Ray. So now Ray will log one of the following messages during ray.init: ``` (Connecting to existing Ray cluster at address: <IP>...) ...connection... (Started a local Ray cluster.| Connected to Ray Cluster.)( View the dashboard at <URL>) ``` Note that this changes the dashboard URL to be printed with `ray.init()` instead of when the dashboard is first started. Co-authored-by: Eric Liang <[email protected]>
https://github.com/ray-project/ray.git
def test_local_clusters(): driver_template =
""" import ray info = ray.client({address}).namespace("")[email protected]
173
test_client_builder.py
Python
python/ray/tests/test_client_builder.py
55a0f7bb2db941d8c6ff93f55e4b3193f404ddf0
ray
1
id: 268,682 | vocab_size: 6 | ast_levels: 6 | nloc: 3 | n_ast_nodes: 19 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
usable
ansible-test - Improve container management. (#78550) See changelogs/fragments/ansible-test-container-management.yml for details.
https://github.com/ansible/ansible.git
def usable(cls) -> bool: return False
token_counts: 10 | file_name: runme.py | language: Python | path: test/integration/targets/ansible-test-container/runme.py | commit_id: cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc | repo: ansible | complexity: 1
id: 191,407 | vocab_size: 34 | ast_levels: 9 | nloc: 7 | n_ast_nodes: 76 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 43 | n_whitespaces: 67
test_document_lookups_too_many
Harrison/add react chain (#24) from https://arxiv.org/abs/2210.03629 still need to think if docstore abstraction makes sense
https://github.com/hwchase17/langchain.git
def test_document_lookups_too_many() -> None: page = Document(page_content=_PAGE_CONTENT) # Start with lookup on "framework". output = page.lookup("framework") assert output == "(Result 1/1) It is a really cool framework." # Now try again, should be exhausted. output = page.lookup("framework") assert output == "No More Results"
token_counts: 39 | file_name: test_document.py | language: Python | path: tests/unit_tests/docstore/test_document.py | commit_id: ce7b14b84381c766ae42a0f71953b2a56c024dbb | repo: langchain | complexity: 1
id: 289,001 | vocab_size: 21 | ast_levels: 11 | nloc: 10 | n_ast_nodes: 106 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 26 | n_whitespaces: 101
async_added_to_hass
Adjust distance unit check in gdacs (#80235) * Adjust length unit check in gdacs * Use system compare * Use is not == * Apply suggestion Co-authored-by: Erik Montnemery <[email protected]> Co-authored-by: Erik Montnemery <[email protected]>
https://github.com/home-assistant/core.git
async def async_added_to_hass(self) -> None: if self.hass.config.units is IMPERIAL_SYSTEM: self._attr_unit_of_measurement = LENGTH_MILES self._remove_signal_delete = async_dispatcher_connect( self.hass, f"gdacs_delete_{self._external_id}", self._delete_callback ) self._remove_signal_update = async_dispatcher_connect( self.hass, f"gdacs_update_{self._external_id}", self._update_callback )
token_counts: 58 | file_name: geo_location.py | language: Python | path: homeassistant/components/gdacs/geo_location.py | commit_id: 689dcb02dd46dd849593b9bafb4ed1844977fbe4 | repo: core | complexity: 2
id: 104,414 | vocab_size: 6 | ast_levels: 7 | nloc: 2 | n_ast_nodes: 28 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
slice
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
https://github.com/huggingface/datasets.git
def slice(self, *args, **kwargs): raise NotImplementedError()
token_counts: 16 | file_name: table.py | language: Python | path: src/datasets/table.py | commit_id: e35be138148333078284b942ccc9ed7b1d826f97 | repo: datasets | complexity: 1
id: 60,298 | vocab_size: 30 | ast_levels: 9 | nloc: 9 | n_ast_nodes: 156 | n_identifiers: 21 | n_ast_errors: 0 | n_words: 35 | n_whitespaces: 98
test_crop_of_crop
Balanced joint maximum mean discrepancy for deep transfer learning
https://github.com/jindongwang/transferlearning.git
def test_crop_of_crop(self): n = coord_net_spec() offset = random.randint(0, 10) ax, a, b = coord_map_from_to(n.deconv, n.data) n.crop = L.Crop(n.deconv, n.data, axis=2, offset=offset) ax_crop, a_crop, b_crop = coord_map_from_to(n.crop, n.data) self.assertEquals(ax, ax_crop) self.assertEquals(a, a_crop) self.assertEquals(b + offset, b_crop)
token_counts: 103 | file_name: test_coord_map.py | language: Python | path: code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py | commit_id: cc4d0564756ca067516f71718a3d135996525909 | repo: transferlearning | complexity: 1
id: 259,222 | vocab_size: 137 | ast_levels: 12 | nloc: 64 | n_ast_nodes: 854 | n_identifiers: 33 | n_ast_errors: 0 | n_words: 346 | n_whitespaces: 772
test_ohe_infrequent_multiple_categories
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_ohe_infrequent_multiple_categories(): X = np.c_[ [0, 1, 3, 3, 3, 3, 2, 0, 3], [0, 0, 5, 1, 1, 10, 5, 5, 0], [1, 0, 1, 0, 1, 0, 1, 0, 1], ] ohe = OneHotEncoder( categories="auto", max_categories=3, handle_unknown="infrequent_if_exist" ) # X[:, 0] 1 and 2 are infrequent # X[:, 1] 1 and 10 are infrequent # X[:, 2] nothing is infrequent X_trans = ohe.fit_transform(X).toarray() assert_array_equal(ohe.infrequent_categories_[0], [1, 2]) assert_array_equal(ohe.infrequent_categories_[1], [1, 10]) assert_array_equal(ohe.infrequent_categories_[2], None) # 'infrequent' is used to denote the infrequent categories # For the first column, 1 and 2 have the same frequency. In this case, # 1 will be chosen to be the feature name because is smaller lexiconically for get_names in ["get_feature_names", "get_feature_names_out"]: feature_names = getattr(ohe, get_names)() assert_array_equal( [ "x0_0", "x0_3", "x0_infrequent_sklearn", "x1_0", "x1_5", "x1_infrequent_sklearn", "x2_0", "x2_1", ], feature_names, ) expected = [ [1, 0, 0, 1, 0, 0, 0, 1], [0, 0, 1, 1, 0, 0, 1, 0], [0, 1, 0, 0, 1, 0, 0, 1], [0, 1, 0, 0, 0, 1, 1, 0], [0, 1, 0, 0, 0, 1, 0, 1], [0, 1, 0, 0, 0, 1, 1, 0], [0, 0, 1, 0, 1, 0, 0, 1], [1, 0, 0, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 0, 0, 1], ] assert_allclose(expected, X_trans) X_test = [[3, 1, 2], [4, 0, 3]] X_test_trans = ohe.transform(X_test) # X[:, 2] does not have an infrequent category, thus it is encoded as all # zeros expected = [[0, 1, 0, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0]] assert_allclose(expected, X_test_trans.toarray()) X_inv = ohe.inverse_transform(X_test_trans) expected_inv = np.array( [[3, "infrequent_sklearn", None], ["infrequent_sklearn", 0, None]], dtype=object ) assert_array_equal(expected_inv, X_inv) # error for unknown categories ohe = OneHotEncoder( categories="auto", max_categories=3, handle_unknown="error" ).fit(X) with pytest.raises(ValueError, match="Found unknown categories"): ohe.transform(X_test) # only infrequent or known categories X_test = [[1, 1, 1], [3, 10, 0]] X_test_trans = ohe.transform(X_test) expected = [[0, 0, 1, 0, 0, 1, 0, 1], [0, 1, 0, 0, 0, 1, 1, 0]] assert_allclose(expected, X_test_trans.toarray()) X_inv = ohe.inverse_transform(X_test_trans) expected_inv = np.array( [["infrequent_sklearn", "infrequent_sklearn", 1], [3, "infrequent_sklearn", 0]], dtype=object, ) assert_array_equal(expected_inv, X_inv)
632
test_encoders.py
Python
sklearn/preprocessing/tests/test_encoders.py
7f0006c8aad1a09621ad19c3db19c3ff0555a183
scikit-learn
2
247,884
31
12
15
133
19
0
39
184
get_success_or_raise
Remove redundant `get_success` calls in test code (#12346) There are a bunch of places we call get_success on an immediate value, which is unnecessary. Let's rip them out, and remove the redundant functionality in get_success and friends.
https://github.com/matrix-org/synapse.git
def get_success_or_raise(self, d, by=0.0): deferred: Deferred[TV] = ensureDeferred(d) results: list = [] deferred.addBoth(results.append) self.pump(by=by) if not results: self.fail( "Success result expected on {!r}, found no result instead".format( deferred ) ) result = results[0] if isinstance(result, Failure): result.raiseException() return result
83
unittest.py
Python
tests/unittest.py
33ebee47e4e96a2b6fdf72091769e59034dc550f
synapse
3
88,592
35
14
14
146
16
0
39
172
test_no_configs
ref(stacktrace_link): Add more than one code mapping in the tests (#41409) Include more than one code mapping in the setup code. Cleaning up a bit how we tag the transactions. This makes the PR for WOR-2395 a little easier to read.
https://github.com/getsentry/sentry.git
def test_no_configs(self): # new project that has no configurations set up for it project = self.create_project( name="bloop", organization=self.organization, teams=[self.create_team(organization=self.organization)], ) response = self.get_success_response( self.organization.slug, project.slug, qs_params={"file": self.filepath} ) assert response.data == { "config": None, "sourceUrl": None, "integrations": [serialized_integration(self.integration)], }
90
test_project_stacktrace_link.py
Python
tests/sentry/api/endpoints/test_project_stacktrace_link.py
2e0d2c856eb17a842c67d88363bed92c99578c20
sentry
1
135,598
2
6
9
13
2
0
2
5
test_single_worker_failure
[Train] Immediately fail on any worker failure (#29927) Signed-off-by: Amog Kamsetty [email protected] Follow up to #28314 #28314 did not cover all the cases. In particular, if one worker fails, but the other workers are hanging, then our shutdown logic will also hang since it's waiting for the actors to finish running their methods. Instead, we want to force shutdown all workers regardless of if they have finished their method or not. This PR also adds an e2e integration test.
https://github.com/ray-project/ray.git
def test_single_worker_failure(ray_start_4_cpus):
42
test_torch_trainer.py
Python
python/ray/train/tests/test_torch_trainer.py
152a8b900d2a0d3c462ed37a44916c26540826c5
ray
1
275,523
9
8
2
32
4
0
9
23
_call_if_callable
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _call_if_callable(self, param): return param() if callable(param) else param
19
optimizer_v2.py
Python
keras/optimizers/optimizer_v2/optimizer_v2.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
2
265,388
48
15
16
264
19
0
67
203
_clean_side
#9102: Enable creating terminations in conjunction with cables via REST API
https://github.com/netbox-community/netbox.git
def _clean_side(self, side): assert side in 'ab', f"Invalid side designation: {side}" device = self.cleaned_data.get(f'side_{side}_device') content_type = self.cleaned_data.get(f'side_{side}_type') name = self.cleaned_data.get(f'side_{side}_name') if not device or not content_type or not name: return None model = content_type.model_class() try: termination_object = model.objects.get(device=device, name=name) if termination_object.cable is not None: raise forms.ValidationError(f"Side {side.upper()}: {device} {termination_object} is already connected") except ObjectDoesNotExist: raise forms.ValidationError(f"{side.upper()} side termination not found: {device} {name}") setattr(self.instance, f'{side}_terminations', [termination_object]) return termination_object
127
bulk_import.py
Python
netbox/dcim/forms/bulk_import.py
0b86326435fe6ea07ef376a81ff6fb592906fafc
netbox
6
322,894
9
12
5
64
6
0
10
53
string_position
Add NLP model interpretation (#1752) * upload NLP interpretation * fix problems and relocate project * remove abandoned picture * remove abandoned picture * fix dead link in README * fix dead link in README * fix code style problems * fix CR round 1 * remove .gitkeep files * fix code style * fix file encoding problem * fix code style * delete duplicated files due to directory rebuild * fix CR round 2 * fix code style * fix ernie tokenizer * fix code style * fix problem from CR round 1 * fix bugs * fix README * remove duplicated files * deal with diff of old and new tokenizer results * fix CR round 4 * fix code style * add missing dependence * fix broken import path * move some data file to cloud * MRC upper case to lower case Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: binlinquge <xxx> Co-authored-by: Guo Sheng <[email protected]>
https://github.com/PaddlePaddle/PaddleNLP.git
def string_position(self, id_): if self.bow: return self.string_start[self.positions[id_]] else: return self.string_start[[self.positions[id_]]]
41
lime_text.py
Python
examples/model_interpretation/task/senti/LIME/lime_text.py
93cae49c0c572b5c1ac972759140fbe924b0374d
PaddleNLP
2
101,420
63
16
14
184
19
0
80
331
update_config
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
https://github.com/deepfakes/faceswap.git
def update_config(self) -> None: for section, items in self.tk_vars.items(): for item, value in items.items(): try: new_value = str(value.get()) except tk.TclError as err: # When manually filling in text fields, blank values will # raise an error on numeric data types so return 0 logger.debug("Error getting value. Defaulting to 0. Error: %s", str(err)) new_value = str(0) old_value = self._config.config[section][item] if new_value != old_value: logger.trace("Updating config: %s, %s from %s to %s", # type: ignore section, item, old_value, new_value) self._config.config[section][item] = new_value
113
preview.py
Python
tools/preview/preview.py
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
faceswap
5
132,867
20
9
8
66
9
0
22
79
all_trials_are_terminated
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def all_trials_are_terminated(self) -> bool: if not self._snapshot: return False last_snapshot = self._snapshot[-1] return all( last_snapshot[trial_id] == Trial.TERMINATED for trial_id in last_snapshot )
41
mock.py
Python
python/ray/tune/utils/mock.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
3
133,003
26
12
9
109
19
0
30
104
_generate_nccl_uid
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def _generate_nccl_uid(self, key): group_uid = nccl_util.get_nccl_unique_id() store_name = get_store_name(key) # Avoid a potential circular dependency in ray/actor.py from ray.util.collective.util import NCCLUniqueIDStore store = NCCLUniqueIDStore.options(name=store_name, lifetime="detached").remote( store_name ) ray.get([store.set_id.remote(group_uid)]) return group_uid
67
nccl_collective_group.py
Python
python/ray/util/collective/collective_group/nccl_collective_group.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
176,134
8
9
10
30
3
0
8
43
test_edgeql_functions_contains_05
Fix builtin polymorphic USING SQL functions with object arguments (#3319) We actually don't have a lot of these (most use USING SQL EXPRESSION). Fix is simple: don't try to pass the type to polymorphic arguments. Fixes #3318.
https://github.com/edgedb/edgedb.git
async def test_edgeql_functions_contains_05(self): await self.assert_query_result( r, [True], )
18
test_edgeql_functions.py
Python
tests/test_edgeql_functions.py
529247861f25dc9f55672f250473d6a7f0148e4e
edgedb
1
275,773
22
11
7
99
12
1
25
65
_remove_long_seq
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _remove_long_seq(maxlen, seq, label): new_seq, new_label = [], [] for x, y in zip(seq, label): if len(x) < maxlen: new_seq.append(x) new_label.append(y) return new_seq, new_label @keras_export("keras.preprocessing.sequence.TimeseriesGenerator")
@keras_export("keras.preprocessing.sequence.TimeseriesGenerator")
55
sequence.py
Python
keras/preprocessing/sequence.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
3
260,720
110
14
44
345
26
0
186
761
fit
MAINT Parameters validation for `SimpleImputer` (#24109) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: jeremie du boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def fit(self, X, y=None): self._validate_params() if self.verbose != "deprecated": warnings.warn( "The 'verbose' parameter was deprecated in version " "1.1 and will be removed in 1.3. A warning will " "always be raised upon the removal of empty columns " "in the future version.", FutureWarning, ) X = self._validate_input(X, in_fit=True) # default fill_value is 0 for numerical input and "missing_value" # otherwise if self.fill_value is None: if X.dtype.kind in ("i", "u", "f"): fill_value = 0 else: fill_value = "missing_value" else: fill_value = self.fill_value # fill_value should be numerical in case of numerical input if ( self.strategy == "constant" and X.dtype.kind in ("i", "u", "f") and not isinstance(fill_value, numbers.Real) ): raise ValueError( "'fill_value'={0} is invalid. Expected a " "numerical value when imputing numerical " "data".format(fill_value) ) if sp.issparse(X): # missing_values = 0 not allowed with sparse data as it would # force densification if self.missing_values == 0: raise ValueError( "Imputation not possible when missing_values " "== 0 and input is sparse. Provide a dense " "array instead." ) else: self.statistics_ = self._sparse_fit( X, self.strategy, self.missing_values, fill_value ) else: self.statistics_ = self._dense_fit( X, self.strategy, self.missing_values, fill_value ) return self
198
_base.py
Python
sklearn/impute/_base.py
593524d33bc79507eea07b54229f312d48e0a95f
scikit-learn
9
118,726
15
10
6
81
11
0
15
61
test_just_disabled
Add disabled to select_slider + tests + snapshots (#4314)
https://github.com/streamlit/streamlit.git
def test_just_disabled(self): st.select_slider( "the label", options=["red", "orange", "yellow"], disabled=True ) c = self.get_delta_from_queue().new_element.slider self.assertEqual(c.disabled, True)
47
select_slider_test.py
Python
lib/tests/streamlit/select_slider_test.py
8795e0c41c546880368c8bb9513b0f2ae9220e99
streamlit
1
31,896
46
22
16
203
17
0
61
173
assert_tensors_close
Add MVP model (#17787) * Add MVP model * Update README * Remove useless module * Update docs * Fix bugs in tokenizer * Remove useless test * Remove useless module * Update vocab * Remove specifying * Remove specifying * Add #Copied ... statement * Update paper link * Remove useless TFMvp * Add #Copied ... statement * Fix style in test mvp model * Fix some typos * Fix properties of unset special tokens in non verbose mode * Update paper link * Update MVP doc * Update MVP doc * Fix README * Fix typos in docs * Update docs
https://github.com/huggingface/transformers.git
def assert_tensors_close(a, b, atol=1e-12, prefix=""): if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg)
117
test_modeling_mvp.py
Python
tests/models/mvp/test_modeling_mvp.py
3cff4cc58730409c68f8afa2f3b9c61efa0e85c6
transformers
7
154,295
19
11
9
91
9
0
25
112
copy
PERF-#4842: `copy` should not trigger any previous computations (#4843) Signed-off-by: Myachev <[email protected]>
https://github.com/modin-project/modin.git
def copy(self): return self.__constructor__( self._partitions, self._index_cache.copy() if self._index_cache is not None else None, self._columns_cache.copy() if self._columns_cache is not None else None, self._row_lengths_cache, self._column_widths_cache, self._dtypes, )
62
dataframe.py
Python
modin/core/dataframe/pandas/dataframe/dataframe.py
3ca5005696a9a9cb7cce7d8986e34d6987aa8074
modin
3
168,200
84
14
28
300
31
0
121
420
remove_categories
PERF cache find_stack_level (#48023) cache stacklevel
https://github.com/pandas-dev/pandas.git
def remove_categories(self, removals, inplace=no_default): if inplace is not no_default: warn( "The `inplace` parameter in pandas.Categorical." "remove_categories is deprecated and will be removed in " "a future version. Removing unused categories will always " "return a new Categorical object.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) else: inplace = False inplace = validate_bool_kwarg(inplace, "inplace") if not is_list_like(removals): removals = [removals] removal_set = set(removals) not_included = removal_set - set(self.dtype.categories) new_categories = [c for c in self.dtype.categories if c not in removal_set] # GH 10156 if any(isna(removals)): not_included = {x for x in not_included if notna(x)} new_categories = [x for x in new_categories if notna(x)] if len(not_included) != 0: raise ValueError(f"removals must all be in old categories: {not_included}") with catch_warnings(): simplefilter("ignore") return self.set_categories( new_categories, ordered=self.ordered, rename=False, inplace=inplace )
181
categorical.py
Python
pandas/core/arrays/categorical.py
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
pandas
11
259,787
143
15
64
556
51
0
200
981
fit
ENH Add sparse input support to OPTICS (#22965) Co-authored-by: huntzhan <[email protected]> Co-authored-by: Clickedbigfoot <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def fit(self, X, y=None): dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float if dtype == bool and X.dtype != bool: msg = ( "Data will be converted to boolean for" f" metric {self.metric}, to avoid this warning," " you may convert the data prior to calling fit." ) warnings.warn(msg, DataConversionWarning) X = self._validate_data(X, dtype=dtype, accept_sparse="csr") if self.metric == "precomputed" and issparse(X): with warnings.catch_warnings(): warnings.simplefilter("ignore", SparseEfficiencyWarning) # Set each diagonal to an explicit value so each point is its # own neighbor X.setdiag(X.diagonal()) memory = check_memory(self.memory) if self.cluster_method not in ["dbscan", "xi"]: raise ValueError( "cluster_method should be one of 'dbscan' or 'xi' but is %s" % self.cluster_method ) ( self.ordering_, self.core_distances_, self.reachability_, self.predecessor_, ) = memory.cache(compute_optics_graph)( X=X, min_samples=self.min_samples, algorithm=self.algorithm, leaf_size=self.leaf_size, metric=self.metric, metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs, max_eps=self.max_eps, ) # Extract clusters from the calculated orders and reachability if self.cluster_method == "xi": labels_, clusters_ = cluster_optics_xi( reachability=self.reachability_, predecessor=self.predecessor_, ordering=self.ordering_, min_samples=self.min_samples, min_cluster_size=self.min_cluster_size, xi=self.xi, predecessor_correction=self.predecessor_correction, ) self.cluster_hierarchy_ = clusters_ elif self.cluster_method == "dbscan": if self.eps is None: eps = self.max_eps else: eps = self.eps if eps > self.max_eps: raise ValueError( "Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps) ) labels_ = cluster_optics_dbscan( reachability=self.reachability_, core_distances=self.core_distances_, ordering=self.ordering_, eps=eps, ) self.labels_ = labels_ return self
352
_optics.py
Python
sklearn/cluster/_optics.py
af5b6a100357852f4c3040ff2cb06cb8691023e9
scikit-learn
11
181,811
26
10
7
72
8
0
27
92
predict
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def predict(self, features): if not self.fitted_pipeline_: raise RuntimeError( "A pipeline has not yet been optimized. Please call fit() first." ) features = self._check_dataset(features, target=None, sample_weight=None) return self.fitted_pipeline_.predict(features)
44
base.py
Python
tpot/base.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
2
8,245
168
17
42
464
55
0
292
782
explain
Explanation API and feature importance for GBM (#2564) * add docstring for explain_ig * solidify Explainer API * add gbm explainer * add dataclasses for typed explanations * add GBM feature importance * remove unused imports * add tests * fix test * extract explanation into file * rename base to explainer * remove unused kwargs * remove device placement from base explainer * use proper field from gbm
https://github.com/ludwig-ai/ludwig.git
def explain(self) -> Tuple[List[Explanation], List[float]]: self.model.model.to(DEVICE) # Convert input data into embedding tensors from the output of the model encoders. inputs_encoded = get_input_tensors(self.model, self.inputs_df) sample_encoded = get_input_tensors(self.model, self.sample_df) # For a robust baseline, we take the mean of all embeddings in the sample from the training data. # TODO(travis): pre-compute this during training from the full training dataset. baseline = [torch.unsqueeze(torch.mean(t, dim=0), 0) for t in sample_encoded] # Configure the explainer, which includes wrapping the model so its interface conforms to # the format expected by Captum. explanation_model = WrapperModule(self.model.model, self.target_feature_name) explainer = IntegratedGradients(explanation_model) # Compute attribution for each possible output feature label separately. expected_values = [] for target_idx in range(self.vocab_size): attribution, delta = explainer.attribute( tuple(inputs_encoded), baselines=tuple(baseline), target=target_idx if self.is_category_target else None, internal_batch_size=self.model.config["trainer"]["batch_size"], return_convergence_delta=True, ) # Attribution over the feature embeddings returns a vector with the same dimensions of # shape [batch_size, embedding_size], so take the sum over this vector in order to return a single # floating point attribution value per input feature. attribution = np.array([t.detach().numpy().sum(1) for t in attribution]) # Transpose to [batch_size, num_input_features] attribution = attribution.T for feature_attributions, explanation in zip(attribution, self.explanations): # Add the feature attributions to the explanation object for this row. explanation.add(feature_attributions) # The convergence delta is given per row, so take the mean to compute the # average delta for the feature. # TODO(travis): this isn't really the expected value as it is for shap, so # find a better name. expected_value = delta.detach().numpy().mean() expected_values.append(expected_value) if self.is_binary_target: # For binary targets, we only need to compute attribution for the positive class (see below). break # For binary targets, add an extra attribution for the negative class (false). if self.is_binary_target: for explanation in self.explanations: le_true = explanation.label_explanations[0] explanation.add(le_true.feature_attributions * -1) expected_values.append(expected_values[0] * -1) return self.explanations, expected_values
289
captum.py
Python
ludwig/explain/captum.py
1caede3a2da4ec71cb8650c7e45120c26948a5b9
ludwig
9
48,314
46
10
19
187
31
0
52
209
test_mark_success_no_kill
AIP45 Remove dag parsing in airflow run local (#21877)
https://github.com/apache/airflow.git
def test_mark_success_no_kill(self, caplog, get_test_dag, session): dag = get_test_dag('test_mark_state') dr = dag.create_dagrun( state=State.RUNNING, execution_date=DEFAULT_DATE, run_type=DagRunType.SCHEDULED, session=session, ) task = dag.get_task(task_id='test_mark_success_no_kill') ti = dr.get_task_instance(task.task_id) ti.refresh_from_task(task) job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True) with timeout(30): job1.run() ti.refresh_from_db() assert State.SUCCESS == ti.state assert ( "State of this instance has been externally set to success. Terminating instance." in caplog.text )
115
test_local_task_job.py
Python
tests/jobs/test_local_task_job.py
3138604b264878f27505223bd14c7814eacc1e57
airflow
1
124,673
15
12
6
79
14
1
16
61
reconfigure
[Serve] [AIR] Adding reconfigure method to model deployment (#26026)
https://github.com/ray-project/ray.git
def reconfigure(self, config): from ray.air.checkpoint import Checkpoint predictor_cls = _load_predictor_cls(config["predictor_cls"]) self.model = predictor_cls.from_checkpoint( Checkpoint.from_dict(config["checkpoint"]) ) @serve.deployment
@serve.deployment
43
air_integrations.py
Python
python/ray/serve/air_integrations.py
980a59477de62ed8b3441a1fd5f8fb9e18df0f14
ray
1
60,285
75
14
22
326
26
0
114
269
_Net_backward
Balanced joint maximum mean discrepancy for deep transfer learning
https://github.com/jindongwang/transferlearning.git
def _Net_backward(self, diffs=None, start=None, end=None, **kwargs): if diffs is None: diffs = [] if start is not None: start_ind = list(self._layer_names).index(start) else: start_ind = len(self.layers) - 1 if end is not None: end_ind = list(self._layer_names).index(end) outputs = set([end] + diffs) else: end_ind = 0 outputs = set(self.inputs + diffs) if kwargs: if set(kwargs.keys()) != set(self.outputs): raise Exception('Top diff arguments do not match net outputs.') # Set top diffs according to defined shapes and make arrays single and # C-contiguous as Caffe expects. for top, diff in six.iteritems(kwargs): if diff.shape[0] != self.blobs[top].shape[0]: raise Exception('Diff is not batch sized') self.blobs[top].diff[...] = diff self._backward(start_ind, end_ind) # Unpack diffs to extract return {out: self.blobs[out].diff for out in outputs}
205
pycaffe.py
Python
code/deep/BJMMD/caffe/python/caffe/pycaffe.py
cc4d0564756ca067516f71718a3d135996525909
transferlearning
9
269,306
6
8
2
50
8
1
6
10
selu
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def selu(x): return tf.nn.selu(x) @keras_export("keras.activations.softplus") @tf.__internal__.dispatch.add_dispatch_support
@keras_export("keras.activations.softplus") @tf.__internal__.dispatch.add_dispatch_support
15
activations.py
Python
keras/activations.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
247,973
29
12
21
139
10
0
41
226
test_get_global
Add Module API for reading and writing global account data. (#12391)
https://github.com/matrix-org/synapse.git
def test_get_global(self) -> None: self.get_success( self._store.add_account_data_for_user( self.user_id, "test.data", {"wombat": True} ) ) # Getting existent account data works as expected. self.assertEqual( self.get_success( self._account_data_mgr.get_global(self.user_id, "test.data") ), {"wombat": True}, ) # Getting non-existent account data returns None. self.assertIsNone( self.get_success( self._account_data_mgr.get_global(self.user_id, "no.data.at.all") ) )
82
test_account_data_manager.py
Python
tests/module_api/test_account_data_manager.py
85ca963c1add5ca12f59238a50dfc63df4846bb7
synapse
1
105,895
16
12
7
52
6
0
17
51
require_spacy
Make torch.Tensor and spacy models cacheable (#5191) * Make torch.Tensor and spacy models cacheable * Use newest models * Address comments * Small optim
https://github.com/huggingface/datasets.git
def require_spacy(test_case): try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(test_case) else: return test_case
27
utils.py
Python
tests/utils.py
0d9c12ad5155c6d505e70813a07c0aecd7120405
datasets
2
290,582
14
11
11
61
9
1
14
94
required_platform_only
Fix ZHA configuration APIs (#81874) * Fix ZHA configuration loading and saving issues * add tests
https://github.com/home-assistant/core.git
def required_platform_only(): with patch( "homeassistant.components.zha.PLATFORMS", ( Platform.ALARM_CONTROL_PANEL, Platform.SELECT, Platform.SENSOR, Platform.SWITCH, ), ): yield @pytest.fixture
@pytest.fixture
32
test_api.py
Python
tests/components/zha/test_api.py
ebffe0f33b61e87c348bb7c99714c1d551623f9c
core
1
111,341
9
13
4
56
9
0
9
34
_require_patterns
Add SpanRuler component (#9880) * Add SpanRuler component Add a `SpanRuler` component similar to `EntityRuler` that saves a list of matched spans to `Doc.spans[spans_key]`. The matches from the token and phrase matchers are deduplicated and sorted before assignment but are not otherwise filtered. * Update spacy/pipeline/span_ruler.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Fix cast * Add self.key property * Use number of patterns as length * Remove patterns kwarg from init * Update spacy/tests/pipeline/test_span_ruler.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Add options for spans filter and setting to ents * Add `spans_filter` option as a registered function' * Make `spans_key` optional and if `None`, set to `doc.ents` instead of `doc.spans[spans_key]`. * Update and generalize tests * Add test for setting doc.ents, fix key property type * Fix typing * Allow independent doc.spans and doc.ents * If `spans_key` is set, set `doc.spans` with `spans_filter`. * If `annotate_ents` is set, set `doc.ents` with `ents_fitler`. * Use `util.filter_spans` by default as `ents_filter`. * Use a custom warning if the filter does not work for `doc.ents`. * Enable use of SpanC.id in Span * Support id in SpanRuler as Span.id * Update types * `id` can only be provided as string (already by `PatternType` definition) * Update all uses of Span.id/ent_id in Doc * Rename Span id kwarg to span_id * Update types and docs * Add ents filter to mimic EntityRuler overwrite_ents * Refactor `ents_filter` to take `entities, spans` args for more filtering options * Give registered filters more descriptive names * Allow registered `filter_spans` filter (`spacy.first_longest_spans_filter.v1`) to take any number of `Iterable[Span]` objects as args so it can be used for spans filter or ents filter * Implement future entity ruler as span ruler Implement a compatible `entity_ruler` as `future_entity_ruler` using `SpanRuler` as the underlying component: * Add `sort_key` and `sort_reverse` to allow the sorting behavior to be customized. (Necessary for the same sorting/filtering as in `EntityRuler`.) * Implement `overwrite_overlapping_ents_filter` and `preserve_existing_ents_filter` to support `EntityRuler.overwrite_ents` settings. * Add `remove_by_id` to support `EntityRuler.remove` functionality. * Refactor `entity_ruler` tests to parametrize all tests to test both `entity_ruler` and `future_entity_ruler` * Implement `SpanRuler.token_patterns` and `SpanRuler.phrase_patterns` properties. Additional changes: * Move all config settings to top-level attributes to avoid duplicating settings in the config vs. `span_ruler/cfg`. (Also avoids a lot of casting.) * Format * Fix filter make method name * Refactor to use same error for removing by label or ID * Also provide existing spans to spans filter * Support ids property * Remove token_patterns and phrase_patterns * Update docstrings * Add span ruler docs * Fix types * Apply suggestions from code review Co-authored-by: Sofie Van Landeghem <[email protected]> * Move sorting into filters * Check for all tokens in seen tokens in entity ruler filters * Remove registered sort key * Set Token.ent_id in a backwards-compatible way in Doc.set_ents * Remove sort options from API docs * Update docstrings * Rename entity ruler filters * Fix and parameterize scoring * Add id to Span API docs * Fix typo in API docs * Include explicit labeled=True for scorer Co-authored-by: Sofie Van Landeghem <[email protected]>
https://github.com/explosion/spaCy.git
def _require_patterns(self) -> None: if len(self) == 0: warnings.warn(Warnings.W036.format(name=self.name))
33
span_ruler.py
Python
spacy/pipeline/span_ruler.py
a322d6d5f2f85c2da6cded4fcd6143d41b5a9e96
spaCy
2
153,617
27
12
5
118
13
0
35
71
at_time
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
https://github.com/modin-project/modin.git
def at_time(self, time, asof=False, axis=None): # noqa: PR01, RT01, D200 axis = self._get_axis_number(axis) idx = self.index if axis == 0 else self.columns indexer = pandas.Series(index=idx).at_time(time, asof=asof).index return self.loc[indexer] if axis == 0 else self.loc[:, indexer]
78
base.py
Python
modin/pandas/base.py
605efa618e7994681f57b11d04d417f353ef8d50
modin
3
118,727
57
13
14
153
20
0
63
208
bokeh_chart
Replace static apps with live Cloud apps (#4317) Co-authored-by: kajarenc <[email protected]>
https://github.com/streamlit/streamlit.git
def bokeh_chart(self, figure, use_container_width=False): import bokeh if bokeh.__version__ != ST_BOKEH_VERSION: raise StreamlitAPIException( f"Streamlit only supports Bokeh version {ST_BOKEH_VERSION}, " f"but you have version {bokeh.__version__} installed. Please " f"run `pip install --force-reinstall --no-deps bokeh==" f"{ST_BOKEH_VERSION}` to install the correct version." ) # Generate element ID from delta path delta_path = self.dg._get_delta_path_str() element_id = hashlib.md5(delta_path.encode()).hexdigest() bokeh_chart_proto = BokehChartProto() marshall(bokeh_chart_proto, figure, use_container_width, element_id) return self.dg._enqueue("bokeh_chart", bokeh_chart_proto)
84
bokeh_chart.py
Python
lib/streamlit/elements/bokeh_chart.py
72703b38029f9358a0ec7ca5ed875a6b438ece19
streamlit
2
304,164
36
13
15
193
23
0
47
204
async_step_user
Add Landis+Gyr Heat Meter integration (#73363) * Add Landis+Gyr Heat Meter integration * Add contant for better sensor config * Add test for init * Refactor some of the PR suggestions in config_flow * Apply small fix * Correct total_increasing to total * Add test for restore state * Add MWh entity that can be added as gas on the energy dashoard * Remove GJ as unit * Round MWh to 5 iso 3 digits * Update homeassistant/components/landisgyr_heat_meter/const.py * Update CODEOWNERS Co-authored-by: Erik Montnemery <[email protected]>
https://github.com/home-assistant/core.git
async def async_step_user(self, user_input=None): errors = {} if user_input is not None: if user_input[CONF_DEVICE] == CONF_MANUAL_PATH: return await self.async_step_setup_serial_manual_path() dev_path = await self.hass.async_add_executor_job( get_serial_by_id, user_input[CONF_DEVICE] ) try: return await self.validate_and_create_entry(dev_path) except CannotConnect: errors["base"] = "cannot_connect" ports = await self.get_ports() schema = vol.Schema({vol.Required(CONF_DEVICE): vol.In(ports)}) return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
117
config_flow.py
Python
homeassistant/components/landisgyr_heat_meter/config_flow.py
7a497c1e6e5a0d44b9418a754470ca9dd35e9719
core
4
130,352
26
11
11
114
15
0
29
114
create_v_switch
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def create_v_switch(self, vpc_id, zone_id, cidr_block): request = CreateVSwitchRequest() request.set_ZoneId(zone_id) request.set_VpcId(vpc_id) request.set_CidrBlock(cidr_block) response = self._send_request(request) if response is not None: return response.get("VSwitchId") else: logging.error("create_v_switch vpc_id %s failed.", vpc_id) return None
68
utils.py
Python
python/ray/autoscaler/_private/aliyun/utils.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
2
299,744
27
9
8
88
12
1
31
88
test_select_source_firetv
Review AndroidTV tests for media player entity (#71168)
https://github.com/home-assistant/core.git
async def test_select_source_firetv(hass, source, expected_arg, method_patch): conf_apps = { "com.app.test1": "TEST 1", "com.app.test3": None, } await _test_select_source( hass, CONFIG_FIRETV_DEFAULT, conf_apps, source, expected_arg, method_patch ) @pytest.mark.parametrize( "config", [ CONFIG_ANDROIDTV_DEFAULT, CONFIG_FIRETV_DEFAULT, ], )
@pytest.mark.parametrize( "config", [ CONFIG_ANDROIDTV_DEFAULT, CONFIG_FIRETV_DEFAULT, ], )
39
test_media_player.py
Python
tests/components/androidtv/test_media_player.py
ea456893f94c7dc88b0cc28f92dadf240fbb1fe7
core
1
224,037
15
10
4
68
9
0
18
53
_set_active
Remove spaces at the ends of docstrings, normalize quotes
https://github.com/mkdocs/mkdocs.git
def _set_active(self, value): self.__active = bool(value) if self.parent is not None: self.parent.active = bool(value) active = property(_get_active, _set_active)
34
nav.py
Python
mkdocs/structure/nav.py
e7f07cc82ab2be920ab426ba07456d8b2592714d
mkdocs
2
304,335
31
11
10
98
9
0
31
130
_filter_entries
Type feedreader strictly (#76707) * Type feedreader strictly * Run hassfest
https://github.com/home-assistant/core.git
def _filter_entries(self) -> None: assert self._feed is not None if len(self._feed.entries) > self._max_entries: _LOGGER.debug( "Processing only the first %s entries in feed %s", self._max_entries, self._url, ) self._feed.entries = self._feed.entries[0 : self._max_entries]
62
__init__.py
Python
homeassistant/components/feedreader/__init__.py
d0986c765083fd7d597f03ea4679245417d8a6f8
core
2
127,695
9
8
4
39
5
0
10
38
job_id
[core/docs] Update worker docstring (#28495) Co-authored-by: Philipp Moritz <[email protected]>
https://github.com/ray-project/ray.git
def job_id(self): job_id = self.worker.current_job_id assert not job_id.is_nil() return job_id
22
runtime_context.py
Python
python/ray/runtime_context.py
8ffe435173aee0f313f786e7301d05f608b6d5be
ray
1
262,764
69
18
18
183
11
0
108
303
binary_to_target_arch
building: macOS: limit binaries' architecture validation to extensions As demonstrated by scipy 1.8.0, the multi-arch universal2 extensions may have their individual arch slices linked against distinct single-arch thin shared libraries. Such thin shared libraries will fail the current strict architecture validation, either by virtue of being single-arch (whereas the target architecture is universal2) or by virtue of at least one single-arch thin shared library being of incompatible architecture (e.g., arm64 thin shared library will be flagged as incompatible for x86_64 target, and x86_64 thin shared library will be flagged as incompatible for arm64 target). Therefore, limit the architecture validation only to python extension modules, which do need to be fully compatible with the target arch (at least until someone decides to ship distinct, arch-specific modules. But if that does happen, we can probably prevent the collection of incompatible module via hooks). The extension validation should hopefully still catch the attempts at using incompatible single-arch packages when trying to build a universal2 application or a single-arch application for architecture that's different from the running one.
https://github.com/pyinstaller/pyinstaller.git
def binary_to_target_arch(filename, target_arch, display_name=None): if not display_name: display_name = filename # Same as input file # Check the binary is_fat, archs = get_binary_architectures(filename) if target_arch == 'universal2': if not is_fat: raise IncompatibleBinaryArchError(f"{display_name} is not a fat binary!") # Assume fat binary is universal2; nothing to do else: if is_fat: if target_arch not in archs: raise IncompatibleBinaryArchError(f"{display_name} does not contain slice for {target_arch}!") # Convert to thin arch logger.debug("Converting fat binary %s (%s) to thin binary (%s)", filename, display_name, target_arch) convert_binary_to_thin_arch(filename, target_arch) else: if target_arch not in archs: raise IncompatibleBinaryArchError( f"{display_name} is incompatible with target arch {target_arch} (has arch: {archs[0]})!" ) # Binary has correct arch; nothing to do
91
osx.py
Python
PyInstaller/utils/osx.py
b401095d572789211857fbc47a021d7f712e555a
pyinstaller
7
248,551
135
15
125
890
25
0
337
1,750
test_join_rules_msc3083_restricted
EventAuthTestCase: build events for the right room version In practice, when we run the auth rules, all of the events have the right room version. Let's stop building Room V1 events for these tests and use the right version.
https://github.com/matrix-org/synapse.git
def test_join_rules_msc3083_restricted(self) -> None: creator = "@creator:example.com" pleb = "@joiner:example.com" auth_events = { ("m.room.create", ""): _create_event(RoomVersions.V8, creator), ("m.room.member", creator): _join_event(RoomVersions.V8, creator), ("m.room.power_levels", ""): _power_levels_event( RoomVersions.V8, creator, {"invite": 0} ), ("m.room.join_rules", ""): _join_rules_event( RoomVersions.V8, creator, "restricted" ), } # A properly formatted join event should work. authorised_join_event = _join_event( RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@creator:example.com" }, ) event_auth.check_auth_rules_for_event( RoomVersions.V8, authorised_join_event, auth_events.values(), ) # A join issued by a specific user works (i.e. the power level checks # are done properly). pl_auth_events = auth_events.copy() pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( RoomVersions.V8, creator, {"invite": 100, "users": {"@inviter:foo.test": 150}}, ) pl_auth_events[("m.room.member", "@inviter:foo.test")] = _join_event( RoomVersions.V8, "@inviter:foo.test" ) event_auth.check_auth_rules_for_event( RoomVersions.V8, _join_event( RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@inviter:foo.test" }, ), pl_auth_events.values(), ) # A join which is missing an authorised server is rejected. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V8, _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # An join authorised by a user who is not in the room is rejected. pl_auth_events = auth_events.copy() pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( RoomVersions.V8, creator, {"invite": 100, "users": {"@other:example.com": 150}}, ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V8, _join_event( RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@other:example.com" }, ), auth_events.values(), ) # A user cannot be force-joined to a room. (This uses an event which # *would* be valid, but is sent be a different user.) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V8, _member_event( RoomVersions.V8, pleb, "join", sender=creator, additional_content={ EventContentFields.AUTHORISING_USER: "@inviter:foo.test" }, ), auth_events.values(), ) # Banned should be rejected. auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V8, pleb, "ban" ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V8, authorised_join_event, auth_events.values(), ) # A user who left can re-join. auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V8, pleb, "leave" ) event_auth.check_auth_rules_for_event( RoomVersions.V8, authorised_join_event, auth_events.values(), ) # A user can send a join if they're in the room. (This doesn't need to # be authorised since the user is already joined.) auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V8, pleb, "join" ) event_auth.check_auth_rules_for_event( RoomVersions.V8, _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # A user can accept an invite. (This doesn't need to be authorised since # the user was invited.) 
auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V8, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( RoomVersions.V8, _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # helpers for making events TEST_ROOM_ID = "!test:room"
548
test_event_auth.py
Python
tests/test_event_auth.py
2959184a42398277ff916206235b844a8f7be5d7
synapse
1
246,359
70
17
48
234
18
0
127
443
return_expanded
Fix bug in `StateFilter.return_expanded()` and add some tests. (#12016)
https://github.com/matrix-org/synapse.git
def return_expanded(self) -> "StateFilter": if self.is_full(): # If we're going to return everything then there's nothing to do return self if not self.has_wildcards(): # If there are no wild cards, there's nothing to do return self if EventTypes.Member in self.types: get_all_members = self.types[EventTypes.Member] is None else: get_all_members = self.include_others has_non_member_wildcard = self.include_others or any( state_keys is None for t, state_keys in self.types.items() if t != EventTypes.Member ) if not has_non_member_wildcard: # If there are no non-member wild cards we can just return ourselves return self if get_all_members: # We want to return everything. return StateFilter.all() elif EventTypes.Member in self.types: # We want to return all non-members, but only particular # memberships return StateFilter( types=frozendict({EventTypes.Member: self.types[EventTypes.Member]}), include_others=True, ) else: # We want to return all non-members return _ALL_NON_MEMBER_STATE_FILTER
141
state.py
Python
synapse/storage/state.py
eb609c65d0794dd49efcd924bdc8743fd4253a93
synapse
10
255,151
17
12
11
99
15
0
21
75
tests
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <[email protected]> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <[email protected]> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <[email protected]> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <[email protected]> * fixes Signed-off-by: Gary Miguel <[email protected]> * remove extra blank line Signed-off-by: Gary Miguel <[email protected]> * fix type annotations Signed-off-by: Gary Miguel <[email protected]> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <[email protected]> * fix Operators.md Signed-off-by: Gary Miguel <[email protected]> * fix TestCoverage.md Signed-off-by: Gary Miguel <[email protected]> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <[email protected]>
https://github.com/onnx/onnx.git
def tests(self) -> Type[unittest.TestCase]: tests = self._get_test_case('OnnxBackendTest') for items_map in sorted(self._filtered_test_items.values()): for name, item in sorted(items_map.items()): setattr(tests, name, item.func) return tests
61
__init__.py
Python
onnx/backend/test/runner/__init__.py
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
onnx
3
133,619
7
8
16
30
3
0
8
17
test_ssh_sync
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def test_ssh_sync(): experiment_name = "cloud_ssh_sync" indicator_file = f"/tmp/{experiment_name}_indicator"
70
run_cloud_test.py
Python
release/tune_tests/cloud_tests/workloads/run_cloud_test.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
2
293,767
24
11
9
115
12
0
27
66
test_from_event_to_db_state_attributes
Separate attrs into another table (reduces database size) (#68224)
https://github.com/home-assistant/core.git
def test_from_event_to_db_state_attributes(): attrs = {"this_attr": True} state = ha.State("sensor.temperature", "18", attrs) event = ha.Event( EVENT_STATE_CHANGED, {"entity_id": "sensor.temperature", "old_state": None, "new_state": state}, context=state.context, ) assert StateAttributes.from_event(event).to_native() == attrs
66
test_models.py
Python
tests/components/recorder/test_models.py
9215702388eef03c7c3ed9f756ea0db533d5beec
core
1
101,983
26
12
13
122
13
0
32
164
destroy_widgets
GUI - Preview updates - Training preview. Embed preview pop-out window - Bugfix - convert/extract previews
https://github.com/deepfakes/faceswap.git
def destroy_widgets(self) -> None: if self._is_standalone: return for widget in self._gui_mapped: if widget.winfo_ismapped(): logger.debug("Removing widget: %s", widget) widget.pack_forget() widget.destroy() del widget for var in list(self._vars): logger.debug("Deleting tk variable: %s", var) del self._vars[var]
73
preview_tk.py
Python
lib/training/preview_tk.py
2e8ef5e3c8f2df0f1cca9b342baa8aaa6f620650
faceswap
5
281,251
6
6
3
25
4
0
6
20
custom_reset
Baseclass (#1141) * A working decorator * Basic intro * Added more * Refactor * Refactor * Cleaned code * Simplified function (thanks Chavi) * Small change * Updating tests : fix issue with mock * Updating tests : fix remaining mocks after merging * Updating tests : black * Cleaned up * Finished base cases * Notes * Slight changes * Added dynamic options handling, error persists * Fixed pylint issues * Fixed mock * fix decorator with dynamic dictionary of args * move choices from dynamic to const in crypto/ov * Updated var names * Check * Moved decorators * Fixed import issues * Fixed tests, update payoff controller * Fixed tests * Fixed pylint * Updated files * Added base class * Added reset * Improved base class * For James * More menues converted * Added contexts * 24 controllers left * 18 Controllers left * Changes choices * 9 controllers left * Added all controllers * Fixed glitch * Replaced all improper callings of class * Removed menu decorator * refactored try_except * Last commit * Black fix * Bug fix * Added James' new menus * Fixed tests * Fixed 8 tests * Fixing mypy issue * Updating tests : stocks/options * Fixed options * Fixed tests * Updating tests : stocks/options * Fixed tests * More test fixes * Updating tests : stocks/ba * Fixed options test * More bug fixes * Fixed tests * fixed pylint * Skipped test_call_load * Add typings to base class * Fix issue with appending auto completer options + bugfixes * Add typings to base class * Terminal throws error for bad path * sexy solution to auto completer in runtime * more sexy reset with reset_level stored * no so sexy jump between indirect menus * Removing choices argument * refactor custom_reset * Fixed tests * Theo fixes * Added back function * Fixed tests Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: DidierRLopes <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def custom_reset(self) -> List[str]: return []
14
parent_classes.py
Python
gamestonk_terminal/parent_classes.py
006b3570b795215a17c64841110b649b03db9a98
OpenBBTerminal
1
147,859
5
11
15
77
26
5
5
12
options
[core] Simplify options handling [Part 1] (#23127) * handle options * update doc * fix serve
https://github.com/ray-project/ray.git
def options(self, args=None, kwargs=None, **actor_options):
"""overrides the actor instantiation parameters. The arguments are the same as those that can be passed to :obj:`ray.remote`. Examples: .. code-block:: .
86
actor.py
Python
python/ray/actor.py
d7ef546352c78f5080938a41432b8de4c0c81ff0
ray
2
260,356
18
10
8
87
13
0
23
83
transform
MAINT Use _validate_params in SparsePCA and MiniBatchSparsePCA (#23710) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: jeremiedbb <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def transform(self, X): check_is_fitted(self) X = self._validate_data(X, reset=False) X = X - self.mean_ U = ridge_regression( self.components_.T, X.T, self.ridge_alpha, solver="cholesky" ) return U
55
_sparse_pca.py
Python
sklearn/decomposition/_sparse_pca.py
db6123fe40400828918037f3fae949bfcc4d9d05
scikit-learn
1
191,406
29
9
9
103
13
0
37
61
test_predict_until_observation_repeat
Harrison/add react chain (#24) from https://arxiv.org/abs/2210.03629 still need to think if docstore abstraction makes sense
https://github.com/hwchase17/langchain.git
def test_predict_until_observation_repeat() -> None: outputs = ["foo", " search[foo]"] fake_llm = FakeListLLM(outputs) fake_llm_chain = LLMChain(llm=fake_llm, prompt=_FAKE_PROMPT) ret_text, action, directive = predict_until_observation(fake_llm_chain, "", 1) assert ret_text == "foo\nAction 1: search[foo]" assert action == "search" assert directive == "foo"
58
test_react.py
Python
tests/unit_tests/chains/test_react.py
ce7b14b84381c766ae42a0f71953b2a56c024dbb
langchain
1
157,004
15
12
3
87
10
1
15
27
_emulate
Filter out `numeric_only` warnings from `pandas` (#9496) * Initial checkpoint * test-upstream * Pass method name [test-upstream] * Groupby [test-upstream] * Cleanup [test-upstream] * More specific warning catching [test-upstream] * Remove stray breakpoint [test-upstream] * Fix categorical tests [test-upstream] * Restore npartitions after debugging [test-upstream] * Updates [test-upstream] * Roll back columns [test-upstream] * Be more explicit about method name in _getattr_numeric_only [test-upstream] * Use more specific parameter for method name [test-upstream]
https://github.com/dask/dask.git
def _emulate(func, *args, udf=False, **kwargs): with raise_on_meta_error(funcname(func), udf=udf), check_numeric_only_deprecation(): return func(*_extract_meta(args, True), **_extract_meta(kwargs, True)) @insert_meta_param_description
@insert_meta_param_description
52
core.py
Python
dask/dataframe/core.py
1a8533fddb7de0c9981acee0c33408e7205f8c7a
dask
1
292,209
79
12
35
367
26
0
123
300
test_cleanup_trigger
Improve MQTT device removal (#66766) * Improve MQTT device removal * Update homeassistant/components/mqtt/mixins.py Co-authored-by: Martin Hjelmare <[email protected]> * Adjust tests * Improve test coverage Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
async def test_cleanup_trigger(hass, hass_ws_client, device_reg, entity_reg, mqtt_mock): assert await async_setup_component(hass, "config", {}) ws_client = await hass_ws_client(hass) config = { "automation_type": "trigger", "topic": "test-topic", "type": "foo", "subtype": "bar", "device": {"identifiers": ["helloworld"]}, } data = json.dumps(config) async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data) await hass.async_block_till_done() # Verify device registry entry is created device_entry = device_reg.async_get_device({("mqtt", "helloworld")}) assert device_entry is not None triggers = await async_get_device_automations( hass, DeviceAutomationType.TRIGGER, device_entry.id ) assert triggers[0]["type"] == "foo" # Remove MQTT from the device await ws_client.send_json( { "id": 6, "type": "mqtt/device/remove", "device_id": device_entry.id, } ) response = await ws_client.receive_json() assert response["success"] await hass.async_block_till_done() await hass.async_block_till_done() # Verify device registry entry is cleared device_entry = device_reg.async_get_device({("mqtt", "helloworld")}) assert device_entry is None # Verify retained discovery topic has been cleared mqtt_mock.async_publish.assert_called_once_with( "homeassistant/device_automation/bla/config", "", 0, True )
207
test_device_trigger.py
Python
tests/components/mqtt/test_device_trigger.py
ba6d1976dff8df2aa32726ff2acbf0ba61e5c550
core
1
150,089
64
16
16
182
16
0
78
266
load_historic_predictions_from_disk
start collecting indefinite history of predictions. Allow user to generate statistics on these predictions. Direct FreqAI to save these to disk and reload them if available.
https://github.com/freqtrade/freqtrade.git
def load_historic_predictions_from_disk(self): exists = Path(self.full_path / str("historic_predictions.json")).resolve().exists() if exists: with open(self.full_path / str("historic_predictions.json"), "r") as fp: self.pair_dict = json.load(fp) logger.info(f"Found existing historic predictions at {self.full_path}, but beware of " "that statistics may be inaccurate if the bot has been offline for " "an extended period of time.") elif not self.follow_mode: logger.info("Could not find existing historic_predictions, starting from scratch") else: logger.warning( f"Follower could not find historic predictions at {self.full_path} " "sending null values back to strategy" ) return exists
90
data_drawer.py
Python
freqtrade/freqai/data_drawer.py
8ce6b183180e69411d4b44b51489451b31475f35
freqtrade
3
281,543
19
9
32
93
11
0
26
61
print_help
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def print_help(self): is_foreign_start = "" if not self.suffix else "[unvl]" is_foreign_end = "" if not self.suffix else "[/unvl]" help_text = f console.print(text=help_text, menu="Stocks - Fundamental Analysis")
42
fa_controller.py
Python
gamestonk_terminal/stocks/fundamental_analysis/fa_controller.py
82747072c511beb1b2672846ae2ee4aec53eb562
OpenBBTerminal
3
162,356
6
5
15
28
11
1
6
13
_entries
[PRX] Add Extractors (#2245) Closes #2144, https://github.com/ytdl-org/youtube-dl/issues/15948 Authored by: coletdjnz
https://github.com/yt-dlp/yt-dlp.git
def _entries(self, item_id, endpoint, entry_func, query=None):
""" Extract entries from paginated list API
106
prx.py
Python
yt_dlp/extractor/prx.py
85fee2215295b099d34350d9a9ff42c086e3aef2
yt-dlp
6