Dataset schema (per-column type and value/length range):

Column           Type            Min    Max
complexity       int64           1      56
n_identifiers    int64           1      114
code             stringlengths   19     12.7k
path             stringlengths   8      134
n_ast_nodes      int64           12     2.35k
ast_errors       stringlengths   0      4.01k
repo             stringlengths   3      28
documentation    dict
n_words          int64           2      866
language         stringclasses   1 value
vocab_size       int64           2      323
commit_id        stringlengths   40     40
file_name        stringlengths   5      79
id               int64           243    338k
nloc             int64           1      228
token_counts     int64           5      1.4k
fun_name         stringlengths   1      77
url              stringlengths   31     60
commit_message   stringlengths   3      15.3k
n_whitespaces    int64           1      3.23k
n_ast_errors     int64           0      20
d_id             int64           74     121k
ast_levels       int64           4      29
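As a minimal sketch of how a dataset with this schema could be inspected, assuming the dump comes from the Hugging Face Hub and using a hypothetical dataset identifier ("org/annotated-python-functions" is a placeholder, not the real name), the columns above can be loaded and filtered with the `datasets` library:

# Minimal sketch: load and inspect a dataset with the schema above.
# The dataset identifier below is hypothetical.
from datasets import load_dataset

ds = load_dataset("org/annotated-python-functions", split="train")
print(ds.features)                      # column names and dtypes, matching the table above
print(ds[0]["fun_name"], ds[0]["repo"])  # fields of the first record

# Example filter: keep short, low-complexity functions.
small = ds.filter(lambda row: row["complexity"] <= 5 and row["nloc"] <= 20)
print(len(small))

The records below follow the column order of the schema; each record's `code` field is shown reformatted for readability.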
1
8
def test_caching(self):
    # The first user ignores a user.
    self._update_ignore_list("@other:test")
    self.assert_ignored(self.user, {"@other:test"})
    self.assert_ignorers("@other:test", {self.user})

    # The second user ignores them.
    self._update_ignore_list("@other:test", ignorer_user_id="@second:test")
    self.assert_ignored("@second:test", {"@other:test"})
    self.assert_ignorers("@other:test", {self.user, "@second:test"})

    # The first user un-ignores them.
    self._update_ignore_list()
    self.assert_ignored(self.user, set())
    self.assert_ignorers("@other:test", {"@second:test"})
tests/storage/test_account_data.py
177
synapse
{ "docstring": "Ensure that caching works properly between different users.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
38
Python
25
dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df
test_account_data.py
247,675
10
97
test_caching
https://github.com/matrix-org/synapse.git
Use the ignored_users table to test event visibility & sync. (#12225) Instead of fetching the raw account data and re-parsing it. The ignored_users table is a denormalised version of the account data for quick searching.
129
0
71,828
9
4
11
def _assign_default_kwargs(kws, call_func, source_func):
    # This exists so that axes-level functions and figure-level functions can
    # both call a Plotter method while having the default kwargs be defined in
    # the signature of the axes-level function.
    # An alternative would be to have a decorator on the method that sets its
    # defaults based on those defined in the axes-level function.
    # Then the figure-level function would not need to worry about defaults.
    # I am not sure which is better.
    needed = inspect.signature(call_func).parameters
    defaults = inspect.signature(source_func).parameters

    for param in needed:
        if param in defaults and param not in kws:
            kws[param] = defaults[param].default

    return kws
seaborn/utils.py
97
seaborn
{ "docstring": "Assign default kwargs for call_func using values from source_func.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
105
Python
68
6460a21555ba6557e1f6f06f4d677d9c19148169
utils.py
42,076
7
58
_assign_default_kwargs
https://github.com/mwaskom/seaborn.git
Workaround for matplotlib rc_context issue (#2925) * Workaround for matplotlib rc_context issue Fixes #2914 * Add some additional comments about this workaround
159
0
7,476
12
1
2
def test_is_anonymous_authenticated_methods(self):
tests/auth_tests/test_checks.py
13
django
{ "docstring": "\n <User Model>.is_anonymous/is_authenticated must not be methods.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
2
Python
2
9c19aff7c7561e3a82978a272ecdaad40dda5c00
test_checks.py
201,183
26
99
test_is_anonymous_authenticated_methods
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
9
0
49,893
6
1
4
def trace_symbol(self) -> Any: raise NotImplementedError()
nni/common/serializer.py
23
nni
{ "docstring": "\n Symbol object. Could be a class or a function.\n ``get_hybrid_cls_or_func_name`` and ``import_cls_or_func_from_hybrid_name`` is a pair to\n convert the symbol into a string and convert the string back to symbol.\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 21 }
6
Python
6
21abc280257fb8868be61264abe42534aa09188b
serializer.py
111,886
7
12
trace_symbol
https://github.com/microsoft/nni.git
Fix #4434: support pickle in serializer (#4552)
20
0
24,497
7
2
8
def print_colored(text, color="OK", bold=False):
    color = _COLORS.get(color, color)
    fmt = '' if not bold else _COLORS['BOLD']
    print(f"{color}{fmt}{text}{_COLORS['ENDC']}")
tests/simple_tests.py
91
faceswap
{ "docstring": " Print colored text\n This might not work on windows,\n although travis runs windows stuff in git bash, so it might ?\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 20 }
17
Python
16
98a65277d8c55cfcbdbfa629f790a8f8731621a8
simple_tests.py
100,861
4
40
print_colored
https://github.com/deepfakes/faceswap.git
Fix AMD Tests + docs
29
0
20,312
11
9
15
def _light_internal_color_mode(self) -> str:
    if (color_mode := self.color_mode) is None:
        # Backwards compatibility for color_mode added in 2021.4
        # Add warning in 2021.6, remove in 2021.10
        supported = self._light_internal_supported_color_modes

        if ColorMode.HS in supported and self.hs_color is not None:
            return ColorMode.HS
        if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None:
            return ColorMode.COLOR_TEMP
        if ColorMode.BRIGHTNESS in supported and self.brightness is not None:
            return ColorMode.BRIGHTNESS
        if ColorMode.ONOFF in supported:
            return ColorMode.ONOFF
        return ColorMode.UNKNOWN

    return color_mode
homeassistant/components/light/__init__.py
150
core
{ "docstring": "Return the color mode of the light with backwards compatibility.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
72
Python
38
47d0598e75487f63901931875f69f802a477df13
__init__.py
288,598
14
95
_light_internal_color_mode
https://github.com/home-assistant/core.git
Use Kelvin as the preferred color temperature unit (#79591) * Use Kelvin as the preferred white temperature unit * Update homekit * Adjust tests
241
0
87,754
10
1
6
def test_backend_has_no_autograd_key_but_provides_entries(self) -> None:
    yaml_str = 
    output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
    self.assertExpectedInline(output_error, )  # noqa: B950

# in an operator group, currently all operators must either be registered to the backend or autograd kernel.
# Here, functional and out mismatch
tools/test/test_gen_backend_stubs.py
50
pytorch
{ "docstring": "\\\nbackend: Vulkan\ncpp_namespace: torch_vulkan\nsupported:\n- add\nautograd:\n- subFound an invalid operator name: add", "language": "en", "n_whitespaces": 9, "n_words": 16, "vocab_size": 14 }
38
Python
35
bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d
test_gen_backend_stubs.py
102,167
10
26
test_backend_has_no_autograd_key_but_provides_entries
https://github.com/pytorch/pytorch.git
Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950 This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa. Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33113545 Pulled By: bdhirsh fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288
66
0
21,482
8
1
11
def test_dict_failure(self):
    with pytest.raises(validate.ValidationError) as cm:
        validate.validate(validate.union({"foo": int}), "value")
    assert_validationerror(cm.value, )
tests/test_api_validate.py
75
streamlink
{ "docstring": "\n ValidationError(UnionSchema):\n Could not validate union\n Context(dict):\n Unable to validate union 'foo'\n Context(type):\n Type of 'value' should be int, but is str\n ", "language": "en", "n_whitespaces": 113, "n_words": 21, "vocab_size": 19 }
11
Python
11
d09112ab1f6db6aa605650fe1ff6a3028344f90d
test_api_validate.py
187,200
11
42
test_dict_failure
https://github.com/streamlink/streamlink.git
plugin.api.validate: rewrite tests Completely rewrite tests using pytest, with full coverage
35
0
45,739
14
1
28
def test_submit_event(session, create_task_instance):
    # Make a trigger
    trigger = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs={})
    trigger.id = 1
    session.add(trigger)
    session.commit()

    # Make a TaskInstance that's deferred and waiting on it
    task_instance = create_task_instance(
        session=session, execution_date=timezone.utcnow(), state=State.DEFERRED
    )
    task_instance.trigger_id = trigger.id
    task_instance.next_kwargs = {"cheesecake": True}
    session.commit()

    # Call submit_event
    Trigger.submit_event(trigger.id, TriggerEvent(42), session=session)
    # commit changes made by submit event and expire all cache to read from db.
    session.flush()
    session.expunge_all()

    # Check that the task instance is now scheduled
    updated_task_instance = session.query(TaskInstance).one()
    assert updated_task_instance.state == State.SCHEDULED
    assert updated_task_instance.next_kwargs == {"event": 42, "cheesecake": True}
tests/models/test_trigger.py
237
airflow
{ "docstring": "\n Tests that events submitted to a trigger re-wake their dependent\n task instances.\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 12 }
87
Python
69
bab740c0a49b828401a8baf04eb297d083605ae8
test_trigger.py
47,418
17
141
test_submit_event
https://github.com/apache/airflow.git
Fix trigger event payload is not persisted in db (#22944) Co-authored-by: Kaxil Naik <[email protected]> Co-authored-by: Ash Berlin-Taylor <[email protected]>
157
0
9,106
11
1
43
def test_fsspec_filesystem(ray_start_regular_shared, tmp_path):
    df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
    table = pa.Table.from_pandas(df1)
    path1 = os.path.join(str(tmp_path), "test1.parquet")
    pq.write_table(table, path1)
    df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
    table = pa.Table.from_pandas(df2)
    path2 = os.path.join(str(tmp_path), "test2.parquet")
    pq.write_table(table, path2)

    fs = LocalFileSystem()
    ds = ray.data.read_parquet([path1, path2], filesystem=fs)

    # Test metadata-only parquet ops.
    assert ds._blocks._num_computed() == 1
    assert ds.count() == 6

    out_path = os.path.join(tmp_path, "out")
    os.mkdir(out_path)
    ds._set_uuid("data")
    ds.write_parquet(out_path)
    ds_df1 = pd.read_parquet(os.path.join(out_path, "data_000000.parquet"))
    ds_df2 = pd.read_parquet(os.path.join(out_path, "data_000001.parquet"))
    ds_df = pd.concat([ds_df1, ds_df2])
    df = pd.concat([df1, df2])
    assert ds_df.equals(df)


@pytest.mark.parametrize(
    "fs,data_path",
    [
        (None, lazy_fixture("local_path")),
        (lazy_fixture("local_fs"), lazy_fixture("local_path")),
        (lazy_fixture("s3_fs"), lazy_fixture("s3_path")),
        (
            lazy_fixture("s3_fs_with_space"),
            lazy_fixture("s3_path_with_space"),
        ),  # Path contains space.
    ],
)
python/ray/data/tests/test_dataset_formats.py
538
@pytest.mark.parametrize( "fs,data_path", [ (None, lazy_fixture("local_path")), (lazy_fixture("local_fs"), lazy_fixture("local_path")), (lazy_fixture("s3_fs"), lazy_fixture("s3_path")), ( lazy_fixture("s3_fs_with_space"), lazy_fixture("s3_path_with_space"), ), # Path contains space. ], )
ray
{ "docstring": "Same as `test_parquet_write` but using a custom, fsspec filesystem.\n\n TODO (Alex): We should write a similar test with a mock PyArrow fs, but\n unfortunately pa.fs._MockFileSystem isn't serializable, so this may require\n some effort.\n ", "language": "en", "n_whitespaces": 45, "n_words": 33, "vocab_size": 30 }
106
Python
83
85d6946c9524d8544e69262f737018151efb1567
test_dataset_formats.py
144,772
22
266
test_fsspec_filesystem
https://github.com/ray-project/ray.git
Split test_dataset.py into two (#22303)
240
1
33,307
12
8
17
def dtype(x, *, canonicalize=False):
    if x is None:
        raise ValueError(f"Invalid argument to dtype: {x}.")
    elif isinstance(x, type) and x in python_scalar_dtypes:
        dt = python_scalar_dtypes[x]
    elif type(x) in python_scalar_dtypes:
        dt = python_scalar_dtypes[type(x)]
    elif jax.core.is_opaque_dtype(getattr(x, 'dtype', None)):
        dt = x.dtype
    else:
        dt = np.result_type(x)
    if dt not in _jax_dtype_set:
        raise TypeError(f"Value '{x}' with dtype {dt} is not a valid JAX array "
                        "type. Only arrays of numeric types are supported by JAX.")
    return canonicalize_dtype(dt) if canonicalize else dt
jax/_src/dtypes.py
193
jax
{ "docstring": "Return the dtype object for a value or type, optionally canonicalized based on X64 mode.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
76
Python
57
6d2aaac2454117d54997243714c1a009827707ca
dtypes.py
122,245
15
112
dtype
https://github.com/google/jax.git
implement bint arrays (opaque dtypes), add padding rules Co-authored-by: Sharad Vikram <[email protected]>
121
0
27,139
12
3
11
def calc_position(self, x):
    if x < self.x[0]:
        return None
    elif x > self.x[-1]:
        return None

    i = self.__search_index(x)
    dx = x - self.x[i]
    position = self.a[i] + self.b[i] * dx + \
        self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0

    return position
PathPlanning/CubicSpline/cubic_spline_planner.py
141
PythonRobotics
{ "docstring": "\n Calc `y` position for given `x`.\n\n if `x` is outside the data point's `x` range, return None.\n\n Returns\n -------\n y : float\n y position for given x.\n ", "language": "en", "n_whitespaces": 81, "n_words": 27, "vocab_size": 22 }
45
Python
29
def289b723e9216830c2a7b2577cb31b55710167
cubic_spline_planner.py
19,355
10
97
calc_position
https://github.com/AtsushiSakai/PythonRobotics.git
enhance cubic spline path doc (#698) * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cublic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc * enhance cubic spline path doc
127
0
2,943
12
5
31
def _enable_ocsp_stapling(self, ssl_vhost, unused_options):
    min_apache_ver = (2, 3, 3)
    if self.get_version() < min_apache_ver:
        raise errors.PluginError(
            "Unable to set OCSP directives.\n"
            "Apache version is below 2.3.3.")

    if "socache_shmcb_module" not in self.parser.modules:
        self.enable_mod("socache_shmcb")

    # Check if there's an existing SSLUseStapling directive on.
    use_stapling_aug_path = self.parser.find_dir("SSLUseStapling",
                                                 "on", start=ssl_vhost.path)
    if not use_stapling_aug_path:
        self.parser.add_dir(ssl_vhost.path, "SSLUseStapling", "on")

    ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))

    # Check if there's an existing SSLStaplingCache directive.
    stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',
                                                   None, ssl_vhost_aug_path)

    # We'll simply delete the directive, so that we'll have a
    # consistent OCSP cache path.
    if stapling_cache_aug_path:
        self.parser.aug.remove(
            re.sub(r"/\w*$", "", stapling_cache_aug_path[0]))

    self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,
                                    "SSLStaplingCache",
                                    ["shmcb:/var/run/apache2/stapling_cache(128000)"])

    msg = "OCSP Stapling was enabled on SSL Vhost: %s.\n"%(
        ssl_vhost.filep)
    self.save_notes += msg
    self.save()
    logger.info(msg)
certbot-apache/certbot_apache/_internal/configurator.py
311
certbot
{ "docstring": "Enables OCSP Stapling\n\n In OCSP, each client (e.g. browser) would have to query the\n OCSP Responder to validate that the site certificate was not revoked.\n\n Enabling OCSP Stapling, would allow the web-server to query the OCSP\n Responder, and staple its response to the offered certificate during\n TLS. i.e. clients would not have to query the OCSP responder.\n\n OCSP Stapling enablement on Apache implicitly depends on\n SSLCertificateChainFile being set by other code.\n\n .. note:: This function saves the configuration\n\n :param ssl_vhost: Destination of traffic, an ssl enabled vhost\n :type ssl_vhost: :class:`~certbot_apache._internal.obj.VirtualHost`\n\n :param unused_options: Not currently used\n :type unused_options: Not Available\n\n :returns: Success, general_vhost (HTTP vhost)\n :rtype: (bool, :class:`~certbot_apache._internal.obj.VirtualHost`)\n\n ", "language": "en", "n_whitespaces": 212, "n_words": 107, "vocab_size": 78 }
108
Python
89
eeca208c8f57304590ac1af80b496e61021aaa45
configurator.py
186,377
26
182
_enable_ocsp_stapling
https://github.com/certbot/certbot.git
Various clean-ups in certbot-apache. Use f-strings. (#9132) * Various clean-ups in certbot-apache. Use f-strings. * Smaller tweaks
518
0
45,473
12
5
33
def test_upgrade(tctx, proto):
    if proto != "websocket":
        tctx.options.websocket = False
    if proto != "tcp":
        tctx.options.rawtcp = False

    tctx.server.address = ("example.com", 80)
    tctx.server.state = ConnectionState.OPEN
    http_flow = Placeholder(HTTPFlow)
    playbook = Playbook(http.HttpLayer(tctx, HTTPMode.transparent))
    (
        playbook
        >> DataReceived(
            tctx.client,
            b"GET / HTTP/1.1\r\n"
            b"Connection: upgrade\r\n"
            b"Upgrade: websocket\r\n"
            b"Sec-WebSocket-Version: 13\r\n"
            b"\r\n",
        )
        << http.HttpRequestHeadersHook(http_flow)
        >> reply()
        << http.HttpRequestHook(http_flow)
        >> reply()
        << SendData(
            tctx.server,
            b"GET / HTTP/1.1\r\n"
            b"Connection: upgrade\r\n"
            b"Upgrade: websocket\r\n"
            b"Sec-WebSocket-Version: 13\r\n"
            b"\r\n",
        )
        >> DataReceived(
            tctx.server,
            b"HTTP/1.1 101 Switching Protocols\r\n"
            b"Upgrade: websocket\r\n"
            b"Connection: Upgrade\r\n"
            b"\r\n",
        )
        << http.HttpResponseHeadersHook(http_flow)
        >> reply()
        << http.HttpResponseHook(http_flow)
        >> reply()
        << SendData(
            tctx.client,
            b"HTTP/1.1 101 Switching Protocols\r\n"
            b"Upgrade: websocket\r\n"
            b"Connection: Upgrade\r\n"
            b"\r\n",
        )
    )
    if proto == "websocket":
        assert playbook << WebsocketStartHook(http_flow)
    elif proto == "tcp":
        assert playbook << TcpStartHook(Placeholder(TCPFlow))
    else:
        assert (
            playbook
            << Log(
                "Sent HTTP 101 response, but no protocol is enabled to upgrade to.",
                "warn",
            )
            << CloseConnection(tctx.client)
        )
test/mitmproxy/proxy/layers/http/test_http.py
428
mitmproxy
{ "docstring": "Test a HTTP -> WebSocket upgrade with different protocols enabled", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
143
Python
70
b3587b52b25077f68116b9852b041d33e7fc6601
test_http.py
251,864
63
256
test_upgrade
https://github.com/mitmproxy/mitmproxy.git
make it black!
656
0
73,865
22
2
12
def copy(self) -> "ExecutionPlan":
    plan_copy = ExecutionPlan(
        self._in_blocks, self._in_stats, run_by_consumer=self._run_by_consumer
    )
    if self._snapshot_blocks is not None:
        # Copy over the existing snapshot.
        plan_copy._snapshot_blocks = self._snapshot_blocks
        plan_copy._snapshot_stats = self._snapshot_stats
    plan_copy._stages_before_snapshot = self._stages_before_snapshot.copy()
    plan_copy._stages_after_snapshot = self._stages_after_snapshot.copy()
    return plan_copy
python/ray/data/_internal/plan.py
118
ray
{ "docstring": "Create a shallow copy of this execution plan.\n\n This copy can be executed without mutating the original, but clearing the copy\n will also clear the original.\n\n Returns:\n A shallow copy of this execution plan.\n ", "language": "en", "n_whitespaces": 73, "n_words": 34, "vocab_size": 24 }
36
Python
30
8553df49bba654a9edd6befce198be90d6524fca
plan.py
125,418
18
72
copy
https://github.com/ray-project/ray.git
Make execution plan/blocklist aware of the memory ownership and who runs the plan (#26650) Having the indicator about who's running the stage and who created a blocklist will enable the eager memory releasing. This is an alternative with better abstraction to https://github.com/ray-project/ray/pull/26196. Note: this doesn't work for Dataset.split() yet, will do in a followup PR.
129
0
27,862
10
2
6
def test_select_on_save_lying_update(self):
    # Change the manager to not return "row matched" for update().
    # We are going to change the Article's _base_manager class
    # dynamically. This is a bit of a hack, but it seems hard to
    # test this properly otherwise. Article's manager, because
    # proxy models use their parent model's _base_manager.
    orig_class = Article._base_manager._queryset_class
tests/basic/tests.py
29
django
{ "docstring": "\n select_on_save works correctly if the database doesn't return correct\n information about matched rows from UPDATE.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
56
Python
47
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
201,891
23
125
test_select_on_save_lying_update
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
105
0
50,017
8
3
15
def generate_config():
    config_path = get_config_file()
    if config_path.exists():
        overwrite_config = input("Config file already exists. Overwrite? (y/N): ")
        if overwrite_config.lower() != "y":
            print("Exiting...")
            return None

    with open(config_path, "w", encoding="utf-8") as config_file:
        json.dump(DEFAULT_CONFIG, config_file, indent=4)

    print(f"Config file generated at {config_path}")
    return None
spotdl/utils/console.py
134
spotify-downloader
{ "docstring": "\n Generate the config file if it doesn't exist\n This is done before the argument parser so it doesn't requires `operation`\n and `query` to be passed.\n ", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 22 }
39
Python
34
deca40c2e26afed62e1f9ec4be14aff9e125929b
console.py
30,423
11
71
generate_config
https://github.com/spotDL/spotify-downloader.git
moved console actions to a new file
100
0
5,567
12
3
7
def timezone(self):
    if not settings.USE_TZ:
        return None
    elif self.settings_dict["TIME_ZONE"] is None:
        return timezone.utc
    else:
        return timezone_constructor(self.settings_dict["TIME_ZONE"])
django/db/backends/base/base.py
70
django
{ "docstring": "\n Return a tzinfo of the database connection time zone.\n\n This is only used when time zone support is enabled. When a datetime is\n read from the database, it is always returned in this time zone.\n\n When the database backend supports time zones, it doesn't matter which\n time zone Django uses, as long as aware datetimes are used everywhere.\n Other users connecting to the database can choose their own time zone.\n\n When the database backend doesn't support time zones, the time zone\n Django uses may be constrained by the requirements of other users of\n the database.\n ", "language": "en", "n_whitespaces": 166, "n_words": 95, "vocab_size": 57 }
16
Python
14
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
204,817
7
40
timezone
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
77
0
50,902
12
7
37
def check_video_data(path_control_data, path_video_gen):
    # movie file specification
    path_sec_gen = Path(path_video_gen).parent.absolute() / "sections"
    control_data = load_control_data(path_control_data)
    movie_meta_gen = get_video_metadata(path_video_gen)
    movie_meta_exp = control_data["movie_metadata"]
    assert_shallow_dict_compare(
        movie_meta_gen, movie_meta_exp, "Movie file metadata mismatch:"
    )

    # sections directory layout
    sec_dir_layout_gen = set(get_section_dir_layout(path_sec_gen))
    sec_dir_layout_exp = set(control_data["section_dir_layout"])
    unexp_gen = sec_dir_layout_gen - sec_dir_layout_exp
    ungen_exp = sec_dir_layout_exp - sec_dir_layout_gen
    if len(unexp_gen) or len(ungen_exp):
        dif = [f"'{dif}' got unexpectedly generated" for dif in unexp_gen] + [
            f"'{dif}' didn't get generated" for dif in ungen_exp
        ]
        mismatch = "\n".join(dif)
        raise AssertionError(f"Sections don't match:\n{mismatch}")

    # sections index file
    scene_name = Path(path_video_gen).stem
    path_sec_index_gen = path_sec_gen / f"{scene_name}.json"
    sec_index_gen = get_section_index(path_sec_index_gen)
    sec_index_exp = control_data["section_index"]
    if len(sec_index_gen) != len(sec_index_exp):
        raise AssertionError(
            f"expected {len(sec_index_exp)} sections ({', '.join([el['name'] for el in sec_index_exp])}), but {len(sec_index_gen)} ({', '.join([el['name'] for el in sec_index_gen])}) got generated (in '{path_sec_index_gen}')"
        )
    # check individual sections
    for sec_gen, sec_exp in zip(sec_index_gen, sec_index_exp):
        assert_shallow_dict_compare(
            sec_gen,
            sec_exp,
            # using json to pretty print dicts
            f"Section {json.dumps(sec_gen, indent=4)} (in '{path_sec_index_gen}') doesn't match expected Section (in '{json.dumps(sec_exp, indent=4)}'):",
        )
tests/utils/video_tester.py
444
manim
{ "docstring": "Compare control data with generated output.\n Used abbreviations:\n exp -> expected\n gen -> generated\n sec -> section\n meta -> metadata\n ", "language": "en", "n_whitespaces": 57, "n_words": 20, "vocab_size": 16 }
160
Python
107
7a13f0e969e6a70af9084cdbf41cea49c7f1813c
video_tester.py
189,828
32
185
check_video_data
https://github.com/ManimCommunity/manim.git
Migrate from os.path to pathlib in Testing Scripts (#2685) * updated styling * final commit * fixed style * removed exist_ok=true * added parents=True * potentially .exists() is the problem * fixed style' * fixed style on revisions * style check processed * Update tests/helpers/graphical_units.py Co-authored-by: ad_chaos <[email protected]> * fixed changes * made get_dir_layout also accept path. * removed small auto import error Co-authored-by: ad_chaos <[email protected]> Co-authored-by: Naveen M K <[email protected]>
355
0
46,227
16
11
31
def remove_categories(self, removals, inplace=no_default):
    if inplace is not no_default:
        warn(
            "The `inplace` parameter in pandas.Categorical."
            "remove_categories is deprecated and will be removed in "
            "a future version. Removing unused categories will always "
            "return a new Categorical object.",
            FutureWarning,
            stacklevel=find_stack_level(inspect.currentframe()),
        )
    else:
        inplace = False

    inplace = validate_bool_kwarg(inplace, "inplace")
    if not is_list_like(removals):
        removals = [removals]

    removal_set = set(removals)
    not_included = removal_set - set(self.dtype.categories)
    new_categories = [c for c in self.dtype.categories if c not in removal_set]

    # GH 10156
    if any(isna(removals)):
        not_included = {x for x in not_included if notna(x)}
        new_categories = [x for x in new_categories if notna(x)]

    if len(not_included) != 0:
        raise ValueError(f"removals must all be in old categories: {not_included}")

    with catch_warnings():
        simplefilter("ignore")
        return self.set_categories(
            new_categories, ordered=self.ordered, rename=False, inplace=inplace
        )
pandas/core/arrays/categorical.py
300
pandas
{ "docstring": "\n Remove the specified categories.\n\n `removals` must be included in the old categories. Values which were in\n the removed categories will be set to NaN\n\n Parameters\n ----------\n removals : category or list of categories\n The categories which should be removed.\n inplace : bool, default False\n Whether or not to remove the categories inplace or return a copy of\n this categorical with removed categories.\n\n .. deprecated:: 1.3.0\n\n Returns\n -------\n cat : Categorical or None\n Categorical with removed categories or None if ``inplace=True``.\n\n Raises\n ------\n ValueError\n If the removals are not contained in the categories\n\n See Also\n --------\n rename_categories : Rename categories.\n reorder_categories : Reorder categories.\n add_categories : Add new categories.\n remove_unused_categories : Remove categories which are not used.\n set_categories : Set the categories to the specified ones.\n\n Examples\n --------\n >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd'])\n >>> c\n ['a', 'c', 'b', 'c', 'd']\n Categories (4, object): ['a', 'b', 'c', 'd']\n\n >>> c.remove_categories(['d', 'a'])\n [NaN, 'c', 'b', 'c', NaN]\n Categories (2, object): ['b', 'c']\n ", "language": "en", "n_whitespaces": 435, "n_words": 162, "vocab_size": 94 }
121
Python
84
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
categorical.py
168,200
28
181
remove_categories
https://github.com/pandas-dev/pandas.git
PERF cache find_stack_level (#48023) cache stacklevel
420
0
40,226
14
2
8
def enter(self, *path): return Meta(self._meta, path=self._path + [str(p) for p in path])
src/sentry/utils/meta.py
52
sentry
{ "docstring": "\n Enters into sub meta data at the specified path. This always returns a\n new ``Meta`` object, regardless whether the path already exists.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 21 }
12
Python
12
522d6f27c28dc5fd4d996ed605865c42fbda0da8
meta.py
93,031
2
33
enter
https://github.com/getsentry/sentry.git
ref: replace legacy compat.map with list comprehensions (#36372)
26
0
18,968
12
9
26
def data_for_grouping(dtype):
    pa_dtype = dtype.pyarrow_dtype
    if pa.types.is_boolean(pa_dtype):
        A = False
        B = True
        C = True
    elif pa.types.is_floating(pa_dtype):
        A = -1.1
        B = 0.0
        C = 1.1
    elif pa.types.is_signed_integer(pa_dtype):
        A = -1
        B = 0
        C = 1
    elif pa.types.is_unsigned_integer(pa_dtype):
        A = 0
        B = 1
        C = 10
    elif pa.types.is_date(pa_dtype):
        A = date(1999, 12, 31)
        B = date(2010, 1, 1)
        C = date(2022, 1, 1)
    elif pa.types.is_timestamp(pa_dtype):
        A = datetime(1999, 1, 1, 1, 1, 1, 1)
        B = datetime(2020, 1, 1)
        C = datetime(2020, 1, 1, 1)
    elif pa.types.is_duration(pa_dtype):
        A = timedelta(-1)
        B = timedelta(0)
        C = timedelta(1, 4)
    elif pa.types.is_time(pa_dtype):
        A = time(0, 0)
        B = time(0, 12)
        C = time(12, 12)
    else:
        raise NotImplementedError
    return pd.array([B, B, None, None, A, A, B, C], dtype=dtype)


@pytest.fixture
pandas/tests/extension/test_arrow.py
424
@pytest.fixture
pandas
{ "docstring": "\n Data for factorization, grouping, and unique tests.\n\n Expected to be like [B, B, NA, NA, A, A, B, C]\n\n Where A < B < C and NA is missing\n ", "language": "en", "n_whitespaces": 42, "n_words": 29, "vocab_size": 24 }
128
Python
55
b81f4318fc0d796760d16237a8f616dad73912eb
test_arrow.py
167,622
37
281
data_for_grouping
https://github.com/pandas-dev/pandas.git
ENH/TST: Add BaseGroupbyTests tests for ArrowExtensionArray (#47515)
338
1
40,070
12
4
12
def _end_of_line(self, y):
    self._update_max_yx()
    last = self.maxx
    while True:
        if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP:
            last = min(self.maxx, last+1)
            break
        elif last == 0:
            break
        last = last - 1
    return last
python3.10.4/Lib/curses/textpad.py
116
XX-Net
{ "docstring": "Go to the location of the first blank on the given line,\n returning the index of the last non-blank character.", "language": "en", "n_whitespaces": 26, "n_words": 20, "vocab_size": 15 }
31
Python
23
8198943edd73a363c266633e1aa5b2a9e9c9f526
textpad.py
222,302
11
72
_end_of_line
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
144
0
56,535
13
1
5
def hw_version(self) -> str | None: return self.status.get("FIRMWARE")
homeassistant/components/apcupsd/__init__.py
35
core
{ "docstring": "Return the firmware version of the UPS, if available.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
8
Python
8
52307708c843b947a2d631f2fe7ddaa8bd9a90d7
__init__.py
288,026
3
19
hw_version
https://github.com/home-assistant/core.git
Refactor apcupsd to use config flow (#64809) * Add Config Flow to APCUPSd integration and remove YAML support. * Hide the binary sensor if user does not select STATFLAG resource. * Add tests for config flows. * Simplify config flow code. * Spell fix. * Fix pylint warnings. * Simplify the code for config flow. * First attempt to implement import flows to suppport legacy YAML configurations. * Remove unnecessary log calls. * Wrap synchronous update call with `hass.async_add_executor_job`. * Import the YAML configurations when sensor platform is set up. * Move the logger call since the variables are not properly set up. * Add codeowner. * Fix name field of manifest.json. * Fix linting issue. * Fix incorrect dependency due to incorrect rebase. * Update codeowner and config flows via hassfest. * Postpone the deprecation warning to 2022.7. * Import future annotations for init file. * Add an newline at the end to make prettier happy. * Update github id. * Add type hints for return types of steps in config flow. * Move the deprecation date for YAML config to 2022.12. * Update according to reviews. * Use async_forward_entry_setups. * Add helper properties to `APCUPSdData` class. * Add device_info for binary sensor. * Simplify config flow. * Remove options flow strings. * update the tests according to the changes. * Add `entity_registry_enabled_default` to entities and use imported CONF_RESOURCES to disable entities instead of skipping them. * Update according to reviews. * Do not use model of the UPS as the title for the integration. Instead, simply use "APCUPSd" as the integration title and let the device info serve as title for each device instead. * Change schema to be a global variable. * Add more comments. * Rewrite the tests for config flows. * Fix enabled_by_default. * Show friendly titles in the integration. * Add import check in `async_setup_platform` to avoid importing in sensor platform setup. * Add import check in `async_setup_platform` to avoid importing in sensor platform setup. * Update comments in test files. * Use parametrize instead of manually iterating different test cases. * Swap the order of the platform constants. * Avoid using broad exceptions. * Set up device info via `_attr_device_info`. * Remove unrelated test in `test_config_flow`. * Use `DeviceInfo` instead of dict to assign to `_attr_device_info`. * Add english translation. * Add `async_create_issue` for deprecated YAML configuration. * Enable UPS status by default since it could show "online, charging, on battery etc" which is meaningful for all users. * Apply suggestions from code review * Apply suggestion * Apply suggestion Co-authored-by: Martin Hjelmare <[email protected]>
22
0
87,207
8
1
9
def print_help(self):
    help_text = f
    console.print(text=help_text, menu="Stocks - Options - Pricing")
gamestonk_terminal/stocks/options/pricing_controller.py
60
OpenBBTerminal
{ "docstring": "Print help\n[param]Ticker: [/param]{self.ticker or None}\n[param]Expiry: [/param]{self.selected_date or None}\n[cmds]\n add add an expected price to the list\n rmv remove an expected price from the list\n\n show show the listed of expected prices\n rnval risk neutral valuation for an option[/cmds]\n ", "language": "en", "n_whitespaces": 94, "n_words": 41, "vocab_size": 29 }
11
Python
10
82747072c511beb1b2672846ae2ee4aec53eb562
pricing_controller.py
281,554
12
22
print_help
https://github.com/OpenBB-finance/OpenBBTerminal.git
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
32
0
83,851
10
5
14
def version_parts(self, best=False):
    # type: (bool) -> Tuple[str, str, str]
    version_str = self.version(best=best)
    if version_str:
        version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")
        matches = version_regex.match(version_str)
        if matches:
            major, minor, build_number = matches.groups()
            return major, minor or "", build_number or ""
    return "", "", ""
pipenv/patched/notpip/_vendor/distro.py
122
pipenv
{ "docstring": "\n Return the version of the OS distribution, as a tuple of version\n numbers.\n\n For details, see :func:`distro.version_parts`.\n ", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 14 }
40
Python
29
f3166e673fe8d40277b804d35d77dcdb760fc3b3
distro.py
20,062
9
70
version_parts
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
138
0
3,208
12
4
11
def _validate_estimators(self):
    if len(self.estimators) == 0:
        raise ValueError(
            "Invalid 'estimators' attribute, 'estimators' should be a "
            "non-empty list of (string, estimator) tuples."
        )
    names, estimators = zip(*self.estimators)
    self._validate_names(names)

    has_estimator = any(est != "drop" for est in estimators)
    if not has_estimator:
        raise ValueError(
            "All estimators are dropped. At least one is required "
            "to be an estimator."
        )
    return names, estimators
sklearn/ensemble/_stacking.py
118
scikit-learn
{ "docstring": "Overload the method of `_BaseHeterogeneousEnsemble` to be more\n lenient towards the type of `estimators`.\n\n Regressors can be accepted for some cases such as ordinal regression.\n ", "language": "en", "n_whitespaces": 46, "n_words": 25, "vocab_size": 22 }
60
Python
49
b1807ff8ead319a08294beeaae90c3f03b2bb8ac
_stacking.py
261,489
15
65
_validate_estimators
https://github.com/scikit-learn/scikit-learn.git
ENH StackingClassifier allows regressors in its first layer (#24538) Co-authored-by: Tom Dupré la Tour <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
213
0
76,834
11
1
6
def head(self, url, **kwargs):
    r
    kwargs.setdefault("allow_redirects", False)
    return self.request("HEAD", url, **kwargs)
pipenv/patched/pip/_vendor/requests/sessions.py
52
pipenv
{ "docstring": "Sends a HEAD request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :rtype: requests.Response\n ", "language": "en", "n_whitespaces": 52, "n_words": 24, "vocab_size": 22 }
11
Python
10
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
sessions.py
22,110
9
32
head
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
31
0
4,186
8
1
18
def test_consent(self) -> None:
    # Have the admin user accept the terms.
    self.get_success(self.store.user_set_consent_version(self.admin_user, "1.0"))

    # First, cheekily accept the terms and create a room
    self.get_success(self.store.user_set_consent_version(self.other_user, "1.0"))
    room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok)
    self.helper.send_event(room_id, "com.example.test", tok=self.other_user_tok)

    # Now unaccept it and check that we can't send an event
    self.get_success(self.store.user_set_consent_version(self.other_user, "0.0"))
    self.helper.send_event(
        room_id,
        "com.example.test",
        tok=self.other_user_tok,
        expect_code=HTTPStatus.FORBIDDEN,
    )

    # Login in as the user
    puppet_token = self._get_token()

    # Sending an event on their behalf should work fine
    self.helper.send_event(room_id, "com.example.test", tok=puppet_token)
tests/rest/admin/test_user.py
225
synapse
{ "docstring": "Test that sending a message is not subject to the privacy policies.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
75
Python
56
901b264c0c88f39cbfb8b2229e0dc57968882658
test_user.py
246,185
15
137
test_consent
https://github.com/matrix-org/synapse.git
Add type hints to `tests/rest/admin` (#11851)
224
0
71,079
10
1
17
def dry_run_migrations() -> None:
    url = db_interface.database_config.connection_url
    context.script.version_locations = [db_interface.orm.versions_dir]
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        include_schemas=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()
src/prefect/orion/database/migrations/env.py
113
prefect
{ "docstring": "\n Perform a dry run of migrations.\n\n This will create the sql statements without actually running them against the\n database and output them to stdout.\n ", "language": "en", "n_whitespaces": 37, "n_words": 24, "vocab_size": 22 }
21
Python
20
36e7e0838aeaffc9492b330297e4905f3ab4b11f
env.py
53,274
18
68
dry_run_migrations
https://github.com/PrefectHQ/prefect.git
code review revisions pt3
81
0
10,769
11
1
24
def test_fk_with_to_field(self):
    modeladmin = EmployeeAdmin(Employee, site)
    request = self.request_factory.get("/", {})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.jack, self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, "department")
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[0]["display"], "All")
    self.assertIs(choices[0]["selected"], True)
    self.assertEqual(choices[0]["query_string"], "?")
    self.assertEqual(choices[1]["display"], "Development")
    self.assertIs(choices[1]["selected"], False)
    self.assertEqual(choices[1]["query_string"], "?department__code__exact=DEV")
    self.assertEqual(choices[2]["display"], "Design")
    self.assertIs(choices[2]["selected"], False)
    self.assertEqual(choices[2]["query_string"], "?department__code__exact=DSN")

    # Filter by Department=='Development' --------------------------------
    request = self.request_factory.get("/", {"department__code__exact": "DEV"})
    request.user = self.alfred
    changelist = modeladmin.get_changelist_instance(request)

    # Make sure the correct queryset is returned
    queryset = changelist.get_queryset(request)
    self.assertEqual(list(queryset), [self.john])

    filterspec = changelist.get_filters(request)[0][-1]
    self.assertEqual(filterspec.title, "department")
    choices = list(filterspec.choices(changelist))
    self.assertEqual(choices[0]["display"], "All")
    self.assertIs(choices[0]["selected"], False)
    self.assertEqual(choices[0]["query_string"], "?")
    self.assertEqual(choices[1]["display"], "Development")
    self.assertIs(choices[1]["selected"], True)
    self.assertEqual(choices[1]["query_string"], "?department__code__exact=DEV")
    self.assertEqual(choices[2]["display"], "Design")
    self.assertIs(choices[2]["selected"], False)
    self.assertEqual(choices[2]["query_string"], "?department__code__exact=DSN")
tests/admin_filters/tests.py
741
django
{ "docstring": "\n A filter on a FK respects the FK's to_field attribute (#17972).\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
111
Python
55
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,124
36
447
test_fk_with_to_field
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
384
0
51,871
11
1
4
def placeholder_value(self, placeholder_context=None): return self._value
keras/mixed_precision/autocast_variable.py
24
keras
{ "docstring": "Use the AutoCastVariable value itself as a placeholder.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
5
Python
5
ed99e34f279a2d2d6a44af87ee64f8fc98c7e8b9
autocast_variable.py
280,806
2
14
placeholder_value
https://github.com/keras-team/keras.git
Implement TraceType for AutoCastVariable to support tracing with tf.function layering efforts. PiperOrigin-RevId: 498447924
19
0
83,439
6
2
33
def save_config(self) -> TritonArtifact:
    device = self.device
    if self.inference_stage != PREDICTOR:
        device = "cpu"
    self.config = TritonConfig(
        self.full_model_name,
        self.input_features,
        self.output_features,
        self.max_batch_size,
        self.max_queue_delay_microseconds,
        device,
        self.model_instance_count,
        self.inference_stage,
    )
    config_path = os.path.join(self.base_path, "config.pbtxt")
    with open(config_path, "w") as f:
        formatted_config = remove_empty_lines(self.config.get_model_config())
        f.write(formatted_config)
    config_artifact = TritonArtifact(
        model_name=self.full_model_name,
        model_version=self.model_version,
        platform="pytorch_libtorch",
        path=config_path,
        content_type="text/x-protobuf",
        content_length=os.path.getsize(config_path),
    )
    return config_artifact


@dataclass
ludwig/utils/triton_utils.py
231
@dataclass
ludwig
{ "docstring": "Save the Triton config.\n\n Return the appropriate artifact.\n ", "language": "en", "n_whitespaces": 22, "n_words": 8, "vocab_size": 7 }
52
Python
44
ed8d9cf20843744f18593b22fb6a30eaf5f325eb
triton_utils.py
7,512
31
144
save_config
https://github.com/ludwig-ai/ludwig.git
Triton ensemble export (#2251)
308
1
1,224
13
1
5
def abort(self):
    self._ssl_protocol._abort()
    self._closed = True
python3.10.4/Lib/asyncio/sslproto.py
33
XX-Net
{ "docstring": "Close the transport immediately.\n\n Buffered data will be lost. No more data will be received.\n The protocol's connection_lost() method will (eventually) be\n called with None as its argument.\n ", "language": "en", "n_whitespaces": 57, "n_words": 28, "vocab_size": 23 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
sslproto.py
220,732
3
18
abort
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
27
0
56,106
8
5
15
def clone(self):
    attrs = {}

    for field_name in getattr(self, 'clone_fields', []):
        field = self._meta.get_field(field_name)
        field_value = field.value_from_object(self)
        if field_value not in (None, ''):
            attrs[field_name] = field_value

    # Include tags (if applicable)
    if is_taggable(self):
        attrs['tags'] = [tag.pk for tag in self.tags.all()]

    return attrs
netbox/netbox/models/__init__.py
140
netbox
{ "docstring": "\n Return a dictionary of attributes suitable for creating a copy of the current instance. This is used for pre-\n populating an object creation form in the UI.\n ", "language": "en", "n_whitespaces": 49, "n_words": 27, "vocab_size": 23 }
42
Python
31
f9d81fd36232e9bf3f60a215d2c6a405b9b342fb
__init__.py
265,188
10
85
clone
https://github.com/netbox-community/netbox.git
Closes #9414: Add clone() method to NetBoxModel for copying instance attributes
143
0
78,019
13
6
16
def preprocess_datasets(self) -> None:
    # Evaluate all datasets.
    self.datasets = {k: d() if callable(d) else d for k, d in self.datasets.items()}

    if self.preprocessor:
        train_dataset = self.datasets.get(TRAIN_DATASET_KEY, None)
        if train_dataset:
            self.preprocessor.fit(train_dataset)

        # Execute dataset transformations serially for now.
        # Cannot execute them in remote tasks due to dataset ownership model:
        # if datasets are created on a remote node, then if that node fails,
        # we cannot recover the dataset.
        new_datasets = {}
        for key, dataset in self.datasets.items():
            new_datasets[key] = self.preprocessor.transform(dataset)

        self.datasets = new_datasets
python/ray/train/base_trainer.py
166
ray
{ "docstring": "Called during fit() to preprocess dataset attributes with preprocessor.\n\n .. note:: This method is run on a remote process.\n\n This method is called prior to entering the training_loop.\n\n If the ``Trainer`` has both a datasets dict and\n a preprocessor, the datasets dict contains a training dataset (denoted by\n the \"train\" key), and the preprocessor has not yet\n been fit, then it will be fit on the train dataset.\n\n Then, all Trainer's datasets will be transformed by the preprocessor.\n\n The transformed datasets will be set back in the ``self.datasets`` attribute\n of the Trainer to be used when overriding ``training_loop``.\n ", "language": "en", "n_whitespaces": 168, "n_words": 98, "vocab_size": 66 }
83
Python
61
f15ed3836d710f655856d5cd1dbbf40b08953f86
base_trainer.py
126,801
26
101
preprocess_datasets
https://github.com/ray-project/ray.git
[air] Render trainer docstring signatures (#27590) Signed-off-by: Richard Liaw <[email protected]>
240
0
28,271
13
10
37
def get_fixers(self):
    pre_order_fixers = []
    post_order_fixers = []
    for fix_mod_path in self.fixers:
        mod = __import__(fix_mod_path, {}, {}, ["*"])
        fix_name = fix_mod_path.rsplit(".", 1)[-1]
        if fix_name.startswith(self.FILE_PREFIX):
            fix_name = fix_name[len(self.FILE_PREFIX):]
        parts = fix_name.split("_")
        class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
        try:
            fix_class = getattr(mod, class_name)
        except AttributeError:
            raise FixerError("Can't find %s.%s" % (fix_name, class_name)) from None
        fixer = fix_class(self.options, self.fixer_log)
        if fixer.explicit and self.explicit is not True and \
                fix_mod_path not in self.explicit:
            self.log_message("Skipping optional fixer: %s", fix_name)
            continue

        self.log_debug("Adding transformation: %s", fix_name)
        if fixer.order == "pre":
            pre_order_fixers.append(fixer)
        elif fixer.order == "post":
            post_order_fixers.append(fixer)
        else:
            raise FixerError("Illegal fixer order: %r" % fixer.order)

    key_func = operator.attrgetter("run_order")
    pre_order_fixers.sort(key=key_func)
    post_order_fixers.sort(key=key_func)
    return (pre_order_fixers, post_order_fixers)
python3.10.4/Lib/lib2to3/refactor.py
403
XX-Net
{ "docstring": "Inspects the options to load the requested patterns and handlers.\n\n Returns:\n (pre_order, post_order), where pre_order is the list of fixers that\n want a pre-order AST traversal, and post_order is the list that want\n post-order traversal.\n ", "language": "en", "n_whitespaces": 76, "n_words": 35, "vocab_size": 27 }
108
Python
81
8198943edd73a363c266633e1aa5b2a9e9c9f526
refactor.py
218,885
30
245
get_fixers
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
446
0
55,529
15
2
8
def for_each_coloraxis(self, fn, selector=None, row=None, col=None) -> "Figure":
    for obj in self.select_coloraxes(selector=selector, row=row, col=col):
        fn(obj)
    return self
packages/python/plotly/plotly/graph_objs/_figure.py
73
plotly.py
{ "docstring": "\n Apply a function to all coloraxis objects that satisfy the\n specified selection criteria\n\n Parameters\n ----------\n fn:\n Function that inputs a single coloraxis object.\n selector: dict, function, or None (default None)\n Dict to use as selection criteria.\n coloraxis objects will be selected if they contain\n properties corresponding to all of the dictionary's keys, with\n values that exactly match the supplied values. If None\n (the default), all coloraxis objects are selected. If a\n function, it must be a function accepting a single argument and\n returning a boolean. The function will be called on each\n coloraxis and those for which the function returned True will\n be in the selection.\n row, col: int or None (default None)\n Subplot row and column index of coloraxis objects to select.\n To select coloraxis objects by row and column, the Figure\n must have been created using plotly.subplots.make_subplots.\n If None (the default), all coloraxis objects are selected.\n Returns\n -------\n self\n Returns the Figure object that the method was called on\n ", "language": "en", "n_whitespaces": 404, "n_words": 161, "vocab_size": 95 }
17
Python
17
c95b4fa4388f29e50b6966e45c94c5980013a01d
_figure.py
240,291
32
48
for_each_coloraxis
https://github.com/plotly/plotly.py.git
type annotations for chainable Figure methods
49
0
68,402
9
1
23
def test_run_query_with_multiple_groupby_orderby_null_values_in_first_entity(self):
    self.setup_orderby_data()
    self.store_transaction_metric(200, tags={"transaction": "baz_transaction"})
    query = MetricsQueryBuilder(
        self.params,
        f"project:{self.project.slug}",
        dataset=Dataset.PerformanceMetrics,
        selected_columns=[
            "transaction",
            "project",
            "p95(transaction.duration)",
            "count_unique(user)",
        ],
        orderby="count_unique(user)",
    )
    result = query.run_query("test_query")
    assert len(result["data"]) == 3
    assert result["data"][0] == {
        "transaction": resolve_tag_value(
            UseCaseKey.PERFORMANCE,
            self.organization.id,
            "baz_transaction",
        ),
        "project": self.project.id,
        "p95_transaction_duration": 200,
    }
    assert result["data"][1] == {
        "transaction": resolve_tag_value(
            UseCaseKey.PERFORMANCE,
            self.organization.id,
            "foo_transaction",
        ),
        "project": self.project.id,
        "p95_transaction_duration": 100,
        "count_unique_user": 1,
    }
    assert result["data"][2] == {
        "transaction": resolve_tag_value(
            UseCaseKey.PERFORMANCE,
            self.organization.id,
            "bar_transaction",
        ),
        "project": self.project.id,
        "p95_transaction_duration": 50,
        "count_unique_user": 2,
    }
tests/sentry/search/events/test_builder.py
357
sentry
{ "docstring": "But if the null value is in the first entity, it won't show up in the groupby values, which means the\n transaction will be missing", "language": "en", "n_whitespaces": 31, "n_words": 25, "vocab_size": 21 }
76
Python
48
bf416f7ad23d7537a84c9727cfe1c0a7effd27bb
test_builder.py
87,131
46
209
test_run_query_with_multiple_groupby_orderby_null_values_in_first_entity
https://github.com/getsentry/sentry.git
feat(discover): Only transform when ordering project (#39468) - This updates the querybuilder with a orderby resolver so we can implement more custom orderbys(orderbies?) in the future - This changes the project field to just select the project_id only, which results in needing a new post-processing capability to the querybuilder - This is done via the `value_resolver_map` and the `meta_resolver_map` - Removed the snuba_filter param from transform_results since we no longer use it - Removes the old discover 1 query since it shouldn't be supported and no longer is being used - Needed to update ds code too since it relied on the old project behaviour but doesn't use `discover.query`
582
0
18,229
12
1
20
def test_get_blame_for_file(self, get_jwt):
    responses.add(
        method=responses.POST,
        url="https://api.github.com/app/installations/1/access_tokens",
        body='{"token": "12345token", "expires_at": "2030-01-01T00:00:00Z"}',
        content_type="application/json",
    )
    path = "src/sentry/integrations/github/client.py"
    ref = "master"
    query = f
    responses.add(
        method=responses.POST,
        url="https://api.github.com/graphql",
        json={"query": query},
        content_type="application/json",
    )
    resp = self.client.get_blame_for_file(self.repo, path, ref)

    assert (
        responses.calls[1].request.body
        == b'{"query": "query {\\n repository(name: foo, owner: Test-Organization) {\\n ref(qualifiedName: master) {\\n target {\\n ... on Commit {\\n blame(path: src/sentry/integrations/github/client.py) {\\n ranges {\\n commit {\\n oid\\n author {\\n name\\n email\\n }\\n message\\n committedDate\\n }\\n startingLine\\n endingLine\\n age\\n }\\n }\\n }\\n }\\n }\\n }\\n }"}'
    )
    assert resp == []
tests/sentry/integrations/github/test_client.py
220
sentry
{ "docstring": "query {{\n repository(name: foo, owner: Test-Organization) {{\n ref(qualifiedName: {ref}) {{\n target {{\n ... on Commit {{\n blame(path: {path}) {{\n ranges {{\n commit {{\n oid\n author {{\n name\n email\n }}\n message\n committedDate\n }}\n startingLine\n endingLine\n age\n }}\n }}\n }}\n }}\n }}\n }}\n }}", "language": "en", "n_whitespaces": 792, "n_words": 42, "vocab_size": 26 }
85
Python
59
bdcd185bc020080da29961b9c60a5a0dabd3ab03
test_client.py
86,044
47
104
test_get_blame_for_file
https://github.com/getsentry/sentry.git
feat(commit-context): Process commit context task (#38984) ## Objective: We want to use the GitHub blame API to determine who is the committer for the first in_app stacktrace frame and assign them as a Suspect Commit GroupOwner. This task is feature flagged for an internal release.
1,022
0
18,075
11
3
8
def tosequence(x):
    if isinstance(x, np.ndarray):
        return np.asarray(x)
    elif isinstance(x, Sequence):
        return x
    else:
        return list(x)
sklearn/utils/__init__.py
66
scikit-learn
{ "docstring": "Cast iterable x to a Sequence, avoiding a copy if possible.\n\n Parameters\n ----------\n x : iterable\n The iterable to be converted.\n\n Returns\n -------\n x : Sequence\n If `x` is a NumPy array, it returns it as a `ndarray`. If `x`\n is a `Sequence`, `x` is returned as-is. If `x` is from any other\n type, `x` is returned casted as a list.\n ", "language": "en", "n_whitespaces": 110, "n_words": 61, "vocab_size": 37 }
15
Python
12
8abc6d890e8bb4be7abe2984b3f373585f8f3c57
__init__.py
258,993
7
40
tosequence
https://github.com/scikit-learn/scikit-learn.git
DOC Ensure that tosequence passes numpydoc validation (#22494) Co-authored-by: Guillaume Lemaitre <[email protected]>
48
0
75,512
10
1
2
def type(self): return self["type"]
packages/python/plotly/plotly/graph_objs/bar/_error_x.py
22
plotly.py
{ "docstring": "\n Determines the rule used to generate the error bars. If\n *constant`, the bar lengths are of a constant value. Set this\n constant in `value`. If \"percent\", the bar lengths correspond\n to a percentage of underlying data. Set this percentage in\n `value`. If \"sqrt\", the bar lengths correspond to the square of\n the underlying data. If \"data\", the bar lengths are set with\n data set `array`.\n\n The 'type' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['percent', 'constant', 'sqrt', 'data']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 199, "n_words": 90, "vocab_size": 55 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_error_x.py
228,646
2
11
type
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,319
7
5
46
def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter): # Precompute the validation split using the multiclass labels # to ensure proper balancing of the classes. validation_mask = self._make_validation_split(y) # Use joblib to fit OvA in parallel. # Pick the random seed for each job outside of fit_binary to avoid # sharing the estimator random state between threads which could lead # to non-deterministic behavior random_state = check_random_state(self.random_state) seeds = random_state.randint(MAX_INT, size=len(self.classes_)) result = Parallel( n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem" )( delayed(fit_binary)( self, i, X, y, alpha, C, learning_rate, max_iter, self._expanded_class_weight[i], 1.0, sample_weight, validation_mask=validation_mask, random_state=seed, ) for i, seed in enumerate(seeds) ) # take the maximum of n_iter_ over every binary fit n_iter_ = 0.0 for i, (_, intercept, n_iter_i) in enumerate(result): self.intercept_[i] = intercept n_iter_ = max(n_iter_, n_iter_i) self.t_ += n_iter_ * X.shape[0] self.n_iter_ = n_iter_ if self.average > 0: if self.average <= self.t_ - 1.0: self.coef_ = self._average_coef self.intercept_ = self._average_intercept else: self.coef_ = self._standard_coef self._standard_intercept = np.atleast_1d(self.intercept_) self.intercept_ = self._standard_intercept
sklearn/linear_model/_stochastic_gradient.py
355
scikit-learn
{ "docstring": "Fit a multi-class classifier by combining binary classifiers\n\n Each binary classifier predicts one class versus all others. This\n strategy is called OvA (One versus All) or OvR (One versus Rest).\n ", "language": "en", "n_whitespaces": 51, "n_words": 30, "vocab_size": 25 }
161
Python
106
5f75acdd12d77b973471961ad716367c6199d01c
_stochastic_gradient.py
258,738
38
243
_fit_multiclass
https://github.com/scikit-learn/scikit-learn.git
MNT Bump joblib version dependency to 1.0.0 (#22365)
652
0
75,397
14
17
36
def approximants(l, X=Symbol('x'), simplify=False): from sympy.simplify import simplify as simp from sympy.simplify.radsimp import denom p1, q1 = [S.One], [S.Zero] p2, q2 = [S.Zero], [S.One] while len(l): b = 0 while l[b]==0: b += 1 if b == len(l): return m = [S.One/l[b]] for k in range(b+1, len(l)): s = 0 for j in range(b, k): s -= l[j+1] * m[b-j-1] m.append(s/l[b]) l = m a, l[0] = l[0], 0 p = [0] * max(len(p2), b+len(p1)) q = [0] * max(len(q2), b+len(q1)) for k in range(len(p2)): p[k] = a*p2[k] for k in range(b, b+len(p1)): p[k] += p1[k-b] for k in range(len(q2)): q[k] = a*q2[k] for k in range(b, b+len(q1)): q[k] += q1[k-b] while p[-1]==0: p.pop() while q[-1]==0: q.pop() p1, p2 = p2, p q1, q2 = q2, q # yield result c = 1 for x in p: c = lcm(c, denom(x)) for x in q: c = lcm(c, denom(x)) out = ( sum(c*e*X**k for k, e in enumerate(p)) / sum(c*e*X**k for k, e in enumerate(q)) ) if simplify: yield(simp(out)) else: yield out return
sympy/series/approximants.py
689
sympy
{ "docstring": "\n Return a generator for consecutive Pade approximants for a series.\n It can also be used for computing the rational generating function of a\n series when possible, since the last approximant returned by the generator\n will be the generating function (if any).\n\n Explanation\n ===========\n\n The input list can contain more complex expressions than integer or rational\n numbers; symbols may also be involved in the computation. An example below\n show how to compute the generating function of the whole Pascal triangle.\n\n The generator can be asked to apply the sympy.simplify function on each\n generated term, which will make the computation slower; however it may be\n useful when symbols are involved in the expressions.\n\n Examples\n ========\n\n >>> from sympy.series import approximants\n >>> from sympy import lucas, fibonacci, symbols, binomial\n >>> g = [lucas(k) for k in range(16)]\n >>> [e for e in approximants(g)]\n [2, -4/(x - 2), (5*x - 2)/(3*x - 1), (x - 2)/(x**2 + x - 1)]\n\n >>> h = [fibonacci(k) for k in range(16)]\n >>> [e for e in approximants(h)]\n [x, -x/(x - 1), (x**2 - x)/(2*x - 1), -x/(x**2 + x - 1)]\n\n >>> x, t = symbols(\"x,t\")\n >>> p=[sum(binomial(k,i)*x**i for i in range(k+1)) for k in range(16)]\n >>> y = approximants(p, t)\n >>> for k in range(3): print(next(y))\n 1\n (x + 1)/((-x - 1)*(t*(x + 1) + (x + 1)/(-x - 1)))\n nan\n\n >>> y = approximants(p, t, simplify=True)\n >>> for k in range(3): print(next(y))\n 1\n -1/(t*(x + 1) - 1)\n nan\n\n See Also\n ========\n\n See function sympy.concrete.guess.guess_generating_function_rational and\n function mpmath.pade\n\n ", "language": "en", "n_whitespaces": 371, "n_words": 253, "vocab_size": 139 }
173
Python
96
f757f3daae6e11ea0cfb7dadc133274d8d74315f
approximants.py
196,814
96
447
approximants
https://github.com/sympy/sympy.git
Reordered imports 2
541
0
48,194
16
8
15
def deserialize(obj): if isinstance(obj, ObjectIDType): return ray.get(obj) elif isinstance(obj, (tuple, list)) and any( isinstance(o, ObjectIDType) for o in obj ): return ray.get(list(obj)) elif isinstance(obj, dict) and any( isinstance(val, ObjectIDType) for val in obj.values() ): return dict(zip(obj.keys(), ray.get(list(obj.values())))) else: return obj
modin/core/execution/ray/common/utils.py
177
modin
{ "docstring": "\n Deserialize a Ray object.\n\n Parameters\n ----------\n obj : ObjectIDType, iterable of ObjectIDType, or mapping of keys to ObjectIDTypes\n Object(s) to deserialize.\n\n Returns\n -------\n obj\n The deserialized object.\n ", "language": "en", "n_whitespaces": 66, "n_words": 27, "vocab_size": 22 }
40
Python
27
b22b93df20ad25ae7a11f0c89d32fb2f234d4641
utils.py
153,841
13
113
deserialize
https://github.com/modin-project/modin.git
FIX-#4464: Refactor Ray utils and quick fix groupby.count failing on virtual partitions (#4490) Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: jeffreykennethli <[email protected]>
103
0
35,654
18
1
4
async def async_added_to_hass(self) -> None: await self._async_restore_state() await super().async_added_to_hass()
homeassistant/components/here_travel_time/sensor.py
43
core
{ "docstring": "Wait for start so origin and destination entities can be resolved.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
9
Python
8
dffdc78915ad9d25f54be90ef62659b2c68de347
sensor.py
291,798
6
38
async_added_to_hass
https://github.com/home-assistant/core.git
Make HERETravelTimeSensor extend RestoreSensor (#82400)
30
0
90,902
10
1
22
def test_rtl_language_detection(self): parser = RasterisedDocumentParser(None) with mock.patch.object( parser, "construct_ocrmypdf_parameters", wraps=parser.construct_ocrmypdf_parameters, ) as wrapped: parser.parse( os.path.join(self.SAMPLE_FILES, "rtl-test.pdf"), "application/pdf", ) # There isn't a good way to actually check this working, with RTL correctly return # as it would require tesseract-ocr-ara installed for everyone running the # test suite. This test does provide the coverage though and attempts to ensure # the force OCR happens self.assertIsNotNone(parser.get_text()) self.assertEqual(parser.construct_ocrmypdf_parameters.call_count, 2) # Check the last call kwargs self.assertTrue( parser.construct_ocrmypdf_parameters.call_args.kwargs["safe_fallback"], )
src/paperless_tesseract/tests/test_parser.py
157
paperless-ngx
{ "docstring": "\n GIVEN:\n - File with text in an RTL language\n WHEN:\n - Document is parsed\n THEN:\n - Text from the document is extracted\n ", "language": "en", "n_whitespaces": 84, "n_words": 22, "vocab_size": 19 }
75
Python
62
a2b7687c3b88aadc55ec38a2249c299eaefd394d
test_parser.py
320,554
16
91
test_rtl_language_detection
https://github.com/paperless-ngx/paperless-ngx.git
In the case of an RTL language being extracted via pdfminer.six, fall back to forced OCR, which handles RTL text better
305
0
117,215
13
3
7
def _get_experiencer_side_only(self, opt): base_datatype = self._get_base_datatype(opt) return ( opt.get('train_experiencer_only', DEFAULT_TRAIN_EXPERIENCER_ONLY) and base_datatype == 'train' ) or base_datatype != 'train'
parlai/tasks/empathetic_dialogues/agents.py
62
ParlAI
{ "docstring": "\n Determine which side(s) of the conversation to use.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
19
Python
16
4f7b38e7970424e4329cb57ab65710291a50f3f7
agents.py
194,827
6
35
_get_experiencer_side_only
https://github.com/facebookresearch/ParlAI.git
[Empathetic Dialogues] Switch to DialogTeacher (#4405) * Start revising ED teacher * Convert over ED teacher * Silly * Minor * Remove episode_done * More cleanup * Fix * Test fix * Force new CI check * Note * Cleanup * Update parlai/tasks/empathetic_dialogues/agents.py Co-authored-by: Stephen Roller <[email protected]> * Update parlai/tasks/empathetic_dialogues/agents.py Co-authored-by: Stephen Roller <[email protected]> * Minor * Fixes * EDPersonaTopicifierTeacher fix * Fix ID * Hopefully fix style gen teacher PR * Add back fields * Update test_blended_skill_talk.py * Update test_blended_skill_talk.py * Convert over EDPersonaTopicifierTeacher * EDPersonaTopicifierTeacher overhaul * Minor * Minor * Remove imports * Black Co-authored-by: Stephen Roller <[email protected]>
69
0
47,102
11
10
49
def generate_level_targets(self, img_size, text_polys, ignore_polys): h, w = img_size lv_size_divs = self.level_size_divisors lv_proportion_range = self.level_proportion_range lv_text_polys = [[] for i in range(len(lv_size_divs))] lv_ignore_polys = [[] for i in range(len(lv_size_divs))] level_maps = [] for poly in text_polys: # assert len(poly) == 1 # text_instance = [[poly[i], poly[i + 1]] # for i in range(0, len(poly), 2)] polygon = np.array(poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_text_polys[ind].append(poly / lv_size_divs[ind]) for ignore_poly in ignore_polys: # assert len(ignore_poly) == 1 # text_instance = [[ignore_poly[i], ignore_poly[i + 1]] # for i in range(0, len(ignore_poly), 2)] polygon = np.array(ignore_poly, dtype=np.int).reshape((1, -1, 2)) _, _, box_w, box_h = cv2.boundingRect(polygon) proportion = max(box_h, box_w) / (h + 1e-8) for ind, proportion_range in enumerate(lv_proportion_range): if proportion_range[0] < proportion < proportion_range[1]: lv_ignore_polys[ind].append(ignore_poly / lv_size_divs[ind]) for ind, size_divisor in enumerate(lv_size_divs): current_level_maps = [] level_img_size = (h // size_divisor, w // size_divisor) text_region = self.generate_text_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(text_region) center_region = self.generate_center_region_mask( level_img_size, lv_text_polys[ind])[None] current_level_maps.append(center_region) effective_mask = self.generate_effective_mask( level_img_size, lv_ignore_polys[ind])[None] current_level_maps.append(effective_mask) fourier_real_map, fourier_image_maps = self.generate_fourier_maps( level_img_size, lv_text_polys[ind]) current_level_maps.append(fourier_real_map) current_level_maps.append(fourier_image_maps) level_maps.append(np.concatenate(current_level_maps)) return level_maps
ppocr/data/imaug/fce_targets.py
586
PaddleOCR
{ "docstring": "Generate ground truth target on each level.\n\n Args:\n img_size (list[int]): Shape of input image.\n text_polys (list[list[ndarray]]): A list of ground truth polygons.\n ignore_polys (list[list[ndarray]]): A list of ignored polygons.\n Returns:\n level_maps (list(ndarray)): A list of ground target on each level.\n ", "language": "en", "n_whitespaces": 105, "n_words": 40, "vocab_size": 24 }
191
Python
96
9f62b610dea6161627200ed85d92e19b1923279a
fce_targets.py
23,192
39
384
generate_level_targets
https://github.com/PaddlePaddle/PaddleOCR.git
add fcenet
716
0
4,536
15
2
15
def test_driver_3(): args_list = [ 'tests/tests.csv', '-is', ',', '-target', 'class', '-g', '1', '-p', '2', '-cv', '3', '-s',' 45', '-config', 'TPOT light', '-v', '2' ] args = _get_arg_parser().parse_args(args_list) with captured_output() as (out, err): tpot_driver(args) ret_stdout = out.getvalue() assert "TPOT settings" in ret_stdout assert "Final Pareto front testing scores" not in ret_stdout try: ret_val = float(ret_stdout.split('\n')[-2].split(': ')[-1]) except Exception: ret_val = -float('inf') assert ret_val > 0.0
tests/driver_tests.py
231
tpot
{ "docstring": "Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 2.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
64
Python
53
388616b6247ca4ea8de4e2f340d6206aee523541
driver_tests.py
181,598
23
125
test_driver_3
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
265
0
43,387
17
2
6
def _has_nchw_support(): explicitly_on_cpu = _is_current_explicit_device("CPU") gpus_available = bool(_get_available_gpus()) return not explicitly_on_cpu and gpus_available # VARIABLE MANIPULATION
keras/backend.py
47
keras
{ "docstring": "Check whether the current scope supports NCHW ops.\n\n TensorFlow does not support NCHW on CPU. Therefore we check if we are not\n explicitly put on\n CPU, and have GPUs available. In this case there will be soft-placing on the\n GPU device.\n\n Returns:\n bool: if the current scope device placement would support nchw\n ", "language": "en", "n_whitespaces": 77, "n_words": 52, "vocab_size": 41 }
16
Python
13
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,606
4
24
_has_nchw_support
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
27
0
80,226
10
1
11
def _search(self, check_return_type=True) -> Union[SourceReadList, DestinationReadList, ConnectionReadList]: return self._search_fn(self.api_instance, self.search_payload, _check_return_type=check_return_type)
octavia-cli/octavia_cli/apply/resources.py
53
airbyte
{ "docstring": "Run search of a resources on the remote Airbyte instance.\n\n Returns:\n Union[SourceReadList, DestinationReadList, ConnectionReadList]: Search results\n ", "language": "en", "n_whitespaces": 41, "n_words": 16, "vocab_size": 16 }
11
Python
11
56bf982cb96f831fe04f5e44a92ee4a669b9e16a
resources.py
4,247
7
36
_search
https://github.com/airbytehq/airbyte.git
🐙 octavia-cli: `apply` connections (#10881)
25
0
641
8
1
43
def test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release(self): release_1 = self.create_release( date_added=timezone.now() - timedelta(minutes=45), version="foobar 1" ) release_2 = self.create_release(version="foobar 2") self.create_release(version="foobar 3") group = self.store_event( data={ "timestamp": iso_format(before_now(seconds=12)), "fingerprint": ["group-1"], "release": release_1.version, }, project_id=self.project.id, ).group self.login_as(user=self.user) response = self.get_success_response( qs_params={"id": group.id}, status="resolvedInNextRelease" ) assert response.data["status"] == "resolved" assert response.data["statusDetails"]["inNextRelease"] grp_resolution = GroupResolution.objects.filter(group=group) assert len(grp_resolution) == 1 grp_resolution = grp_resolution[0] assert grp_resolution.current_release_version == release_1.version assert grp_resolution.release.id == release_2.id assert grp_resolution.type == GroupResolution.Type.in_release assert grp_resolution.status == GroupResolution.Status.resolved activity = Activity.objects.filter( group=grp_resolution.group, type=Activity.SET_RESOLVED_IN_RELEASE, ident=grp_resolution.id, ).first() assert activity.data["version"] == release_2.version
tests/snuba/api/endpoints/test_organization_group_index.py
407
sentry
{ "docstring": "\n Test that ensures that if we basically know the next release when clicking on Resolved\n In Next Release because that release exists, then we can short circuit setting\n GroupResolution to type \"inNextRelease\", and then having `clear_exrired_resolutions` run\n once a new release is created to convert GroupResolution to in_release and set Activity.\n Basically we treat \"ResolvedInNextRelease\" as \"ResolvedInRelease\" when there is a release\n that was created after the last release associated with the group being resolved\n ", "language": "en", "n_whitespaces": 125, "n_words": 75, "vocab_size": 55 }
81
Python
59
096b5511e244eecd8799b2a0324655207ce8985e
test_organization_group_index.py
90,257
33
249
test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release
https://github.com/getsentry/sentry.git
ref(tests): Remove `get_valid_response()` (#34822)
368
0
18,657
17
4
18
def schreier_vector(self, alpha): n = self.degree v = [None]*n v[alpha] = -1 orb = [alpha] used = [False]*n used[alpha] = True gens = self.generators r = len(gens) for b in orb: for i in range(r): temp = gens[i]._array_form[b] if used[temp] is False: orb.append(temp) used[temp] = True v[temp] = i return v
sympy/combinatorics/perm_groups.py
169
sympy
{ "docstring": "Computes the schreier vector for ``alpha``.\n\n Explanation\n ===========\n\n The Schreier vector efficiently stores information\n about the orbit of ``alpha``. It can later be used to quickly obtain\n elements of the group that send ``alpha`` to a particular element\n in the orbit. Notice that the Schreier vector depends on the order\n in which the group generators are listed. For a definition, see [3].\n Since list indices start from zero, we adopt the convention to use\n \"None\" instead of 0 to signify that an element doesn't belong\n to the orbit.\n For the algorithm and its correctness, see [2], pp.78-80.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> a = Permutation([2, 4, 6, 3, 1, 5, 0])\n >>> b = Permutation([0, 1, 3, 5, 4, 6, 2])\n >>> G = PermutationGroup([a, b])\n >>> G.schreier_vector(0)\n [-1, None, 0, 1, None, 1, 0]\n\n See Also\n ========\n\n orbit\n\n ", "language": "en", "n_whitespaces": 304, "n_words": 143, "vocab_size": 97 }
51
Python
35
498015021131af4dbb07eb110e5badaba8250c7b
perm_groups.py
196,118
17
108
schreier_vector
https://github.com/sympy/sympy.git
Updated import locations
226
0
47,618
13
2
13
def test_workers_threads(self): default_workers = 1 for i in range(1, 64): with mock.patch( "paperless.settings.multiprocessing.cpu_count", ) as cpu_count: cpu_count.return_value = i default_threads = default_threads_per_worker(default_workers) self.assertGreaterEqual(default_threads, 1) self.assertLessEqual(default_workers * default_threads, i)
src/paperless/tests/test_settings.py
99
paperless-ngx
{ "docstring": "\n GIVEN:\n - Certain CPU counts\n WHEN:\n - Threads per worker is calculated\n THEN:\n - Threads per worker less than or equal to CPU count\n - At least 1 thread per worker\n ", "language": "en", "n_whitespaces": 104, "n_words": 31, "vocab_size": 22 }
28
Python
25
edaaedae36ee2bb99859b1ca22455b3b7381d0bd
test_settings.py
319,864
10
59
test_workers_threads
https://github.com/paperless-ngx/paperless-ngx.git
Reduces webserver and task worker count to 1 by default
146
0
117,009
13
1
17
def test_query_product_type_for_federation(api_client, product, channel_USD): product_type = product.product_type product_type_id = graphene.Node.to_global_id("ProductType", product_type.pk) variables = { "representations": [ { "__typename": "ProductType", "id": product_type_id, }, ], } query = response = api_client.post_graphql(query, variables) content = get_graphql_content(response) assert content["data"]["_entities"] == [ { "__typename": "ProductType", "id": product_type_id, "name": product_type.name, } ]
saleor/graphql/product/tests/queries/test_product_type_query.py
161
saleor
{ "docstring": "\n query GetProductTypeInFederation($representations: [_Any]) {\n _entities(representations: $representations) {\n __typename\n ... on ProductType {\n id\n name\n }\n }\n }\n ", "language": "en", "n_whitespaces": 94, "n_words": 17, "vocab_size": 13 }
46
Python
33
d90be220d6b687d08153934a51354011a3cb5ca1
test_product_type_query.py
29,297
31
94
test_query_product_type_for_federation
https://github.com/saleor/saleor.git
Split test_product.py and test_variant.py into multiple files (#11173) * Split test_product.py into multiple files * Split test_variant.py into multiple files
186
0
5,212
12
1
9
def _clear_combo_focus(self, *args) -> None: # pylint: disable=unused-argument logger.debug("Clearing scale combo focus") self._scale.selection_clear() self._scale.winfo_toplevel().focus_set() logger.debug("Cleared scale combo focus")
lib/training/preview_tk.py
76
faceswap
{ "docstring": " Remove the highlighting and stealing of focus that the combobox annoyingly\n implements. ", "language": "en", "n_whitespaces": 20, "n_words": 12, "vocab_size": 11 }
18
Python
15
7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5
preview_tk.py
101,547
7
41
_clear_combo_focus
https://github.com/deepfakes/faceswap.git
Training - Use custom preview pop-out
54
0
20,957
10
1
39
def test_remote_workspace_value(): HOST = __default_host__ client = JinaDClient(host=HOST, port=8000) workspace_id = client.workspaces.create(paths=[os.path.join(cur_dir, 'yamls')]) flow_id = client.flows.create( workspace_id=workspace_id, filename='flow_workspace_validate.yml' ) args = client.flows.get(flow_id)['arguments']['object']['arguments'] response = Client( host=HOST, port=args['port_expose'], protocol=args['protocol'], return_responses=True, ).post(on='/', inputs=[Document()], show_progress=True, return_results=True) assert ( response[0] .data.docs[0] .text.startswith(f'{__partial_workspace__}/WorkspaceValidator/0') ) assert client.flows.delete(flow_id) assert client.workspaces.delete(workspace_id) @pytest.mark.parametrize('gpus', ['all', '2'])
tests/distributed/test_topologies/test_topologies.py
301
@pytest.mark.parametrize('gpus', ['all', '2'])
jina
{ "docstring": "\n This tests the value set in `self.workspace` in a remote Flow.\n It should always be `/workspace/ExecutorName/...\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 15 }
46
Python
38
2efe175c975975532f6e3fd326ed280addf20eba
test_topologies.py
11,257
21
168
test_remote_workspace_value
https://github.com/jina-ai/jina.git
fix: return responses (#4343)
140
1
2,028
13
1
10
def test__yes_version_less_than(): mock_version = MagicMock(return_value="0.9.0") with patch("salt.modules.chocolatey.chocolatey_version", mock_version): result = chocolatey._yes() expected = [] # Did it return correctly assert result == expected # Did it populate __context__ assert chocolatey.__context__["chocolatey._yes"] == expected
tests/pytests/unit/modules/test_chocolatey.py
85
salt
{ "docstring": "\n Test _yes when Chocolatey version is less than 0.9.9\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
32
Python
22
1ff576163e64dac4ea1005121b32a1b3a4168f70
test_chocolatey.py
215,701
7
45
test__yes_version_less_than
https://github.com/saltstack/salt.git
move chocolatey exec module tests to pytest
83
0
54,109
10
8
15
def setimage(self, im, extents=None): # following c code self.im = im if extents: (x0, y0, x1, y1) = extents else: (x0, y0, x1, y1) = (0, 0, 0, 0) if x0 == 0 and x1 == 0: self.state.xsize, self.state.ysize = self.im.size else: self.state.xoff = x0 self.state.yoff = y0 self.state.xsize = x1 - x0 self.state.ysize = y1 - y0 if self.state.xsize <= 0 or self.state.ysize <= 0: raise ValueError("Size cannot be negative") if ( self.state.xsize + self.state.xoff > self.im.size[0] or self.state.ysize + self.state.yoff > self.im.size[1] ): raise ValueError("Tile cannot extend outside image")
src/PIL/ImageFile.py
282
Pillow
{ "docstring": "\n Called from ImageFile to set the core output image for the codec\n\n :param im: A core image object\n :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle\n for this tile\n :returns: None\n ", "language": "en", "n_whitespaces": 83, "n_words": 36, "vocab_size": 30 }
91
Python
54
a0e1fde1eddf45f26653e2ff6080d31e177adbec
ImageFile.py
242,434
20
184
setimage
https://github.com/python-pillow/Pillow.git
Added PyEncoder
282
0
69,856
11
1
9
def get_parent_customer_groups(customer_group): lft, rgt = frappe.db.get_value("Customer Group", customer_group, ["lft", "rgt"]) return frappe.db.sql( , (lft, rgt), as_dict=True, )
erpnext/setup/doctype/customer_group/customer_group.py
72
erpnext
{ "docstring": "select name from `tabCustomer Group`\n\t\twhere lft <= %s and rgt >= %s\n\t\torder by lft asc", "language": "en", "n_whitespaces": 14, "n_words": 17, "vocab_size": 15 }
17
Python
17
494bd9ef78313436f0424b918f200dab8fc7c20b
customer_group.py
67,481
9
45
get_parent_customer_groups
https://github.com/frappe/erpnext.git
style: format code with black
10
0
14,539
10
1
21
def test_unassignment(self, mock_func): notification = UnassignedActivityNotification( Activity( project=self.project, group=self.group, user=self.user, type=ActivityType.ASSIGNED, data={"assignee": ""}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() assert text == f"Issue unassigned by {self.name}" assert attachment["title"] == self.group.title assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=unassigned_activity-slack-user|Notification Settings>" )
tests/sentry/integrations/slack/notifications/test_unassigned.py
171
sentry
{ "docstring": "\n Test that a Slack message is sent with the expected payload when an issue is unassigned\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
42
Python
34
1730c481f1a8a71446326fa1ff72e10663016385
test_unassigned.py
99,587
19
93
test_unassignment
https://github.com/getsentry/sentry.git
fix(notifications): Use `metrics_key` (#34572)
235
0
19,665
14
2
10
def to_tuple(x): warnings.warn( "to_tuple is deprecated and will be removed in NetworkX 3.0.", DeprecationWarning, stacklevel=2, ) if not isinstance(x, (tuple, list)): return x return tuple(map(to_tuple, x))
networkx/utils/misc.py
70
networkx
{ "docstring": "Converts lists to tuples.\n\n .. deprecated:: 2.8\n\n to_tuple is deprecated and will be removed in NetworkX 3.0.\n\n Examples\n --------\n >>> from networkx.utils import to_tuple\n >>> a_list = [1, 2, [1, 4]]\n >>> to_tuple(a_list)\n (1, 2, (1, 4))\n ", "language": "en", "n_whitespaces": 67, "n_words": 37, "vocab_size": 31 }
26
Python
25
6801db694e6a3603ab943079c3399baa0c5ff686
misc.py
176,466
9
44
to_tuple
https://github.com/networkx/networkx.git
Deprecate `to_tuple` (#5430) * Add _to_tuple private fn to node_link.py. * Deprecate utils.misc.to_tuple. * Add deprecation note. * Add release note.
69
0
41,921
9
2
16
def heldout_score(clf, X_test, y_test): score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = binomial_deviance(y_test, y_pred.ravel()) return score
examples/ensemble/plot_gradient_boosting_oob.py
91
scikit-learn
{ "docstring": "compute deviance scores on ``X_test`` and ``y_test``.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
19
Python
17
2c1581c32e641e535305647eb57a1787bcf803f0
plot_gradient_boosting_oob.py
261,569
5
59
heldout_score
https://github.com/scikit-learn/scikit-learn.git
DOC Fix FutureWarning in ensemble/plot_gradient_boosting_oob.py (#24948)
38
0
76,867
12
2
4
def battery_level(self) -> int | None: if not self._data: return None return self._data.battery_level
homeassistant/components/life360/device_tracker.py
41
core
{ "docstring": "Return the battery level of the device.\n\n Percentage from 0-100.\n ", "language": "en", "n_whitespaces": 24, "n_words": 10, "vocab_size": 9 }
13
Python
12
343508a0151378ec4958bd04fa87ca772aaf0e4e
device_tracker.py
303,407
8
24
battery_level
https://github.com/home-assistant/core.git
Fix Life360 recovery from server errors (#76231)
45
0
102,227
7
1
16
def test_ticket_11293_q_immutable(self): q1 = Q(isbn="") q2 = Q(authors__count__gt=1) query = Book.objects.annotate(Count("authors")) query.filter(q1 | q2) self.assertEqual(len(q2.children), 1)
tests/aggregation_regress/tests.py
97
django
{ "docstring": "\n Splitting a q object to parts for where/having doesn't alter\n the original q-object.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
16
Python
14
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
200,943
6
56
test_ticket_11293_q_immutable
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
58
0
49,831
11
4
18
def on_advance_end(self) -> None: # ----------------------------------------- # VALIDATE IF NEEDED + CHECKPOINT CALLBACK # ----------------------------------------- should_check_val = self._should_check_val_fx(self.batch_idx, self.batch_progress.is_last_batch) if should_check_val: self.trainer.validating = True self._run_validation() self.trainer.training = True # ----------------------------------------- # SAVE LOGGERS (ie: Tensorboard, etc...) # ----------------------------------------- self._save_loggers_on_train_batch_end() # update plateau LR scheduler after metrics are logged self.update_lr_schedulers("step", update_plateau_schedulers=True) if not self._should_accumulate(): # progress global step according to grads progress self.global_step += 1 # if training finished, defer exit to the parent. this assumes there will be enough time in between # which might not be the case depending on what's in the `*_epoch_end` hooks if not self._is_training_done: # if fault tolerant is enabled and process has been notified, exit. self.trainer._exit_gracefully_on_signal()
pytorch_lightning/loops/epoch/training_epoch_loop.py
157
lightning
{ "docstring": "Runs validation and Checkpointing if necessary.\n\n Raises:\n StopIteration: if :attr:`done` evaluates to ``True`` to finish this epoch\n ", "language": "en", "n_whitespaces": 42, "n_words": 17, "vocab_size": 15 }
112
Python
84
59a7ba760548baadf6dbb30864b54cb01c7225a3
training_epoch_loop.py
241,703
17
87
on_advance_end
https://github.com/Lightning-AI/lightning.git
Move `epoch_{start,end}` hooks from `TrainingEpochLoop` to `FitLoop` (#11201)
301
0
69,658
10
1
16
def test_get_feature_names_invalid_dtypes(names, dtypes): pd = pytest.importorskip("pandas") X = pd.DataFrame([[1, 2], [4, 5], [5, 6]], columns=names) msg = re.escape( "Feature names only support names that are all strings. " f"Got feature names with dtypes: {dtypes}." ) with pytest.raises(TypeError, match=msg): names = _get_feature_names(X)
sklearn/utils/tests/test_validation.py
123
scikit-learn
{ "docstring": "Get feature names errors when the feature names have mixed dtypes", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
41
Python
34
9f9f1684e91fbfffbc446f786a8c64628b752efb
test_validation.py
261,047
9
74
test_get_feature_names_invalid_dtypes
https://github.com/scikit-learn/scikit-learn.git
MAINT Clean deprecation for 1.2: validation (#24493) * cln deprecations * cln * fix tst switch to pytest.raises
80
0
76,648
11
10
48
def svd(a, full_matrices=True, compute_uv=True, hermitian=False): import numpy as _nx a, wrap = _makearray(a) if hermitian: # note: lapack svd returns eigenvalues with s ** 2 sorted descending, # but eig returns s sorted ascending, so we re-order the eigenvalues # and related arrays to have the correct order if compute_uv: s, u = eigh(a) sgn = sign(s) s = abs(s) sidx = argsort(s)[..., ::-1] sgn = _nx.take_along_axis(sgn, sidx, axis=-1) s = _nx.take_along_axis(s, sidx, axis=-1) u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1) # singular values are unsigned, move the sign into v vt = transpose(u * sgn[..., None, :]).conjugate() return wrap(u), s, wrap(vt) else: s = eigvalsh(a) s = s[..., ::-1] s = abs(s) return sort(s)[..., ::-1] _assert_stacked_2d(a) t, result_t = _commonType(a) extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence) m, n = a.shape[-2:] if compute_uv: if full_matrices: if m < n: gufunc = _umath_linalg.svd_m_f else: gufunc = _umath_linalg.svd_n_f else: if m < n: gufunc = _umath_linalg.svd_m_s else: gufunc = _umath_linalg.svd_n_s signature = 'D->DdD' if isComplexType(t) else 'd->ddd' u, s, vh = gufunc(a, signature=signature, extobj=extobj) u = u.astype(result_t, copy=False) s = s.astype(_realType(result_t), copy=False) vh = vh.astype(result_t, copy=False) return wrap(u), s, wrap(vh) else: if m < n: gufunc = _umath_linalg.svd_m else: gufunc = _umath_linalg.svd_n signature = 'D->d' if isComplexType(t) else 'd->d' s = gufunc(a, signature=signature, extobj=extobj) s = s.astype(_realType(result_t), copy=False) return s
numpy/linalg/linalg.py
635
numpy
{ "docstring": "\n Singular Value Decomposition.\n\n When `a` is a 2D array, and when `full_matrices` is `False`,\n it is factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``,\n where `u` and `vh` are 2D unitary arrays and `s` is a 1D\n array of `a`'s singular values. When `a` is higher-dimensional, SVD is\n applied in stacked mode as explained below.\n\n Parameters\n ----------\n a : (..., M, N) array_like\n A real or complex array with ``a.ndim >= 2``.\n full_matrices : bool, optional\n If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and\n ``(..., N, N)``, respectively. Otherwise, the shapes are\n ``(..., M, K)`` and ``(..., K, N)``, respectively, where\n ``K = min(M, N)``.\n compute_uv : bool, optional\n Whether or not to compute `u` and `vh` in addition to `s`. True\n by default.\n hermitian : bool, optional\n If True, `a` is assumed to be Hermitian (symmetric if real-valued),\n enabling a more efficient method for finding singular values.\n Defaults to False.\n\n .. versionadded:: 1.17.0\n\n Returns\n -------\n u : { (..., M, M), (..., M, K) } array\n Unitary array(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n s : (..., K) array\n Vector(s) with the singular values, within each vector sorted in\n descending order. The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`.\n vh : { (..., N, N), (..., K, N) } array\n Unitary array(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n\n Raises\n ------\n LinAlgError\n If SVD computation does not converge.\n\n See Also\n --------\n scipy.linalg.svd : Similar function in SciPy.\n scipy.linalg.svdvals : Compute singular values of a matrix.\n\n Notes\n -----\n\n .. versionchanged:: 1.8.0\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The decomposition is performed using LAPACK routine ``_gesdd``.\n\n SVD is usually described for the factorization of a 2D matrix :math:`A`.\n The higher-dimensional case will be discussed below. In the 2D case, SVD is\n written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,\n :math:`S= \\\\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`\n contains the singular values of `a` and `u` and `vh` are unitary. The rows\n of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are\n the eigenvectors of :math:`A A^H`. In both cases the corresponding\n (possibly non-zero) eigenvalues are given by ``s**2``.\n\n If `a` has more than two dimensions, then broadcasting rules apply, as\n explained in :ref:`routines.linalg-broadcasting`. This means that SVD is\n working in \"stacked\" mode: it iterates over all indices of the first\n ``a.ndim - 2`` dimensions and for each combination SVD is applied to the\n last two indices. The matrix `a` can be reconstructed from the\n decomposition with either ``(u * s[..., None, :]) @ vh`` or\n ``u @ (s[..., None] * vh)``. 
(The ``@`` operator can be replaced by the\n function ``np.matmul`` for python versions below 3.5.)\n\n If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are\n all the return values.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)\n >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)\n\n Reconstruction based on full SVD, 2D case:\n\n >>> u, s, vh = np.linalg.svd(a, full_matrices=True)\n >>> u.shape, s.shape, vh.shape\n ((9, 9), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u[:, :6] * s, vh))\n True\n >>> smat = np.zeros((9, 6), dtype=complex)\n >>> smat[:6, :6] = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n Reconstruction based on reduced SVD, 2D case:\n\n >>> u, s, vh = np.linalg.svd(a, full_matrices=False)\n >>> u.shape, s.shape, vh.shape\n ((9, 6), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u * s, vh))\n True\n >>> smat = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n Reconstruction based on full SVD, 4D case:\n\n >>> u, s, vh = np.linalg.svd(b, full_matrices=True)\n >>> u.shape, s.shape, vh.shape\n ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))\n >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))\n True\n >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))\n True\n\n Reconstruction based on reduced SVD, 4D case:\n\n >>> u, s, vh = np.linalg.svd(b, full_matrices=False)\n >>> u.shape, s.shape, vh.shape\n ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))\n >>> np.allclose(b, np.matmul(u * s[..., None, :], vh))\n True\n >>> np.allclose(b, np.matmul(u, s[..., None] * vh))\n True\n\n ", "language": "en", "n_whitespaces": 1180, "n_words": 746, "vocab_size": 328 }
215
Python
116
40747ae50620631941e43dbbd5baaccab669922f
linalg.py
160,044
49
399
svd
https://github.com/numpy/numpy.git
clarify svd documentation `u @ np.diag(s) @ vh` can only reproduce the original matrix when `full_matrices` is `False`, otherwise dimension does not match.
658
0
38,474
17
4
13
def trace_with_input_signature(self): if self._layer_inputs[0] is None: return args, kwargs = self._layer_inputs if self._expects_training_arg: args, kwargs = self._call_spec.set_arg_value( "training", False, args, kwargs, inputs_in_args=True ) if None not in tf.nest.flatten([args, kwargs]): # Manually add traces for layers that have keyword arguments and have # a fully defined input signature. self.add_trace(*args, **kwargs)
keras/saving/saved_model/save_impl.py
124
keras
{ "docstring": "Trace with the layer/models inferred input signature if possible.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
49
Python
41
84afc5193d38057e2e2badf9c889ea87d80d8fbf
save_impl.py
276,110
10
78
trace_with_input_signature
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
165
0
81,562
11
1
19
def test_perfect_horizontal_line(): X = np.arange(100)[:, None] y = np.zeros((100,)) estimator = LinearRegression() ransac_estimator = RANSACRegressor(estimator, random_state=0) ransac_estimator.fit(X, y) assert_allclose(ransac_estimator.estimator_.coef_, 0.0) assert_allclose(ransac_estimator.estimator_.intercept_, 0.0) # TODO: Remove in v1.2 @pytest.mark.parametrize( "old_loss, new_loss", [ ("absolute_loss", "squared_error"), ("squared_loss", "absolute_error"), ], )
sklearn/linear_model/tests/test_ransac.py
160
@pytest.mark.parametrize( "old_loss, new_loss", [ ("absolute_loss", "squared_error"), ("squared_loss", "absolute_error"), ], )
scikit-learn
{ "docstring": "Check that we can fit a line where all samples are inliers.\n Non-regression test for:\n https://github.com/scikit-learn/scikit-learn/issues/19497\n ", "language": "en", "n_whitespaces": 25, "n_words": 16, "vocab_size": 16 }
37
Python
33
1c24595c74e0bea246737b19f8fdfc8a1ffa2282
test_ransac.py
258,465
8
76
test_perfect_horizontal_line
https://github.com/scikit-learn/scikit-learn.git
MAINT rename base_estimator to estimator in RANSACRegressor (#22062)
81
1
75,246
9
5
28
def sync_transactions(bank, bank_account): last_transaction_date = frappe.db.get_value("Bank Account", bank_account, "last_integration_date") if last_transaction_date: start_date = formatdate(last_transaction_date, "YYYY-MM-dd") else: start_date = formatdate(add_months(today(), -12), "YYYY-MM-dd") end_date = formatdate(today(), "YYYY-MM-dd") try: transactions = get_transactions( bank=bank, bank_account=bank_account, start_date=start_date, end_date=end_date ) result = [] for transaction in reversed(transactions): result += new_bank_transaction(transaction) if result: last_transaction_date = frappe.db.get_value("Bank Transaction", result.pop(), "date") frappe.logger().info( "Plaid added {} new Bank Transactions from '{}' between {} and {}".format( len(result), bank_account, start_date, end_date ) ) frappe.db.set_value( "Bank Account", bank_account, "last_integration_date", last_transaction_date ) except Exception: frappe.log_error(frappe.get_traceback(), _("Plaid transactions sync error"))
erpnext/erpnext_integrations/doctype/plaid_settings/plaid_settings.py
296
erpnext
{ "docstring": "Sync transactions based on the last integration date as the start date, after sync is completed\n\tadd the transaction date of the oldest transaction as the last integration date.", "language": "en", "n_whitespaces": 27, "n_words": 29, "vocab_size": 20 }
85
Python
62
494bd9ef78313436f0424b918f200dab8fc7c20b
plaid_settings.py
66,003
26
178
sync_transactions
https://github.com/frappe/erpnext.git
style: format code with black
59
0
14,088
15
15
17
def select_related_descend(field, restricted, requested, load_fields, reverse=False): if not field.remote_field: return False if field.remote_field.parent_link and not reverse: return False if restricted: if reverse and field.related_query_name() not in requested: return False if not reverse and field.name not in requested: return False if not restricted and field.null: return False if load_fields: if field.attname not in load_fields: if restricted and field.name in requested: msg = ( "Field %s.%s cannot be both deferred and traversed using " "select_related at the same time." ) % (field.model._meta.object_name, field.name) raise FieldError(msg) return True
django/db/models/query_utils.py
195
django
{ "docstring": "\n Return True if this field should be used to descend deeper for\n select_related() purposes. Used by both the query construction code\n (sql.query.fill_related_selections()) and the model instance creation code\n (query.get_klass_info()).\n\n Arguments:\n * field - the field to be checked\n * restricted - a boolean field, indicating if the field list has been\n manually restricted using a requested clause)\n * requested - The select_related() dictionary.\n * load_fields - the set of fields to be loaded on this model\n * reverse - boolean, True if we are checking a reverse select related\n ", "language": "en", "n_whitespaces": 134, "n_words": 89, "vocab_size": 58 }
85
Python
48
9c19aff7c7561e3a82978a272ecdaad40dda5c00
query_utils.py
205,802
21
123
select_related_descend
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
264
0
51,220
17
2
13
def test_notification_permission_workaround(): try: notifications = QWebEnginePage.Feature.Notifications except AttributeError: pytest.skip("No Notifications member") permissions = webenginetab._WebEnginePermissions assert permissions._options[notifications] == 'content.notifications.enabled' assert permissions._messages[notifications] == 'show notifications'
tests/unit/browser/webengine/test_webenginetab.py
82
qutebrowser
{ "docstring": "Make sure the value for QWebEnginePage::Notifications is correct.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
23
Python
20
0877fb0d78635692e481c8bde224fac5ad0dd430
test_webenginetab.py
321,330
8
46
test_notification_permission_workaround
https://github.com/qutebrowser/qutebrowser.git
Run scripts/dev/rewrite_enums.py
55
0
117,667
11
1
14
def test_get_settings_request_context_use_default(self): request = self.get_request(site=self.other_site) context = Context({"request": request}) # This should use the default site, ignoring the site in the request template = Template( "{% load wagtailsettings_tags %}" "{% get_settings use_default_site=True %}" "{{ settings.tests.testsetting.title}}" ) self.assertEqual(template.render(context), self.default_site_settings.title)
wagtail/contrib/settings/tests/test_templates.py
96
wagtail
{ "docstring": "\n Check that the {% get_settings use_default_site=True %} option\n overrides a request in the context.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
38
Python
31
d10f15e55806c6944827d801cd9c2d53f5da4186
test_templates.py
73,525
9
53
test_get_settings_request_context_use_default
https://github.com/wagtail/wagtail.git
Reformat with black
120
0
16,042
11
1
4
def adj(self): return MultiAdjacencyView(self._adj)
networkx/classes/multigraph.py
24
networkx
{ "docstring": "Graph adjacency object holding the neighbors of each node.\n\n This object is a read-only dict-like structure with node keys\n and neighbor-dict values. The neighbor-dict is keyed by neighbor\n to the edgekey-data-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets\n the color of the edge `(3, 2, 0)` to `\"blue\"`.\n\n Iterating over G.adj behaves like a dict. Useful idioms include\n `for nbr, edgesdict in G.adj[n].items():`.\n\n The neighbor information is also provided by subscripting the graph.\n\n Examples\n --------\n >>> e = [(1, 2), (1, 2), (1, 3), (3, 4)] # list of edges\n >>> G = nx.MultiGraph(e)\n >>> G.edges[1, 2, 0][\"weight\"] = 3\n >>> result = set()\n >>> for edgekey, data in G[1][2].items():\n ... result.add(data.get('weight', 1))\n >>> result\n {1, 3}\n\n For directed graphs, `G.adj` holds outgoing (successor) info.\n ", "language": "en", "n_whitespaces": 263, "n_words": 123, "vocab_size": 94 }
4
Python
4
8f4c99debc9440728c5e85f8bffa5d26b232eb6f
multigraph.py
176,417
2
13
adj
https://github.com/networkx/networkx.git
Multigraph docs update (#5389) * Updated MultiDiGraph documentation to include more examples of actually using parallel edges, and fixed references to things like G[u, v] where G[u, v, k] is required for a MultiDigraph. Have not made parallel changes in MultiGraph which should maybe also be made? Docs tests pass on my end; no code outside of comments was changed. -Peter Mawhorter * Updated docs for MultiGraph to add more multigraph-specific examples and fix a few places where untested examples were wrong. -Peter Mawhorter * [DOC] fix typo * add the right amount of separators Co-authored-by: Mridul Seth <[email protected]>
18
0
41,882
8
7
21
def _check_m2m_through_same_relationship(cls): errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id="models.E003", ) ) else: seen_intermediary_signatures.append(signature) return errors
django/db/models/base.py
215
django
{ "docstring": "Check if no relationship model is used by more than one m2m field.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
88
Python
53
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
205,412
26
136
_check_m2m_through_same_relationship
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
460
0
51,119
18
1
32
def forward(self, query, key, value, pos_emb, mask): q, k, v = self.forward_qkv(query, key, value) q = q.transpose(1, 2) # (batch, time1, head, d_k) n_batch_pos = pos_emb.size(0) p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) p = p.transpose(1, 2) # (batch, head, time1, d_k) # (batch, head, time1, d_k) q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) # (batch, head, time1, d_k) q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) # compute attention score # first compute matrix a and matrix c # as described in https://arxiv.org/abs/1901.02860 Section 3.3 # (batch, head, time1, time2) matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) # compute matrix b and matrix d # (batch, head, time1, time2) matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) matrix_bd = self.rel_shift(matrix_bd) scores = (matrix_ac + matrix_bd) / math.sqrt( self.d_k ) # (batch, head, time1, time2) return self.forward_attention(v, scores, mask)
ppg_extractor/encoder/attention.py
293
MockingBird
{ "docstring": "Compute 'Scaled Dot Product Attention' with rel. positional encoding.\n\n :param torch.Tensor query: (batch, time1, size)\n :param torch.Tensor key: (batch, time2, size)\n :param torch.Tensor value: (batch, time2, size)\n :param torch.Tensor pos_emb: (batch, time1, size)\n :param torch.Tensor mask: (batch, time1, time2)\n :param torch.nn.Dropout dropout:\n :return torch.Tensor: attention output (batch, time1, d_model)\n ", "language": "en", "n_whitespaces": 106, "n_words": 49, "vocab_size": 28 }
130
Python
71
b617a87ee40ab384767a27335313c2c65ee094ec
attention.py
161,087
15
189
forward
https://github.com/babysor/MockingBird.git
Init ppg extractor and ppg2mel (#375) * Init ppg extractor and ppg2mel * add preprocess and training * FIx known issues * Update __init__.py Allow to gen audio * Fix length issue * Fix bug of preparing fid * Fix sample issues * Add UI usage of PPG-vc
298
0
38,899
11
1
6
def on_page_context(self, context, page, config, nav): return context
mkdocs/plugins.py
24
mkdocs
{ "docstring": "\n The `page_context` event is called after the context for a page is created\n and can be used to alter the context for that specific page only.\n\n Parameters:\n context: dict of template context variables\n page: `mkdocs.nav.Page` instance\n config: global configuration object\n nav: global navigation object\n\n Returns:\n dict of template context variables\n ", "language": "en", "n_whitespaces": 141, "n_words": 50, "vocab_size": 37 }
8
Python
8
f79b34d174e41084391868e7b503f5c61b8b1bdf
plugins.py
224,449
2
16
on_page_context
https://github.com/mkdocs/mkdocs.git
Move plugin events docs into source code + refactor * Create real (no-op) methods for each event in the base class. * Refactor event dispatcher to not check for methods' existence, instead just call them. * Move documentation from Markdown into docstrings of these methods. * Activate the 'mkdocstrings' plugin. * Use 'mkdocstrings' to insert documentation from those docstrings into the site.
22
0
57,294
6
7
58
def _read(cls, path_or_buf, **kwargs): path_or_buf = cls.get_path_or_buffer(path_or_buf) if isinstance(path_or_buf, str): if not cls.file_exists(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) path_or_buf = cls.get_path(path_or_buf) elif not cls.pathlib_or_pypath(path_or_buf): return cls.single_worker_read(path_or_buf, **kwargs) if not kwargs.get("lines", False): return cls.single_worker_read(path_or_buf, **kwargs) with OpenFile(path_or_buf, "rb") as f: columns = pandas.read_json(BytesIO(b"" + f.readline()), lines=True).columns kwargs["columns"] = columns empty_pd_df = pandas.DataFrame(columns=columns) with OpenFile(path_or_buf, "rb", kwargs.get("compression", "infer")) as f: partition_ids = [] index_ids = [] dtypes_ids = [] column_widths, num_splits = cls._define_metadata(empty_pd_df, columns) args = {"fname": path_or_buf, "num_splits": num_splits, **kwargs} splits = cls.partitioned_file( f, num_partitions=NPartitions.get(), ) for start, end in splits: args.update({"start": start, "end": end}) partition_id = cls.deploy(cls.parse, num_returns=num_splits + 3, **args) partition_ids.append(partition_id[:-3]) index_ids.append(partition_id[-3]) dtypes_ids.append(partition_id[-2]) # partition_id[-1] contains the columns for each partition, which will be useful # for implementing when `lines=False`. row_lengths = cls.materialize(index_ids) new_index = pandas.RangeIndex(sum(row_lengths)) dtypes = cls.get_dtypes(dtypes_ids) partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths) if isinstance(dtypes, pandas.Series): dtypes.index = columns else: dtypes = pandas.Series(dtypes, index=columns) new_frame = cls.frame_cls( np.array(partition_ids), new_index, columns, row_lengths, column_widths, dtypes=dtypes, ) new_frame.synchronize_labels(axis=0) return cls.query_compiler_cls(new_frame)
modin/core/io/text/json_dispatcher.py
641
modin
{ "docstring": "\n Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.\n\n Parameters\n ----------\n path_or_buf : str, path object or file-like object\n `path_or_buf` parameter of `read_json` function.\n **kwargs : dict\n Parameters of `read_json` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n ", "language": "en", "n_whitespaces": 141, "n_words": 44, "vocab_size": 35 }
157
Python
106
97769988a6f19e4b76f34238c97bf159ee7626a5
json_dispatcher.py
153,549
48
398
_read
https://github.com/modin-project/modin.git
REFACTOR-#3853: interacting with Dask interface through 'DaskWrapper' class (#3854) Co-authored-by: Devin Petersohn <[email protected]> Co-authored-by: Dmitry Chigarev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Anatoly Myachev <[email protected]>
655
0
35,438
16
5
21
def write_shared_locations(self, paths, dry_run=False): shared_path = os.path.join(self.path, 'SHARED') logger.info('creating %s', shared_path) if dry_run: return None lines = [] for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): path = paths[key] if os.path.isdir(paths[key]): lines.append('%s=%s' % (key, path)) for ns in paths.get('namespace', ()): lines.append('namespace=%s' % ns) with codecs.open(shared_path, 'w', encoding='utf-8') as f: f.write('\n'.join(lines)) return shared_path
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
239
transferlearning
{ "docstring": "\n Write shared location information to the SHARED file in .dist-info.\n :param paths: A dictionary as described in the documentation for\n :meth:`shared_locations`.\n :param dry_run: If True, the action is logged but no file is actually\n written.\n :return: The path of the file written to.\n ", "language": "en", "n_whitespaces": 109, "n_words": 43, "vocab_size": 35 }
52
Python
44
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
database.py
61,979
15
139
write_shared_locations
https://github.com/jindongwang/transferlearning.git
upd; format
186
0
12,792
13
3
7
def get_body_encoding(self): assert self.body_encoding != SHORTEST if self.body_encoding == QP: return 'quoted-printable' elif self.body_encoding == BASE64: return 'base64' else: return encode_7or8bit
python3.10.4/Lib/email/charset.py
61
XX-Net
{ "docstring": "Return the content-transfer-encoding used for body encoding.\n\n This is either the string `quoted-printable' or `base64' depending on\n the encoding used, or it is a function in which case you should call\n the function with a single argument, the Message object being\n encoded. The function should then set the Content-Transfer-Encoding\n header itself to whatever is appropriate.\n\n Returns \"quoted-printable\" if self.body_encoding is QP.\n Returns \"base64\" if self.body_encoding is BASE64.\n Returns conversion function otherwise.\n ", "language": "en", "n_whitespaces": 135, "n_words": 71, "vocab_size": 52 }
21
Python
16
8198943edd73a363c266633e1aa5b2a9e9c9f526
charset.py
223,651
8
34
get_body_encoding
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
89
0
57,033
9
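For orientation, a small sketch of how this method surfaces through the public `email.charset.Charset` API; the concrete outputs assume the standard charset registry, where utf-8 is registered with BASE64 body encoding and iso-8859-1 with QP.

from email.charset import Charset

print(Charset("utf-8").get_body_encoding())       # 'base64'
print(Charset("iso-8859-1").get_body_encoding())  # 'quoted-printable'
# For charsets with no body encoding the callable encode_7or8bit is returned
# instead of a string, and the caller is expected to apply it to the Message.
print(Charset("us-ascii").get_body_encoding())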
2
19
def manage_matplotlib_context() -> Any:
    originalRcParams = matplotlib.rcParams.copy()

    # Credits for this style go to the ggplot and seaborn packages.
    # We copied the style file to remove dependencies on the Seaborn package.
    # Check it out, it's an awesome library for plotting
    customRcParams = {
        "patch.facecolor": "#348ABD",  # blue
        "patch.antialiased": True,
        "font.size": 10.0,
        "figure.edgecolor": "0.50",
        # Seaborn common parameters
        "figure.facecolor": "white",
        "text.color": ".15",
        "axes.labelcolor": ".15",
        "legend.numpoints": 1,
        "legend.scatterpoints": 1,
        "xtick.direction": "out",
        "ytick.direction": "out",
        "xtick.color": ".15",
        "ytick.color": ".15",
        "axes.axisbelow": True,
        "image.cmap": "Greys",
        "font.family": ["sans-serif"],
        "font.sans-serif": [
            "Arial",
            "Liberation Sans",
            "Bitstream Vera Sans",
            "sans-serif",
        ],
        "grid.linestyle": "-",
        "lines.solid_capstyle": "round",
        # Seaborn darkgrid parameters
        # .15 = dark_gray
        # .8 = light_gray
        "axes.grid": True,
        "axes.facecolor": "#EAEAF2",
        "axes.edgecolor": "white",
        "axes.linewidth": 0,
        "grid.color": "white",
        # Seaborn notebook context
        "figure.figsize": [8.0, 5.5],
        "axes.labelsize": 11,
        "axes.titlesize": 12,
        "xtick.labelsize": 10,
        "ytick.labelsize": 10,
        "legend.fontsize": 10,
        "grid.linewidth": 1,
        "lines.linewidth": 1.75,
        "patch.linewidth": 0.3,
        "lines.markersize": 7,
        "lines.markeredgewidth": 0,
        "xtick.major.width": 1,
        "ytick.major.width": 1,
        "xtick.minor.width": 0.5,
        "ytick.minor.width": 0.5,
        "xtick.major.pad": 7,
        "ytick.major.pad": 7,
        "backend": "agg",
    }

    try:
        register_matplotlib_converters()
        matplotlib.rcParams.update(customRcParams)
        sns.set_style(style="white")
        yield
    finally:
        deregister_matplotlib_converters()  # revert to original unit registries
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
            matplotlib.rcParams.update(originalRcParams)  # revert to original rcParams
src/pandas_profiling/visualisation/context.py
503
ydata-profiling
{ "docstring": "Return a context manager for temporarily changing matplotlib unit registries and rcParams.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
184
Python
139
11e1a8a3fa8d13513fe926b731fb907a066af2a1
context.py
191,835
62
273
manage_matplotlib_context
https://github.com/ydataai/ydata-profiling.git
fix: change context managed backend (#1149)
662
0
46,847
15
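Because the body yields, the function is presumably wrapped as a context manager in the library (the decorator is not part of the captured snippet). A hedged usage sketch:

# Assumes manage_matplotlib_context is decorated with @contextlib.contextmanager.
with manage_matplotlib_context():
    # Plotting done here sees the temporary Seaborn-like rcParams and the "agg"
    # backend; the original rcParams and unit converters are restored on exit,
    # even if an exception is raised.
    render_report_figures()  # hypothetical plotting call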
1
3
def force_update(self) -> bool:
    return False
homeassistant/components/life360/device_tracker.py
19
core
{ "docstring": "Return True if state updates should be forced.\n\n Overridden because CoordinatorEntity sets `should_poll` to False,\n which causes TrackerEntity to set `force_update` to True.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 21 }
6
Python
6
e3fb04e1166d15f576d4b6fdec962f13871aaafe
device_tracker.py
306,735
7
10
force_update
https://github.com/home-assistant/core.git
Add comment to life360 device tracker (#77879)
20
0
105,519
6
3
65
def glm_dataset(global_random_seed, request):
    data_type, model = request.param
    # Make larger dim more than double as big as the smaller one.
    # This helps when constructing singular matrices like (X, X).
    if data_type == "long":
        n_samples, n_features = 12, 4
    else:
        n_samples, n_features = 4, 12
    k = min(n_samples, n_features)
    rng = np.random.RandomState(global_random_seed)
    X = make_low_rank_matrix(
        n_samples=n_samples,
        n_features=n_features,
        effective_rank=k,
        tail_strength=0.1,
        random_state=rng,
    )
    X[:, -1] = 1  # last columns acts as intercept
    U, s, Vt = linalg.svd(X, full_matrices=False)
    assert np.all(s > 1e-3)  # to be sure
    assert np.max(s) / np.min(s) < 100  # condition number of X

    if data_type == "long":
        coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)
        coef_unpenalized *= rng.choice([-1, 1], size=n_features)
        raw_prediction = X @ coef_unpenalized
    else:
        raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)
        # minimum norm solution min ||w||_2 such that raw_prediction = X w:
        # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction
        coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction

    linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True)
    sw = np.full(shape=n_samples, fill_value=1 / n_samples)
    y = linear_loss.base_loss.link.inverse(raw_prediction)

    # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with
    # optimizer. Note that the problem is well conditioned such that we get accurate
    # results.
    l2_reg_strength = 1
    fun = partial(
        linear_loss.loss,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    grad = partial(
        linear_loss.gradient,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    coef_penalized_with_intercept = _special_minimize(
        fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14
    )

    linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False)
    fun = partial(
        linear_loss.loss,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    grad = partial(
        linear_loss.gradient,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    coef_penalized_without_intercept = _special_minimize(
        fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14
    )

    # To be sure
    assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(
        coef_unpenalized
    )

    return (
        model,
        X,
        y,
        coef_unpenalized,
        coef_penalized_with_intercept,
        coef_penalized_without_intercept,
        l2_reg_strength,
    )


@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [False, True])
sklearn/linear_model/_glm/tests/test_glm.py
766
@pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [False, True])
scikit-learn
{ "docstring": "Dataset with GLM solutions, well conditioned X.\n\n This is inspired by ols_ridge_dataset in test_ridge.py.\n\n The construction is based on the SVD decomposition of X = U S V'.\n\n Parameters\n ----------\n type : {\"long\", \"wide\"}\n If \"long\", then n_samples > n_features.\n If \"wide\", then n_features > n_samples.\n model : a GLM model\n\n For \"wide\", we return the minimum norm solution:\n\n min ||w||_2 subject to w = argmin deviance(X, y, w)\n\n Note that the deviance is always minimized if y = inverse_link(X w) is possible to\n achieve, which it is in the wide data case. Therefore, we can construct the\n solution with minimum norm like (wide) OLS:\n\n min ||w||_2 subject to link(y) = raw_prediction = X w\n\n Returns\n -------\n model : GLM model\n X : ndarray\n Last column of 1, i.e. intercept.\n y : ndarray\n coef_unpenalized : ndarray\n Minimum norm solutions, i.e. min sum(loss(w)) (with mininum ||w||_2 in\n case of ambiguity)\n Last coefficient is intercept.\n coef_penalized : ndarray\n GLM solution with alpha=l2_reg_strength=1, i.e.\n min 1/n * sum(loss) + ||w[:-1]||_2^2.\n Last coefficient is intercept.\n l2_reg_strength : float\n Always equal 1.\n ", "language": "en", "n_whitespaces": 318, "n_words": 177, "vocab_size": 111 }
284
Python
172
9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f
test_glm.py
260,403
77
495
glm_dataset
https://github.com/scikit-learn/scikit-learn.git
TST tight tests for GLMs (#23619) Co-authored-by: Olivier Grisel <[email protected]>
716
1
76,228
15
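The wide-data branch of the fixture leans on the minimum-norm identity w = V s^-1 U' z for X = U S V'. A small, self-contained check of that identity, independent of scikit-learn:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(4, 12)                 # wide: more features than samples
z = rng.randn(4)                     # target raw_prediction
U, s, Vt = np.linalg.svd(X, full_matrices=False)
w = Vt.T @ np.diag(1 / s) @ U.T @ z  # minimum-norm solution
assert np.allclose(X @ w, z)                  # reproduces z exactly
assert np.allclose(w, np.linalg.pinv(X) @ z)  # agrees with the pseudoinverse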
3
13
def get_telemetry_id() -> Optional[Text]:
    try:
        telemetry_config = (
            rasa_utils.read_global_config_value(CONFIG_FILE_TELEMETRY_KEY) or {}
        )

        return telemetry_config.get(CONFIG_TELEMETRY_ID)
    except Exception as e:  # skipcq:PYL-W0703
        logger.debug(f"Unable to retrieve telemetry ID: {e}")
        return None
rasa/telemetry.py
84
rasa
{ "docstring": "Return the unique telemetry identifier for this Rasa Open Source install.\n\n The identifier can be any string, but it should be a UUID.\n\n Returns:\n The identifier, if it is configured correctly.\n ", "language": "en", "n_whitespaces": 47, "n_words": 31, "vocab_size": 27 }
28
Python
27
6339856514897056716bb531acb8489c9cf05d26
telemetry.py
159,326
16
46
get_telemetry_id
https://github.com/RasaHQ/rasa.git
Add support for different recipes (#10641) * Add support for different recipes Fixes https://github.com/RasaHQ/rasa/issues/10473 * Update docs/docs/graph-recipe.mdx Co-authored-by: Joe Juzl <[email protected]>
84
0
38,198
12
3
13
def get_changelist_form(self, request, **kwargs):
    defaults = {
        "formfield_callback": partial(self.formfield_for_dbfield, request=request),
        **kwargs,
    }
    if defaults.get("fields") is None and not modelform_defines_fields(
        defaults.get("form")
    ):
        defaults["fields"] = forms.ALL_FIELDS
    return modelform_factory(self.model, **defaults)
django/contrib/admin/options.py
117
django
{ "docstring": "\n Return a Form class for use in the Formset on the changelist page.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
27
Python
26
9c19aff7c7561e3a82978a272ecdaad40dda5c00
options.py
203,409
10
70
get_changelist_form
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
113
0
50,357
12
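A hedged example of how this hook is typically consumed: overriding it on a ModelAdmin so rows edited inline via list_editable use a custom form. The app, model, and form names are invented.

from django import forms
from django.contrib import admin

from myapp.models import Article          # hypothetical model


class ArticleListForm(forms.ModelForm):    # hypothetical form
    class Meta:
        model = Article
        fields = "__all__"


class ArticleAdmin(admin.ModelAdmin):
    list_display = ["title", "status"]
    list_editable = ["status"]

    def get_changelist_form(self, request, **kwargs):
        # Swap in the custom form for changelist (list_editable) edits.
        return ArticleListForm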
1
13
def get_all_sales_person(date_range, company, field=None, limit=0):
    date_condition = get_date_condition(date_range, "sales_order.transaction_date")

    return frappe.db.sql(
        .format(
            date_condition=date_condition
        ),
        (company, cint(limit)),
        as_dict=1,
    )
erpnext/startup/leaderboard.py
80
erpnext
{ "docstring": "\n\t\tselect sales_team.sales_person as name, sum(sales_order.base_net_total) as value\n\t\tfrom `tabSales Order` as sales_order join `tabSales Team` as sales_team\n\t\t\ton sales_order.name = sales_team.parent and sales_team.parenttype = 'Sales Order'\n\t\twhere sales_order.docstatus = 1\n\t\t\tand sales_order.company = %s\n\t\t\t{date_condition}\n\t\tgroup by sales_team.sales_person\n\t\torder by value DESC\n\t\tlimit %s\n\t", "language": "en", "n_whitespaces": 35, "n_words": 44, "vocab_size": 32 }
18
Python
18
494bd9ef78313436f0424b918f200dab8fc7c20b
leaderboard.py
67,565
19
53
get_all_sales_person
https://github.com/frappe/erpnext.git
style: format code with black
9
0
14,557
10
6
32
def load_digits(*, n_class=10, return_X_y=False, as_frame=False):
    data, fdescr = load_gzip_compressed_csv_data(
        data_file_name="digits.csv.gz", descr_file_name="digits.rst", delimiter=","
    )

    target = data[:, -1].astype(int, copy=False)
    flat_data = data[:, :-1]
    images = flat_data.view()
    images.shape = (-1, 8, 8)

    if n_class < 10:
        idx = target < n_class
        flat_data, target = flat_data[idx], target[idx]
        images = images[idx]

    feature_names = [
        "pixel_{}_{}".format(row_idx, col_idx)
        for row_idx in range(8)
        for col_idx in range(8)
    ]

    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        frame, flat_data, target = _convert_data_dataframe(
            "load_digits", flat_data, target, feature_names, target_columns
        )

    if return_X_y:
        return flat_data, target

    return Bunch(
        data=flat_data,
        target=target,
        frame=frame,
        feature_names=feature_names,
        target_names=np.arange(10),
        images=images,
        DESCR=fdescr,
    )
sklearn/datasets/_base.py
324
scikit-learn
{ "docstring": "Load and return the digits dataset (classification).\n\n Each datapoint is a 8x8 image of a digit.\n\n ================= ==============\n Classes 10\n Samples per class ~180\n Samples total 1797\n Dimensionality 64\n Features integers 0-16\n ================= ==============\n\n This is a copy of the test set of the UCI ML hand-written digits datasets\n https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits\n\n Read more in the :ref:`User Guide <digits_dataset>`.\n\n Parameters\n ----------\n n_class : int, default=10\n The number of classes to return. Between 0 and 10.\n\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object.\n See below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.18\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric). The target is\n a pandas DataFrame or Series depending on the number of target columns.\n If `return_X_y` is True, then (`data`, `target`) will be pandas\n DataFrames or Series as described below.\n\n .. versionadded:: 0.23\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (1797, 64)\n The flattened data matrix. If `as_frame=True`, `data` will be\n a pandas DataFrame.\n target: {ndarray, Series} of shape (1797,)\n The classification target. If `as_frame=True`, `target` will be\n a pandas Series.\n feature_names: list\n The names of the dataset columns.\n target_names: list\n The names of target classes.\n\n .. versionadded:: 0.20\n\n frame: DataFrame of shape (1797, 65)\n Only present when `as_frame=True`. DataFrame with `data` and\n `target`.\n\n .. versionadded:: 0.23\n images: {ndarray} of shape (1797, 8, 8)\n The raw image data.\n DESCR: str\n The full description of the dataset.\n\n (data, target) : tuple if ``return_X_y`` is True\n A tuple of two ndarrays by default. The first contains a 2D ndarray of\n shape (1797, 64) with each row representing one sample and each column\n representing the features. The second ndarray of shape (1797) contains\n the target samples. If `as_frame=True`, both arrays are pandas objects,\n i.e. `X` a dataframe and `y` a series.\n\n .. versionadded:: 0.18\n\n Examples\n --------\n To load the data and visualize the images::\n\n >>> from sklearn.datasets import load_digits\n >>> digits = load_digits()\n >>> print(digits.data.shape)\n (1797, 64)\n >>> import matplotlib.pyplot as plt\n >>> plt.gray()\n >>> plt.matshow(digits.images[0])\n <...>\n >>> plt.show()\n ", "language": "en", "n_whitespaces": 877, "n_words": 356, "vocab_size": 207 }
97
Python
64
39c341ad91b545c895ede9c6240a04659b82defb
_base.py
258,753
36
212
load_digits
https://github.com/scikit-learn/scikit-learn.git
DOC Ensures that load_digits passes numpydoc validation (#22392)
285
0
75,403
11
2
11
def test_family_deprecation(est, family):
    with pytest.warns(FutureWarning, match="`family` was deprecated"):
        if isinstance(family, str):
            assert est.family == family
        else:
            assert est.family.__class__ == family.__class__
            assert est.family.power == family.power
sklearn/linear_model/_glm/tests/test_glm.py
92
scikit-learn
{ "docstring": "Test backward compatibility of the family property.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
24
Python
20
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
test_glm.py
259,455
7
56
test_family_deprecation
https://github.com/scikit-learn/scikit-learn.git
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
77
0
75,785
13
3
16
async def async_start(self) -> None:
    _LOGGER.info("Starting Home Assistant")
    setattr(self.loop, "_thread_ident", threading.get_ident())

    self.state = CoreState.starting
    self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
    self.bus.async_fire(EVENT_HOMEASSISTANT_START)

    try:
        # Only block for EVENT_HOMEASSISTANT_START listener
        self.async_stop_track_tasks()
homeassistant/core.py
101
async def async_start(self) -> None:
    """Finalize startup from inside the event loop.

    This method is a coroutine.
    """
    _LOGGER.info("Starting Home Assistant")
    setattr(self.loop, "_thread_ident", threading.get_ident())

    self.state = CoreState.starting
    self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
    self.bus.async_fire(EVENT_HOMEASSISTANT_START)

    try:
        # Only block for EVENT_HOMEASSISTANT_START listener
        self.async_stop_track_tasks()
core
{ "docstring": "Finalize startup from inside the event loop.\n\n This method is a coroutine.\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
24
Python
24
b0cee0bc46cbd7efe0e6421da18d91595c7a25ad
core.py
297,831
34
150
async_start
https://github.com/home-assistant/core.git
String formatting and max line length - Part 1 (#84390) Co-authored-by: Erik Montnemery <[email protected]>
95
1
96,788
8
4
9
def get_module_by_name(model, module_name):
    name_list = module_name.split(".")
    for name in name_list[:-1]:
        if hasattr(model, name):
            model = getattr(model, name)
        else:
            return None, None
    if hasattr(model, name_list[-1]):
        leaf_module = getattr(model, name_list[-1])
        return model, leaf_module
    else:
        return None, None
nni/compression/pytorch/utils/pruning.py
131
nni
{ "docstring": "\n Get a module specified by its module name\n Parameters\n ----------\n model : pytorch model\n the pytorch model from which to get its module\n module_name : str\n the name of the required module\n Returns\n -------\n module, module\n the parent module of the required module, the required module\n ", "language": "en", "n_whitespaces": 95, "n_words": 46, "vocab_size": 25 }
35
Python
24
d68c786ff81bad19c04619d6a999ff34aaa724e7
pruning.py
113,688
12
82
get_module_by_name
https://github.com/microsoft/nni.git
[Compression] remove pruning v1 & refactor directory (#5228)
107
0
25,005
12
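A usage sketch for the helper above; the dotted name follows PyTorch's `named_modules()` convention, and torchvision is only used here as a convenient source of a nested model (an assumption, not a requirement of the helper).

import torchvision

model = torchvision.models.resnet18()
parent, leaf = get_module_by_name(model, "layer1.0.conv1")
print(type(parent).__name__, type(leaf).__name__)  # BasicBlock Conv2d
# A path that does not resolve returns (None, None) instead of raising.
assert get_module_by_name(model, "layer9.missing") == (None, None)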
2
6
def _get_chromecast(self) -> pychromecast.Chromecast:
    if self._chromecast is None:
        raise HomeAssistantError("Chromecast is not available.")
    return self._chromecast
homeassistant/components/cast/media_player.py
46
core
{ "docstring": "Ensure chromecast is available, to facilitate type checking.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
15
Python
13
ed60611b07e38e7009c6cc266c14625a751e7b32
media_player.py
304,531
5
26
_get_chromecast
https://github.com/home-assistant/core.git
Improve type hint in cast media_player entity (#77025) * Improve type hint in cast media_player entity * Update docstring
47
0
103,338
10
2
6
def is_wheel_installed() -> bool:
    try:
        import pipenv.vendor.wheel as wheel  # noqa: F401
    except ImportError:
        return False

    return True
pipenv/patched/notpip/_internal/utils/misc.py
42
pipenv
{ "docstring": "\n Return whether the wheel package is installed.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
18
Python
17
f3166e673fe8d40277b804d35d77dcdb760fc3b3
misc.py
19,993
9
24
is_wheel_installed
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
45
0
3,167
9
7
14
def test_pick_two_individuals_eligible_for_crossover():
    ind1 = creator.Individual.from_string(
        'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',
        tpot_obj._pset
    )
    ind2 = creator.Individual.from_string(
        'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True)',
        tpot_obj._pset
    )
    ind3 = creator.Individual.from_string(
        'GaussianNB(input_matrix)',
        tpot_obj._pset
    )

    pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3])
    assert ((str(pick1) == str(ind1) and str(pick2) == str(ind2)) or
            str(pick1) == str(ind2) and str(pick2) == str(ind1))

    ind4 = creator.Individual.from_string(
        'KNeighborsClassifier('
        'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True),'
        'KNeighborsClassifier__n_neighbors=10, '
        'KNeighborsClassifier__p=1, '
        'KNeighborsClassifier__weights=uniform'
        ')',
        tpot_obj._pset
    )

    # Eventhough ind4 does not have the same primitive at the root, the tree shares a primitive with ind1
    pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind3, ind4])
    assert ((str(pick1) == str(ind1) and str(pick2) == str(ind4)) or
            str(pick1) == str(ind4) and str(pick2) == str(ind1))
tests/tpot_tests.py
301
tpot
{ "docstring": "Assert that pick_two_individuals_eligible_for_crossover() picks the correct pair of nodes to perform crossover with", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
102
Python
56
388616b6247ca4ea8de4e2f340d6206aee523541
tpot_tests.py
181,704
28
182
test_pick_two_individuals_eligible_for_crossover
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
259
0
43,491
12
4
37
def test_qgelu(self):
    shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
    dtypes = (torch.quint8, torch.qint8)
    memory_formats = (torch.channels_last, torch.contiguous_format)
    test_cases = itertools.product(shapes, dtypes, memory_formats)
    for shape, dtype, memory_format in test_cases:
        if memory_format == torch.channels_last and len(shape) != 4:
            continue
        X, scale, zero_point, torch_type = \
            torch.randn(*shape), 0.1, 0, dtype
        X = X.to(memory_format=memory_format)

        qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)
        dqX = qX.dequantize()

        op = torch.nn.functional.gelu
        dqY = op(dqX)
        qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,
                                       dtype=torch_type)
        qY_hat = op(qX)
        self.assertEqual(qY.dequantize(), qY_hat.dequantize(),
                         msg="F.gelu failed ({} vs {})".format(qY, qY_hat))
test/quantization/core/test_quantized_op.py
327
pytorch
{ "docstring": "Tests the correctness of the quantized::qlayer_norm op.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 6 }
87
Python
67
32bf5e0ef9177a9f8e65cea6cdf6d17e2cc5eaff
test_quantized_op.py
102,401
21
224
test_qgelu
https://github.com/pytorch/pytorch.git
Add native impl of gelu for QuantizedCPU (#69968) Summary: Add native implementation of gelu for quantized CPU. cc jerryzh168 jianyuh raghuramank100 jamesr66a vkuzo Pull Request resolved: https://github.com/pytorch/pytorch/pull/69968 Reviewed By: ejguan Differential Revision: D33187095 Pulled By: vkuzo fbshipit-source-id: 4c4bf0eb47d2d9c2b8827174f2ccdea41986148a
377
0
21,520
13
4
29
def _upsample_conv_2d(x, w, k=None, factor=2, gain=1):
    assert isinstance(factor, int) and factor >= 1

    # Check weight shape.
    assert len(w.shape) == 4
    convH = w.shape[2]
    convW = w.shape[3]
    inC = w.shape[1]

    assert convW == convH

    # Setup filter kernel.
    if k is None:
        k = [1] * factor

    k = _setup_kernel(k) * (gain * (factor**2))
    p = (k.shape[0] - factor) - (convW - 1)

    stride = (factor, factor)
    # Determine data dimensions.
    stride = [1, 1, factor, factor]
    output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW)
    output_padding = (
        output_shape[0] - (x.shape[2] - 1) * stride[0] - convH,
        output_shape[1] - (x.shape[3] - 1) * stride[1] - convW,
    )
    assert output_padding[0] >= 0 and output_padding[1] >= 0
    num_groups = x.shape[1] // inC

    # Transpose weights.
    w = torch.reshape(w, (num_groups, -1, inC, convH, convW))
    w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4)
    w = torch.reshape(w, (num_groups * inC, -1, convH, convW))

    x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0)

    return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))
src/diffusers/models/unet_sde_score_estimation.py
538
diffusers
{ "docstring": "Fused `upsample_2d()` followed by `Conv2d()`.\n\n Args:\n Padding is performed only once at the beginning, not between the operations. The fused op is considerably more\n efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary\n order.\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,\n C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as\n `x`.\n ", "language": "en", "n_whitespaces": 208, "n_words": 139, "vocab_size": 102 }
182
Python
102
3e2cff4da25642e964c48fa44d7c00d3314b1ce8
unet_sde_score_estimation.py
335,695
25
356
_upsample_conv_2d
https://github.com/huggingface/diffusers.git
better names and more cleanup
281
0
120,826
14
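The shape bookkeeping is the subtle part of the function above; a worked arithmetic check for one concrete case (8x8 input, 3x3 kernel, factor 2), using only the formulas that appear in the code:

H = W = 8          # input spatial size
convH = convW = 3  # square weight kernel
factor = 2         # upsampling factor

# conv_transpose2d with stride=factor yields (H - 1) * factor + convH per side.
output_shape = ((H - 1) * factor + convH, (W - 1) * factor + convW)
assert output_shape == (17, 17)

# output_padding is chosen so that formula is matched exactly.
output_padding = (output_shape[0] - (H - 1) * factor - convH,
                  output_shape[1] - (W - 1) * factor - convW)
assert output_padding == (0, 0)

# With the default k = [1] * factor, the trailing upfirdn2d pad/crop then trims
# 17 down to 16, i.e. exactly H * factor, matching the contract in the docstring.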
4
21
def _get_supported_devices(self) -> List[plaidml._DeviceConfig]:
    experimental_setting = plaidml.settings.experimental
    plaidml.settings.experimental = False
    devices = plaidml.devices(self._ctx, limit=100, return_all=True)[0]
    plaidml.settings.experimental = experimental_setting

    supported = [
        d for d in devices
        if d.details
        and json.loads(d.details.decode("utf-8")).get("type", "cpu").lower() == "gpu"
    ]

    self._log("debug", f"Obtained supported devices: {supported}")
    return supported
lib/gpu_stats/amd.py
178
faceswap
{ "docstring": " Obtain GPU devices from PlaidML that are marked as \"supported\".\n\n Returns\n -------\n list_LOGGER.\n The :class:`plaidml._DeviceConfig` objects for all supported GPUs that PlaidML has\n discovered.\n ", "language": "en", "n_whitespaces": 75, "n_words": 24, "vocab_size": 22 }
39
Python
29
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
amd.py
100,548
18
106
_get_supported_devices
https://github.com/deepfakes/faceswap.git
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
135
0
20,012
19
1
5
def save_object(self, object_form, request):
    return object_form.save()
netbox/netbox/views/generic/bulk_views.py
27
netbox
{ "docstring": "\n Provide a hook to modify the object immediately before saving it (e.g. to encrypt secret data).\n\n Args:\n object_form: The model form instance\n request: The current request\n ", "language": "en", "n_whitespaces": 70, "n_words": 26, "vocab_size": 24 }
6
Python
6
80f5c96af3e78232ffe2bcce7c27995612964596
bulk_views.py
266,159
2
16
save_object
https://github.com/netbox-community/netbox.git
Document save_object() on BulkImportView
20
0
78,320
7
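Since the docstring frames this as an override hook, a hedged sketch of a view subclass that mutates the object before persisting it; the model, form, and encryption helper are invented for illustration, and the usual imports are assumed.

class SecretImportView(BulkImportView):      # hypothetical subclass
    queryset = Secret.objects.all()          # hypothetical model
    model_form = SecretImportForm            # hypothetical form

    def save_object(self, object_form, request):
        obj = object_form.save(commit=False)
        obj.encrypt_plaintext()              # invented helper: encrypt before saving
        obj.save()
        return obj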
3
54
def forward(self, feats, img_metas):
    batch_size = len(img_metas)
    mask_features, multi_scale_memorys = self.pixel_decoder(feats)
    # multi_scale_memorys (from low resolution to high resolution)
    decoder_inputs = []
    decoder_positional_encodings = []
    for i in range(self.num_transformer_feat_level):
        decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])
        # shape (batch_size, c, h, w) -> (h*w, batch_size, c)
        decoder_input = decoder_input.flatten(2).permute(2, 0, 1)
        level_embed = self.level_embed.weight[i].view(1, 1, -1)
        decoder_input = decoder_input + level_embed
        # shape (batch_size, c, h, w) -> (h*w, batch_size, c)
        mask = decoder_input.new_zeros(
            (batch_size, ) + multi_scale_memorys[i].shape[-2:],
            dtype=torch.bool)
        decoder_positional_encoding = self.decoder_positional_encoding(
            mask)
        decoder_positional_encoding = decoder_positional_encoding.flatten(
            2).permute(2, 0, 1)
        decoder_inputs.append(decoder_input)
        decoder_positional_encodings.append(decoder_positional_encoding)
    # shape (num_queries, c) -> (num_queries, batch_size, c)
    query_feat = self.query_feat.weight.unsqueeze(1).repeat(
        (1, batch_size, 1))
    query_embed = self.query_embed.weight.unsqueeze(1).repeat(
        (1, batch_size, 1))

    cls_pred_list = []
    mask_pred_list = []
    cls_pred, mask_pred, attn_mask = self.forward_head(
        query_feat, mask_features, multi_scale_memorys[0].shape[-2:])
    cls_pred_list.append(cls_pred)
    mask_pred_list.append(mask_pred)

    for i in range(self.num_transformer_decoder_layers):
        level_idx = i % self.num_transformer_feat_level
        # if a mask is all True(all background), then set it all False.
        attn_mask[torch.where(
            attn_mask.sum(-1) == attn_mask.shape[-1])] = False

        # cross_attn + self_attn
        layer = self.transformer_decoder.layers[i]
        attn_masks = [attn_mask, None]
        query_feat = layer(
            query=query_feat,
            key=decoder_inputs[level_idx],
            value=decoder_inputs[level_idx],
            query_pos=query_embed,
            key_pos=decoder_positional_encodings[level_idx],
            attn_masks=attn_masks,
            query_key_padding_mask=None,
            # here we do not apply masking on padded region
            key_padding_mask=None)

        cls_pred, mask_pred, attn_mask = self.forward_head(
            query_feat, mask_features, multi_scale_memorys[
                (i + 1) % self.num_transformer_feat_level].shape[-2:])

        cls_pred_list.append(cls_pred)
        mask_pred_list.append(mask_pred)

    return cls_pred_list, mask_pred_list
mmdet/models/dense_heads/mask2former_head.py
632
mmdetection
{ "docstring": "Forward function.\n\n Args:\n feats (list[Tensor]): Multi scale Features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple: A tuple contains two elements.\n\n - cls_pred_list (list[Tensor)]: Classification logits \\\n for each decoder layer. Each is a 3D-tensor with shape \\\n (batch_size, num_queries, cls_out_channels). \\\n Note `cls_out_channels` should includes background.\n - mask_pred_list (list[Tensor]): Mask logits for each \\\n decoder layer. Each with shape (batch_size, num_queries, \\\n h, w).\n ", "language": "en", "n_whitespaces": 240, "n_words": 73, "vocab_size": 54 }
201
Python
121
14f0e9585c15c28f0c31dcc3ea352449bbe5eb96
mask2former_head.py
244,142
50
412
forward
https://github.com/open-mmlab/mmdetection.git
[Feature] Add Mask2Former to mmdet (#6938) update doc update doc format deepcopy pixel_decoder cfg move mask_pseudo_sampler cfg to config file move part of postprocess from head to detector fix bug in postprocessing move class setting from head to config file remove if else move mask2bbox to mask/util update docstring update docstring in result2json fix bug update class_weight add maskformer_fusion_head add maskformer fusion head update add cfg for filter_low_score update maskformer update class_weight update config update unit test rename param update comments in config rename variable, rm arg, update unit tests update mask2bbox add unit test for mask2bbox replace unsqueeze(1) and squeeze(1) add unit test for maskformer_fusion_head update docstrings update docstring delete \ remove modification to ce loss update docstring update docstring update docstring of ce loss update unit test update docstring update docstring update docstring rename rename add msdeformattn pixel decoder maskformer refactor add strides in config remove redundant code remove redundant code update unit test update config update
828
0
70,258
16