Dataset schema, as reported by the dataset viewer (column name, dtype, and the observed minimum/maximum value or string length):

| Column | Dtype | Min | Max |
|---|---|---|---|
| complexity | int64 | 1 | 56 |
| n_identifiers | int64 | 1 | 114 |
| code | stringlengths | 19 | 12.7k |
| path | stringlengths | 8 | 134 |
| n_ast_nodes | int64 | 12 | 2.35k |
| ast_errors | stringlengths | 0 | 4.01k |
| repo | stringlengths | 3 | 28 |
| documentation | dict | | |
| n_words | int64 | 2 | 866 |
| language | stringclasses | 1 value | |
| vocab_size | int64 | 2 | 323 |
| commit_id | stringlengths | 40 | 40 |
| file_name | stringlengths | 5 | 79 |
| id | int64 | 243 | 338k |
| nloc | int64 | 1 | 228 |
| token_counts | int64 | 5 | 1.4k |
| fun_name | stringlengths | 1 | 77 |
| url | stringlengths | 31 | 60 |
| commit_message | stringlengths | 3 | 15.3k |
| n_whitespaces | int64 | 1 | 3.23k |
| n_ast_errors | int64 | 0 | 20 |
| d_id | int64 | 74 | 121k |
| ast_levels | int64 | 4 | 29 |
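The records below follow the column order of this schema, one field per line; the `ast_errors` field is omitted from a record when it is empty (its minimum length is 0 in the table above). As a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library; the dataset identifier below is a hypothetical placeholder, not this dataset's actual hub name:

```python
# Minimal inspection sketch. Assumes the records are published on the
# Hugging Face Hub; "user/python-functions" is a hypothetical identifier.
from datasets import load_dataset

ds = load_dataset("user/python-functions", split="train")

record = ds[0]
print(record["repo"], record["path"], record["fun_name"])  # where the function lives
print(record["documentation"]["docstring"])                # the stripped docstring sits in a nested dict
print(record["nloc"], record["token_counts"], record["complexity"])  # size/complexity metrics
```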
1
1
```python
def tutorial_taskflow_api_etl_docker_virtualenv():
    # [END instantiate_dag]

    # [START extract_virtualenv]
```
airflow/providers/docker/example_dags/tutorial_taskflow_api_etl_docker_virtualenv.py
14
airflow
{ "docstring": "\n ### TaskFlow API Tutorial Documentation\n This is a simple ETL data pipeline example which demonstrates the use of\n the TaskFlow API using three simple tasks for Extract, Transform, and Load.\n Documentation that goes along with the Airflow TaskFlow API tutorial is\n located\n [here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html)\n ", "language": "en", "n_whitespaces": 65, "n_words": 43, "vocab_size": 34 }
8
Python
7
5d89dea56843d7b76d5e308e373ba16ecbcffa77
tutorial_taskflow_api_etl_docker_virtualenv.py
45,428
14
66
tutorial_taskflow_api_etl_docker_virtualenv
https://github.com/apache/airflow.git
Switch to Debian 11 (bullseye) as base for our dockerfiles (#21378) Debian 11 Bullseye have been released some time ago as the new LTS Debian release and already all our dependencies (including MySQL and MSSQL ODBC drivers) caught up with it so we can finally migrate to it. This change switches base images to bullsey for our Dockerfiles as well as for Redis image we are using in CI. The relevant packages have been updated to include that and documentation have been updated. Examples of ours also are updated to use "bullseye" rather than buster. Closes: #18190 Closes: #18279
17
0
8,558
6
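Several numeric columns describe each function's abstract syntax tree (`n_ast_nodes`, `ast_levels`, `n_ast_errors`). The pipeline that produced these records is not documented here, but a plausible way to derive such metrics with Python's standard `ast` module looks like the following sketch (an illustration under that assumption, not the dataset's actual tooling):

```python
import ast

def ast_stats(source: str) -> dict:
    """Count AST nodes and measure nesting depth for a Python snippet."""
    tree = ast.parse(source)
    n_nodes = sum(1 for _ in ast.walk(tree))  # every node reachable from the root

    def depth(node: ast.AST) -> int:
        children = list(ast.iter_child_nodes(node))
        return 1 if not children else 1 + max(depth(c) for c in children)

    return {"n_ast_nodes": n_nodes, "ast_levels": depth(tree)}

print(ast_stats("def f(x):\n    return x + 1"))
```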
2
18
```python
def call_score(self, other_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="score",
        description=,
    )
    ns_parser = parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        pycoingecko_view.display_score(
            self.coin_map_df["CoinGecko"], ns_parser.export
        )
```
gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py
97
OpenBBTerminal
{ "docstring": "Process score command\n In this view you can find different kind of scores for loaded coin.\n Those scores represents different rankings, sentiment metrics, some user stats and others.\n You will see CoinGecko scores, Developer Scores, Community Scores, Sentiment, Reddit scores\n and many others.", "language": "en", "n_whitespaces": 86, "n_words": 43, "vocab_size": 37 }
24
Python
21
ea964109d654394cc0a5237e6ec5510ba6404097
dd_controller.py
281,134
18
61
call_score
https://github.com/OpenBB-finance/OpenBBTerminal.git
Crypto menu refactor (#1119) * enabled some crypto commands in dd to be called independent of source loaded * support for coin_map_df in all dd functions + load ta and plot chart refactor * updated tests and removed coingecko scrapping where possible * removed ref of command from hugo * updated pycoingecko version * refactoring load * refactored load to fetch prices; pred can run independent of source now * load by default usd on cp/cg and usdt on cb/bin * updated to rich for formatting and updated dependencies * fixed changes requested * update docs * revert discord requirements * removed absolute from calculate change for price * fixing pr issues * fix loading issue when similar coins exist, move coins to home, fill n/a * update docs for coins * adds load to ta and pred menu
158
0
83,546
11
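Each record's `documentation` dict carries the stripped docstring together with simple statistics over it (`n_words`, `vocab_size`, `n_whitespaces`). A whitespace-split word count reproduces the reported numbers; the sketch below is a guess at the counting rule, since the dataset does not state its tokenization:

```python
# Hypothetical reconstruction of the documentation-dict statistics,
# using the docstring from the defragment6 record below.
docstring = (
    "Performs defragmentation of a list of IPv6 packets. Packets are reordered. "
    "Crap is dropped. What lacks is completed by 'X' characters."
)
words = docstring.split()
print({"n_words": len(words), "vocab_size": len(set(words))})
# -> {'n_words': 21, 'vocab_size': 19}, matching that record's documentation dict
```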
13
28
```python
def defragment6(packets):
    # Remove non fragments
    lst = [x for x in packets if IPv6ExtHdrFragment in x]
    if not lst:
        return []

    id = lst[0][IPv6ExtHdrFragment].id
    llen = len(lst)
    lst = [x for x in lst if x[IPv6ExtHdrFragment].id == id]
    if len(lst) != llen:
        warning("defragment6: some fragmented packets have been removed from list")  # noqa: E501

    # reorder fragments
    res = []
    while lst:
        min_pos = 0
        min_offset = lst[0][IPv6ExtHdrFragment].offset
        for p in lst:
            cur_offset = p[IPv6ExtHdrFragment].offset
            if cur_offset < min_offset:
                min_pos = 0
                min_offset = cur_offset
        res.append(lst[min_pos])
        del lst[min_pos]

    # regenerate the fragmentable part
    fragmentable = b""
    for p in res:
        q = p[IPv6ExtHdrFragment]
        offset = 8 * q.offset
        if offset != len(fragmentable):
            warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset))  # noqa: E501
        fragmentable += b"X" * (offset - len(fragmentable))
        fragmentable += raw(q.payload)

    # Regenerate the unfragmentable part.
    q = res[0].copy()
    nh = q[IPv6ExtHdrFragment].nh
    q[IPv6ExtHdrFragment].underlayer.nh = nh
    q[IPv6ExtHdrFragment].underlayer.plen = len(fragmentable)
    del q[IPv6ExtHdrFragment].underlayer.payload
    q /= conf.raw_layer(load=fragmentable)
    del q.plen

    if q[IPv6].underlayer:
        q[IPv6] = IPv6(raw(q[IPv6]))
    else:
        q = IPv6(raw(q))
    return q
```
scapy/layers/inet6.py
473
scapy
{ "docstring": "\n Performs defragmentation of a list of IPv6 packets. Packets are reordered.\n Crap is dropped. What lacks is completed by 'X' characters.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 19 }
174
Python
102
08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf
inet6.py
209,547
40
297
defragment6
https://github.com/secdev/scapy.git
E275 - Missing whitespace after keyword (#3711) Co-authored-by: Alexander Aring <[email protected]> Co-authored-by: Anmol Sarma <[email protected]> Co-authored-by: antoine.torre <[email protected]> Co-authored-by: Antoine Vacher <[email protected]> Co-authored-by: Arnaud Ebalard <[email protected]> Co-authored-by: atlowl <[email protected]> Co-authored-by: Brian Bienvenu <[email protected]> Co-authored-by: Chris Packham <[email protected]> Co-authored-by: CQ <[email protected]> Co-authored-by: Daniel Collins <[email protected]> Co-authored-by: Federico Maggi <[email protected]> Co-authored-by: Florian Maury <[email protected]> Co-authored-by: _Frky <[email protected]> Co-authored-by: g-mahieux <[email protected]> Co-authored-by: gpotter2 <[email protected]> Co-authored-by: Guillaume Valadon <[email protected]> Co-authored-by: Hao Zheng <[email protected]> Co-authored-by: Haresh Khandelwal <[email protected]> Co-authored-by: Harri Hämäläinen <[email protected]> Co-authored-by: hecke <[email protected]> Co-authored-by: Jan Romann <[email protected]> Co-authored-by: Jan Sebechlebsky <[email protected]> Co-authored-by: jdiog0 <[email protected]> Co-authored-by: jockque <[email protected]> Co-authored-by: Julien Bedel <[email protected]> Co-authored-by: Keith Scott <[email protected]> Co-authored-by: Kfir Gollan <[email protected]> Co-authored-by: Lars Munch <[email protected]> Co-authored-by: ldp77 <[email protected]> Co-authored-by: Leonard Crestez <[email protected]> Co-authored-by: Marcel Patzlaff <[email protected]> Co-authored-by: Martijn Thé <[email protected]> Co-authored-by: Martine Lenders <[email protected]> Co-authored-by: Michael Farrell <[email protected]> Co-authored-by: Michał Mirosław <[email protected]> Co-authored-by: mkaliszan <[email protected]> Co-authored-by: mtury <[email protected]> Co-authored-by: Neale Ranns <[email protected]> Co-authored-by: Octavian Toader <[email protected]> Co-authored-by: Peter Eisenlohr <[email protected]> Co-authored-by: Phil <[email protected]> Co-authored-by: Pierre Lalet <[email protected]> Co-authored-by: Pierre Lorinquer <[email protected]> Co-authored-by: piersoh <[email protected]> Co-authored-by: plorinquer <[email protected]> Co-authored-by: pvinci <[email protected]> Co-authored-by: Rahul Jadhav <[email protected]> Co-authored-by: Robin Jarry <[email protected]> Co-authored-by: romain-perez <[email protected]> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <[email protected]> Co-authored-by: Sebastian Baar <[email protected]> Co-authored-by: sebastien mainand <[email protected]> Co-authored-by: smehner1 <[email protected]> Co-authored-by: speakinghedge <[email protected]> Co-authored-by: Steven Van Acker <[email protected]> Co-authored-by: Thomas Faivre <[email protected]> Co-authored-by: Tran Tien Dat <[email protected]> Co-authored-by: Wael Mahlous <[email protected]> Co-authored-by: waeva <[email protected]> Co-authored-by: Alexander Aring <[email protected]> Co-authored-by: Anmol Sarma <[email protected]> Co-authored-by: antoine.torre <[email protected]> Co-authored-by: Antoine Vacher <[email protected]> Co-authored-by: Arnaud Ebalard <[email protected]> Co-authored-by: atlowl <[email protected]> Co-authored-by: Brian Bienvenu <[email protected]> Co-authored-by: Chris Packham <[email protected]> Co-authored-by: CQ <[email protected]> Co-authored-by: Daniel Collins <[email protected]> Co-authored-by: Federico Maggi <[email protected]> Co-authored-by: Florian Maury <[email protected]> Co-authored-by: _Frky 
<[email protected]> Co-authored-by: g-mahieux <[email protected]> Co-authored-by: gpotter2 <[email protected]> Co-authored-by: Guillaume Valadon <[email protected]> Co-authored-by: Hao Zheng <[email protected]> Co-authored-by: Haresh Khandelwal <[email protected]> Co-authored-by: Harri Hämäläinen <[email protected]> Co-authored-by: hecke <[email protected]> Co-authored-by: Jan Romann <[email protected]> Co-authored-by: Jan Sebechlebsky <[email protected]> Co-authored-by: jdiog0 <[email protected]> Co-authored-by: jockque <[email protected]> Co-authored-by: Julien Bedel <[email protected]> Co-authored-by: Keith Scott <[email protected]> Co-authored-by: Kfir Gollan <[email protected]> Co-authored-by: Lars Munch <[email protected]> Co-authored-by: ldp77 <[email protected]> Co-authored-by: Leonard Crestez <[email protected]> Co-authored-by: Marcel Patzlaff <[email protected]> Co-authored-by: Martijn Thé <[email protected]> Co-authored-by: Martine Lenders <[email protected]> Co-authored-by: Michael Farrell <[email protected]> Co-authored-by: Michał Mirosław <[email protected]> Co-authored-by: mkaliszan <[email protected]> Co-authored-by: mtury <[email protected]> Co-authored-by: Neale Ranns <[email protected]> Co-authored-by: Octavian Toader <[email protected]> Co-authored-by: Peter Eisenlohr <[email protected]> Co-authored-by: Phil <[email protected]> Co-authored-by: Pierre Lalet <[email protected]> Co-authored-by: Pierre Lorinquer <[email protected]> Co-authored-by: piersoh <[email protected]> Co-authored-by: pvinci <[email protected]> Co-authored-by: Rahul Jadhav <[email protected]> Co-authored-by: Robin Jarry <[email protected]> Co-authored-by: romain-perez <[email protected]> Co-authored-by: rperez <rperez@debian> Co-authored-by: Sabrina Dubroca <[email protected]> Co-authored-by: Sebastian Baar <[email protected]> Co-authored-by: sebastien mainand <[email protected]> Co-authored-by: smehner1 <[email protected]> Co-authored-by: Steven Van Acker <[email protected]> Co-authored-by: Thomas Faivre <[email protected]> Co-authored-by: Tran Tien Dat <[email protected]> Co-authored-by: Wael Mahlous <[email protected]> Co-authored-by: waeva <[email protected]>
412
0
52,730
15
8
15
```python
def flattened_having(self) -> List[Condition]:
    flattened: List[Condition] = []
    boolean_conditions: List[BooleanCondition] = []

    for condition in self.having:
        if isinstance(condition, Condition):
            flattened.append(condition)
        elif isinstance(condition, BooleanCondition):
            boolean_conditions.append(condition)

    while len(boolean_conditions) > 0:
        boolean_condition = boolean_conditions.pop()
        for condition in boolean_condition.conditions:
            if isinstance(condition, Condition):
                flattened.append(condition)
            elif isinstance(condition, BooleanCondition):
                boolean_conditions.append(condition)

    return flattened
```
src/sentry/search/events/builder.py
184
sentry
{ "docstring": "Return self.having as a flattened list ignoring boolean operators\n This is because self.having can have a mix of BooleanConditions and Conditions. And each BooleanCondition can in\n turn be a mix of either type.\n ", "language": "en", "n_whitespaces": 54, "n_words": 33, "vocab_size": 27 }
45
Python
30
2a4da479b2d4a2faa901701f4c73ff823236e9e8
builder.py
95,431
20
116
flattened_having
https://github.com/getsentry/sentry.git
fix(snql): Add aggregations to select in auto_aggregation (#31061) - This is to fix an issue for queries that have the uniq aggregation in the HAVING clause, and is not selected. - Previously we would not add the aggregation to the select clause in these cases - Now anything in the having clause will get added to the select clause as well if auto_aggregation is enabled - if its disabled we raise an invalid search query error - This also fixes a bug where this having validation wasn't working correctly for boolean conditions
229
0
19,211
14
1
7
```python
def scale_var(self) -> tk.StringVar:
    retval = self._vars["scale"]
    assert isinstance(retval, tk.StringVar)
    return retval
```
lib/training/preview_tk.py
49
faceswap
{ "docstring": ":class:`tkinter.StringVar`: The variable holding the currently selected \"##%\" formatted\n percentage scaling amount displayed in the Combobox. ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 15 }
12
Python
11
7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5
preview_tk.py
101,568
6
29
scale_var
https://github.com/deepfakes/faceswap.git
Training - Use custom preview pop-out
40
0
20,978
8
1
9
```python
def test_second_upgrade_from_different_user(self) -> None:
    channel = self._upgrade_room()
    self.assertEqual(200, channel.code, channel.result)

    channel = self._upgrade_room(self.other_token, expire_cache=False)
    self.assertEqual(400, channel.code, channel.result)
```
tests/rest/client/test_upgrade_room.py
89
synapse
{ "docstring": "A second room upgrade from a different user is blocked.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
17
Python
13
99d3931974e65865d1102ee79d7b7e2b017a3180
test_upgrade_room.py
248,623
6
57
test_second_upgrade_from_different_user
https://github.com/matrix-org/synapse.git
Add more tests for room upgrades (#13074) Signed-off-by: Sean Quah <[email protected]>
52
0
72,381
9
5
18
```python
def generate_tex_file(expression, environment=None, tex_template=None):
    if tex_template is None:
        tex_template = config["tex_template"]
    if environment is not None:
        output = tex_template.get_texcode_for_expression_in_env(expression, environment)
    else:
        output = tex_template.get_texcode_for_expression(expression)

    tex_dir = config.get_dir("tex_dir")
    if not tex_dir.exists():
        tex_dir.mkdir()

    result = tex_dir / (tex_hash(output) + ".tex")
    if not result.exists():
        logger.info(
            "Writing %(expression)s to %(path)s",
            {"expression": expression, "path": f"{result}"},
        )
        result.write_text(output, encoding="utf-8")
    return result
```
manim/utils/tex_file_writing.py
213
manim
{ "docstring": "Takes a tex expression (and an optional tex environment),\n and returns a fully formed tex file ready for compilation.\n\n Parameters\n ----------\n expression : :class:`str`\n String containing the TeX expression to be rendered, e.g. ``\\\\sqrt{2}`` or ``foo``\n environment : Optional[:class:`str`], optional\n The string containing the environment in which the expression should be typeset, e.g. ``align*``\n tex_template : Optional[:class:`~.TexTemplate`], optional\n Template class used to typesetting. If not set, use default template set via `config[\"tex_template\"]`\n\n Returns\n -------\n :class:`Path`\n Path to generated TeX file\n ", "language": "en", "n_whitespaces": 138, "n_words": 80, "vocab_size": 60 }
55
Python
40
9d1f066d637cb15baea10e6907ab85efff8fb36f
tex_file_writing.py
190,078
18
122
generate_tex_file
https://github.com/ManimCommunity/manim.git
Migrate more `os.path` to `pathlib` (#2980) * Migrate more `os.path` to `pathlib` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix type errors with recent pathlib code * pathlib fixes * more pathlib fixes * remove unused imports introduced by pathlib migration * convert `open()` calls to pathlib * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Migrate tex_file_writing to pathlib * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * converted more old code to pathlib, and fixed a bug in module_ops * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix test failures * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix test failures * Apply suggestions from code review Co-authored-by: Benjamin Hackl <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <[email protected]>
153
0
46,314
13
1
8
```python
def find_legacy_row() -> StatementLambdaElement:
    return lambda_stmt(lambda: select(func.max(States.event_id)))
```
homeassistant/components/recorder/queries.py
42
core
{ "docstring": "Check if there are still states in the table with an event_id.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
7
Python
7
a70e2a33dcd85608f1145d8fc2e89a87620f4ef3
queries.py
300,743
3
24
find_legacy_row
https://github.com/home-assistant/core.git
Fixing purging legacy rows and improve performance (#71916)
13
0
99,602
13
2
7
```python
def completed_count(self):
    return sum(int(result.successful()) for result in self.results)
```
celery/result.py
41
celery
{ "docstring": "Task completion count.\n\n Note that `complete` means `successful` in this context. In other words, the\n return value of this method is the number of ``successful`` tasks.\n\n Returns:\n int: the number of complete (i.e. successful) tasks.\n ", "language": "en", "n_whitespaces": 74, "n_words": 35, "vocab_size": 28 }
8
Python
8
720d1928c4b583f36ca0cce7607b616466f2ffbb
result.py
208,244
2
24
completed_count
https://github.com/celery/celery.git
Add clarifying information to completed_count documentation (#7873) * Add clarifying information to completed_count docstring * Update canvas documentation
22
0
52,260
11
1
15
```python
def _create_mock_app_session(*args, **kwargs):
    mock_id = mock.PropertyMock(
        return_value="mock_id:%s" % ServerTestCase._next_session_id
    )
    ServerTestCase._next_session_id += 1

    mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs)
    type(mock_session).id = mock_id
    return mock_session
```
lib/tests/server_test_case.py
93
streamlit
{ "docstring": "Create a mock AppSession. Each mocked instance will have\n its own unique ID.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
24
Python
19
704eab3478cf69847825b23dabf15813a8ac9fa2
server_test_case.py
118,583
8
57
_create_mock_app_session
https://github.com/streamlit/streamlit.git
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
84
0
26,306
11
12
30
```python
def upgrade(refresh=True, dist_upgrade=False, **kwargs):
    cache_valid_time = kwargs.pop("cache_valid_time", 0)
    if salt.utils.data.is_true(refresh):
        refresh_db(cache_valid_time)

    old = list_pkgs()
    if "force_conf_new" in kwargs and kwargs["force_conf_new"]:
        dpkg_options = ["--force-confnew"]
    else:
        dpkg_options = ["--force-confold", "--force-confdef"]

    cmd = [
        "apt-get",
        "-q",
        "-y",
    ]
    for option in dpkg_options:
        cmd.append("-o")
        cmd.append("DPkg::Options::={}".format(option))

    if kwargs.get("force_yes", False):
        cmd.append("--force-yes")
    if kwargs.get("skip_verify", False):
        cmd.append("--allow-unauthenticated")
    if kwargs.get("download_only", False) or kwargs.get("downloadonly", False):
        cmd.append("--download-only")
    if kwargs.get("allow_downgrades", False):
        cmd.append("--allow-downgrades")

    cmd.append("dist-upgrade" if dist_upgrade else "upgrade")
    result = _call_apt(cmd, env=DPKG_ENV_VARS.copy())
    __context__.pop("pkg.list_pkgs", None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if result["retcode"] != 0:
        raise CommandExecutionError(
            "Problem encountered upgrading packages",
            info={"changes": ret, "result": result},
        )

    return ret
```
salt/modules/aptpkg.py
446
salt
{ "docstring": "\n .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0\n On minions running systemd>=205, `systemd-run(1)`_ is now used to\n isolate commands which modify installed packages from the\n ``salt-minion`` daemon's control group. This is done to keep systemd\n from killing any apt-get/dpkg commands spawned by Salt when the\n ``salt-minion`` service is restarted. (see ``KillMode`` in the\n `systemd.kill(5)`_ manpage for more information). If desired, usage of\n `systemd-run(1)`_ can be suppressed by setting a :mod:`config option\n <salt.modules.config.get>` called ``systemd.scope``, with a value of\n ``False`` (no quotes).\n\n .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html\n .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html\n\n Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``\n if ``dist_upgrade`` is ``True``.\n\n Returns a dictionary containing the changes:\n\n .. code-block:: python\n\n {'<package>': {'old': '<old-version>',\n 'new': '<new-version>'}}\n\n dist_upgrade\n Whether to perform the upgrade using dist-upgrade vs upgrade. Default\n is to use upgrade.\n\n .. versionadded:: 2014.7.0\n\n refresh : True\n If ``True``, the apt cache will be refreshed first. By default,\n this is ``True`` and a refresh is performed.\n\n cache_valid_time\n\n .. versionadded:: 2016.11.0\n\n Skip refreshing the package database if refresh has already occurred within\n <value> seconds\n\n download_only (or downloadonly)\n Only download the packages, don't unpack or install them. Use\n downloadonly to be in line with yum and zypper module.\n\n .. versionadded:: 2018.3.0\n\n force_conf_new\n Always install the new version of any configuration files.\n\n .. versionadded:: 2015.8.0\n\n allow_downgrades\n Allow apt to downgrade packages without a prompt.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.upgrade\n ", "language": "en", "n_whitespaces": 472, "n_words": 222, "vocab_size": 163 }
95
Python
74
9e1ca8b5b9e7006fea28f473711917755cf5a262
aptpkg.py
216,507
36
254
upgrade
https://github.com/saltstack/salt.git
Add --allow-downgrades capability for apt upgrade
275
0
54,618
13
1
9
```python
def elu(x, alpha=1.0):
    return backend.elu(x, alpha)


@keras_export("keras.activations.selu")
@tf.__internal__.dispatch.add_dispatch_support
```
keras/activations.py
54
@keras_export("keras.activations.selu") @tf.__internal__.dispatch.add_dispatch_support
keras
{ "docstring": "Exponential Linear Unit.\n\n The exponential linear unit (ELU) with `alpha > 0` is:\n `x` if `x > 0` and\n `alpha * (exp(x) - 1)` if `x < 0`\n The ELU hyperparameter `alpha` controls the value to which an\n ELU saturates for negative net inputs. ELUs diminish the\n vanishing gradient effect.\n\n ELUs have negative values which pushes the mean of the activations\n closer to zero.\n Mean activations that are closer to zero enable faster learning as they\n bring the gradient closer to the natural gradient.\n ELUs saturate to a negative value when the argument gets smaller.\n Saturation means a small derivative which decreases the variation\n and the information that is propagated to the next layer.\n\n Example Usage:\n\n >>> import tensorflow as tf\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',\n ... input_shape=(28, 28, 1)))\n >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))\n >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))\n\n <tensorflow.python.keras.engine.sequential.Sequential object ...>\n\n Args:\n x: Input tensor.\n alpha: A scalar, slope of negative section. `alpha` controls the value to\n which an ELU saturates for negative net inputs.\n\n Returns:\n The exponential linear unit (ELU) activation function: `x` if `x > 0` and\n `alpha * (exp(x) - 1)` if `x < 0`.\n\n\n Reference:\n [Fast and Accurate Deep Network Learning by Exponential Linear Units\n (ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)\n ", "language": "en", "n_whitespaces": 357, "n_words": 216, "vocab_size": 128 }
8
Python
8
84afc5193d38057e2e2badf9c889ea87d80d8fbf
activations.py
269,313
2
21
elu
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
12
1
80,027
7
4
24
```python
async def async_update(self) -> None:
    try:
        self._weather = await self._weather_api.async_get_weather(self._station)
    except (asyncio.TimeoutError, aiohttp.ClientError, ValueError) as error:
        _LOGGER.error("Could not fetch weather data: %s", error)
        return

    self._attr_native_value = getattr(
        self._weather, self.entity_description.api_key
    )
    if (
        self._attr_native_value is None
        and self.entity_description.key in NONE_IS_ZERO_SENSORS
    ):
        self._attr_native_value = 0

    self._attr_extra_state_attributes = {
        ATTR_ACTIVE: self._weather.active,
        ATTR_MEASURE_TIME: self._weather.measure_time,
    }
```
homeassistant/components/trafikverket_weatherstation/sensor.py
173
core
{ "docstring": "Get the latest data from Trafikverket and updates the states.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
51
Python
46
395093351428d349246e4c1de526b159a167f382
sensor.py
308,863
19
109
async_update
https://github.com/home-assistant/core.git
Code improvements to trafikverket_weatherstation (#62854) * Code cleanup * Fix extra state attributes * Fix review comments * Fix precipitation_amount if None * Fix sensors returning None * Use const for sensors reporting None
213
0
107,592
12
1
7
```python
def required_resources(self) -> Dict[str, float]:
    return _sum_bundles(self._bundles)
```
python/ray/tune/execution/placement_groups.py
33
ray
{ "docstring": "Returns a dict containing the sums of all resources", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
7
Python
7
96cceb08e8bf73df990437002e25883c5a72d30c
placement_groups.py
127,516
3
20
required_resources
https://github.com/ray-project/ray.git
[tune] Raise error in PGF if head and worker bundles are empty (#28445) Scheduling empty placement groups is not supported by Ray core (see e.g. #28443), so we shouldn't allow them to be created in the first place. If we need fully empty resource requests, we can include this in the upcoming execution/resource refactor. Signed-off-by: Kai Fricke <[email protected]>
21
0
28,458
8
1
5
```python
def chvatal_graph(create_using=None):
    description = [
        "adjacencylist",
        "Chvatal Graph",
        12,
        [
            [2, 5, 7, 10],
            [3, 6, 8],
            [4, 7, 9],
            [5, 8, 10],
            [6, 9],
            [11, 12],
            [11, 12],
            [9, 12],
            [11],
            [11, 12],
            [],
            [],
        ],
    ]
    G = make_small_undirected_graph(description, create_using)
    return G
```
networkx/generators/small.py
138
networkx
{ "docstring": "\n Returns the Chvátal Graph\n\n The Chvátal Graph is an undirected graph with 12 nodes and 24 edges [1]_.\n It has 370 distinct (directed) Hamiltonian cycles, giving a unique generalized\n LCF notation of order 4, two of order 6 , and 43 of order 1 [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n The Chvátal graph with 12 nodes and 24 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Chv%C3%A1tal_graph\n .. [2] https://mathworld.wolfram.com/ChvatalGraph.html\n\n ", "language": "en", "n_whitespaces": 147, "n_words": 88, "vocab_size": 64 }
44
Python
32
dec723f072eb997a497a159dbe8674cd39999ee9
small.py
176,152
22
105
chvatal_graph
https://github.com/networkx/networkx.git
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
226
0
41,722
9
1
14
```python
async def test_igration_and_updating_configuration(hass, hass_storage):
    core_data = {
        "data": {
            "elevation": 10,
            "latitude": 55,
            "location_name": "Home",
            "longitude": 13,
            "time_zone": "Europe/Copenhagen",
            "unit_system": "imperial",
            "external_url": "https://www.example.com",
            "internal_url": "http://example.local",
            "currency": "BTC",
        },
        "key": "core.config",
        "version": 1,
        "minor_version": 1,
    }
    hass_storage["core.config"] = dict(core_data)
    await config_util.async_process_ha_core_config(
        hass, {"allowlist_external_dirs": "/etc"}
    )
    await hass.config.async_update(latitude=50, currency="USD")

    expected_new_core_data = copy.deepcopy(core_data)
    # From async_update above
    expected_new_core_data["data"]["latitude"] = 50
    expected_new_core_data["data"]["currency"] = "USD"
    # 1.1 -> 1.2 store migration with migrated unit system
    expected_new_core_data["data"]["unit_system_v2"] = "us_customary"
    expected_new_core_data["minor_version"] = 2
    assert hass_storage["core.config"] == expected_new_core_data
    assert hass.config.latitude == 50
    assert hass.config.currency == "USD"
```
tests/test_config.py
314
core
{ "docstring": "Test updating configuration stores the new configuration.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
88
Python
70
67d1dde69fbacf33f2c39ea14d89f2afa425ed18
test_config.py
289,512
30
166
test_igration_and_updating_configuration
https://github.com/home-assistant/core.git
Rename IMPERIAL_SYSTEM to US_CUSTOMARY_SYSTEM (#80253) * Rename IMPERIAL_SYSTEM * Deprecate is_metric property and adjust tests * Adjust unit_system config validation * Add yaml tests * Add tests for private name * Fix incorrect rebase * Adjust docstring * Add store migration * Update unit_system.py * Minimise test tweaks * Fix tests * Add conversion to migration * Rename new key and adjust tests * Adjust websocket_detect_config * Move original_unit_system tracking to subclass
280
0
88,654
11
1
8
```python
def mock_cpuinfo_config_flow() -> Generator[MagicMock, None, None]:
    with patch(
        "homeassistant.components.cpuspeed.config_flow.cpuinfo.get_cpu_info",
        return_value=True,
    ) as cpuinfo_mock:
        yield cpuinfo_mock


@pytest.fixture
```
tests/components/cpuspeed/conftest.py
58
@pytest.fixture
core
{ "docstring": "Return a mocked get_cpu_info.\n\n It is only used to check truthy or falsy values, so it is mocked\n to return True.\n ", "language": "en", "n_whitespaces": 30, "n_words": 21, "vocab_size": 18 }
16
Python
16
63d519c1a896c6eb20f7ffb032cb7712bbac6b5c
conftest.py
291,535
11
29
mock_cpuinfo_config_flow
https://github.com/home-assistant/core.git
Spelling updates (#82867)
45
1
90,642
11
15
10
```python
def _propagate_index_objs(self, axis=None):
    self._filter_empties()
    if axis is None or axis == 0:
        cum_row_lengths = np.cumsum([0] + self._row_lengths)
    if axis is None or axis == 1:
        cum_col_widths = np.cumsum([0] + self._column_widths)

    if axis is None:
```
modin/core/dataframe/pandas/dataframe/dataframe.py
107
modin
{ "docstring": "\n Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily.\n\n Adds `set_axis` function to call-queue of each partition from `self._partitions`\n to apply new axis.\n\n Parameters\n ----------\n axis : int, default: None\n The axis to apply to. If it's None applies to both axes.\n ", "language": "en", "n_whitespaces": 108, "n_words": 47, "vocab_size": 38 }
34
Python
20
3c740dbfcdd69ddc3ab45a42be996e5c61104342
dataframe.py
152,956
64
373
_propagate_index_objs
https://github.com/modin-project/modin.git
FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662) Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: Naren Krishna <[email protected]>
91
0
35,202
12
5
28
```python
def _get_counts(values, uniques):
    if values.dtype.kind in "OU":
        counter = _NaNCounter(values)
        output = np.zeros(len(uniques), dtype=np.int64)
        for i, item in enumerate(uniques):
            with suppress(KeyError):
                output[i] = counter[item]
        return output

    unique_values, counts = _unique_np(values, return_counts=True)

    # Recorder unique_values based on input: `uniques`
    uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)
    if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):
        uniques_in_values[-1] = True

    unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])
    output = np.zeros_like(uniques, dtype=np.int64)
    output[uniques_in_values] = counts[unique_valid_indices]
    return output
```
sklearn/utils/_encode.py
252
scikit-learn
{ "docstring": "Get the count of each of the `uniques` in `values`.\n\n The counts will use the order passed in by `uniques`. For non-object dtypes,\n `uniques` is assumed to be sorted and `np.nan` is at the end.\n ", "language": "en", "n_whitespaces": 44, "n_words": 35, "vocab_size": 28 }
63
Python
47
7f0006c8aad1a09621ad19c3db19c3ff0555a183
_encode.py
259,242
16
161
_get_counts
https://github.com/scikit-learn/scikit-learn.git
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
154
0
75,673
13
1
5
```python
def log_error(self, format, *args):
    self.log_message(format, *args)
```
python3.10.4/Lib/http/server.py
33
XX-Net
{ "docstring": "Log an error.\n\n This is called when a request cannot be fulfilled. By\n default it passes the message on to log_message().\n\n Arguments are the same as for log_message().\n\n XXX This should go to the separate error log.\n\n ", "language": "en", "n_whitespaces": 73, "n_words": 37, "vocab_size": 32 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
server.py
217,889
2
20
log_error
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
20
0
54,992
8
1
12
```python
def _video_tmp_file(self) -> str:
    path, filename = os.path.split(self._output_filename)
    retval = os.path.join(path, f"__tmp_{filename}")
    logger.debug(retval)
    return retval
```
plugins/convert/writer/ffmpeg.py
75
faceswap
{ "docstring": " str: Full path to the temporary video file that is generated prior to muxing final\n audio. ", "language": "en", "n_whitespaces": 24, "n_words": 16, "vocab_size": 15 }
15
Python
13
60291d49c4da1cd260fbc0b04aa6a312eedfefbb
ffmpeg.py
100,621
7
43
_video_tmp_file
https://github.com/deepfakes/faceswap.git
ffmpeg writer: Create new filename if output pre-exists
50
0
20,083
10
2
10
```python
def bar_chart(self, data=None, width=0, height=0, use_container_width=True):
    if _use_arrow():
        return self.dg._arrow_bar_chart(data, width, height, use_container_width)
    else:
        return self.dg._legacy_bar_chart(data, width, height, use_container_width)
```
lib/streamlit/elements/dataframe_selector.py
86
streamlit
{ "docstring": "Display a bar chart.\n\n This is just syntax-sugar around st.altair_chart. The main difference\n is this command uses the data's own column and indices to figure out\n the chart's spec. As a result this is easier to use for many \"just plot\n this\" scenarios, while being less customizable.\n\n If st.bar_chart does not guess the data specification\n correctly, try specifying your desired chart using st.altair_chart.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, Iterable, or dict\n Data to be plotted.\n Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization\n (i.e. with `config.dataFrameSerialization = \"legacy\"`).\n To use pyarrow tables, please enable pyarrow by changing the config setting,\n `config.dataFrameSerialization = \"arrow\"`.\n\n width : int\n The chart width in pixels. If 0, selects the width automatically.\n\n height : int\n The chart height in pixels. If 0, selects the height automatically.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the width argument.\n\n Example\n -------\n >>> chart_data = pd.DataFrame(\n ... np.random.randn(50, 3),\n ... columns=[\"a\", \"b\", \"c\"])\n ...\n >>> st.bar_chart(chart_data)\n\n .. output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/charts.bar_chart.py\n height: 400px\n\n ", "language": "en", "n_whitespaces": 451, "n_words": 177, "vocab_size": 125 }
19
Python
15
72703b38029f9358a0ec7ca5ed875a6b438ece19
dataframe_selector.py
118,728
5
59
bar_chart
https://github.com/streamlit/streamlit.git
Replace static apps with live Cloud apps (#4317) Co-authored-by: kajarenc <[email protected]>
62
0
26,385
11
1
11
```python
def write_filepath(filepath, strategy):
    dirpath = os.path.dirname(filepath)
    base = os.path.basename(filepath)
    return os.path.join(write_dirpath(dirpath, strategy), base)
```
keras/distribute/distributed_file_utils.py
70
keras
{ "docstring": "Returns the writing file path to be used to save file distributedly.\n\n Directory to contain `filepath` would be created if it doesn't exist.\n\n Args:\n filepath: Original filepath that would be used without distribution.\n strategy: The tf.distribute strategy object currently used.\n\n Returns:\n The writing filepath that should be used to save file with distribution.\n ", "language": "en", "n_whitespaces": 80, "n_words": 53, "vocab_size": 36 }
13
Python
12
84afc5193d38057e2e2badf9c889ea87d80d8fbf
distributed_file_utils.py
270,311
4
44
write_filepath
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
25
0
80,425
9
1
11
```python
def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None):
    self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
```
lib/matplotlib/backend_bases.py
60
matplotlib
{ "docstring": "\n Draw a TeX instance.\n\n Parameters\n ----------\n gc : `.GraphicsContextBase`\n The graphics context.\n x : float\n The x location of the text in display coords.\n y : float\n The y location of the text baseline in display coords.\n s : str\n The TeX text string.\n prop : `matplotlib.font_manager.FontProperties`\n The font properties.\n angle : float\n The rotation angle in degrees anti-clockwise.\n mtext : `matplotlib.text.Text`\n The original text object to be rendered.\n ", "language": "en", "n_whitespaces": 224, "n_words": 69, "vocab_size": 41 }
17
Python
12
3b52d2b64f58c1eb912bd343e7c197a1ed0b92b5
backend_bases.py
109,806
2
44
draw_tex
https://github.com/matplotlib/matplotlib.git
Remove redundant method, fix signature and add doc-string to draw_tex
31
0
23,757
9
2
31
```python
def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
    company_currency = erpnext.get_company_currency(filters.get("company"))

    def get_accounts(with_account_type_filter):
        account_type_condition = ""
        if with_account_type_filter:
            account_type_condition = "AND account_type in %(account_types)s"

        accounts = frappe.db.sql(
            .format(
                account_type_condition=account_type_condition,
                searchfield=searchfield,
                mcond=get_match_cond(doctype),
            ),
            dict(
                account_types=filters.get("account_type"),
                company=filters.get("company"),
                disabled=filters.get("disabled", 0),
                currency=company_currency,
                txt="%{}%".format(txt),
                offset=start,
                limit=page_len,
            ),
        )
        return accounts

    tax_accounts = get_accounts(True)
    if not tax_accounts:
        tax_accounts = get_accounts(False)
    return tax_accounts


@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
```
erpnext/controllers/queries.py
249
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
erpnext
{ "docstring": "\n\t\t\tSELECT name, parent_account\n\t\t\tFROM `tabAccount`\n\t\t\tWHERE `tabAccount`.docstatus!=2\n\t\t\t\t{account_type_condition}\n\t\t\t\tAND is_group = 0\n\t\t\t\tAND company = %(company)s\n\t\t\t\tAND disabled = %(disabled)s\n\t\t\t\tAND (account_currency = %(currency)s or ifnull(account_currency, '') = '')\n\t\t\t\tAND `{searchfield}` LIKE %(txt)s\n\t\t\t\t{mcond}\n\t\t\tORDER BY idx DESC, name\n\t\t\tLIMIT %(offset)s, %(limit)s\n\t\t", "language": "en", "n_whitespaces": 30, "n_words": 42, "vocab_size": 33 }
57
Python
44
a1e3ae8869194a487acccc706a381db74c4aa1ff
queries.py
68,540
7
48
tax_account_query
https://github.com/frappe/erpnext.git
fix: user can select disabled accounts in taxes table
28
1
14,815
16
6
8
```python
def filter_packages_untagged(self, package_data):
    matches = {}

    for package in package_data:
        if "metadata" in package and "container" in package["metadata"]:
            container_metadata = package["metadata"]["container"]
            if "tags" in container_metadata:
                container_tags = container_metadata["tags"]
                if not len(container_tags):
                    matches[package["name"]] = package

    return matches
```
.github/scripts/cleanup-tags.py
121
paperless-ngx
{ "docstring": "\n Filters the given package data to those which have no tags at all\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
36
Python
25
0fdd3d56f43c8442a0c9ecd3cad07a88137ff7de
cleanup-tags.py
319,884
10
67
filter_packages_untagged
https://github.com/paperless-ngx/paperless-ngx.git
Changes the cleanup images workflow so it uses a OAuth token with the correct scope (GITHUB_TOKEN is not enough). Also prevents running if the token is not defined and generally does commenting/cleanups"
166
0
117,011
17
9
30
```python
def employee_query(doctype, txt, searchfield, start, page_len, filters):
    filters = frappe._dict(filters)
    conditions = []
    include_employees = []
    emp_cond = ""

    if not filters.payroll_frequency:
        frappe.throw(_("Select Payroll Frequency."))

    if filters.start_date and filters.end_date:
        employee_list = get_employee_list(filters)
        emp = filters.get("employees") or []
        include_employees = [
            employee.employee for employee in employee_list if employee.employee not in emp
        ]
        filters.pop("start_date")
        filters.pop("end_date")
        filters.pop("salary_slip_based_on_timesheet")
        filters.pop("payroll_frequency")
        filters.pop("payroll_payable_account")
        filters.pop("currency")
        if filters.employees is not None:
            filters.pop("employees")

        if include_employees:
            emp_cond += "and employee in %(include_employees)s"

    return frappe.db.sql(
        .format(
            **{
                "key": searchfield,
                "fcond": get_filters_cond(doctype, filters, conditions),
                "mcond": get_match_cond(doctype),
                "emp_cond": emp_cond,
            }
        ),
        {
            "txt": "%%%s%%" % txt,
            "_txt": txt.replace("%", ""),
            "start": start,
            "page_len": page_len,
            "include_employees": include_employees,
        },
    )
```
erpnext/payroll/doctype/payroll_entry/payroll_entry.py
387
erpnext
{ "docstring": "select name, employee_name from `tabEmployee`\n\t\twhere status = 'Active'\n\t\t\tand docstatus < 2\n\t\t\tand ({key} like %(txt)s\n\t\t\t\tor employee_name like %(txt)s)\n\t\t\t{emp_cond}\n\t\t\t{fcond} {mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, employee_name), locate(%(_txt)s, employee_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, employee_name\n\t\tlimit %(start)s, %(page_len)s", "language": "en", "n_whitespaces": 30, "n_words": 43, "vocab_size": 33 }
103
Python
77
494bd9ef78313436f0424b918f200dab8fc7c20b
payroll_entry.py
66,903
52
224
employee_query
https://github.com/frappe/erpnext.git
style: format code with black
63
0
14,374
14
10
22
```python
def _process_contour_level_args(self, args):
    if self.levels is None:
        if len(args) == 0:
            levels_arg = 7  # Default, hard-wired.
        else:
            levels_arg = args[0]
    else:
        levels_arg = self.levels
    if isinstance(levels_arg, Integral):
        self.levels = self._autolev(levels_arg)
    else:
        self.levels = np.asarray(levels_arg, np.float64)

    if not self.filled:
        inside = (self.levels > self.zmin) & (self.levels < self.zmax)
        levels_in = self.levels[inside]
        if len(levels_in) == 0:
            self.levels = [self.zmin]
            _api.warn_external(
                "No contour levels were found within the data range.")

    if self.filled and len(self.levels) < 2:
        raise ValueError("Filled contours require at least 2 levels.")

    if len(self.levels) > 1 and np.min(np.diff(self.levels)) <= 0.0:
        raise ValueError("Contour levels must be increasing")
```
lib/matplotlib/contour.py
299
matplotlib
{ "docstring": "\n Determine the contour levels and store in self.levels.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
96
Python
66
1068a6faa19767724437461bcfb88c6852ec435c
contour.py
110,019
23
185
_process_contour_level_args
https://github.com/matplotlib/matplotlib.git
Remove unnecessary np.{,as}array / astype calls. Quite often numpy will call asarray for us, saving us the need to call asarray explicitly. When we do call asarray (or array) ourselves, a dtype can directly be passed in, rather than immediately calling astype immediately after. Passing the dtype makes it unnecessary for asarray to infer the dtype of the passed-in container, and can also save an extra array allocation if asarray first has to allocate an array of a type and astype immediately has to allocate an array of another type.
342
0
23,889
12
3
13
```python
def _get_classifier_global_metrics(is_binomial, y, y_pred, y_probs, labels):
    metrics = {}
    metrics["accuracy"] = sk_metrics.accuracy_score(y, y_pred)
    metrics["example_count"] = len(y)

    if not is_binomial:
        metrics["f1_score_micro"] = sk_metrics.f1_score(y, y_pred, average="micro", labels=labels)
        metrics["f1_score_macro"] = sk_metrics.f1_score(y, y_pred, average="macro", labels=labels)

    if y_probs is not None:
        metrics["log_loss"] = sk_metrics.log_loss(y, y_probs, labels=labels)

    return metrics
```
mlflow/models/evaluation/default_evaluator.py
176
mlflow
{ "docstring": "\n get classifier metrics which computing over all classes examples.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
43
Python
29
964f5ab75098c55f028f8acfeeae05df35ea68d5
default_evaluator.py
19,048
10
111
_get_classifier_global_metrics
https://github.com/mlflow/mlflow.git
Evaluation Default evaluator (#5092) * init Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * rename module Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * revert black change Signed-off-by: Weichen Xu <[email protected]> * change module path Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * fix Signed-off-by: Weichen Xu <[email protected]> * refactor Signed-off-by: Weichen Xu <[email protected]> * lazy load pyspark Signed-off-by: Weichen Xu <[email protected]> * revert export Signed-off-by: Weichen Xu <[email protected]> * fix curcit import Signed-off-by: Weichen Xu <[email protected]> * update tests Signed-off-by: Weichen Xu <[email protected]> * fix conftest.py Signed-off-by: Weichen Xu <[email protected]> * Revert "fix conftest.py" This reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b. 
* fix tests Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * default evaluator Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * fix Signed-off-by: Weichen Xu <[email protected]> * fix Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * fix doc Signed-off-by: Weichen Xu <[email protected]> * fix doc Signed-off-by: Weichen Xu <[email protected]> * update import Signed-off-by: Weichen Xu <[email protected]> * fix doc Signed-off-by: Weichen Xu <[email protected]> * update hash algo Signed-off-by: Weichen Xu <[email protected]> * update import Signed-off-by: Weichen Xu <[email protected]> * address comment Signed-off-by: Weichen Xu <[email protected]> * add tests Signed-off-by: Weichen Xu <[email protected]> * fix lint Signed-off-by: Weichen Xu <[email protected]> * add tests Signed-off-by: Weichen Xu <[email protected]> * add more tests Signed-off-by: Weichen Xu <[email protected]> * add tests Signed-off-by: Weichen Xu <[email protected]> * fix lint Signed-off-by: Weichen Xu <[email protected]> * update shap explainer Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * remove scikitplot dep Signed-off-by: Weichen Xu <[email protected]> * add pr curve Signed-off-by: Weichen Xu <[email protected]> * add shap.summary_plot Signed-off-by: Weichen Xu <[email protected]> * log explainer Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * improve explainer code Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * update shap init Signed-off-by: Weichen Xu <[email protected]> * update explainer creating Signed-off-by: Weichen Xu <[email protected]> * update predict_proba Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * refactor Signed-off-by: Weichen Xu <[email protected]> * add multi-class metrics artifacts Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * add log_loss metric Signed-off-by: Weichen Xu <[email protected]> * lazy load pyspark Signed-off-by: Weichen Xu <[email protected]> * address ben comments Signed-off-by: Weichen Xu <[email protected]> * fix Signed-off-by: Weichen Xu <[email protected]> * prevent show shap logo, add tests Signed-off-by: Weichen Xu <[email protected]> * support spark model Signed-off-by: Weichen Xu <[email protected]> * add tests Signed-off-by: Weichen Xu <[email protected]> * add shap version check Signed-off-by: Weichen Xu <[email protected]> * update docs, loose classifier label limit Signed-off-by: Weichen Xu <[email protected]> * 
add tests Signed-off-by: Weichen Xu <[email protected]> * multiclass classifier merge metrics/plots Signed-off-by: Weichen Xu <[email protected]> * zfill feature name Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier Signed-off-by: Weichen Xu <[email protected]> * refactor Signed-off-by: Weichen Xu <[email protected]> * update tests Signed-off-by: Weichen Xu <[email protected]> * improve label handling Signed-off-by: Weichen Xu <[email protected]> * refactor Signed-off-by: Weichen Xu <[email protected]> * add tests Signed-off-by: Weichen Xu <[email protected]> * black Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * increase plot dpi Signed-off-by: Weichen Xu <[email protected]> * fix test fixture Signed-off-by: Weichen Xu <[email protected]> * fix pylint Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * use matplot rc_context Signed-off-by: Weichen Xu <[email protected]> * fix shap import Signed-off-by: Weichen Xu <[email protected]> * refactor EvaluationDataset Signed-off-by: Weichen Xu <[email protected]> * limit user specify shap algos Signed-off-by: Weichen Xu <[email protected]> * clean Signed-off-by: Weichen Xu <[email protected]> * update evaluation dataset Signed-off-by: Weichen Xu <[email protected]> * use svg fig Signed-off-by: Weichen Xu <[email protected]> * revert svg Signed-off-by: Weichen Xu <[email protected]> * curve dashline, legend display ap/roc, legend move out Signed-off-by: Weichen Xu <[email protected]> * linewidth 1 Signed-off-by: Weichen Xu <[email protected]> * keyword arguments for evaluate, fix tests Signed-off-by: Weichen Xu <[email protected]> * mark abc.abstractmethod, kw args for ModelEvaluator methods Signed-off-by: Weichen Xu <[email protected]> * fix pylint Signed-off-by: Weichen Xu <[email protected]> * fix pylint Signed-off-by: Weichen Xu <[email protected]>
85
0
2,882
12
1
8
```python
def test_show_message_twice(view, info1, info2, count):
    view.show_message(info1)
    view.show_message(info2)
    assert len(view._messages) == count
```
tests/unit/mainwindow/test_messageview.py
53
qutebrowser
{ "docstring": "Show the exact same message twice -> only one should be shown.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
11
Python
11
676e01677183825d19107d3b2fbf1bb2c0684ede
test_messageview.py
320,942
4
33
test_show_message_twice
https://github.com/qutebrowser/qutebrowser.git
Only replace the exact same message If we have a error message followed by an info message with the same text, they should both be shown, not replaced automatically.
23
0
117,451
9
1
2
```python
def check_auth(f):
```
py/visdom/utils/server_utils.py
13
visdom
{ "docstring": "\n Wrapper for server access methods to ensure that the access\n is authorized.\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 11 }
2
Python
2
60c90e313e106c0af62339d29eeda0e62823c648
server_utils.py
106,773
3
10
check_auth
https://github.com/fossasia/visdom.git
Refactoring server.py into more intentional files
5
0
22,434
6
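The code field in the record above preserves only the signature of `check_auth`; the body is not part of this record. A minimal sketch of what such an authorization-checking decorator typically looks like; the `login_enabled` / `is_authorized` handler attributes and the 401 response are illustrative assumptions, not visdom's actual implementation:

```python
import functools

def check_auth(f):
    """Wrap a server access method so unauthorized requests are rejected.

    Sketch only: the handler attributes below are hypothetical.
    """
    @functools.wraps(f)
    def wrapped(handler, *args, **kwargs):
        # Assumes a tornado-style RequestHandler exposing these attributes.
        if getattr(handler, "login_enabled", False) and not handler.is_authorized():
            handler.set_status(401)  # reject unauthorized access
            return None
        return f(handler, *args, **kwargs)
    return wrapped
```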
1
8
def start_leader_mode(self): logger.info("Running rpc.replicate in Leader mode") logger.info("-" * 15) logger.info(f"API_KEY: {self.secret_api_key}") logger.info("-" * 15) self.register_leader_endpoint() self.submit_coroutine(self.leader_loop())
freqtrade/rpc/replicate/__init__.py
100
freqtrade
{ "docstring": "\n Register the endpoint and start the leader loop\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 7 }
17
Python
14
9f6bba40af1a407f190a89f5c0c8b4e3f528ba46
__init__.py
150,410
7
50
start_leader_mode
https://github.com/freqtrade/freqtrade.git
initial concept for replicate, basic leader and follower logic
66
0
34,734
10
1
9
def test_exclude_glob_case5(): incl_dom = {} incl_glob = {} incl_ent = {"binary_sensor.working"} excl_dom = {} excl_glob = {"binary_sensor.*"} excl_ent = {"light.ignoreme", "sensor.notworking"} testfilter = generate_filter( incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob ) assert testfilter("sensor.test") assert testfilter("sensor.notworking") is False assert testfilter("light.test") assert testfilter("light.ignoreme") is False assert testfilter("binary_sensor.working") assert testfilter("binary_sensor.another") is False assert testfilter("sun.sun") is True
tests/helpers/test_entityfilter.py
169
core
{ "docstring": "Test case 5 - include and exclude specified, with excluded glob.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
53
Python
33
a8349a4866d22cddbca9ac9367d4affae39a8325
test_entityfilter.py
314,752
17
93
test_exclude_glob_case5
https://github.com/home-assistant/core.git
Adjust entity filters to make includes stronger than excludes (#74080) * Adjust entity filters to make includes stronger than excludes Fixes #59080 * adjust test for stronger entity glob includes * sync with docs
108
0
113,356
9
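The assertions in the record above pin down a precedence order: explicit entity includes beat exclude globs, explicit excludes drop entities, and anything unmatched passes. A simplified, self-contained model of that behaviour (Home Assistant's real `generate_filter` additionally handles domains and include globs):

```python
from fnmatch import fnmatch

def make_filter(incl_ent, excl_ent, excl_glob):
    def entity_filter(entity_id: str) -> bool:
        if entity_id in incl_ent:     # explicit include wins over everything
            return True
        if entity_id in excl_ent:     # explicit exclude
            return False
        if any(fnmatch(entity_id, g) for g in excl_glob):
            return False              # glob exclude
        return True                   # unmatched entities pass
    return entity_filter

f = make_filter({"binary_sensor.working"},
                {"light.ignoreme", "sensor.notworking"},
                {"binary_sensor.*"})
assert f("binary_sensor.working") and not f("binary_sensor.another")
assert f("sun.sun") and not f("light.ignoreme")
```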
1
3
def _generate_client_device_id() -> str: return random_uuid_hex()
homeassistant/components/jellyfin/config_flow.py
22
core
{ "docstring": "Generate a random UUID4 string to identify ourselves.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
6
Python
6
5b0a37a44752edbbf785d6a200e3b7a3f5fa2047
config_flow.py
288,716
3
11
_generate_client_device_id
https://github.com/home-assistant/core.git
Use persistent device id for jellyfin requests (#79840)
12
0
87,869
7
2
16
def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) # Sum of losses, no. of examples for X, y in data_iter: l = loss(net(X), y) metric.add(d2l.reduce_sum(l), d2l.size(l)) return metric[0] / metric[1] DATA_HUB = dict() DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
d2l/mxnet.py
116
d2l-zh
{ "docstring": "Evaluate the loss of a model on the given dataset.\n\n Defined in :numref:`sec_model_selection`", "language": "en", "n_whitespaces": 15, "n_words": 13, "vocab_size": 12 }
35
Python
31
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
mxnet.py
158,210
6
64
evaluate_loss
https://github.com/d2l-ai/d2l-zh.git
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
60
0
37,379
12
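`evaluate_loss` in the record above averages per-example losses across an iterator via `d2l.Accumulator`. A framework-free analogue of the same running-sum pattern, with pure-Python stand-ins for the model and loss (illustrative only; the d2l version relies on MXNet reductions):

```python
def evaluate_loss_plain(net, data_iter, loss):
    total, count = 0.0, 0
    for X, y in data_iter:
        l = loss(net(X), y)      # per-example losses
        total += float(sum(l))   # running sum of losses
        count += len(l)          # running number of examples
    return total / count

# Toy check: a perfect "model" yields zero average loss.
net = lambda xs: [2 * x for x in xs]
loss = lambda preds, ys: [(p - t) ** 2 for p, t in zip(preds, ys)]
data = [([1, 2], [2, 4]), ([3], [6])]
assert evaluate_loss_plain(net, data, loss) == 0.0
```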
2
10
def _copy2_ignoring_special_files(src, dest): # type: (str, str) -> None try: copy2_fixed(src, dest) except shutil.SpecialFileError as e: # SpecialFileError may be raised due to either the source or # destination. If the destination was the cause then we would actually # care, but since the destination directory is deleted prior to # copy we ignore all of them assuming it is caused by the source. logger.warning( "Ignoring special file error '%s' encountered copying %s to %s.", str(e), src, dest, )
.venv/lib/python3.8/site-packages/pip/_internal/operations/prepare.py
69
transferlearning
{ "docstring": "Copying special files is not supported, but as a convenience to users\n we skip errors copying them. This supports tools that may create e.g.\n socket files in the project source directory.\n ", "language": "en", "n_whitespaces": 40, "n_words": 31, "vocab_size": 30 }
79
Python
66
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
prepare.py
60,951
10
39
_copy2_ignoring_special_files
https://github.com/jindongwang/transferlearning.git
upd; format
184
0
12,357
12
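Because the helper above keeps `copy2`'s signature, it can slot into `shutil.copytree`'s `copy_function` hook, so a stray socket in a source tree does not abort the whole copy. A usage sketch with placeholder paths:

```python
import shutil

# Placeholder paths; copytree invokes the tolerant copier per file,
# logging (rather than raising) on SpecialFileError.
# shutil.copytree("src_project", "dest_project",
#                 copy_function=_copy2_ignoring_special_files)
```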
1
2
def prepare_template(self) -> None:
airflow/models/base.py
16
airflow
{ "docstring": "Hook triggered after the templated fields get replaced by their content.\n\n If you need your operator to alter the content of the file before the\n template is rendered, it should override this method to do so.\n ", "language": "en", "n_whitespaces": 57, "n_words": 36, "vocab_size": 32 }
4
Python
4
ff3bbc3db24f9f3f4f88033d48859fb08fc3237b
base.py
44,166
6
8
prepare_template
https://github.com/apache/airflow.git
Implement enough interface for MappedOperator to be baggable (#20945)
11
0
8,185
6
1
2
def hiddenlabels(self): return self["hiddenlabels"]
packages/python/plotly/plotly/graph_objs/_layout.py
22
plotly.py
{ "docstring": "\n hiddenlabels is the funnelarea & pie chart analog of\n visible:'legendonly' but it can contain many labels, and can\n simultaneously hide slices from several pies/funnelarea charts\n\n The 'hiddenlabels' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "language": "en", "n_whitespaces": 111, "n_words": 47, "vocab_size": 45 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_layout.py
227,329
2
11
hiddenlabels
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,002
7
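A short usage sketch for the property in the record above: `hiddenlabels` lives on the figure layout and hides matching pie/funnelarea slices while keeping the rest of the chart intact:

```python
import plotly.graph_objects as go

# Hide two pie slices by label; per the docstring this is the
# pie/funnelarea analogue of visible='legendonly'.
fig = go.Figure(
    data=[go.Pie(labels=["a", "b", "c"], values=[1, 2, 3])],
    layout=dict(hiddenlabels=["a", "c"]),
)
# fig.show()
```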
1
11
def test_dataset_shard_error_with_both_format_and_loader_fn(self): dset = ray.data.range(100) config = { "input": "dataset", "input_config": { "format": "json", "paths": self.dset_path, "loader_fn": lambda: dset, }, } with self.assertRaises(ValueError): get_dataset_and_shards(config)
rllib/offline/tests/test_dataset_reader.py
105
ray
{ "docstring": "Tests whether the dataset_shard function raises an error when both format\n and loader_fn are specified.", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 15 }
24
Python
22
569fe0109629048d08e1d9e023f7769f10bd2244
test_dataset_reader.py
125,006
12
57
test_dataset_shard_error_with_both_format_and_loader_fn
https://github.com/ray-project/ray.git
[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)
148
0
27,746
11
3
18
async def async_refresh_sensor(self) -> None: _LOGGER.debug("Refreshing library sensor for '%s'", self.name) try: await self.hass.async_add_executor_job(self._update_state_and_attrs) self._attr_available = True except NotFound: self._attr_available = False except requests.exceptions.RequestException as err: _LOGGER.error( "Could not update library sensor for '%s': %s", self.library_section.title, err, ) self._attr_available = False self.async_write_ha_state()
homeassistant/components/plex/sensor.py
132
core
{ "docstring": "Update state and attributes for the library sensor.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
42
Python
33
474844744bdd2b0dcba46b82d9d3fcd8e3dbad24
sensor.py
305,878
16
78
async_refresh_sensor
https://github.com/home-assistant/core.git
Improve entity type hints [p] (#77871)
195
0
104,662
12
1
15
def test_joindocuments_score_none(join_mode, sort_by_score): inputs = [ {"documents": [Document(content="text document 1", content_type="text", score=0.2)]}, {"documents": [Document(content="text document 2", content_type="text", score=None)]}, ] join_docs = JoinDocuments(join_mode=join_mode, sort_by_score=sort_by_score) result, _ = join_docs.run(inputs) assert len(result["documents"]) == 2 result, _ = join_docs.run(inputs, top_k_join=1) assert len(result["documents"]) == 1
test/nodes/test_other.py
181
haystack
{ "docstring": "Testing JoinDocuments() node when some of the documents have `score=None`", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
40
Python
28
408d8e6ff559ed0a195b68284baf3ab23707ffd7
test_other.py
257,752
10
112
test_joindocuments_score_none
https://github.com/deepset-ai/haystack.git
Enable the `JoinDocuments` node to work with documents with `score=None` (#2984) * Enable the `JoinDocuments` node to work with documents with `score=None` This fixes #2983 As of now, the `JoinDocuments` node will error out if any of the documents has `score=None` - which is possible, as some retrievers are not able to provide a score, like the `TfidfRetriever` on Elasticsearch or the `BM25Retriever` on Weaviate. The reason for the error is that the `JoinDocuments` always sorts the documents by score and cannot sort when `score=None`. There was a very similar issue for `JoinAnswers` too, which was addressed by this PR: https://github.com/deepset-ai/haystack/pull/2436 This solution applies the same solution to `JoinDocuments` - so both the `JoinAnswers` and `JoinDocuments` now will have the same additional argument to disable sorting when that is required. The solution is to add an argument to `JoinDocuments` called `sort_by_score: bool`, which allows the user to turn off the sorting of documents by score, but keeps the current functionality of sorting being performed as the default. * Fixing test bug * Addressing PR review comments - Extending unit tests - Simplifying logic * Making the sorting work even with no scores By making the no score being sorted as -Inf * Forgot to commit the change in `join_docs.py` * [EMPTY] Re-trigger CI * Added an INFO log if the `JoinDocuments` is sorting while some of the docs have `score=None` * Adjusting the arguments of `any()` * [EMPTY] Re-trigger CI
78
0
75,129
14
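A usage sketch of the flag the test above exercises (import path per haystack v1's public API, assumed here):

```python
from haystack.nodes import JoinDocuments

# With sort_by_score=False the node no longer requires every document to
# carry a numeric score, so retrievers that return score=None (e.g.
# TfidfRetriever on Elasticsearch) can feed the join directly.
join = JoinDocuments(join_mode="concatenate", sort_by_score=False)
```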
2
33
def test_parent_skip_branch(session, dag_maker): start_date = pendulum.datetime(2020, 1, 1) with dag_maker( "test_parent_skip_branch_dag", schedule_interval=None, start_date=start_date, session=session, ): op1 = BranchPythonOperator(task_id="op1", python_callable=lambda: "op3") op2 = EmptyOperator(task_id="op2") op3 = EmptyOperator(task_id="op3") op1 >> [op2, op3] tis = { ti.task_id: ti for ti in dag_maker.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING).task_instances } tis["op1"].run() dep = NotPreviouslySkippedDep() assert len(list(dep.get_dep_statuses(tis["op2"], session, DepContext()))) == 1 assert not dep.is_met(tis["op2"], session) assert tis["op2"].state == State.SKIPPED
tests/ti_deps/deps/test_not_previously_skipped_dep.py
272
airflow
{ "docstring": "\n A simple DAG with a BranchPythonOperator that does not follow op2. NotPreviouslySkippedDep is not met.\n ", "language": "en", "n_whitespaces": 22, "n_words": 15, "vocab_size": 14 }
59
Python
49
49e336ae0302b386a2f47269a6d13988382d975f
test_not_previously_skipped_dep.py
47,672
21
168
test_parent_skip_branch
https://github.com/apache/airflow.git
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
162
0
9,207
14
3
14
def _check_precomputed(X): if not issparse(X): X = check_array(X) check_non_negative(X, whom="precomputed distance matrix.") return X else: graph = X if graph.format not in ("csr", "csc", "coo", "lil"): raise TypeError( "Sparse matrix in {!r} format is not supported due to " "its handling of explicit zeros".format(graph.format) ) copied = graph.format != "csr" graph = check_array(graph, accept_sparse="csr") check_non_negative(graph, whom="precomputed distance matrix.") graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True) return graph
sklearn/neighbors/_base.py
184
scikit-learn
{ "docstring": "Check precomputed distance matrix.\n\n If the precomputed distance matrix is sparse, it checks that the non-zero\n entries are sorted by distances. If not, the matrix is copied and sorted.\n\n Parameters\n ----------\n X : {sparse matrix, array-like}, (n_samples, n_samples)\n Distance matrix to other samples. X may be a sparse matrix, in which\n case only non-zero elements may be considered neighbors.\n\n Returns\n -------\n X : {sparse matrix, array-like}, (n_samples, n_samples)\n Distance matrix to other samples. X may be a sparse matrix, in which\n case only non-zero elements may be considered neighbors.\n ", "language": "en", "n_whitespaces": 144, "n_words": 89, "vocab_size": 48 }
66
Python
48
b94bc5ea6821607d1e9826ce2d084c76379820ba
_base.py
259,864
17
105
_check_precomputed
https://github.com/scikit-learn/scikit-learn.git
ENH add new function sort_graph_by_row_values (#23139) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
157
0
75,958
13
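Callers reach `_check_precomputed` by fitting neighbors estimators with `metric='precomputed'`. A small end-to-end sketch with a dense distance matrix; sparse inputs would additionally go through the CSR sorting path shown above:

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

D = np.array([[0.0, 1.0, 2.0],
              [1.0, 0.0, 3.0],
              [2.0, 3.0, 0.0]])
nn = NearestNeighbors(n_neighbors=2, metric="precomputed").fit(D)
dist, idx = nn.kneighbors(D)  # row i's nearest rows by precomputed distance
```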
2
14
def _set_per_output_metric_attributes(self, metrics_dict, output_index): updated_metrics_dict = collections.OrderedDict() for metric_name, metric_fn in metrics_dict.items(): metric_name = self._add_unique_metric_name( metric_name, metric_fn, output_index ) # Update the name on the metric class to be the unique generated name. metric_fn._name = metric_name # pylint: disable=protected-access updated_metrics_dict[metric_name] = metric_fn # Keep track of metric name and function. self._compile_metric_functions.append(metric_fn) return updated_metrics_dict
keras/engine/training_v1.py
99
keras
{ "docstring": "Sets the metric attributes on the model for the given output.\n\n Args:\n metrics_dict: A dict with metric names as keys and metric fns as values.\n output_index: The index of the model output for which the metric\n attributes are added.\n\n Returns:\n Metrics dict updated with unique metric names as keys.\n ", "language": "en", "n_whitespaces": 108, "n_words": 49, "vocab_size": 33 }
53
Python
40
84afc5193d38057e2e2badf9c889ea87d80d8fbf
training_v1.py
272,015
10
61
_set_per_output_metric_attributes
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
174
0
80,937
10
1
20
async def test_peer_cert_ignored_with_supervisor(hass, tmpdir): cert_path, key_path, peer_cert_path = await hass.async_add_executor_job( _setup_empty_ssl_pem_files, tmpdir ) with patch("ssl.SSLContext.load_cert_chain"), patch( "homeassistant.components.http.supervisor.has_supervisor", return_value=True ), patch( "ssl.SSLContext.load_verify_locations" ) as mock_load_verify_locations, patch( "homeassistant.util.ssl.server_context_modern", side_effect=server_context_modern, ) as mock_context: assert ( await async_setup_component( hass, "http", { "http": { "ssl_peer_certificate": peer_cert_path, "ssl_profile": "modern", "ssl_certificate": cert_path, "ssl_key": key_path, } }, ) is True ) await hass.async_start() await hass.async_block_till_done() assert len(mock_context.mock_calls) == 1 mock_load_verify_locations.assert_not_called()
tests/components/http/test_init.py
209
core
{ "docstring": "Test peer certiicate requirement ignored in supervised deployments.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
62
Python
48
938b64081b0cbc21d1a9c1141c1e575824ce31ae
test_init.py
292,578
31
119
test_peer_cert_ignored_with_supervisor
https://github.com/home-assistant/core.git
Block peer certs on supervisor (#66837) Co-authored-by: Pascal Vizeli <[email protected]> Co-authored-by: Mike Degatano <[email protected]>
375
0
91,654
17
5
21
def get_default_locale(self): # Check if the object has any parental keys to another translatable model # If so, take the locale from the object referenced in that parental key parental_keys = [ field for field in self._meta.get_fields() if isinstance(field, ParentalKey) and issubclass(field.related_model, TranslatableMixin) ] if parental_keys: parent_id = parental_keys[0].value_from_object(self) return ( parental_keys[0] .related_model.objects.defer() .select_related("locale") .get(id=parent_id) .locale ) return Locale.get_default()
wagtail/core/models/i18n.py
140
wagtail
{ "docstring": "\n Finds the default locale to use for this object.\n\n This will be called just before the initial save.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
59
Python
48
d10f15e55806c6944827d801cd9c2d53f5da4186
i18n.py
73,907
17
86
get_default_locale
https://github.com/wagtail/wagtail.git
Reformat with black
260
0
16,170
18
1
13
def test_copy_with_encryption(self): op = DatabricksCopyIntoOperator( file_location=COPY_FILE_LOCATION, file_format='CSV', table_name='test', task_id=TASK_ID, encryption={'TYPE': 'AWS_SSE_C', 'MASTER_KEY': 'abc'}, ) assert ( op._create_sql_query() == f"COPY INTO test\nFROM '{COPY_FILE_LOCATION}' WITH ( ENCRYPTION (TYPE = 'AWS_SSE_C', MASTER_KEY = 'abc'))\nFILEFORMAT = CSV\n".strip() )
tests/providers/databricks/operators/test_databricks_sql.py
95
airflow
{ "docstring": "COPY INTO test\nFROM '{COPY_FILE_LOCATION}' WITH ( ENCRYPTION (TYPE = 'AWS_SSE_C', MASTER_KEY = 'abc'))\nFILEFORMAT = CSV\n", "language": "en", "n_whitespaces": 14, "n_words": 17, "vocab_size": 15 }
20
Python
19
401419432082d222b823e4f2a66f21e5cc3ab28d
test_databricks_sql.py
45,915
15
53
test_copy_with_encryption
https://github.com/apache/airflow.git
Add new options to DatabricksCopyIntoOperator (#22076) This includes: * `encryption` - to specify encryption options for a given location * `credential` - to specify authentication options for a given location * `validate` - to control validation of schema & data
124
0
8,742
12
5
7
def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('ERROR: unknown token type: ' + token)
d2l/mxnet.py
90
d2l-zh
{ "docstring": "Split text lines into word or character tokens.\n\n Defined in :numref:`sec_text_preprocessing`", "language": "en", "n_whitespaces": 13, "n_words": 11, "vocab_size": 11 }
31
Python
23
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
mxnet.py
158,197
7
51
tokenize
https://github.com/d2l-ai/d2l-zh.git
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
64
0
37,370
12
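A quick check of both token modes of the `tokenize` helper above:

```python
lines = ["the time machine", "by h g wells"]
assert tokenize(lines, token="word") == [
    ["the", "time", "machine"], ["by", "h", "g", "wells"]
]
assert tokenize(lines, token="char")[0][:3] == ["t", "h", "e"]
```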
3
18
def export_model(self, model_path, calibration_path=None, onnx_path=None, input_shape=None, device=None): assert model_path is not None, 'model_path must be specified' self._unwrap_model() calibration_config = {} for name, module in self.bound_model.named_modules(): if hasattr(module, 'weight_bits'): calibration_config[name] = {} calibration_config[name]['weight_bits'] = int(module.weight_bits) self._del_simulated_attr(module) self.export_model_save(self.bound_model, model_path, calibration_config, calibration_path, onnx_path, input_shape, device) return calibration_config
nni/compression/pytorch/quantization/bnn_quantizer.py
168
nni
{ "docstring": "\n Export quantized model weights and calibration parameters(optional)\n\n Parameters\n ----------\n model_path : str\n path to save quantized model weight\n calibration_path : str\n (optional) path to save quantize parameters after calibration\n onnx_path : str\n (optional) path to save onnx model\n input_shape : list or tuple\n input shape to onnx model\n device : torch.device\n device of the model, used to place the dummy input tensor for exporting onnx file.\n the tensor is placed on cpu if ```device``` is None\n\n Returns\n -------\n Dict\n ", "language": "en", "n_whitespaces": 230, "n_words": 79, "vocab_size": 51 }
44
Python
39
d68c786ff81bad19c04619d6a999ff34aaa724e7
bnn_quantizer.py
113,612
11
110
export_model
https://github.com/microsoft/nni.git
[Compression] remove pruning v1 & refactor directory (#5228)
145
0
24,975
13
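A hypothetical export call for the quantizer method above (paths are placeholders); per the loop over `named_modules`, the returned calibration config maps module names to their quantized bit-widths:

```python
# calibration = quantizer.export_model(
#     model_path="bnn_model.pth",
#     calibration_path="bnn_calibration.pth",
# )
# calibration -> {"conv1": {"weight_bits": 1}, ...}
```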
1
12
def test_new_room_user_is_not_local(self) -> None: channel = self.make_request( "DELETE", self.url, content={"new_room_user_id": "@not:exist.bla"}, access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual( "User must be our own: @not:exist.bla", channel.json_body["error"], )
tests/rest/admin/test_room.py
107
synapse
{ "docstring": "\n Check that only local users can create new room to move members.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
25
Python
24
2281427175e4c93a30c39607fb4ac23c2a1f399f
test_room.py
249,342
15
65
test_new_room_user_is_not_local
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13488) * Use literals in place of `HTTPStatus` constants in tests * newsfile * code style * code style
133
0
72,845
12
12
34
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): zip_filename = base_name + ".zip" archive_dir = os.path.dirname(base_name) if not os.path.exists(archive_dir): if logger is not None: logger.info("creating %s", archive_dir) if not dry_run: os.makedirs(archive_dir) # If zipfile module is not available, try spawning an external 'zip' # command. try: import zipfile except ImportError: zipfile = None if zipfile is None: _call_external_zip(base_dir, zip_filename, verbose, dry_run) else: if logger is not None: logger.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) if not dry_run: zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) for dirpath, dirnames, filenames in os.walk(base_dir): for name in filenames: path = os.path.normpath(os.path.join(dirpath, name)) if os.path.isfile(path): zip.write(path, path) if logger is not None: logger.info("adding '%s'", path) zip.close() return zip_filename _ARCHIVE_FORMATS = { 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), 'zip': (_make_zipfile, [], "ZIP file"), } if _BZ2_SUPPORTED: _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file")
pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py
479
pipenv
{ "docstring": "Create a zip file from all the files under 'base_dir'.\n\n The output zip file will be named 'base_name' + \".zip\". Uses either the\n \"zipfile\" Python module (if available) or the InfoZIP \"zip\" utility\n (if installed and found on the default search path). If neither tool is\n available, raises ExecError. Returns the name of the output zip\n file.\n ", "language": "en", "n_whitespaces": 78, "n_words": 57, "vocab_size": 47 }
148
Python
97
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
shutil.py
21,352
30
210
_make_zipfile
https://github.com/pypa/pipenv.git
Vendor in pip 22.1.2
529
0
3,780
21
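The `_ARCHIVE_FORMATS` table at the end of the record above mirrors the registry behind the stdlib's `shutil.make_archive`, so the modern equivalent of calling `_make_zipfile` directly is (placeholder paths):

```python
import shutil

# Produces demo.zip from the contents of ./pkg; like _make_zipfile,
# make_archive derives the .zip name from the base name.
# shutil.make_archive("demo", "zip", root_dir="pkg")
```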
1
20
def test_get_instance_view_filters_out_name_with_filter_backend(self): instance_view = InstanceView.as_view(filter_backends=(ExclusiveFilterBackend,)) request = factory.get('/1') response = instance_view(request, pk=1).render() assert response.status_code == status.HTTP_404_NOT_FOUND assert response.data == { 'detail': ErrorDetail( string='No BasicModel matches the given query.', code='not_found' ) }
tests/test_generics.py
116
django-rest-framework
{ "docstring": "\n GET requests to RetrieveUpdateDestroyAPIView should raise 404 when model filtered out.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
31
Python
27
56946fac8f29aa44ce84391f138d63c4c8a2a285
test_generics.py
48,681
11
68
test_get_instance_view_filters_out_name_with_filter_backend
https://github.com/encode/django-rest-framework.git
Preserve exception messages for wrapped Django exceptions (#8051) * Preserve messages for wrapped Django exceptions * Fix the test * Update test_generics.py * Update test_generics.py Co-authored-by: Tom Christie <[email protected]>
132
0
9,567
12
1
3
def onCopiedDLL(self, dll_filename): # Virtual method, pylint: disable=no-self-use,unused-argument return None
nuitka/plugins/PluginBase.py
19
Nuitka
{ "docstring": "Chance for a plugin to modify DLLs after copy, e.g. to compress it, remove attributes, etc.\n\n Args:\n dll_filename: the filename of the DLL\n\n Notes:\n Do not remove or add any files in this method, this will not work well, there\n is e.g. getExtraDLLs API to add things. This is only for post processing as\n described above.\n\n ", "language": "en", "n_whitespaces": 121, "n_words": 56, "vocab_size": 46 }
10
Python
10
87c7dd5551f276dc0b68168d952c55aa3e4d07f8
PluginBase.py
178,438
2
10
onCopiedDLL
https://github.com/Nuitka/Nuitka.git
Plugins: Add support for modifying DLLs after standalone copy
31
0
42,698
6
6
9
def set_video_backend(backend): global _video_backend if backend not in ["pyav", "video_reader", "cuda"]: raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend) if backend == "video_reader" and not io._HAS_VIDEO_OPT: # TODO: better messages message = "video_reader video backend is not available. Please compile torchvision from source and try again" raise RuntimeError(message) elif backend == "cuda" and not _HAS_GPU_VIDEO_DECODER: # TODO: better messages message = "cuda video backend is not available." raise RuntimeError(message) else: _video_backend = backend
torchvision/__init__.py
125
vision
{ "docstring": "\n Specifies the package used to decode videos.\n\n Args:\n backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.\n The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic\n binding for the FFmpeg libraries.\n The :mod:`video_reader` package includes a native C++ implementation on\n top of FFMPEG libraries, and a python API of TorchScript custom operator.\n It generally decodes faster than :mod:`pyav`, but is perhaps less robust.\n\n .. note::\n Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'\n backend, please compile torchvision from source.\n ", "language": "en", "n_whitespaces": 184, "n_words": 95, "vocab_size": 77 }
78
Python
49
2e833520618dc460cbeb693e29e40b65a02ccafb
__init__.py
194,093
12
66
set_video_backend
https://github.com/pytorch/vision.git
Pyav backend for VideoReader API (#6598) * Test: add backend parameter * VideoReader object now works on backend * Frame reading now passes * Keyframe seek now passes * Pyav backend now supports metadata * changes in test to reflect GPU decoder change * Linter? * Test GPU output * Addressing Joao's comments * lint * lint * Revert "Test GPU output" This reverts commit f62e955d7dc81bcb23b40d58ea75413b9b62e76d. * lint? * lint * lint * Address issues in build? * hopefully doc fix * Arrgh * arrgh * fix typos * fix input options * remove read from memory option in pyav * skip read from mem test for gpu and pyab be * fix test * remove unused import * Hack to get reading from memory work with pyav * patch audio test Co-authored-by: Bruno Korbar <[email protected]> Co-authored-by: Joao Gomes <[email protected]>
152
0
46,938
11
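A usage sketch for the setter above; only the PyAV backend is guaranteed to be importable from a stock install:

```python
import torchvision

torchvision.set_video_backend("pyav")  # always available
# "video_reader" and "cuda" raise RuntimeError unless torchvision was
# built with the C++ video ops / GPU decoder, per the checks above.
```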
1
17
def step(self, action, sumo_handler): logger.debug("Agent %s: action %d", self.agent_id, action) # Subscriptions EXAMPLE: # {"agent_0": {64: 14.603468282230542, 104: None}, # "agent_1": {64: 12.922797055918513, # 104: ("veh.19", 27.239870121802596)}} logger.debug( "Subscriptions: %s", pformat(sumo_handler.veh_subscriptions[self.agent_id]) ) previous_speed = sumo_handler.veh_subscriptions[self.agent_id][tc.VAR_SPEED] new_speed = previous_speed + self.action_to_meaning[action] logger.debug("Before %.2f", previous_speed) sumo_handler.traci_handler.vehicle.setSpeed(self.agent_id, new_speed) logger.debug("After %.2f", new_speed) return
rllib/examples/simulators/sumo/marlenvironment.py
157
ray
{ "docstring": "Implements the logic of each specific action passed as input.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
49
Python
40
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
marlenvironment.py
143,721
11
96
step
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
184
0
33,027
11
2
17
def test_kbinsdiscrtizer_get_feature_names_out(encode, expected_names): X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]] kbd = KBinsDiscretizer(n_bins=4, encode=encode).fit(X) Xt = kbd.transform(X) input_features = [f"feat{i}" for i in range(3)] output_names = kbd.get_feature_names_out(input_features) assert Xt.shape[1] == output_names.shape[0] assert_array_equal(output_names, expected_names)
sklearn/preprocessing/tests/test_discretization.py
174
scikit-learn
{ "docstring": "Check get_feature_names_out for different settings.\n Non-regression test for #22731\n ", "language": "en", "n_whitespaces": 15, "n_words": 9, "vocab_size": 8 }
40
Python
36
36c6c74e5fd9033e9b89a1348de2466e7ac48a8c
test_discretization.py
259,155
8
115
test_kbinsdiscrtizer_get_feature_names_out
https://github.com/scikit-learn/scikit-learn.git
FIX Fixes KBinsDiscretizer for encode=ordinal (#22735) Co-authored-by: Guillaume Lemaitre <[email protected]>
64
0
75,608
11
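A usage sketch mirroring the test above; with `encode='ordinal'` the output names match the inputs, while the one-hot encodings expand to one name per bin (exact suffixes depend on the sklearn version):

```python
from sklearn.preprocessing import KBinsDiscretizer

X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]]
kbd = KBinsDiscretizer(n_bins=4, encode="ordinal").fit(X)
print(kbd.get_feature_names_out(["feat0", "feat1", "feat2"]))
# ['feat0' 'feat1' 'feat2']
```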
3
10
def _has_arrow_table(self): if not isinstance(self._op, FrameNode): return False return all(p.arrow_table is not None for p in self._partitions.flatten())
modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py
63
modin
{ "docstring": "\n Return True for materialized frame with Arrow table.\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 47, "n_words": 11, "vocab_size": 11 }
17
Python
15
027f92a7655ae5b473839b7956ff52bf7879f3cc
dataframe.py
154,793
4
39
_has_arrow_table
https://github.com/modin-project/modin.git
FIX-#4022: Fixed empty data frame with index (#4910) Signed-off-by: Andrey Pavlenko <[email protected]>
49
0
36,150
11
2
14
def get_experiment_checkpoint_dir(cls, run_obj, local_dir=None, name=None): assert run_obj local_dir = _get_local_dir_with_expand_user(local_dir) run_identifier = cls.get_trainable_name(run_obj) combined_name = name or run_identifier dir_name = _get_dir_name(run_obj, name, combined_name) return os.path.join(local_dir, dir_name)
python/ray/tune/experiment.py
89
ray
{ "docstring": "Get experiment checkpoint dir without setting up an experiment.\n\n This is only used internally for better support of Tuner API.\n\n Args:\n run_obj (str|function|class): Trainable to run.\n name (str): The name of the experiment specified by user.\n local_dir (str): The local_dir path.\n\n Returns:\n Checkpoint directory for experiment.\n ", "language": "en", "n_whitespaces": 118, "n_words": 46, "vocab_size": 38 }
26
Python
22
814b49356c1e773f2ec8d7c643acef48b6db08cf
experiment.py
146,782
7
58
get_experiment_checkpoint_dir
https://github.com/ray-project/ray.git
[tuner] Tuner impl. (#22848)
75
0
33,777
8
2
4
def dag_id(self) -> str: if self.dag: return self.dag.dag_id return "_in_memory_dag_"
airflow/models/taskmixin.py
38
airflow
{ "docstring": "Returns dag id if it has one or an adhoc/meaningless ID", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
10
Python
9
34154803ac73d62d3e969e480405df3073032622
taskmixin.py
46,825
5
21
dag_id
https://github.com/apache/airflow.git
Show tasks in grid view based on topological sort. (#22741) This takes the existing topological sort that existed on a DAG and moves it down to TaskGroup. In order to do this (and not have duplicated sort) the existing sort on DAG is re-implemented on top of the new method. This also surfaced a tiny bug in deserialize_task_group where the SerializedTaskGroup did not have `dag` set -- it didn't cause any problems until now but was needed to call `upstream_list` on a SerializedTaskGroup object.
42
0
9,007
9
5
21
def _check_pkg(target): log.debug(f"_check_pkg target '{target}'") ret = {} cmd = ["/usr/bin/lslpp", "-Lc", target] lines = __salt__["cmd.run_all"](cmd, python_shell=False).splitlines() name = "" version_num = "" rpmpkg = False for line in lines: if line.startswith("#"): continue comps = line.split(":") if len(comps) < 7: raise CommandExecutionError( "Error occurred finding fileset/package", info={"errors": comps[1].strip()}, ) # handle first matching line if "R" in comps[6]: name = comps[0] rpmpkg = True else: name = comps[1] # use fileset rather than rpm package version_num = comps[2] break log.debug( f"_check_pkg returning name '{name}', version number '{version_num}', rpmpkg '{rpmpkg}'" ) return name, version_num, rpmpkg
salt/modules/aixpkg.py
272
salt
{ "docstring": "\n Return name, version and if rpm package for specified target\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
94
Python
70
b2f8271fed3f05160431c55ad7c4e8f3e3e95c3e
aixpkg.py
215,103
28
148
_check_pkg
https://github.com/saltstack/salt.git
Complete intial tests for AIX yum and dnf support
290
0
53,820
17
1
10
def polyfit(x, y, deg, rcond=None, full=False, w=None): return pu._fit(polyvander, x, y, deg, rcond, full, w)
numpy/polynomial/polynomial.py
56
numpy
{ "docstring": "\n Least-squares fit of a polynomial to data.\n\n Return the coefficients of a polynomial of degree `deg` that is the\n least squares fit to the data values `y` given at points `x`. If `y` is\n 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple\n fits are done, one for each column of `y`, and the resulting\n coefficients are stored in the corresponding columns of a 2-D return.\n The fitted polynomial(s) are in the form\n\n .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,\n\n where `n` is `deg`.\n\n Parameters\n ----------\n x : array_like, shape (`M`,)\n x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.\n y : array_like, shape (`M`,) or (`M`, `K`)\n y-coordinates of the sample points. Several sets of sample points\n sharing the same x-coordinates can be (independently) fit with one\n call to `polyfit` by passing in for `y` a 2-D array that contains\n one data set per column.\n deg : int or 1-D array_like\n Degree(s) of the fitting polynomials. If `deg` is a single integer\n all terms up to and including the `deg`'th term are included in the\n fit. For NumPy versions >= 1.11.0 a list of integers specifying the\n degrees of the terms to include may be used instead.\n rcond : float, optional\n Relative condition number of the fit. Singular values smaller\n than `rcond`, relative to the largest singular value, will be\n ignored. The default value is ``len(x)*eps``, where `eps` is the\n relative precision of the platform's float type, about 2e-16 in\n most cases.\n full : bool, optional\n Switch determining the nature of the return value. When ``False``\n (the default) just the coefficients are returned; when ``True``,\n diagnostic information from the singular value decomposition (used\n to solve the fit's matrix equation) is also returned.\n w : array_like, shape (`M`,), optional\n Weights. If not None, the weight ``w[i]`` applies to the unsquared\n residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are\n chosen so that the errors of the products ``w[i]*y[i]`` all have the\n same variance. When using inverse-variance weighting, use\n ``w[i] = 1/sigma(y[i])``. The default value is None.\n\n .. versionadded:: 1.5.0\n\n Returns\n -------\n coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)\n Polynomial coefficients ordered from low to high. If `y` was 2-D,\n the coefficients in column `k` of `coef` represent the polynomial\n fit to the data in `y`'s `k`-th column.\n\n [residuals, rank, singular_values, rcond] : list\n These values are only returned if ``full == True``\n\n - residuals -- sum of squared residuals of the least squares fit\n - rank -- the numerical rank of the scaled Vandermonde matrix\n - singular_values -- singular values of the scaled Vandermonde matrix\n - rcond -- value of `rcond`.\n\n For more details, see `numpy.linalg.lstsq`.\n\n Raises\n ------\n RankWarning\n Raised if the matrix in the least-squares fit is rank deficient.\n The warning is only raised if ``full == False``. 
The warnings can\n be turned off by:\n\n >>> import warnings\n >>> warnings.simplefilter('ignore', np.RankWarning)\n\n See Also\n --------\n numpy.polynomial.chebyshev.chebfit\n numpy.polynomial.legendre.legfit\n numpy.polynomial.laguerre.lagfit\n numpy.polynomial.hermite.hermfit\n numpy.polynomial.hermite_e.hermefit\n polyval : Evaluates a polynomial.\n polyvander : Vandermonde matrix for powers.\n numpy.linalg.lstsq : Computes a least-squares fit from the matrix.\n scipy.interpolate.UnivariateSpline : Computes spline fits.\n\n Notes\n -----\n The solution is the coefficients of the polynomial `p` that minimizes\n the sum of the weighted squared errors\n\n .. math:: E = \\\\sum_j w_j^2 * |y_j - p(x_j)|^2,\n\n where the :math:`w_j` are the weights. This problem is solved by\n setting up the (typically) over-determined matrix equation:\n\n .. math:: V(x) * c = w * y,\n\n where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the\n coefficients to be solved for, `w` are the weights, and `y` are the\n observed values. This equation is then solved using the singular value\n decomposition of `V`.\n\n If some of the singular values of `V` are so small that they are\n neglected (and `full` == ``False``), a `RankWarning` will be raised.\n This means that the coefficient values may be poorly determined.\n Fitting to a lower order polynomial will usually get rid of the warning\n (but may not be what you want, of course; if you have independent\n reason(s) for choosing the degree which isn't working, you may have to:\n a) reconsider those reasons, and/or b) reconsider the quality of your\n data). The `rcond` parameter can also be set to a value smaller than\n its default, but the resulting fit may be spurious and have large\n contributions from roundoff error.\n\n Polynomial fits using double precision tend to \"fail\" at about\n (polynomial) degree 20. Fits using Chebyshev or Legendre series are\n generally better conditioned, but much can still depend on the\n distribution of the sample points and the smoothness of the data. If\n the quality of the fit is inadequate, splines may be a good\n alternative.\n\n Examples\n --------\n >>> np.random.seed(123)\n >>> from numpy.polynomial import polynomial as P\n >>> x = np.linspace(-1,1,51) # x \"data\": [-1, -0.96, ..., 0.96, 1]\n >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + Gaussian noise\n >>> c, stats = P.polyfit(x,y,3,full=True)\n >>> np.random.seed(123)\n >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1\n array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary\n >>> stats # note the large SSR, explaining the rather poor results\n [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary\n 0.28853036]), 1.1324274851176597e-014]\n\n Same thing without the added noise\n\n >>> y = x**3 - x\n >>> c, stats = P.polyfit(x,y,3,full=True)\n >>> c # c[0], c[2] should be \"very close to 0\", c[1] ~= -1, c[3] ~= 1\n array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00])\n >>> stats # note the minuscule SSR\n [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary\n 0.50443316, 0.28853036]), 1.1324274851176597e-014]\n\n ", "language": "en", "n_whitespaces": 1500, "n_words": 936, "vocab_size": 463 }
15
Python
13
4f479744bb9f5150d1406fdb0203c5d8714e7283
polynomial.py
160,848
2
41
polyfit
https://github.com/numpy/numpy.git
DOC: Replace the mathematical notation N(...) with text. The meaning of the second argument in the mathematical notation N(mu, b) for the normal distribution is not consistent. In some references it is the variance while in others it is the standard deviation. Let's avoid the ambiguity by not using the notation. Fixes #21296
21
0
38,758
7
1
10
def get_metrics(self, font, font_class, sym, fontsize, dpi): info = self._get_info(font, font_class, sym, fontsize, dpi) return info.metrics
lib/matplotlib/_mathtext.py
50
matplotlib
{ "docstring": "\n Parameters\n ----------\n font : str\n One of the TeX font names: \"tt\", \"it\", \"rm\", \"cal\", \"sf\", \"bf\",\n \"default\", \"regular\", \"bb\", \"frak\", \"scr\". \"default\" and \"regular\"\n are synonyms and use the non-math font.\n font_class : str\n One of the TeX font names (as for *font*), but **not** \"bb\",\n \"frak\", or \"scr\". This is used to combine two font classes. The\n only supported combination currently is ``get_metrics(\"frak\", \"bf\",\n ...)``.\n sym : str\n A symbol in raw TeX form, e.g., \"1\", \"x\", or \"\\sigma\".\n fontsize : float\n Font size in points.\n dpi : float\n Rendering dots-per-inch.\n\n Returns\n -------\n object\n\n The returned object has the following attributes (all floats,\n except *slanted*):\n\n - *advance*: The advance distance (in points) of the glyph.\n - *height*: The height of the glyph in points.\n - *width*: The width of the glyph in points.\n - *xmin*, *xmax*, *ymin*, *ymax*: The ink rectangle of the glyph\n - *iceberg*: The distance from the baseline to the top of the\n glyph. (This corresponds to TeX's definition of \"height\".)\n - *slanted*: Whether the glyph should be considered as \"slanted\"\n (currently used for kerning sub/superscripts).\n ", "language": "en", "n_whitespaces": 487, "n_words": 181, "vocab_size": 117 }
17
Python
14
85f30cbd485eddc93e3c9ff115ac21c0886909d5
_mathtext.py
108,619
38
37
get_metrics
https://github.com/matplotlib/matplotlib.git
Remove *math* parameter of various mathtext internal APIs. The *math* parameter is passed through many layers of the call stack but is ultimately only used for a single purpose: deciding whether to replace the ASCII hyphen by a (longer) unicode minus. Instead of doing that, just do the substitution at the parsing stage. In particular, this fixes problematic unicode minus support with the "cm" fontset. This patch also reverts a significant part of 52003e4, as LogFormatters no longer need to pass unicode minuses in mathtext -- everything gets converted by mathtext. Likewise, this change also invalidates the test_log_scales baseline image (old, buggy wrt. unicode minus); replace it by a test that the drawn ticks are as expected (which was the intent in 90c1aa3).
37
0
23,274
8
3
18
def to_objectchange(self, action): from extras.models import ObjectChange objectchange = ObjectChange( changed_object=self, object_repr=str(self)[:200], action=action ) if hasattr(self, '_prechange_snapshot'): objectchange.prechange_data = self._prechange_snapshot if action in (ObjectChangeActionChoices.ACTION_CREATE, ObjectChangeActionChoices.ACTION_UPDATE): objectchange.postchange_data = serialize_object(self) return objectchange
netbox/netbox/models/features.py
118
netbox
{ "docstring": "\n Return a new ObjectChange representing a change made to this object. This will typically be called automatically\n by ChangeLoggingMiddleware.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
30
Python
26
b67859832afa52742defa0a5bd60f9be1ddbe8e4
features.py
264,331
12
75
to_objectchange
https://github.com/netbox-community/netbox.git
Refactor to_objectchange()
134
0
77,693
12
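The `to_objectchange` record above captures a general change-logging pattern: stash a pre-change snapshot on the instance, then serialize the post-change state for create/update actions. A framework-free sketch of the same pattern — `ChangeLoggedMixin` and `to_change_record` are stand-in names, not part of NetBox:

```python
# Minimal snapshot/serialize change logging, mirroring to_objectchange() above.
import copy

class ChangeLoggedMixin:
    def snapshot(self):
        # Deep-copy the public state before any mutation.
        self._prechange_snapshot = copy.deepcopy(
            {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
        )

    def to_change_record(self, action):
        record = {
            "action": action,
            "object_repr": str(self)[:200],   # truncated, as in the record above
            "prechange": getattr(self, "_prechange_snapshot", None),
        }
        if action in ("create", "update"):
            record["postchange"] = {
                k: v for k, v in self.__dict__.items() if not k.startswith("_")
            }
        return record

class Site(ChangeLoggedMixin):
    def __init__(self, name):
        self.name = name

site = Site("dc1")
site.snapshot()          # middleware would call this before the edit
site.name = "dc2"
print(site.to_change_record("update"))
```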
17
37
def custom_lemmas(self, tab_file, lang): lg = lang.split("_")[0] if len(lg) != 3: raise ValueError("lang should be a (3 character) ISO 639-3 code") self._lang_data[lang] = [ defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list), ] for line in tab_file.readlines(): if isinstance(line, bytes): # Support byte-stream files (e.g. as returned by Python 2's # open() function) as well as text-stream ones line = line.decode("utf-8") if not line.startswith("#"): triple = line.strip().split("\t") if len(triple) < 3: continue offset_pos, label = triple[:2] val = triple[-1] if self.map30: if offset_pos in self.map30: # Map offset_pos to current Wordnet version: offset_pos = self.map30[offset_pos] else: # Some OMW offsets were never in Wordnet: if ( offset_pos not in self.nomap and offset_pos.replace("a", "s") not in self.nomap ): warnings.warn( f"{lang}: invalid offset {offset_pos} in '{line}'" ) continue elif offset_pos[-1] == "a": wnss = self.of2ss(offset_pos) if wnss and wnss.pos() == "s": # Wordnet pos is "s" # Label OMW adjective satellites back to their Wordnet pos ("s") offset_pos = self.ss2of(wnss) pair = label.split(":") attr = pair[-1] if len(pair) == 1 or pair[0] == lg: if attr == "lemma": val = val.strip().replace(" ", "_") self._lang_data[lang][1][val.lower()].append(offset_pos) if attr in self.lg_attrs: self._lang_data[lang][self.lg_attrs.index(attr)][ offset_pos ].append(val)
nltk/corpus/reader/wordnet.py
554
nltk
{ "docstring": "\n Reads a custom tab file containing mappings of lemmas in the given\n language to Princeton WordNet 3.0 synset offsets, allowing NLTK's\n WordNet functions to then be used with that language.\n\n See the \"Tab files\" section at http://compling.hss.ntu.edu.sg/omw/ for\n documentation on the Multilingual WordNet tab file format.\n\n :param tab_file: Tab file as a file or file-like object\n :type: lang str\n :param: lang ISO 639-3 code of the language of the tab file\n ", "language": "en", "n_whitespaces": 135, "n_words": 71, "vocab_size": 53 }
185
Python
122
3ca43e26efd7d5aa37b3cd79446258d8bfa79561
wordnet.py
42,581
45
325
custom_lemmas
https://github.com/nltk/nltk.git
Fix wordnet's all_synsets() function (#3078) * Fix all_synsets() function * Add simple regression tests for #3077 * Add suggestions by @tomaarsen Co-authored-by: Tom Aarsen <[email protected]>
1,060
0
7,637
20
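The `custom_lemmas` record above consumes the OMW tab-file format its docstring points to. A hedged usage sketch with a tiny in-memory tab file — the offsets and lemmas are illustrative, `qqq` is a made-up ISO 639-3 code, and running this requires the NLTK WordNet corpus (depending on the installed WordNet version, unmapped offsets may be skipped with a warning):

```python
# Feed custom_lemmas() a small in-memory "tab file": offset-pos, label, value.
from io import StringIO
from nltk.corpus import wordnet as wn

tab = StringIO(
    "# comment lines are skipped\n"
    "00001740-n\tlemma\tinvented_word\n"
    "00002137-n\tqqq:lemma\tanother_word\n"
)
wn.custom_lemmas(tab, lang="qqq")   # part before any "_" must be 3 letters

# Lookup may emit a remapping warning on newer WordNet versions.
print(wn.synset_from_pos_and_offset("n", 1740).lemma_names("qqq"))
```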
1
7
def connect(self, **kwargs) -> Dict[str, int]: raise NotImplementedError()
mindsdb/integrations/libs/base_handler.py
33
mindsdb
{ "docstring": "\n Set up any connections required by the handler here.\n\n Should return output of check_status() method after attempting connection.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 18 }
8
Python
8
c40c732253043ea111fbf197248a1bff4b7a524e
base_handler.py
115,077
7
20
connect
https://github.com/mindsdb/mindsdb.git
handlers
22
0
25,356
7
6
11
def get_function_for_token(frame, token, previous_frame=None): frame_function_name = frame.get("function") token_function_name = token.function_name # Try to use the function name we got from sourcemap-cache, filtering useless names. if token_function_name not in USELESS_FN_NAMES: return token_function_name # If not found, ask the callsite (previous token) for function name if possible. if previous_frame is not None: # `preprocess_frame` is supposed to make sure that `data` is present, # but better safe than sorry. last_token = (previous_frame.get("data") or {}).get("token") if last_token: return last_token.name # If there was no minified name at all, return even useless, filtered one from the original token. if not frame_function_name: return token_function_name # Otherwise fallback to the old, minified name. return frame_function_name
src/sentry/lang/javascript/processor.py
127
sentry
{ "docstring": "\n Get function name for a given frame based on the token resolved by symbolic.\n It tries following paths in order:\n - return token function name if we have a usable value (filtered through `USELESS_FN_NAMES` list),\n - return mapped name of the caller (previous frame) token if it had,\n - return token function name, including filtered values if it mapped to anything in the first place,\n - return current frames function name as a fallback\n ", "language": "en", "n_whitespaces": 96, "n_words": 74, "vocab_size": 50 }
109
Python
74
a00ada51c238564b48412cd59f261b84492b96a5
processor.py
87,555
12
72
get_function_for_token
https://github.com/getsentry/sentry.git
ref(processor): Try to fall back to the previous frame's token function name (#40602) This change applies the same heuristic that we previously used in the original `JavaScriptStacktraceProcessor`. The rest is described in the `get_function_for_token` function comment.
195
0
18,314
15
1
13
def _expand_to_beam_size(self, x): r check_type(x, 'x', (Variable), 'BeamSearchDecoder._expand_to_beam_size') x = nn.unsqueeze(x, [1]) expand_times = [1] * len(x.shape) expand_times[1] = self.beam_size x = paddle.tile(x, expand_times) return x
modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/beam.py
102
PaddleHub
{ "docstring": "\n This function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed\n of minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a\n shape `[batch_size, beam_size, s0, s1, ...]` composed of minibatch entries\n `t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated\n `beam_size` times.\n\n Parameters:\n x(Variable): A tensor with shape `[batch_size, ...]`, The data type\n should be float32, float64, int32, int64 or bool.\n\n Returns:\n Variable: A tensor with shape `[batch_size, beam_size, ...]`, whose \\\n data type is same as `x`.\n ", "language": "en", "n_whitespaces": 195, "n_words": 86, "vocab_size": 60 }
26
Python
21
ffcde21305c61d950a9f93e57e6180c9a9665b87
beam.py
50,182
22
65
_expand_to_beam_size
https://github.com/PaddlePaddle/PaddleHub.git
add disco_diffusion_ernievil_base
74
0
10,040
10
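The `_expand_to_beam_size` docstring above describes the beam-search tiling `[batch_size, ...] -> [batch_size, beam_size, ...]`. The same transformation in plain NumPy, as a Paddle-free illustration of the unsqueeze-then-tile steps:

```python
# Repeat each minibatch entry beam_size times along a new axis 1.
import numpy as np

batch_size, beam_size = 2, 3
x = np.arange(batch_size * 4).reshape(batch_size, 4)    # shape [2, 4]

expanded = np.expand_dims(x, 1)                  # [2, 1, 4]
expanded = np.tile(expanded, (1, beam_size, 1))  # [2, 3, 4]

assert expanded.shape == (batch_size, beam_size, 4)
assert (expanded[0, 0] == expanded[0, 2]).all()  # same entry on every beam
```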
3
11
def get_object_id(cls, **data): object_id, ext_ref = data.get("id"), data.get("external_reference") validate_one_of_args_is_in_mutation( CoreErrorCode, "id", object_id, "external_reference", ext_ref ) if ext_ref and not object_id: object_id = ext_ref_to_global_id_or_error(cls._meta.model, ext_ref) return object_id
saleor/graphql/core/mutations.py
98
saleor
{ "docstring": "Resolve object id by given id or external reference.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
26
Python
21
db7e91a3e124b0df2c08d373a541d9a225ebcb05
mutations.py
29,997
8
58
get_object_id
https://github.com/saleor/saleor.git
Allow external references to be used instead of Saleor-assigned IDs (#11410) * Add external_reference to Product model; handle product query, update and delete by external_reference; cover changes with tests Handle ProductVariant Handle Order Handle Attribute Handle Warehouse query only Refactor resolvers Handle Account Code refactor, fix tests Allow updating external_reference field; rename ext_ref in resolvers; * update changelog * Add tests to check externalReference uniqueness
90
0
5,272
12
1
21
def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch): MNAME = "hi" foutl = get_io_paths(hello_world_f90, mname=MNAME) ipath = foutl.f90inp monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split()) with util.switchdir(ipath.parent): f2pycli() # Always generate C module assert Path.exists(foutl.cmodf) # File contains a function, check for F77 wrappers assert Path.exists(foutl.wrap77)
numpy/f2py/tests/test_f2py2e.py
134
numpy
{ "docstring": "Checks the generation of files based on a module name\n CLI :: -m\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
41
Python
37
729ad4f92420231e2a7009b3223c6c7620b8b808
test_f2py2e.py
160,160
9
74
test_mod_gen_f77
https://github.com/numpy/numpy.git
TST: Initialize f2py2e tests of the F2PY CLI (#20668) Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff. More importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.
78
0
38,532
11
1
4
def average(inputs, **kwargs): return Average(**kwargs)(inputs)
keras/layers/merging/average.py
32
keras
{ "docstring": "Functional interface to the `tf.keras.layers.Average` layer.\n\n Example:\n\n >>> x1 = np.ones((2, 2))\n >>> x2 = np.zeros((2, 2))\n >>> y = tf.keras.layers.Average()([x1, x2])\n >>> y.numpy().tolist()\n [[0.5, 0.5], [0.5, 0.5]]\n\n Usage in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)\n >>> avg = tf.keras.layers.Average()([x1, x2])\n >>> out = tf.keras.layers.Dense(4)(avg)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n Args:\n inputs: A list of input tensors.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the average of the inputs.\n\n Raises:\n ValueError: If there is a shape mismatch between the inputs and the shapes\n cannot be broadcasted to match.\n ", "language": "en", "n_whitespaces": 192, "n_words": 105, "vocab_size": 72 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
average.py
272,643
2
18
average
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
11
0
81,029
9
3
29
def get_data(filters): conditions = get_conditions(filters) data = frappe.db.sql( """SELECT dn.name as dn_id, dn.posting_date, dn.company, dn.company_gstin, dn.customer, dn.customer_gstin, dni.item_code, dni.item_name, dni.description, dni.gst_hsn_code, dni.uom, dni.qty, dni.amount, dn.mode_of_transport, dn.distance, dn.transporter_name, dn.gst_transporter_id, dn.lr_no, dn.lr_date, dn.vehicle_no, dn.gst_vehicle_type, dn.company_address, dn.shipping_address_name FROM `tabDelivery Note` AS dn join `tabDelivery Note Item` AS dni on (dni.parent = dn.name) WHERE dn.docstatus < 2 %s""" % conditions, as_dict=1, ) unit = { "Bag": "BAGS", "Bottle": "BOTTLES", "Kg": "KILOGRAMS", "Liter": "LITERS", "Meter": "METERS", "Nos": "NUMBERS", "PKT": "PACKS", "Roll": "ROLLS", "Set": "SETS", } # Regular expression set to remove all the special characters special_characters = r"[$%^*()+\\[\]{};':\"\\|<>.?]" for row in data: set_defaults(row) set_taxes(row, filters) set_address_details(row, special_characters) # Eway Bill accepts date as dd/mm/yyyy and not dd-mm-yyyy row.posting_date = "/".join(str(row.posting_date).replace("-", "/").split("/")[::-1]) row.lr_date = "/".join(str(row.lr_date).replace("-", "/").split("/")[::-1]) if row.gst_vehicle_type == "Over Dimensional Cargo (ODC)": row.gst_vehicle_type = "ODC" row.item_name = re.sub(special_characters, " ", row.item_name) row.description = row.item_name row.uom = unit.get(row.uom, row.uom) # For removing special characters and numbers from customer. row.customer = re.sub(special_characters[:-1] + "&0-9" + "]", "", row.customer) return data
erpnext/regional/report/eway_bill/eway_bill.py
423
erpnext
{ "docstring": "\n\t\tSELECT\n\t\t\tdn.name as dn_id, dn.posting_date, dn.company, dn.company_gstin, dn.customer, dn.customer_gstin, dni.item_code, dni.item_name, dni.description, dni.gst_hsn_code, dni.uom, dni.qty, dni.amount, dn.mode_of_transport, dn.distance, dn.transporter_name, dn.gst_transporter_id, dn.lr_no, dn.lr_date, dn.vehicle_no, dn.gst_vehicle_type, dn.company_address, dn.shipping_address_name\n\t\tFROM\n\t\t\t`tabDelivery Note` AS dn join `tabDelivery Note Item` AS dni on (dni.parent = dn.name)\n\t\tWHERE\n\t\t\tdn.docstatus < 2\n\t\t\t%s ", "language": "en", "n_whitespaces": 40, "n_words": 46, "vocab_size": 44 }
117
Python
98
494bd9ef78313436f0424b918f200dab8fc7c20b
eway_bill.py
67,202
39
235
get_data
https://github.com/frappe/erpnext.git
style: format code with black
83
0
14,440
18
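The densest line in the `get_data` record above is the date rewrite. Unpacked, it converts an ISO `yyyy-mm-dd` date to the `dd/mm/yyyy` form the e-Way Bill format expects by swapping the separator and reversing the parts:

```python
# Step-by-step version of the one-liner used for posting_date and lr_date.
posting_date = "2021-03-07"
parts = posting_date.replace("-", "/").split("/")   # ['2021', '03', '07']
eway_date = "/".join(parts[::-1])                   # reverse -> dd/mm/yyyy
assert eway_date == "07/03/2021"
```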
5
32
def _get_credentials_and_project_id(self) -> Tuple[google.auth.credentials.Credentials, Optional[str]]: if self._cached_credentials is not None: return self._cached_credentials, self._cached_project_id key_path: Optional[str] = self._get_field('key_path', None) try: keyfile_dict: Optional[str] = self._get_field('keyfile_dict', None) keyfile_dict_json: Optional[Dict[str, str]] = None if keyfile_dict: keyfile_dict_json = json.loads(keyfile_dict) except json.decoder.JSONDecodeError: raise AirflowException('Invalid key JSON.') key_secret_name: Optional[str] = self._get_field('key_secret_name', None) key_secret_project_id: Optional[str] = self._get_field('key_secret_project_id', None) target_principal, delegates = _get_target_principal_and_delegates(self.impersonation_chain) credentials, project_id = get_credentials_and_project_id( key_path=key_path, keyfile_dict=keyfile_dict_json, key_secret_name=key_secret_name, key_secret_project_id=key_secret_project_id, scopes=self.scopes, delegate_to=self.delegate_to, target_principal=target_principal, delegates=delegates, ) overridden_project_id = self._get_field('project') if overridden_project_id: project_id = overridden_project_id self._cached_credentials = credentials self._cached_project_id = project_id return credentials, project_id
airflow/providers/google/common/hooks/base_google.py
337
airflow
{ "docstring": "Returns the Credentials object for Google API and the associated project_id", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
84
Python
56
55fd02a38919526776cfe69d715873da75d6f26f
base_google.py
42,898
31
217
_get_credentials_and_project_id
https://github.com/apache/airflow.git
Add key_secret_project_id parameter which specifies a project with KeyFile (#23930)
358
0
7,759
12
4
15
def connect(self): if hasattr(self, "_connecting_future") and not self._connecting_future.done(): future = self._connecting_future else: future = salt.ext.tornado.concurrent.Future() self._connecting_future = future self.io_loop.add_callback(self._connect) # Add the callback only when a new future is created if self.connect_callback is not None:
salt/transport/tcp.py
112
salt
{ "docstring": "\n Ask for this client to reconnect to the origin\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
35
Python
26
43277294a3454e5dcd9079e005f747bf880801f6
tcp.py
215,590
11
76
connect
https://github.com/saltstack/salt.git
Test fix
122
0
54,043
14
1
13
def test_state_changes_on_tab_change(backforward_widget, tabs, fake_web_tab): tab_with_history = fake_web_tab(can_go_back=True, can_go_forward=True) tab_without_history = fake_web_tab(can_go_back=False, can_go_forward=False) tabs.widget.tabs = [tab_with_history] backforward_widget.enabled = True backforward_widget.on_tab_cur_url_changed(tabs) assert backforward_widget.isVisible() tabs.widget.tabs = [tab_without_history] backforward_widget.on_tab_cur_url_changed(tabs) assert backforward_widget.text() == '' assert not backforward_widget.isVisible()
tests/unit/mainwindow/statusbar/test_backforward.py
146
qutebrowser
{ "docstring": "Test we go invisible when switching to a tab without history.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
32
Python
23
79bb6670d8969b850965cd5d895bcd8f09d59311
test_backforward.py
321,887
11
90
test_state_changes_on_tab_change
https://github.com/qutebrowser/qutebrowser.git
bar: Test enabled attribute on progress and backforward There is now some code in statusbar relying on the enabled attribute stopping events from being processed (or at least stopping them from showing the widget again). So add tests to make sure that behaviour keeps working. Also split the big test in test_backforward into a couple of smaller ones and pull some common lines out to a (still clunky) fixture.
65
0
117,965
9
2
7
def execute(): if frappe.db.exists("DocType", "Lost Reason Detail"): frappe.reload_doc("crm", "doctype", "opportunity_lost_reason") frappe.reload_doc("crm", "doctype", "opportunity_lost_reason_detail") frappe.reload_doc("setup", "doctype", "quotation_lost_reason_detail") frappe.db.sql( ) frappe.db.sql( ) frappe.db.sql( ) frappe.delete_doc("DocType", "Lost Reason Detail")
erpnext/patches/v12_0/rename_lost_reason_detail.py
151
erpnext
{ "docstring": "INSERT INTO `tabOpportunity Lost Reason Detail` SELECT * FROM `tabLost Reason Detail` WHERE `parenttype` = 'Opportunity'INSERT INTO `tabQuotation Lost Reason Detail` SELECT * FROM `tabLost Reason Detail` WHERE `parenttype` = 'Quotation'INSERT INTO `tabQuotation Lost Reason` (`name`, `creation`, `modified`, `modified_by`, `owner`, `docstatus`, `parent`, `parentfield`, `parenttype`, `idx`, `_comments`, `_assign`, `_user_tags`, `_liked_by`, `order_lost_reason`)\n SELECT o.`name`, o.`creation`, o.`modified`, o.`modified_by`, o.`owner`, o.`docstatus`, o.`parent`, o.`parentfield`, o.`parenttype`, o.`idx`, o.`_comments`, o.`_assign`, o.`_user_tags`, o.`_liked_by`, o.`lost_reason`\n FROM `tabOpportunity Lost Reason` o LEFT JOIN `tabQuotation Lost Reason` q ON q.name = o.name WHERE q.name IS NULL", "language": "en", "n_whitespaces": 106, "n_words": 85, "vocab_size": 56 }
26
Python
17
494bd9ef78313436f0424b918f200dab8fc7c20b
rename_lost_reason_detail.py
66,651
17
78
execute
https://github.com/frappe/erpnext.git
style: format code with black
14
0
14,272
10
1
3
async def _async_run_connected_events(self) -> None: await self._async_connect_ble_scanner()
homeassistant/components/shelly/coordinator.py
28
core
{ "docstring": "Run connected events.\n\n This will be executed on connect or when the config entry\n is updated.\n ", "language": "en", "n_whitespaces": 37, "n_words": 16, "vocab_size": 16 }
7
Python
7
435fc237375b86a5d6d8498ba5216c208b665ecc
coordinator.py
290,641
7
14
_async_run_connected_events
https://github.com/home-assistant/core.git
Add shelly ble scanner support (#82007)
21
0
89,755
8
1
9
def _tf_tensor_numpy_output(self, string): modified_string = self._NUMPY_OUTPUT_RE.sub(r"\1", string) return modified_string, modified_string != string MESSAGE = textwrap.dedent( """\n ############################################################# Check the documentation (go/testable-docstrings) on how to write testable docstrings. #############################################################""" )
keras/testing_infra/keras_doctest_lib.py
56
keras
{ "docstring": "\\n\n #############################################################\n Check the documentation (go/testable-docstrings) on how to\n write testable docstrings.\n #############################################################", "language": "en", "n_whitespaces": 40, "n_words": 13, "vocab_size": 12 }
16
Python
14
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras_doctest_lib.py
276,314
3
26
_tf_tensor_numpy_output
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
43
0
81,627
9
1
2
def cli(): @cli.command()
scripts-dev/release.py
23
@cli.command()
synapse
{ "docstring": "An interactive script to walk through the parts of creating a release.\n\n Requires the dev dependencies be installed, which can be done via:\n\n pip install -e .[dev]\n\n Then to use:\n\n ./scripts-dev/release.py prepare\n\n # ... ask others to look at the changelog ...\n\n ./scripts-dev/release.py tag\n\n # ... wait for assets to build ...\n\n ./scripts-dev/release.py publish\n ./scripts-dev/release.py upload\n\n # Optional: generate some nice links for the announcement\n\n ./scripts-dev/release.py upload\n\n If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the\n `tag`/`publish` command, then a new draft release will be created/published.\n ", "language": "en", "n_whitespaces": 168, "n_words": 90, "vocab_size": 68 }
3
Python
3
12d1f82db213603972d60be3f46f6a36c3c2330f
release.py
247,752
1
5
cli
https://github.com/matrix-org/synapse.git
Generate announcement links in release script (#12242)
5
1
71,897
6
1
8
def check_for_new_doc_with_same_name_as_deleted_parent(doc): parent_creation_time = frappe.db.get_value(doc["parenttype"], doc["parent"], "creation") child_creation_time = doc["creation"] return getdate(parent_creation_time) > getdate(child_creation_time)
erpnext/patches/v13_0/delete_orphaned_tables.py
74
erpnext
{ "docstring": "\n\tCompares creation times of parent and child docs.\n\tSince Transaction Deletion Record resets the naming series after deletion,\n\tit allows the creation of new docs with the same names as the deleted ones.\n\t", "language": "en", "n_whitespaces": 30, "n_words": 33, "vocab_size": 28 }
14
Python
13
494bd9ef78313436f0424b918f200dab8fc7c20b
delete_orphaned_tables.py
66,743
4
42
check_for_new_doc_with_same_name_as_deleted_parent
https://github.com/frappe/erpnext.git
style: format code with black
10
0
14,315
10
1
7
def set_capstyle(self, s): cs = CapStyle(s) self._capstyle = cs self.stale = True
lib/matplotlib/patches.py
41
matplotlib
{ "docstring": "\n Set the `.CapStyle`.\n\n The default capstyle is 'round' for `.FancyArrowPatch` and 'butt' for\n all other patches.\n\n Parameters\n ----------\n s : `.CapStyle` or %(CapStyle)s\n ", "language": "en", "n_whitespaces": 73, "n_words": 23, "vocab_size": 22 }
12
Python
9
b24acb7772e0534f4bcdb0b5b492d9d94954dd91
patches.py
107,201
4
24
set_capstyle
https://github.com/matplotlib/matplotlib.git
DOC: Document default cap styles - remove '(default)' from cap style demo as this is only true for Line2D and the default rcParameters - document default cap styles for Line2D and Patch in their cap style setters - document default cap style for GraphicsContextBase in the same way as it's already done for joinstyle
40
0
22,641
8
1
9
def mode(self, axis=0, numeric_only=False, dropna=True): # noqa: PR01, RT01, D200 axis = self._get_axis_number(axis) return self.__constructor__( query_compiler=self._query_compiler.mode( axis=axis, numeric_only=numeric_only, dropna=dropna ) )
modin/pandas/base.py
80
modin
{ "docstring": "\n Get the mode(s) of each element along the selected axis.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
21
Python
20
605efa618e7994681f57b11d04d417f353ef8d50
base.py
153,576
7
52
mode
https://github.com/modin-project/modin.git
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
87
0
35,457
11
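The `mode` record above delegates to Modin's query compiler; the user-facing semantics follow pandas. A small usage example, shown with pandas (whose API Modin mirrors — `import modin.pandas as pd` would behave the same):

```python
# Most frequent value per column along axis 0.
import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [3, 4, 4]})
print(df.mode())   # a -> 1, b -> 4
```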
1
3
def read_text(self, filename):
python3.10.4/Lib/importlib/metadata/__init__.py
15
XX-Net
{ "docstring": "Attempt to load metadata file given by the name.\n\n :param filename: The name of the file in the distribution info.\n :return: The text if found, otherwise None.\n ", "language": "en", "n_whitespaces": 48, "n_words": 27, "vocab_size": 23 }
3
Python
3
8198943edd73a363c266633e1aa5b2a9e9c9f526
__init__.py
218,296
1
8
read_text
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
10
0
55,244
6
1
14
async def test_load_types(hass, create_hdmi_network, create_cec_entity): config = {"platform": "media_player", "types": {"hdmi_cec.hdmi_3": "switch"}} hdmi_network = await create_hdmi_network(config=config) mock_hdmi_device = MockHDMIDevice(logical_address=3) await create_cec_entity(hdmi_network, mock_hdmi_device) mock_hdmi_device.set_update_callback.assert_called_once() state = hass.states.get("media_player.hdmi_3") assert state is None state = hass.states.get("switch.hdmi_3") assert state is not None mock_hdmi_device = MockHDMIDevice(logical_address=4) await create_cec_entity(hdmi_network, mock_hdmi_device) mock_hdmi_device.set_update_callback.assert_called_once() state = hass.states.get("media_player.hdmi_4") assert state is not None state = hass.states.get("switch.hdmi_4") assert state is None
tests/components/hdmi_cec/test_switch.py
235
core
{ "docstring": "Test that switch entity is loaded when types is set.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
60
Python
30
7cd4be1310b3f76398b4404d3f4ecb26b9533cee
test_switch.py
303,600
17
136
test_load_types
https://github.com/home-assistant/core.git
Add tests for the HDMI-CEC integration (#75094) * Add basic tests to the HDMI-CEC component * Add tests for the HDMI-CEC switch component * Add test for watchdog code * Start adding tests for the HDMI-CEC media player platform Also some cleanup and code move. * Add more tests for media_player And cleanup some switch tests. * Improve xfail message for features * Align test pyCEC dependency with main dependency * Make fixtures snake_case * Cleanup call asserts * Cleanup service tests * fix issues with media player tests * Cleanup MockHDMIDevice class * Cleanup watchdog tests * Add myself as code owner for the HDMI-CEC integration * Fix async fire time changed time jump * Fix event api sync context * Delint tests * Parametrize watchdog test Co-authored-by: Martin Hjelmare <[email protected]>
111
0
102,418
11
1
73
def track_tf1_style_variables(method):
keras/legacy_tf_layers/variable_scope_shim.py
244
"""Wrapmodulethis decorator to capturestyle weights. Decorating a `tf.keras.Layer`'s or `tf.Module`'s methods withwill cause the layermodule to track weightsviaand by extensioninside the decorated method. In addition to tracking the weights themselves under theif the methodto a `tf.keras.Layer` then any regularization losses specified via`tf.compat.v1.layers` regularizer arguments willtracked by the layer under the standard `layer.losses` property. This tracking enables using large classes ofpropertymodelcode inside of KerasTF2 behaviors enabled. Example of capturingmodeling codea Keras```
keras
{ "docstring": "Wrap layer & module methods in this decorator to capture tf1-style weights.\n\n Decorating a `tf.keras.Layer`'s or `tf.Module`'s methods with this\n decorator will cause the layer/module to track weights created/used\n via `tf.compat.v1.get_variable` (and by extension `tf.compat.v1.layers`)\n inside the decorated method.\n\n In addition to tracking the weights themselves under the standard\n `layer.variable`/`module.variable`/etc. properties, if the method belongs\n to a `tf.keras.Layer` then any regularization losses specified via the\n `get_variable` or `tf.compat.v1.layers` regularizer arguments will get\n tracked by the layer under the standard `layer.losses` property.\n\n This tracking enables using large classes of TF1-style model-forward-pass\n code inside of Keras layers or `tf.Modules` in TF2 with TF2 behaviors enabled.\n\n Example of capturing tf.compat.v1.layer-based modeling code as a Keras layer:\n\n ```python", "language": "en", "n_whitespaces": 153, "n_words": 114, "vocab_size": 81 }
2
Python
2
84afc5193d38057e2e2badf9c889ea87d80d8fbf
variable_scope_shim.py
274,464
5
25
track_tf1_style_variables
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
5
20
81,205
12
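The docstring in the `track_tf1_style_variables` record above is cut off right where its example begins (after "```python"). The following is a hedged sketch of the kind of usage that example introduces — TF1-style `tf.compat.v1.layers` code wrapped in a Keras layer so the shim captures its variables. The import path follows the record's file location (the released public alias may differ), and `WrappedDense` is invented for illustration:

```python
import tensorflow as tf
from keras.legacy_tf_layers import variable_scope_shim

class WrappedDense(tf.keras.layers.Layer):

    @variable_scope_shim.track_tf1_style_variables
    def call(self, inputs):
        with tf.compat.v1.variable_scope("block"):
            # get_variable / tf.compat.v1.layers calls in here are captured
            # as this layer's weights by the shim.
            return tf.compat.v1.layers.dense(inputs, units=8, name="fc")

layer = WrappedDense()
_ = layer(tf.ones([2, 4]))
print(len(layer.trainable_weights))   # kernel + bias tracked -> 2
```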
1
3
def warning(self, response): return '110 - "Response is Stale"'
.venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/heuristics.py
20
transferlearning
{ "docstring": "\n Return a valid 1xx warning header value describing the cache\n adjustments.\n\n The response is provided too allow warnings like 113\n http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need\n to explicitly say response is over 24 hours old.\n ", "language": "en", "n_whitespaces": 76, "n_words": 33, "vocab_size": 31 }
9
Python
9
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
heuristics.py
61,500
2
10
warning
https://github.com/jindongwang/transferlearning.git
upd; format
23
0
12,595
6
2
26
def debug_launcher(function, args=(), num_processes=2): if is_torch_version("<", "1.5.0"): raise ImportError( "Using `debug_launcher` for distributed training on GPUs require torch >= 1.5.0, got " f"{torch.__version__}." ) from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=num_processes, master_addr="127.0.01", master_port="29500", mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, use_cpu="yes", ): launcher = PrepareForLaunch(function, debug=True) start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
src/accelerate/launchers.py
182
accelerate
{ "docstring": "\n Launches a training function using several processes on CPU for debugging purposes.\n\n <Tip warning={true}>\n\n This function is provided for internal testing and debugging, but it's not intended for real trainings. It will\n only use the CPU.\n\n </Tip>\n\n Args:\n function (`Callable`):\n The training function to execute.\n args (`Tuple`):\n Tuple of arguments to pass to the function (it will receive `*args`).\n num_processes (`int`, *optional*, defaults to 2):\n The number of processes to use for training.\n ", "language": "en", "n_whitespaces": 149, "n_words": 73, "vocab_size": 55 }
79
Python
70
f6ec2660f01e5bb37399407b3a01b72a43ceb328
launchers.py
337,603
36
102
debug_launcher
https://github.com/huggingface/accelerate.git
Refactor version checking into a utility (#395) Co-authored-by: Sylvain Gugger <[email protected]>
243
0
121,089
13
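A hedged usage sketch for the `debug_launcher` record above, following its docstring: spawn two CPU processes and have each report its index. The `train_step` body is illustrative and assumes `Accelerator` and `debug_launcher` are importable from the package root:

```python
from accelerate import Accelerator, debug_launcher

def train_step():
    # Each spawned process reads the patched env vars via Accelerator.
    accelerator = Accelerator(cpu=True)
    print(f"process {accelerator.process_index} of {accelerator.num_processes}")

if __name__ == "__main__":
    debug_launcher(train_step, num_processes=2)
```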
7
25
def _autodetect_num_gpus(): result = 0 if importlib.util.find_spec("GPUtil"): gpu_list = GPUtil.getGPUs() result = len(gpu_list) elif sys.platform.startswith("linux"): proc_gpus_path = "/proc/driver/nvidia/gpus" if os.path.isdir(proc_gpus_path): result = len(os.listdir(proc_gpus_path)) elif sys.platform == "win32": props = "AdapterCompatibility" cmdargs = ["WMIC", "PATH", "Win32_VideoController", "GET", props] lines = subprocess.check_output(cmdargs).splitlines()[1:] result = len([x.rstrip() for x in lines if x.startswith(b"NVIDIA")]) return result
python/ray/_private/resource_spec.py
227
ray
{ "docstring": "Attempt to detect the number of GPUs on this machine.\n\n TODO(rkn): Only detects NVidia GPUs (except when using WMIC on windows)\n\n Returns:\n The number of GPUs if any were detected, otherwise 0.\n ", "language": "en", "n_whitespaces": 48, "n_words": 32, "vocab_size": 27 }
51
Python
35
b87731c1b64988cea5ce80a6aec55207ef7efd6f
resource_spec.py
125,305
15
130
_autodetect_num_gpus
https://github.com/ray-project/ray.git
Windows gpu detection workaround with GPUtil (#25701) Because [WMIC is now deprecated](https://docs.microsoft.com/en-us/windows/deployment/planning/windows-10-deprecated-features), #9300 may stop working on recent Windows systems. As a workaround this PR extends GPUtil to do GPU detection when installed on Windows systems. Co-authored-by: Matti Picus <[email protected]>
136
0
27,830
16
3
16
def apply_valid_mask(losses, sw, mask, reduction): if mask is not None: mask = tf.cast(mask, losses.dtype) if reduction in (ReductionV2.AUTO, ReductionV2.SUM_OVER_BATCH_SIZE): # Valid entries have weight `total/valid`, while invalid ones # have 0. When summed over batch, they will be reduced to: # # mean(loss * sample_weight * total / valid) # = sum(loss * sample_weight * total / valid) / total # = sum(loss * sample_weight) / total * total / valid # = sum(loss * sample_weight) / valid total = tf.cast(tf.size(mask), losses.dtype) valid = tf.reduce_sum(mask) mask *= total / valid return apply_mask(losses, sw, mask)
keras/utils/losses_utils.py
131
keras
{ "docstring": "Redistribute sample weights considering only valid entries.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
94
Python
51
4f1308112f4188c4e14fdf3a59af8fe5f30db61f
losses_utils.py
279,206
8
83
apply_valid_mask
https://github.com/keras-team/keras.git
Update docs
233
0
82,878
14
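The inline comments in `apply_valid_mask` above argue that scaling the mask by `total / valid` turns a mean over the whole batch into a mean over only the valid entries. A small numeric check of that identity:

```python
# Verify: mean(losses * mask * total/valid) == sum(valid losses) / valid.
import numpy as np

losses = np.array([1.0, 2.0, 3.0, 4.0])
mask = np.array([1.0, 1.0, 0.0, 0.0])    # last two entries are invalid

total, valid = mask.size, mask.sum()      # 4 and 2
scaled_mask = mask * (total / valid)      # [2, 2, 0, 0]

mean_all = np.mean(losses * scaled_mask)          # 6 / 4 = 1.5
mean_valid = losses[mask == 1].sum() / valid      # 3 / 2 = 1.5
assert np.isclose(mean_all, mean_valid)
```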
1
5
def modified_vars(self) -> Dict[str, "tk.BooleanVar"]: assert self.command_notebook is not None return self.command_notebook.modified_vars
lib/gui/utils.py
43
faceswap
{ "docstring": " dict: The command notebook modified tkinter variables. ", "language": "en", "n_whitespaces": 8, "n_words": 7, "vocab_size": 7 }
12
Python
12
dc18c74eea0c7837a820d27628cb12b0824fa30e
utils.py
101,524
4
26
modified_vars
https://github.com/deepfakes/faceswap.git
Bugfix: Preview for extract in batch mode
33
0
20,935
7
4
9
def _is_zero_copy_possible(self) -> bool: if self.__is_zero_copy_possible is None: if self._df._has_arrow_table(): # If PyArrow table is already materialized then we can # retrieve data zero-copy self.__is_zero_copy_possible = True elif not self._df._can_execute_arrow(): # When not able to execute the plan via PyArrow means # that we have to involve OmniSci, so no zero-copy. self.__is_zero_copy_possible = False else: # Check whether the plan for PyArrow can be executed zero-copy self.__is_zero_copy_possible = self._is_zero_copy_arrow_op(self._df._op) return self.__is_zero_copy_possible
modin/experimental/core/execution/native/implementations/omnisci_on_native/exchange/dataframe_protocol/dataframe.py
112
modin
{ "docstring": "\n Check whether it's possible to retrieve data from the DataFrame zero-copy.\n\n The 'zero-copy' term also means that no extra computations or data transers\n are needed to access the data.\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 82, "n_words": 32, "vocab_size": 29 }
71
Python
50
0c1a2129df64cf45bf1ff49c8ed92c510fdb1c82
dataframe.py
153,666
19
64
_is_zero_copy_possible
https://github.com/modin-project/modin.git
FEAT-#4244: Implement dataframe exchange protocol for OmniSci (#4269) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Signed-off-by: Dmitry Chigarev <[email protected]>
245
0
35,528
15
6
43
def test_loss_of_perfect_prediction(loss, sample_weight): if not loss.is_multiclass: # Use small values such that exp(value) is not nan. raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10]) # If link is identity, we must respect the interval of y_pred: if isinstance(loss.link, IdentityLink): eps = 1e-10 low = loss.interval_y_pred.low if not loss.interval_y_pred.low_inclusive: low = low + eps high = loss.interval_y_pred.high if not loss.interval_y_pred.high_inclusive: high = high - eps raw_prediction = np.clip(raw_prediction, low, high) y_true = loss.link.inverse(raw_prediction) else: # HalfMultinomialLoss y_true = np.arange(loss.n_classes).astype(float) # raw_prediction with entries -exp(10), but +exp(10) on the diagonal # this is close enough to np.inf which would produce nan raw_prediction = np.full( shape=(loss.n_classes, loss.n_classes), fill_value=-np.exp(10), dtype=float, ) raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10) if sample_weight == "range": sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0]) loss_value = loss.loss( y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight, ) constant_term = loss.constant_to_optimal_zero( y_true=y_true, sample_weight=sample_weight ) # Comparing loss_value + constant_term to zero would result in large # round-off errors. assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15) @pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) @pytest.mark.parametrize("sample_weight", [None, "range"])
sklearn/_loss/tests/test_loss.py
446
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name) @pytest.mark.parametrize("sample_weight", [None, "range"])
scikit-learn
{ "docstring": "Test value of perfect predictions.\n\n Loss of y_pred = y_true plus constant_to_optimal_zero should sums up to\n zero.\n ", "language": "en", "n_whitespaces": 26, "n_words": 17, "vocab_size": 16 }
159
Python
110
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
test_loss.py
259,435
32
266
test_loss_of_perfect_prediction
https://github.com/scikit-learn/scikit-learn.git
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
438
1
75,769
15
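The test above rests on the identity `loss(y_true, perfect prediction) + constant_to_optimal_zero == 0`. A hedged, hand-written illustration of the same identity with the half Poisson deviance — the formulas below are spelled out directly rather than taken from sklearn's internal classes:

```python
# Half Poisson deviance: loss(y, raw) = exp(raw) - y * raw, link raw = log(y_pred).
import numpy as np

y = np.array([0.5, 1.0, 3.0])
raw = np.log(y)                   # perfect prediction: y_pred == y_true
loss = np.exp(raw) - y * raw     # = y - y * log(y)
constant = y * np.log(y) - y     # the constant_to_optimal_zero analogue
assert np.allclose(loss + constant, 0.0)
```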
1
3
def head_bundle_is_empty(self): return self._head_bundle_is_empty
python/ray/air/execution/resources/request.py
19
ray
{ "docstring": "Returns True if head bundle is empty while child bundles\n need resources.\n\n This is considered an internal API within Tune.\n ", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 19 }
4
Python
4
edb17fd2069844f12237c85ba6607afae536401d
request.py
138,036
2
10
head_bundle_is_empty
https://github.com/ray-project/ray.git
[air/tune] Internal resource management 1 - Ray AIR resource manager implementation (#30777) Prerequisite to #30016 This PR adds a new Ray AIR resource manager to replace the PlacementGroupManager of Ray Tune. Details can be found in #30016. Specifically, this PR - Adds the main resource manager abstractions - Renames (and moves) PlacementGroupFactory to ResourceRequest - Adds implementations and tests for a placement group based manager and a budget based manager Signed-off-by: Kai Fricke <[email protected]> Signed-off-by: Kai Fricke <[email protected]> Co-authored-by: matthewdeng <[email protected]>
18
0
31,282
6
1
3
def _transform_unaggregated_gradients(self, grads_and_vars): return grads_and_vars
keras/optimizers/optimizer_v2/optimizer_v2.py
18
keras
{ "docstring": "Called in `apply_gradients` before gradient aggregation.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
optimizer_v2.py
275,502
2
10
_transform_unaggregated_gradients
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
19
0
81,400
6
1
4
def execute(): frappe.db.sql( """ UPDATE `tabQuotation` set status = 'Open' where docstatus = 1 and status = 'Submitted' """ )
erpnext/patches/v12_0/set_quotation_status.py
23
erpnext
{ "docstring": " UPDATE `tabQuotation` set status = 'Open'\n\t\twhere docstatus = 1 and status = 'Submitted' ", "language": "en", "n_whitespaces": 14, "n_words": 14, "vocab_size": 11 }
4
Python
4
494bd9ef78313436f0424b918f200dab8fc7c20b
set_quotation_status.py
66,677
5
12
execute
https://github.com/frappe/erpnext.git
style: format code with black
1
0
14,287
8
5
15
def temporary_settings(**kwargs): old_env = os.environ.copy() old_settings = prefect.settings.from_env() try: for setting in kwargs: os.environ[setting] = str(kwargs[setting]) assert old_env != os.environ, "Environment did not change" new_settings = prefect.settings.from_env() assert new_settings != old_settings, "Temporary settings did not change values" yield new_settings finally: for setting in kwargs: if old_env.get(setting): os.environ[setting] = old_env[setting] else: os.environ.pop(setting, None)
src/prefect/utilities/testing.py
178
prefect
{ "docstring": "\n Temporarily override setting values. \n \n This will _not_ mutate values that have been already been accessed at module\n load time.\n\n This function should only be used for testing.\n\n Example:\n >>> import prefect.settings\n >>> with temporary_settings(PREFECT_ORION_HOST=\"foo\"):\n >>> assert prefect.settings.from_env().orion_host == \"foo\"\n >>> assert prefect.settings.from_env().orion_host is None\n ", "language": "en", "n_whitespaces": 99, "n_words": 44, "vocab_size": 37 }
52
Python
35
1d4218a287ef343f32f1e32482592b471be5df1d
testing.py
53,411
16
109
temporary_settings
https://github.com/PrefectHQ/prefect.git
Move `prefect.settings` to `prefect.settings.from_env()`
172
0
10,795
16
14
55
def parse_semver(version, operator) -> Optional[SemverFilter]: (operator, negated) = handle_operator_negation(operator) try: operator = OPERATOR_TO_DJANGO[operator] except KeyError: raise InvalidSearchQuery("Invalid operation 'IN' for semantic version filter.") version = version if "@" in version else f"{SEMVER_FAKE_PACKAGE}@{version}" parsed = parse_release_relay(version) parsed_version = parsed.get("version_parsed") if parsed_version: # Convert `pre` to always be a string prerelease = parsed_version["pre"] if parsed_version["pre"] else "" semver_filter = SemverFilter( operator, [ parsed_version["major"], parsed_version["minor"], parsed_version["patch"], parsed_version["revision"], 0 if prerelease else 1, prerelease, ], negated=negated, ) if parsed["package"] and parsed["package"] != SEMVER_FAKE_PACKAGE: semver_filter.package = parsed["package"] return semver_filter else: # Try to parse as a wildcard match package, version = version.split("@", 1) version_parts = [] if version: for part in version.split(".", 3): if part in SEMVER_WILDCARDS: break try: # We assume all ints for a wildcard match - not handling prerelease as # part of these version_parts.append(int(part)) except ValueError: raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE) package = package if package and package != SEMVER_FAKE_PACKAGE else None return SemverFilter("exact", version_parts, package, negated) key_conversion_map: Mapping[ str, Callable[[SearchFilter, str, Mapping[str, Union[int, str, datetime]]], Optional[Sequence[any]]], ] = { "environment": _environment_filter_converter, "message": _message_filter_converter, TRANSACTION_STATUS_ALIAS: _transaction_status_filter_converter, "issue.id": _issue_id_filter_converter, USER_DISPLAY_ALIAS: _user_display_filter_converter, ERROR_UNHANDLED_ALIAS: _error_unhandled_filter_converter, "error.handled": _error_handled_filter_converter, TEAM_KEY_TRANSACTION_ALIAS: _team_key_transaction_filter_converter, RELEASE_STAGE_ALIAS: _release_stage_filter_converter, SEMVER_ALIAS: _semver_filter_converter, SEMVER_PACKAGE_ALIAS: _semver_package_filter_converter, SEMVER_BUILD_ALIAS: _semver_build_filter_converter, }
src/sentry/search/events/filter.py
498
sentry
{ "docstring": "\n Attempts to parse a release version using our semver syntax. version should be in\n format `<package_name>@<version>` or `<version>`, where package_name is a string and\n version is a version string matching semver format (https://semver.org/). We've\n slightly extended this format to allow up to 4 integers. EG\n - [email protected]\n - [email protected]\n - 1.2.3.4\n - 1.2.3.4-alpha\n - 1.*\n ", "language": "en", "n_whitespaces": 91, "n_words": 55, "vocab_size": 39 }
191
Python
132
4ffb52489e662029a08169351cd997d525977e88
filter.py
98,450
50
224
parse_semver
https://github.com/getsentry/sentry.git
fix(events-search): Return helpful error message on semver filter (#33785) 'IN' type queries currently raise an unhandled KeyError; raise an InvalidSearchQuery instead.
651
0
19,571
18
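The least obvious part of `parse_semver` above is the wildcard branch: split off the package, then collect leading integer components until a wildcard token. A self-contained sketch of that branch — `WILDCARDS` and `parse_wildcard` are local stand-ins, not Sentry's constants:

```python
# Collect leading integer version parts until a wildcard, as in the
# "try to parse as a wildcard match" branch above.
WILDCARDS = {"X", "*"}

def parse_wildcard(version):
    package, _, ver = version.rpartition("@")
    parts = []
    for part in ver.split(".", 3):
        if part in WILDCARDS:
            break
        parts.append(int(part))   # a non-integer part -> invalid search query
    return package or None, parts

assert parse_wildcard("[email protected].*") == ("sentry", [1, 2])
assert parse_wildcard("2.0.*") == (None, [2, 0])
```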
5
2
def test_schedules_with_save_and_resume(self):
tests/training/learning_rate_schedulers/cosine_test.py
13
allennlp
{ "docstring": "Make sure scheduler will resume with the right state.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
2
Python
2
39b3c96181ab9f33a44b4fe591b348b5b48ecf76
cosine_test.py
280,865
15
130
test_schedules_with_save_and_resume
https://github.com/allenai/allennlp.git
Dependabot GitHub Actions (#5640) * chore: Included githubactions in the dependabot config This should help with keeping the GitHub actions updated on new releases. This will also help with keeping it secure. Dependabot helps in keeping the supply chain secure https://docs.github.com/en/code-security/dependabot GitHub actions up to date https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot https://github.com/ossf/scorecard/blob/main/docs/checks.md#dependency-update-tool Signed-off-by: naveensrinivasan <[email protected]> * floats need approximate math Co-authored-by: naveensrinivasan <[email protected]>
9
0
83,445
6
3
14
def to_bytes(self, *, exclude=tuple()): self._validate_serialization_attrs() serialize = {} if hasattr(self, "cfg") and self.cfg is not None: serialize["cfg"] = lambda: srsly.json_dumps(self.cfg) serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude) serialize["kb"] = self.kb.to_bytes serialize["model"] = self.model.to_bytes return util.to_bytes(serialize, exclude)
spacy/pipeline/legacy/entity_linker.py
165
spaCy
{ "docstring": "Serialize the pipe to a bytestring.\n\n exclude (Iterable[str]): String names of serialization fields to exclude.\n RETURNS (bytes): The serialized object.\n\n DOCS: https://spacy.io/api/entitylinker#to_bytes\n ", "language": "en", "n_whitespaces": 50, "n_words": 22, "vocab_size": 21 }
33
Python
28
91acc3ea75d219ad07ed2b106e7b8bdcb01516dd
entity_linker.py
111,232
16
99
to_bytes
https://github.com/explosion/spaCy.git
Fix entity linker batching (#9669) * Partial fix of entity linker batching * Add import * Better name * Add `use_gold_ents` option, docs * Change to v2, create stub v1, update docs etc. * Fix error type Honestly no idea what the right type to use here is. ConfigValidationError seems wrong. Maybe a NotImplementedError? * Make mypy happy * Add hacky fix for init issue * Add legacy pipeline entity linker * Fix references to class name * Add __init__.py for legacy * Attempted fix for loss issue * Remove placeholder V1 * formatting * slightly more interesting train data * Handle batches with no usable examples This adds a test for batches that have docs but not entities, and a check in the component that detects such cases and skips the update step as though the batch were empty. * Remove todo about data verification Check for empty data was moved further up so this should be OK now - the case in question shouldn't be possible. * Fix gradient calculation The model doesn't know which entities are not in the kb, so it generates embeddings for the context of all of them. However, the loss does know which entities aren't in the kb, and it ignores them, as there's no sensible gradient. This has the issue that the gradient will not be calculated for some of the input embeddings, which causes a dimension mismatch in backprop. That should have caused a clear error, but with numpyops it was causing nans to happen, which is another problem that should be addressed separately. This commit changes the loss to give a zero gradient for entities not in the kb. * add failing test for v1 EL legacy architecture * Add nasty but simple working check for legacy arch * Clarify why init hack works the way it does * Clarify use_gold_ents use case * Fix use gold ents related handling * Add tests for no gold ents and fix other tests * Use aligned ents function (not working) This doesn't actually work because the "aligned" ents are gold-only. But if I have a different function that returns the intersection, *then* this will work as desired. * Use proper matching ent check This changes the process when gold ents are not used so that the intersection of ents in the pred and gold is used. * Move get_matching_ents to Example * Use model attribute to check for legacy arch * Rename flag * bump spacy-legacy to lower 3.0.9 Co-authored-by: svlandeg <[email protected]>
100
0
24,362
12
1
15
def test_importable_project_name(self): bad_name = "os" args = ["startproject", bad_name] testproject_dir = os.path.join(self.test_dir, bad_name) out, err = self.run_django_admin(args) self.assertOutput( err, "CommandError: 'os' conflicts with the name of an existing " "Python module and cannot be used as a project name. Please try " "another name.", ) self.assertFalse(os.path.exists(testproject_dir))
tests/admin_scripts/tests.py
112
django
{ "docstring": "\n startproject validates that project name doesn't clash with existing\n Python modules.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
46
Python
42
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,316
12
64
test_importable_project_name
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
146
0
51,927
10
2
12
async def _cancel_and_wait(fut, loop): waiter = loop.create_future() cb = functools.partial(_release_waiter, waiter) fut.add_done_callback(cb) try: fut.cancel() # We cannot wait on *fut* directly to make # sure _cancel_and_wait itself is reliably cancellable. await waiter finally: fut.remove_done_callback(cb) # This is *not* a @coroutine! It is just an iterator (yielding Futures).
python3.10.4/Lib/asyncio/tasks.py
87
XX-Net
{ "docstring": "Cancel the *fut* future or task and wait until it completes.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
47
Python
41
8198943edd73a363c266633e1aa5b2a9e9c9f526
tasks.py
220,813
9
48
_cancel_and_wait
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
100
0
56,126
10
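The `_cancel_and_wait` record above exists so that cancelling a future and waiting for the cancellation to finish is itself reliably cancellable. A hedged sketch of the user-facing shape of that pattern — cancel, then await until cleanup has actually run:

```python
# Cancel a task, then wait for its cancellation (and cleanup) to complete.
import asyncio

async def slow():
    try:
        await asyncio.sleep(60)
    finally:
        print("cleanup ran before the canceller resumed")

async def main():
    task = asyncio.ensure_future(slow())
    await asyncio.sleep(0)      # let the task start
    task.cancel()
    try:
        await task              # returns only once cancellation completed
    except asyncio.CancelledError:
        print("task done:", task.done())

asyncio.run(main())
```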