| column | dtype | min | max |
| --- | --- | --- | --- |
| complexity | int64 | 1 | 56 |
| n_identifiers | int64 | 1 | 114 |
| code | stringlengths | 19 | 12.7k |
| path | stringlengths | 8 | 134 |
| n_ast_nodes | int64 | 12 | 2.35k |
| ast_errors | stringlengths | 0 | 4.01k |
| repo | stringlengths | 3 | 28 |
| documentation | dict | | |
| n_words | int64 | 2 | 866 |
| language | stringclasses | 1 value | |
| vocab_size | int64 | 2 | 323 |
| commit_id | stringlengths | 40 | 40 |
| file_name | stringlengths | 5 | 79 |
| id | int64 | 243 | 338k |
| nloc | int64 | 1 | 228 |
| token_counts | int64 | 5 | 1.4k |
| fun_name | stringlengths | 1 | 77 |
| url | stringlengths | 31 | 60 |
| commit_message | stringlengths | 3 | 15.3k |
| n_whitespaces | int64 | 1 | 3.23k |
| n_ast_errors | int64 | 0 | 20 |
| d_id | int64 | 74 | 121k |
| ast_levels | int64 | 4 | 29 |
1
10
def test_page_with_og(self) -> None:
    html = b
    parser = OpenGraphParser(html, "text/html; charset=UTF-8")
    result = parser.extract_data()
    self.assertEqual(result.title, "The Rock")
    self.assertEqual(result.description, "The Rock film")
zerver/tests/test_link_embed.py
79
zulip
{ "docstring": "<html>\n <head>\n <meta property=\"og:title\" content=\"The Rock\" />\n <meta property=\"og:type\" content=\"video.movie\" />\n <meta property=\"og:url\" content=\"http://www.imdb.com/title/tt0117500/\" />\n <meta property=\"og:image\" content=\"http://ia.media-imdb.com/images/rock.jpg\" />\n <meta property=\"og:description\" content=\"The Rock film\" />\n </head>\n </html>", "language": "en", "n_whitespaces": 96, "n_words": 27, "vocab_size": 18 }
22
Python
19
327ff9ea0f5e4712a34d767fee55a549cc1d3f39
test_link_embed.py
83,638
14
46
test_page_with_og
https://github.com/zulip/zulip.git
preview: Use a dataclass for the embed data. This is significantly cleaner than passing around `Dict[str, Any]` all of the time.
56
0
17,698
9
1
4
def upgrade():
    op.create_index('idx_log_event', 'log', ['event'], unique=False)
airflow/migrations/versions/0109_1de7bc13c950_add_index_for_event_in_log.py
40
airflow
{ "docstring": "Apply Add index for ``event`` column in ``log`` table.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
6
Python
6
5d8cda8c5be42c8daaaa904d29a1011833c0c699
0109_1de7bc13c950_add_index_for_event_in_log.py
48,280
2
21
upgrade
https://github.com/apache/airflow.git
Add index for event column in log table (#23625)
12
0
9,421
9
2
6
def get_fields(self, include_parents=True, include_hidden=False):
    if include_parents is False:
        include_parents = PROXY_PARENTS
    return self._get_fields(
        include_parents=include_parents, include_hidden=include_hidden
    )
django/db/models/options.py
55
django
{ "docstring": "\n Return a list of fields associated to the model. By default, include\n forward and reverse fields, fields derived from inheritance, but not\n hidden fields. The returned fields can be changed using the parameters:\n\n - include_parents: include fields derived from inheritance\n - include_hidden: include fields that have a related_name that\n starts with a \"+\"\n ", "language": "en", "n_whitespaces": 123, "n_words": 53, "vocab_size": 40 }
16
Python
15
9c19aff7c7561e3a82978a272ecdaad40dda5c00
options.py
205,717
6
35
get_fields
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
66
0
51,174
8
1
5
def test_location_present(self):
    response = self.get(4)
    self.assertContains(response, "The North Pole", 1)
wagtail/contrib/modeladmin/tests/test_page_modeladmin.py
42
wagtail
{ "docstring": "\n The location should appear once, in the field listing\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
10
Python
10
d10f15e55806c6944827d801cd9c2d53f5da4186
test_page_modeladmin.py
73,205
3
24
test_location_present
https://github.com/wagtail/wagtail.git
Reformat with black
31
0
15,989
8
1
5
def _patch_hf_hub_tqdm():
    old_tqdm = huggingface_hub.file_download.tqdm
    huggingface_hub.file_download.tqdm = tqdm
    yield
    huggingface_hub.file_download.tqdm = old_tqdm
src/transformers/utils/hub.py
48
transformers
{ "docstring": "\n A context manager to make huggingface hub use the tqdm version of Transformers (which is controlled by some utils)\n in logging.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 21 }
12
Python
7
5cd40323684c183c30b34758aea1e877996a7ac9
hub.py
32,820
5
27
_patch_hf_hub_tqdm
https://github.com/huggingface/transformers.git
Use new huggingface_hub tools for download models (#18438) * Draft new cached_file * Initial draft for config and model * Small fixes * Fix first batch of tests * Look in cache when internet is down * Fix last tests * Bad black, not fixing all quality errors * Make diff less * Implement change for TF and Flax models * Add tokenizer and feature extractor * For compatibility with main * Add utils to move the cache and auto-do it at first use. * Quality * Deal with empty commit shas * Deal with empty etag * Address review comments
27
0
5,987
8
2
16
def test_failed_execution(self, api, started_job, batch):
    jobs = [started_job for _ in range(49)]
    batch.execute.side_effect = [batch, batch, None]

    update_in_batch(api=api, jobs=jobs)

    assert started_job.update_job.call_count == 49
    assert len(api.new_batch.return_value) == 49
    assert batch.execute.call_count == 3
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py
111
airbyte
{ "docstring": "Should execute batch until there are no failed tasks", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
31
Python
25
a3aae8017a0a40ff2006e2567f71dccb04c997a5
test_async_job.py
3,777
7
74
test_failed_execution
https://github.com/airbytehq/airbyte.git
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <[email protected]> Co-authored-by: Eugene Kulak <[email protected]>
80
0
559
10
1
45
async def test_logbook_entity_matches_only_multiple(hass, hass_client, recorder_mock):
    await async_setup_component(hass, "logbook", {})
    assert await async_setup_component(
        hass,
        "switch",
        {
            "switch": {
                "platform": "template",
                "switches": {
                    "test_template_switch": {
                        "value_template": "{{ states.switch.test_state.state }}",
                        "turn_on": {
                            "service": "switch.turn_on",
                            "entity_id": "switch.test_state",
                        },
                        "turn_off": {
                            "service": "switch.turn_off",
                            "entity_id": "switch.test_state",
                        },
                    }
                },
            }
        },
    )
    await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    # Entity added (should not be logged)
    hass.states.async_set("switch.test_state", STATE_ON)
    hass.states.async_set("light.test_state", STATE_ON)
    await hass.async_block_till_done()

    # First state change (should be logged)
    hass.states.async_set("switch.test_state", STATE_OFF)
    hass.states.async_set("light.test_state", STATE_OFF)
    await hass.async_block_till_done()

    switch_turn_off_context = ha.Context(
        id="9c5bd62de45711eaaeb351041eec8dd9",
        user_id="9400facee45711eaa9308bfd3d19e474",
    )
    hass.states.async_set(
        "switch.test_state", STATE_ON, context=switch_turn_off_context
    )
    hass.states.async_set("light.test_state", STATE_ON, context=switch_turn_off_context)
    await hass.async_block_till_done()
    await hass.async_add_executor_job(trigger_db_commit, hass)
    await hass.async_block_till_done()
    await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)

    client = await hass_client()

    # Today time 00:00:00
    start = dt_util.utcnow().date()
    start_date = datetime(start.year, start.month, start.day)

    # Test today entries with filter by end_time
    end_time = start + timedelta(hours=24)
    response = await client.get(
        f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=switch.test_state,light.test_state&entity_matches_only"
    )
    assert response.status == HTTPStatus.OK
    json_dict = await response.json()

    assert len(json_dict) == 4

    assert json_dict[0]["entity_id"] == "switch.test_state"
    assert json_dict[1]["entity_id"] == "light.test_state"
    assert json_dict[2]["entity_id"] == "switch.test_state"
    assert json_dict[2]["context_user_id"] == "9400facee45711eaa9308bfd3d19e474"
    assert json_dict[3]["entity_id"] == "light.test_state"
    assert json_dict[3]["context_user_id"] == "9400facee45711eaa9308bfd3d19e474"
tests/components/logbook/test_init.py
680
core
{ "docstring": "Test the logbook view with a multiple entities and entity_matches_only.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
174
Python
100
982e314de630de2fe8e379b6f1106ec9fa945335
test_init.py
298,435
62
381
test_logbook_entity_matches_only_multiple
https://github.com/home-assistant/core.git
Use recorder_mock in tests (#70363) Co-authored-by: Paulus Schoutsen <[email protected]>
684
0
97,379
19
3
1
async def test_gather_is_robust_with_return_types_that_break_equality_checks():
tests/utilities/test_asyncio.py
13
prefect
{ "docstring": "\n Some libraries like pandas override the equality operator and can fail if gather\n performs an __eq__ check with the GatherIncomplete type\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 20 }
3
Python
3
cfe630e97a5942c285b25d3bea5f1a7a47c4d9c5
test_asyncio.py
54,599
7
54
test_gather_is_robust_with_return_types_that_break_equality_checks
https://github.com/PrefectHQ/prefect.git
Fix issue where gather can fail when a task returns a pandas object
6
0
11,107
6
1
3
def _valuechoice_staticmethod_helper(orig_func):
    orig_func.__doc__ +=
    return orig_func
nni/retiarii/nn/pytorch/api.py
22
nni
{ "docstring": "\n Notes\n -----\n This function performs lazy evaluation.\n Only the expression will be recorded when the function is called.\n The real evaluation happens when the inner value choice has determined its final decision.\n If no value choice is contained in the parameter list, the evaluation will be intermediate.", "language": "en", "n_whitespaces": 89, "n_words": 47, "vocab_size": 35 }
6
Python
6
a36dc07e8d39ec4438fd660c98f6f4551ff5f4a6
api.py
111,722
9
12
_valuechoice_staticmethod_helper
https://github.com/microsoft/nni.git
Composition of `ValueChoice` (#4435)
12
0
24,473
7
3
9
def get_buttons_from_dialog(dialog, channel):
    buttons = None
    if channel == "Follow":
        # get follow buttons. This approach will find the follow buttons and
        # ignore the Unfollow/Requested buttons.
        buttons = dialog.find_elements(
            By.XPATH, read_xpath(get_buttons_from_dialog.__name__, "follow_button")
        )
    elif channel == "Unfollow":
        buttons = dialog.find_elements(
            By.XPATH, read_xpath(get_buttons_from_dialog.__name__, "unfollow_button")
        )
    return buttons
instapy/unfollow_util.py
105
InstaPy
{ "docstring": "Gets buttons from the `Followers` or `Following` dialog boxes", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
47
Python
31
2a157d452611d37cf50ccb7d56ff1a06e9790ecb
unfollow_util.py
5,815
11
61
get_buttons_from_dialog
https://github.com/InstaPy/InstaPy.git
PR - Fix `extract_text_from_element()`and `find_element*()` to `find_element()` (#6438) * Updated getUserData() and find_element* Signed-off-by: elulcao <[email protected]> Thanks @breuerfelix for reviewing, 🚀 People in this thread please let me know if something is not OK, IG changed a lot these days. 🤗 @her
126
0
838
14
1
3
def circumcenter(self):
    return self.center
sympy/geometry/polygon.py
19
sympy
{ "docstring": "\n Alias for center.\n\n Examples\n ========\n\n >>> from sympy import RegularPolygon, Point\n >>> rp = RegularPolygon(Point(0, 0), 5, 4)\n >>> rp.circumcenter\n Point2D(0, 0)\n ", "language": "en", "n_whitespaces": 79, "n_words": 22, "vocab_size": 20 }
4
Python
4
498015021131af4dbb07eb110e5badaba8250c7b
polygon.py
196,293
2
10
circumcenter
https://github.com/sympy/sympy.git
Updated import locations
18
0
47,793
6
15
18
def is_maximal_matching(G, matching):
    if isinstance(matching, dict):
        matching = matching_dict_to_set(matching)
    # If the given set is not a matching, then it is not a maximal matching.
    edges = set()
    nodes = set()
    for edge in matching:
        if len(edge) != 2:
            raise nx.NetworkXError(f"matching has non-2-tuple edge {edge}")
        u, v = edge
        if u not in G or v not in G:
            raise nx.NetworkXError(f"matching contains edge {edge} with node not in G")
        if u == v:
            return False
        if not G.has_edge(u, v):
            return False
        if u in nodes or v in nodes:
            return False
        nodes.update(edge)
        edges.add(edge)
        edges.add((v, u))
    # A matching is maximal if adding any new edge from G to it
    # causes the resulting set to match some node twice.
    # Be careful to check for adding selfloops
    for u, v in G.edges:
        if (u, v) not in edges:
            # could add edge (u, v) to edges and have a bigger matching
            if u not in nodes and v not in nodes and u != v:
                return False
    return True
networkx/algorithms/matching.py
276
networkx
{ "docstring": "Return True if ``matching`` is a maximal matching of ``G``\n\n A *maximal matching* in a graph is a matching in which adding any\n edge would cause the set to no longer be a valid matching.\n\n Parameters\n ----------\n G : NetworkX graph\n\n matching : dict or set\n A dictionary or set representing a matching. If a dictionary, it\n must have ``matching[u] == v`` and ``matching[v] == u`` for each\n edge ``(u, v)`` in the matching. If a set, it must have elements\n of the form ``(u, v)``, where ``(u, v)`` is an edge in the\n matching.\n\n Returns\n -------\n bool\n Whether the given set or dictionary represents a valid maximal\n matching in the graph.\n\n ", "language": "en", "n_whitespaces": 191, "n_words": 112, "vocab_size": 66 }
169
Python
84
28b3014d68d2b4e40d3e02219770296a827bd55c
matching.py
176,371
25
168
is_maximal_matching
https://github.com/networkx/networkx.git
Update matching functions for error validation and speed (#4897) * First steps to update matching functions for #4644 Expand tests Change API to raise NetworkXError when matching involves nodes not in G Update is_*_matching to 100+ times faster. * improve matching_dict_to_set and docs for min_weight_matching * fix sphinx error
371
0
41,857
13
4
8
def uri(self) -> Optional[str]:
    if self._uri:
        return self._uri

    if self._local_path and Path(self._local_path).exists():
        return "file://" + self._local_path

    return None
python/ray/air/checkpoint.py
74
ray
{ "docstring": "Return checkpoint URI, if available.\n\n This will return a URI to cloud storage if this checkpoint is\n persisted on cloud, or a local ``file://`` URI if this checkpoint\n is persisted on local disk and available on the current node.\n\n In all other cases, this will return None. Users can then choose to\n persist to cloud with\n :meth:`Checkpoint.to_uri() <ray.air.Checkpoint.to_uri>`.\n\n Example:\n\n >>> from ray.air import Checkpoint\n >>> checkpoint = Checkpoint.from_uri(\"s3://some-bucket/some-location\")\n >>> assert checkpoint.uri == \"s3://some-bucket/some-location\"\n >>> checkpoint = Checkpoint.from_dict({\"data\": 1})\n >>> assert checkpoint.uri == None\n\n Returns:\n Checkpoint URI if this URI is reachable from the current node (e.g.\n cloud storage or locally available file URI).\n\n ", "language": "en", "n_whitespaces": 243, "n_words": 103, "vocab_size": 62 }
18
Python
14
1dede1c296a29332171df87b31d9ba92c26b40f7
checkpoint.py
128,028
29
44
uri
https://github.com/ray-project/ray.git
[air] Add `Checkpoint.uri` to return checkpoint URI, if available (#28731) A common ask is to retrieve the URI of a cloud checkpoint, e.g. after training. This PR introduces a property to the `Checkpoint` class that will return a URI if available and reachable from the local node (i.e. cloud storage or locally available file). If accepted, we should then return URI checkpoints from Tune if syncing to cloud is enabled. Signed-off-by: Kai Fricke <[email protected]>
68
0
28,588
11
40
14
def nD(i=None, brute=None, *, n=None, m=None):
    from sympy.integrals.integrals import integrate
    from sympy.functions.special.polynomials import laguerre
    from sympy.abc import x
sympy/functions/combinatorial/numbers.py
67
sympy
{ "docstring": "return the number of derangements for: ``n`` unique items, ``i``\n items (as a sequence or multiset), or multiplicities, ``m`` given\n as a sequence or multiset.\n\n Examples\n ========\n\n >>> from sympy.utilities.iterables import generate_derangements as enum\n >>> from sympy.functions.combinatorial.numbers import nD\n\n A derangement ``d`` of sequence ``s`` has all ``d[i] != s[i]``:\n\n >>> set([''.join(i) for i in enum('abc')])\n {'bca', 'cab'}\n >>> nD('abc')\n 2\n\n Input as iterable or dictionary (multiset form) is accepted:\n\n >>> assert nD([1, 2, 2, 3, 3, 3]) == nD({1: 1, 2: 2, 3: 3})\n\n By default, a brute-force enumeration and count of multiset permutations\n is only done if there are fewer than 9 elements. There may be cases when\n there is high multiplicty with few unique elements that will benefit\n from a brute-force enumeration, too. For this reason, the `brute`\n keyword (default None) is provided. When False, the brute-force\n enumeration will never be used. When True, it will always be used.\n\n >>> nD('1111222233', brute=True)\n 44\n\n For convenience, one may specify ``n`` distinct items using the\n ``n`` keyword:\n\n >>> assert nD(n=3) == nD('abc') == 2\n\n Since the number of derangments depends on the multiplicity of the\n elements and not the elements themselves, it may be more convenient\n to give a list or multiset of multiplicities using keyword ``m``:\n\n >>> assert nD('abc') == nD(m=(1,1,1)) == nD(m={1:3}) == 2\n\n ", "language": "en", "n_whitespaces": 304, "n_words": 217, "vocab_size": 140 }
18
Python
14
e0dc14eca132f37c5f49369eb4051eae37c9b119
numbers.py
197,011
67
562
nD
https://github.com/sympy/sympy.git
Refactored import ordering in functions
30
0
48,287
6
2
6
def _temperature_unit(self) -> str:
    if (
        weather_option_temperature_unit := self._weather_option_temperature_unit
    ) is not None:
        return weather_option_temperature_unit

    return self._default_temperature_unit
homeassistant/components/weather/__init__.py
43
core
{ "docstring": "Return the converted unit of measurement for temperature.\n\n Should not be set by integrations.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
17
Python
15
90e1fb6ce2faadb9a35fdbe1774fce7b4456364f
__init__.py
314,204
10
26
_temperature_unit
https://github.com/home-assistant/core.git
Weather unit conversion (#73441) Co-authored-by: Erik <[email protected]>
67
0
112,812
9
1
7
def set_potential_energy(self, scalar):
    sympy_deprecation_warning(
        ,
        deprecated_since_version="1.5",
        active_deprecations_target="deprecated-set-potential-energy",
    )
    self.potential_energy = scalar
sympy/physics/mechanics/particle.py
43
sympy
{ "docstring": "\nThe sympy.physics.mechanics.Particle.set_potential_energy()\nmethod is deprecated. Instead use\n\n P.potential_energy = scalar\n ", "language": "en", "n_whitespaces": 23, "n_words": 10, "vocab_size": 10 }
11
Python
11
807ed058b5804382971f0045fa1395f087ff12cb
particle.py
197,090
12
25
set_potential_energy
https://github.com/sympy/sympy.git
Update the set_potential_energy() deprecation
56
0
48,332
9
1
3
def verify_request_params(params, headers):
tests/sentry/middleware/test_api_gateway.py
15
sentry
{ "docstring": "Wrapper for a callback function for responses.add_callback", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 6 }
3
Python
3
ec6965d597186ae0ecfba786472154f1c3cb7e42
test_api_gateway.py
86,320
3
12
verify_request_params
https://github.com/getsentry/sentry.git
feat(api-gateway): Unit test helpers (#39424) These functions will help with the creation of new test cases for the API gateway
6
0
18,100
6
5
2
def id(self): # type: () -> str
pipenv/patched/notpip/_vendor/distro.py
14
pipenv
{ "docstring": "Return the distro ID of the OS distribution, as a string.\n\n For details, see :func:`distro.id`.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 14 }
7
Python
7
f3166e673fe8d40277b804d35d77dcdb760fc3b3
distro.py
20,041
15
82
id
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
21
0
3,190
6
2
13
def test_boolean_constraints(self):
    for field in (BooleanField(), BooleanField(null=True)):
        with self.subTest(field=field):
            field.set_attributes_from_name("is_nice")
            self.assertIn('"IS_NICE" IN (0,1)', field.db_check(connection))


@unittest.skipUnless(connection.vendor == "oracle", "Oracle tests")
tests/backends/oracle/tests.py
113
@unittest.skipUnless(connection.vendor == "oracle", "Oracle tests")
django
{ "docstring": "Boolean fields have check constraints on their values.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
19
Python
19
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
201,726
5
51
test_boolean_constraints
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
73
1
49,982
13
2
8
def on_predict_batch_begin(self, batch, logs=None):
    if self._should_call_predict_batch_hooks:
        self._call_batch_hook(ModeKeys.PREDICT, "begin", batch, logs=logs)
keras/callbacks.py
52
keras
{ "docstring": "Calls the `on_predict_batch_begin` methods of its callbacks.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict, contains the return value of `model.predict_step`,\n it typically returns a dict with a key 'outputs' containing\n the model's outputs.\n ", "language": "en", "n_whitespaces": 100, "n_words": 38, "vocab_size": 32 }
10
Python
9
84afc5193d38057e2e2badf9c889ea87d80d8fbf
callbacks.py
269,911
3
33
on_predict_batch_begin
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
35
0
80,325
10
5
31
def test_version_managing(self, data_handler): # set up df = pd.DataFrame([ {'a': 1, 'b': dt.datetime(2020, 1, 1)}, {'a': 2, 'b': dt.datetime(2020, 1, 2)}, {'a': 1, 'b': dt.datetime(2020, 1, 3)}, ]) self.set_handler(data_handler, name='pg', tables={'tasks': df}) # ================= retrain cycles ===================== # create folder self.run_sql('create database proj') # -- create model -- self.run_sql( ) self.wait_predictor('proj', 'task_model') assert data_handler().native_query.call_args[0][0] == 'select * from tasks' # tag works in create model ret = self.run_sql('select * from proj.models') assert ret['TAG'][0] == 'first' # use model ret = self.run_sql() assert len(ret) == 3 assert ret.predicted[0] == 42 # -- retrain predictor with tag -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'second'}) # get current model ret = self.run_sql('select * from proj.models') # check target assert ret['PREDICT'][0] == 'b' # check label assert ret['TAG'][0] == 'second' # check integration sql assert data_handler().native_query.call_args[0][0] == 'select * from tasks where a=2' # use model ret = self.run_sql() assert ret.predicted[0] == 42 # used model has tag 'second' models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # -- retrain again with active=0 -- data_handler.reset_mock() self.run_sql( ) self.wait_predictor('proj', 'task_model', {'tag': 'third'}) ret = self.run_sql('select * from proj.models') # check target is from previous retrain assert ret['PREDICT'][0] == 'b' # use model ret = self.run_sql() # used model has tag 'second' (previous) models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'second' # ================ working with inactive versions ================= # run 3st version model and check used model version ret = self.run_sql() models = self.get_models() model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # one-line query model by version ret = self.run_sql('SELECT * from proj.task_model.3 where a=1 and b=2') model_id = ret.predictor_id[0] assert models[model_id].label == 'third' # not existing version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.4 where a=1 and b=2', ) assert 'does not exists' in str(exc_info.value) # ================== managing versions ========================= # show models command # Show models <from | in> <project> where <expr> ret = self.run_sql('Show models') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models from proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql('Show models in proj') assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql("Show models where name='task_model'") assert len(ret) == 1 and ret['NAME'][0] == 'task_model' ret = self.run_sql("Show models from proj where name='xxx'") assert len(ret) == 0 # ---------------- # See all versions ret = self.run_sql('select * from proj.models_versions') # we have all tags in versions assert set(ret['TAG']) == {'first', 'second', 'third'} # Set active selected version self.run_sql() # get active version ret = self.run_sql('select * from proj.models_versions where active = 1') assert ret['TAG'][0] == 'first' # use active version ? 
# Delete specific version self.run_sql() # deleted version not in list ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 2 assert 'second' not in ret['TAG'] # try to use deleted version with pytest.raises(Exception) as exc_info: self.run_sql( 'SELECT * from proj.task_model.2 where a=1', ) assert 'does not exists' in str(exc_info.value) # exception with deleting active version with pytest.raises(Exception) as exc_info: self.run_sql() assert 'is not found' in str(exc_info.value) # drop predictor and check model is deleted and no versions self.run_sql('drop predictor proj.task_model') ret = self.run_sql('select * from proj.models') assert len(ret) == 0 ret = self.run_sql('select * from proj.models_versions') assert len(ret) == 0
tests/unit/test_project_structure.py
1,293
mindsdb
{ "docstring": "\n CREATE PREDICTOR proj.task_model\n from pg (select * from tasks)\n PREDICT a\n using engine='dummy_ml', tag = 'first'\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n retrain proj.task_model\n from pg (select * from tasks where a=2)\n PREDICT b\n using tag = 'second'\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n retrain proj.task_model\n from pg (select * from tasks where a=2)\n PREDICT a\n using tag='third', active=0\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model.3 as m\n \n update proj.models_versions \n set active=1\n where version=1 and name='task_model' \n \n delete from proj.models_versions \n where version=2 \n and name='task_model'\n \n delete from proj.models_versions \n where version=3 \n and model='task_model'\n ", "language": "en", "n_whitespaces": 654, "n_words": 109, "vocab_size": 43 }
536
Python
173
3f1a5c30c2ccbd78b21f1f41b7dfdfca87bb7135
test_project_structure.py
117,547
130
716
test_version_managing
https://github.com/mindsdb/mindsdb.git
update and delete model version renaming (predictor->model)
1,445
0
26,025
13
3
18
def _compute_interactions(self, node):
    r
    # Note:
    # - Case of no interactions is already captured before function call.
    # - This is for nodes that are already split and have a
    #   node.split_info.feature_idx.
    allowed_features = set()
    interaction_cst_indices = []
    for i in node.interaction_cst_indices:
        if node.split_info.feature_idx in self.interaction_cst[i]:
            interaction_cst_indices.append(i)
            allowed_features.update(self.interaction_cst[i])
    return (
        np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)),
        interaction_cst_indices,
    )
sklearn/ensemble/_hist_gradient_boosting/grower.py
128
scikit-learn
{ "docstring": "Compute features allowed by interactions to be inherited by child nodes.\n\n Example: Assume constraints [{0, 1}, {1, 2}].\n 1 <- Both constraint groups could be applied from now on\n / \\\n 1 2 <- Left split still fulfills both constraint groups.\n / \\ / \\ Right split at feature 2 has only group {1, 2} from now on.\n\n LightGBM uses the same logic for overlapping groups. See\n https://github.com/microsoft/LightGBM/issues/4481 for details.\n\n Parameters:\n ----------\n node : TreeNode\n A node that might have children. Based on its feature_idx, the interaction\n constraints for possible child nodes are computed.\n\n Returns\n -------\n allowed_features : ndarray, dtype=uint32\n Indices of features allowed to split for children.\n interaction_cst_indices : list of ints\n Indices of the interaction sets that have to be applied on splits of\n child nodes. The fewer sets the stronger the constraint as fewer sets\n contain fewer features.\n ", "language": "en", "n_whitespaces": 333, "n_words": 141, "vocab_size": 90 }
56
Python
47
5ceb8a6a031ddff26a7ede413db1b53edb64166a
grower.py
261,258
37
81
_compute_interactions
https://github.com/scikit-learn/scikit-learn.git
ENH FEA add interaction constraints to HGBT (#21020) Co-authored-by: Loïc Estève <[email protected]>
193
0
76,716
13
1
2
def groups(self):
    return self["groups"]
packages/python/plotly/plotly/graph_objs/sankey/_node.py
22
plotly.py
{ "docstring": "\n Groups of nodes. Each group is defined by an array with the\n indices of the nodes it contains. Multiple groups can be\n specified.\n\n The 'groups' property is an info array that may be specified as:\n * a 2D list where:\n The 'groups[i][j]' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n list\n ", "language": "en", "n_whitespaces": 141, "n_words": 59, "vocab_size": 44 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_node.py
233,318
2
11
groups
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
64,762
7
1
49
def test_exec_dataflow_runner(self, gcs_hook, dataflow_hook_mock, beam_hook_mock, persist_link_mock):
    dataflow_config = DataflowConfiguration()
    self.operator.runner = "DataflowRunner"
    self.operator.dataflow_config = dataflow_config
    gcs_provide_file = gcs_hook.return_value.provide_file
    self.operator.execute(None)
    job_name = dataflow_hook_mock.build_dataflow_job_name.return_value
    dataflow_hook_mock.assert_called_once_with(
        gcp_conn_id=dataflow_config.gcp_conn_id,
        delegate_to=dataflow_config.delegate_to,
        poll_sleep=dataflow_config.poll_sleep,
        impersonation_chain=dataflow_config.impersonation_chain,
        drain_pipeline=dataflow_config.drain_pipeline,
        cancel_timeout=dataflow_config.cancel_timeout,
        wait_until_finished=dataflow_config.wait_until_finished,
    )
    expected_options = {
        'project': dataflow_hook_mock.return_value.project_id,
        'job_name': job_name,
        'staging_location': 'gs://test/staging',
        'output': 'gs://test/output',
        'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION},
        'region': 'us-central1',
    }
    gcs_provide_file.assert_called_once_with(object_url=PY_FILE)
    persist_link_mock.assert_called_once_with(
        self.operator,
        None,
        expected_options['project'],
        expected_options['region'],
        self.operator.dataflow_job_id,
    )
    beam_hook_mock.return_value.start_python_pipeline.assert_called_once_with(
        variables=expected_options,
        py_file=gcs_provide_file.return_value.__enter__.return_value.name,
        py_options=PY_OPTIONS,
        py_interpreter=PY_INTERPRETER,
        py_requirements=None,
        py_system_site_packages=False,
        process_line_callback=mock.ANY,
    )
    dataflow_hook_mock.return_value.wait_for_done.assert_called_once_with(
        job_id=self.operator.dataflow_job_id,
        job_name=job_name,
        location='us-central1',
        multiple_jobs=False,
        project_id=dataflow_config.project_id,
    )
    dataflow_hook_mock.return_value.provide_authorized_gcloud.assert_called_once_with()
tests/providers/apache/beam/operators/test_beam.py
416
airflow
{ "docstring": "Test DataflowHook is created and the right args are passed to\n start_python_dataflow.\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
75
Python
66
4a5250774be8f48629294785801879277f42cc62
test_beam.py
42,750
49
268
test_exec_dataflow_runner
https://github.com/apache/airflow.git
Added missing project_id to the wait_for_job (#24020)
538
0
7,719
12
3
19
def _get_no_faces(self) -> Generator[str, None, None]:
    self.output_message = "Frames with no faces"
    for frame in tqdm(cast(List[Dict[str, str]], self._items),
                      desc=self.output_message,
                      leave=False):
        logger.trace(frame)  # type:ignore
        frame_name = frame["frame_fullname"]
        if not self._alignments.frame_has_faces(frame_name):
            logger.debug("Returning: '%s'", frame_name)
            yield frame_name
tools/alignments/jobs.py
137
faceswap
{ "docstring": " yield each frame that has no face match in alignments file\n\n Yields\n ------\n str\n The frame name of any frames which have no faces\n ", "language": "en", "n_whitespaces": 64, "n_words": 24, "vocab_size": 22 }
34
Python
32
e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1
jobs.py
101,722
17
86
_get_no_faces
https://github.com/deepfakes/faceswap.git
Alignments Tool - Typing, Documentation + Re-org
169
0
21,126
12
12
5
def CopyIcons(dstpath, srcpath):
    if isinstance(srcpath, str):
        # Just a single string, make it a one-element list.
        srcpath = [srcpath]
PyInstaller/utils/win32/icon.py
36
pyinstaller
{ "docstring": "\n Called from building/api.py to handle icons. If the input was by --icon on the command line, srcpath is a single\n string. However, it is possible to modify the spec file adding icon=['foo.ico','bar.ico'] to the EXE() statement.\n In that case, srcpath is a list of strings.\n\n The string format is either path-to-.ico or path-to-.exe,n for n an integer resource index in the .exe. In either\n case, the path can be relative or absolute.\n ", "language": "en", "n_whitespaces": 91, "n_words": 72, "vocab_size": 56 }
19
Python
18
3aad9af18641aa2181dd86cececc2aeb8a0dba06
icon.py
262,800
44
376
CopyIcons
https://github.com/pyinstaller/pyinstaller.git
Icon translation using PIL (#6697) Convert icons into the correct platform dependent format using PIL/Pillow if installed.
39
0
77,378
9
5
6
def local_ray_callbacks(callbacks=None):
    global_callbacks = callbacks is None
    if global_callbacks:
        callbacks, RayDaskCallback.ray_active = (RayDaskCallback.ray_active, set())
    try:
        yield callbacks or ()
    finally:
        if global_callbacks:
            RayDaskCallback.ray_active = callbacks
python/ray/util/dask/callbacks.py
82
ray
{ "docstring": "\n Allows Dask-Ray callbacks to work with nested schedulers.\n\n Callbacks will only be used by the first started scheduler they encounter.\n This means that only the outermost scheduler will use global callbacks.\n ", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 27 }
25
Python
18
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
callbacks.py
133,122
9
48
local_ray_callbacks
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
72
0
29,930
12
1
17
def test_local_media_retention(self) -> None:
    # Advance 31 days (in seconds)
    self.reactor.advance(31 * 24 * 60 * 60)

    # Check that media has been correctly purged.
    # Local media accessed <30 days ago should still exist.
    # Remote media should be unaffected.
    self._assert_if_mxc_uris_purged(
        purged=[
            (
                self.hs.config.server.server_name,
                self.local_not_recently_accessed_media,
            ),
            (self.hs.config.server.server_name, self.local_never_accessed_media),
        ],
        not_purged=[
            (self.hs.config.server.server_name, self.local_recently_accessed_media),
            (self.remote_server_name, self.remote_recently_accessed_media),
            (self.remote_server_name, self.remote_not_recently_accessed_media),
        ],
    )
tests/rest/media/test_media_retention.py
158
synapse
{ "docstring": "\n Tests that local media that have not been accessed recently is purged, while\n cached remote media is unaffected.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 15 }
59
Python
47
2fc787c341ff540e5880932f116498ec0ed7a2c2
test_media_retention.py
248,468
20
106
test_local_media_retention
https://github.com/matrix-org/synapse.git
Add config options for media retention (#12732)
287
0
72,296
14
4
14
def add_simple_roots(self, root1, root2):
    alpha = self.simple_roots()
    if root1 > len(alpha) or root2 > len(alpha):
        raise ValueError("You've used a root that doesn't exist!")
    a1 = alpha[root1]
    a2 = alpha[root2]
    newroot = [_a1 + _a2 for _a1, _a2 in zip(a1, a2)]
    return newroot
sympy/liealgebras/root_system.py
110
sympy
{ "docstring": "Add two simple roots together\n\n The function takes as input two integers, root1 and root2. It then\n uses these integers as keys in the dictionary of simple roots, and gets\n the corresponding simple roots, and then adds them together.\n\n Examples\n ========\n\n >>> from sympy.liealgebras.root_system import RootSystem\n >>> c = RootSystem(\"A3\")\n >>> newroot = c.add_simple_roots(1, 2)\n >>> newroot\n [1, 0, -1, 0]\n\n ", "language": "en", "n_whitespaces": 139, "n_words": 61, "vocab_size": 47 }
42
Python
36
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
root_system.py
198,389
8
69
add_simple_roots
https://github.com/sympy/sympy.git
Cleanup loops and ranges
102
0
48,901
10
6
14
def test_get_release_wheel_url():
    # This should be a commit for which wheels have already been built for
    # all platforms and python versions at
    # `s3://ray-wheels/releases/2.2.0/<commit>/`.
    test_commits = {"2.2.0": "b6af0887ee5f2e460202133791ad941a41f15beb"}
    for sys_platform in ["darwin", "linux", "win32"]:
        for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:
            for version, commit in test_commits.items():
                if sys_platform == "win32" and py_version == (3, 6):
                    # Windows wheels are not built for py3.6 anymore
                    continue
                url = get_release_wheel_url(commit, sys_platform, version, py_version)
                assert requests.head(url).status_code == 200, url
python/ray/tests/test_runtime_env.py
136
ray
{ "docstring": "Test the code that generates the filenames of the `release` branch wheels.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
74
Python
53
98fef7732852cdb3e9377cd87c1ee1085b894928
test_runtime_env.py
137,593
9
80
test_get_release_wheel_url
https://github.com/ray-project/ray.git
[runtime env] Support python 3.10 for runtime_env conda (#30970) Signed-off-by: Archit Kulkarni <[email protected]> conda environments are isolated, so when runtime_env sets up a conda environment it must download the Ray wheel into the conda environment. It must download the wheel that matches the current Python and Ray version running, otherwise there will be incompatibility issues between the workers that use this runtime_env and the other workers and Ray processes. This PR updates the wheel name format logic to support Python 3.10.
193
0
31,197
15
5
12
def prepare_http01_modules(self) -> None:
    if self.configurator.conf("handle-modules"):
        needed_modules = ["rewrite"]
        if self.configurator.version < (2, 4):
            needed_modules.append("authz_host")
        else:
            needed_modules.append("authz_core")
        for mod in needed_modules:
            if mod + "_module" not in self.configurator.parser.modules:
                self.configurator.enable_mod(mod, temp=True)
certbot-apache/certbot_apache/_internal/http_01.py
139
certbot
{ "docstring": "Make sure that we have the needed modules available for http01", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
30
Python
26
7d9e9a49005de7961e84d2a7c608db57dbab3046
http_01.py
186,649
11
81
prepare_http01_modules
https://github.com/certbot/certbot.git
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
152
0
45,558
14
2
9
def to_reader(self, *args, **kwargs):
    if config.PYARROW_VERSION.major < 8:
        raise NotImplementedError("`pyarrow>=8.0.0` is required to use this method")
    return self.table.to_reader(*args, **kwargs)
src/datasets/table.py
65
datasets
{ "docstring": "\n Convert the Table to a RecordBatchReader.\n\n Note that this method is zero-copy, it merely exposes the same data under a different API.\n\n Args:\n max_chunksize (:obj:`int`, defaults to :obj:`None`)\n Maximum size for RecordBatch chunks. Individual chunks may be smaller depending\n on the chunk layout of individual columns.\n\n Returns:\n :obj:`pyarrow.RecordBatchReader`\n\n <Tip warning={true}>\n\n pyarrow >= 8.0.0 needs to be installed to use this method.\n\n </Tip>\n ", "language": "en", "n_whitespaces": 171, "n_words": 62, "vocab_size": 54 }
19
Python
19
1ea4d091b7a4b83a85b2eeb8df65115d39af3766
table.py
105,694
4
39
to_reader
https://github.com/huggingface/datasets.git
Fast dataset iter (#5030) * Fast dataset iter * Final improvements + some minor fixes * Update src/datasets/arrow_dataset.py Co-authored-by: Quentin Lhoest <[email protected]> * Address comments Co-authored-by: Quentin Lhoest <[email protected]>
51
0
22,190
10
1
21
def test_you_must_be_realm_admin(self) -> None:
    user_profile = self.example_user("hamlet")
    self.login_user(user_profile)

    other_realm = do_create_realm(string_id="other", name="other")
    stream = self.make_stream("other_realm_stream", realm=other_realm)
    result = self.client_delete("/json/streams/" + str(stream.id))
    self.assert_json_error(result, "Invalid stream ID")

    # Even becoming a realm admin doesn't help us for an out-of-realm
    # stream.
    do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)
    result = self.client_delete("/json/streams/" + str(stream.id))
    self.assert_json_error(result, "Invalid stream ID")
zerver/tests/test_subs.py
182
zulip
{ "docstring": "\n You must be on the realm to create a stream.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
51
Python
37
708204290ecebd608a575f76892489a0caad5836
test_subs.py
83,893
13
104
test_you_must_be_realm_admin
https://github.com/zulip/zulip.git
streams: Capitalize "ID" in invalid stream errors in API. This commit changes the error message from "Invalid stream id" to "Invalid stream ID" for cases where invalid stream IDs are passed to API endpoints to make it consistent with other similar error messages.
135
0
17,744
12
11
34
def _async_set_dynamic_options(self) -> None:
    if self.entity_description.ufp_options is not None:
        return

    if self.entity_description.key == _KEY_VIEWER:
        options = [
            {"id": item.id, "name": item.name}
            for item in self.data.api.bootstrap.liveviews.values()
        ]
    elif self.entity_description.key == _KEY_DOORBELL_TEXT:
        default_message = (
            self.data.api.bootstrap.nvr.doorbell_settings.default_message_text
        )
        messages = self.data.api.bootstrap.nvr.doorbell_settings.all_messages
        built_messages = (
            {"id": item.type.value, "name": item.text} for item in messages
        )

        options = [
            {"id": "", "name": f"Default Message ({default_message})"},
            *built_messages,
        ]
    elif self.entity_description.key == _KEY_PAIRED_CAMERA:
        options = [{"id": TYPE_EMPTY_VALUE, "name": "Not Paired"}]
        for camera in self.data.api.bootstrap.cameras.values():
            options.append({"id": camera.id, "name": camera.name})

    self._attr_options = [item["name"] for item in options]
    self._hass_to_unifi_options = {item["name"]: item["id"] for item in options}
    self._unifi_to_hass_options = {item["id"]: item["name"] for item in options}
homeassistant/components/unifiprotect/select.py
416
core
{ "docstring": "Options that do not actually update dynamically.\n\n This is due to possible downstream platforms dependencies on these options.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 18 }
103
Python
60
a2677983a2924366ea13eab416bf286996a64bdb
select.py
308,567
31
252
_async_set_dynamic_options
https://github.com/home-assistant/core.git
Add UniFi Protect select platform (#63337)
396
0
107,315
16
1
8
def test_safedata(self):
    self.assertIsInstance(
        self.encode_decode(mark_safe('<b>Hello Django!</b>')).message,
        SafeData,
    )
    self.assertNotIsInstance(
        self.encode_decode('<b>Hello Django!</b>').message,
        SafeData,
    )
tests/messages_tests/test_cookie.py
70
django
{ "docstring": "\n A message containing SafeData is keeping its safe status when\n retrieved from the message storage.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
12
Python
10
efb4478e484ae61c5fc23563d4e9df1f8f49df49
test_cookie.py
203,142
9
41
test_safedata
https://github.com/django/django.git
Fixed #33458 -- Fixed encoding of messages with empty string as extra_tags.
91
0
50,235
13
1
3
def test_make_tarball_extended(self):
    self.test_make_tarball('のアーカイブ')  # japanese for archive
python3.10.4/Lib/distutils/tests/test_archive_util.py
26
XX-Net
{ "docstring": "\n Mirror test_make_tarball, except filename contains extended\n characters outside the latin charset.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
7
Python
7
8198943edd73a363c266633e1aa5b2a9e9c9f526
test_archive_util.py
223,024
2
12
test_make_tarball_extended
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
21
0
56,854
8
1
7
def effect_mandelbrot(size, extent, quality):
    return Image()._new(core.effect_mandelbrot(size, extent, quality))
src/PIL/Image.py
44
Pillow
{ "docstring": "\n Generate a Mandelbrot set covering the given extent.\n\n :param size: The requested size in pixels, as a 2-tuple:\n (width, height).\n :param extent: The extent to cover, as a 4-tuple:\n (x0, y0, x1, y1).\n :param quality: Quality.\n ", "language": "en", "n_whitespaces": 64, "n_words": 36, "vocab_size": 30 }
8
Python
7
f77aabf28134d93e35ca2d5622759c856333beb9
Image.py
242,898
2
28
effect_mandelbrot
https://github.com/python-pillow/Pillow.git
Update Image.py docstrings. Update Image.py file with a typo in effect_mandelbrot method. The Typo was in docstrings of the effect_mandelbrot method in Image module of PIL.
14
0
69,936
9
2
5
def subprocess_runner(self, runner):
    prev = self._subprocess_runner
    self._subprocess_runner = runner
    try:
        yield
    finally:
        self._subprocess_runner = prev
.venv/lib/python3.8/site-packages/pip/_vendor/pep517/wrappers.py
50
transferlearning
{ "docstring": "A context manager for temporarily overriding the default subprocess\n runner.\n ", "language": "en", "n_whitespaces": 24, "n_words": 10, "vocab_size": 10 }
15
Python
10
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
wrappers.py
63,022
7
28
subprocess_runner
https://github.com/jindongwang/transferlearning.git
upd; format
72
0
13,102
10
14
2
def test_recover_start_from_replica_actor_names(serve_instance):
    # Test failed to deploy with total of 2 replicas,
    # but first constructor call fails.
python/ray/serve/tests/fault_tolerance_tests/test_controller_recovery.py
15
ray
{ "docstring": "Test controller is able to recover starting -> running replicas from\n actor names.\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
18
Python
17
09a6e5336ad6ab3c41e4a16e906c778aee2450bc
test_controller_recovery.py
124,881
62
343
test_recover_start_from_replica_actor_names
https://github.com/ray-project/ray.git
[Serve][Part2] Migrate the tests to use deployment graph api (#26507)
27
0
27,710
6
2
9
def get_profile_context() -> ProfileContext:
    profile_ctx = ProfileContext.get()

    if not profile_ctx:
        raise MissingContextError("No profile context found.")

    return profile_ctx


_PROFILE_ENV_LOCK = threading.Lock()


@contextmanager
src/prefect/context.py
63
@contextmanager
prefect
{ "docstring": "\n Returns a ProfileContext that contains the combination of user profile \n settings and environment variable settings present when the context was initialized\n ", "language": "en", "n_whitespaces": 32, "n_words": 21, "vocab_size": 19 }
21
Python
19
e11fd5aa4905c7c27dbdf6ec49442ee107daebac
context.py
54,856
9
25
get_profile_context
https://github.com/PrefectHQ/prefect.git
Bug fix for PrefectHQ/orion#1383, contains test
38
1
11,161
10
1
16
async def test_no_controller_triggers(hass, client, integration):
    dev_reg = async_get_dev_reg(hass)
    device = dev_reg.async_get_device(
        {get_device_id(client.driver, client.driver.controller.nodes[1])}
    )
    assert device
    assert (
        await async_get_device_automations(
            hass, DeviceAutomationType.TRIGGER, device.id
        )
        == []
    )
tests/components/zwave_js/test_device_trigger.py
98
core
{ "docstring": "Test that we do not get triggers for the controller.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
27
Python
22
41d5256533ec6ef1c102af0a43c7b7f26b8e06fb
test_device_trigger.py
297,253
12
63
test_no_controller_triggers
https://github.com/home-assistant/core.git
Add via_device support to zwave_js (#83219) Co-authored-by: Paulus Schoutsen <[email protected]>
87
0
96,222
15
2
8
def is_tarfile(name):
    try:
        t = open(name)
        t.close()
        return True
    except TarError:
        return False


bltn_open = open
open = TarFile.open
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
60
pipenv
{ "docstring": "Return True if name points to a tar archive that we\n are able to handle, else return False.\n ", "language": "en", "n_whitespaces": 27, "n_words": 18, "vocab_size": 17 }
19
Python
15
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
tarfile.py
21,410
7
26
is_tarfile
https://github.com/pypa/pipenv.git
Vendor in pip 22.1.2
54
0
3,815
10
8
20
def fallback_which(command, location=None, allow_global=False, system=False):
    from .vendor.pythonfinder import Finder

    if not command:
        raise ValueError("fallback_which: Must provide a command to search for...")
    if not isinstance(command, str):
        raise TypeError(f"Provided command must be a string, received {command!r}")
    global_search = system or allow_global
    if location is None:
        global_search = True
    finder = Finder(system=False, global_search=global_search, path=location)
    if is_python_command(command):
        result = find_python(finder, command)
        if result:
            return result
    result = finder.which(command)
    if result:
        return result.path.as_posix()
    return ""
pipenv/core.py
196
pipenv
{ "docstring": "\n A fallback implementation of the `which` utility command that relies exclusively on\n searching the path for commands.\n\n :param str command: The command to search for, optional\n :param str location: The search location to prioritize (prepend to path), defaults to None\n :param bool allow_global: Whether to search the global path, defaults to False\n :param bool system: Whether to use the system python instead of pipenv's python, defaults to False\n :raises ValueError: Raised if no command is provided\n :raises TypeError: Raised if the command provided is not a string\n :return: A path to the discovered command location\n :rtype: str\n ", "language": "en", "n_whitespaces": 131, "n_words": 97, "vocab_size": 58 }
70
Python
51
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
core.py
19,648
18
118
fallback_which
https://github.com/pypa/pipenv.git
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
156
0
3,046
11
3
6
def suggested_unit_of_measurement(self) -> str | None:
    if hasattr(self, "_attr_suggested_unit_of_measurement"):
        return self._attr_suggested_unit_of_measurement
    if hasattr(self, "entity_description"):
        return self.entity_description.suggested_unit_of_measurement
    return None
homeassistant/components/sensor/__init__.py
65
core
{ "docstring": "Return the unit which should be used for the sensor's state.\n\n This can be used by integrations to override automatic unit conversion rules,\n for example to make a temperature sensor display in °C even if the configured\n unit system prefers °F.\n\n For sensors without a `unique_id`, this takes precedence over legacy\n temperature conversion rules only.\n\n For sensors with a `unique_id`, this is applied only if the unit is not set by the user,\n and takes precedence over automatic device-class conversion rules.\n\n Note:\n suggested_unit_of_measurement is stored in the entity registry the first time\n the entity is seen, and then never updated.\n ", "language": "en", "n_whitespaces": 185, "n_words": 100, "vocab_size": 65 }
18
Python
14
6979cd95b0fe85c3ee8eca3dbc9881b8d05591e8
__init__.py
289,746
22
38
suggested_unit_of_measurement
https://github.com/home-assistant/core.git
Add suggested_unit_of_measurement attribute to sensors (#80638) * Add suggested_unit_of_measurement attribute to sensors * Lazy calculation of initial entity options * Add type alias for entity options * Small tweak * Add tests * Store suggested_unit_of_measurement in its own option key * Adapt to renaming of IMPERIAL_SYSTEM * Fix rebase mistakes * Apply suggestions from code review Co-authored-by: epenet <[email protected]> Co-authored-by: epenet <[email protected]>
68
0
88,882
9
6
37
def test_ppo_exploration_setup(self):
    config = copy.deepcopy(ppo.DEFAULT_CONFIG)
    config["num_workers"] = 0  # Run locally.
    config["env_config"] = {"is_slippery": False, "map_name": "4x4"}
    obs = np.array(0)

    # Test against all frameworks.
    for fw in framework_iterator(config):
        # Default Agent should be setup with StochasticSampling.
        trainer = ppo.PPOTrainer(config=config, env="FrozenLake-v1")
        # explore=False, always expect the same (deterministic) action.
        a_ = trainer.compute_single_action(
            obs, explore=False, prev_action=np.array(2), prev_reward=np.array(1.0)
        )
        # Test whether this is really the argmax action over the logits.
        if fw != "tf":
            last_out = trainer.get_policy().model.last_output()
            if fw == "torch":
                check(a_, np.argmax(last_out.detach().cpu().numpy(), 1)[0])
            else:
                check(a_, np.argmax(last_out.numpy(), 1)[0])
        for _ in range(50):
            a = trainer.compute_single_action(
                obs,
                explore=False,
                prev_action=np.array(2),
                prev_reward=np.array(1.0),
            )
            check(a, a_)

        # With explore=True (default), expect stochastic actions.
        actions = []
        for _ in range(300):
            actions.append(
                trainer.compute_single_action(
                    obs, prev_action=np.array(2), prev_reward=np.array(1.0)
                )
            )
        check(np.mean(actions), 1.5, atol=0.2)
        trainer.stop()
rllib/agents/ppo/tests/test_ppo.py
446
ray
{ "docstring": "Tests, whether PPO runs with different exploration setups.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
126
Python
87
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_ppo.py
133,802
33
285
test_ppo_exploration_setup
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
629
0
30,114
22
2
8
def pandas_version_info() -> Tuple[int, ...]:
    return tuple(int(s) for s in pd.__version__.split("."))
src/pandas_profiling/utils/compat.py
52
ydata-profiling
{ "docstring": "\n Get the Pandas version as a tuple of integers,\n akin to `sys.version_info` for the Python version.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 15 }
11
Python
11
5c5a710f23d83ba5ff1dc9ab6fc23b28094560fb
compat.py
191,814
6
31
pandas_version_info
https://github.com/ydataai/ydata-profiling.git
feat: add support for Pandas 1.5 (#1076)
17
0
46,844
11
2
10
def position_cursor(self) -> Control:
    if self._shape is not None:
        _, height = self._shape
        return Control(
            ControlType.CARRIAGE_RETURN,
            (ControlType.ERASE_IN_LINE, 2),
            *(
                (
                    (ControlType.CURSOR_UP, 1),
                    (ControlType.ERASE_IN_LINE, 2),
                )
                * (height - 1)
            )
        )
    return Control()
pipenv/patched/notpip/_vendor/rich/live_render.py
105
pipenv
{ "docstring": "Get control codes to move cursor to beginning of live render.\n\n Returns:\n Control: A control instance that may be printed.\n ", "language": "en", "n_whitespaces": 45, "n_words": 20, "vocab_size": 18 }
33
Python
27
f3166e673fe8d40277b804d35d77dcdb760fc3b3
live_render.py
20,773
20
70
position_cursor
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
250
0
3,537
15
1
4
def _exit_buffer(self) -> None:
    self._buffer_index -= 1
    self._check_buffer()
pipenv/patched/notpip/_vendor/rich/console.py
33
pipenv
{ "docstring": "Leave buffer context, and render content if required.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
8
Python
8
f3166e673fe8d40277b804d35d77dcdb760fc3b3
console.py
20,715
4
18
_exit_buffer
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
29
0
3,497
7
2
6
def is_multiple_state(state_size): return (hasattr(state_size, '__len__') and not isinstance(state_size, tf.TensorShape))
keras/layers/rnn/rnn_utils.py
43
keras
{ "docstring": "Check whether the state_size contains multiple states.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
9
Python
9
01c906c4178db5ae03b7eb2d298a052c952a0667
rnn_utils.py
268,977
3
25
is_multiple_state
https://github.com/keras-team/keras.git
Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory. PiperOrigin-RevId: 428841673
20
0
79,799
11
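A short sketch of what the is_multiple_state check above distinguishes; TensorFlow is assumed installed and the helper is redefined here because it is module-private in Keras.

import tensorflow as tf

def is_multiple_state(state_size):
    # A list/tuple of sizes means multiple states; a TensorShape is a single state.
    return hasattr(state_size, '__len__') and not isinstance(state_size, tf.TensorShape)

print(is_multiple_state([64, 64]))              # True: e.g. an LSTM cell with (h, c) states
print(is_multiple_state(tf.TensorShape([64])))  # False: a single state tensor shape
print(is_multiple_state(64))                    # False: plain int state size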
2
7
def remove(self, event, subscriber): subs = self._subscribers if event not in subs: raise ValueError('No subscribers: %r' % event) subs[event].remove(subscriber)
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py
61
transferlearning
{ "docstring": "\n Remove a subscriber for an event.\n\n :param event: The name of an event.\n :param subscriber: The subscriber to be removed.\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 15 }
19
Python
19
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
util.py
62,204
5
37
remove
https://github.com/jindongwang/transferlearning.git
upd; format
58
0
12,898
11
2
10
def _format_ram(self): retval = [] for name in ("total", "available", "used", "free"): value = getattr(self, f"_ram_{name}") value = int(value / (1024 * 1024)) retval.append(f"{name.capitalize()}: {value}MB") return ", ".join(retval)
lib/sysinfo.py
121
faceswap
{ "docstring": " Format the RAM stats into Megabytes to make it more readable.\n\n Returns\n -------\n str\n The total, available, used and free RAM displayed in Megabytes\n ", "language": "en", "n_whitespaces": 64, "n_words": 24, "vocab_size": 22 }
28
Python
25
48c886b3dce3d3117ad16edaf35c8abd28dc51f5
sysinfo.py
102,079
7
58
_format_ram
https://github.com/deepfakes/faceswap.git
Allow decoding errors
89
0
21,444
13
3
11
def get_holiday(holiday_list, month): holiday_map = frappe._dict() for d in holiday_list: if d: holiday_map.setdefault( d, frappe.db.sql( , (d, month), ), ) return holiday_map @frappe.whitelist()
erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py
83
@frappe.whitelist()
erpnext
{ "docstring": "select day(holiday_date), weekly_off from `tabHoliday`\n\t\t\t\twhere parent=%s and month(holiday_date)=%s", "language": "en", "n_whitespaces": 7, "n_words": 9, "vocab_size": 9 }
23
Python
22
494bd9ef78313436f0424b918f200dab8fc7c20b
monthly_attendance_sheet.py
66,259
13
47
get_holiday
https://github.com/frappe/erpnext.git
style: format code with black
10
1
14,150
14
2
7
def __sub__(self, other): if self._delegate_binop(other): return NotImplemented return np.subtract(self, other)
numpy/ma/core.py
44
numpy
{ "docstring": "\n Subtract other from self, and return a new masked array.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
10
Python
9
6d77c591c59b5678f14ae5af2127eebb7d2415bc
core.py
160,870
4
27
__sub__
https://github.com/numpy/numpy.git
ENH: Adding __array_ufunc__ capability to MaskedArrays. This enables any ufunc numpy operations that are called on a MaskedArray to use the masked version of that function automatically without needing to resort to np.ma.func() calls.
42
0
38,770
7
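A small sketch of the masked subtraction that the __sub__ hook above dispatches to np.subtract; only NumPy is assumed.

import numpy as np
import numpy.ma as ma

a = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
b = ma.array([0.5, 0.5, 0.5])

# The masked element propagates through the ufunc-based subtraction.
print(a - b)         # [0.5 -- 2.5]
print((a - b).mask)  # [False  True False]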
1
8
def test_build_in_tf_function(self): m = metrics.MeanTensor(dtype=tf.float64)
keras/metrics/base_metric_test.py
32
keras
{ "docstring": "Ensure that variables are created correctly in a tf function.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_metric_test.py
274,647
11
117
test_build_in_tf_function
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
19
0
81,253
10
1
3
def isatty(self) -> bool: return True
src/textual/app.py
19
textual
{ "docstring": "Pretend to be a terminal.\n\n Returns:\n bool: True if this is a tty.\n ", "language": "en", "n_whitespaces": 38, "n_words": 13, "vocab_size": 12 }
6
Python
6
0af9fed65969894d604e32a177120f0a03857265
app.py
185,685
7
10
isatty
https://github.com/Textualize/textual.git
added constant, and docstrings
20
0
45,109
6
1
9
def is_typeddict(tp):
pipenv/patched/notpip/_vendor/typing_extensions.py
26
"""Check if an annotation is a TypedDict class For example::anFor
pipenv
{ "docstring": "Check if an annotation is a TypedDict class\n\n For example::", "language": "en", "n_whitespaces": 16, "n_words": 10, "vocab_size": 10 }
2
Python
2
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
typing_extensions.py
21,607
2
16
is_typeddict
https://github.com/pypa/pipenv.git
Vendor in pip 22.1.2
9
3
3,951
5
1
17
def test_cache_with_asterisk_in_name(self): config = { "caches": { "per_cache_factors": {"*cache_a*": 5, "cache_b": 6, "cache_c": 2} } } self.config._environ = { "SYNAPSE_CACHE_FACTOR_CACHE_A": "2", "SYNAPSE_CACHE_FACTOR_CACHE_B": 3, } self.config.read_config(config, config_dir_path="", data_dir_path="") self.config.resize_all_caches() cache_a = LruCache(100) add_resizable_cache("*cache_a*", cache_resize_callback=cache_a.set_cache_factor) self.assertEqual(cache_a.max_size, 200) cache_b = LruCache(100) add_resizable_cache("*Cache_b*", cache_resize_callback=cache_b.set_cache_factor) self.assertEqual(cache_b.max_size, 300) cache_c = LruCache(100) add_resizable_cache("*cache_c*", cache_resize_callback=cache_c.set_cache_factor) self.assertEqual(cache_c.max_size, 200)
tests/config/test_cache.py
250
synapse
{ "docstring": "Some caches have asterisks in their name, test that they are set correctly.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
49
Python
38
d38d242411b8910dfacde1e61fd3a0ec5cbcaa66
test_cache.py
248,242
21
146
test_cache_with_asterisk_in_name
https://github.com/matrix-org/synapse.git
Reload cache factors from disk on SIGHUP (#12673)
220
0
72,173
13
1
20
def test_model_tpu_one_core(): trainer = Trainer(tpu_cores=1, fast_dev_run=True, strategy=TPUSpawnStrategy(debug=True)) # assert training strategy attributes for device setting assert isinstance(trainer.strategy, TPUSpawnStrategy) assert not trainer.strategy.on_gpu assert trainer.strategy.on_tpu assert trainer.strategy.root_device == torch.device("xla", index=1) model = BoringModelTPU() trainer.fit(model) assert "PT_XLA_DEBUG" not in os.environ
tests/strategies/test_tpu_spawn.py
135
lightning
{ "docstring": "Tests if device/debug flag is set correctely when training and after teardown for TPUSpawnStrategy.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
37
Python
30
650c710efacd633fa283955145342bb64063c883
test_tpu_spawn.py
241,590
9
83
test_model_tpu_one_core
https://github.com/Lightning-AI/lightning.git
Rename training plugin test files & names to strategy (#11303)
67
0
69,613
12
1
25
def test_subscriptions_query_count(self) -> None: user1 = self.example_user("cordelia") user2 = self.example_user("iago") new_streams = [ "query_count_stream_1", "query_count_stream_2", "query_count_stream_3", ] # Test creating a public stream when realm does not have a notification stream. with queries_captured() as queries: self.common_subscribe_to_streams( self.test_user, [new_streams[0]], dict(principals=orjson.dumps([user1.id, user2.id]).decode()), ) self.assert_length(queries, 37) # Test creating private stream. with queries_captured() as queries: self.common_subscribe_to_streams( self.test_user, [new_streams[1]], dict(principals=orjson.dumps([user1.id, user2.id]).decode()), invite_only=True, ) self.assert_length(queries, 36) # Test creating a public stream with announce when realm has a notification stream. notifications_stream = get_stream(self.streams[0], self.test_realm) self.test_realm.notifications_stream_id = notifications_stream.id self.test_realm.save() with queries_captured() as queries: self.common_subscribe_to_streams( self.test_user, [new_streams[2]], dict( announce="true", principals=orjson.dumps([user1.id, user2.id]).decode(), ), ) self.assert_length(queries, 45)
zerver/tests/test_subs.py
387
zulip
{ "docstring": "\n Test database query count when creating stream with api/v1/users/me/subscriptions.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
98
Python
59
b0de5c0f364632feb1e0a662f9be49aaf3412770
test_subs.py
84,790
39
239
test_subscriptions_query_count
https://github.com/zulip/zulip.git
streams: Set can_remove_subscribers_group while creating streams. This commit sets can_remove_subscribers_group to admins system group while creating streams as it will be the default value of this setting. In further we would provide an option to set value of this setting to any user group while creating streams using API or UI.
519
0
17,875
18
1
9
def test_meta_options_as_defaults(self): # this plugin relies on the base CMSPlugin and Model classes to # decide what the app_label and db_table should be plugin = TestPlugin.model self.assertEqual(plugin._meta.db_table, 'meta_testpluginmodel') self.assertEqual(plugin._meta.app_label, 'meta')
cms/tests/test_plugins.py
63
django-cms
{ "docstring": " handling when a CMSPlugin meta options are computed defaults ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
30
Python
26
c1290c9ff89cb00caa5469129fd527e9d82cd820
test_plugins.py
82,419
4
35
test_meta_options_as_defaults
https://github.com/django-cms/django-cms.git
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <[email protected]> * ci: codespell config taken from #7292
72
0
17,389
9
8
20
def to_dict(self, field_map={}) -> Dict: inv_field_map = {v: k for k, v in field_map.items()} _doc: Dict[str, str] = {} for k, v in self.__dict__.items(): # Exclude internal fields (Pydantic, ...) fields from the conversion process if k.startswith("__"): continue if k == "content": # Convert pd.DataFrame to list of rows for serialization if self.content_type == "table" and isinstance(self.content, pd.DataFrame): v = [self.content.columns.tolist()] + self.content.values.tolist() k = k if k not in inv_field_map else inv_field_map[k] _doc[k] = v return _doc
haystack/schema.py
210
haystack
{ "docstring": "\n Convert Document to dict. An optional field_map can be supplied to change the names of the keys in the\n resulting dict. This way you can work with standardized Document objects in Haystack, but adjust the format that\n they are serialized / stored in other places (e.g. elasticsearch)\n Example:\n | doc = Document(content=\"some text\", content_type=\"text\")\n | doc.to_dict(field_map={\"custom_content_field\": \"content\"})\n | >>> {\"custom_content_field\": \"some text\", content_type\": \"text\"}\n\n :param field_map: Dict with keys being the custom target keys and values being the standard Document attributes\n :return: dict with content of the Document\n ", "language": "en", "n_whitespaces": 159, "n_words": 88, "vocab_size": 65 }
78
Python
55
621e1af74c9c7d04b79ca5f5826ddcc06e1237f0
schema.py
257,821
24
130
to_dict
https://github.com/deepset-ai/haystack.git
refactor: improve support for dataclasses (#3142) * refactor: improve support for dataclasses * refactor: refactor class init * refactor: remove unused import * refactor: testing 3.7 diffs * refactor: checking meta where is Optional * refactor: reverting some changes on 3.7 * refactor: remove unused imports * build: manual pre-commit run * doc: run doc pre-commit manually * refactor: post initialization hack for 3.7-3.10 compat. TODO: investigate another method to improve 3.7 compatibility. * doc: force pre-commit * refactor: refactored for both Python 3.7 and 3.9 * docs: manually run pre-commit hooks * docs: run api docs manually * docs: fix wrong comment * refactor: change no type-checked test code * docs: update primitives * docs: api documentation * docs: api documentation * refactor: minor test refactoring * refactor: remova unused enumeration on test * refactor: remove unneeded dir in gitignore * refactor: exclude all private fields and change meta def * refactor: add pydantic comment * refactor : fix for mypy on Python 3.7 * refactor: revert custom init * docs: update docs to new pydoc-markdown style * Update test/nodes/test_generator.py Co-authored-by: Sara Zan <[email protected]>
232
0
75,140
18
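A usage sketch mirroring the field_map behaviour described in the docstring above; it assumes Haystack is installed and that Document accepts these constructor arguments as in the record.

from haystack.schema import Document

doc = Document(content="some text", content_type="text")

# Rename the standard "content" key to a custom key on serialization.
d = doc.to_dict(field_map={"custom_content_field": "content"})
print(d["custom_content_field"])  # "some text"
print(d["content_type"])          # "text"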
1
7
def _resource_apply_sparse(self, grad, handle, indices, apply_state): raise NotImplementedError( "`_resource_apply_sparse` Must be implemented in subclasses." )
keras/optimizers/optimizer_v2/optimizer_v2.py
31
keras
{ "docstring": "Add ops to apply sparse gradients to the variable `handle`.\n\n Similar to `_apply_sparse`, the `indices` argument to this method has\n been de-duplicated. Optimizers which deal correctly with non-unique\n indices may instead override `_resource_apply_sparse_duplicate_indices`\n to avoid this overhead.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to\n be updated.\n indices: a `Tensor` of integral type representing the indices for\n which the gradient is nonzero. Indices are unique.\n apply_state: A dict which is used across multiple apply calls.\n\n Returns:\n An `Operation` which updates the value of the variable.\n ", "language": "en", "n_whitespaces": 216, "n_words": 100, "vocab_size": 68 }
15
Python
15
be73ac1a1e25d9abd4d793cba9707098d7adf231
optimizer_v2.py
279,524
4
19
_resource_apply_sparse
https://github.com/keras-team/keras.git
Add f-string format and lint with flynt on the whole codebase
47
0
83,029
8
1
2
def on_shutdown(self) -> None:
mkdocs/plugins.py
16
mkdocs
{ "docstring": "\n The `shutdown` event runs once at the very end of an `mkdocs` invocation, before exiting.\n\n This event is relevant only for support of `mkdocs serve`, otherwise within a\n single build it's undistinguishable from `on_post_build`.\n\n New in MkDocs 1.4.\n\n The presence of an `on_shutdown` method (even if empty) migrates the plugin to the new\n system where the plugin object is kept across builds within one `mkdocs serve`.\n\n Note the `on_post_build` method is still preferred for cleanups, when possible, as it has\n a much higher chance of actually triggering. `on_shutdown` is \"best effort\" because it\n relies on detecting a graceful shutdown of MkDocs.\n ", "language": "en", "n_whitespaces": 172, "n_words": 101, "vocab_size": 78 }
4
Python
4
a56ac6e0513bdea6860ed1fdc3debc10410638cd
plugins.py
224,971
16
8
on_shutdown
https://github.com/mkdocs/mkdocs.git
Add plugin events that persist across builds in `mkdocs serve` "One-time events" `on_startup(command)`, `on_shutdown`. Their presence also shows that a plugin *wants* to persist across builds. Otherwise they will be re-created, to not change any existing behavior.
11
0
57,435
6
6
18
def _build_network_on_replica(model, mode, inputs=None, targets=None): # Need to do imports here since we run into a circular dependency error. from keras import models # pylint: disable=g-import-not-at-top from keras.engine import sequential # pylint: disable=g-import-not-at-top # We rely on the internal methods to avoid having share_weights weights in # the public API. if isinstance(model, sequential.Sequential): updated_model = models._clone_sequential_model( model, input_tensors=inputs, layer_fn=models.share_weights ) else: updated_model = models._clone_functional_model( model, input_tensors=inputs, layer_fn=models.share_weights ) # Callable losses added directly to a functional Model need to be added # here. updated_model._callable_losses = model._callable_losses # Recast all low precision outputs back to float32 since we only casted the # inputs to bfloat16 and not targets. This is done so that we can preserve # precision when calculating the loss value.
keras/distribute/distributed_training_utils_v1.py
133
keras
{ "docstring": "Build an updated model on replicas.\n\n We create a new Keras model while sharing the variables from the old graph.\n Building a new sub-graph is required since the original keras model creates\n placeholders for the input and the output that are not accessible till we\n call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.\n\n The sharing of weights and layers between the old and the new model\n guarantee that we're using Strategy variables and any updates on either\n model are reflected correctly in callbacks and loop iterations.\n\n We need to make sure we share the optimizers between the old and the new\n model as well so that optimizer state is not lost if the user is running fit\n multiple times.\n\n Args:\n model: Model to be replicated across Replicas\n mode: Which of fit/eval/predict is building the distributed network\n inputs: Input variables to be passed to the model\n targets: Target tensor to be passed to model.compile\n\n Returns:\n A new model with shared layers with the old model.\n ", "language": "en", "n_whitespaces": 227, "n_words": 163, "vocab_size": 103 }
122
Python
88
b1105dca17670dcac229271e63d5073fe445b84c
distributed_training_utils_v1.py
278,045
33
188
_build_network_on_replica
https://github.com/keras-team/keras.git
resolve line-too-long in distribute
228
0
82,343
13
4
15
def _discard_tk_faces(self): keys = [f"{pnt_x}_{pnt_y}" for pnt_x, pnt_y in self._objects.visible_grid[:2].T.reshape(-1, 2)] for key in list(self._tk_faces): if key not in keys: del self._tk_faces[key] logger.trace("keys: %s allocated_faces: %s", keys, len(self._tk_faces))
tools/manual/faceviewer/viewport.py
124
faceswap
{ "docstring": " Remove any :class:`TKFace` objects from the cache that are not currently displayed. ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 12 }
28
Python
24
5e73437be47f2410439a3c6716de96354e6a0c94
viewport.py
101,263
7
74
_discard_tk_faces
https://github.com/deepfakes/faceswap.git
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
97
0
20,683
14
1
11
def test_i18n_language_non_english_fallback(self): with self.settings(LANGUAGE_CODE="fr"), translation.override("none"): response = self.client.get(reverse("admin:jsi18n")) self.assertContains(response, "Choisir une heure")
tests/admin_views/tests.py
83
django
{ "docstring": "\n Makes sure that the fallback language is still working properly\n in cases where the selected language cannot be found.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 17 }
12
Python
12
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,601
4
44
test_i18n_language_non_english_fallback
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
48
0
52,017
13
2
10
def name_scope_only_in_function_or_graph(name): if not tf.executing_eagerly(): return tf.name_scope(name) else: return NullContextmanager() @keras_export("keras.optimizers.Optimizer", metaclass=abc.ABCMeta)
keras/optimizers/optimizer_v2/optimizer_v2.py
68
@keras_export("keras.optimizers.Optimizer", metaclass=abc.ABCMeta)
keras
{ "docstring": "Internal-only entry point for `name_scope*`.\n\n Enters a compat.v1.name_scope only when in a function or graph,\n not when running fully eagerly.\n\n Args:\n name: The name argument that is passed to the op function.\n\n Returns:\n `name_scope*` context manager.\n ", "language": "en", "n_whitespaces": 61, "n_words": 36, "vocab_size": 34 }
12
Python
11
84afc5193d38057e2e2badf9c889ea87d80d8fbf
optimizer_v2.py
275,520
5
27
name_scope_only_in_function_or_graph
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
34
1
81,411
10
1
5
def name(self) -> PurePosixPath: return PurePosixPath(_as_posix(self).split("::")[0]).name
src/datasets/download/streaming_download_manager.py
46
datasets
{ "docstring": "Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.\n\n Args:\n path (:obj:`~pathlib.Path`): Calling Path instance.\n\n Returns:\n :obj:`str`\n ", "language": "en", "n_whitespaces": 66, "n_words": 23, "vocab_size": 23 }
6
Python
6
ab7d3045ac9154e9c1c2602d0869130defdc6dc7
streaming_download_manager.py
105,102
10
26
name
https://github.com/huggingface/datasets.git
Support DataLoader with num_workers > 0 in streaming mode (#4375) * make TorchIterableDataset work in parallel - make it picklable - paralellize over the shards when num_workers is passed * start writing some tests * fix streaming extension and fsspec issues in subprocesses * fix some tests * fix more tests * fix import * fix and add tests * fix patch (handle successive patches and builtins) * revert unnecessary change to enriched_web_blg * style * use open locally to fix win permission errors * keep file opened in read_csv * fix compression for read_csv * consistency of read_csv: don't infer compression for file-like objects * stringify Path objects * comments + raise error if sharding is ambiguous * minor * Update src/datasets/iterable_dataset.py Co-authored-by: Mario Šaško <[email protected]> Co-authored-by: Mario Šaško <[email protected]>
20
0
22,070
13
8
23
def django_table_names(self, only_existing=False, include_views=True): tables = set() for model in self.get_migratable_models(): if not model._meta.managed: continue tables.add(model._meta.db_table) tables.update( f.m2m_db_table() for f in model._meta.local_many_to_many if f.remote_field.through._meta.managed ) tables = list(tables) if only_existing: existing_tables = set(self.table_names(include_views=include_views)) tables = [ t for t in tables if self.identifier_converter(t) in existing_tables ] return tables
django/db/backends/base/introspection.py
186
django
{ "docstring": "\n Return a list of all table names that have associated Django models and\n are in INSTALLED_APPS.\n\n If only_existing is True, include only the tables in the database.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 25 }
48
Python
31
9c19aff7c7561e3a82978a272ecdaad40dda5c00
introspection.py
204,854
18
117
django_table_names
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
242
0
50,932
14
2
6
def eulerline(self): if self.is_equilateral(): return self.orthocenter return Line(self.orthocenter, self.circumcenter)
sympy/geometry/polygon.py
47
sympy
{ "docstring": "The Euler line of the triangle.\n\n The line which passes through circumcenter, centroid and orthocenter.\n\n Returns\n =======\n\n eulerline : Line (or Point for equilateral triangles in which case all\n centers coincide)\n\n Examples\n ========\n\n >>> from sympy import Point, Triangle\n >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)\n >>> t = Triangle(p1, p2, p3)\n >>> t.eulerline\n Line2D(Point2D(0, 0), Point2D(1/2, 1/2))\n\n ", "language": "en", "n_whitespaces": 165, "n_words": 62, "vocab_size": 51 }
9
Python
8
498015021131af4dbb07eb110e5badaba8250c7b
polygon.py
196,289
4
28
eulerline
https://github.com/sympy/sympy.git
Updated import locations
41
0
47,789
8
2
9
def itermonthdays2(self, year, month): for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): yield d, i % 7
python3.10.4/Lib/calendar.py
57
XX-Net
{ "docstring": "\n Like itermonthdates(), but will yield (day number, weekday number)\n tuples. For days outside the specified month the day number is 0.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 20 }
16
Python
16
8198943edd73a363c266633e1aa5b2a9e9c9f526
calendar.py
221,238
3
37
itermonthdays2
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
41
0
56,285
10
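A standard-library usage sketch for itermonthdays2 above, via the public calendar.Calendar API.

import calendar

cal = calendar.Calendar(firstweekday=calendar.MONDAY)
for day, weekday in cal.itermonthdays2(2024, 2):
    # Day 0 marks padding days that belong to the neighbouring months.
    if day:
        print(day, calendar.day_name[weekday])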
4
2
def get_input_shape_and_dtype(layer):
keras/engine/training_utils.py
13
keras
{ "docstring": "Retrieves input shape and input dtype of layer if applicable.\n\n Args:\n layer: Layer (or model) instance.\n\n Returns:\n Tuple (input_shape, input_dtype). Both could be None if the layer\n does not have a defined input shape.\n\n Raises:\n ValueError: in case an empty Sequential or Functional model is passed.\n ", "language": "en", "n_whitespaces": 80, "n_words": 46, "vocab_size": 42 }
2
Python
2
84afc5193d38057e2e2badf9c889ea87d80d8fbf
training_utils.py
271,837
9
55
get_input_shape_and_dtype
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
5
0
80,858
6
2
11
def load_workflow_meta(self) -> Optional[WorkflowMetaData]: try: metadata = asyncio_run(self._get(self._key_workflow_metadata(), True)) return WorkflowMetaData(status=WorkflowStatus(metadata["status"])) except KeyNotFoundError: return None
python/ray/workflow/workflow_storage.py
81
ray
{ "docstring": "Load the metadata of the current workflow.\n\n Returns:\n The metadata of the current workflow. If it doesn't exist,\n return None.\n ", "language": "en", "n_whitespaces": 56, "n_words": 20, "vocab_size": 14 }
15
Python
14
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
workflow_storage.py
133,505
12
48
load_workflow_meta
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
69
0
30,038
14
3
11
def _update_code_co_name(self, code): if not hasattr(code, "replace"): # It may not be available on older versions of Python (only # available for 3.8 onwards). return code try: first_real_line = next(dis.findlinestarts(code))[1] except StopIteration: return code return code.replace(co_name="<cell line: %s>" % (first_real_line,))
IPython/core/interactiveshell.py
93
ipython
{ "docstring": "Python 3.10 changed the behaviour so that whenever a code object\n is assembled in the compile(ast) the co_firstlineno would be == 1.\n\n This makes pydevd/debugpy think that all cells invoked are the same\n since it caches information based on (co_firstlineno, co_name, co_filename).\n\n Given that, this function changes the code 'co_name' to be unique\n based on the first real lineno of the code (which also has a nice\n side effect of customizing the name so that it's not always <module>).\n\n See: https://github.com/ipython/ipykernel/issues/841\n ", "language": "en", "n_whitespaces": 137, "n_words": 81, "vocab_size": 64 }
40
Python
34
d11e987f174a15f1640f8006c86f58d884c3faa4
interactiveshell.py
208,651
8
54
_update_code_co_name
https://github.com/ipython/ipython.git
Set co_name for cells run line by line. Fixes https://github.com/ipython/ipykernel/issues/841
130
0
52,437
13
4
22
def send_trial(self, parameters, placement_constraint=None): self.parameters_count += 1 if placement_constraint is None: placement_constraint = { 'type': 'None', 'gpus': [] } self._validate_placement_constraint(placement_constraint) new_trial = { 'parameter_id': self.parameters_count, 'parameters': parameters, 'parameter_source': 'algorithm', 'placement_constraint': placement_constraint } _logger.debug('New trial sent: %s', new_trial) try: send_payload = nni.dump(new_trial, pickle_size_limit=int(os.getenv('PICKLE_SIZE_LIMIT', 64 * 1024))) except PayloadTooLarge: raise ValueError( 'Serialization failed when trying to dump the model because payload too large (larger than 64 KB). ' 'This is usually caused by pickling large objects (like datasets) by mistake. ' 'See the full error traceback for details and https://nni.readthedocs.io/en/stable/NAS/Serialization.html ' 'for how to resolve such issue. ' ) # trial parameters can be super large, disable pickle size limit here # nevertheless, there could still be blocked by pipe / nni-manager send(CommandType.NewTrialJob, send_payload) if self.send_trial_callback is not None: self.send_trial_callback(parameters) # pylint: disable=not-callable return self.parameters_count
nni/retiarii/integration.py
233
nni
{ "docstring": "\n Send parameters to NNI.\n\n Parameters\n ----------\n parameters : Any\n Any payload.\n\n Returns\n -------\n int\n Parameter ID that is assigned to this parameter,\n which will be used for identification in future.\n ", "language": "en", "n_whitespaces": 120, "n_words": 30, "vocab_size": 27 }
133
Python
108
d5ed88e4e7f9aa78f06922dce8219a82e3b52682
integration.py
111,621
28
132
send_trial
https://github.com/microsoft/nni.git
Retiarii serializer user experience improvements (#4437)
432
0
24,455
16
3
14
async def dry_run(self, empty, context) -> jina_pb2.StatusProto: from docarray import DocumentArray, Document from jina.serve.executors import __dry_run_endpoint__ da = DocumentArray([Document()]) try:
jina/serve/runtimes/gateway/grpc/gateway.py
63
async def dry_run(self, empty, context) -> jina_pb2.StatusProto: """ Process the call requested by having a dry run call to every Executor in the graph :param empty: The service expects an empty protobuf message :param context: grpc context :returns: the response request """ from docarray import DocumentArray, Document from jina.serve.executors import __dry_run_endpoint__ da = DocumentArray([Document()]) try:
jina
{ "docstring": "\n Process the call requested by having a dry run call to every Executor in the graph\n\n :param empty: The service expects an empty protobuf message\n :param context: grpc context\n :returns: the response request\n ", "language": "en", "n_whitespaces": 69, "n_words": 33, "vocab_size": 29 }
20
Python
18
e143ea3092ebae68f8c2cf7f784f86296cae68d7
gateway.py
13,668
23
103
dry_run
https://github.com/jina-ai/jina.git
refactor: use stream_docs from streamer (#5438)
55
1
2,721
10
4
9
def get_yaxis_transform(self, which='grid'): if which == 'grid': return self._yaxis_transform elif which == 'tick1': # for cartesian projection, this is bottom spine return self.spines.left.get_spine_transform() elif which == 'tick2': # for cartesian projection, this is top spine return self.spines.right.get_spine_transform() else: raise ValueError(f'unknown value for which: {which!r}')
lib/matplotlib/axes/_base.py
109
matplotlib
{ "docstring": "\n Get the transformation used for drawing y-axis labels, ticks\n and gridlines. The x-direction is in axis coordinates and the\n y-direction is in data coordinates.\n\n .. note::\n\n This transformation is primarily used by the\n `~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.\n ", "language": "en", "n_whitespaces": 137, "n_words": 56, "vocab_size": 42 }
44
Python
29
bf3a554ccd1299bc260647029811758aeaf577b1
_base.py
108,635
9
57
get_yaxis_transform
https://github.com/matplotlib/matplotlib.git
Add tests, improve error messages, and use argument checks to simplify code
145
0
23,279
12
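A usage sketch of the blended transform returned above: x is taken in axes coordinates and y in data coordinates. Matplotlib is assumed installed.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [10, 20, 15])

# Place a label just outside the right spine at data height y=20.
trans = ax.get_yaxis_transform(which="grid")
ax.text(1.02, 20, "peak", transform=trans, va="center")
plt.show()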
3
15
def get_rfq_containing_supplier(doctype, txt, searchfield, start, page_len, filters): conditions = "" if txt: conditions += "and rfq.name like '%%" + txt + "%%' " if filters.get("transaction_date"): conditions += "and rfq.transaction_date = '{0}'".format(filters.get("transaction_date")) rfq_data = frappe.db.sql( f, { "page_len": page_len, "start": start, "company": filters.get("company"), "supplier": filters.get("supplier"), }, as_dict=1, ) return rfq_data
erpnext/buying/doctype/request_for_quotation/request_for_quotation.py
169
erpnext
{ "docstring": "\n\t\tselect\n\t\t\tdistinct rfq.name, rfq.transaction_date,\n\t\t\trfq.company\n\t\tfrom\n\t\t\t`tabRequest for Quotation` rfq, `tabRequest for Quotation Supplier` rfq_supplier\n\t\twhere\n\t\t\trfq.name = rfq_supplier.parent\n\t\t\tand rfq_supplier.supplier = %(supplier)s\n\t\t\tand rfq.docstatus = 1\n\t\t\tand rfq.company = %(company)s\n\t\t\t{conditions}\n\t\torder by rfq.transaction_date ASC\n\t\tlimit %(page_len)s offset %(start)s ", "language": "en", "n_whitespaces": 28, "n_words": 40, "vocab_size": 32 }
49
Python
38
34e4903ed7936c35176d6031a16d1a27654dcb40
request_for_quotation.py
69,552
30
96
get_rfq_containing_supplier
https://github.com/frappe/erpnext.git
refactor: search queries (#33004) - guard clauses for readability - use values or format
32
0
15,063
13
10
29
async def async_update(self): # Check if device is disconnected. if not self._attr_available: # Try to connect if await self.aftv.adb_connect(log_errors=self._failed_connect_count == 0): self._failed_connect_count = 0 self._attr_available = True else: self._failed_connect_count += 1 # If the ADB connection is not intact, don't update. if not self.available: return # Get the updated state and attributes. ( state, self._attr_app_id, running_apps, _, self._attr_is_volume_muted, self._attr_volume_level, self._attr_extra_state_attributes[ATTR_HDMI_INPUT], ) = await self.aftv.update(self._get_sources) self._attr_state = ANDROIDTV_STATES.get(state) if self._attr_state is None: self._attr_available = False if running_apps: self._attr_source = self._attr_app_name = self._app_id_to_name.get( self._attr_app_id, self._attr_app_id ) sources = [ self._app_id_to_name.get( app_id, app_id if not self._exclude_unnamed_apps else None ) for app_id in running_apps ] self._attr_source_list = [source for source in sources if source] else: self._attr_source_list = None
homeassistant/components/androidtv/media_player.py
288
core
{ "docstring": "Update the device state and, if necessary, re-connect.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
114
Python
74
0df30782a623204be2941da74ddee5bb110dd03b
media_player.py
296,246
34
184
async_update
https://github.com/home-assistant/core.git
Bump androidtv to 0.0.67 (improve connect attempt logging) (#69721)
520
0
95,241
14
1
9
def euler_poly(n, x=None, polys=False): return appell_poly(n, [[1], [1, QQ(-1,2)]], 1, lambda p, i: -p / 2, QQ, x, polys) @public
sympy/polys/appellseqs.py
81
@public
sympy
{ "docstring": "Generates the Euler polynomial of degree `n` in `x`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 67, "n_words": 35, "vocab_size": 29 }
20
Python
20
e875bdb804b0285e4a9bd8de0158436e792c03cb
appellseqs.py
199,618
2
55
euler_poly
https://github.com/sympy/sympy.git
Initial definition of Appell sequences
25
1
49,296
12
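A usage sketch for euler_poly above, assuming a SymPy build in which sympy.polys.appellseqs is importable at the record's path.

from sympy.abc import x
from sympy.polys.appellseqs import euler_poly

# Classical Euler polynomial E_3(x) = x**3 - 3*x**2/2 + 1/4
print(euler_poly(3, x))

# polys=True returns a Poly object over QQ instead of an expression.
print(euler_poly(3, x, polys=True))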
3
9
def _get_output_folder(self) -> str: if self._is_video and self._type == "frames": return os.path.dirname(self._source_dir) return self._source_dir
tools/alignments/jobs.py
58
faceswap
{ "docstring": " Return output folder. Needs to be in the root if input is a video and processing\n frames\n\n Returns\n -------\n str\n Full path to the output folder\n ", "language": "en", "n_whitespaces": 73, "n_words": 26, "vocab_size": 23 }
14
Python
13
e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1
jobs.py
101,716
12
34
_get_output_folder
https://github.com/deepfakes/faceswap.git
Alignments Tool - Typing, Documentation + Re-org
46
0
21,120
10
4
21
def update_cached_response(self, request, response): cache_url = self.cache_url(request.url) cached_response = self.serializer.loads(request, self.cache.get(cache_url)) if not cached_response: # we didn't have a cached response return response # Lets update our headers with the headers from the new request: # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 # # The server isn't supposed to send headers that would make # the cached body invalid. But... just in case, we'll be sure # to strip out ones we know that might be problmatic due to # typical assumptions. excluded_headers = ["content-length"] cached_response.headers.update( dict( (k, v) for k, v in response.headers.items() if k.lower() not in excluded_headers ) ) # we want a 200 b/c we have content via the cache cached_response.status = 200 # update our cache self._cache_set(cache_url, request, cached_response) return cached_response
pipenv/patched/notpip/_vendor/cachecontrol/controller.py
172
pipenv
{ "docstring": "On a 304 we will get a new set of headers that we want to\n update our cached value with, assuming we have one.\n\n This should only ever be called when we've sent an ETag and\n gotten a 304 as the response.\n ", "language": "en", "n_whitespaces": 70, "n_words": 42, "vocab_size": 37 }
120
Python
79
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
controller.py
21,319
16
103
update_cached_response
https://github.com/pypa/pipenv.git
Vendor in pip 22.1.2
342
0
3,761
13
5
13
def remote(self, *args, **kwargs): # Delayed import to avoid a cyclic import from ray.util.client.common import remote_decorator if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): # This is the case where the decorator is just @ray.remote. return remote_decorator(options=None)(args[0]) error_string = ( "The @ray.remote decorator must be applied either " "with no arguments and no parentheses, for example " "'@ray.remote', or it must be applied using some of " "the arguments 'num_returns', 'num_cpus', 'num_gpus', " "'memory', 'object_store_memory', 'resources', " "'max_calls', or 'max_restarts', like " "'@ray.remote(num_returns=2, " 'resources={"CustomResource": 1})\'.' ) assert len(args) == 0 and len(kwargs) > 0, error_string return remote_decorator(options=kwargs) # TODO(mwtian): consider adding _internal_ prefix to call_remote / # call_release / call_retain.
python/ray/util/client/api.py
162
ray
{ "docstring": "remote is the hook stub passed on to replace `ray.remote`.\n\n This sets up remote functions or actors, as the decorator,\n but does not execute them.\n\n Args:\n args: opaque arguments\n kwargs: opaque keyword arguments\n ", "language": "en", "n_whitespaces": 83, "n_words": 33, "vocab_size": 29 }
113
Python
81
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
api.py
132,910
16
93
remote
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
285
0
29,866
11
2
23
def process_struct(fileobj): (key_id,) = struct.unpack("Q", fileobj.read(8)) (country_code,) = struct.unpack("2s", fileobj.read(2)) (recognized,) = struct.unpack("b", fileobj.read(1)) (timestamp,) = struct.unpack("I", fileobj.read(4)) (n_strokes,) = struct.unpack("H", fileobj.read(2)) drawing = [] for _ in range(n_strokes): (n_points,) = struct.unpack("H", fileobj.read(2)) fmt = str(n_points) + "B" x = struct.unpack(fmt, fileobj.read(n_points)) y = struct.unpack(fmt, fileobj.read(n_points)) drawing.append({"x": list(x), "y": list(y)}) return { "key_id": str(key_id), "recognized": recognized, "timestamp": datetime.fromtimestamp(timestamp), "countrycode": country_code.decode("utf-8"), "drawing": drawing, }
datasets/quickdraw/quickdraw.py
365
datasets
{ "docstring": "\n Process a struct from a binary file object.\n\n The code for this function is borrowed from the following link:\n https://github.com/googlecreativelab/quickdraw-dataset/blob/f0f3beef0fc86393b3771cdf1fc94828b76bc89b/examples/binary_file_parser.py#L19\n ", "language": "en", "n_whitespaces": 33, "n_words": 20, "vocab_size": 18 }
63
Python
49
1c1eaf96d5ef4623e36c9124d49e88ab476dd655
quickdraw.py
105,091
20
220
process_struct
https://github.com/huggingface/datasets.git
Add QuickDraw dataset (#3592) * Add QuickDraw dataset * Style * Add infos file, dummy data, improve script * Add info and dummy data * Test readme * Finish readme * Delete generate_dummy.py * Remove whitespace
163
0
22,068
13
2
9
def convert_x_to_bbox(x, score=None): w = np.sqrt(x[2] * x[3]) h = x[2] / w if (score == None): return np.array( [x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4)) else: score = np.array([score]) return np.array([ x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score ]).reshape((1, 5))
deploy/pptracking/python/mot/tracker/ocsort_tracker.py
233
PaddleDetection
{ "docstring": "\n Takes a bounding box in the centre form [x,y,s,r] and returns it in the form\n [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right\n ", "language": "en", "n_whitespaces": 40, "n_words": 28, "vocab_size": 21 }
69
Python
31
c84153a355d9855fe55cf51d203b8b24e7d884e5
ocsort_tracker.py
211,029
12
167
convert_x_to_bbox
https://github.com/PaddlePaddle/PaddleDetection.git
[MOT] Add OC_SORT tracker (#6272) * add ocsort tracker * add ocsort deploy * merge develop * fix ocsort tracker codes * fix doc, test=document_fix * fix doc, test=document_fix
146
0
53,004
15
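A worked check of the centre-form to corner-form conversion implemented above; the numbers are hypothetical and only NumPy is assumed.

import numpy as np

# State [cx, cy, s, r]: centre (50, 40), area s=400, aspect ratio r=1.
x = np.array([50.0, 40.0, 400.0, 1.0])
w = np.sqrt(x[2] * x[3])   # 20.0
h = x[2] / w               # 20.0
bbox = [x[0] - w / 2.0, x[1] - h / 2.0, x[0] + w / 2.0, x[1] + h / 2.0]
print(bbox)  # [40.0, 30.0, 60.0, 50.0] == [x1, y1, x2, y2]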
1
4
def multiply(inputs, **kwargs): return Multiply(**kwargs)(inputs)
keras/layers/merging/multiply.py
32
keras
{ "docstring": "Functional interface to the `Multiply` layer.\n\n Example:\n\n >>> x1 = np.arange(3.0)\n >>> x2 = np.arange(3.0)\n >>> tf.keras.layers.multiply([x1, x2])\n <tf.Tensor: shape=(3,), dtype=float32, numpy=array([0., 1., 4.], ...)>\n\n Usage in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)\n >>> out = tf.keras.layers.multiply([x1,x2]) #shape=(None, 8)\n >>> out = tf.keras.layers.Dense(4)(out)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n Args:\n inputs: A list of input tensors.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the element-wise product of the inputs.\n ", "language": "en", "n_whitespaces": 158, "n_words": 89, "vocab_size": 59 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
multiply.py
272,694
2
18
multiply
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
11
0
81,038
9
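A usage sketch reproducing the element-wise product from the docstring above; TensorFlow/Keras is assumed installed.

import numpy as np
import tensorflow as tf

x1 = np.arange(3.0)  # [0., 1., 2.]
x2 = np.arange(3.0)
out = tf.keras.layers.multiply([x1, x2])
print(out)  # tf.Tensor([0. 1. 4.], shape=(3,), dtype=float32)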
1
4
def export_model(self): return self.tuner.get_best_model()
autokeras/auto_model.py
26
autokeras
{ "docstring": "Export the best Keras Model.\n\n # Returns\n keras.Model instance. The best model found during the search, loaded\n with trained weights.\n ", "language": "en", "n_whitespaces": 56, "n_words": 20, "vocab_size": 18 }
4
Python
4
b97d27d2e916025f65fed751d54c089d4d4bd022
auto_model.py
175,928
2
14
export_model
https://github.com/keras-team/autokeras.git
clean up imports
18
0
41,662
8
4
23
def async_remove_legacy_device_serial_numbers(self) -> None: _LOGGER.debug( "Removing legacy serial numbers from device registry entries for pairing %s", self.unique_id, ) device_registry = dr.async_get(self.hass) for accessory in self.entity_map.accessories: identifiers = { ( IDENTIFIER_ACCESSORY_ID, f"{self.unique_id}:aid:{accessory.aid}", ) } legacy_serial_identifier = ( IDENTIFIER_SERIAL_NUMBER, accessory.serial_number, ) device = device_registry.async_get_device(identifiers=identifiers) if not device or legacy_serial_identifier not in device.identifiers: continue device_registry.async_update_device(device.id, new_identifiers=identifiers)
homeassistant/components/homekit_controller/connection.py
160
core
{ "docstring": "Migrate remove legacy serial numbers from devices.\n\n We no longer use serial numbers as device identifiers\n since they are not reliable, and the HomeKit spec\n does not require them to be stable.\n ", "language": "en", "n_whitespaces": 60, "n_words": 32, "vocab_size": 29 }
53
Python
41
f23b1750e85f07091eb896a0b12b8f95e5646338
connection.py
288,840
27
93
async_remove_legacy_device_serial_numbers
https://github.com/home-assistant/core.git
Migrate HomeKit Controller to use stable identifiers (#80064)
300
0
87,989
13
1
15
def test_print_args(self): args_list = [ 'tests/tests.csv', '-is', ',' ] args = self.parser.parse_args(args_list) with captured_output() as (out, err): _print_args(args) output = out.getvalue() expected_output = self.assertEqual(_sort_lines(expected_output), _sort_lines(output))
tests/driver_tests.py
115
tpot
{ "docstring": "Assert that _print_args prints correct values for all parameters in default settings.\nTPOT settings:\nCHECKPOINT_FOLDER = None\nCONFIG_FILE = None\nCROSSOVER_RATE = 0.1\nEARLY_STOP = None\nGENERATIONS = 100\nINPUT_FILE = tests/tests.csv\nINPUT_SEPARATOR = ,\nLOG = None\nMAX_EVAL_MINS = 5\nMAX_TIME_MINS = None\nMEMORY = None\nMUTATION_RATE = 0.9\nNUM_CV_FOLDS = 5\nNUM_JOBS = 1\nOFFSPRING_SIZE = 100\nOUTPUT_FILE = None\nPOPULATION_SIZE = 100\nRANDOM_STATE = None\nSCORING_FN = accuracy\nSUBSAMPLE = 1.0\nTARGET_NAME = class\nTEMPLATE = None\nTPOT_MODE = classification\nVERBOSITY = 1\n\n", "language": "en", "n_whitespaces": 348, "n_words": 86, "vocab_size": 51 }
25
Python
22
388616b6247ca4ea8de4e2f340d6206aee523541
driver_tests.py
181,601
38
64
test_print_args
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
115
0
43,390
10
3
29
def crash_log(): original_traceback = traceback.format_exc().encode("utf-8") path = os.path.dirname(os.path.realpath(sys.argv[0])) filename = os.path.join(path, datetime.now().strftime("crash_report.%Y.%m.%d.%H%M%S%f.log")) freeze_log = [line.encode("utf-8") for line in _DEBUG_BUFFER] try: from lib.sysinfo import sysinfo # pylint:disable=import-outside-toplevel except Exception: # pylint:disable=broad-except sysinfo = ("\n\nThere was an error importing System Information from lib.sysinfo. This is " f"probably a bug which should be fixed:\n{traceback.format_exc()}") with open(filename, "wb") as outfile: outfile.writelines(freeze_log) outfile.write(original_traceback) outfile.write(sysinfo.encode("utf-8")) return filename _OLD_FACTORY = logging.getLogRecordFactory()
lib/logger.py
249
faceswap
{ "docstring": " On a crash, write out the contents of :func:`_DEBUG_BUFFER` containing the last 100 lines\n of debug messages to a crash report in the root Faceswap folder.\n\n Returns\n -------\n str\n The filename of the file that contains the crash report\n ", "language": "en", "n_whitespaces": 62, "n_words": 39, "vocab_size": 30 }
64
Python
55
afec52309326304f4323029039e49bfcf928ef43
logger.py
100,731
15
127
crash_log
https://github.com/deepfakes/faceswap.git
Bugfixes: - Stats graph - Handle NaNs in data - logger - de-elevate matplotlib font messages
145
0
20,186
15
6
27
def remap_palette(self, dest_map, source_palette=None): from . import ImagePalette if self.mode not in ("L", "P"): raise ValueError("illegal image mode") if source_palette is None: if self.mode == "P": self.load() source_palette = self.im.getpalette("RGB")[:768] else: # L-mode source_palette = bytearray(i // 3 for i in range(768)) palette_bytes = b"" new_positions = [0] * 256 # pick only the used colors from the palette for i, oldPosition in enumerate(dest_map): palette_bytes += source_palette[oldPosition * 3 : oldPosition * 3 + 3] new_positions[oldPosition] = i # replace the palette color id of all pixel with the new id # Palette images are [0..255], mapped through a 1 or 3 # byte/color map. We need to remap the whole image # from palette 1 to palette 2. New_positions is # an array of indexes into palette 1. Palette 2 is # palette 1 with any holes removed. # We're going to leverage the convert mechanism to use the # C code to remap the image from palette 1 to palette 2, # by forcing the source image into 'L' mode and adding a # mapping 'L' mode palette, then converting back to 'L' # sans palette thus converting the image bytes, then # assigning the optimized RGB palette. # perf reference, 9500x4000 gif, w/~135 colors # 14 sec prepatch, 1 sec postpatch with optimization forced. mapping_palette = bytearray(new_positions) m_im = self.copy() m_im.mode = "P" m_im.palette = ImagePalette.ImagePalette("RGB", palette=mapping_palette * 3) # possibly set palette dirty, then # m_im.putpalette(mapping_palette, 'L') # converts to 'P' # or just force it. # UNDONE -- this is part of the general issue with palettes m_im.im.putpalette("RGB;L", m_im.palette.tobytes()) m_im = m_im.convert("L") # Internally, we require 768 bytes for a palette. new_palette_bytes = palette_bytes + (768 - len(palette_bytes)) * b"\x00" m_im.putpalette(new_palette_bytes) m_im.palette = ImagePalette.ImagePalette("RGB", palette=palette_bytes) if "transparency" in self.info: m_im.info["transparency"] = new_positions[self.info["transparency"]] return m_im
src/PIL/Image.py
425
Pillow
{ "docstring": "\n Rewrites the image to reorder the palette.\n\n :param dest_map: A list of indexes into the original palette.\n e.g. ``[1,0]`` would swap a two item palette, and ``list(range(256))``\n is the identity transform.\n :param source_palette: Bytes or None.\n :returns: An :py:class:`~PIL.Image.Image` object.\n\n ", "language": "en", "n_whitespaces": 97, "n_words": 40, "vocab_size": 35 }
299
Python
177
46a80d144a16836af304a7aaa8e620962d91ac23
Image.py
242,978
27
231
remap_palette
https://github.com/python-pillow/Pillow.git
Update transparency when remapping the palette
680
0
69,947
16
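A hedged usage sketch for remap_palette above; "example.gif" is a hypothetical file name, and the identity-plus-swap map follows the docstring's description of dest_map.

from PIL import Image

im = Image.open("example.gif")  # hypothetical P-mode (palette) image
assert im.mode == "P"

# Swap the first two palette entries, keep the rest in place.
dest_map = [1, 0] + list(range(2, 256))
swapped = im.remap_palette(dest_map)
swapped.save("example_swapped.gif")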
1
4
def decode(self, buffer): raise NotImplementedError()
src/PIL/ImageFile.py
22
Pillow
{ "docstring": "\n Override to perform the decoding process.\n\n :param buffer: A bytes object with the data to be decoded.\n :returns: A tuple of ``(bytes consumed, errcode)``.\n If finished with decoding return 0 for the bytes consumed.\n Err codes are from :data:`.ImageFile.ERRORS`.\n ", "language": "en", "n_whitespaces": 90, "n_words": 39, "vocab_size": 32 }
5
Python
5
a0e1fde1eddf45f26653e2ff6080d31e177adbec
ImageFile.py
242,437
2
12
decode
https://github.com/python-pillow/Pillow.git
Added PyEncoder
19
0
69,859
7
1
3
def DeveloperAPI(obj): _mark_annotated(obj) return obj
rllib/utils/annotations.py
23
ray
{ "docstring": "Decorator for documenting developer APIs.\n\n Developer APIs are classes and methods explicitly exposed to developers\n for the purposes of building custom algorithms or advanced training\n strategies on top of RLlib internals. You can generally expect these APIs\n to be stable sans minor changes (but less stable than public APIs).\n\n Subclasses that inherit from a ``@DeveloperAPI`` base class can be\n assumed part of the RLlib developer API as well.\n\n Examples:\n >>> # Indicates that the `TorchPolicy` class is exposed to end users\n >>> # of RLlib and will remain (relatively) stable across RLlib\n >>> # releases.\n >>> from ray.rllib.policy import Policy\n >>> @DeveloperAPI # doctest: +SKIP\n ... class TorchPolicy(Policy): # doctest: +SKIP\n ... ... # doctest: +SKIP\n ", "language": "en", "n_whitespaces": 193, "n_words": 116, "vocab_size": 78 }
5
Python
5
55d039af320caaab7fe11d404585bd402e66d393
annotations.py
139,970
3
12
DeveloperAPI
https://github.com/ray-project/ray.git
Annotate datasources and add API annotation check script (#24999) Why are these changes needed? Add API stability annotations for datasource classes, and add a linter to check all data classes have appropriate annotations.
14
0
31,815
7
4
21
def upsample_2d(x, k=None, factor=2, gain=1): r assert isinstance(factor, int) and factor >= 1 if k is None: k = [1] * factor k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * (gain * (factor**2)) p = k.shape[0] - factor return upfirdn2d_native(x, paddle.to_tensor(k), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2))
modules/image/text_to_image/stable_diffusion/diffusers/models/resnet.py
209
PaddleHub
{ "docstring": "Upsample2D a batch of 2D images with the given filter.\n\n Args:\n Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given\n filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified\n `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a:\n multiple of the upsampling factor.\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,\n C]`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]`\n ", "language": "en", "n_whitespaces": 215, "n_words": 148, "vocab_size": 89 }
65
Python
45
a6790a651a12eb391060e533868bf0ba197f6f7e
resnet.py
50,724
27
130
upsample_2d
https://github.com/PaddlePaddle/PaddleHub.git
Add stable diffusion module
105
0
10,204
14
2
4
def revert(self): if self._backup: self.set_state(self._backup) self._backup = None
mitmproxy/flow.py
42
mitmproxy
{ "docstring": "\n Revert to the last backed up state.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
8
Python
8
b3587b52b25077f68116b9852b041d33e7fc6601
flow.py
251,362
4
24
revert
https://github.com/mitmproxy/mitmproxy.git
make it black!
44
0
73,697
10
5
17
def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0): labels_true, labels_pred = check_clusterings(labels_true, labels_pred) if len(labels_true) == 0: return 1.0, 1.0, 1.0 entropy_C = entropy(labels_true) entropy_K = entropy(labels_pred) contingency = contingency_matrix(labels_true, labels_pred, sparse=True) MI = mutual_info_score(None, None, contingency=contingency) homogeneity = MI / (entropy_C) if entropy_C else 1.0 completeness = MI / (entropy_K) if entropy_K else 1.0 if homogeneity + completeness == 0.0: v_measure_score = 0.0 else: v_measure_score = ( (1 + beta) * homogeneity * completeness / (beta * homogeneity + completeness) ) return homogeneity, completeness, v_measure_score
sklearn/metrics/cluster/_supervised.py
205
scikit-learn
{ "docstring": "Compute the homogeneity and completeness and V-Measure scores at once.\n\n Those metrics are based on normalized conditional entropy measures of\n the clustering labeling to evaluate given the knowledge of a Ground\n Truth class labels of the same samples.\n\n A clustering result satisfies homogeneity if all of its clusters\n contain only data points which are members of a single class.\n\n A clustering result satisfies completeness if all the data points\n that are members of a given class are elements of the same cluster.\n\n Both scores have positive values between 0.0 and 1.0, larger values\n being desirable.\n\n Those 3 metrics are independent of the absolute values of the labels:\n a permutation of the class or cluster label values won't change the\n score values in any way.\n\n V-Measure is furthermore symmetric: swapping ``labels_true`` and\n ``label_pred`` will give the same score. This does not hold for\n homogeneity and completeness. V-Measure is identical to\n :func:`normalized_mutual_info_score` with the arithmetic averaging\n method.\n\n Read more in the :ref:`User Guide <homogeneity_completeness>`.\n\n Parameters\n ----------\n labels_true : int array, shape = [n_samples]\n Ground truth class labels to be used as a reference.\n\n labels_pred : array-like of shape (n_samples,)\n Gluster labels to evaluate.\n\n beta : float, default=1.0\n Ratio of weight attributed to ``homogeneity`` vs ``completeness``.\n If ``beta`` is greater than 1, ``completeness`` is weighted more\n strongly in the calculation. If ``beta`` is less than 1,\n ``homogeneity`` is weighted more strongly.\n\n Returns\n -------\n homogeneity : float\n Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.\n\n completeness : float\n Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.\n\n v_measure : float\n Harmonic mean of the first two.\n\n See Also\n --------\n homogeneity_score : Homogeneity metric of cluster labeling.\n completeness_score : Completeness metric of cluster labeling.\n v_measure_score : V-Measure (NMI with arithmetic mean option).\n ", "language": "en", "n_whitespaces": 457, "n_words": 292, "vocab_size": 166 }
83
Python
48
1ac8ea14847cad8bec5ac49a01013beef4361f79
_supervised.py
260,492
20
151
homogeneity_completeness_v_measure
https://github.com/scikit-learn/scikit-learn.git
DOC Ensure homogeneity_completeness_v_measure passes numpydoc validation (#23942)
191
0
76,288
15
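A usage sketch through scikit-learn's public metrics API, with a tiny labelling where homogeneity is perfect but completeness is not.

from sklearn.metrics import homogeneity_completeness_v_measure

labels_true = [0, 0, 1, 1]
labels_pred = [0, 0, 1, 2]  # class 1 is split across two clusters

h, c, v = homogeneity_completeness_v_measure(labels_true, labels_pred)
print(h)  # 1.0: every cluster holds a single class
print(c)  # < 1.0: class 1 is not contained in one cluster
print(v)  # harmonic mean of the two (beta=1.0)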
3
8
def leaf_symbols(self) -> Iterable[Symbol]: for arg in self.arguments: if isinstance(arg, SymbolicExpression): yield from arg.leaf_symbols()
nni/mutable/symbol.py
54
nni
{ "docstring": "\n Return a generator of all leaf symbols.\n\n Useful for when you want to inspect when the symbols come from.\n No deduplication even if the symbols has duplicates.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 24 }
14
Python
14
8f454f3bf29e2c3cd0d359231a46edd8ee768d42
symbol.py
113,538
10
33
leaf_symbols
https://github.com/microsoft/nni.git
Mutable V3 (Stage 2) - Symbolic execution engine (#5195)
54
0
24,940
12
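A standalone sketch of the recursion pattern used above, with minimal stand-in classes rather than nni's real Symbol and SymbolicExpression. The assumption here is that a leaf Symbol overrides leaf_symbols() to yield itself, which is what lets the recursion bottom out at the leaves:

from typing import Iterable, List


class SymbolicExpression:
    # Hypothetical minimal stand-in: an expression node holding sub-expressions.
    def __init__(self, arguments: List["SymbolicExpression"]) -> None:
        self.arguments = arguments

    def leaf_symbols(self) -> Iterable["Symbol"]:
        # Recurse into nested expressions; leaves yield themselves (see Symbol).
        for arg in self.arguments:
            if isinstance(arg, SymbolicExpression):
                yield from arg.leaf_symbols()


class Symbol(SymbolicExpression):
    # Assumed behavior: a leaf symbol is itself an expression that yields itself.
    def __init__(self, label: str) -> None:
        super().__init__([])
        self.label = label

    def leaf_symbols(self) -> Iterable["Symbol"]:
        yield self


x, y = Symbol("x"), Symbol("y")
expr = SymbolicExpression([x, SymbolicExpression([y, x])])
print([s.label for s in expr.leaf_symbols()])  # ['x', 'y', 'x'] -- no deduplication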
2
8
def get_existing_payment_request_amount(ref_dt, ref_dn):
    existing_payment_request_amount = frappe.db.sql(
        """
        select sum(grand_total)
        from `tabPayment Request`
        where
            reference_doctype = %s
            and reference_name = %s
            and docstatus = 1
            and (status != 'Paid'
            or (payment_channel = 'Phone'
                and status = 'Paid'))
        """,
        (ref_dt, ref_dn),
    )

    return flt(existing_payment_request_amount[0][0]) if existing_payment_request_amount else 0
erpnext/accounts/doctype/payment_request/payment_request.py
62
erpnext
{ "docstring": "\n\tGet the existing payment request which are unpaid or partially paid for payment channel other than Phone\n\tand get the summation of existing paid payment request for Phone payment channel.\n\t\n\t\tselect sum(grand_total)\n\t\tfrom `tabPayment Request`\n\t\twhere\n\t\t\treference_doctype = %s\n\t\t\tand reference_name = %s\n\t\t\tand docstatus = 1\n\t\t\tand (status != 'Paid'\n\t\t\tor (payment_channel = 'Phone'\n\t\t\t\tand status = 'Paid'))\n\t", "language": "en", "n_whitespaces": 48, "n_words": 59, "vocab_size": 40 }
16
Python
15
494bd9ef78313436f0424b918f200dab8fc7c20b
payment_request.py
64,926
16
40
get_existing_payment_request_amount
https://github.com/frappe/erpnext.git
style: format code with black
10
0
13,755
10
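A hedged usage sketch for the helper above. The doctype and document name are made-up placeholders, and the call assumes a Frappe/ERPNext site context is already initialized:

# Hypothetical reference values -- any submitted document that payment
# requests can point at (e.g. a Sales Order) would work here.
outstanding = get_existing_payment_request_amount("Sales Order", "SO-00001")
print(outstanding)  # 0.0 if no matching payment requests exist yet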
2
24
def argsort(self, axis=0, kind="quicksort", order=None) -> Series:
    values = self._values
    mask = isna(values)

    if mask.any():
        result = np.full(len(self), -1, dtype=np.intp)
        notmask = ~mask
        result[notmask] = np.argsort(values[notmask], kind=kind)
    else:
        result = np.argsort(values, kind=kind)

    res = self._constructor(result, index=self.index, name=self.name, dtype=np.intp)
    return res.__finalize__(self, method="argsort")
pandas/core/series.py
203
pandas
{ "docstring": "\n Return the integer indices that would sort the Series values.\n\n Override ndarray.argsort. Argsorts the value, omitting NA/null values,\n and places the result in the same locations as the non-NA values.\n\n Parameters\n ----------\n axis : {0 or 'index'}\n Unused. Parameter needed for compatibility with DataFrame.\n kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See :func:`numpy.sort` for more\n information. 'mergesort' and 'stable' are the only stable algorithms.\n order : None\n Has no effect but is accepted for compatibility with numpy.\n\n Returns\n -------\n Series[np.intp]\n Positions of values within the sort order with -1 indicating\n nan values.\n\n See Also\n --------\n numpy.ndarray.argsort : Returns the indices that would sort this array.\n ", "language": "en", "n_whitespaces": 282, "n_words": 110, "vocab_size": 82 }
41
Python
32
244f747bb63f45c1c439193f0672c6162853b168
series.py
166,613
37
131
argsort
https://github.com/pandas-dev/pandas.git
make series axis parameter docs consistent (#47109) * make series docs consistent add series unused param info to DF docs * fix trailing whitespace * fix docs build * add unused * add or update docs for all series methods * small fix * fix line length * fix param order * fix param order * add * add backticks to None and fix space Co-authored-by: uncjackg <[email protected]>
134
0
39,842
12
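A small usage sketch for the method above, showing the NA-handling path: -1 marks the missing value, and with the implementation shown the remaining entries are argsort positions within the non-NA subset:

import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 1.0, 2.0], name="vals")
# With the implementation shown above this should print [1, -1, 2, 0]:
# -1 fills the NaN slot, the other entries argsort the non-NA values.
print(s.argsort().tolist())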
1
27
async def test_get_image_disabled(hass):
    patch_key, entity_id, config_entry = _setup(CONFIG_ANDROIDTV_DEFAULT)
    config_entry.add_to_hass(hass)
    hass.config_entries.async_update_entry(
        config_entry, options={CONF_SCREENCAP: False}
    )

    with patchers.patch_connect(True)[patch_key], patchers.patch_shell(
        SHELL_RESPONSE_OFF
    )[patch_key]:
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    with patchers.patch_shell("11")[patch_key]:
        await async_update_entity(hass, entity_id)

    media_player_name = "media_player." + slugify(
        CONFIG_ANDROIDTV_DEFAULT[TEST_ENTITY_NAME]
    )
    state = hass.states.get(media_player_name)
    assert "entity_picture_local" not in state.attributes
    assert "entity_picture" not in state.attributes
tests/components/androidtv/test_media_player.py
218
core
{ "docstring": "Test that the screencap option can disable entity_picture.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
49
Python
38
d989e4373d576c403790c9a7e5eb7a29d08e3c47
test_media_player.py
317,428
19
130
test_get_image_disabled
https://github.com/home-assistant/core.git
Remove websocket_api send_big_result (#75452)
130
0
115,995
11