Dataset columns (name, dtype, observed range of values or string lengths):

complexity       int64     1 - 56
n_identifiers    int64     1 - 114
code             string    lengths 19 - 12.7k
path             string    lengths 8 - 134
n_ast_nodes      int64     12 - 2.35k
ast_errors       string    lengths 0 - 4.01k
repo             string    lengths 3 - 28
documentation    dict
n_words          int64     2 - 866
language         string    1 class (Python)
vocab_size       int64     2 - 323
commit_id        string    lengths 40 - 40
file_name        string    lengths 5 - 79
id               int64     243 - 338k
nloc             int64     1 - 228
token_counts     int64     5 - 1.4k
fun_name         string    lengths 1 - 77
url              string    lengths 31 - 60
commit_message   string    lengths 3 - 15.3k
n_whitespaces    int64     1 - 3.23k
n_ast_errors     int64     0 - 20
d_id             int64     74 - 121k
ast_levels       int64     4 - 29
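The records that follow give one field value per line, in the column order above; rows whose ast_errors field is empty appear to skip that line. This layout resembles a Hugging Face dataset-viewer dump, so as a minimal sketch of how such a table is typically consumed (assuming the data is published on the Hugging Face Hub; the dataset name below is a hypothetical placeholder, since no name appears in this dump), a row could be loaded and inspected like this:

# Minimal sketch: load the dataset and print a few fields of one record.
# Assumption: the data is hosted on the Hugging Face Hub; the name
# "user/python-functions" is a placeholder, not taken from this dump.
from datasets import load_dataset

ds = load_dataset("user/python-functions", split="train")
row = ds[0]

# Field names below come from the schema listed above.
print(row["repo"], row["path"], row["fun_name"])
print("nloc:", row["nloc"], "tokens:", row["token_counts"], "complexity:", row["complexity"])
print(row["code"])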
2
9
def get_uri(self) -> str:
    conn = self.get_connection(getattr(self, self.conn_name_attr))
    conn.schema = self.__schema or conn.schema
    return conn.get_uri()
airflow/hooks/dbapi.py
66
airflow
{ "docstring": "\n Extract the URI from the connection.\n\n :return: the extracted uri.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 8 }
15
Python
13
59c450ee5425a2d23ef813dbf219cde14df7c85c
dbapi.py
45,221
9
40
get_uri
https://github.com/apache/airflow.git
Make DbApiHook use get_uri from Connection (#21764) DBApi has its own get_uri method which does not deal with quoting properly and neither with empty passwords. Connection also has a get_uri method that deals properly with the above issues. This also fixes issues with RFC compliancy.
43
0
8,509
11
1
22
def test_reading_post_data_raises_unreadable_post_error(self):
    req = self._get_POST_request_with_token()
    mw = CsrfViewMiddleware(post_form_view)
    mw.process_request(req)
    resp = mw.process_view(req, post_form_view, (), {})
    self.assertIsNone(resp)

    req = self._get_POST_request_with_token(request_class=PostErrorRequest)
    req.post_error = UnreadablePostError("Error reading input data.")
    mw.process_request(req)
    with self.assertLogs("django.security.csrf", "WARNING") as cm:
        resp = mw.process_view(req, post_form_view, (), {})
    self.assertEqual(resp.status_code, 403)
    self.assertEqual(
        cm.records[0].getMessage(),
        "Forbidden (%s): " % REASON_CSRF_TOKEN_MISSING,
    )
tests/csrf_tests/tests.py
214
django
{ "docstring": "\n An UnreadablePostError raised while reading the POST data should be\n handled by the middleware.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
47
Python
35
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
202,376
16
129
test_reading_post_data_raises_unreadable_post_error
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
171
0
50,092
11
2
5
def broadcast_shapes(*shapes):
    # NOTE: We have both cached and uncached versions to handle Tracers in shapes.
    try:
        return _broadcast_shapes_cached(*shapes)
    except:
        return _broadcast_shapes_uncached(*shapes)


@cache()
jax/_src/lax/lax.py
52
@cache()
jax
{ "docstring": "Returns the shape that results from NumPy broadcasting of `shapes`.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
23
Python
22
78ed03c4c2970e5e0d11f14a8d4fc968a4efbca2
lax.py
122,213
5
23
broadcast_shapes
https://github.com/google/jax.git
[typing] add annotations to jax.numpy.linalg
32
1
27,122
11
1
3
def fit(self) -> Any: return None
freqtrade/freqai/freqai_interface.py
19
freqtrade
{ "docstring": "\n Most regressors use the same function names and arguments e.g. user \n can drop in LGBMRegressor in place of CatBoostRegressor and all data\n management will be properly handled by Freqai.\n :params:\n :data_dictionary: the dictionary constructed by DataHandler to hold \n all the training and test data/labels.\n ", "language": "en", "n_whitespaces": 96, "n_words": 44, "vocab_size": 37 }
6
Python
6
fc837c4daa27a18ff0e86128f4d52089b88fa5fb
freqai_interface.py
149,761
10
10
fit
https://github.com/freqtrade/freqtrade.git
add freqao backend machinery, user interface, documentation
24
0
34,519
6
1
2
def sliders(self): return self["sliders"]
packages/python/plotly/plotly/graph_objs/_layout.py
22
plotly.py
{ "docstring": "\n The 'sliders' property is a tuple of instances of\n Slider that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.Slider\n - A list or tuple of dicts of string/value properties that\n will be passed to the Slider constructor\n\n Supported dict properties:\n\n active\n Determines which button (by index starting from\n 0) is considered active.\n activebgcolor\n Sets the background color of the slider grip\n while dragging.\n bgcolor\n Sets the background color of the slider.\n bordercolor\n Sets the color of the border enclosing the\n slider.\n borderwidth\n Sets the width (in px) of the border enclosing\n the slider.\n currentvalue\n :class:`plotly.graph_objects.layout.slider.Curr\n entvalue` instance or dict with compatible\n properties\n font\n Sets the font of the slider step labels.\n len\n Sets the length of the slider This measure\n excludes the padding of both ends. That is, the\n slider's length is this length minus the\n padding on both ends.\n lenmode\n Determines whether this slider length is set in\n units of plot \"fraction\" or in *pixels. Use\n `len` to set the value.\n minorticklen\n Sets the length in pixels of minor step tick\n marks\n name\n When used in a template, named items are\n created in the output figure in addition to any\n items the figure already has in this array. You\n can modify these items in the output figure by\n making your own item with `templateitemname`\n matching this `name` alongside your\n modifications (including `visible: false` or\n `enabled: false` to hide it). Has no effect\n outside of a template.\n pad\n Set the padding of the slider component along\n each side.\n steps\n A tuple of :class:`plotly.graph_objects.layout.\n slider.Step` instances or dicts with compatible\n properties\n stepdefaults\n When used in a template (as\n layout.template.layout.slider.stepdefaults),\n sets the default property values to use for\n elements of layout.slider.steps\n templateitemname\n Used to refer to a named item in this array in\n the template. Named items from the template\n will be created even without a matching item in\n the input figure, but you can modify one by\n making an item with `templateitemname` matching\n its `name`, alongside your modifications\n (including `visible: false` or `enabled: false`\n to hide it). If there is no template or no\n matching item, this item will be hidden unless\n you explicitly show it with `visible: true`.\n tickcolor\n Sets the color of the border enclosing the\n slider.\n ticklen\n Sets the length in pixels of step tick marks\n tickwidth\n Sets the tick width (in px).\n transition\n :class:`plotly.graph_objects.layout.slider.Tran\n sition` instance or dict with compatible\n properties\n visible\n Determines whether or not the slider is\n visible.\n x\n Sets the x position (in normalized coordinates)\n of the slider.\n xanchor\n Sets the slider's horizontal position anchor.\n This anchor binds the `x` position to the\n \"left\", \"center\" or \"right\" of the range\n selector.\n y\n Sets the y position (in normalized coordinates)\n of the slider.\n yanchor\n Sets the slider's vertical position anchor This\n anchor binds the `y` position to the \"top\",\n \"middle\" or \"bottom\" of the range selector.\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.Slider]\n ", "language": "en", "n_whitespaces": 2252, "n_words": 479, "vocab_size": 216 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_layout.py
227,351
2
11
sliders
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,024
7
15
34
def get_dataset_path(path, annotation, image_dir):
    if _dataset_exists(path, annotation, image_dir):
        return path

    data_name = os.path.split(path.strip().lower())[-1]
    if data_name not in DOWNLOAD_DATASETS_LIST:
        raise ValueError(
            "Dataset {} is not valid for reason above, please check again.".
            format(osp.realpath(path)))
    else:
        logger.WARNING(
            "Dataset {} is not valid for reason above, try searching {} or "
            "downloading dataset...".format(osp.realpath(path), DATASET_HOME))

    for name, dataset in DATASETS.items():
        if data_name == name:
            logger.debug("Parse dataset_dir {} as dataset "
                         "{}".format(path, name))
            data_dir = osp.join(DATASET_HOME, name)

            if name == "spine_coco":
                if _dataset_exists(data_dir, annotation, image_dir):
                    return data_dir

            # For voc, only check dir VOCdevkit/VOC2012, VOCdevkit/VOC2007
            if name in ['voc', 'fruit', 'roadsign_voc']:
                exists = True
                for sub_dir in dataset[1]:
                    check_dir = osp.join(data_dir, sub_dir)
                    if osp.exists(check_dir):
                        logger.info("Found {}".format(check_dir))
                    else:
                        exists = False
                if exists:
                    return data_dir

            # voc exist is checked above, voc is not exist here
            check_exist = name != 'voc' and name != 'fruit' and name != 'roadsign_voc'
            for url, md5sum in dataset[0]:
                get_path(url, data_dir, md5sum, check_exist)

            # voc should create list after download
            if name == 'voc':
                create_voc_list(data_dir)
            return data_dir

    raise ValueError("Dataset automaticly downloading Error.")
ppdet/utils/download.py
424
PaddleDetection
{ "docstring": "\n If path exists, return path.\n Otherwise, get dataset path from DATASET_HOME, if not exists,\n download it.\n ", "language": "en", "n_whitespaces": 29, "n_words": 16, "vocab_size": 14 }
170
Python
102
630304e0b66c0528ecaa3bf2e88b44a14b7f3383
download.py
211,846
37
253
get_dataset_path
https://github.com/PaddlePaddle/PaddleDetection.git
fix auto download logger info (#7550)
639
0
53,148
20
1
4
def is_package(self, fullname): raise ImportError
python3.10.4/Lib/importlib/abc.py
18
XX-Net
{ "docstring": "Optional method which when implemented should return whether the\n module is a package. The fullname is a str. Returns a bool.\n\n Raises ImportError if the module cannot be found.\n ", "language": "en", "n_whitespaces": 52, "n_words": 29, "vocab_size": 24 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
abc.py
218,183
2
10
is_package
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
19
0
55,185
6
3
18
def test_connect_and_rollback(self):
    new_connection = connection.copy()
    try:
        # Ensure the database default time zone is different than
        # the time zone in new_connection.settings_dict. We can
        # get the default time zone by reset & show.
        with new_connection.cursor() as cursor:
            cursor.execute("RESET TIMEZONE")
            cursor.execute("SHOW TIMEZONE")
            db_default_tz = cursor.fetchone()[0]
        new_tz = "Europe/Paris" if db_default_tz == "UTC" else "UTC"
        new_connection.close()

        # Invalidate timezone name cache, because the setting_changed
        # handler cannot know about new_connection.
        del new_connection.timezone_name

        # Fetch a new connection with the new_tz as default
        # time zone, run a query and rollback.
        with self.settings(TIME_ZONE=new_tz):
            new_connection.set_autocommit(False)
            new_connection.rollback()

            # Now let's see if the rollback rolled back the SET TIME ZONE.
            with new_connection.cursor() as cursor:
                cursor.execute("SHOW TIMEZONE")
                tz = cursor.fetchone()[0]
            self.assertEqual(new_tz, tz)
    finally:
        new_connection.close()
tests/backends/postgresql/tests.py
237
django
{ "docstring": "\n PostgreSQL shouldn't roll back SET TIME ZONE, even if the first\n transaction is rolled back (#17062).\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
119
Python
79
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
201,749
19
125
test_connect_and_rollback
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
448
0
49,987
15
3
10
def _get_container_name(self) -> Optional[str]:
    # Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+` in the end
    if not self.name:
        return None

    return (
        slugify(
            self.name,
            lowercase=False,
            # Docker does not limit length but URL limits apply eventually so
            # limit the length for safety
            max_length=250,
            # Docker allows these characters for container names
            regex_pattern=r"[^a-zA-Z0-9_.-]+",
        ).lstrip(
            # Docker does not allow leading underscore, dash, or period
            "_-."
        )
        # Docker does not allow 0 character names so cast to null if the name is
        # empty after slufification
        or None
    )
src/prefect/infrastructure/docker.py
85
prefect
{ "docstring": "\n Generates a container name to match the configured name, ensuring it is Docker\n compatible.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
85
Python
58
2bff1047c0c183ec79e606b9a1c4ac966e23c8d1
docker.py
57,431
18
49
_get_container_name
https://github.com/PrefectHQ/prefect.git
Add tests for docker container
332
0
11,652
13
6
24
def ray_dask_get_sync(dsk, keys, **kwargs):
    ray_callbacks = kwargs.pop("ray_callbacks", None)
    persist = kwargs.pop("ray_persist", False)

    with local_ray_callbacks(ray_callbacks) as ray_callbacks:
        # Unpack the Ray-specific callbacks.
        (
            ray_presubmit_cbs,
            ray_postsubmit_cbs,
            ray_pretask_cbs,
            ray_posttask_cbs,
            ray_postsubmit_all_cbs,
            ray_finish_cbs,
        ) = unpack_ray_callbacks(ray_callbacks)
        # NOTE: We hijack Dask's `get_async` function, injecting a different
        # task executor.
        object_refs = get_async(
            _apply_async_wrapper(
                apply_sync,
                _rayify_task_wrapper,
                ray_presubmit_cbs,
                ray_postsubmit_cbs,
                ray_pretask_cbs,
                ray_posttask_cbs,
            ),
            1,
            dsk,
            keys,
            **kwargs,
        )
        if ray_postsubmit_all_cbs is not None:
            for cb in ray_postsubmit_all_cbs:
                cb(object_refs, dsk)
        # NOTE: We explicitly delete the Dask graph here so object references
        # are garbage-collected before this function returns, i.e. before all
        # Ray tasks are done. Otherwise, no intermediate objects will be
        # cleaned up until all Ray tasks are done.
        del dsk
        if persist:
            result = object_refs
        else:
            result = ray_get_unpack(object_refs)
        if ray_finish_cbs is not None:
            for cb in ray_finish_cbs:
                cb(result)
    return result


@dataclass
python/ray/util/dask/scheduler.py
219
@dataclass
ray
{ "docstring": "\n A synchronous Dask-Ray scheduler. This scheduler will send top-level\n (non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will\n wait for the tasks to finish executing, fetch the results, and repackage\n them into the appropriate Dask collections. This particular scheduler\n submits Ray tasks synchronously, which can be useful for debugging.\n\n This can be passed directly to `dask.compute()`, as the scheduler:\n\n >>> dask.compute(obj, scheduler=ray_dask_get_sync)\n\n You can override the currently active global Dask-Ray callbacks (e.g.\n supplied via a context manager):\n\n >>> dask.compute(\n obj,\n scheduler=ray_dask_get_sync,\n ray_callbacks=some_ray_dask_callbacks,\n )\n\n Args:\n dsk: Dask graph, represented as a task DAG dictionary.\n keys (List[str]): List of Dask graph keys whose values we wish to\n compute and return.\n\n Returns:\n Computed values corresponding to the provided keys.\n ", "language": "en", "n_whitespaces": 231, "n_words": 119, "vocab_size": 86 }
137
Python
99
905258dbc19753c81039f993477e7ab027960729
scheduler.py
140,551
38
138
ray_dask_get_sync
https://github.com/ray-project/ray.git
Clean up docstyle in python modules and add LINT rule (#25272)
563
1
32,022
13
2
6
async def async_refresh_providers(self) -> None:
    old_state = self._rtsp_to_webrtc
    self._rtsp_to_webrtc = await self._async_use_rtsp_to_webrtc()
    if old_state != self._rtsp_to_webrtc:
        self.async_write_ha_state()
homeassistant/components/camera/__init__.py
62
core
{ "docstring": "Determine if any of the registered providers are suitable for this entity.\n\n This affects state attributes, so it should be invoked any time the registered\n providers or inputs to the state attributes change.\n\n Returns True if any state was updated (and needs to be written)\n ", "language": "en", "n_whitespaces": 73, "n_words": 45, "vocab_size": 34 }
17
Python
14
81aff973ea421e848d2f3e084f123bf108bd808e
__init__.py
308,346
12
35
async_refresh_providers
https://github.com/home-assistant/core.git
Keep entity state management within entity (#63183) Simplify the entity state management for webrtc providers, incurring extra state writes on startup. Followup post-review comments for PR #62962
56
0
107,106
9
1
14
def test_runtime_install_error_message(call_ray_start):
    with pytest.raises(ConnectionAbortedError) as excinfo:
        ray.client("localhost:25031").env({"pip": ["ray-this-doesnt-exist"]}).connect()
    assert "No matching distribution found for ray-this-doesnt-exist" in str(
        excinfo.value
    ), str(excinfo.value)

    ray.util.disconnect()
python/ray/tests/test_client_proxy.py
110
ray
{ "docstring": "\n Check that an error while preparing the runtime environment for the client\n server yields an actionable, clear error on the *client side*.\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 18 }
21
Python
21
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_client_proxy.py
131,426
7
60
test_runtime_install_error_message
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
50
0
29,523
15
1
8
def add_handler(self, handler):
    sympy_deprecation_warning(
        ,
        deprecated_since_version="1.8",
        active_deprecations_target='deprecated-askhandler',
    )
    self.handlers.append(handler)
sympy/assumptions/assume.py
48
sympy
{ "docstring": "\n The AskHandler system is deprecated. Predicate.add_handler()\n should be replaced with the multipledispatch handler of Predicate.\n ", "language": "en", "n_whitespaces": 49, "n_words": 15, "vocab_size": 15 }
9
Python
9
ad766d1c02943e86f50559abfd0c72e582c9ca6a
assume.py
196,757
10
28
add_handler
https://github.com/sympy/sympy.git
Update the AskHandler deprecation warnings n.b., the issue number in the original warning message was wrong. It should have been #20837.
62
0
48,153
9
2
34
def deserialize(config, custom_objects=None):
    # loss_scale_optimizer has a direct dependency of optimizer, import here
    # rather than top to avoid the cyclic dependency.
    from keras.mixed_precision import loss_scale_optimizer  # pylint: disable=g-import-not-at-top
    all_classes = {
        'adadelta': adadelta_v2.Adadelta,
        'adagrad': adagrad_v2.Adagrad,
        'adam': adam_v2.Adam,
        'adamax': adamax_v2.Adamax,
        'experimentaladadelta': adadelta_experimental.Adadelta,
        'experimentaladagrad': adagrad_experimental.Adagrad,
        'experimentaladam': adam_experimental.Adam,
        'experimentalsgd': sgd_experimental.SGD,
        'nadam': nadam_v2.Nadam,
        'rmsprop': rmsprop_v2.RMSprop,
        'sgd': gradient_descent_v2.SGD,
        'ftrl': ftrl.Ftrl,
        'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,
        'lossscaleoptimizerv3': loss_scale_optimizer.LossScaleOptimizerV3,
        # LossScaleOptimizerV1 was an old version of LSO that was removed.
        # Deserializing it turns it into a LossScaleOptimizer
        'lossscaleoptimizerv1': loss_scale_optimizer.LossScaleOptimizer,
    }

    # Make deserialization case-insensitive for built-in optimizers.
    if config['class_name'].lower() in all_classes:
        config['class_name'] = config['class_name'].lower()
    return deserialize_keras_object(
        config,
        module_objects=all_classes,
        custom_objects=custom_objects,
        printable_module_name='optimizer')


@keras_export('keras.optimizers.get')
keras/optimizers/__init__.py
271
@keras_export('keras.optimizers.get')
keras
{ "docstring": "Inverse of the `serialize` function.\n\n Args:\n config: Optimizer configuration dictionary.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras Optimizer instance.\n ", "language": "en", "n_whitespaces": 57, "n_words": 32, "vocab_size": 30 }
103
Python
89
8ecef127f70db723c158dbe9ed3268b3d610ab55
__init__.py
269,012
26
152
deserialize
https://github.com/keras-team/keras.git
Remove experimental Keras mixed precision API. The non-experimental mixed precision API was added in TensorFlow 2.4, and since then the experimental API has been deprecated. This change removes the experimental API. Deserializing the now-removed PolicyV1 and LossScaleOptimizerV1 classes is still supported, if they were serialized with get_config() prior to this change. These classes are deserialized into the non-experimental Policy and LossScaleOptimizer classes, which has been the case since TensorFlow 2.4. Eventually, support for deserializing these classes may be removed. PiperOrigin-RevId: 429410341
220
1
79,830
12
2
10
def construct_edit_url(self, instance):
    if self.edit_url_name is None:
        raise ImproperlyConfigured(
            "%r must define edit_url_name or override construct_edit_url"
            % type(self)
        )
    return reverse(self.edit_url_name, args=(quote(instance.pk),))
wagtail/admin/admin_url_finder.py
72
wagtail
{ "docstring": "\n Return the edit URL for the given instance - regardless of whether the user can access it -\n or None if no edit URL is available.\n ", "language": "en", "n_whitespaces": 48, "n_words": 26, "vocab_size": 21 }
22
Python
22
d10f15e55806c6944827d801cd9c2d53f5da4186
admin_url_finder.py
71,045
7
44
construct_edit_url
https://github.com/wagtail/wagtail.git
Reformat with black
95
0
15,608
12
1
14
def test_pagination_offset_without_orderby(self):
    response = self.get_response(
        self.organization.slug,
        field="count(sentry.transactions.measurements.lcp)",
        datasource="snuba",
        groupBy="transaction",
        cursor=Cursor(0, 1),
    )
    assert response.status_code == 400
    print(response.json())
    assert response.json()["detail"] == (
        "'cursor' is only supported in combination with 'orderBy'"
    )
tests/sentry/api/endpoints/test_organization_metrics.py
113
sentry
{ "docstring": "\n Test that ensures an exception is raised when pagination `per_page` parameter is sent\n without order by being set\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
30
Python
27
6b29955072b4fbed6d8843ae193d65509e288f8f
test_organization_metrics.py
95,747
13
67
test_pagination_offset_without_orderby
https://github.com/getsentry/sentry.git
feat(metrics): Add pagination to OrganizationMetricsDataEndpoint [INGEST-851] (#31181) * feat(metrics): Add pagination to OrganizationMetricsDataEndpoint Adds new paginator class `MetricsDataSeriesPaginator` to add pagination to the response of api requests made to `OrganizationMetricsDataEndpoint`
145
0
19,228
11
13
92
def test_copy_page_nested_plugin(self): with self.settings(CMS_PERMISSION=False): # setup page 1 page_one = create_page( "Three Placeholder", "col_three.html", "en", position="last-child", published=True, in_navigation=True ) page_one_ph_one = page_one.placeholders.get(slot="col_sidebar") page_one_ph_two = page_one.placeholders.get(slot="col_left") page_one.placeholders.get(slot="col_right") # add the text plugin to placeholder one text_plugin_en = add_plugin(page_one_ph_one, "TextPlugin", "en", body="Hello World") self.assertEqual(text_plugin_en.id, CMSPlugin.objects.all()[0].id) self.assertEqual(text_plugin_en.get_children().count(), 0) pre_add_plugin_count = CMSPlugin.objects.count() self.assertEqual(pre_add_plugin_count, 1) ### # add a plugin to placeholder two ### pre_nesting_body = "<p>the nested text plugin with a link inside</p>" text_plugin_two = add_plugin(page_one_ph_two, "TextPlugin", "en", body=pre_nesting_body) text_plugin_two = self.reload(text_plugin_two) # prepare nesting plugin page_one_ph_two = self.reload(page_one_ph_two) text_plugin_two = self.reload(text_plugin_two) link_plugin = add_plugin(page_one_ph_two, "LinkPlugin", "en", target=text_plugin_two) link_plugin.name = "django-cms Link" link_plugin.external_link = "https://www.django-cms.org" link_plugin.parent = text_plugin_two link_plugin.save() link_plugin = self.reload(link_plugin) text_plugin_two = self.reload(text_plugin_two) in_txt = nesting_body = f"{text_plugin_two.body}<p>{(in_txt % (link_plugin.id))}</p>" # emulate the editor in admin that adds some txt for the nested plugin text_plugin_two.body = nesting_body text_plugin_two.save() text_plugin_two = self.reload(text_plugin_two) # the link is attached as a child? self.assertEqual(text_plugin_two.get_children().count(), 1) post_add_plugin_count = CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count() self.assertEqual(post_add_plugin_count, 3) page_one.save() # get the plugins from the original page page_one = self.reload(page_one) page_one_ph_one = page_one.placeholders.get(slot="col_sidebar") page_one_ph_two = page_one.placeholders.get(slot="col_left") page_one_ph_three = page_one.placeholders.get(slot="col_right") # verify that the plugins got created org_placeholder_one_plugins = page_one_ph_one.get_plugins() self.assertEqual(len(org_placeholder_one_plugins), 1) org_placeholder_two_plugins = page_one_ph_two.get_plugins() self.assertEqual(len(org_placeholder_two_plugins), 2) org_placeholder_three_plugins = page_one_ph_three.get_plugins() self.assertEqual(len(org_placeholder_three_plugins), 0) self.assertEqual(page_one.placeholders.count(), 3) placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count() self.assertEqual(placeholder_count, 3) self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 3) ## # setup page_copy_target page ## page_copy_target = create_page( "Three Placeholder - page copy target", "col_three.html", "en", position="last-child", published=True, in_navigation=True ) all_page_count = Page.objects.drafts().count() pre_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count() self.assertEqual(pre_copy_placeholder_count, 6) # copy the page superuser = self.get_superuser() with self.login_user_context(superuser): page_two = self.copy_page(page_one, page_copy_target) # validate the expected pages,placeholders,plugins,pluginbodies after_copy_page_plugin_count = CMSPlugin.objects.filter( 
placeholder__page__publisher_is_draft=True ).count() self.assertEqual(after_copy_page_plugin_count, 6) # check the amount of copied stuff after_copy_page_count = Page.objects.drafts().count() after_copy_placeholder_count = Placeholder.objects.filter( page__publisher_is_draft=True ).count() self.assertGreater(after_copy_page_count, all_page_count, "no new page after copy") self.assertGreater(after_copy_page_plugin_count, post_add_plugin_count, "plugin count is not grown") self.assertGreater( after_copy_placeholder_count, pre_copy_placeholder_count, "placeholder count is not grown" ) self.assertEqual(after_copy_page_count, 3, "no new page after copy") # original placeholder page_one = self.reload(page_one) page_one_ph_one = page_one.placeholders.get(slot="col_sidebar") page_one_ph_two = page_one.placeholders.get(slot="col_left") page_one_ph_three = page_one.placeholders.get(slot="col_right") # check if there are multiple pages assigned to this placeholders found_page = page_one_ph_one.page if page_one_ph_one else None self.assertEqual(found_page, page_one) found_page = page_one_ph_two.page if page_one_ph_two else None self.assertEqual(found_page, page_one) found_page = page_one_ph_three.page if page_one_ph_three else None self.assertEqual(found_page, page_one) page_two = self.reload(page_two) page_two_ph_one = page_two.placeholders.get(slot="col_sidebar") page_two_ph_two = page_two.placeholders.get(slot="col_left") page_two_ph_three = page_two.placeholders.get(slot="col_right") # check if there are multiple pages assigned to this placeholders found_page = page_two_ph_one.page if page_two_ph_one else None self.assertEqual(found_page, page_two) found_page = page_two_ph_two.page if page_two_ph_two else None self.assertEqual(found_page, page_two) found_page = page_two_ph_three.page if page_two_ph_three else None self.assertEqual(found_page, page_two) # check the stored placeholders org vs copy msg = 'placehoder ids copy:{} org:{} copied page {} are identical - tree broken'.format( page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk ) self.assertNotEqual(page_two_ph_one.pk, page_one_ph_one.pk, msg) msg = 'placehoder ids copy:{} org:{} copied page {} are identical - tree broken'.format( page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk ) self.assertNotEqual(page_two_ph_two.pk, page_one_ph_two.pk, msg) msg = 'placehoder ids copy:{} org:{} copied page {} are identical - tree broken'.format( page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk ) self.assertNotEqual(page_two_ph_three.pk, page_one_ph_three.pk, msg) # get the plugins from the original page org_placeholder_one_plugins = page_one_ph_one.get_plugins() self.assertEqual(len(org_placeholder_one_plugins), 1) org_placeholder_two_plugins = page_one_ph_two.get_plugins() self.assertEqual(len(org_placeholder_two_plugins), 2) org_placeholder_three_plugins = page_one_ph_three.get_plugins() self.assertEqual(len(org_placeholder_three_plugins), 0) # get the plugins from the copied page copied_placeholder_one_plugins = page_two_ph_one.get_plugins() self.assertEqual(len(copied_placeholder_one_plugins), 1) copied_placeholder_two_plugins = page_two_ph_two.get_plugins() self.assertEqual(len(copied_placeholder_two_plugins), 2) copied_placeholder_three_plugins = page_two_ph_three.get_plugins() self.assertEqual(len(copied_placeholder_three_plugins), 0) # verify the plugins got copied # placeholder 1 count_plugins_copied = len(copied_placeholder_one_plugins) count_plugins_org = len(org_placeholder_one_plugins) msg = f"plugin count {count_plugins_copied} 
{count_plugins_org} for placeholder one not equal" self.assertEqual(count_plugins_copied, count_plugins_org, msg) # placeholder 2 count_plugins_copied = len(copied_placeholder_two_plugins) count_plugins_org = len(org_placeholder_two_plugins) msg = f"plugin count {count_plugins_copied} {count_plugins_org} for placeholder two not equal" self.assertEqual(count_plugins_copied, count_plugins_org, msg) # placeholder 3 count_plugins_copied = len(copied_placeholder_three_plugins) count_plugins_org = len(org_placeholder_three_plugins) msg = f"plugin count {count_plugins_copied} {count_plugins_org} for placeholder three not equal" self.assertEqual(count_plugins_copied, count_plugins_org, msg) # verify the body of text plugin with nested link plugin # org to copied org_nested_text_plugin = None # do this iteration to find the real text plugin with the attached link # the inheritance mechanism for the cmsplugins works through # (tuple)get_plugin_instance() for x in org_placeholder_two_plugins: if x.plugin_type == "TextPlugin": instance = x.get_plugin_instance()[0] if instance.body.startswith(pre_nesting_body): org_nested_text_plugin = instance break copied_nested_text_plugin = None for x in copied_placeholder_two_plugins: if x.plugin_type == "TextPlugin": instance = x.get_plugin_instance()[0] if instance.body.startswith(pre_nesting_body): copied_nested_text_plugin = instance break msg = "original nested text plugin not found" self.assertNotEqual(org_nested_text_plugin, None, msg=msg) msg = "copied nested text plugin not found" self.assertNotEqual(copied_nested_text_plugin, None, msg=msg) # get the children ids of the texplugin with a nested link # to check if the body of the text is generated correctly org_link_child_plugin = org_nested_text_plugin.get_children()[0] copied_link_child_plugin = copied_nested_text_plugin.get_children()[0] # validate the textplugin body texts msg = "org plugin and copied plugin are the same" self.assertTrue(org_link_child_plugin.id != copied_link_child_plugin.id, msg) needle = "%s" msg = "child plugin id differs to parent in body" # linked child is in body self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg) msg = "copy: child plugin id differs to parent in body" self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg) # really nothing else msg = "child link plugin id differs to parent body" self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg) msg = "copy: child link plugin id differs to parent body" self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg) # now reverse lookup the placeholders from the plugins org_placeholder = org_link_child_plugin.placeholder copied_placeholder = copied_link_child_plugin.placeholder msg = "placeholder of the original plugin and copied plugin are the same" ok = (org_placeholder.id != copied_placeholder.id) self.assertTrue(ok, msg)
cms/tests/test_nested_plugins.py
2,353
django-cms
{ "docstring": "\n Test to verify that page copy with a nested plugin works\n page one - 3 placeholder\n col_sidebar: 1 text plugin\n col_left: 1 text plugin with nested link plugin\n col_right: no plugin\n page two (copy target)\n Verify copied page, placeholders, plugins and body text\n <cms-plugin id=\"%s\" title=\"Link\" alt=\"Link\"></cms-plugin>", "language": "en", "n_whitespaces": 139, "n_words": 47, "vocab_size": 36 }
866
Python
323
c1290c9ff89cb00caa5469129fd527e9d82cd820
test_nested_plugins.py
82,407
166
1,395
test_copy_page_nested_plugin
https://github.com/django-cms/django-cms.git
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <[email protected]> * ci: codespell config taken from #7292
3,232
0
17,382
15
3
8
def tick_bottom(self):
    label = True
    if 'label1On' in self._major_tick_kw:
        label = (self._major_tick_kw['label1On']
                 or self._major_tick_kw['label2On'])
    self.set_ticks_position('bottom')
    # If labels were turned off before this was called, leave them off.
    self.set_tick_params(which='both', labelbottom=label)
lib/matplotlib/axis.py
93
matplotlib
{ "docstring": "\n Move ticks and ticklabels (if present) to the bottom of the Axes.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
30
Python
28
f156db08eee54d285ab0fb4e031e48d078ba6aa3
axis.py
107,482
7
51
tick_bottom
https://github.com/matplotlib/matplotlib.git
DOC: More cleanup axes -> Axes
103
0
22,771
12
1
15
def test_sample_weights_validation():
    # scalar value but not positive
    X = [[1]]
    y = [1]
    weights = 0
    glm = _GeneralizedLinearRegressor()

    # Positive weights are accepted
    glm.fit(X, y, sample_weight=1)

    # 2d array
    weights = [[0]]
    with pytest.raises(ValueError, match="must be 1D array or scalar"):
        glm.fit(X, y, weights)

    # 1d but wrong length
    weights = [1, 0]
    msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
    with pytest.raises(ValueError, match=msg):
        glm.fit(X, y, weights)


@pytest.mark.parametrize("fit_intercept", ["not bool", 1, 0, [True]])
sklearn/linear_model/_glm/tests/test_glm.py
197
@pytest.mark.parametrize("fit_intercept", ["not bool", 1, 0, [True]])
scikit-learn
{ "docstring": "Test the raised errors in the validation of sample_weight.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
73
Python
52
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
test_glm.py
259,456
13
99
test_sample_weights_validation
https://github.com/scikit-learn/scikit-learn.git
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
131
1
75,786
11
1
6
def getoffset(self, text):
    deprecate("getoffset", 10, "getbbox")
    return self.font.getsize(text)[1]
src/PIL/ImageFont.py
48
Pillow
{ "docstring": "\n Returns the offset of given text. This is the gap between the\n starting coordinate and the first marking. Note that this gap is\n included in the result of :py:func:`~PIL.ImageFont.FreeTypeFont.getsize`.\n\n :param text: Text to measure.\n\n :return: A tuple of the x and y offset\n ", "language": "en", "n_whitespaces": 86, "n_words": 43, "vocab_size": 32 }
8
Python
8
406fe59242ad288bcd9f9fe663b227620eacd344
ImageFont.py
243,047
3
28
getoffset
https://github.com/python-pillow/Pillow.git
deprecate font.getsize and related functions
29
0
69,961
9
11
28
def build_index(cls, path, partition_ids, index_columns, storage_options):
    from pyarrow.parquet import read_table

    range_index = True
    column_names_to_read = []
    for column in index_columns:
        # According to https://arrow.apache.org/docs/python/generated/pyarrow.Schema.html,
        # only RangeIndex will be stored as metadata. Otherwise, the default behavior is
        # to store the index as a column.
        if isinstance(column, str):
            column_names_to_read.append(column)
            range_index = False
        elif column["name"] is not None:
            column_names_to_read.append(column["name"])

    # For the second check, let us consider the case where we have an empty dataframe,
    # that has a valid index.
    if range_index or (len(partition_ids) == 0 and len(column_names_to_read) != 0):
        fs, fs_path = cls._get_fs_and_fs_path(path, storage_options)
        complete_index = (
            read_table(fs_path, columns=column_names_to_read, filesystem=fs)
            .to_pandas()
            .index
        )
    # Empty DataFrame case
    elif len(partition_ids) == 0:
        return [], False
    else:
        index_ids = [part_id[0][1] for part_id in partition_ids if len(part_id) > 0]
        index_objs = cls.materialize(index_ids)
        complete_index = index_objs[0].append(index_objs[1:])
    return complete_index, range_index or (len(index_columns) == 0)
modin/core/io/column_stores/parquet_dispatcher.py
307
modin
{ "docstring": "\n Compute index and its split sizes of resulting Modin DataFrame.\n\n Parameters\n ----------\n path : Pathlike\n Path to dataset.\n partition_ids : list\n Array with references to the partitions data.\n index_columns : list\n List of index columns specified by pandas metadata.\n storage_options : dict\n Parameters for specific storage engine.\n\n Returns\n -------\n index : pandas.Index\n Index of resulting Modin DataFrame.\n needs_index_sync : bool\n Whether the partition indices need to be synced with frame\n index because there's no index column, or at least one\n index column is a RangeIndex.\n\n Notes\n -----\n See `build_partition` for more detail on the contents of partitions_ids.\n ", "language": "en", "n_whitespaces": 291, "n_words": 97, "vocab_size": 73 }
140
Python
106
4548012a6372b8ce79d7e07c9ae13fd7444a91c8
parquet_dispatcher.py
154,144
24
193
build_index
https://github.com/modin-project/modin.git
FIX-#4756: Correctly propagate `storage_options` in `read_parquet` (#4764) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Signed-off-by: Karthik Velayutham <[email protected]>
446
0
35,809
15
4
22
def test_grabclipboard(self):
    if sys.platform == "darwin":
        subprocess.call(["screencapture", "-cx"])
    elif sys.platform == "win32":
        p = subprocess.Popen(["powershell", "-command", "-"], stdin=subprocess.PIPE)
        p.stdin.write(
            b
        )
        p.communicate()
    else:
        if not shutil.which("wl-paste"):
            with pytest.raises(NotImplementedError) as e:
                ImageGrab.grabclipboard()
            assert (
                str(e.value)
                == "wl-paste is required for ImageGrab.grabclipboard() on Linux"
            )
            return

    ImageGrab.grabclipboard()
Tests/test_imagegrab.py
191
Pillow
{ "docstring": "[Reflection.Assembly]::LoadWithPartialName(\"System.Drawing\")\n[Reflection.Assembly]::LoadWithPartialName(\"System.Windows.Forms\")\n$bmp = New-Object Drawing.Bitmap 200, 200\n[Windows.Forms.Clipboard]::SetImage($bmp)", "language": "en", "n_whitespaces": 5, "n_words": 9, "vocab_size": 9 }
45
Python
38
ccac8540771120bdeb570ec5b7bbfc4e3e9a38dd
test_imagegrab.py
243,602
22
106
test_grabclipboard
https://github.com/python-pillow/Pillow.git
If available, use wl-paste for grabclipboard() on Linux
266
0
70,057
15
2
15
def test_symlink_exists_different_force(file, source):
    dif_source = source.parent / "dif_source.txt"
    target = source.parent / "symlink.lnk"
    target.symlink_to(dif_source)
    try:
        file.symlink(source, target, force=True)
        assert salt.utils.path.readlink(target) == str(source)
    finally:
        target.unlink()
tests/pytests/functional/modules/file/test_symlink.py
110
salt
{ "docstring": "\n Test symlink with an existing symlink to a different file with force=True\n Should destroy the existing symlink and generate a new one to the correct\n location\n ", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 19 }
24
Python
21
a35b29b2651bf33c5d5b45e64bc7765ffde4aff4
test_symlink.py
215,810
9
65
test_symlink_exists_different_force
https://github.com/saltstack/salt.git
Add some funtional tests Add functional tests for the following: - file.readlink - file.replace - file.symlink Remove unit tests for file.replace as they are duplicated in the added functional test
63
0
54,183
12
1
4
def _reset_logging_mixin():
    global __logging_mixin
    __logging_mixin = LoggingMixin(logger)
freqtrade/exchange/common.py
25
freqtrade
{ "docstring": "\n Reset global logging mixin - used in tests only.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
7
Python
6
682daa4e941abf2235e60d9ecd1ad029eec5d3c4
common.py
149,904
3
13
_reset_logging_mixin
https://github.com/freqtrade/freqtrade.git
Reset logging mixin to avoid random test failure
16
0
34,583
8
2
8
def partial_fit(self, X, y):
    if not hasattr(self, "coefs_"):
        self._validate_params()

    return self._fit(X, y, incremental=True)
sklearn/neural_network/_multilayer_perceptron.py
60
scikit-learn
{ "docstring": "Update the model with a single iteration over the given data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n\n y : ndarray of shape (n_samples,)\n The target values.\n\n Returns\n -------\n self : object\n Trained MLP model.\n ", "language": "en", "n_whitespaces": 131, "n_words": 42, "vocab_size": 35 }
13
Python
13
0206d3e08c0f0917ba2f1c65cb55569b97d9a9ba
_multilayer_perceptron.py
260,443
4
37
partial_fit
https://github.com/scikit-learn/scikit-learn.git
MAINT validate parameters for MLPRregressor and MLPClassifier (#23789) Co-authored-by: jeremie du boisberranger <[email protected]>
45
0
76,253
9
3
7
def unk_token(self) -> str:
    if self._unk_token is None:
        if self.verbose:
            logger.error("Using unk_token, but it is not set yet.")
        return None
    return str(self._unk_token)
src/transformers/tokenization_utils_base.py
61
transformers
{ "docstring": "\n `str`: Unknown token. Log an error if used while not having been set.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
22
Python
19
3eed5530ec74bb60ad9f8f612717d0f6ccf820f2
tokenization_utils_base.py
31,489
9
35
unk_token
https://github.com/huggingface/transformers.git
Fix properties of unset special tokens in non verbose mode (#17797) Co-authored-by: SaulLu <[email protected]>
80
0
5,764
12
1
2
def sizemin(self): return self["sizemin"]
packages/python/plotly/plotly/graph_objs/pointcloud/_marker.py
22
plotly.py
{ "docstring": "\n Sets the minimum size (in px) of the rendered marker points,\n effective when the `pointcloud` shows a million or more points.\n\n The 'sizemin' property is a number and may be specified as:\n - An int or float in the interval [0.1, 2]\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 104, "n_words": 45, "vocab_size": 40 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_marker.py
233,259
2
11
sizemin
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
64,703
7
3
17
def show_file(self, path=None, **options):
    if path is None:
        if "file" in options:
            warnings.warn(
                "The 'file' argument is deprecated and will be removed in Pillow "
                "10 (2023-07-01). Use 'path' instead.",
                DeprecationWarning,
            )
            path = options.pop("file")
        else:
            raise TypeError("Missing required argument: 'path'")
    subprocess.call(["open", "-a", "Preview.app", path])
    subprocess.Popen(
        [
            sys.executable,
            "-c",
            "import os, sys, time;time.sleep(20);os.remove(sys.argv[1])",
            path,
        ]
    )
    return 1


if sys.platform == "darwin":
    register(MacViewer)
src/PIL/ImageShow.py
163
Pillow
{ "docstring": "\n Display given file.\n\n Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,\n and will be removed in Pillow 10.0.0 (2023-07-01). ``path`` should be used\n instead.\n ", "language": "en", "n_whitespaces": 64, "n_words": 28, "vocab_size": 26 }
63
Python
57
8da80130dbc747f3954b4904247d26289fe722f9
ImageShow.py
242,309
21
81
show_file
https://github.com/python-pillow/Pillow.git
In show_file, use os.remove to remove temporary images
328
0
69,823
13
12
57
def confirm(self): args = request.args dag_id = args.get('dag_id') task_id = args.get('task_id') dag_run_id = args.get('dag_run_id') state = args.get('state') origin = args.get('origin') if 'map_index' not in args: map_indexes: Optional[List[int]] = None else: map_indexes = args.getlist('map_index', type=int) upstream = to_boolean(args.get('upstream')) downstream = to_boolean(args.get('downstream')) future = to_boolean(args.get('future')) past = to_boolean(args.get('past')) origin = origin or url_for('Airflow.index') dag = get_airflow_app().dag_bag.get_dag(dag_id) if not dag: msg = f'DAG {dag_id} not found' return redirect_or_json(origin, msg, status='error', status_code=404) try: task = dag.get_task(task_id) except airflow.exceptions.TaskNotFound: msg = f"Task {task_id} not found" return redirect_or_json(origin, msg, status='error', status_code=404) task.dag = dag if state not in ( 'success', 'failed', ): msg = f"Invalid state {state}, must be either 'success' or 'failed'" return redirect_or_json(origin, msg, status='error', status_code=400) latest_execution_date = dag.get_latest_execution_date() if not latest_execution_date: msg = f"Cannot mark tasks as {state}, seem that dag {dag_id} has never run" return redirect_or_json(origin, msg, status='error', status_code=400) if map_indexes is None: tasks: Union[List[Operator], List[Tuple[Operator, int]]] = [task] else: tasks = [(task, map_index) for map_index in map_indexes] to_be_altered = set_state( tasks=tasks, run_id=dag_run_id, upstream=upstream, downstream=downstream, future=future, past=past, state=state, commit=False, ) if request.headers.get('Accept') == 'application/json': details = [str(t) for t in to_be_altered] return htmlsafe_json_dumps(details, separators=(',', ':')) details = "\n".join(str(t) for t in to_be_altered) response = self.render_template( "airflow/confirm.html", endpoint=url_for(f'Airflow.{state}'), message=f"Task instances you are about to mark as {state}:", details=details, ) return response
airflow/www/views.py
729
airflow
{ "docstring": "Show confirmation page for marking tasks as success or failed.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
208
Python
129
e2f19505bf3622935480e80bee55bf5b6d80097b
views.py
43,407
61
430
confirm
https://github.com/apache/airflow.git
Upgrade FAB to 4.1.1 (#24399) * Upgrade FAB to 4.1.1 The Flask Application Builder have been updated recently to support a number of newer dependencies. This PR is the attempt to migrate FAB to newer version. This includes: * update setup.py and setup.cfg upper and lower bounds to account for proper version of dependencies that FAB < 4.0.0 was blocking from upgrade * added typed Flask application retrieval with a custom application fields available for MyPy typing checks. * fix typing to account for typing hints added in multiple upgraded libraries optional values and content of request returned as Mapping * switch to PyJWT 2.* by using non-deprecated "required" claim as list rather than separate fields * add possibiliyt to install providers without constraints so that we could avoid errors on conflicting constraints when upgrade-to-newer-dependencies is used * add pre-commit to check that 2.4+ only get_airflow_app is not used in providers * avoid Bad Request in case the request sent to Flask 2.0 is not JSon content type * switch imports of internal classes to direct packages where classes are available rather than from "airflow.models" to satisfy MyPY * synchronize changes of FAB Security Manager 4.1.1 with our copy of the Security Manager. * add error handling for a few "None" cases detected by MyPY * corrected test cases that were broken by immutability of Flask 2 objects and better escaping done by Flask 2 * updated test cases to account for redirection to "path" rather than full URL by Flask2 Fixes: #22397 * fixup! Upgrade FAB to 4.1.1
751
0
7,960
13
1
6
def collection_name(self) -> t.Optional[str]: return self.config.collection_name
test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py
32
ansible
{ "docstring": "Return the collection name, or None if ansible-core is being tested.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
6
Python
6
89862fda3b4a427894061d90e2a96ad6efaf251c
deprecated.py
268,190
3
19
collection_name
https://github.com/ansible/ansible.git
ansible-test - Sanity test code cleanup. (#78497)
20
0
79,442
7
5
13
def inference_timer(self, do='start'):
    if do == 'start':
        self.pair_it += 1
        self.begin_time = time.time()
    elif do == 'stop':
        end = time.time()
        self.inference_time += (end - self.begin_time)
        if self.pair_it == self.total_pairs:
            logger.info(
                f'Total time spent inferencing pairlist {self.inference_time:.2f} seconds')
            if self.inference_time > 0.25 * self.base_tf_seconds:
                logger.warning('Inference took over 25/% of the candle time. Reduce pairlist to'
                               ' avoid blinding open trades and degrading performance.')
            self.pair_it = 0
            self.inference_time = 0
    return

# Following methods which are overridden by user made prediction models.
# See freqai/prediction_models/CatboostPredictionModel.py for an example.
freqtrade/freqai/freqai_interface.py
180
freqtrade
{ "docstring": "\n Timer designed to track the cumulative time spent in FreqAI for one pass through\n the whitelist. This will check if the time spent is more than 1/4 the time\n of a single candle, and if so, it will warn the user of degraded performance\n ", "language": "en", "n_whitespaces": 73, "n_words": 44, "vocab_size": 34 }
86
Python
69
8961b8d56042545b566d2ef5fea1cb34e2ebdb35
freqai_interface.py
150,371
16
99
inference_timer
https://github.com/freqtrade/freqtrade.git
merge in inference timer and historic predictions handling improvements.
307
0
34,720
16
3
10
def fromkeys(cls, iterable, value="", mutable=False, encoding=None):
    q = cls("", mutable=True, encoding=encoding)
    for key in iterable:
        q.appendlist(key, value)
    if not mutable:
        q._mutable = False
    return q
django/http/request.py
91
django
{ "docstring": "\n Return a new QueryDict with keys (may be repeated) from an iterable and\n values from value.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
25
Python
23
9c19aff7c7561e3a82978a272ecdaad40dda5c00
request.py
206,091
7
58
fromkeys
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
82
0
51,360
9
13
40
def get_rootwise_opening_balances(filters, report_type): additional_conditions = "" if not filters.show_unclosed_fy_pl_balances: additional_conditions = ( " and posting_date >= %(year_start_date)s" if report_type == "Profit and Loss" else "" ) if not flt(filters.with_period_closing_entry): additional_conditions += " and ifnull(voucher_type, '')!='Period Closing Voucher'" if filters.cost_center: lft, rgt = frappe.db.get_value("Cost Center", filters.cost_center, ["lft", "rgt"]) additional_conditions += % ( lft, rgt, ) if filters.project: additional_conditions += " and project = %(project)s" if filters.finance_book: fb_conditions = " AND finance_book = %(finance_book)s" if filters.include_default_book_entries: fb_conditions = ( " AND (finance_book in (%(finance_book)s, %(company_fb)s, '') OR finance_book IS NULL)" ) additional_conditions += fb_conditions accounting_dimensions = get_accounting_dimensions(as_list=False) query_filters = { "company": filters.company, "from_date": filters.from_date, "report_type": report_type, "year_start_date": filters.year_start_date, "project": filters.project, "finance_book": filters.finance_book, "company_fb": frappe.db.get_value("Company", filters.company, "default_finance_book"), } if accounting_dimensions: for dimension in accounting_dimensions: if filters.get(dimension.fieldname): if frappe.get_cached_value("DocType", dimension.document_type, "is_tree"): filters[dimension.fieldname] = get_dimension_with_children( dimension.document_type, filters.get(dimension.fieldname) ) additional_conditions += "and {0} in %({0})s".format(dimension.fieldname) else: additional_conditions += "and {0} in (%({0})s)".format(dimension.fieldname) query_filters.update({dimension.fieldname: filters.get(dimension.fieldname)}) gle = frappe.db.sql( .format( additional_conditions=additional_conditions ), query_filters, as_dict=True, ) opening = frappe._dict() for d in gle: opening.setdefault(d.account, d) return opening
erpnext/accounts/report/trial_balance/trial_balance.py
521
erpnext
{ "docstring": " and cost_center in (select name from `tabCost Center`\n\t\t\twhere lft >= %s and rgt <= %s)\n\t\tselect\n\t\t\taccount, sum(debit) as opening_debit, sum(credit) as opening_credit\n\t\tfrom `tabGL Entry`\n\t\twhere\n\t\t\tcompany=%(company)s\n\t\t\t{additional_conditions}\n\t\t\tand (posting_date < %(from_date)s or ifnull(is_opening, 'No') = 'Yes')\n\t\t\tand account in (select name from `tabAccount` where report_type=%(report_type)s)\n\t\t\tand is_cancelled = 0\n\t\tgroup by account", "language": "en", "n_whitespaces": 44, "n_words": 55, "vocab_size": 41 }
168
Python
105
494bd9ef78313436f0424b918f200dab8fc7c20b
trial_balance.py
65,378
66
311
get_rootwise_opening_balances
https://github.com/frappe/erpnext.git
style: format code with black
114
0
13,873
19
2
4
def addIncludedDataFilesFromFileOptions():
    for included_datafile in _addIncludedDataFilesFromFileOptions():
        addIncludedDataFile(included_datafile)
nuitka/freezer/IncludedDataFiles.py
30
Nuitka
{ "docstring": "Early data files, from user options that work with file system.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
7
Python
7
abfb99b0a05dd76d2ecc6ebc20732a271857c6c8
IncludedDataFiles.py
178,909
3
16
addIncludedDataFilesFromFileOptions
https://github.com/Nuitka/Nuitka.git
Plugins: Massive cleanup of data file handling * Move data file handling out of standalone only, allowing support for other modes as well. * Attach logger and tags to data file objects.
20
0
42,857
9
3
6
def _check_and_raise_error(self) -> bool:
    for plugin in self._active_plugins:
        if plugin.check_and_raise_error():
            return True
    return False
plugins/extract/pipeline.py
44
faceswap
{ "docstring": " Check all threads for errors and raise if one occurs ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
14
Python
13
13cfb3f39e72e9ca181f173b7b3db2a048db0d08
pipeline.py
101,459
6
26
_check_and_raise_error
https://github.com/deepfakes/faceswap.git
extract: Add batch processing mode
61
0
20,872
9
1
3
def async_remove(self) -> None: @callback
homeassistant/data_entry_flow.py
20
@callback
core
{ "docstring": "Notification that the config flow has been removed.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
5
Python
5
2224d0f43a048052cfc4572df95c7afcccdf3a57
data_entry_flow.py
305,039
2
8
async_remove
https://github.com/home-assistant/core.git
Add a callback for data flow handler removal (#77394) * Add a callback for when data flows are removed * Call `async_remove` at the very end * Handle and log exceptions caught during flow removal * Log the error as an exception, with a traceback * Adjust test's expected logging output to match updated format specifier
11
1
103,832
6
1
10
def test_is_unique_interval(self, closed):
    # unique overlapping - distinct endpoints
    idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], inclusive=closed)
    assert idx.is_unique is True

    # unique overlapping - shared endpoints
    idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], inclusive=closed)
    assert idx.is_unique is True

    # unique nested
    idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], inclusive=closed)
    assert idx.is_unique is True

    # unique NaN
    idx = IntervalIndex.from_tuples([(np.NaN, np.NaN)], inclusive=closed)
    assert idx.is_unique is True

    # non-unique NaN
    idx = IntervalIndex.from_tuples(
        [(np.NaN, np.NaN), (np.NaN, np.NaN)], inclusive=closed
    )
    assert idx.is_unique is False
pandas/tests/indexes/interval/test_interval.py
252
pandas
{ "docstring": "\n Interval specific tests for is_unique in addition to base class tests\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
81
Python
42
7e23a37e1c5bda81234801a6584563e2880769eb
test_interval.py
166,762
13
176
test_is_unique_interval
https://github.com/pandas-dev/pandas.git
ENH: consistency of input args for boundaries - Interval (#46522)
211
0
39,858
11
1
5
def preprocess_func(cls, func): return unidist.put(func)
modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py
26
modin
{ "docstring": "\n Put a function into the object store to use in ``apply``.\n\n Parameters\n ----------\n func : callable\n A function to preprocess.\n\n Returns\n -------\n unidist.ObjectRef\n A reference to `func`.\n ", "language": "en", "n_whitespaces": 106, "n_words": 27, "vocab_size": 23 }
5
Python
5
193505fdf0c984743397ba3df56262f30aee13a8
partition.py
155,177
2
15
preprocess_func
https://github.com/modin-project/modin.git
FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <[email protected]>
19
0
36,269
7
1
4
def on_page_read_source(self, page, config): return None
mkdocs/plugins.py
20
mkdocs
{ "docstring": "\n The `on_page_read_source` event can replace the default mechanism to read\n the contents of a page's source from the filesystem.\n\n Parameters:\n page: `mkdocs.nav.Page` instance\n config: global configuration object\n\n Returns:\n The raw source for a page as unicode string. If `None` is returned, the\n default loading from a file will be performed.\n ", "language": "en", "n_whitespaces": 134, "n_words": 50, "vocab_size": 41 }
6
Python
6
f79b34d174e41084391868e7b503f5c61b8b1bdf
plugins.py
224,450
2
12
on_page_read_source
https://github.com/mkdocs/mkdocs.git
Move plugin events docs into source code + refactor * Create real (no-op) methods for each event in the base class. * Refactor event dispatcher to not check for methods' existence, instead just call them. * Move documentation from Markdown into docstrings of these methods. * Activate the 'mkdocstrings' plugin. * Use 'mkdocstrings' to insert documentation from those docstrings into the site.
20
0
57,295
6
1
9
def mixed_type_frame() -> DataFrame:
    return DataFrame(
        {
            "a": 1.0,
            "b": 2,
            "c": "foo",
            "float32": np.array([1.0] * 10, dtype="float32"),
            "int32": np.array([1] * 10, dtype="int32"),
        },
        index=np.arange(10),
    )


@pytest.fixture
pandas/conftest.py
125
@pytest.fixture
pandas
{ "docstring": "\n Fixture for DataFrame of float/int/string columns with RangeIndex\n Columns are ['a', 'b', 'c', 'float32', 'int32'].\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 15 }
27
Python
25
f538568afc2c76c2d738d32e3544cf9fe6742960
conftest.py
167,605
15
73
mixed_type_frame
https://github.com/pandas-dev/pandas.git
TYP: misc return type annotations (#47558)
111
1
40,057
13
1
3
def _grad(f, argnums=0):
keras/integration_test/forwardprop_test.py
18
keras
{ "docstring": "Return a function which computes the gradient of `f`.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
3
Python
3
84afc5193d38057e2e2badf9c889ea87d80d8fbf
forwardprop_test.py
272,185
3
14
_grad
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
6
0
80,972
6
4
23
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
    self._check_no_units([ymin, ymax], ['ymin', 'ymax'])
    if "transform" in kwargs:
        raise ValueError("'transform' is not allowed as a keyword "
                         "argument; axvline generates its own transform.")
    xmin, xmax = self.get_xbound()

    # Strip away the units for comparison with non-unitized bounds.
    xx, = self._process_unit_info([("x", x)], kwargs)
    scalex = (xx < xmin) or (xx > xmax)

    trans = self.get_xaxis_transform(which='grid')
    l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs)
    self.add_line(l)
    if scalex:
        self._request_autoscale_view("x")
    return l
lib/matplotlib/axes/_axes.py
227
matplotlib
{ "docstring": "\n Add a vertical line across the Axes.\n\n Parameters\n ----------\n x : float, default: 0\n x position in data coordinates of the vertical line.\n\n ymin : float, default: 0\n Should be between 0 and 1, 0 being the bottom of the plot, 1 the\n top of the plot.\n\n ymax : float, default: 1\n Should be between 0 and 1, 0 being the bottom of the plot, 1 the\n top of the plot.\n\n Returns\n -------\n `~matplotlib.lines.Line2D`\n\n Other Parameters\n ----------------\n **kwargs\n Valid keyword arguments are `.Line2D` properties, except for\n 'transform':\n\n %(Line2D:kwdoc)s\n\n See Also\n --------\n vlines : Add vertical lines in data coordinates.\n axvspan : Add a vertical span (rectangle) across the axis.\n axline : Add a line with an arbitrary slope.\n\n Examples\n --------\n * draw a thick red vline at *x* = 0 that spans the yrange::\n\n >>> axvline(linewidth=4, color='r')\n\n * draw a default vline at *x* = 1 that spans the yrange::\n\n >>> axvline(x=1)\n\n * draw a default vline at *x* = .5 that spans the middle half of\n the yrange::\n\n >>> axvline(x=.5, ymin=0.25, ymax=0.75)\n ", "language": "en", "n_whitespaces": 465, "n_words": 173, "vocab_size": 87 }
74
Python
66
383de519505964ed879c40b23ef36e90c17ebe0d
_axes.py
110,316
14
139
axvline
https://github.com/matplotlib/matplotlib.git
[Doc] fix more spelling and grammar
208
0
24,055
11
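Illustrative usage sketch for the `axvline` record above (not part of the dataset; it simply replays the calls shown in the record's docstring, the plotted data is made up, and a standard matplotlib install is assumed):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])            # hypothetical data, for illustration only
ax.axvline(linewidth=4, color="r")       # thick red vline at x=0 spanning the y-range
ax.axvline(x=1)                          # default vline at x=1
ax.axvline(x=0.5, ymin=0.25, ymax=0.75)  # vline spanning the middle half of the y-range
plt.show()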
1
4
def DeprecatedModule(mod, deprecated_attributes=None, is_module_deprecated=True):
haystack/__init__.py
23
haystack
{ "docstring": "\n Return a wrapped object that warns about deprecated accesses at import\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
4
Python
4
a59bca366174d9c692fa19750c24d65f47660ef7
__init__.py
256,180
5
30
DeprecatedModule
https://github.com/deepset-ai/haystack.git
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
7
0
74,787
6
1
14
def test_cli_log_level_debug_used():
    configure_logging_and_warnings(logging.DEBUG)
    rasa_logger = logging.getLogger("rasa")
    rasa_logger.level == logging.DEBUG
    matplotlib_logger = logging.getLogger("matplotlib")
    # Default log level for libraries is currently ERROR
    matplotlib_logger.level == logging.ERROR


@mock.patch.dict(os.environ, {"LOG_LEVEL": "WARNING"})
tests/utils/test_common.py
105
@mock.patch.dict(os.environ, {"LOG_LEVEL": "WARNING"})
rasa
{ "docstring": "Test CLI with log level uses for rasa logger whereas libraries stay default.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
27
Python
25
f00148b089d326c952880a0e5e6bd4b2dcb98ce5
test_common.py
159,095
6
41
test_cli_log_level_debug_used
https://github.com/RasaHQ/rasa.git
Configurable logging for libraries (#10614) * Make library level logging to be configurable Fixes https://github.com/RasaHQ/rasa/issues/10203 * Create log level documentation under cheatsheet in Rasa docs * Add log docs to `rasa shell --debug` (and others)
47
1
38,121
9
2
10
def partition_suite_by_case(suite):
    suite_class = type(suite)
    all_tests = iter_test_cases(suite)
    return [suite_class(tests) for _, tests in itertools.groupby(all_tests, type)]
django/test/runner.py
62
django
{ "docstring": "Partition a test suite by test case, preserving the order of tests.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
16
Python
15
9c19aff7c7561e3a82978a272ecdaad40dda5c00
runner.py
206,410
4
38
partition_suite_by_case
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
28
0
51,517
9
1
11
def test_get_invalid_filter_spec(self):
    # Get the image
    response = self.client.get(
        reverse("wagtailimages:preview", args=(self.image.id, "bad-filter-spec"))
    )

    # Check response
    self.assertEqual(response.status_code, 400)
wagtail/images/tests/test_admin_views.py
71
wagtail
{ "docstring": "\n Test that an invalid filter spec returns a 400 response\n\n This is very unlikely to happen in reality. A user would have\n to create signature for the invalid filter spec which can't be\n done with Wagtails built in URL generator. We should test it\n anyway though.\n ", "language": "en", "n_whitespaces": 89, "n_words": 46, "vocab_size": 41 }
18
Python
16
d10f15e55806c6944827d801cd9c2d53f5da4186
test_admin_views.py
75,149
5
41
test_get_invalid_filter_spec
https://github.com/wagtail/wagtail.git
Reformat with black
71
0
16,368
14
3
13
def test_knn_imputer_keep_empty_features(keep_empty_features):
    X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])

    imputer = KNNImputer(keep_empty_features=keep_empty_features)

    for method in ["fit_transform", "transform"]:
        X_imputed = getattr(imputer, method)(X)
        if keep_empty_features:
            assert X_imputed.shape == X.shape
            assert_array_equal(X_imputed[:, 1], 0)
        else:
            assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)
sklearn/impute/tests/test_impute.py
168
scikit-learn
{ "docstring": "Check the behaviour of `keep_empty_features` for `KNNImputer`.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
39
Python
33
d8fa96c29828e3ca79ddd5d7466521ac4d95213c
test_impute.py
261,584
10
110
test_knn_imputer_keep_empty_features
https://github.com/scikit-learn/scikit-learn.git
ENH keep features with all missing values during imputation (#24770) Co-authored-by: Chiara Marmo <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]> Co-authored-by: Vitor SRG <[email protected]> Fixes https://github.com/scikit-learn/scikit-learn/pull/16695 Fixes https://github.com/scikit-learn/scikit-learn/issues/16426 Fixes https://github.com/scikit-learn/scikit-learn/issues/16977
105
0
76,878
15
3
12
def requires(self, extras=()):
    dm = self._dep_map
    deps = []
    deps.extend(dm.get(None, ()))
    for ext in extras:
        try:
            deps.extend(dm[safe_extra(ext)])
        except KeyError:
            raise UnknownExtra(
                "%s has no such extra feature %r" % (self, ext)
            )
    return deps
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
113
transferlearning
{ "docstring": "List of Requirements needed for this distro if `extras` are used", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
34
Python
32
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
__init__.py
63,049
13
69
requires
https://github.com/jindongwang/transferlearning.git
upd; format
162
0
13,115
14
8
35
def mode(a, axis=0, nan_policy='propagate'):
    a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        return ModeResult(np.array([]), np.array([]))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.mode(a, axis)

    if a.dtype == object and np.nan in set(a.ravel()):
        # Fall back to a slower method since np.unique does not work with NaN
        scores = set(np.ravel(a))  # get ALL unique values
        testshape = list(a.shape)
        testshape[axis] = 1
        oldmostfreq = np.zeros(testshape, dtype=a.dtype)
        oldcounts = np.zeros(testshape, dtype=int)

        for score in scores:
            template = (a == score)
            counts = np.sum(template, axis, keepdims=True)
            mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
            oldcounts = np.maximum(counts, oldcounts)
            oldmostfreq = mostfrequent

        return ModeResult(mostfrequent, oldcounts)
scipy/stats/_stats_py.py
336
scipy
{ "docstring": "Return an array of the modal (most common) value in the passed array.\n\n If there is more than one such value, only the smallest is returned.\n The bin-count for the modal bins is also returned.\n\n Parameters\n ----------\n a : array_like\n n-dimensional array of which to find mode(s).\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n\n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n\n Returns\n -------\n mode : ndarray\n Array of modal values.\n count : ndarray\n Array of counts for each mode.\n\n Examples\n --------\n >>> a = np.array([[6, 8, 3, 0],\n ... [3, 2, 1, 7],\n ... [8, 1, 8, 4],\n ... [5, 3, 0, 5],\n ... [4, 7, 5, 9]])\n >>> from scipy import stats\n >>> stats.mode(a)\n ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))\n\n To get mode of whole array, specify ``axis=None``:\n\n >>> stats.mode(a, axis=None)\n ModeResult(mode=array([3]), count=array([3]))\n\n ", "language": "en", "n_whitespaces": 390, "n_words": 183, "vocab_size": 131 }
108
Python
78
7438fe5edfb565ff341fa6ab054461fcdd504aa2
_stats_py.py
241,885
31
340
mode
https://github.com/scipy/scipy.git
MAINT: stats: mode: fix negative axis issue with np.moveaxis instead of custom code (#15421)
259
0
69,724
13
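Illustrative usage sketch for the `mode` record above (not part of the dataset; it reuses the example array from the record's docstring and assumes SciPy is installed):

import numpy as np
from scipy import stats

a = np.array([[6, 8, 3, 0],
              [3, 2, 1, 7],
              [8, 1, 8, 4],
              [5, 3, 0, 5],
              [4, 7, 5, 9]])

print(stats.mode(a))             # column-wise mode; axis=0 is the default in the signature above
print(stats.mode(a, axis=None))  # mode of the whole flattened array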
1
8
def score(self, X, y, **fit_params):
    check_is_fitted(self)
    return self.estimator_.score(self.transform(X), y, **fit_params)
sklearn/feature_selection/_rfe.py
56
scikit-learn
{ "docstring": "Reduce X to the selected features and return the score of the estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n y : array of shape [n_samples]\n The target values.\n\n **fit_params : dict\n Parameters to pass to the `score` method of the underlying\n estimator.\n\n .. versionadded:: 1.0\n\n Returns\n -------\n score : float\n Score of the underlying base estimator computed with the selected\n features returned by `rfe.transform(X)` and `y`.\n ", "language": "en", "n_whitespaces": 212, "n_words": 72, "vocab_size": 46 }
10
Python
9
6e5ef2e9b8c64e6788428610ae884b9bf3d298a2
_rfe.py
260,641
3
36
score
https://github.com/scikit-learn/scikit-learn.git
MAINT solve long line reported by flake8 (#24065)
31
0
76,391
9
2
11
def finalize(self) -> None:
    if self._warn_autoconfig:
        desc = configexc.ConfigErrorDesc(
            "autoconfig loading not specified",
            ("Your config.py should call either `config.load_autoconfig()`"
             " (to load settings configured via the GUI) or "
             "`config.load_autoconfig(False)` (to not do so)"))
        self.errors.append(desc)

    with self._handle_error("updating mutated values"):
        self._config.update_mutables()
qutebrowser/config/configfiles.py
95
qutebrowser
{ "docstring": "Do work which needs to be done after reading config.py.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
40
Python
37
d9e20f6b3071b86b479f281fe27d621e0b3ae7e5
configfiles.py
321,041
11
50
finalize
https://github.com/qutebrowser/qutebrowser.git
config: Handle config.py errors while updating mutables Fixes #3580
156
0
117,491
13
8
24
def predict_on_batch(self, x):
    self._check_call_args("predict_on_batch")

    if (
        self._distribution_strategy
        and tf.distribute.in_cross_replica_context()
    ):
        raise NotImplementedError(
            "`predict_on_batch` is not supported for models distributed with"
            " tf.distribute.Strategy."
        )
    # Validate and standardize user data.
    inputs, _, _ = self._standardize_user_data(
        x, extract_tensors_from_dataset=True
    )
    # If `self._distribution_strategy` is True, then we are in a replica context
    # at this point.
    if self.run_eagerly or self._distribution_strategy:
        inputs = training_utils_v1.cast_if_floating_dtype(inputs)
        if isinstance(inputs, collections.abc.Sequence):
            # Unwrap lists with only one input, as we do when training on batch
            if len(inputs) == 1:
                inputs = inputs[0]
        return self(inputs)  # pylint: disable=not-callable

    self._make_predict_function()
    outputs = self.predict_function(inputs)

    if len(outputs) == 1:
        return outputs[0]
    return outputs
keras/engine/training_v1.py
213
keras
{ "docstring": "Returns predictions for a single batch of samples.\n\n Args:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A `tf.data` dataset.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case of mismatch between given number of inputs and\n expectations of the model.\n ", "language": "en", "n_whitespaces": 217, "n_words": 74, "vocab_size": 50 }
101
Python
80
84afc5193d38057e2e2badf9c889ea87d80d8fbf
training_v1.py
271,940
24
127
predict_on_batch
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
378
0
80,906
13
2
9
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
    from .features import Value

    if self.decode:
        raise ValueError("Cannot flatten a decoded Audio feature.")
    return {
        "bytes": Value("binary"),
        "path": Value("string"),
    }
src/datasets/features/audio.py
93
datasets
{ "docstring": "If in the decodable state, raise an error, otherwise flatten the feature into a dictionary.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
26
Python
26
3804442bb7cfcb9d52044d92688115cfdc69c2da
audio.py
104,575
9
50
flatten
https://github.com/huggingface/datasets.git
Fix flatten of complex feature types (#3723) * Flatten Translation and TranslationVariableLanguages * Add tests * Style * Flatten for decodable features * Fix flatten for non-dict types * Add test * Descriptive message in flatten for Audio feature * Small refactor * Add flatten to features * Update table_flatten * Revert changes in Dataset.flatten_/flatten * Apply Quentin's suggestions from code review Co-authored-by: Quentin Lhoest <[email protected]> * Improve table_flatten docstring * Fix tests * Add nested test * Minor fix * Remove comment Co-authored-by: Quentin Lhoest <[email protected]>
94
0
21,901
10
5
25
def gmean(a, axis=0, dtype=None, weights=None):
    if not isinstance(a, np.ndarray):
        # if not an ndarray object attempt to convert it
        log_a = np.log(np.array(a, dtype=dtype))
    elif dtype:
        # Must change the default dtype allowing array type
        if isinstance(a, np.ma.MaskedArray):
            log_a = np.log(np.ma.asarray(a, dtype=dtype))
        else:
            log_a = np.log(np.asarray(a, dtype=dtype))
    else:
        log_a = np.log(a)

    if weights is not None:
        weights = np.asanyarray(weights, dtype=dtype)

    return np.exp(np.average(log_a, axis=axis, weights=weights))


@_axis_nan_policy_factory(
        lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
        result_unpacker=lambda x: (x,), kwd_samples=['weights'])
scipy/stats/_stats_py.py
286
@_axis_nan_policy_factory( lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True, result_unpacker=lambda x: (x,), kwd_samples=['weights'])
scipy
{ "docstring": "Compute the weighted geometric mean along the specified axis.\n\n The weighted geometric mean of the array :math:`a_i` associated to weights\n :math:`w_i` is:\n\n .. math::\n\n \\exp \\left( \\frac{ \\sum_{i=1}^n w_i \\log a_i }{ \\sum_{i=1}^n w_i }\n \\right) \\, ,\n\n and, with equal weights, it falls backs to:\n\n .. math::\n\n \\sqrt[n]{ \\prod_{i=1}^n a_i } \\, .\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the geometric mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If dtype is not specified, it defaults to the\n dtype of a, unless a has an integer dtype with a precision less than\n that of the default platform integer. In that case, the default\n platform integer is used.\n weights : array_like, optional\n The `weights` array must be broadcastable to the same shape as `a`.\n Default is None, which gives each value a weight of 1.0.\n\n Returns\n -------\n gmean : ndarray\n See `dtype` parameter above.\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n hmean : Harmonic mean\n\n Notes\n -----\n The geometric average is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n\n References\n ----------\n .. [1] \"Weighted Geometric Mean\", *Wikipedia*,\n https://en.wikipedia.org/wiki/Weighted_geometric_mean.\n\n Examples\n --------\n >>> from scipy.stats import gmean\n >>> gmean([1, 4])\n 2.0\n >>> gmean([1, 2, 3, 4, 5, 6, 7])\n 3.3800151591412964\n >>> gmean([1, 4, 7], weights=[3, 1, 3])\n 2.80668351922014\n\n ", "language": "en", "n_whitespaces": 506, "n_words": 276, "vocab_size": 173 }
76
Python
57
56869131c8e0a0d6e1af86cc1a000c61e83efcf6
_stats_py.py
242,047
79
148
gmean
https://github.com/scipy/scipy.git
DOC: stats: correct doc display
177
1
69,761
17
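Illustrative usage sketch for the `gmean` record above (not part of the dataset; values follow the record's docstring examples, and the `weights` argument assumes a SciPy version that includes this record's change):

import numpy as np
from scipy.stats import gmean

print(gmean([1, 4]))                        # 2.0
print(gmean([1, 2, 3, 4, 5, 6, 7]))         # ~3.3800
print(gmean([1, 4, 7], weights=[3, 1, 3]))  # ~2.8067, weighted geometric mean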
7
21
def split_filename(filename, project_name=None):
    result = None
    pyver = None
    filename = unquote(filename).replace(' ', '-')
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    if project_name and len(filename) > len(project_name) + 1:
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            result = filename[:n], filename[n + 1:], pyver
    if result is None:
        m = PROJECT_NAME_AND_VERSION.match(filename)
        if m:
            result = m.group(1), m.group(3), pyver
    return result


# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py
270
transferlearning
{ "docstring": "\n Extract name, version, python version from a filename (no extension)\n\n Return name, version, pyver or None\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
84
Python
54
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
util.py
62,169
18
154
split_filename
https://github.com/jindongwang/transferlearning.git
upd; format
212
0
12,890
14
1
7
def test_mapped_literal_to_xcom_arg_verify_integrity(dag_maker, session):
    with dag_maker(session=session) as dag:
        t1 = BaseOperator(task_id='task_1')
tests/models/test_dagrun.py
49
airflow
{ "docstring": "Test that when we change from literal to a XComArg the TIs are removed", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
10
Python
10
91832a42d8124b040073481fd93c54e9e64c2609
test_dagrun.py
46,887
25
185
test_mapped_literal_to_xcom_arg_verify_integrity
https://github.com/apache/airflow.git
Expand mapped tasks at DagRun.Veriy_integrity (#22679) Create the necessary task instances for a mapped task at dagrun.verify_integrity Co-authored-by: Ash Berlin-Taylor <[email protected]>
23
0
9,034
12
1
3
def raise_on_http_errors(self) -> bool:
airbyte-cdk/python/airbyte_cdk/sources/declarative/requesters/requester.py
16
airbyte
{ "docstring": "\n If set to False, allows opting-out of raising HTTP code exception.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
4
Python
4
150ab593f8ca1f1aa960a0811aece26c46ba6c75
requester.py
5,320
4
8
raise_on_http_errors
https://github.com/airbytehq/airbyte.git
Low code connectors: core structure (#12850) * checkout from alex/cac * doc * doc * remove broken test * rename * rename file * delete unused file * rename * abstract property * isort * update state * Update comment * remove incremental mixin * delete comment * update comments * update comments * remove no_state * rename package * pass parameters through kwargs * update interface to pass source in interface * update interface to pass source in interface * rename to stream_slicer * Low code connectors: string interpolation with jinja (#12852) * checkout from alex/cac * Add missing tests * Add missing files * missing file * rename * jinja dependency * Add comment * comment * comment * Revert "delete unused file" This reverts commit 758e939367775ddbefcd52c6e1e832976d3ba9fe. * delete unused field * delete unused field * rename * pass kwargs directly * isort * Revert "isort" This reverts commit 4a792239440bc9950813ccc6ed368641ce2a96e4. * format * decoder * better error handling * remove nostate * isort * delete dead code * Update mapping type to [str, Any] * add comment * Add comment * pass parameters through kwargs * move test to right module * Add missing test * Use authbase instead of deprecated class * leverage generator * rename to declarative * rename the classes too
11
0
753
6
1
8
def _mask_lengths(mel_lens, log_c, log_alpha_scaled):
    mask_log_c = sequence_mask(mel_lens)
    log_c = log_c * mask_log_c
    mask_log_alpha_scaled = mask_log_c.unsqueeze(2)
    log_alpha_scaled = log_alpha_scaled * mask_log_alpha_scaled
    return log_c, log_alpha_scaled
TTS/tts/layers/overflow/neural_hmm.py
62
TTS
{ "docstring": "\n Mask the lengths of the forward variables so that the variable lenghts\n do not contribute in the loss calculation\n Args:\n mel_inputs (torch.FloatTensor): (batch, T, frame_channels)\n mel_inputs_lengths (torch.IntTensor): (batch)\n log_c (torch.FloatTensor): (batch, T)\n Returns:\n log_c (torch.FloatTensor) : scaled probabilities (batch, T)\n log_alpha_scaled (torch.FloatTensor): forward probabilities (batch, T, N)\n ", "language": "en", "n_whitespaces": 138, "n_words": 47, "vocab_size": 34 }
23
Python
13
3b8b105b0d6539ac12972de94e0b2a5077fa1ce2
neural_hmm.py
262,688
6
38
_mask_lengths
https://github.com/coqui-ai/TTS.git
Adding OverFlow (#2183) * Adding encoder * currently modifying hmm * Adding hmm * Adding overflow * Adding overflow setting up flat start * Removing runs * adding normalization parameters * Fixing models on same device * Training overflow and plotting evaluations * Adding inference * At the end of epoch the test sentences are coming on cpu instead of gpu * Adding figures from model during training to monitor * reverting tacotron2 training recipe * fixing inference on gpu for test sentences on config * moving helpers and texts within overflows source code * renaming to overflow * moving loss to the model file * Fixing the rename * Model training but not plotting the test config sentences's audios * Formatting logs * Changing model name to camelcase * Fixing test log * Fixing plotting bug * Adding some tests * Adding more tests to overflow * Adding all tests for overflow * making changes to camel case in config * Adding information about parameters and docstring * removing compute_mel_statistics moved statistic computation to the model instead * Added overflow in readme * Adding more test cases, now it doesn't saves transition_p like tensor and can be dumped as json
65
0
77,320
8
4
9
def get_default_frameworks():
    frameworks = []
    if is_torch_available():
        frameworks.append("pt")
    if is_tf_available():
        frameworks.append("tf")
    if is_flax_available():
        frameworks.append("flax")
    return frameworks


_re_model_mapping = re.compile("MODEL_([A-Z_]*)MAPPING_NAMES")
src/transformers/commands/add_new_model_like.py
100
transformers
{ "docstring": "\n Returns the list of frameworks (PyTorch, TensorFlow, Flax) that are installed in the environment.\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 13 }
19
Python
15
0a5ef036e6c2d5093ed348c5fd706634f6ed5e38
add_new_model_like.py
36,274
9
44
get_default_frameworks
https://github.com/huggingface/transformers.git
Make `add-new-model-like` work in an env without all frameworks (#16239) * Make add-new-model-like work without all frameworks installed * A few fixes * Last default frameworks
57
0
6,594
10
1
19
def test_limited_api(tmp_path):
    # Based in part on test_cython from random.tests.test_extending
    here = os.path.dirname(__file__)
    ext_dir = os.path.join(here, "examples", "limited_api")

    cytest = str(tmp_path / "limited_api")

    shutil.copytree(ext_dir, cytest)
    # build the examples and "install" them into a temporary directory

    install_log = str(tmp_path / "tmp_install_log.txt")
    subprocess.check_call(
        [
            sys.executable,
            "setup.py",
            "build",
            "install",
            "--prefix", str(tmp_path / "installdir"),
            "--single-version-externally-managed",
            "--record",
            install_log,
        ],
        cwd=cytest,
    )
numpy/core/tests/test_limited_api.py
158
numpy
{ "docstring": "Test building a third-party C extension with the limited API.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
57
Python
48
1edd6407d008fcaa282a6058ae355025c26517fd
test_limited_api.py
159,732
19
91
test_limited_api
https://github.com/numpy/numpy.git
TST: Split example package, skip limited API test for debug
196
0
38,413
12
1
16
def test_drange():
    start = datetime.datetime(2011, 1, 1, tzinfo=mdates.UTC)
    end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
    delta = datetime.timedelta(hours=1)
    # We expect 24 values in drange(start, end, delta), because drange returns
    # dates from an half open interval [start, end)
    assert len(mdates.drange(start, end, delta)) == 24

    # Same if interval ends slightly earlier
    end = end - datetime.timedelta(microseconds=1)
    assert len(mdates.drange(start, end, delta)) == 24

    # if end is a little bit later, we expect the range to contain one element
    # more
    end = end + datetime.timedelta(microseconds=2)
    assert len(mdates.drange(start, end, delta)) == 25

    # reset end
    end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)

    # and tst drange with "complicated" floats:
    # 4 hours = 1/6 day, this is an "dangerous" float
    delta = datetime.timedelta(hours=4)
    daterange = mdates.drange(start, end, delta)
    assert len(daterange) == 6
    assert mdates.num2date(daterange[-1]) == (end - delta)


@_new_epoch_decorator
lib/matplotlib/tests/test_dates.py
292
@_new_epoch_decorator
matplotlib
{ "docstring": "\n This test should check if drange works as expected, and if all the\n rounding errors are fixed\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 16 }
137
Python
80
9a03cb3b8c7253271054f146724c230eca96274b
test_dates.py
108,602
14
187
test_drange
https://github.com/matplotlib/matplotlib.git
Add tests for date module
202
1
23,271
10
2
9
def get_default_executor(cls) -> "BaseExecutor":
    if cls._default_executor is not None:
        return cls._default_executor
    from airflow.configuration import conf

    executor_name = conf.get('core', 'EXECUTOR')
    cls._default_executor = cls.load_executor(executor_name)
    return cls._default_executor
airflow/executors/executor_loader.py
86
airflow
{ "docstring": "Creates a new instance of the configured executor if none exists and returns it", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
24
Python
19
1a8a897120762692ca98ac5ce4da881678c073aa
executor_loader.py
44,553
8
50
get_default_executor
https://github.com/apache/airflow.git
Improve speed to run `airflow` by 6x (#21438) By delaying expensive/slow imports to where they are needed, this gets `airflow` printing it's usage information in under 0.8s, down from almost 3s which makes it feel much much snappier. By not loading BaseExecutor we can get down to <0.5s
77
0
8,295
9
1
15
def test_create_profile_from_existing():
    save_profiles(
        ProfilesCollection(
            profiles=[
                Profile(name="foo", settings={PREFECT_API_KEY: "foo"}),
            ],
            active=None,
        )
    )

    invoke_and_assert(
        ["profile", "create", "bar", "--from", "foo"],
        expected_output=(
            f
        ),
    )

    profiles = load_profiles()
    assert profiles["foo"].settings == {PREFECT_API_KEY: "foo"}, "Foo is unchanged"
    assert profiles["bar"] == Profile(
        name="bar",
        settings={PREFECT_API_KEY: "foo"},
        source=PREFECT_PROFILES_PATH.value(),
    )
tests/cli/test_profile.py
179
prefect
{ "docstring": "\n Created profile 'bar' matching 'foo'.\n\n Switch to your new profile with:\n\n prefect profile use 'bar'\n\n Or, to use it for a single command, include the `-p` option:\n\n prefect -p 'bar' config view\n ", "language": "en", "n_whitespaces": 107, "n_words": 32, "vocab_size": 25 }
42
Python
35
808660dd04465fc796a34e835467e8ae1f2449b3
test_profile.py
55,084
32
105
test_create_profile_from_existing
https://github.com/PrefectHQ/prefect.git
Add tests for profile CLI
180
0
11,204
17
1
6
def test_device_stats_gpu_from_nvidia(tmpdir):
    model = BoringModel()
    device_stats = DeviceStatsMonitor()
tests/callbacks/test_device_stats_monitor.py
31
lightning
{ "docstring": "Test GPU stats are logged using a logger with Pytorch < 1.8.0.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
8
Python
7
b56d8677ad0ff8513e566334f4a78a24b88480c3
test_device_stats_monitor.py
241,714
19
82
test_device_stats_gpu_from_nvidia
https://github.com/Lightning-AI/lightning.git
Update test_pruning.py to use `devices` instead of `gpus` or `ipus` (#11339)
17
0
69,667
8
3
13
def register_model_view(model, name, view_path, tab_label=None, tab_badge=None, tab_permission=None, kwargs=None):
    app_label = model._meta.app_label
    model_name = model._meta.model_name

    if model_name not in registry['views'][app_label]:
        registry['views'][app_label][model_name] = []

    registry['views'][app_label][model_name].append({
        'name': name,
        'path': view_path,
        'tab_label': tab_label,
        'tab_badge': tab_badge,
        'tab_permission': tab_permission,
        'kwargs': kwargs or {},
    })
netbox/utilities/views.py
172
netbox
{ "docstring": "\n Register a subview for a core model.\n\n Args:\n model: The Django model class with which this view will be associated\n name: The name to register when creating a URL path\n view_path: A dotted path to the view class or function (e.g. 'myplugin.views.FooView')\n tab_label: The label to display for the view's tab under the model view (optional)\n tab_badge: A static value or callable to display a badge within the view's tab (optional). If a callable is\n specified, it must accept the current object as its single positional argument.\n tab_permission: The name of the permission required to display the tab (optional)\n kwargs: A dictionary of keyword arguments to send to the view (optional)\n ", "language": "en", "n_whitespaces": 181, "n_words": 111, "vocab_size": 71 }
38
Python
33
0d7851ed9de2792ea6d9ed223c315c235290ddd7
views.py
265,787
13
108
register_model_view
https://github.com/netbox-community/netbox.git
#9072: Implement a mechanism for dynamically registering model detail views
105
0
78,196
12
1
4
def done_adding(self) -> bool: raise NotImplementedError()
python/ray/data/_internal/batcher.py
23
ray
{ "docstring": "Indicate to the batcher that no more blocks will be added to the buffer.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
6
Python
6
864af14f410ab12c7553332dd3a62e716f24a667
batcher.py
125,025
3
12
done_adding
https://github.com/ray-project/ray.git
[Datasets] [Local Shuffle - 1/N] Add local shuffling option. (#26094) Co-authored-by: Eric Liang <[email protected]> Co-authored-by: matthewdeng <[email protected]> Co-authored-by: Matthew Deng <[email protected]> Co-authored-by: Richard Liaw <[email protected]>
20
0
27,753
7
1
3
async def test_subscribe_deprecated_async(hass, mqtt_mock):
tests/components/mqtt/test_init.py
16
core
{ "docstring": "Test the subscription of a topic using deprecated coroutine signature.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
4
Python
4
845bf80e725af8c921915906b0f796c7a8164d11
test_init.py
292,660
27
184
test_subscribe_deprecated_async
https://github.com/home-assistant/core.git
Mqtt improve test coverage (#66279) Co-authored-by: Martin Hjelmare <[email protected]>
7
0
91,734
6
14
65
def validate(self, parameters, *args, **kwargs):
    result = ValidationResult(parameters)

    result._no_log_values.update(set_fallbacks(self.argument_spec, result._validated_parameters))

    alias_warnings = []
    alias_deprecations = []
    try:
        result._aliases.update(_handle_aliases(self.argument_spec, result._validated_parameters, alias_warnings, alias_deprecations))
    except (TypeError, ValueError) as e:
        result.errors.append(AliasError(to_native(e)))

    legal_inputs = _get_legal_inputs(self.argument_spec, result._validated_parameters, result._aliases)

    for option, alias in alias_warnings:
        result._warnings.append({'option': option, 'alias': alias})

    for deprecation in alias_deprecations:
        result._deprecations.append({
            'name': deprecation['name'],
            'version': deprecation.get('version'),
            'date': deprecation.get('date'),
            'collection_name': deprecation.get('collection_name'),
        })

    try:
        result._no_log_values.update(_list_no_log_values(self.argument_spec, result._validated_parameters))
    except TypeError as te:
        result.errors.append(NoLogError(to_native(te)))

    try:
        result._unsupported_parameters.update(_get_unsupported_parameters(self.argument_spec, result._validated_parameters, legal_inputs))
    except TypeError as te:
        result.errors.append(RequiredDefaultError(to_native(te)))
    except ValueError as ve:
        result.errors.append(AliasError(to_native(ve)))

    try:
        check_mutually_exclusive(self._mutually_exclusive, result._validated_parameters)
    except TypeError as te:
        result.errors.append(MutuallyExclusiveError(to_native(te)))

    result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters, False))

    try:
        check_required_arguments(self.argument_spec, result._validated_parameters)
    except TypeError as e:
        result.errors.append(RequiredError(to_native(e)))

    _validate_argument_types(self.argument_spec, result._validated_parameters, errors=result.errors)
    _validate_argument_values(self.argument_spec, result._validated_parameters, errors=result.errors)

    for check in _ADDITIONAL_CHECKS:
        try:
            check['func'](getattr(self, "_{attr}".format(attr=check['attr'])), result._validated_parameters)
        except TypeError as te:
            result.errors.append(check['err'](to_native(te)))

    result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters))

    _validate_sub_spec(self.argument_spec, result._validated_parameters,
                       errors=result.errors,
                       no_log_values=result._no_log_values,
                       unsupported_parameters=result._unsupported_parameters)

    if result._unsupported_parameters:
        flattened_names = []
        for item in result._unsupported_parameters:
            if isinstance(item, tuple):
                flattened_names.append(".".join(item))
            else:
                flattened_names.append(item)

        unsupported_string = ", ".join(sorted(list(flattened_names)))
        supported_string = ", ".join(self._valid_parameter_names)
        result.errors.append(
            UnsupportedError("{0}. Supported parameters include: {1}.".format(unsupported_string, supported_string)))

    return result
lib/ansible/module_utils/common/arg_spec.py
930
ansible
{ "docstring": "Validate ``parameters`` against argument spec.\n\n Error messages in the :class:`ValidationResult` may contain no_log values and should be\n sanitized with :func:`~ansible.module_utils.common.parameters.sanitize_keys` before logging or displaying.\n\n :arg parameters: Parameters to validate against the argument spec\n :type parameters: dict[str, dict]\n\n :return: :class:`ValidationResult` containing validated parameters.\n\n :Simple Example:\n\n .. code-block:: text\n\n argument_spec = {\n 'name': {'type': 'str'},\n 'age': {'type': 'int'},\n }\n\n parameters = {\n 'name': 'bo',\n 'age': '42',\n }\n\n validator = ArgumentSpecValidator(argument_spec)\n result = validator.validate(parameters)\n\n if result.error_messages:\n sys.exit(\"Validation failed: {0}\".format(\", \".join(result.error_messages))\n\n valid_params = result.validated_parameters\n ", "language": "en", "n_whitespaces": 355, "n_words": 80, "vocab_size": 66 }
154
Python
98
1b947eaf92b6833d2a4fd019a30d7b85406f1778
arg_spec.py
267,055
62
575
validate
https://github.com/ansible/ansible.git
arg_spec - Return aliases in validation result and update aliases (#77576) When looking up the `no_log` setting for a parameter that is an alias in `AnsibleModule._log_invocation()`, the alias value will always be an empty dictionary since `self.aliases` on the `AnsibleModule` instance is never updated after initialization. Since the `no_log` setting is on the canonical parameter not the alias, an incorrect warning is issued if the parameter matches `PASSWORD_MATCH`. This PR returns the aliases dictionary as an attribute of the `ValidationResult` and updates the `aliases` attribute on the `AnsibleModule` instance.
825
0
78,708
18
1
9
def test_avatar_constraints_file_size(self):
    self._setup_local_files(
        {
            "small": {"size": 40},
            "big": {"size": 60},
        }
    )

    res = self.get_success(
        self.handler.check_avatar_size_and_mime_type("mxc://test/small")
    )
    self.assertTrue(res)

    res = self.get_success(
        self.handler.check_avatar_size_and_mime_type("mxc://test/big")
    )
    self.assertFalse(res)
tests/handlers/test_profile.py
127
synapse
{ "docstring": "Tests that a file that's above the allowed file size is forbidden but one\n that's below it is allowed.\n ", "language": "en", "n_whitespaces": 33, "n_words": 19, "vocab_size": 16 }
24
Python
18
bf60da1a60096fac5fb778b732ff2214862ac808
test_profile.py
246,128
15
71
test_avatar_constraints_file_size
https://github.com/matrix-org/synapse.git
Configurable limits on avatars (#11846) Only allow files which file size and content types match configured limits to be set as avatar. Most of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19
161
0
71,029
12
6
20
def get_source_segment(source, node, *, padded=False):
    try:
        if node.end_lineno is None or node.end_col_offset is None:
            return None
        lineno = node.lineno - 1
        end_lineno = node.end_lineno - 1
        col_offset = node.col_offset
        end_col_offset = node.end_col_offset
    except AttributeError:
        return None

    lines = _splitlines_no_ff(source)
    if end_lineno == lineno:
        return lines[lineno].encode()[col_offset:end_col_offset].decode()

    if padded:
        padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode())
    else:
        padding = ''

    first = padding + lines[lineno].encode()[col_offset:].decode()
    last = lines[end_lineno].encode()[:end_col_offset].decode()
    lines = lines[lineno+1:end_lineno]

    lines.insert(0, first)
    lines.append(last)
    return ''.join(lines)
python3.10.4/Lib/ast.py
303
XX-Net
{ "docstring": "Get source code segment of the *source* that generated *node*.\n\n If some location information (`lineno`, `end_lineno`, `col_offset`,\n or `end_col_offset`) is missing, return None.\n\n If *padded* is `True`, the first line of a multi-line statement will\n be padded with spaces to match its original position.\n ", "language": "en", "n_whitespaces": 59, "n_words": 44, "vocab_size": 40 }
70
Python
45
8198943edd73a363c266633e1aa5b2a9e9c9f526
ast.py
220,161
23
187
get_source_segment
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
183
0
55,928
16
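Illustrative usage sketch for the `get_source_segment` record above (not part of the dataset; the sample source string is made up, and the function is the standard-library `ast` helper available on Python 3.8+):

import ast

source = "x = 1\ny = (2 +\n     3)\n"
tree = ast.parse(source)
assign = tree.body[1]  # the multi-line assignment to y
print(ast.get_source_segment(source, assign))               # 'y = (2 +\n     3)'
print(ast.get_source_segment(source, assign, padded=True))  # same here, since col_offset is 0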
1
11
def _replace_cwa_config_variables(self) -> Dict[str, Any]:
    cwa_config = self._load_config_file("agent")
    self._replace_all_config_variables(
        cwa_config,
        self.node_id,
        self.cluster_name,
        self.provider_config["region"],
    )
    return cwa_config
python/ray/autoscaler/_private/aws/cloudwatch/cloudwatch_helper.py
72
ray
{ "docstring": "\n replace known variable occurrences in\n Unified Cloudwatch Agent config file\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
16
Python
15
71fae21e8e86c75bc58b53dccae563d15691610f
cloudwatch_helper.py
129,126
13
45
_replace_cwa_config_variables
https://github.com/ray-project/ray.git
[autoscaler] AWS Autoscaler CloudWatch Dashboard support (#20266) These changes add a set of improvements to enable automatic creation and update of CloudWatch dashboards when provisioning AWS Autoscaling clusters. Successful implementation of these improvements will allow AWS Autoscaler users to: 1. Get rapid insights into their cluster state via CloudWatch dashboards. 2. Allow users to update their CloudWatch dashboard JSON configuration files during Ray up execution time. Notes: 1. This PR is a follow-up PR for #18619, adds dashboard support.
95
0
28,902
9
4
20
def get_batch(self) -> Tuple[List[List[np.ndarray]], ...]:
    model_inputs: List[List[np.ndarray]] = []
    model_targets: List[List[np.ndarray]] = []

    for side in ("a", "b"):
        side_feed, side_targets = next(self._feeds[side])
        if self._model.config["learn_mask"]:
            # Add the face mask as it's own target
            side_targets += [side_targets[-1][..., 3][..., None]]
        logger.trace("side: %s, input_shapes: %s, target_shapes: %s",  # type: ignore
                     side, side_feed.shape, [i.shape for i in side_targets])
        model_inputs.append([side_feed])
        model_targets.append(side_targets)

    return model_inputs, model_targets
plugins/train/trainer/_base.py
217
faceswap
{ "docstring": " Get the feed data and the targets for each training side for feeding into the model's\n train function.\n\n Returns\n -------\n model_inputs: list\n The inputs to the model for each side A and B\n model_targets: list\n The targets for the model for each side A and B\n ", "language": "en", "n_whitespaces": 111, "n_words": 46, "vocab_size": 26 }
59
Python
50
2beceffad9b15c1fd78f06b9b272563321c5a41e
_base.py
101,297
22
140
get_batch
https://github.com/deepfakes/faceswap.git
Data Augmentation update (#1263) - lib.detected_face - Subclass Masks for Landmark based masks - Add training mask propery + methods to DetectedFace - lib.training_training - subclass TrainingDataGenerator for training and preview data - Split cache into own module - Reduce thread count to 1 to prevent image corruption + data re-use - Process on largest model input/output size rather than stored image size - Size and crop masks during caching stage - Implement ring buffer for data flow - Fix preview reload bug - augmentation - typing - switch color aug order - better initialization - Fix warp + landmark warp to correctly apply at different image scales - Slightly improved warp caching - Don't store whether image is_preview. Handle all data as training images implicitly - plugins.trainer: Typing and fixes to work with trainingdata refactor
190
0
20,716
15
9
39
def optgroups(self, name, value, attr=None):
    default = (None, [], 0)
    groups = [default]
    has_selected = False
    selected_choices = {
        str(v) for v in value if str(v) not in self.choices.field.empty_values
    }
    if not self.is_required and not self.allow_multiple_selected:
        default[1].append(self.create_option(name, "", "", False, 0))
    remote_model_opts = self.field.remote_field.model._meta
    to_field_name = getattr(
        self.field.remote_field, "field_name", remote_model_opts.pk.attname
    )
    to_field_name = remote_model_opts.get_field(to_field_name).attname
    choices = (
        (getattr(obj, to_field_name), self.choices.field.label_from_instance(obj))
        for obj in self.choices.queryset.using(self.db).filter(
            **{"%s__in" % to_field_name: selected_choices}
        )
    )
    for option_value, option_label in choices:
        selected = str(option_value) in value and (
            has_selected is False or self.allow_multiple_selected
        )
        has_selected |= selected
        index = len(default[1])
        subgroup = default[1]
        subgroup.append(
            self.create_option(
                name, option_value, option_label, selected_choices, index
            )
        )
    return groups
django/contrib/admin/widgets.py
373
django
{ "docstring": "Return selected options based on the ModelChoiceIterator.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
108
Python
70
9c19aff7c7561e3a82978a272ecdaad40dda5c00
widgets.py
203,557
33
244
optgroups
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
435
0
50,447
15
2
1
async def test_focused_child_widget_with_movement_bindings_on_screen() -> None:
tests/test_binding_inheritance.py
16
textual
{ "docstring": "A focused child widget, with movement bindings in the screen, should trigger screen actions.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
5
Python
5
e75f784b2c788f95e398821266fcaab0f79aa12f
test_binding_inheritance.py
186,093
5
53
test_focused_child_widget_with_movement_bindings_on_screen
https://github.com/Textualize/textual.git
Add a test for a screen binding movement, wrapping a focusable widget This is the heart of the issue introduced by https://github.com/Textualize/textual/pull/1170/commits/b48a1402b8103ca16d5e338538620e9e08fb2c0e and which is being investigated in https://github.com/Textualize/textual/issues/1343 -- the child widget can be focused, but (as far as the author of the code is concerned) it has no bindings. Bindings for movement-oriented keys exist on the screen which composes up the widget into it. Up until 0.5.0 this worked just fine. As of 0.6.0, because binding inheritance was introduced, the bindings for movement that live at the `Widget` level cause the widget that has no bindings to appear to have bindings. While this can potentially be worked around with the use of inherit_bindings, this isn't a very satisfying solution and also breaks the rule of least astonishment. This test is going to be key to all of this. This is the test that should be made to work without breaking any of the other currently-passing tests.
8
0
45,329
6
4
12
def attributes_icon(self):
    if self._attributes:
        return self._attributes.get(ATTR_ICON)
    result = ICON_JSON_EXTRACT.search(
        self._row.shared_attrs or self._row.attributes
    )
    return result and result.group(1)
homeassistant/components/logbook/__init__.py
77
core
{ "docstring": "Extract the icon from the decoded attributes or json.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
17
Python
15
9215702388eef03c7c3ed9f756ea0db533d5beec
__init__.py
293,739
7
47
attributes_icon
https://github.com/home-assistant/core.git
Separate attrs into another table (reduces database size) (#68224)
74
0
92,792
11
10
26
def get_default_bank_cash_account(company, account_type=None, mode_of_payment=None, account=None):
    from erpnext.accounts.doctype.sales_invoice.sales_invoice import get_bank_cash_account

    if mode_of_payment:
        account = get_bank_cash_account(mode_of_payment, company).get("account")

    if not account:
        if account_type == "Bank":
            account = frappe.get_cached_value("Company", company, "default_bank_account")
            if not account:
                account_list = frappe.get_all(
                    "Account", filters={"company": company, "account_type": "Bank", "is_group": 0}
                )
                if len(account_list) == 1:
                    account = account_list[0].name

        elif account_type == "Cash":
            account = frappe.get_cached_value("Company", company, "default_cash_account")
            if not account:
                account_list = frappe.get_all(
                    "Account", filters={"company": company, "account_type": "Cash", "is_group": 0}
                )
                if len(account_list) == 1:
                    account = account_list[0].name

    if account:
        account_details = frappe.db.get_value(
            "Account", account, ["account_currency", "account_type"], as_dict=1
        )

        return frappe._dict(
            {
                "account": account,
                "balance": get_balance_on(account),
                "account_currency": account_details.account_currency,
                "account_type": account_details.account_type,
            }
        )
    else:
        return frappe._dict()


@frappe.whitelist()
erpnext/accounts/doctype/journal_entry/journal_entry.py
409
@frappe.whitelist()
erpnext
{ "docstring": "\n\t\tSet the default account first. If the user hasn't set any default account then, he doesn't\n\t\twant us to set any random account. In this case set the account only if there is single\n\t\taccount (of that type), otherwise return empty dict.\n\t\t", "language": "en", "n_whitespaces": 39, "n_words": 42, "vocab_size": 33 }
106
Python
58
494bd9ef78313436f0424b918f200dab8fc7c20b
journal_entry.py
64,875
40
236
get_default_bank_cash_account
https://github.com/frappe/erpnext.git
style: format code with black
70
1
13,744
19
1
5
def destroy_if_owned(self) -> int:
    raise NotImplementedError


@dataclass
python/ray/data/_internal/execution/interfaces.py
23
@dataclass
ray
{ "docstring": "Clears the object store memory for these blocks if owned.\n\n Returns:\n The number of bytes freed.\n ", "language": "en", "n_whitespaces": 41, "n_words": 16, "vocab_size": 16 }
7
Python
7
2cd4637521a0c75e0375266ff52d3dfca2084c2d
interfaces.py
138,197
7
10
destroy_if_owned
https://github.com/ray-project/ray.git
[data] New executor backend [1/n]--- Add basic interfaces (#31216) This PR adds the basic interfaces and feature flags; split out from https://github.com/ray-project/ray/pull/30903/files See REP ray-project/enhancements#18 for more details.
20
1
31,352
6
1
4
def subtract(inputs, **kwargs): return Subtract(**kwargs)(inputs)
keras/layers/merging/subtract.py
32
keras
{ "docstring": "Functional interface to the `Subtract` layer.\n\n Args:\n inputs: A list of input tensors (exactly 2).\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the difference of the inputs.\n\n Examples:\n\n ```python\n import keras\n\n input1 = keras.layers.Input(shape=(16,))\n x1 = keras.layers.Dense(8, activation='relu')(input1)\n input2 = keras.layers.Input(shape=(32,))\n x2 = keras.layers.Dense(8, activation='relu')(input2)\n subtracted = keras.layers.subtract([x1, x2])\n\n out = keras.layers.Dense(4)(subtracted)\n model = keras.models.Model(inputs=[input1, input2], outputs=out)\n ```\n ", "language": "en", "n_whitespaces": 154, "n_words": 59, "vocab_size": 48 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
subtract.py
272,697
2
18
subtract
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
11
0
81,039
9
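Illustrative usage sketch for the `subtract` record above (not part of the dataset; it is adapted directly from the functional-API example in the record's docstring):

import keras

input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation="relu")(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation="relu")(input2)
subtracted = keras.layers.subtract([x1, x2])  # element-wise x1 - x2

out = keras.layers.Dense(4)(subtracted)
model = keras.models.Model(inputs=[input1, input2], outputs=out)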
7
26
def pick_config_ids(device_type, os, navigator):
    if os is None:
        default_dev_types = ["desktop"]
    else:
        default_dev_types = list(DEVICE_TYPE_OS.keys())
    dev_type_choices = get_option_choices(
        "device_type", device_type, default_dev_types, list(DEVICE_TYPE_OS.keys())
    )
    os_choices = get_option_choices(
        "os", os, list(OS_NAVIGATOR.keys()), list(OS_NAVIGATOR.keys())
    )
    nav_choices = get_option_choices(
        "navigator", navigator, list(NAVIGATOR_OS.keys()), list(NAVIGATOR_OS.keys())
    )

    variants = []
    for dev, os, nav in product(dev_type_choices, os_choices, nav_choices):
        if (
            os in DEVICE_TYPE_OS[dev]
            and nav in DEVICE_TYPE_NAVIGATOR[dev]
            and nav in OS_NAVIGATOR[os]
        ):
            variants.append((dev, os, nav))
    if not variants:
        raise InvalidOption(
            "Options device_type, os and navigator"
            " conflicts with each other"
        )
    device_type, os_id, navigator_id = randomizer.choice(variants)

    assert os_id in OS_PLATFORM
    assert navigator_id in NAVIGATOR_OS
    assert device_type in DEVICE_TYPE_OS

    return device_type, os_id, navigator_id
build/pyinstaller/user_agent/base.py
315
OpenBBTerminal
{ "docstring": "\n Select one random pair (device_type, os_id, navigator_id) from\n all possible combinations matching the given os and\n navigator filters.\n\n :param os: allowed os(es)\n :type os: string or list/tuple or None\n :param navigator: allowed browser engine(s)\n :type navigator: string or list/tuple or None\n :param device_type: limit possible oses by device type\n :type device_type: list/tuple or None, possible values:\n \"desktop\", \"smartphone\", \"tablet\", \"all\"\n ", "language": "en", "n_whitespaces": 98, "n_words": 60, "vocab_size": 42 }
104
Python
66
ab4de1dd70fba866930150e440a03e461a6ca6a8
base.py
283,195
31
199
pick_config_ids
https://github.com/OpenBB-finance/OpenBBTerminal.git
Create a packaged app bundle with Pyinstaller (#1525) * Add dashboard widget assets * Add ipywidgets and ipyflex to project * Add currencies dashboard notebook * Update docs and docstrings * Add pyinstaller to project deps * Add pyinstaller artifacts to gitignore * Fix linter errors in terminal.py * Update cspell hook and action with a pyinstaller specific word * Add pyinstaller specfile and artifacts * Add splashscreen image * Add app icon * adding splash screen support to terminal.spec and terminal.py * Restore the conda env build files * Sync deps * Add border to the splashscreen image * Clean up terminal launcher * Add support for default feature flags in packages apps * Fix types and linting * Add splashscreen management to app bootup * Check prediction feature flag when entering crypto/pred * Update pyinstaller spec file * fix .spec file to work for splash and icon - removed the ".." * Allows to export when using installer (#1568) * fix export for packaged apps * fix filename * Git : replace commit_hash when it is set in config_terminal * Add update of the git commit hash in gtff default during build * Add packaged app name and feature flag to logs * Add platform specific icon assignment * Add macOS build assets * Add tensorflow to hidden imports * Move LOGGING_COMMIT_HASH to gtff * Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again. * Linting * Workflow : ignore ./build/pyinstaller from codespell * Workflow : exclude ./build/pyinstaller from flake8 * Poetry + Workflow : add types-six * Pyinstaller : remove property_cached, user_agent and vaderSentiment * Revert "Pyinstaller : remove property_cached, user_agent and vaderSentiment" This reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703. * Clean up local paths in specfile * Validate deps have correct Jinja version (they do) * Fix logging commit hash to be set correctly for the logger to see it Co-authored-by: Andrew <[email protected]> Co-authored-by: didierlopes.eth <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
273
0
84,461
13
1
32
def name_scope(name):
keras/backend.py
64
"""A context manager for use when defining a Python op. This context manager pushes a name scope, which will make the name of all operations added within it have a prefix. For example, to define a new Python op called `my_op`:use when defining a Python op. This context manager pushes a namewhich will make the name of all operations added within it have a prefix. Forto define a new Python op called
keras
{ "docstring": "A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:\n\n", "language": "en", "n_whitespaces": 49, "n_words": 41, "vocab_size": 34 }
2
Python
2
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,444
2
13
name_scope
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
5
4
80,083
6
15
6
def get_loc(self, key, method=None):
    if method is not None:
        raise NotImplementedError(
            "only the default get_loc method is "
            "currently supported for MultiIndex"
        )
    self._check_indexing_error(key)
pandas/core/indexes/multi.py
53
pandas
{ "docstring": "\n Get location for a label or a tuple of labels.\n\n The location is returned as an integer/slice or boolean\n mask.\n\n Parameters\n ----------\n key : label or tuple of labels (one for each level)\n method : None\n\n Returns\n -------\n loc : int, slice object or boolean mask\n If the key is past the lexsort depth, the return may be a\n boolean mask array, otherwise it is always a slice or int.\n\n See Also\n --------\n Index.get_loc : The get_loc method for (single-level) index.\n MultiIndex.slice_locs : Get slice location given start label(s) and\n end label(s).\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n\n Notes\n -----\n The key cannot be a slice, list of same-level labels, a boolean mask,\n or a sequence of such. If you want to use those, use\n :meth:`MultiIndex.get_locs` instead.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])\n\n >>> mi.get_loc('b')\n slice(1, 3, None)\n\n >>> mi.get_loc(('b', 'e'))\n 1\n ", "language": "en", "n_whitespaces": 428, "n_words": 149, "vocab_size": 93 }
24
Python
22
46ddb8ef882940fa3da58813e0b7a2df1061031e
multi.py
163,113
51
324
get_loc
https://github.com/pandas-dev/pandas.git
BUG: Index.get_loc always raise InvalidIndexError on listlike (#45181)
97
0
39,371
11
2
5
def setter(self, attr): if attr not in self._options: raise KeyError("No such option: %s" % attr)
mitmproxy/optmanager.py
40
mitmproxy
{ "docstring": "\n Generate a setter for a given attribute. This returns a callable\n taking a single argument.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 12 }
15
Python
15
b3587b52b25077f68116b9852b041d33e7fc6601
optmanager.py
251,421
5
27
setter
https://github.com/mitmproxy/mitmproxy.git
make it black!
40
0
73,730
11
3
8
def sqrtdenest(expr, max_iter=3): expr = expand_mul(expr) for i in range(max_iter): z = _sqrtdenest0(expr) if expr == z: return expr expr = z return expr
sympy/simplify/sqrtdenest.py
69
sympy
{ "docstring": "Denests sqrts in an expression that contain other square roots\n if possible, otherwise returns the expr unchanged. This is based on the\n algorithms of [1].\n\n Examples\n ========\n\n >>> from sympy.simplify.sqrtdenest import sqrtdenest\n >>> from sympy import sqrt\n >>> sqrtdenest(sqrt(5 + 2 * sqrt(6)))\n sqrt(2) + sqrt(3)\n\n See Also\n ========\n\n sympy.solvers.solvers.unrad\n\n References\n ==========\n\n .. [1] http://researcher.watson.ibm.com/researcher/files/us-fagin/symb85.pdf\n\n .. [2] D. J. Jeffrey and A. D. Rich, 'Symplifying Square Roots of Square Roots\n by Denesting' (available at http://www.cybertester.com/data/denest.pdf)\n\n ", "language": "en", "n_whitespaces": 133, "n_words": 75, "vocab_size": 63 }
24
Python
16
2a1afca9477eb781f16d5d6b63fa37abed7740a3
sqrtdenest.py
198,306
8
42
sqrtdenest
https://github.com/sympy/sympy.git
Use sympify less
68
0
48,866
10
1
5
def as_hashes(self) -> Hashes: return Hashes({self.name: [self.value]})
src/pip/_internal/models/link.py
39
pip
{ "docstring": "Return a Hashes instance which checks only for the current hash.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
7
Python
7
bad03ef931d9b3ff4f9e75f35f9c41f45839e2a1
link.py
174,923
3
23
as_hashes
https://github.com/pypa/pip.git
Use data-dist-info-metadata (PEP 658) to decouple resolution from downloading (#11111) Co-authored-by: Tzu-ping Chung <[email protected]>
21
0
41,518
11
1
6
def plextv_resources_two_servers_fixture(): return load_fixture("plex/plextv_resources_two_servers.xml") @pytest.fixture(name="plextv_shared_users", scope="session")
tests/components/plex/conftest.py
46
@pytest.fixture(name="plextv_shared_users", scope="session")
core
{ "docstring": "Load two-server payload for plex.tv resources and return it.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
6
Python
6
10195dc700770cdfdeaff79c53cf5d1d763f20c6
conftest.py
308,798
2
10
plextv_resources_two_servers_fixture
https://github.com/home-assistant/core.git
Improve server selection for Plex config flows (#63408)
11
1
107,536
8
2
15
def remove_signature_from_binary(filename): logger.debug("Removing signature from file %r", filename) cmd_args = ['codesign', '--remove', '--all-architectures', filename] p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) if p.returncode: raise SystemError(f"codesign command ({cmd_args}) failed with error code {p.returncode}!\noutput: {p.stdout}")
PyInstaller/utils/osx.py
118
pyinstaller
{ "docstring": "\n Remove the signature from all architecture slices of the given binary file using the codesign utility.\n ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 14 }
32
Python
31
1cd3b73e2939052271a0bc26cf204eebee4dcd15
osx.py
262,773
6
60
remove_signature_from_binary
https://github.com/pyinstaller/pyinstaller.git
macOS: Remove the timeouts for codesigning/signature stripping/lipo. (#6644)
54
0
77,359
12
5
24
def _get_child_layer_node_ids(self, node_id): # Sequential and Functional track layers with names following the format # "layer-N". Use this to generate the list of layers. num_layers = 0 child_layers = {} pattern = re.compile("layer-(\\d+)") for child in self._proto.nodes[node_id].children: m = pattern.match(child.local_name) if m is None: continue layer_n = int(m.group(1)) num_layers = max(layer_n + 1, num_layers) child_layers[layer_n] = child.node_id ordered = [] for n in range(num_layers): child = child_layers.get(n) if child is None: break ordered.append(child) return ordered
keras/saving/saved_model/load.py
191
keras
{ "docstring": "Returns the node ids of each layer in a Sequential/Functional model.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
75
Python
55
84afc5193d38057e2e2badf9c889ea87d80d8fbf
load.py
276,014
18
116
_get_child_layer_node_ids
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
263
0
81,524
12
1
16
def test_token_node_empty_csrf_cookie(self): req = self._get_request(cookie="") mw = CsrfViewMiddleware(token_view) mw.process_view(req, token_view, (), {}) resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) csrf_secret = _unmask_cipher_token(token) self._check_token_present(resp, csrf_secret)
tests/csrf_tests/tests.py
113
django
{ "docstring": "\n A new token is sent if the csrf_cookie is the empty string.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 10 }
24
Python
20
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
202,366
9
68
test_token_node_empty_csrf_cookie
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
87
0
50,084
10
15
25
def parse_tag(self, tag): subtags = tag.split("-") lang = {} labels = ["language", "extlang", "script", "region", "variant", "variant"] while subtags and labels: subtag = subtags.pop(0) found = False while labels: label = labels.pop(0) subtag = self.casing[label](subtag) if self.format[label].fullmatch(subtag): if subtag in self.db[label]: found = True valstr = self.val2str(self.db[label][subtag]["Description"]) if label == "variant" and label in lang: lang[label] += ": " + valstr else: lang[label] = valstr break elif subtag in self.db["deprecated"][label]: found = True note = f"The {subtag!r} {label} code is deprecated" if "Preferred-Value" in self.db["deprecated"][label][subtag]: prefer = self.db["deprecated"][label][subtag][ "Preferred-Value" ] note += f"', prefer '{self.val2str(prefer)}'" lang[label] = self.val2str( self.db["deprecated"][label][subtag]["Description"] ) warn(note) break if not found: if subtag == "u" and subtags[0] == "sd": # CLDR regional subdivisions sd = subtags[1] if sd in self.subdiv: ext = self.subdiv[sd] else: ext = f"<Unknown subdivision: {ext}>" else: # other extension subtags are not supported yet ext = f"{subtag}{''.join(['-'+ext for ext in subtags])}".lower() if not self.format["singleton"].fullmatch(subtag): ext = f"<Invalid extension: {ext}>" warn(ext) lang["extension"] = ext subtags = [] return lang
nltk/corpus/reader/bcp47.py
598
nltk
{ "docstring": "Convert a BCP-47 tag to a dictionary of labelled subtags", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
166
Python
94
f019fbedb3d2b6a2e6b58ec1b38db612b106568b
bcp47.py
42,591
47
318
parse_tag
https://github.com/nltk/nltk.git
Support both iso639-3 codes and BCP-47 language tags (#3060) * Add support for iso639-3 language codes * Add support for retired language codes * Move langnames.py to the top-level * Add langcode() function * Add iso639retired dictionary * Improve wrapper functions * Add module docstring with doctest * Add 2-letter language codes * Add regular expression check * Improve inverse lookup of retired codes * Support BCP-47 * Avoid deprecated langcodes * Set stack level for warnings to warn on the langname call Now it throws e.g. ``` ...\nltk_3060.py:9: UserWarning: Shortening 'smo' to 'sm' print(f"{lang}: {langname(code)}") ``` Rather than ``` ...\nltk\langnames.py:64: UserWarning: Shortening zha to za warn(f"Shortening {code} to {code2}") ``` * Dict key membership is equivalent to dict membership * Resolve bug: subtag -> tag * Capitalize BCP47 in CorpusReader name * Reimplement removed type hint changes from #3081 Co-authored-by: Tom Aarsen <[email protected]>
1,053
0
7,646
22
3
21
def test_recurrent_unroll_and_filter(self): inputs = TensorDict( { "in": torch.arange(B * T * 2).reshape(B, T, 2), "bork": torch.arange(5 * 4).reshape(5, 4), } ) states = TensorDict( { "in": torch.arange(B * 4).reshape(B, 4), "bork": torch.arange(5 * 4).reshape(5, 4), } ) outputs, out_states = SimpleRecurrentModel(ModelConfig()).unroll(inputs, states) desired = TensorDict({"out": torch.arange(B * T * 3).reshape(B, T, 3)}) desired_states = TensorDict({"out": torch.arange(B * 5).reshape(B, 5)}) for k in outputs.flatten().keys() | desired.flatten().keys(): check(outputs[k], desired[k]) for k in out_states.flatten().keys() | desired_states.flatten().keys(): check(out_states[k], desired_states[k])
rllib/models/tests/test_torch_model.py
374
ray
{ "docstring": "Ensures unused inputs are filtered out before _unroll and that\n outputs are correct.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 12 }
75
Python
44
b0945548e874642287b81514b71432a2330de1d3
test_torch_model.py
128,542
20
235
test_recurrent_unroll_and_filter
https://github.com/ray-project/ray.git
[RLlib] Add torch models (#29043) 1. converted class attributes to setters 2. use override decorator 3. SimpleModel should not have any T dimension, it can confuse folks. So I removed it. 4. I merged all the unittests under one class and separated them by methods names. It will be easier to use -k filter to run pytests later if we don't allow repetative method names. Signed-off-by: Kourosh Hakhamaneshi <[email protected]> Signed-off-by: Steven Morad <[email protected]>
271
0
28,737
16
8
18
def _shard(self, num_shards=None, index=None, contiguous=False): if num_shards is None: num_shards = dist.get_world_size() if index is None: index = dist.get_rank() if contiguous: div = len(self) // num_shards mod = len(self) % num_shards start = div * index + min(index, mod) end = start + div + (1 if index < mod else 0) new_data = [self.new_data[idx] for idx in range(start, end)] else: new_data = [ self.new_data[idx] for idx in range(len(self.new_data)) if idx % num_shards == index ] return MapDataset(new_data)
paddlenlp/datasets/dataset.py
220
PaddleNLP
{ "docstring": "\n Split the dataset into `num_shards` pieces. Note that the size of each\n shard might be different because the original dataset may not be evenly\n divisible.\n\n Args:\n num_shards (int, optional): An integer representing the number of\n data shards. If None, `num_shards` would be number of trainers.\n Defaults to `None`.\n index (int, optional): An integer representing the index of the\n current shard. If None, `index` would be the current trainer rank\n id. Defaults to `None`.\n contiguous: (bool, optional): If true, contiguous chunks of data \n will be select for sharding. And total number of examples will \n be the same. Otherwise each shard will contain all examples of \n dataset whose index mod `num_shards` = `index`. Defaults to `False`.\n ", "language": "en", "n_whitespaces": 291, "n_words": 114, "vocab_size": 66 }
78
Python
44
1c10abadb7c960e58ce44813f6197dfca9cbd28d
dataset.py
322,238
17
141
_shard
https://github.com/PaddlePaddle/PaddleNLP.git
Integrate HF Datasets and add DatasetTuple (#1612) * fix bart perf * update fastergeneration doc * add img * add img * change img * update img * fix img * update docs * fix readme * update readme * fix perf * fix perf * fix modelling * fix perf and sample code * fix perf * fix perf * fix seq_len for gpt_sample * add forced eos token id for faster * upgrade perf and add forced eos token id * chenge stack to gather * add auto perf * minor fix * remove encoder change * Update bart_perf.py * Update bart_perf.py * 1. Integrate HF Datasets 2. return all splits by default 3. load_dataset returns DatasetTuple now * add HF Dataset example * add kwargs for HF load_dataset * change datasets to alternative * remove experimental
249
0
118,103
16
3
19
def _register_arrow_data_serializer(serialization_context): import pyarrow as pa if os.environ.get(RAY_DISABLE_CUSTOM_ARROW_DATA_SERIALIZATION, "0") == "1": return # Register custom reducer for Arrow Arrays. array_types = _get_arrow_array_types() for array_type in array_types: serialization_context._register_cloudpickle_reducer( array_type, _arrow_array_reduce ) # Register custom reducer for Arrow ChunkedArrays. serialization_context._register_cloudpickle_reducer( pa.ChunkedArray, _arrow_chunkedarray_reduce ) # Register custom reducer for Arrow RecordBatches. serialization_context._register_cloudpickle_reducer( pa.RecordBatch, _arrow_recordbatch_reduce ) # Register custom reducer for Arrow Tables. serialization_context._register_cloudpickle_reducer(pa.Table, _arrow_table_reduce)
python/ray/data/_internal/arrow_serialization.py
124
ray
{ "docstring": "Custom reducer for Arrow data that works around a zero-copy slicing pickling\n bug by using the Arrow IPC format for the underlying serialization.\n\n Background:\n Arrow has both array-level slicing and buffer-level slicing; both are zero-copy,\n but the former has a serialization bug where the entire buffer is serialized\n instead of just the slice, while the latter's serialization works as expected\n and only serializes the slice of the buffer. I.e., array-level slicing doesn't\n propagate the slice down to the buffer when serializing the array.\n\n All that these copy methods do is, at serialization time, take the array-level\n slicing and translate them to buffer-level slicing, so only the buffer slice is\n sent over the wire instead of the entire buffer.\n\n See https://issues.apache.org/jira/browse/ARROW-10739.\n ", "language": "en", "n_whitespaces": 188, "n_words": 120, "vocab_size": 75 }
61
Python
38
c1d62d46495f0157faf3168aa87eed350802e10f
arrow_serialization.py
128,560
16
73
_register_arrow_data_serializer
https://github.com/ray-project/ray.git
[Datasets] Arrow 7.0.0+ Support: Use Arrow IPC format for pickling Arrow data to circumvent slice view buffer truncation bug. (#29055) This PR registers a custom serializer for Array arrays, chunked arrays, record batches, and tables that works around an Arrow serialization bug that serializes the entire underlying data buffer when serializing zero-copy slice views. The custom serializer uses the Arrow IPC format as the underlying pickled representation.
149
0
28,745
9
1
2
def csrc(self): return self["csrc"]
packages/python/plotly/plotly/graph_objs/_scatterternary.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for `c`.\n\n The 'csrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 77, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_scatterternary.py
228,160
2
11
csrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,833
7
1
12
def test_size_hint(view): view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test1')) height1 = view.sizeHint().height() assert height1 > 0 view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test2')) height2 = view.sizeHint().height() assert height2 == height1 * 2
tests/unit/mainwindow/test_messageview.py
122
qutebrowser
{ "docstring": "The message height should increase with more messages.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
22
Python
15
5616a99eff34f7074641d1391ed77d6b4b743529
test_messageview.py
320,927
7
72
test_size_hint
https://github.com/qutebrowser/qutebrowser.git
Add a MessageInfo data class Preparation for #7246
43
0
117,440
11
6
47
def handler(request, operation, current_url): if operation != QNetworkAccessManager.Operation.GetOperation: return networkreply.ErrorNetworkReply( request, "Unsupported request type", QNetworkReply.NetworkError.ContentOperationNotPermittedError) url = request.url() if ((url.scheme(), url.host(), url.path()) == ('qute', 'settings', '/set')): if current_url != QUrl('qute://settings/'): log.network.warning("Blocking malicious request from {} to {}" .format(current_url.toDisplayString(), url.toDisplayString())) return networkreply.ErrorNetworkReply( request, "Invalid qute://settings request", QNetworkReply.NetworkError.ContentAccessDenied) try: mimetype, data = qutescheme.data_for_url(url) except qutescheme.Error as e: errors = { qutescheme.NotFoundError: QNetworkReply.NetworkError.ContentNotFoundError, qutescheme.UrlInvalidError: QNetworkReply.NetworkError.ContentOperationNotPermittedError, qutescheme.RequestDeniedError: QNetworkReply.NetworkError.ContentAccessDenied, qutescheme.SchemeOSError: QNetworkReply.NetworkError.ContentNotFoundError, qutescheme.Error: QNetworkReply.NetworkError.InternalServerError, } exctype = type(e) log.misc.error("{} while handling qute://* URL".format( exctype.__name__)) return networkreply.ErrorNetworkReply(request, str(e), errors[exctype]) except qutescheme.Redirect as e: qtutils.ensure_valid(e.url) return networkreply.RedirectNetworkReply(e.url) return networkreply.FixedDataNetworkReply(request, data, mimetype)
qutebrowser/browser/webkit/network/webkitqutescheme.py
418
qutebrowser
{ "docstring": "Scheme handler for qute:// URLs.\n\n Args:\n request: QNetworkRequest to answer to.\n operation: The HTTP operation being done.\n current_url: The page we're on currently.\n\n Return:\n A QNetworkReply.\n ", "language": "en", "n_whitespaces": 63, "n_words": 26, "vocab_size": 25 }
93
Python
76
0877fb0d78635692e481c8bde224fac5ad0dd430
webkitqutescheme.py
321,172
38
264
handler
https://github.com/qutebrowser/qutebrowser.git
Run scripts/dev/rewrite_enums.py
483
0
117,576
15
2
4
def get(self, key, default_value=None): if key in self: return self[key] else: return default_value
pipenv/patched/notpip/_vendor/pyparsing/results.py
42
pipenv
{ "docstring": "\n Returns named result matching the given key, or if there is no\n such name, then returns the given ``default_value`` or ``None`` if no\n ``default_value`` is specified.\n\n Similar to ``dict.get()``.\n\n Example::\n\n integer = Word(nums)\n date_str = integer(\"year\") + '/' + integer(\"month\") + '/' + integer(\"day\")\n\n result = date_str.parse_string(\"1999/12/31\")\n print(result.get(\"year\")) # -> '1999'\n print(result.get(\"hour\", \"not specified\")) # -> 'not specified'\n print(result.get(\"hour\")) # -> None\n ", "language": "en", "n_whitespaces": 171, "n_words": 62, "vocab_size": 44 }
13
Python
12
f3166e673fe8d40277b804d35d77dcdb760fc3b3
results.py
20,619
5
26
get
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
56
0
3,457
8
1
23
def huber(y_true, y_pred, delta=1.0): y_pred = tf.cast(y_pred, dtype=backend.floatx()) y_true = tf.cast(y_true, dtype=backend.floatx()) delta = tf.cast(delta, dtype=backend.floatx()) error = tf.subtract(y_pred, y_true) abs_error = tf.abs(error) half = tf.convert_to_tensor(0.5, dtype=abs_error.dtype) return backend.mean( tf.where( abs_error <= delta, half * tf.square(error), delta * abs_error - half * tf.square(delta), ), axis=-1, ) @keras_export( "keras.losses.log_cosh", "keras.losses.logcosh", "keras.metrics.log_cosh", "keras.metrics.logcosh", ) @tf.__internal__.dispatch.add_dispatch_support
keras/losses.py
243
@keras_export( "keras.losses.log_cosh", "keras.losses.logcosh", "keras.metrics.log_cosh", "keras.metrics.logcosh", ) @tf.__internal__.dispatch.add_dispatch_support
keras
{ "docstring": "Computes Huber loss value.\n\n For each value x in `error = y_true - y_pred`:\n\n ```\n loss = 0.5 * x^2 if |x| <= d\n loss = d * |x| - 0.5 * d^2 if |x| > d\n ```\n where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss\n\n Args:\n y_true: tensor of true targets.\n y_pred: tensor of predicted targets.\n delta: A float, the point where the Huber loss function changes from a\n quadratic to linear.\n\n Returns:\n Tensor with one scalar loss entry per sample.\n ", "language": "en", "n_whitespaces": 158, "n_words": 80, "vocab_size": 57 }
53
Python
38
84afc5193d38057e2e2badf9c889ea87d80d8fbf
losses.py
274,548
15
139
huber
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
143
1
81,226
13
2
12
def print_help(self): id_string = "" for s_id, sub_dict in self.current_series.items(): id_string += f" [cyan]{s_id.upper()}[/cyan] : {sub_dict['title']}" help_string = f t_console.print(help_string)
gamestonk_terminal/economy/fred/prediction/pred_controller.py
96
OpenBBTerminal
{ "docstring": "Print help\nPrediction Techniques Menu:\n\n load load new series\n\nSelected Series (starting from [green]{self.start_date}[/green]):\n{id_string}\n\nModels:\n ets exponential smoothing (e.g. Holt-Winters)\n knn k-Nearest Neighbors\n regression polynomial regression\n arima autoregressive integrated moving average\n mlp MultiLayer Perceptron\n rnn Recurrent Neural Network\n lstm Long-Short Term Memory\n conv1d 1D Convolutional Neural Network\n mc Monte-Carlo simulations\n ", "language": "en", "n_whitespaces": 151, "n_words": 51, "vocab_size": 47 }
20
Python
18
f5b0dc8e7b5ae7ed3a4b175ba48aba0d5ea9d2db
pred_controller.py
281,032
24
36
print_help
https://github.com/OpenBB-finance/OpenBBTerminal.git
Add prediction to economy/fred (#1133)
69
0
83,477
12
4
20
def __call__(self, _metric=None, **kwargs): assert self._last_report_time is not None, ( "StatusReporter._start() must be called before the first " "report __call__ is made to ensure correct runtime metrics." ) if _metric: kwargs[DEFAULT_METRIC] = _metric # time per iteration is recorded directly in the reporter to ensure # any delays in logging results aren't counted report_time = time.time() if TIME_THIS_ITER_S not in kwargs: kwargs[TIME_THIS_ITER_S] = report_time - self._last_report_time self._last_report_time = report_time # add results to a thread-safe queue self._queue.put(kwargs.copy(), block=True) # This blocks until notification from the FunctionRunner that the last # result has been returned to Tune and that the function is safe to # resume training. self._continue_semaphore.acquire() # If the trial should be terminated, exit gracefully. if self._end_event.is_set(): self._end_event.clear() sys.exit(0)
python/ray/tune/function_runner.py
182
ray
{ "docstring": "Report updated training status.\n\n Pass in `done=True` when the training job is completed.\n\n Args:\n kwargs: Latest training result status.\n\n Example:\n >>> reporter(mean_accuracy=1, training_iteration=4)\n >>> reporter(mean_accuracy=1, training_iteration=4, done=True)\n\n Raises:\n StopIteration: A StopIteration exception is raised if the trial has\n been signaled to stop.\n ", "language": "en", "n_whitespaces": 136, "n_words": 42, "vocab_size": 35 }
120
Python
86
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
function_runner.py
132,209
16
107
__call__
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
305
0
29,699
10
2
14
def register_plotting_backend_cb(key) -> None: if key == "matplotlib": # We defer matplotlib validation, since it's the default return from pandas.plotting._core import _get_plot_backend _get_plot_backend(key) with cf.config_prefix("plotting"): cf.register_option( "backend", defval="matplotlib", doc=plotting_backend_doc, validator=register_plotting_backend_cb, ) register_converter_doc =
pandas/core/config_init.py
97
pandas
{ "docstring": "\n: bool or 'auto'.\n Whether to register converters with matplotlib's units registry for\n dates, times, datetimes, and Periods. Toggling to False will remove\n the converters, restoring any converters that pandas overwrote.\n", "language": "en", "n_whitespaces": 39, "n_words": 31, "vocab_size": 29 }
33
Python
33
9612375ca28ade056f15d4338f1bfde5d045c9fc
config_init.py
167,696
5
25
register_plotting_backend_cb
https://github.com/pandas-dev/pandas.git
TYP: return values in core/*.py (#47587) * TYP: return values in core/*.py * fix test * to_html * to_html part 2 * DataFrame.query * more overloads * fix query? * increase stacklevel by one * fix rename_axis * and an overload for DataFrame.eval * address comments * fix typevar
88
0
40,080
9