Dataset columns (element type and observed range of values or string lengths):

ast_errors: string, lengths 0-3.2k
d_id: int64, 44-121k
id: int64, 70-338k
n_whitespaces: int64, 3-14k
path: string, lengths 8-134
n_words: int64, 4-4.82k
n_identifiers: int64, 1-131
random_cut: string, lengths 16-15.8k
commit_message: string, lengths 2-15.3k
fun_name: string, lengths 1-84
commit_id: string, length 40
repo: string, lengths 3-28
file_name: string, lengths 5-79
ast_levels: int64, 6-31
nloc: int64, 1-548
url: string, lengths 31-59
complexity: int64, 1-66
token_counts: int64, 6-2.13k
n_ast_errors: int64, 0-28
vocab_size: int64, 4-1.11k
n_ast_nodes: int64, 15-19.2k
language: string, 1 distinct value
documentation: dict
code: string, lengths 101-62.2k
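The columns above describe one function-level sample per record: its provenance (repo, path, file_name, fun_name, commit_id, url), the extracted code (random_cut, code), its docstring (documentation), and per-function statistics (nloc, complexity, token_counts, vocab_size, and the AST measures). As a minimal sketch of how records with this layout could be inspected, assuming the samples were exported to a local Parquet file (the filename "code_samples.parquet" and the use of pandas are assumptions, not part of this preview):

```python
# Hypothetical sketch: load rows matching the schema above from a local
# Parquet export and inspect one sample. The filename is an assumption.
import pandas as pd

df = pd.read_parquet("code_samples.parquet")

# Per-function statistics live in the numeric columns.
print(df[["repo", "fun_name", "nloc", "complexity", "token_counts"]].head())

# `documentation` is a dict holding the docstring and its stats;
# `code` holds the full function body as a single string.
sample = df.iloc[0]
print(sample["documentation"]["docstring"])
print(sample["code"])
```

Sample records follow.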
d_id: 34,338 | id: 148,805 | n_whitespaces: 205 | path: freqtrade/exchange/exchange.py | n_words: 56 | n_identifiers: 18
def reload_markets(self) -> None: # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > arrow.utcnow().int_timestamp): return None logger.debug("Performing scheduled market reload..") try: self._markets = self._api.load_markets(reload=True) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = arrow.utcnow().int_timestamp self.f
replaced "leverage" with "tiers"
reload_markets
42e36f44f8a91a79a8ffa14698542f38df39cb50
repo: freqtrade | file_name: exchange.py | ast_levels: 11 | nloc: 14 | url: https://github.com/freqtrade/freqtrade.git | complexity: 4 | token_counts: 94 | n_ast_errors: 0 | vocab_size: 49 | n_ast_nodes: 161 | language: Python
{ "docstring": "Reload markets both sync and async if refresh interval has passed ", "language": "en", "n_whitespaces": 11, "n_words": 11, "vocab_size": 11 }
def reload_markets(self) -> None: # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > arrow.utcnow().int_timestamp): return None logger.debug("Performing scheduled market reload..") try: self._markets = self._api.load_markets(reload=True) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = arrow.utcnow().int_timestamp self.fill_leverage_tiers() except ccxt.BaseError: logger.exception("Could not reload markets.")
d_id: 47,852 | id: 196,352 | n_whitespaces: 43 | path: sympy/matrices/common.py | n_words: 12 | n_identifiers: 6
def is_strongly_diagonally_dominant(self): r
Moved imports to higher level
is_strongly_diagonally_dominant
59d22b6bb7287613d598611027f640d068ca5748
repo: sympy | file_name: common.py | ast_levels: 7 | nloc: 40 | url: https://github.com/sympy/sympy.git | complexity: 3 | token_counts: 39 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 37 | language: Python
{ "docstring": "Tests if the matrix is row strongly diagonally dominant.\n\n Explanation\n ===========\n\n A $n, n$ matrix $A$ is row strongly diagonally dominant if\n\n .. math::\n \\left|A_{i, i}\\right| > \\sum_{j = 0, j \\neq i}^{n-1}\n \\left|A_{i, j}\\right| \\quad {\\text{for all }}\n i \\in \\{ 0, ..., n-1 \\}\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]])\n >>> A.is_strongly_diagonally_dominant\n False\n\n >>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]])\n >>> A.is_strongly_diagonally_dominant\n False\n\n >>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]])\n >>> A.is_strongly_diagonally_dominant\n True\n\n Notes\n =====\n\n If you want to test whether a matrix is column diagonally\n dominant, you can apply the test after transposing the matrix.\n ", "language": "en", "n_whitespaces": 301, "n_words": 121, "vocab_size": 79 }
def is_strongly_diagonally_dominant(self): r if not self.is_square: return False rows, cols = self.shape
d_id: 1,111 | id: 7,060 | n_whitespaces: 58 | path: ludwig/features/binary_feature.py | n_words: 15 | n_identifiers: 12
def create_calibration_module(self, feature) -> torch.nn.Module: if feature.get("calibration"): calibration_cls = calibration.get_calibration_cls(BINARY, "temperature_scaling") return calibration_cls(binary=True) return None
Adds mechanism for calibrating probabilities for category and binary features (#1949) * Started adding files for calibration implementation. * Adds option to return logits and labels in predictor. * Pre-commit fixes * First pass temperature scaling working. * Fixes calibration for categorical feature. * Separate calibrated logits from logits. * Adds option to revert temperature scaling. * Refactoring, move binary prediction logic into calibration class. * Reverted accidental commit to simple_model_training.py * Adds checks and comments. * Fixes matrix scaling, convert pandas series to numpy arrays. * Fixes number of classes for categorical features. * Adds structured calibration result, unit tests. * Make create_calibration_module not abstract, default implementation returns None. * Relax precision requirement for calibration test. * Save weights after calibration, so calibration results are included in save file. * Implemented dirichlet scaling with l2 off-diagonal regularization. * Adds masked_select off_diagonal method. * Change back to matrix scaling. * Updates test expectations to reflect learning rate settings. * Tuned default regularization weight. * Comments. * Set random seed, testing to see if that makes a difference. * Remove checks for exact NLL, ECE values post calibration. * Restored LOGITS to EXCLUDE_PRED_SET, added another option to return logits in batch_predict. * Factor calibration method out of Trainer into Calibrator * Removed horovod argument from calibrator. * Return batch_size if eval_batch_size not specified. * Fix calibration_module docstring. * Updates comment, adds fallback method of calibrating on training set if no validation set available. * Adds calibration registry, replaces if statements for instantiating calibration. * Raise ValueError if unsupported calibration method specified. * Remove calibrate method from Trainer * f string * Use backend to create predictor for calibration. * Moves saving out of calibrator * Fix comment. * Adds ray test of calibration. * Implements collect_logits in ray predictor. * First pass implementation of collect_labels. * Implements collect_logits and collect_labels in ray backend. * Merge predictions and labels in ray backend * Reverts collect_labels, get labels from dataset in calibrate. * Allow overriding EXCLUDE_PRED_SET when getting preds. * Changes 'calibration' config option to binary. * Test both binary and category output features in ray test. * Comments/ * Adds type hints. Co-authored-by: Daniel Treiman <[email protected]>
create_calibration_module
e65f74e87e8e29922f4e9f9d839978ffb2c5b029
repo: ludwig | file_name: binary_feature.py | ast_levels: 11 | nloc: 10 | url: https://github.com/ludwig-ai/ludwig.git | complexity: 2 | token_counts: 41 | n_ast_errors: 0 | vocab_size: 14 | n_ast_nodes: 70 | language: Python
{ "docstring": "Creates the appropriate calibration module based on the feature config.\n\n Today, only one type of calibration (\"temperature_scaling\") is available, but more options may be supported in\n the future.\n ", "language": "en", "n_whitespaces": 49, "n_words": 28, "vocab_size": 25 }
def create_calibration_module(self, feature) -> torch.nn.Module: if feature.get("calibration"): calibration_cls = calibration.get_calibration_cls(BINARY, "temperature_scaling") return calibration_cls(binary=True) return None
d_id: 52,846 | id: 210,002 | n_whitespaces: 616 | path: ppdet/utils/download.py | n_words: 143 | n_identifiers: 42
def _download(url, path, md5sum=None): if not osp.exists(path): os.makedir
fix download.py (#5159)
_download
1dcec15b6600df750d680e97e89117fcb8eb84a0
repo: PaddleDetection | file_name: download.py | ast_levels: 19 | nloc: 35 | url: https://github.com/PaddlePaddle/PaddleDetection.git | complexity: 11 | token_counts: 256 | n_ast_errors: 0 | vocab_size: 103 | n_ast_nodes: 435 | language: Python
{ "docstring": "\n Download from url, save to path.\n\n url (str): download url\n path (str): download to given path\n ", "language": "en", "n_whitespaces": 29, "n_words": 16, "vocab_size": 11 }
def _download(url, path, md5sum=None): if not osp.exists(path): os.makedirs(path) fname = osp.split(url)[-1] fullname = osp.join(path, fname) retry_cnt = 0 while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum, url)): if retry_cnt < DOWNLOAD_RETRY_LIMIT: retry_cnt += 1 else: raise RuntimeError("Download from {} failed. " "Retry limit reached".format(url)) logger.info("Downloading {} from {}".format(fname, url)) # NOTE: windows path join may incur \, which is invalid in url if sys.platform == "win32": url = url.replace('\\', '/') req = requests.get(url, stream=True) if req.status_code != 200: raise RuntimeError("Downloading from {} failed with code " "{}!".format(url, req.status_code)) # For protecting download interupted, download to # tmp_fullname firstly, move tmp_fullname to fullname # after download finished tmp_fullname = fullname + "_tmp" total_size = req.headers.get('content-length') with open(tmp_fullname, 'wb') as f: if total_size: for chunk in tqdm.tqdm( req.iter_content(chunk_size=1024), total=(int(total_size) + 1023) // 1024, unit='KB'): f.write(chunk) else: for chunk in req.iter_content(chunk_size=1024): if chunk: f.write(chunk) shutil.move(tmp_fullname, fullname) return fullname
d_id: 51,172 | id: 205,715 | n_whitespaces: 149 | path: django/db/models/options.py | n_words: 25 | n_identifiers: 12
def related_objects(self): all_related_fields = self._get_fields( forward=False, reverse=True, include_hidden=True ) return make_immutable_fields_list( "related_objects", ( obj for obj in
Refs #33476 -- Reformatted code with Black.
related_objects
9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django | file_name: options.py | ast_levels: 12 | nloc: 12 | url: https://github.com/django/django.git | complexity: 4 | token_counts: 49 | n_ast_errors: 0 | vocab_size: 22 | n_ast_nodes: 77 | language: Python
{ "docstring": "\n Return all related objects pointing to the current model. The related\n objects can come from a one-to-one, one-to-many, or many-to-many field\n relation type.\n\n Private API intended only to be used by Django itself; get_fields()\n combined with filtering of field properties is the public API for\n obtaining this field list.\n ", "language": "en", "n_whitespaces": 99, "n_words": 49, "vocab_size": 42 }
def related_objects(self): all_related_fields = self._get_fields( forward=False, reverse=True, include_hidden=True ) return make_immutable_fields_list( "related_objects", ( obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many ), )
d_id: 50,121 | id: 202,419 | n_whitespaces: 88 | path: tests/csrf_tests/tests.py | n_words: 25 | n_identifiers: 13
def test_https_good_referer(self): req = self._get_POST_request_with_token()
Refs #33476 -- Reformatted code with Black.
test_https_good_referer
9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django | file_name: tests.py | ast_levels: 9 | nloc: 9 | url: https://github.com/django/django.git | complexity: 1 | token_counts: 68 | n_ast_errors: 0 | vocab_size: 20 | n_ast_nodes: 118 | language: Python
{ "docstring": "\n A POST HTTPS request with a good referer is accepted.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def test_https_good_referer(self): req = self._get_POST_request_with_token() req._is_secure_override = True req.META["HTTP_HOST"] = "www.example.com" req.META["HTTP_REFERER"] = "https://www.example.com/somepage" mw = CsrfViewMiddleware(post_form_view) mw.process_request(req) resp = mw.process_view(req, post_form_view, (), {}) self.assertIsNone(resp)
d_id: 71,467 | id: 247,057 | n_whitespaces: 133 | path: tests/rest/client/test_retention.py | n_words: 40 | n_identifiers: 16
def test_state_policy(self) -> None: room_id = self.helper.create_room_as(self.user_id, tok=self.token) # Set the maximum lifetime to 35 days so that the first event gets expired but not # the second one. self.helper.send_state( room_id=room_id, event_type=EventTypes.Rete
Add type hints to `tests/rest/client` (#12084)
test_state_policy
1901cb1d4a8b7d9af64493fbd336e9aa2561c20c
repo: synapse | file_name: test_retention.py | ast_levels: 11 | nloc: 12 | url: https://github.com/matrix-org/synapse.git | complexity: 1 | token_counts: 69 | n_ast_errors: 0 | vocab_size: 37 | n_ast_nodes: 109 | language: Python
{ "docstring": "Tests that an event gets correctly expired if there is no default retention\n policy but there's a policy specific to the room.\n ", "language": "en", "n_whitespaces": 36, "n_words": 22, "vocab_size": 21 }
def test_state_policy(self) -> None: room_id = self.helper.create_room_as(self.user_id, tok=self.token) # Set the maximum lifetime to 35 days so that the first event gets expired but not # the second one. self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, body={"max_lifetime": one_day_ms * 35}, tok=self.token, ) self._test_retention(room_id, expected_code_for_first_event=404)
d_id: 4,382 | id: 22,638 | n_whitespaces: 126 | path: insertion_sort.py | n_words: 53 | n_identifiers: 7
def insertion_sort(list, n): for i in range(0, n): key = list[i] j = i - 1 # Swap elements witth key iff they are # greater than
refactor: clean code Signed-off-by: slowy07 <[email protected]>
insertion_sort
f0af0c43340763724f139fa68aa1e5a9ffe458b4
repo: Python | file_name: insertion_sort.py | ast_levels: 12 | nloc: 9 | url: https://github.com/geekcomputers/Python.git | complexity: 4 | token_counts: 67 | n_ast_errors: 0 | vocab_size: 34 | n_ast_nodes: 105 | language: Python
{ "docstring": "\n sort list in assending order\n\n INPUT:\n list=list of values to be sorted\n n=size of list that contains values to be sorted\n\n OUTPUT:\n list of sorted values in assending order\n ", "language": "en", "n_whitespaces": 63, "n_words": 29, "vocab_size": 16 }
def insertion_sort(list, n): for i in range(0, n): key = list[i] j = i - 1 # Swap elements witth key iff they are # greater than key while j >= 0 and list[j] > key: list[j + 1] = list[j] j = j - 1 list[j + 1] = key return list
d_id: 42,922 | id: 179,232 | n_whitespaces: 21 | path: gradio/component.py | n_words: 7 | n_identifiers: 6
def get_template_context(self): return {"name": self.__class__.__name__.lower(),
Format The Codebase - black formatting - isort formatting
get_template_context
cc0cff893f9d7d472788adc2510c123967b384fe
repo: gradio | file_name: component.py | ast_levels: 11 | nloc: 2 | url: https://github.com/gradio-app/gradio.git | complexity: 1 | token_counts: 26 | n_ast_errors: 0 | vocab_size: 7 | n_ast_nodes: 47 | language: Python
{ "docstring": "\n :return: a dictionary with context variables for the javascript file associated with the context\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 11 }
def get_template_context(self): return {"name": self.__class__.__name__.lower(), "label": self.label}
d_id: 25,824 | id: 116,751 | n_whitespaces: 92 | path: mindsdb/integrations/handlers/teradata_handler/teradata_handler.py | n_words: 21 | n_identifiers: 6
def connect(self): if self.is_connected is True: r
feat: add teradata integration
connect
47c5e0ac2d89807f8ff7239d423a3d346bd39a1e
repo: mindsdb | file_name: teradata_handler.py | ast_levels: 10 | nloc: 9 | url: https://github.com/mindsdb/mindsdb.git | complexity: 2 | token_counts: 42 | n_ast_errors: 0 | vocab_size: 14 | n_ast_nodes: 70 | language: Python
{ "docstring": "\n Handles the connection to a Teradata database insance.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
def connect(self): if self.is_connected is True: return self.connection connection = teradatasql.connect( **self.connection_data ) self.is_connected = True self.connection = connection return self.connection
d_id: 46,963 | id: 194,429 | n_whitespaces: 59 | path: kivy/core/window/__init__.py | n_words: 13 | n_identifiers: 10
def unregister_event_manager(self, manager): self.event_managers.remove(manager) for type_id in manager.type_ids: self.event_managers_dict[type_id].remove(manager) m
Feature: EventManagerBase (#7658) * Added EventManagerBase class and event_managers attribute to WindowBase class. * Added on_motion event to Widget class. * Updated post_dispatch_input in EventLoopBase to skip non-touch events. * Using type ids in MouseMotionEventProvider. * Added on_motion method to Widget subclasses. * Updated Widget.on_motion method to dispatch to filtered widgets if 'pos' is not in me.profile. * Changed motion_filter property in Widget to store key to list values. * Updated Widget.on_motion to not dispatch event to children if widget is disabled. * Widget: Using flags to control dispatching in on_motion method. * Widget: Don't dispatch on_motion to children if only self is registered. * Widget: Removed collision on disabled check from on_motion method. * Widget: Added docstrings for motion_filter and related methods. * EventManager: Moved motion event flags to eventmanager/__init__.py module. * ScreenManager: Overrode the on_motion method. * WindowBase: Using attributes event_managers and event_managers_dict. * WindowBase: Added doc for register_event_manager and unregister_event_manager methods. * Widget: Improved default dispatch to stop after the last registered widgets. * EventManagerBase: Added initial docs class and module. * Widget: Added experimental warnings to motion_filter property and to on_motion and (un)register_for_motion_event methods. * WindowBase: Added docs for event_managers and event_managers_dict attributes. * MotionEvent: Added type_id and flags to push_attrs list. * EventManagerBase: Added versionadded tag on all flags. * EventManagerBase: Use dispatch modes instead of flags.
unregister_event_manager
1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4
repo: kivy | file_name: __init__.py | ast_levels: 11 | nloc: 6 | url: https://github.com/kivy/kivy.git | complexity: 2 | token_counts: 44 | n_ast_errors: 0 | vocab_size: 13 | n_ast_nodes: 72 | language: Python
{ "docstring": "Unregister and stop an event manager previously registered with\n :meth:`register_event_manager`.\n\n .. versionadded:: 2.1.0\n\n .. warning::\n This is an experimental method and it remains so until this warning\n is present as it can be changed or removed in the next versions of\n Kivy.\n ", "language": "en", "n_whitespaces": 103, "n_words": 42, "vocab_size": 37 }
def unregister_event_manager(self, manager): self.event_managers.remove(manager) for type_id in manager.type_ids: self.event_managers_dict[type_id].remove(manager) manager.stop() manager.window = None
d_id: 54,816 | id: 217,481 | n_whitespaces: 306 | path: python3.10.4/Lib/functools.py | n_words: 132 | n_identifiers: 24
def _c3_mro(cls, abcs=None): for i, base in enumerate(reversed(cls.__bases__)): if hasattr(base, '__abstractmethods__'): boundary = len(cls.__bases__) - i break # Bases up to the last explicit ABC are considered first. else: boundary = 0 abcs = list(abcs) if abcs else [] explicit_bases = list(cls.__bases__[:boundary]) abstract_bases = [] other_bases = list(cls.__bases__[boundary:]) for base in abcs: if issubclass(cls, base) and not any( issubclass(b, base) for b in cls.__bases__ ): # If *cls* is the class that introduces behaviour described by # an ABC *base*, insert said ABC to its MRO. abstract_bases.append(base) for base in abstract_bases: abcs.remove(base) explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] return _c3_merge
add python 3.10.4 for windows
_c3_mro
8198943edd73a363c266633e1aa5b2a9e9c9f526
repo: XX-Net | file_name: functools.py | ast_levels: 15 | nloc: 26 | url: https://github.com/XX-net/XX-Net.git | complexity: 12 | token_counts: 210 | n_ast_errors: 0 | vocab_size: 83 | n_ast_nodes: 328 | language: Python
{ "docstring": "Computes the method resolution order using extended C3 linearization.\n\n If no *abcs* are given, the algorithm works exactly like the built-in C3\n linearization used for method resolution.\n\n If given, *abcs* is a list of abstract base classes that should be inserted\n into the resulting MRO. Unrelated ABCs are ignored and don't end up in the\n result. The algorithm inserts ABCs where their functionality is introduced,\n i.e. issubclass(cls, abc) returns True for the class itself but returns\n False for all its direct base classes. Implicit ABCs for a given class\n (either registered or inferred from the presence of a special method like\n __len__) are inserted directly after the last ABC explicitly listed in the\n MRO of said class. If two implicit ABCs end up next to each other in the\n resulting MRO, their ordering depends on the order of types in *abcs*.\n\n ", "language": "en", "n_whitespaces": 177, "n_words": 141, "vocab_size": 96 }
def _c3_mro(cls, abcs=None): for i, base in enumerate(reversed(cls.__bases__)): if hasattr(base, '__abstractmethods__'): boundary = len(cls.__bases__) - i break # Bases up to the last explicit ABC are considered first. else: boundary = 0 abcs = list(abcs) if abcs else [] explicit_bases = list(cls.__bases__[:boundary]) abstract_bases = [] other_bases = list(cls.__bases__[boundary:]) for base in abcs: if issubclass(cls, base) and not any( issubclass(b, base) for b in cls.__bases__ ): # If *cls* is the class that introduces behaviour described by # an ABC *base*, insert said ABC to its MRO. abstract_bases.append(base) for base in abstract_bases: abcs.remove(base) explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] return _c3_merge( [[cls]] + explicit_c3_mros + abstract_c3_mros + other_c3_mros + [explicit_bases] + [abstract_bases] + [other_bases] )
d_id: 107,548 | id: 308,815 | n_whitespaces: 29 | path: homeassistant/components/nissan_leaf/__init__.py | n_words: 8 | n_identifiers: 7
async def async_start_charging(self) -> None: awai
Add button to start leaf charge (#62948) Co-authored-by: Bruce Duncan <[email protected]>
async_start_charging
10027b20904b678d8baecbc6e72c5bcc3f4f24b2
repo: core | file_name: __init__.py | ast_levels: 10 | nloc: 4 | url: https://github.com/home-assistant/core.git | complexity: 1 | token_counts: 26 | n_ast_errors: 0 | vocab_size: 8 | n_ast_nodes: 47 | language: Python
{ "docstring": "Request to start charging the car. Used by the button platform.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
async def async_start_charging(self) -> None: await self.hass.async_add_executor_job(self.leaf.start_charging) self.schedule_update()
d_id: 73,835 | id: 251,831 | n_whitespaces: 25 | path: test/mitmproxy/proxy/layers/http/test_http.py | n_words: 13 | n_identifiers: 13
def test_multiple_server_connections(tctx): server1 = Placeholder(Server) server2 = Placehold
make it black!
test_multiple_server_connections
b3587b52b25077f68116b9852b041d33e7fc6601
repo: mitmproxy | file_name: test_http.py | ast_levels: 11 | nloc: 35 | url: https://github.com/mitmproxy/mitmproxy.git | complexity: 1 | token_counts: 219 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 61 | language: Python
{ "docstring": "Test multiple requests being rewritten to different targets.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_multiple_server_connections(tctx): server1 = Placeholder(Server) server2 = Placeholder(Server) playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
d_id: 74,744 | id: 255,404 | n_whitespaces: 34 | path: onnx/test/compose_test.py | n_words: 9 | n_identifiers: 5
def test_overlapping_output_names(self) -> None: self._test_overlapping_names( outputs0=['o0', 'o1'], outputs1=['o1', 'o2'
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <[email protected]> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <[email protected]> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <[email protected]> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <[email protected]> * fixes Signed-off-by: Gary Miguel <[email protected]> * remove extra blank line Signed-off-by: Gary Miguel <[email protected]> * fix type annotations Signed-off-by: Gary Miguel <[email protected]> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <[email protected]> * fix Operators.md Signed-off-by: Gary Miguel <[email protected]> * fix TestCoverage.md Signed-off-by: Gary Miguel <[email protected]> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <[email protected]>
test_overlapping_output_names
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
repo: onnx | file_name: compose_test.py | ast_levels: 10 | nloc: 6 | url: https://github.com/onnx/onnx.git | complexity: 1 | token_counts: 28 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 52 | language: Python
{ "docstring": "\n Tests error checking when the name of the output overlaps\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
def test_overlapping_output_names(self) -> None: self._test_overlapping_names( outputs0=['o0', 'o1'], outputs1=['o1', 'o2'])
d_id: 11,096 | id: 54,559 | n_whitespaces: 30 | path: tests/test_settings.py | n_words: 9 | n_identifiers: 5
def test_write_profiles_does_not_include_default(self, temporary_profiles_path): write_profiles({}) assert "profiles.default" not in temporary_profiles_path.read_text()
Tests passing
test_write_profiles_does_not_include_default
1dd7561062328e96594bbf60a6d15f49163c9d87
repo: prefect | file_name: test_settings.py | ast_levels: 8 | nloc: 3 | url: https://github.com/PrefectHQ/prefect.git | complexity: 1 | token_counts: 22 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 40 | language: Python
{ "docstring": "\n Including the default has a tendency to bake in settings the user may not want, and\n can prevent them from gaining new defaults.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 22 }
def test_write_profiles_does_not_include_default(self, temporary_profiles_path): write_profiles({}) assert "profiles.default" not in temporary_profiles_path.read_text()
d_id: 49,459 | id: 199,970 | n_whitespaces: 69 | path: sympy/physics/optics/polarization.py | n_words: 15 | n_identifiers: 9
def phase_retarder(theta=0, delta=0): R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2, (1-exp(I*delta))*cos(theta)*sin(theta)], [(1-exp(I*delta))*cos(theta)*sin(theta), sin(theta)**2 + exp(I*delta)*cos(theta)**2]]) return R*exp(-I*delta/2)
removed backticks around variable names in docs according to PR review
phase_retarder
ae2baaa0bbcd42792bb2e7887ca61b97abc40463
repo: sympy | file_name: polarization.py | ast_levels: 17 | nloc: 6 | url: https://github.com/sympy/sympy.git | complexity: 1 | token_counts: 118 | n_ast_errors: 0 | vocab_size: 14 | n_ast_nodes: 185 | language: Python
{ "docstring": "A phase retarder Jones matrix with retardance `delta` at angle `theta`.\n\n Parameters\n ==========\n\n theta : numeric type or SymPy Symbol\n The angle of the fast axis relative to the horizontal plane.\n delta : numeric type or SymPy Symbol\n The phase difference between the fast and slow axes of the\n transmitted light.\n\n Returns\n =======\n\n SymPy Matrix :\n A Jones matrix representing the retarder.\n\n Examples\n ========\n\n A generic retarder.\n\n >>> from sympy import pprint, symbols\n >>> from sympy.physics.optics.polarization import phase_retarder\n >>> theta, delta = symbols(\"theta, delta\", real=True)\n >>> R = phase_retarder(theta, delta)\n >>> pprint(R, use_unicode=True)\n โŽก -โ…ˆโ‹…ฮด -โ…ˆโ‹…ฮด โŽค\n โŽข โ”€โ”€โ”€โ”€โ”€ โ”€โ”€โ”€โ”€โ”€ โŽฅ\n โŽขโŽ› โ…ˆโ‹…ฮด 2 2 โŽž 2 โŽ› โ…ˆโ‹…ฮดโŽž 2 โŽฅ\n โŽขโŽโ„ฏ โ‹…sin (ฮธ) + cos (ฮธ)โŽ โ‹…โ„ฏ โŽ1 - โ„ฏ โŽ โ‹…โ„ฏ โ‹…sin(ฮธ)โ‹…cos(ฮธ)โŽฅ\n โŽข โŽฅ\n โŽข -โ…ˆโ‹…ฮด -โ…ˆโ‹…ฮด โŽฅ\n โŽข โ”€โ”€โ”€โ”€โ”€ โ”€โ”€โ”€โ”€โ”€โŽฅ\n โŽขโŽ› โ…ˆโ‹…ฮดโŽž 2 โŽ› โ…ˆโ‹…ฮด 2 2 โŽž 2 โŽฅ\n โŽฃโŽ1 - โ„ฏ โŽ โ‹…โ„ฏ โ‹…sin(ฮธ)โ‹…cos(ฮธ) โŽโ„ฏ โ‹…cos (ฮธ) + sin (ฮธ)โŽ โ‹…โ„ฏ โŽฆ\n\n ", "language": "en", "n_whitespaces": 637, "n_words": 153, "vocab_size": 93 }
def phase_retarder(theta=0, delta=0): R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2, (1-exp(I*delta))*cos(theta)*sin(theta)], [(1-exp(I*delta))*cos(theta)*sin(theta), sin(theta)**2 + exp(I*delta)*cos(theta)**2]]) return R*exp(-I*delta/2)
d_id: 42,804 | id: 178,724 | n_whitespaces: 214 | path: nuitka/utils/Execution.py | n_words: 90 | n_identifiers: 20
def wrapCommandForDebuggerForExec(*args): gdb_path = getExecutablePath("gdb") # Windows extra ball, attempt the downloaded one. if isWin32Windows() and gdb_path is None: from nuitka.Options import assumeYesForDownloads mingw64_gcc_path = getCachedDownloadedMinGW64( target_arch=getArchitecture(), assume_yes_for_downloads=assumeYesForDownloads(), ) with withEnvironmentPathAdded("PATH", os.path.dirname(mingw64_gcc_path)): lldb_path = getExecutablePath("lldb") if gdb_path is None and lldb_path is None: lldb_path = getExecutablePath("lldb") if lldb_path is None: general.sysexit("Error, no 'gdb' or 'lldb' binary found in path.") if gdb_path is not None: args = (gdb_path, "gdb", "-ex=run"
Windows: Make running in debugger work with cmd files as well
wrapCommandForDebuggerForExec
98badaaafd4e56529378947358acae489035fa1e
repo: Nuitka | file_name: Execution.py | ast_levels: 14 | nloc: 19 | url: https://github.com/Nuitka/Nuitka.git | complexity: 7 | token_counts: 142 | n_ast_errors: 0 | vocab_size: 60 | n_ast_nodes: 254 | language: Python
{ "docstring": "Wrap a command for system debugger to call exec\n\n Args:\n args: (list of str) args for call to be debugged\n Returns:\n args tuple with debugger command inserted\n\n Notes:\n Currently only gdb and lldb are supported, but adding more\n debuggers would be very welcome.\n ", "language": "en", "n_whitespaces": 83, "n_words": 43, "vocab_size": 36 }
def wrapCommandForDebuggerForExec(*args): gdb_path = getExecutablePath("gdb") # Windows extra ball, attempt the downloaded one. if isWin32Windows() and gdb_path is None: from nuitka.Options import assumeYesForDownloads mingw64_gcc_path = getCachedDownloadedMinGW64( target_arch=getArchitecture(), assume_yes_for_downloads=assumeYesForDownloads(), ) with withEnvironmentPathAdded("PATH", os.path.dirname(mingw64_gcc_path)): lldb_path = getExecutablePath("lldb") if gdb_path is None and lldb_path is None: lldb_path = getExecutablePath("lldb") if lldb_path is None: general.sysexit("Error, no 'gdb' or 'lldb' binary found in path.") if gdb_path is not None: args = (gdb_path, "gdb", "-ex=run", "-ex=where", "-ex=quit", "--args") + args else: args = (lldb_path, "lldb", "-o", "run", "-o", "bt", "-o", "quit", "--") + args return args
d_id: 51,944 | id: 207,377 | n_whitespaces: 54 | path: tests/admin_scripts/tests.py | n_words: 19 | n_identifiers: 10
def test_commands_with_invalid_settings(self): args = ["startproject"] out, err = self.run_django_admin(args, settings_file="bad_settings") self.assertNoOutput(out)
Refs #33476 -- Reformatted code with Black.
test_commands_with_invalid_settings
9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django | file_name: tests.py | ast_levels: 10 | nloc: 5 | url: https://github.com/django/django.git | complexity: 1 | token_counts: 43 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 75 | language: Python
{ "docstring": "\n Commands that don't require settings succeed if the settings file\n doesn't exist.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 11 }
def test_commands_with_invalid_settings(self): args = ["startproject"] out, err = self.run_django_admin(args, settings_file="bad_settings") self.assertNoOutput(out) self.assertOutput(err, "You must provide a project name", regex=True)
d_id: 56,344 | id: 221,321 | n_whitespaces: 216 | path: python3.10.4/Lib/chunk.py | n_words: 67 | n_identifiers: 12
def read(self, size=-1): if self.closed: raise ValueError("I/O operation on closed file") if self.size_read >= self.chunksize:
add python 3.10.4 for windows
read
8198943edd73a363c266633e1aa5b2a9e9c9f526
repo: XX-Net | file_name: chunk.py | ast_levels: 11 | nloc: 17 | url: https://github.com/XX-net/XX-Net.git | complexity: 8 | token_counts: 136 | n_ast_errors: 0 | vocab_size: 38 | n_ast_nodes: 215 | language: Python
{ "docstring": "Read at most size bytes from the chunk.\n If size is omitted or negative, read until the end\n of the chunk.\n ", "language": "en", "n_whitespaces": 42, "n_words": 21, "vocab_size": 17 }
def read(self, size=-1): if self.closed: raise ValueError("I/O operation on closed file") if self.size_read >= self.chunksize: return b'' if size < 0: size = self.chunksize - self.size_read if size > self.chunksize - self.size_read: size = self.chunksize - self.size_read data = self.file.read(size) self.size_read = self.size_read + len(data) if self.size_read == self.chunksize and \ self.align and \ (self.chunksize & 1): dummy = self.file.read(1) self.size_read = self.size_read + len(dummy) return data
d_id: 36,529 | id: 156,064 | n_whitespaces: 193 | path: dask/array/slicing.py | n_words: 99 | n_identifiers: 29
def slicing_plan(chunks, index): from dask.array.utils import asarray_safe if not is_arraylike(index): index = np.asanyarray(index) cum_chunks = cached_cumsum(chunks) cum_chunks = asarray_safe(cum_chunks, like=index) # this dispactches to the array library chunk_locations = np.searchsorted(cum_chunks, index, side="right") # bu
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
slicing_plan
cccb9d8d8e33a891396b1275c2448c352ef40c27
repo: dask | file_name: slicing.py | ast_levels: 13 | nloc: 20 | url: https://github.com/dask/dask.git | complexity: 4 | token_counts: 196 | n_ast_errors: 0 | vocab_size: 72 | n_ast_nodes: 305 | language: Python
{ "docstring": "Construct a plan to slice chunks with the given index\n\n Parameters\n ----------\n chunks : Tuple[int]\n One dimensions worth of chunking information\n index : np.ndarray[int]\n The index passed to slice on that dimension\n\n Returns\n -------\n out : List[Tuple[int, np.ndarray]]\n A list of chunk/sub-index pairs corresponding to each output chunk\n ", "language": "en", "n_whitespaces": 93, "n_words": 48, "vocab_size": 39 }
def slicing_plan(chunks, index): from dask.array.utils import asarray_safe if not is_arraylike(index): index = np.asanyarray(index) cum_chunks = cached_cumsum(chunks) cum_chunks = asarray_safe(cum_chunks, like=index) # this dispactches to the array library chunk_locations = np.searchsorted(cum_chunks, index, side="right") # but we need chunk_locations as python ints for getitem calls downstream chunk_locations = chunk_locations.tolist() where = np.where(np.diff(chunk_locations))[0] + 1 extra = asarray_safe([0], like=where) c_loc = asarray_safe([len(chunk_locations)], like=where) where = np.concatenate([extra, where, c_loc]) out = [] for i in range(len(where) - 1): sub_index = index[where[i] : where[i + 1]] chunk = chunk_locations[where[i]] if chunk > 0: sub_index = sub_index - cum_chunks[chunk - 1] out.append((chunk, sub_index)) return out
d_id: 70,662 | id: 245,112 | n_whitespaces: 151 | path: mmdet/models/roi_heads/bbox_heads/double_bbox_head.py | n_words: 21 | n_identifiers: 14
def _add_conv_branch(self) -> None: branch_convs = ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
Refactor Double Head, MS, Dynamic, Trident.
_add_conv_branch
cd4e9ed8269b0c767e129169b7268b0ced7e60c9
repo: mmdetection | file_name: double_bbox_head.py | ast_levels: 14 | nloc: 11 | url: https://github.com/open-mmlab/mmdetection.git | complexity: 2 | token_counts: 56 | n_ast_errors: 0 | vocab_size: 20 | n_ast_nodes: 91 | language: Python
{ "docstring": "Add the fc branch which consists of a sequential of conv layers.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
def _add_conv_branch(self) -> None: branch_convs = ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs
d_id: 34,223 | id: 148,287 | n_whitespaces: 22 | path: python/ray/_private/thirdparty/pathspec/util.py | n_words: 33 | n_identifiers: 15
def match_files(patterns, files): all_files = files if isinstance(files, Collection) else list(files) return_files = set() for pattern in patterns: if pattern.include is not None: result_files = pattern.match(all_files) if pattern.include: return
[Bugfix] fix invalid excluding of Black (#24042) - We should use `--force-exclude` when we pass code path explicitly https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html?highlight=--force-exclude#command-line-options - Recover the files in `python/ray/_private/thirdparty` which has been formatted in the PR https://github.com/ray-project/ray/pull/21975 by mistake.
match_files
0e6c042e29cbbe429d81c9c1af3c75c261f00980
repo: ray | file_name: util.py | ast_levels: 14 | nloc: 11 | url: https://github.com/ray-project/ray.git | complexity: 5 | token_counts: 70 | n_ast_errors: 0 | vocab_size: 28 | n_ast_nodes: 113 | language: Python
{ "docstring": "\n\tMatches the files to the patterns.\n\n\t*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)\n\tcontains the patterns to use.\n\n\t*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains\n\tthe normalized file paths to be matched against *patterns*.\n\n\tReturns the matched files (:class:`set` of :class:`str`).\n\t", "language": "en", "n_whitespaces": 30, "n_words": 36, "vocab_size": 24 }
def match_files(patterns, files): all_files = files if isinstance(files, Collection) else list(files) return_files = set() for pattern in patterns: if pattern.include is not None: result_files = pattern.match(all_files) if pattern.include: return_files.update(result_files) else: return_files.difference_update(result_files) return return_files
d_id: 39,770 | id: 166,205 | n_whitespaces: 490 | path: pandas/core/exchange/column.py | n_words: 130 | n_identifiers: 33
def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]: if self.dtype[0] == DtypeKind.STRING: # For each string, we need to manually determine the next offset values = self._col.to_numpy() ptr = 0 offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) for i, v in enumerate(values): # For missing values (in this case, `np.nan` values) # we don't increment the pointer if isinstance(v, str): b = v.encode(encoding="utf-8") ptr += len(b) offsets[i + 1] = ptr # Convert the offsets to a Pandas "
ENH: Implement DataFrame interchange protocol (#46141)
_get_offsets_buffer
90140f055892a46f473bd26affab88a7f171e394
repo: pandas | file_name: column.py | ast_levels: 16 | nloc: 29 | url: https://github.com/pandas-dev/pandas.git | complexity: 4 | token_counts: 139 | n_ast_errors: 0 | vocab_size: 97 | n_ast_nodes: 228 | language: Python
{ "docstring": "\n Return the buffer containing the offset values for variable-size binary\n data (e.g., variable-length strings) and the buffer's associated dtype.\n Raises NoBufferPresent if the data buffer does not have an associated\n offsets buffer.\n ", "language": "en", "n_whitespaces": 68, "n_words": 32, "vocab_size": 26 }
def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]: if self.dtype[0] == DtypeKind.STRING: # For each string, we need to manually determine the next offset values = self._col.to_numpy() ptr = 0 offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) for i, v in enumerate(values): # For missing values (in this case, `np.nan` values) # we don't increment the pointer if isinstance(v, str): b = v.encode(encoding="utf-8") ptr += len(b) offsets[i + 1] = ptr # Convert the offsets to a Pandas "buffer" using # the NumPy array as the backing store buffer = PandasBuffer(offsets) # Assemble the buffer dtype info dtype = ( DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE, ) # note: currently only support native endianness else: raise NoBufferPresent( "This column has a fixed-length dtype so " "it does not have an offsets buffer" ) return buffer, dtype
d_id: 102,039 | id: 303,211 | n_whitespaces: 46 | path: homeassistant/helpers/update_coordinator.py | n_words: 10 | n_identifiers: 3
def _unschedule_refresh(self) -> None: if self._unsub_refresh: self._unsub_refresh() self._unsub_refresh = None
Keep track of a context for each listener (#72702) * Remove async_remove_listener This avoids the ambuigity as to what happens if same callback is added multiple times. * Keep track of a context for each listener This allow a update coordinator to adapt what data to request on update from the backing service based on which entities are enabled. * Clone list before calling callbacks The callbacks can end up unregistering and modifying the dict while iterating. * Only yield actual values * Add a test for update context * Factor out iteration of _listeners to helper * Verify context is passed to coordinator * Switch to Any as type instead of object * Remove function which use was dropped earliers The use was removed in 8bee25c938a123f0da7569b4e2753598d478b900
_unschedule_refresh
8910d265d6cf15fed4e6e98b4344031019c1016d
repo: core | file_name: update_coordinator.py | ast_levels: 9 | nloc: 5 | url: https://github.com/home-assistant/core.git | complexity: 2 | token_counts: 23 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 41 | language: Python
{ "docstring": "Unschedule any pending refresh since there is no longer any listeners.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def _unschedule_refresh(self) -> None: if self._unsub_refresh: self._unsub_refresh() self._unsub_refresh = None
d_id: 49,987 | id: 201,749 | n_whitespaces: 448 | path: tests/backends/postgresql/tests.py | n_words: 119 | n_identifiers: 18
def test_connect_and_rollback(self): new_connection = connection.copy() try: # Ensure the database default time zone is different than # the time zone in new_connection.settings_dict. We can # get the default time zone by reset & show. with new_connection.cursor() as cursor: cursor.execute("RESET TIMEZONE") cursor.execute("SHOW TIMEZONE") db_default_tz = cursor.fetchone()[0] new_tz = "Europe/Paris" if db_default_tz == "UTC" else "UTC" new_connection.close() # Invalidate timezone name cache, because the setting_changed # handler cannot know about
Refs #33476 -- Reformatted code with Black.
test_connect_and_rollback
9c19aff7c7561e3a82978a272ecdaad40dda5c00
repo: django | file_name: tests.py | ast_levels: 15 | nloc: 19 | url: https://github.com/django/django.git | complexity: 3 | token_counts: 125 | n_ast_errors: 0 | vocab_size: 79 | n_ast_nodes: 237 | language: Python
{ "docstring": "\n PostgreSQL shouldn't roll back SET TIME ZONE, even if the first\n transaction is rolled back (#17062).\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
def test_connect_and_rollback(self): new_connection = connection.copy() try: # Ensure the database default time zone is different than # the time zone in new_connection.settings_dict. We can # get the default time zone by reset & show. with new_connection.cursor() as cursor: cursor.execute("RESET TIMEZONE") cursor.execute("SHOW TIMEZONE") db_default_tz = cursor.fetchone()[0] new_tz = "Europe/Paris" if db_default_tz == "UTC" else "UTC" new_connection.close() # Invalidate timezone name cache, because the setting_changed # handler cannot know about new_connection. del new_connection.timezone_name # Fetch a new connection with the new_tz as default # time zone, run a query and rollback. with self.settings(TIME_ZONE=new_tz): new_connection.set_autocommit(False) new_connection.rollback() # Now let's see if the rollback rolled back the SET TIME ZONE. with new_connection.cursor() as cursor: cursor.execute("SHOW TIMEZONE") tz = cursor.fetchone()[0] self.assertEqual(new_tz, tz) finally: new_connection.close()
d_id: 30,664 | id: 135,585 | n_whitespaces: 366 | path: python/ray/data/tests/test_dataset_tfrecords.py | n_words: 79 | n_identifiers: 11
def test_readback_tfrecords(ray_start_regular_shared, tmp_path): # The dataset we will write to a .tfrecords file. ds = ray.data.from_items( [ # Row one. { "int_item": 1, "int_list": [2, 2, 3], "float_item": 1.0, "float_list": [2.0, 3.0, 4.0], "bytes_item": b"abc", "bytes_list": [b"abc", b"1234"], }, # Row two. { "int_item": 2, "int_list": [3, 3, 4], "float_item": 2.0, "float_list": [2.0, 2.0, 3.0], "bytes_item": b"def", "bytes_list": [b"def", b"1234"], }, ] ) # Write the TFRecords. ds.write_tfrecords(tmp_path) # Read the TFRecords. readback_ds = ray.data.read_tfrecords(tmp_path) assert ds.take() == readback_ds.take()
[Datasets] Add writer for TFRecords. (#29448) This PR enables users to write TFRecords from datasets. In particular, the master branch already includes an API for reading TFRecords from datasets. Users have requested the ability to write these datasets back to TFRecords.
test_readback_tfrecords
9fab504fe776f96fecf85e12ea006264cbe92f4a
repo: ray | file_name: test_dataset_tfrecords.py | ast_levels: 13 | nloc: 24 | url: https://github.com/ray-project/ray.git | complexity: 1 | token_counts: 155 | n_ast_errors: 0 | vocab_size: 59 | n_ast_nodes: 226 | language: Python
{ "docstring": "\n Test reading back TFRecords written using datasets.\n The dataset we read back should be the same that we wrote.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 17 }
def test_readback_tfrecords(ray_start_regular_shared, tmp_path): # The dataset we will write to a .tfrecords file. ds = ray.data.from_items( [ # Row one. { "int_item": 1, "int_list": [2, 2, 3], "float_item": 1.0, "float_list": [2.0, 3.0, 4.0], "bytes_item": b"abc", "bytes_list": [b"abc", b"1234"], }, # Row two. { "int_item": 2, "int_list": [3, 3, 4], "float_item": 2.0, "float_list": [2.0, 2.0, 3.0], "bytes_item": b"def", "bytes_list": [b"def", b"1234"], }, ] ) # Write the TFRecords. ds.write_tfrecords(tmp_path) # Read the TFRecords. readback_ds = ray.data.read_tfrecords(tmp_path) assert ds.take() == readback_ds.take()
d_id: 83,014 | id: 279,493 | n_whitespaces: 484 | path: keras/layers/rnn/legacy_cells.py | n_words: 79 | n_identifiers: 27
def call(self, inputs, state): cur_state_pos = 0 cur_inp = inputs new_states = [] for i, cell in enumerate(self._cells): with tf.compat.v1.variable_scope("cell_%d" % i): if self._state_is_tuple: if not tf.nest.is_nested(state): raise ValueError( "Expected state to be a tuple of length " f"{len(self.state_size)}" f", but received: {state}" ) cur_state = state[i] else: cur_state = tf.slice( state, [0, cur_state_pos], [-1, cell.state_size] )
Add f-string format and lint with flynt on the whole codebase
call
be73ac1a1e25d9abd4d793cba9707098d7adf231
repo: keras | file_name: legacy_cells.py | ast_levels: 21 | nloc: 27 | url: https://github.com/keras-team/keras.git | complexity: 5 | token_counts: 148 | n_ast_errors: 0 | vocab_size: 64 | n_ast_nodes: 252 | language: Python
{ "docstring": "Run this multi-layer cell on inputs, starting from state.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def call(self, inputs, state): cur_state_pos = 0 cur_inp = inputs new_states = [] for i, cell in enumerate(self._cells): with tf.compat.v1.variable_scope("cell_%d" % i): if self._state_is_tuple: if not tf.nest.is_nested(state): raise ValueError( "Expected state to be a tuple of length " f"{len(self.state_size)}" f", but received: {state}" ) cur_state = state[i] else: cur_state = tf.slice( state, [0, cur_state_pos], [-1, cell.state_size] ) cur_state_pos += cell.state_size cur_inp, new_state = cell(cur_inp, cur_state) new_states.append(new_state) new_states = ( tuple(new_states) if self._state_is_tuple else tf.concat(new_states, 1) ) return cur_inp, new_states
d_id: 54,497 | id: 216,279 | n_whitespaces: 76 | path: tests/pytests/functional/transport/server/test_req_channel.py | n_words: 29 | n_identifiers: 8
def test_basic(push_channel): msgs = [ {"foo": "bar"}, {"bar": "baz"}, {"baz": "qux", "list": [1, 2, 3]},
Fix minion unit tests, specifically .../tests/pytests/test_minion.py
test_basic
3c7e1ec1f08abd7cd1ba78ad7880acb6ba6fdce7
repo: salt | file_name: test_req_channel.py | ast_levels: 11 | nloc: 9 | url: https://github.com/saltstack/salt.git | complexity: 2 | token_counts: 66 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 112 | language: Python
{ "docstring": "\n Test a variety of messages, make sure we get the expected responses\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 12 }
def test_basic(push_channel): msgs = [ {"foo": "bar"}, {"bar": "baz"}, {"baz": "qux", "list": [1, 2, 3]}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1) assert ret["load"] == msg
d_id: 72,034 | id: 247,975 | n_whitespaces: 197 | path: tests/module_api/test_account_data_manager.py | n_words: 36 | n_identifiers: 10
def test_put_global(self) -> None: self.get_success( self._module_api.account_data_manager.put_global( self.user_id, "test.data", {"wombat": True} ) ) # Request that account data from the normal store; check it's as we expect. self.assertEqual( self.get_success( self._store.get_globa
Add Module API for reading and writing global account data. (#12391)
test_put_global
85ca963c1add5ca12f59238a50dfc63df4846bb7
repo: synapse | file_name: test_account_data_manager.py | ast_levels: 12 | nloc: 17 | url: https://github.com/matrix-org/synapse.git | complexity: 1 | token_counts: 62 | n_ast_errors: 0 | vocab_size: 30 | n_ast_nodes: 105 | language: Python
{ "docstring": "\n Tests that written account data using `put_global` can be read out again later.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
def test_put_global(self) -> None: self.get_success( self._module_api.account_data_manager.put_global( self.user_id, "test.data", {"wombat": True} ) ) # Request that account data from the normal store; check it's as we expect. self.assertEqual( self.get_success( self._store.get_global_account_data_by_type_for_user( self.user_id, "test.data" ) ), {"wombat": True}, )
d_id: 75,990 | id: 259,914 | n_whitespaces: 24 | path: build_tools/azure/update_environments_and_lock_files.py | n_words: 9 | n_identifiers: 7
def get_conda_environment_content(build_metadata): template = environment.from_string(
CI: move Linux and MacOS Azure builds to conda lock files (#22448) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
get_conda_environment_content
f862129f36786acbae3d9f2d161bbb72d77b87ec
repo: scikit-learn | file_name: update_environments_and_lock_files.py | ast_levels: 11 | nloc: 21 | url: https://github.com/scikit-learn/scikit-learn.git | complexity: 1 | token_counts: 26 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 45 | language: Python
{ "docstring": "\n# DO NOT EDIT: this file is generated from the specification found in the\n# following script to centralize the configuration for all Azure CI builds:\n# build_tools/azure/update_environments_and_lock_files.py\nchannels:\n - {{ build_metadata['channel'] }}\ndependencies:\n {% for conda_dep in build_metadata['conda_dependencies'] %}\n - {{ conda_dep | get_package_with_constraint(build_metadata) }}\n {% endfor %}\n {% if build_metadata['pip_dependencies'] %}\n - pip\n - pip:\n {% for pip_dep in build_metadata.get('pip_dependencies', []) %}\n - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}\n {% endfor %}\n {% endif %}", "language": "en", "n_whitespaces": 85, "n_words": 77, "vocab_size": 47 }
def get_conda_environment_content(build_metadata): template = environment.from_string( .strip() ) return template.render(build_metadata=build_metadata)
d_id: 5,147 | id: 27,990 | n_whitespaces: 360 | path: saleor/thumbnail/utils.py | n_words: 94 | n_identifiers: 24
def preprocess(self, image, image_format): format = self.format or image_format save_kwargs = {"format": format} # Ensuring image is properly rotated if hasattr(image, "_getexif"): exif_datadict = image._getexif() # returns None if no EXIF data if exif_datadict is not None: exif = dict(exif_datadict.items()) orientation = exif.get(self.EXIF_ORIENTATION_KEY, None) if orientation == 3: image = image.transpose(Image.ROTATE_180)
Better media thumbnails including WebP support (#9988) * Add thumbnail app * Update get_thumbnail_size method and add tests * Add logic for creating thumbnails * Update logic for getting thumbnail * Allow defining format for tumbnail generation * Clear handle_thumbnail views * Add prepare_image_proxy_url method * Use ImageField for user avatar * Allow defining thumbnail format when querying user avatar * Use ImageField for category backgound_image * Use ImageField for Collection backgound_image * Use ImageField for ProductMedia image * Ensure that thumbnails are deleted when category background_image is changed or deleted * Ensure that thumbnails are deleted when collection background_image is changed or deleted * Update product media deleteion task and failing tests * Delete thumbnail from storage when thumbnail objects is deleted * Fix import in product test_bulk_delete * Drop create_thumbnails command * Update Product.thumbnail resolver * Update OrderLine thumbnail resolver * Add missing ADDED_IN_35 and PREVIEW_FEATURE labels * Update account and product signals - ensure the image is deleted from storage * Refactor product_images methods * Add signal for product media image delete * Drop create_thumbnails method and not longer valid settings fields * Clean the ProcessedImage class * Drop versatileimagefield from INSTALLED_APPS * Update changelog * Drop comments from ThumbnailFormat * Add get_image_or_proxy_url method * Apply reiew suggestions - add ThumbnailField and use get_image_or_proxy_ur when it's possible * Update changelog * Replace ADDED_IN_35 with ADDED_IN_36 label * Update changelog Co-authored-by: Marcin Gฤ™bala <[email protected]>
preprocess
5d1a36b9aaf408016957db04f86397b2e53c2500
repo: saleor | file_name: utils.py | ast_levels: 16 | nloc: 21 | url: https://github.com/saleor/saleor.git | complexity: 8 | token_counts: 162 | n_ast_errors: 0 | vocab_size: 62 | n_ast_nodes: 271 | language: Python
{ "docstring": "Preprocess an image.\n\n An API hook for image pre-processing. Calls any image format specific\n pre-processors (if defined). I.E. If `image_format` is 'JPEG', this\n method will look for a method named `preprocess_JPEG`, if found\n `image` will be passed to it.\n\n Arguments:\n image: a PIL Image instance\n image_format: str, a valid PIL format (i.e. 'JPEG' or 'WEBP')\n\n Subclasses should return a 2-tuple:\n * [0]: A PIL Image instance.\n * [1]: A dictionary of additional keyword arguments to be used\n when the instance is saved. If no additional keyword\n arguments, return an empty dict ({}).\n\n ", "language": "en", "n_whitespaces": 223, "n_words": 92, "vocab_size": 70 }
def preprocess(self, image, image_format): format = self.format or image_format save_kwargs = {"format": format} # Ensuring image is properly rotated if hasattr(image, "_getexif"): exif_datadict = image._getexif() # returns None if no EXIF data if exif_datadict is not None: exif = dict(exif_datadict.items()) orientation = exif.get(self.EXIF_ORIENTATION_KEY, None) if orientation == 3: image = image.transpose(Image.ROTATE_180) elif orientation == 6: image = image.transpose(Image.ROTATE_270) elif orientation == 8: image = image.transpose(Image.ROTATE_90) # Ensure any embedded ICC profile is preserved save_kwargs["icc_profile"] = image.info.get("icc_profile") if hasattr(self, "preprocess_%s" % format): image, addl_save_kwargs = getattr(self, "preprocess_%s" % format)( image=image ) save_kwargs.update(addl_save_kwargs) return image, save_kwargs
d_id: 77,424 | id: 262,880 | n_whitespaces: 102 | path: PyInstaller/utils/hooks/__init__.py | n_words: 49 | n_identifiers: 11
def get_package_paths(package): pkg_paths = get_all_package_paths(package) if not pkg_paths: raise ValueError(f"Package '{package}' does not exist or is not a package!") if len(pkg_paths) > 1: logger.warning( "get_package_paths - packa
hookutils: support multiple package paths in collect_* helpers Split the functionality of ``get_package_paths`` into two new helpers, ``get_all_package_paths`` and ``package_base_path``. The former obtains all package paths, while the latter simplifies removal of package-specific sub-path from the full package-path. Implement the old, backwards-compatible ``get_package_paths`` using these helpers; the function now supports namespace packages, but always returns a single package path and its base path. Have ``collect_submodules``, ``collect_dynamic_libs``, and ``collect_data_files`` helpers use the new ``get_all_package_paths`` and extend them to process all returned package paths. This enables proper support for PEP420 namespace packages with multiple package paths.
get_package_paths
e232aaf089d150b085502b97ce0fcf699b45e1b2
repo: pyinstaller | file_name: __init__.py | ast_levels: 11 | nloc: 11 | url: https://github.com/pyinstaller/pyinstaller.git | complexity: 3 | token_counts: 58 | n_ast_errors: 0 | vocab_size: 42 | n_ast_nodes: 100 | language: Python
{ "docstring": "\n Given a package, return the path to packages stored on this machine and also returns the path to this particular\n package. For example, if pkg.subpkg lives in /abs/path/to/python/libs, then this function returns\n ``(/abs/path/to/python/libs, /abs/path/to/python/libs/pkg/subpkg)``.\n\n NOTE: due to backwards compatibility, this function returns only one package path along with its base directory.\n In case of PEP 420 namespace package with multiple location, only first location is returned. To obtain all\n package paths, use the ``get_all_package_paths`` function and obtain corresponding base directories using the\n ``package_base_path`` helper.\n ", "language": "en", "n_whitespaces": 109, "n_words": 84, "vocab_size": 63 }
def get_package_paths(package): pkg_paths = get_all_package_paths(package) if not pkg_paths: raise ValueError(f"Package '{package}' does not exist or is not a package!") if len(pkg_paths) > 1: logger.warning( "get_package_paths - package %s has multiple paths (%r); returning only first one!", package, pkg_paths ) pkg_dir = pkg_paths[0] pkg_base = package_base_path(pkg_dir, package) return pkg_base, pkg_dir
d_id: 14,022 | id: 65,820 | n_whitespaces: 18 | path: erpnext/e_commerce/shopping_cart/cart.py | n_words: 27 | n_identifiers: 8
def get_address_territory(address_name): territory = None if address_name: address_fields = frappe.db.get_value("Address", address_name, ["city", "state", "country"]) for value in address_fields: territory = frappe.db.get_value("Territory", value) if territory: break return territory
style: format code with black
get_address_territory
494bd9ef78313436f0424b918f200dab8fc7c20b
repo: erpnext | file_name: cart.py | ast_levels: 13 | nloc: 9 | url: https://github.com/frappe/erpnext.git | complexity: 4 | token_counts: 55 | n_ast_errors: 0 | vocab_size: 22 | n_ast_nodes: 95 | language: Python
{ "docstring": "Tries to match city, state and country of address to existing territory", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
def get_address_territory(address_name): territory = None if address_name: address_fields = frappe.db.get_value("Address", address_name, ["city", "state", "country"]) for value in address_fields: territory = frappe.db.get_value("Territory", value) if territory: break return territory
d_id: 77,991 | id: 265,105 | n_whitespaces: 102 | path: netbox/dcim/svg.py | n_words: 38 | n_identifiers: 13
def _get_device_coords(self, position, height): x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH y = RACK_ELEVATION_BORDER_WIDTH if self.rack.desc_units: y += int((position - 1) * self.unit_height)
Clean up rack elevation rendering
_get_device_coords
0c915f7de9612c7485da3713cc6d63f368698a5d
repo: netbox | file_name: svg.py | ast_levels: 18 | nloc: 8 | url: https://github.com/netbox-community/netbox.git | complexity: 2 | token_counts: 76 | n_ast_errors: 0 | vocab_size: 24 | n_ast_nodes: 121 | language: Python
{ "docstring": "\n Return the X, Y coordinates of the top left corner for a device in the specified rack unit.\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 16 }
def _get_device_coords(self, position, height): x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH y = RACK_ELEVATION_BORDER_WIDTH if self.rack.desc_units: y += int((position - 1) * self.unit_height) else: y += int((self.rack.u_height - position + 1) * self.unit_height) - int(height * self.unit_height) return x, y
d_id: 47,439 | id: 195,852 | n_whitespaces: 151 | path: sympy/core/numbers.py | n_words: 71 | n_identifiers: 16
def igcd(*args): if len(args) < 2: raise TypeError( 'igcd() takes at least 2 arguments (%s given)' % len(args)) args_temp = [abs(as_int(i)) for i in args] if 1 in args_temp: return 1
Improved documentation formatting
igcd
cda8dfe6f45dc5ed394c2f5cda706cd6c729f713
repo: sympy | file_name: numbers.py | ast_levels: 13 | nloc: 15 | url: https://github.com/sympy/sympy.git | complexity: 8 | token_counts: 98 | n_ast_errors: 0 | vocab_size: 46 | n_ast_nodes: 166 | language: Python
{ "docstring": "Computes nonnegative integer greatest common divisor.\n\n Explanation\n ===========\n\n The algorithm is based on the well known Euclid's algorithm [1]_. To\n improve speed, ``igcd()`` has its own caching mechanism.\n\n Examples\n ========\n\n >>> from sympy import igcd\n >>> igcd(2, 4)\n 2\n >>> igcd(5, 10, 15)\n 5\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Euclidean_algorithm\n\n ", "language": "en", "n_whitespaces": 94, "n_words": 49, "vocab_size": 46 }
def igcd(*args): if len(args) < 2: raise TypeError( 'igcd() takes at least 2 arguments (%s given)' % len(args)) args_temp = [abs(as_int(i)) for i in args] if 1 in args_temp: return 1 a = args_temp.pop() if HAS_GMPY: # Using gmpy if present to speed up. for b in args_temp: a = gmpy.gcd(a, b) if b else a return as_int(a) for b in args_temp: a = math.gcd(a, b) return a igcd2 = math.gcd
d_id: 117,417 | id: 320,887 | n_whitespaces: 192 | path: qutebrowser/browser/webengine/webenginetab.py | n_words: 52 | n_identifiers: 14
def _prev_next_cb(self, found, *, going_up, callback): if found: result = browsertab.SearchNavigationResult.found # Check if the match count change is opposite to the search direction if self._old_match.current > 0: if not going_up and self._old_match.current > self.match.current: result = browsertab.SearchNavigationResult.wrapped_bottom elif going_up and self._old_match.current < self.m
search: Split navigation/search callbacks This way, we can move more logic (checking wrapping, etc.) into the API, thus making the commands much more simple and stateless.
_prev_next_cb
e15bda307e42c288b926f578e7bf8c610e4767af
repo: qutebrowser | file_name: webenginetab.py | ast_levels: 15 | nloc: 11 | url: https://github.com/qutebrowser/qutebrowser.git | complexity: 7 | token_counts: 91 | n_ast_errors: 0 | vocab_size: 36 | n_ast_nodes: 145 | language: Python
{ "docstring": "Call the prev/next callback based on the search result.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
def _prev_next_cb(self, found, *, going_up, callback): if found: result = browsertab.SearchNavigationResult.found # Check if the match count change is opposite to the search direction if self._old_match.current > 0: if not going_up and self._old_match.current > self.match.current: result = browsertab.SearchNavigationResult.wrapped_bottom elif going_up and self._old_match.current < self.match.current: result = browsertab.SearchNavigationResult.wrapped_top else: result = browsertab.SearchNavigationResult.not_found callback(result)
42,730
178,524
219
nuitka/plugins/standard/TensorflowPlugin.py
59
15
def onModuleSourceCode(self, module_name, source_code): if module_name != "tensorflow": return source_code source_lines = source_code.splitlines() found_insert = False for i, l in enumerate(source_lines): if l.startswith("def ") and "_running_from_pip_package():" in l: source_lines.insert(i, "_site_packages_dirs = []") source_lines.insert(i, "from tensorflow.python import keras") found_insert = True break if found_insert is True: self.info("Patched 'running-from-pip' path magic.") else: self.sysexit("Did not find 'running-from-pip' path magic code.")
Plugins: Slight more helpful error message in case tensorflow works
onModuleSourceCode
ab7014c6457b2b65010aea41512ca75d93847c9a
Nuitka
TensorflowPlugin.py
12
16
https://github.com/Nuitka/Nuitka.git
6
95
0
47
170
Python
{ "docstring": "Neutralize some path magic in tensorflow.\n\n Notes:\n Make sure tensorflow understands, we are not running as a PIP\n installed application.\n ", "language": "en", "n_whitespaces": 56, "n_words": 20, "vocab_size": 20 }
def onModuleSourceCode(self, module_name, source_code):
    if module_name != "tensorflow":
        return source_code

    source_lines = source_code.splitlines()
    found_insert = False
    for i, l in enumerate(source_lines):
        if l.startswith("def ") and "_running_from_pip_package():" in l:
            source_lines.insert(i, "_site_packages_dirs = []")
            source_lines.insert(i, "from tensorflow.python import keras")
            found_insert = True
            break

    if found_insert is True:
        self.info("Patched 'running-from-pip' path magic.")
    else:
        self.sysexit("Did not find 'running-from-pip' path magic code.")

    return "\n".join(source_lines)
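The find-anchor-and-insert idea above, reduced to a standalone sketch; patch_source and the sample source string are placeholders, not Nuitka API.

def patch_source(source_code, anchor, extra_lines):
    # insert extra_lines immediately before the first "def" line containing anchor
    lines = source_code.splitlines()
    for i, line in enumerate(lines):
        if line.startswith("def ") and anchor in line:
            for extra in reversed(extra_lines):
                lines.insert(i, extra)
            break
    else:
        raise RuntimeError("anchor %r not found" % anchor)
    return "\n".join(lines)

src = "x = 1\ndef _running_from_pip_package():\n    return True\n"
print(patch_source(src, "_running_from_pip_package", ["_site_packages_dirs = []"]))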
40,604
170,734
91
pandas/core/common.py
32
6
def cast_scalar_indexer(val): # assumes lib.is_scalar(val) if lib.is_float(val) and val.is_integer(): raise IndexError( # GH#34193 "Indexing with a float is no lon
DEPR: indexing (#49412) * DEPR: disallow Series.__getitem__ with a single-element list containing slice * DEPR: disallow slicing with positional slicer and .loc * DEPR: disallow positional indexing with float key * move whatsnew * DEPR: disallow multi-dimensional indexing * fix matplotlib tests * update install.rst
cast_scalar_indexer
9820edc174730e11cb423d7869650c13100eb314
pandas
common.py
11
7
https://github.com/pandas-dev/pandas.git
3
28
0
31
55
Python
{ "docstring": "\n Disallow indexing with a float key, even if that key is a round number.\n\n Parameters\n ----------\n val : scalar\n\n Returns\n -------\n outval : scalar\n ", "language": "en", "n_whitespaces": 49, "n_words": 24, "vocab_size": 21 }
def cast_scalar_indexer(val):
    # assumes lib.is_scalar(val)
    if lib.is_float(val) and val.is_integer():
        raise IndexError(
            # GH#34193
            "Indexing with a float is no longer supported. Manually convert "
            "to an integer key instead."
        )
    return val
79,351
268,085
45
test/lib/ansible_test/_internal/util.py
28
15
def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None plugins: t.
ansible-test - Convert more type hints. (#78449) * Simple regex replace of multi-line function arg annotations. * Simple regex replace of multi-line function arg annotations with default values. * Simple regex replace of multi-line function arg return annotations. * Simple regex replace of assignment annotations.
load_plugins
b993b5cd49662f715774c333ce98e2845227ab66
ansible
util.py
13
4
https://github.com/ansible/ansible.git
3
65
0
23
99
Python
{ "docstring": "\n Load plugins of the specified type and track them in the specified database.\n Only plugins which have already been imported will be loaded.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 20 }
def load_plugins(base_type, database):  # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
    plugins: t.Dict[str, t.Type[C]] = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type))

    for plugin in plugins:
        database[plugin] = plugins[plugin]
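A self-contained sketch of the same subclass-registry pattern; BaseProvider, LocalProvider and this get_subclasses walker are illustrative stand-ins for ansible-test's own types.

import typing as t

class BaseProvider:                      # assumed base type, for illustration
    pass

class LocalProvider(BaseProvider):
    pass

def get_subclasses(base: type) -> t.List[type]:
    # walk the class hierarchy and collect every (direct or indirect) subclass
    found: t.List[type] = []
    queue = [base]
    while queue:
        for sub in queue.pop().__subclasses__():
            found.append(sub)
            queue.append(sub)
    return found

database: t.Dict[str, t.Type[BaseProvider]] = {}
for sub in get_subclasses(BaseProvider):
    database[sub.__module__.rsplit(".", 1)[-1]] = sub   # key by trailing module name
print(database)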
49,648
200,437
44
sympy/solvers/ode/nonhomogeneous.py
29
14
def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero): r a = Wild('a', exclude=[x]) b = W
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
_undetermined_coefficients_match
24f1e7730119fe958cc8e28411f790c9a5ec04eb
sympy
nonhomogeneous.py
10
54
https://github.com/sympy/sympy.git
7
151
0
23
93
Python
{ "docstring": "\n Returns a trial function match if undetermined coefficients can be applied\n to ``expr``, and ``None`` otherwise.\n\n A trial expression can be found for an expression for use with the method\n of undetermined coefficients if the expression is an\n additive/multiplicative combination of constants, polynomials in `x` (the\n independent variable of expr), `\\sin(a x + b)`, `\\cos(a x + b)`, and\n `e^{a x}` terms (in other words, it has a finite number of linearly\n independent derivatives).\n\n Note that you may still need to multiply each term returned here by\n sufficient `x` to make it linearly independent with the solutions to the\n homogeneous equation.\n\n This is intended for internal use by ``undetermined_coefficients`` hints.\n\n SymPy currently has no way to convert `\\sin^n(x) \\cos^m(y)` into a sum of\n only `\\sin(a x)` and `\\cos(b x)` terms, so these are not implemented. So,\n for example, you will need to manually convert `\\sin^2(x)` into `[1 +\n \\cos(2 x)]/2` to properly apply the method of undetermined coefficients on\n it.\n\n Examples\n ========\n\n >>> from sympy import log, exp\n >>> from sympy.solvers.ode.nonhomogeneous import _undetermined_coefficients_match\n >>> from sympy.abc import x\n >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)\n {'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}}\n >>> _undetermined_coefficients_match(log(x), x)\n {'test': False}\n\n ", "language": "en", "n_whitespaces": 277, "n_words": 194, "vocab_size": 127 }
def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero): r a = Wild('a', exclude=[x]) b = Wild('b', exclude=[x]) expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1) retdict = {}
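A small illustration of the Wild-pattern matching the function above is built on; it only shows the a*x + b template match, not the full trial-set construction.

from sympy import Wild, symbols

x = symbols("x")
a = Wild("a", exclude=[x])
b = Wild("b", exclude=[x])

# match a linear argument against the a*x + b template
print((3*x + 2).match(a*x + b))   # {a_: 3, b_: 2}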
20,154
100,698
99
lib/gui/analysis/stats.py
27
10
def _remove_raw(self) -> None: if "raw" in self._selections: return logger.debug("Removing Raw Data from output") for key in list(self._stats.keys()): if key.startswith("raw"):
Bugfixes: - Stats graph - Handle NaNs in data - logger - de-elevate matplotlib font messages
_remove_raw
afec52309326304f4323029039e49bfcf928ef43
faceswap
stats.py
11
9
https://github.com/deepfakes/faceswap.git
4
57
0
21
102
Python
{ "docstring": " Remove raw values from :attr:`stats` if they are not requested. ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
def _remove_raw(self) -> None:
    if "raw" in self._selections:
        return
    logger.debug("Removing Raw Data from output")
    for key in list(self._stats.keys()):
        if key.startswith("raw"):
            del self._stats[key]
    logger.debug("Removed Raw Data from output")
80,044
269,374
32
keras/applications/efficientnet_weight_update_util.py
20
6
def get_keras_blocks(keras_weight_names): # example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a' keras_blocks = {x.split("_")[0] for x in keras_weight_names if "block" in x} return sorted(keras_blocks)
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
get_keras_blocks
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
efficientnet_weight_update_util.py
11
3
https://github.com/keras-team/keras.git
3
32
0
19
57
Python
{ "docstring": "Extract the block names from list of full weight names.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def get_keras_blocks(keras_weight_names):
    # example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'
    keras_blocks = {x.split("_")[0] for x in keras_weight_names if "block" in x}
    return sorted(keras_blocks)
13,800
65,128
121
erpnext/accounts/party.py
193
45
def get_dashboard_info(party_type, party, loyalty_program=None): current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True) doctype = "Sales Invoice" if party_type == "Customer" else "Purchase Invoice" companies = frappe.get_all( doctype, filters={"docstatus": 1, party_type.lower(): party}, distinct=1, fields=["company"] ) company_wise_info = [] company_wise_grand_total = frappe.get_all( doctype, filters={ "docstatus": 1, party_type.lower(): party, "posting_date": ( "between", [current_fiscal_year.year_start_date, current_fiscal_year.year_end_date], ), }, group_by="company", fields=[ "company", "sum(grand_total) as grand_total", "sum(base_grand_total) as base_grand_total", ], ) loyalty_point_details = [] if party_type == "Customer": loyalty_point_details = frappe._dict( frappe.get_all( "Loyalty Point Entry", filters={ "customer": party, "expiry_d
style: format code with black
get_dashboard_info
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
party.py
18
77
https://github.com/frappe/erpnext.git
12
432
0
116
712
Python
{ "docstring": "\n\t\tselect company, sum(debit_in_account_currency) - sum(credit_in_account_currency)\n\t\tfrom `tabGL Entry`\n\t\twhere party_type = %s and party=%s\n\t\tand is_cancelled = 0\n\t\tgroup by company", "language": "en", "n_whitespaces": 16, "n_words": 21, "vocab_size": 19 }
def get_dashboard_info(party_type, party, loyalty_program=None): current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True) doctype = "Sales Invoice" if party_type == "Customer" else "Purchase Invoice" companies = frappe.get_all( doctype, filters={"docstatus": 1, party_type.lower(): party}, distinct=1, fields=["company"] ) company_wise_info = [] company_wise_grand_total = frappe.get_all( doctype, filters={ "docstatus": 1, party_type.lower(): party, "posting_date": ( "between", [current_fiscal_year.year_start_date, current_fiscal_year.year_end_date], ), }, group_by="company", fields=[ "company", "sum(grand_total) as grand_total", "sum(base_grand_total) as base_grand_total", ], ) loyalty_point_details = [] if party_type == "Customer": loyalty_point_details = frappe._dict( frappe.get_all( "Loyalty Point Entry", filters={ "customer": party, "expiry_date": (">=", getdate()), }, group_by="company", fields=["company", "sum(loyalty_points) as loyalty_points"], as_list=1, ) ) company_wise_billing_this_year = frappe._dict() for d in company_wise_grand_total: company_wise_billing_this_year.setdefault( d.company, {"grand_total": d.grand_total, "base_grand_total": d.base_grand_total} ) company_wise_total_unpaid = frappe._dict( frappe.db.sql( , (party_type, party), ) ) for d in companies: company_default_currency = frappe.db.get_value("Company", d.company, "default_currency") party_account_currency = get_party_account_currency(party_type, party, d.company) if party_account_currency == company_default_currency: billing_this_year = flt( company_wise_billing_this_year.get(d.company, {}).get("base_grand_total") ) else: billing_this_year = flt(company_wise_billing_this_year.get(d.company, {}).get("grand_total")) total_unpaid = flt(company_wise_total_unpaid.get(d.company)) if loyalty_point_details: loyalty_points = loyalty_point_details.get(d.company) info = {} info["billing_this_year"] = flt(billing_this_year) if billing_this_year else 0 info["currency"] = party_account_currency info["total_unpaid"] = flt(total_unpaid) if total_unpaid else 0 info["company"] = d.company if party_type == "Customer" and loyalty_point_details: info["loyalty_points"] = loyalty_points if party_type == "Supplier": info["total_unpaid"] = -1 * info["total_unpaid"] company_wise_info.append(info) return company_wise_info
83,815
281,509
32
gamestonk_terminal/portfolio/brokers/robinhood/robinhood_controller.py
10
7
def print_help(self): help_text = console.print(text=
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
print_help
82747072c511beb1b2672846ae2ee4aec53eb562
OpenBBTerminal
robinhood_controller.py
9
8
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
21
0
9
40
Python
{ "docstring": "Print help[cmds]\n login login to robinhood\n\n holdings show account holdings in stocks\n history show equity history of your account\n[/cmds]", "language": "en", "n_whitespaces": 40, "n_words": 20, "vocab_size": 15 }
def print_help(self): help_text = console.print(text=help_text, menu="Portfolio - Brokers - Robinhood")
15,811
71,984
153
wagtail/admin/tests/test_edit_handlers.py
31
16
def test_page_with_inline_model_with_tabbed_panel_only(self): EventPageSpeaker.settings_panels = [ FieldPanel("first_name"), FieldPanel("last_name"), ] warning = checks.Warning( "EventPageSpeaker.settings_panels will have no effect on InlinePanel model editing", hint=, obj=EventPageSpeaker, id=self.warning_id, ) checks_results = self.get_chec
Reformat with black
test_page_with_inline_model_with_tabbed_panel_only
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_edit_handlers.py
10
15
https://github.com/wagtail/wagtail.git
1
66
0
29
112
Python
{ "docstring": "Test that checks will warn against setting single tabbed panel on InlinePanel modelEnsure that EventPageSpeaker uses `panels` instead of `settings_panels`.\nThere are no tabs on non-Page model editing within InlinePanels.", "language": "en", "n_whitespaces": 28, "n_words": 30, "vocab_size": 28 }
def test_page_with_inline_model_with_tabbed_panel_only(self): EventPageSpeaker.settings_panels = [ FieldPanel("first_name"), FieldPanel("last_name"), ] warning = checks.Warning( "EventPageSpeaker.settings_panels will have no effect on InlinePanel model editing", hint=, obj=EventPageSpeaker, id=self.warning_id, ) checks_results = self.get_checks_result() self.assertIn(warning, checks_results) delattr(EventPageSpeaker, "settings_panels")
@dataclass
121,067
337,478
48
src/accelerate/utils/dataclasses.py
21
10
def to_kwargs(self): default_dict = self.__class__().to_dict() this_dict = self.to_dict()
Refactor utils into its own module (#340) Co-authored-by: Sylvain Gugger <[email protected]>
to_kwargs
02e2ed567be0e6d54b65884265a14873c3a30b2a
accelerate
dataclasses.py
10
4
https://github.com/huggingface/accelerate.git
3
47
1
19
82
Python
{ "docstring": "\n Returns a dictionary containing the attributes with values different from the default of this class.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 14 }
def to_kwargs(self):
    default_dict = self.__class__().to_dict()
    this_dict = self.to_dict()
    return {k: v for k, v in this_dict.items() if default_dict[k] != v}


@dataclass
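The same diff-against-defaults idiom in a self-contained form; ScalerKwargs and the use of dataclasses.asdict in place of to_dict are assumptions for illustration.

from dataclasses import dataclass, asdict

@dataclass
class ScalerKwargs:                     # hypothetical example class
    init_scale: float = 65536.0
    enabled: bool = True

    def to_kwargs(self):
        # keep only the fields whose values differ from a default instance
        default_dict = asdict(self.__class__())
        this_dict = asdict(self)
        return {k: v for k, v in this_dict.items() if default_dict[k] != v}

print(ScalerKwargs(enabled=False).to_kwargs())   # {'enabled': False}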
117,024
319,942
662
src/documents/tasks.py
141
56
def update_document_archive_file(document_id): document = Document.objects.get(id=document_id) mime_type = document.mime_type parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type) if not parser_class: logger.error( f"No parser found for mime type {mime_type}, cannot " f"archive document {document} (ID: {document_id})", ) return parser: DocumentParser = parser_class(logging_group=uuid.uuid4()) try: parser.parse(document.source_path, mime_type, document.get_public_filename()) thumbnail = parser.get_thumbnail( document.source_path, mime_type, document.get_public_filename(), ) if parser.get_archive_path(): with transaction.atomic(): with open(parser.get_archive_path(), "rb") as f: checksum = hashlib.md5(f.read()).hexdigest() # I'm going to save first so that in case the file move # fails, the database is rolled back. # We also don't use save() since that triggers the filehandling # logic, and we don't want that yet (file not yet in place) document.archive_filename = generate_unique_filename(
Implements a better re-do of OCR by making the document archiver function common. Actually creates updated file now
update_document_archive_file
ab761e837c4be4974f699c8c97560a4291a8d298
paperless-ngx
tasks.py
20
43
https://github.com/paperless-ngx/paperless-ngx.git
5
266
0
108
463
Python
{ "docstring": "\n Re-creates the archive file of a document, including new OCR content and thumbnail\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 13 }
def update_document_archive_file(document_id): document = Document.objects.get(id=document_id) mime_type = document.mime_type parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type) if not parser_class: logger.error( f"No parser found for mime type {mime_type}, cannot " f"archive document {document} (ID: {document_id})", ) return parser: DocumentParser = parser_class(logging_group=uuid.uuid4()) try: parser.parse(document.source_path, mime_type, document.get_public_filename()) thumbnail = parser.get_thumbnail( document.source_path, mime_type, document.get_public_filename(), ) if parser.get_archive_path(): with transaction.atomic(): with open(parser.get_archive_path(), "rb") as f: checksum = hashlib.md5(f.read()).hexdigest() # I'm going to save first so that in case the file move # fails, the database is rolled back. # We also don't use save() since that triggers the filehandling # logic, and we don't want that yet (file not yet in place) document.archive_filename = generate_unique_filename( document, archive_filename=True, ) Document.objects.filter(pk=document.pk).update( archive_checksum=checksum, content=parser.get_text(), archive_filename=document.archive_filename, ) with FileLock(settings.MEDIA_LOCK): create_source_path_directory(document.archive_path) shutil.move(parser.get_archive_path(), document.archive_path) shutil.move(thumbnail, document.thumbnail_path) with index.open_index_writer() as writer: index.update_document(writer, document) except Exception: logger.exception( f"Error while parsing document {document} " f"(ID: {document_id})", ) finally: parser.cleanup()
14,258
66,630
13
erpnext/patches/v12_0/move_credit_limit_to_customer_credit_limit.py
20
6
def execute(): frappe.reload_doc("Selling", "doctype", "Customer Credit Limit") frappe.reload_doc("Selling", "doctype", "Customer") frappe.reload_doc("Setup", "doctype", "Customer Group
style: format code with black
execute
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
move_credit_limit_to_customer_credit_limit.py
8
7
https://github.com/frappe/erpnext.git
2
49
0
15
98
Python
{ "docstring": "Move credit limit and bypass credit limit to the child table of customer credit limit", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 11 }
def execute():
    frappe.reload_doc("Selling", "doctype", "Customer Credit Limit")
    frappe.reload_doc("Selling", "doctype", "Customer")
    frappe.reload_doc("Setup", "doctype", "Customer Group")

    if frappe.db.a_row_exists("Customer Credit Limit"):
        return

    move_credit_limit_to_child_table()
34,708
150,327
73
scripts/rest_client.py
15
7
def forceexit(self, tradeid, ordertype=None, amount=None): return self._post("forceexit", data={ "tradeid": tradeid, "ordertype": ordertype, "amount": amount,
Accept parameters to forceexit
forceexit
82aecc81f393e98b86115e9bdfa46dac1a143fad
freqtrade
rest_client.py
11
6
https://github.com/freqtrade/freqtrade.git
1
40
0
14
66
Python
{ "docstring": "Force-exit a trade.\n\n :param tradeid: Id of the trade (can be received via status command)\n :param ordertype: Order type to use (must be market or limit)\n :param amount: Amount to sell. Full sell if not given\n :return: json object\n ", "language": "en", "n_whitespaces": 74, "n_words": 39, "vocab_size": 35 }
def forceexit(self, tradeid, ordertype=None, amount=None):
    return self._post("forceexit", data={
        "tradeid": tradeid,
        "ordertype": ordertype,
        "amount": amount,
    })
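A hedged sketch of the client side this method sits in: a tiny JSON-over-HTTP poster built on requests. The base URL, auth tuple and error handling are placeholders, not freqtrade's actual REST layout.

import requests

class MiniClient:
    def __init__(self, base_url, auth=None):
        self._base_url = base_url.rstrip("/")
        self._auth = auth

    def _post(self, endpoint, data=None):
        # assumed transport: JSON body, raise on HTTP errors, decode JSON reply
        resp = requests.post(f"{self._base_url}/{endpoint}", json=data,
                             auth=self._auth, timeout=10)
        resp.raise_for_status()
        return resp.json()

    def forceexit(self, tradeid, ordertype=None, amount=None):
        return self._post("forceexit", data={"tradeid": tradeid,
                                             "ordertype": ordertype,
                                             "amount": amount})

# client = MiniClient("http://127.0.0.1:8080/api/v1", auth=("user", "pass"))
# client.forceexit("23", ordertype="market")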
13,070
62,924
25
.venv/lib/python3.8/site-packages/pip/_vendor/packaging/tags.py
16
4
def _abi3_applies(python_version): # type: (PythonVersion) -> bool return len(python_version) > 1 and tu
upd; format
_abi3_applies
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
tags.py
9
2
https://github.com/jindongwang/transferlearning.git
2
24
0
16
41
Python
{ "docstring": "\n Determine if the Python version supports abi3.\n\n PEP 384 was first implemented in Python 3.2.\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 14 }
def _abi3_applies(python_version):
    # type: (PythonVersion) -> bool
    return len(python_version) > 1 and tuple(python_version) >= (3, 2)
80,150
269,521
264
keras/backend.py
64
29
def variable(value, dtype=None, name=None, constraint=None): if dtype is None: dtype = floatx() if hasattr(value, "tocoo"): sparse_coo = value.tocoo() indices = np.concatenate( ( np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1), ), 1, ) v = tf.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape, ) v._keras_shape = sparse_coo.shape return v v = tf.Variable( value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint ) if isinstance(value, np.ndarray):
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
variable
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
backend.py
14
28
https://github.com/keras-team/keras.git
5
173
0
44
265
Python
{ "docstring": "Instantiates a variable and returns it.\n\n Args:\n value: Numpy array, initial value of the tensor.\n dtype: Tensor type.\n name: Optional name string for the tensor.\n constraint: Optional projection function to be\n applied to the variable after an optimizer update.\n\n Returns:\n A variable instance (with Keras metadata included).\n\n Examples:\n\n >>> val = np.array([[1, 2], [3, 4]])\n >>> kvar = tf.keras.backend.variable(value=val, dtype='float64',\n ... name='example_var')\n >>> tf.keras.backend.dtype(kvar)\n 'float64'\n >>> print(kvar)\n <tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy=\n array([[1., 2.],\n [3., 4.]])>\n\n ", "language": "en", "n_whitespaces": 206, "n_words": 77, "vocab_size": 66 }
def variable(value, dtype=None, name=None, constraint=None): if dtype is None: dtype = floatx() if hasattr(value, "tocoo"): sparse_coo = value.tocoo() indices = np.concatenate( ( np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1), ), 1, ) v = tf.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape, ) v._keras_shape = sparse_coo.shape return v v = tf.Variable( value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint ) if isinstance(value, np.ndarray): v._keras_shape = value.shape elif hasattr(value, "shape"): v._keras_shape = int_shape(value) track_variable(v) return v
84,633
284,080
93
openbb_terminal/stocks/dark_pool_shorts/ibkr_model.py
51
27
def get_cost_to_borrow() -> pd.DataFrame: ftp = ftplib.FTP("ftp3.interactivebrokers.com", "shortstock") flo = BytesIO() ftp.retrbinary("RETR usa.txt", flo.write) flo.seek(0) data = pd.read_csv(flo, sep="|", skiprows=1) data = data[["#SYM", "FEERATE", "AVAILABLE"]] data["AVAILABLE"] = data["AVAILABLE"].replace(">10000000", 10000000) data.fillna(0, inplace=True) data["AVAILABLE"] = data["AVAILABLE"].astype(int) data.sort_values(by=["FEERATE"], ascending=False, inplace=True) data["FEERATE"] = data["FEERATE"].apply(lambda x: str(x) + "%") data.columns = ["Symbol", "Fees", "Available"] return dat
Add cost to borrow of stocks. Data from IBKR (#1663) * add ctb to dps * add test for ctb * reformat using black * fix tests for ctb Co-authored-by: didierlopes.eth <[email protected]> Co-authored-by: jmaslek <[email protected]>
get_cost_to_borrow
73187d9e17a4838fc6ec583bcfcab593e06508cf
OpenBBTerminal
ibkr_model.py
12
21
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
161
0
40
281
Python
{ "docstring": "Get stocks with highest cost to borrow [Source: Interactive Broker]\n\n Returns\n -------\n pd.DataFrame\n Cost to borrow\n ", "language": "en", "n_whitespaces": 35, "n_words": 16, "vocab_size": 14 }
def get_cost_to_borrow() -> pd.DataFrame: ftp = ftplib.FTP("ftp3.interactivebrokers.com", "shortstock") flo = BytesIO() ftp.retrbinary("RETR usa.txt", flo.write) flo.seek(0) data = pd.read_csv(flo, sep="|", skiprows=1) data = data[["#SYM", "FEERATE", "AVAILABLE"]] data["AVAILABLE"] = data["AVAILABLE"].replace(">10000000", 10000000) data.fillna(0, inplace=True) data["AVAILABLE"] = data["AVAILABLE"].astype(int) data.sort_values(by=["FEERATE"], ascending=False, inplace=True) data["FEERATE"] = data["FEERATE"].apply(lambda x: str(x) + "%") data.columns = ["Symbol", "Fees", "Available"] return data
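The parsing step in isolation, without the FTP download; the sample bytes below assume a pipe-delimited layout with just the three columns the function keeps, whereas the real usa.txt carries more.

from io import BytesIO
import pandas as pd

# made-up two-row sample in the same shape the function selects from
raw = b"#BOF\n#SYM|FEERATE|AVAILABLE\nAAA|0.25|>10000000\nBBB|45.0|5000\n"

data = pd.read_csv(BytesIO(raw), sep="|", skiprows=1)
data["AVAILABLE"] = data["AVAILABLE"].replace(">10000000", 10000000).astype(int)
data.sort_values(by=["FEERATE"], ascending=False, inplace=True)
data["FEERATE"] = data["FEERATE"].apply(lambda x: str(x) + "%")
print(data)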
8,086
43,885
30
airflow/jobs/local_task_job.py
5
4
def _enable_task_listeners(): if get_listener_manager()
Add Listener Plugin API that tracks TaskInstance state changes (#20443) This adds new Plugin API - "listeners". It enables plugin authors to write [pluggy hook implementation][1] that will be called on certain formalized extension points. To differentiate between current Airflow extension points, like plugins, and current Airflow hooks, implementations of those hooks are called listeners. The API is ment to be called across all dags, and all operators - in contrast to current on_success_callback, pre_execute and related family which are meant to provide callbacks for particular dag authors, or operator creators. pluggy mechanism enables us to execute multiple, or none, listeners that implement particular extension point, so that users can use multiple listeners seamlessly. In this PR, three such extension points are added. When TaskInstance's state is changed to RUNNING, on_task_instance_running hook is called. On change toSUCCESS on_task_instance_success is called, similarly on FAILED on_task_instance_failed is called. Actual notification mechanism is be implemented using [SQLAlchemyโ€™s events mechanism][2]. This ensures that plugins will get every change of state, regardless of where in the codebase it happened, and not require manual annotation of TI state changes across the codebase. To make sure that this change is not affecting performance, running this mechanism on scheduler is disabled by default. The SQLAlchemy event mechanism is also not affected by default - the event listener is only added if we have any plugin which actually provides any listener. [1]: https://pluggy.readthedocs.io/en/stable/ [2]: https://docs.sqlalchemy.org/en/13/orm/session_events.html#after-flush Signed-off-by: Maciej Obuchowski <[email protected]>
_enable_task_listeners
dba00ce6a32b7f50153887c6974f62985ca8023f
airflow
local_task_job.py
9
3
https://github.com/apache/airflow.git
2
15
0
5
30
Python
{ "docstring": "\n Check if we have any registered listeners, then register sqlalchemy hooks for\n TI state change if we do.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
def _enable_task_listeners():
    if get_listener_manager().has_listeners:
        register_task_instance_state_events()
31,438
138,495
26
python/ray/data/impl/plan.py
12
5
def has_computed_output(self) -> bool: return self._snapshot_blocks is not N
[Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931) This PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible in order to more eagerly release block memory. This PR is the final precursor to adding the actual out-of-band serialization APIs (PR 3/3). The fully lineage has to be maintained, even for eagerly computed datasets, since the lineage is needed for out-of-band serialization of datasets.
has_computed_output
9ee24530abf1b5e3239869b5257dd7b678337b90
ray
plan.py
8
5
https://github.com/ray-project/ray.git
2
20
0
11
34
Python
{ "docstring": "Whether this plan has a computed snapshot for the final stage, i.e. for the\n output of this plan.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 15 }
def has_computed_output(self) -> bool:
    return self._snapshot_blocks is not None and not self._stages_after_snapshot
14,851
68,747
110
erpnext/accounts/report/sales_register/sales_register.py
150
21
def get_conditions(filters): conditions = "" accounting_dimensions = get_accounting_dimensions(as_list=False) or [] accounting_dimensions_list = [d.fieldname for d in accounting_dimensions] if filters.get("company"): conditions += " and company=%(company)s" if filters.get("customer") and "customer" not in accounting_dimensions_list: conditions += " and customer = %(customer)s" if filters.get("from_date"): conditions += " and posting_date >= %(from_date)s" if filters.get("to_date"): conditions += " and posting_date <= %(to_date)s" if filters.get("owner"): conditions += " and owner = %(owner)s" def get_sales_invoice_item_field_condition(field, table="Sales Invoice Item") -> str: if not filters.get(field) or field in accounting_dimensions_list: return "" return f conditions += get_sales_invoice_item_field_condition("mode_of_payments", "Sales Invoice Payment") conditions += get_sales_invoice_item_field_condition("cost_center") conditions += get_sales_invoice_item_field_condition("warehouse") conditions += get_sales_invoice_item_field_condition("brand") conditions += get_sales_invoice_item_field_condition("item_group") if accounting_dimensions: common_condition = for dimension in accounting_dimensions: if filters.get(dimension.fieldname): if frappe.get_cached_value("DocType", dimension.document_type, "is_tree"): filters[dimension.fieldname] = get_dimension_with_children( dimension.document_type, filters.get(dimension.fieldname) ) conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) else: conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dim
fix(Sales Register): incorrect query with dimensions If accounting dimension is also part of the default filters then same query is repeated with incorrect syntax. e.g. `item_group = (child1, child2)` instead of `in` query. fix: don't add default filter if they are part of dimensions to be added.
get_conditions
c3219ebad1cac35afc04cc051c9e215c70cd1e9b
erpnext
sales_register.py
20
41
https://github.com/frappe/erpnext.git
13
213
0
73
446
Python
{ "docstring": " and exists(select name from `tab{table}`\n\t\t\t where parent=`tabSales Invoice`.name\n\t\t\t \tand ifnull(`tab{table}`.{field}, '') = %({field})s)\n\t\t\tand exists(select name from `tabSales Invoice Item`\n\t\t\t\twhere parent=`tabSales Invoice`.name\n\t\t\t", "language": "en", "n_whitespaces": 21, "n_words": 23, "vocab_size": 15 }
def get_conditions(filters): conditions = "" accounting_dimensions = get_accounting_dimensions(as_list=False) or [] accounting_dimensions_list = [d.fieldname for d in accounting_dimensions] if filters.get("company"): conditions += " and company=%(company)s" if filters.get("customer") and "customer" not in accounting_dimensions_list: conditions += " and customer = %(customer)s" if filters.get("from_date"): conditions += " and posting_date >= %(from_date)s" if filters.get("to_date"): conditions += " and posting_date <= %(to_date)s" if filters.get("owner"): conditions += " and owner = %(owner)s" def get_sales_invoice_item_field_condition(field, table="Sales Invoice Item") -> str: if not filters.get(field) or field in accounting_dimensions_list: return "" return f conditions += get_sales_invoice_item_field_condition("mode_of_payments", "Sales Invoice Payment") conditions += get_sales_invoice_item_field_condition("cost_center") conditions += get_sales_invoice_item_field_condition("warehouse") conditions += get_sales_invoice_item_field_condition("brand") conditions += get_sales_invoice_item_field_condition("item_group") if accounting_dimensions: common_condition = for dimension in accounting_dimensions: if filters.get(dimension.fieldname): if frappe.get_cached_value("DocType", dimension.document_type, "is_tree"): filters[dimension.fieldname] = get_dimension_with_children( dimension.document_type, filters.get(dimension.fieldname) ) conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) else: conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) return conditions
2,909
19,198
29
mlflow/sklearn/utils.py
13
8
def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight): import sklearn if
Improve confusion matrix plot (#5273) * update Signed-off-by: Weichen Xu <[email protected]> * fix Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]>
_get_classifier_artifacts
847eb6b22d03f0cffef945996cf835272870435a
mlflow
utils.py
8
48
https://github.com/mlflow/mlflow.git
3
187
0
13
41
Python
{ "docstring": "\n Draw and record various common artifacts for classifier\n\n For all classifiers, we always log:\n (1) confusion matrix:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html\n\n For only binary classifiers, we will log:\n (2) precision recall curve:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html\n (3) roc curve:\n https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n\n Steps:\n 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets.\n 2. If the sample_weight argument exists in fit_func (accuracy_score by default\n has sample_weight), extract it from fit_args or fit_kwargs as\n (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)\n 3. return a list of artifacts path to be logged\n\n :param fitted_estimator: The already fitted regressor\n :param fit_args: Positional arguments given to fit_func.\n :param fit_kwargs: Keyword arguments given to fit_func.\n :return: List of artifacts to be logged\n ", "language": "en", "n_whitespaces": 178, "n_words": 117, "vocab_size": 91 }
def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):
    import sklearn

    if not _is_plotting_supported():
        return []
@py_random_state(3)
42,180
176,941
559
networkx/algorithms/swap.py
228
33
def double_edge_swap(G, nswap=1, max_tries=100, seed=None): if G.is_directed(): raise nx.NetworkXError( "double_edge_swap() not defined for directed graphs. Use direc
Implement directed edge swap (#5663) * Add tests for directed edge swap * Add directed edge swap algorithm * Allow more swaps in directed tests * Fix errors in swap.py to meet test criteria * Remove TODOs * Update documentation for directed_edge_swap and run black * Fix incosistent spacing * Add references * Added PR to release docs * Fix reference formatting * Improve documentation * An -> A * Update networkx/algorithms/swap.py Co-authored-by: Ross Barnowski <[email protected]> * Add 'Raises' section to documentation * Update tests to use keyword arguments for directed_edge_swap * Fix references to 'triple-edge' swap * Use not_implemented_for decorator for directed_edge_swap * Rename n to tries and remove unnecessary updates * Rename e to msg * Use 'succ' instead of 'out_edges' for getting successors * Update networkx/algorithms/swap.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/tests/test_swap.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/tests/test_swap.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/tests/test_swap.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/swap.py Co-authored-by: Dan Schult <[email protected]> * Convert gnp graphs to path graphs for clarity * Use seed when testing directed edge swap * Check node equality sooner * Add directed_edge_swap to documentation Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]>
double_edge_swap
7d910e7184abd385c929f789b0c935ab143fc932
networkx
swap.py
14
38
https://github.com/networkx/networkx.git
10
251
1
154
428
Python
{ "docstring": "Swap two edges in the graph while keeping the node degrees fixed.\n\n A double-edge swap removes two randomly chosen edges u-v and x-y\n and creates the new edges u-x and v-y::\n\n u--v u v\n becomes | |\n x--y x y\n\n If either the edge u-x or v-y already exist no swap is performed\n and another attempt is made to find a suitable edge pair.\n\n Parameters\n ----------\n G : graph\n An undirected graph\n\n nswap : integer (optional, default=1)\n Number of double-edge swaps to perform\n\n max_tries : integer (optional)\n Maximum number of attempts to swap edges\n\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Returns\n -------\n G : graph\n The graph after double edge swaps.\n\n Notes\n -----\n Does not enforce any connectivity constraints.\n\n The graph G is modified in place.\n ", "language": "en", "n_whitespaces": 272, "n_words": 135, "vocab_size": 96 }
def double_edge_swap(G, nswap=1, max_tries=100, seed=None): if G.is_directed(): raise nx.NetworkXError( "double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead." ) if nswap > max_tries: raise nx.NetworkXError("Number of swaps > number of tries allowed.") if len(G) < 4: raise nx.NetworkXError("Graph has less than four nodes.") # Instead of choosing uniformly at random from a generated edge list, # this algorithm chooses nonuniformly from the set of nodes with # probability weighted by degree. n = 0 swapcount = 0 keys, degrees = zip(*G.degree()) # keys, degree cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree discrete_sequence = nx.utils.discrete_sequence while swapcount < nswap: # if random.random() < 0.5: continue # trick to avoid periodicities? # pick two random edges without creating edge list # choose source node indices from discrete distribution (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) if ui == xi: continue # same source, skip u = keys[ui] # convert index to label x = keys[xi] # choose target uniformly from neighbors v = seed.choice(list(G[u])) y = seed.choice(list(G[x])) if v == y: continue # same target, skip if (x not in G[u]) and (y not in G[v]): # don't create parallel edges G.add_edge(u, x) G.add_edge(v, y) G.remove_edge(u, v) G.remove_edge(x, y) swapcount += 1 if n >= max_tries: e = ( f"Maximum number of swap attempts ({n}) exceeded " f"before desired swaps achieved ({nswap})." ) raise nx.NetworkXAlgorithmError(e) n += 1 return G @py_random_state(3)
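A short usage sketch showing the invariant the docstring describes, namely that the degree sequence survives the swaps; the graph and seed are arbitrary.

import networkx as nx

G = nx.path_graph(200)
before = sorted(d for _, d in G.degree())
try:
    nx.double_edge_swap(G, nswap=10, max_tries=2000, seed=42)
except nx.NetworkXException:
    pass   # ran out of attempts; degrees checked below are intact either way
after = sorted(d for _, d in G.degree())
assert before == after   # the degree sequence is preserved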
27,737
124,997
196
rllib/offline/tests/test_dataset_reader.py
44
16
def test_dataset_shard_with_task_parallelization(self): config = { "input": "dataset", "input_config": { "format": "json", "paths": self.dset_path, "parallelism": 10, },
[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)
test_dataset_shard_with_task_parallelization
569fe0109629048d08e1d9e023f7769f10bd2244
ray
test_dataset_reader.py
11
16
https://github.com/ray-project/ray.git
2
86
0
38
143
Python
{ "docstring": "Tests whether the dataset_shard function works correctly with parallelism\n for reading the dataset.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 12 }
def test_dataset_shard_with_task_parallelization(self): config = { "input": "dataset", "input_config": { "format": "json", "paths": self.dset_path, "parallelism": 10, }, } NUM_WORKERS = 4 _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS) assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] )
23,178
108,410
164
lib/matplotlib/tests/test_compare_images.py
97
24
def test_image_comparison_expect_rms(im1, im2, tol, expect_rms): baseline_dir, result_dir = map(Path, _image_directories(lambda: "dummy")) # Copy both "baseline" a
Add uuid in im1 name
test_image_comparison_expect_rms
f3edc8771b7c292c5539e0e6444746b6ccefec04
matplotlib
test_compare_images.py
12
14
https://github.com/matplotlib/matplotlib.git
2
124
0
70
194
Python
{ "docstring": "\n Compare two images, expecting a particular RMS error.\n\n im1 and im2 are filenames relative to the baseline_dir directory.\n\n tol is the tolerance to pass to compare_images.\n\n expect_rms is the expected RMS value, or None. If None, the test will\n succeed if compare_images succeeds. Otherwise, the test will succeed if\n compare_images fails and returns an RMS error almost equal to this value.\n ", "language": "en", "n_whitespaces": 83, "n_words": 61, "vocab_size": 45 }
def test_image_comparison_expect_rms(im1, im2, tol, expect_rms): baseline_dir, result_dir = map(Path, _image_directories(lambda: "dummy")) # Copy both "baseline" and "test" image to result_dir, so that 1) # compare_images writes the diff to result_dir, rather than to the source # tree and 2) the baseline image doesn't appear missing to triage_tests.py. uid = str(uuid.uuid4()) result_im1 = make_test_filename(result_dir / (uid + im1), "expected") shutil.copyfile(baseline_dir / im1, result_im1) result_im2 = result_dir / im1 shutil.copyfile(baseline_dir / im2, result_im2) results = compare_images( result_im1, result_im2, tol=tol, in_decorator=True) if expect_rms is None: assert results is None else: assert results is not None assert results['rms'] == approx(expect_rms, abs=1e-4)
21,474
102,147
110
test/jit/test_save_load.py
27
20
def test_versioned_symbols_reserialization(self): module_v2 = torch.jit.load(py
Revert D33198155: Bump version number to 7 and compile old operators with old schema Test Plan: revert-hammer Differential Revision: D33198155 (https://github.com/pytorch/pytorch/commit/d35fc409ad84c1a837e7e07ffe3f4e4942538e50) Original commit changeset: 38a1185f9ecb Original Phabricator Diff: D33198155 (https://github.com/pytorch/pytorch/commit/d35fc409ad84c1a837e7e07ffe3f4e4942538e50) fbshipit-source-id: 411aaeb4e047aad9202db50d4d0f2ff35bc51f9d
test_versioned_symbols_reserialization
0ece9a49d7d705b1a0cd4406d4f1c526d720e1f3
pytorch
test_save_load.py
12
9
https://github.com/pytorch/pytorch.git
2
81
0
23
136
Python
{ "docstring": "\n Tests that loading and saving serialized Torchscript with a versioned\n symbol won't persist the original function and will inline the\n versioned builtin.\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 19 }
def test_versioned_symbols_reserialization(self): module_v2 = torch.jit.load(pytorch_test_dir + "/jit/fixtures/_test_serialization_subcmul_v2.pt") buffer = io.BytesIO() torch.jit.save(module_v2, buffer) buffer.seek(0) module_reserialized = torch.jit.load(buffer) subcmul_nodes = sum("subcmul" in n.kind() for n in module_reserialized.graph.nodes()) self.assertEqual(subcmul_nodes, 0)
44,359
183,837
62
tests/css/test_stylesheet.py
39
13
def test_stylesheet_apply_takes_final_rule_in_specificity_clash(): css = ".a {background: red; color: lime;} .b {background: blue;}" stylesheet = _make_stylesheet(css) node = DOMNode(classes="a b", id="c") stylesheet.apply(node) assert node.styles.color == Color(0, 255, 0) # color: lime assert node.styles.backg
Add various additional tests around CSS specificity
test_stylesheet_apply_takes_final_rule_in_specificity_clash
4dd0d9fae43583638f34257f97d5749ca4f2c00c
textual
test_stylesheet.py
10
7
https://github.com/Textualize/textual.git
1
62
0
31
105
Python
{ "docstring": ".a and .b both contain background and have same specificity, so .b wins\n since it was declared last - the background should be blue.", "language": "en", "n_whitespaces": 26, "n_words": 24, "vocab_size": 21 }
def test_stylesheet_apply_takes_final_rule_in_specificity_clash():
    css = ".a {background: red; color: lime;} .b {background: blue;}"
    stylesheet = _make_stylesheet(css)
    node = DOMNode(classes="a b", id="c")
    stylesheet.apply(node)

    assert node.styles.color == Color(0, 255, 0)  # color: lime
    assert node.styles.background == Color(0, 0, 255)  # background: blue
8,919
46,541
408
airflow/migrations/versions/0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py
116
25
def upgrade(): conn = op.get_bind() if conn.dialect.name == 'sqlite': op.execute('PRAGMA foreign_keys=OFF') with op.batch_alter_table('ab_view_menu', schema=None) as batch_op: batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name']) op.execute('PRAGMA foreign_keys=ON') elif conn.dialect.name == 'mysql': with op.batch_alter_table('ab_register_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) elif conn.dialect.name == 'mssql': with op.batch_alter_table('ab_register_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table
Use Airflow.Base.metadata in FAB models (#22353) Since FAB models are now in airflow, it makes sense to monitor changes in them. Therefore we use Airflow.models.base.Base.metadata for FAB models
upgrade
2f5a567977e1219cab16c2548825a1b9eba07ab3
airflow
0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py
16
30
https://github.com/apache/airflow.git
6
378
0
53
652
Python
{ "docstring": "Apply Update migration for FAB tables to add missing constraints", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def upgrade(): conn = op.get_bind() if conn.dialect.name == 'sqlite': op.execute('PRAGMA foreign_keys=OFF') with op.batch_alter_table('ab_view_menu', schema=None) as batch_op: batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name']) op.execute('PRAGMA foreign_keys=ON') elif conn.dialect.name == 'mysql': with op.batch_alter_table('ab_register_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) elif conn.dialect.name == 'mssql': with op.batch_alter_table('ab_register_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_register_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['email'])
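A generic sketch of the batch_alter_table idiom the migration leans on; table and constraint names are illustrative, and the snippet is meant to live inside an Alembic migration script rather than run standalone.

import sqlalchemy as sa
from alembic import op

def upgrade():
    # batch mode re-creates the table behind the scenes, which is what lets
    # ALTER-style changes (NOT NULL, unique constraints) work on SQLite too
    with op.batch_alter_table("ab_user", schema=None) as batch_op:
        batch_op.alter_column("username", existing_type=sa.String(256), nullable=False)
        batch_op.create_unique_constraint("ab_user_username_uq", ["username"])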
25,090
114,082
1,416
mindsdb/migrations/versions/2022-02-09_27c5aca9e47e_test.py
386
70
def upgrade(): op.drop_table('ai_table') conn = op.get_bind() # views was created with unnamed fk. Therefore need recreate it op.create_table( 'view_tmp', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('query', sa.String(), nullable=False), sa.Column('integration_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['integration_id'], ['integration.id'], name='fk_integration_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) conn.execute(text()) op.drop_table('view') op.rename_table('view_tmp', 'view') op.create_table( 'analysis', sa.Column('id', sa.Integer(), nullable=False), sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id') ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True)) batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id']) batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True)) session = sa.orm.Session(bind=conn) dsatasources = conn.execute('select id, analysis from datasource').fetchall() for row in dsatasources: if row['analysis'] is not None: # NOTE 'returning' is relatively new in sqlite, so better wi
migration
upgrade
2a39e0ab3c81f09a227c50c98a3fb7ee57ec8fac
mindsdb
2022-02-09_27c5aca9e47e_test.py
17
130
https://github.com/mindsdb/mindsdb.git
10
1,172
0
197
1,989
Python
{ "docstring": "\n insert into view_tmp (id, name, company_id, query, integration_id)\n select id, name, company_id, query, datasource_id from view;\n \n insert into analysis (analysis) select analysis from datasource where id = :id;\n \n select id from analysis order by id desc limit 1;\n \n update datasource set analysis_id = :analysis_id where id = :id\n \n update datasource\n set integration_id = (select id from integration where name = :datasource_name and company_id = :company_id),\n ds_class = :ds_class\n where id = :id\n ", "language": "en", "n_whitespaces": 263, "n_words": 72, "vocab_size": 40 }
def upgrade(): op.drop_table('ai_table') conn = op.get_bind() # views was created with unnamed fk. Therefore need recreate it op.create_table( 'view_tmp', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('query', sa.String(), nullable=False), sa.Column('integration_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['integration_id'], ['integration.id'], name='fk_integration_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) conn.execute(text()) op.drop_table('view') op.rename_table('view_tmp', 'view') op.create_table( 'analysis', sa.Column('id', sa.Integer(), nullable=False), sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id') ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True)) batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id']) batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True)) session = sa.orm.Session(bind=conn) dsatasources = conn.execute('select id, analysis from datasource').fetchall() for row in dsatasources: if row['analysis'] is not None: # NOTE 'returning' is relatively new in sqlite, so better will be use select after insert. conn.execute( text(), { 'id': row['id'] } ) analysis_id = conn.execute(text()).fetchall() conn.execute( text(), { 'analysis_id': analysis_id[0][0], 'id': row['id'] } ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.drop_column('analysis') op.create_table( 'file', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('source_file_path', sa.String(), nullable=False), sa.Column('file_path', sa.String(), nullable=False), sa.Column('row_count', sa.Integer(), nullable=False), sa.Column('columns', mindsdb.interfaces.storage.db.Json(), nullable=False), # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now() # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now, server_onupdate=datetime.datetime.now), # ????? erver_default=func.now() sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? 
erver_default=func.now() sa.Column('analysis_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='fk_analysis_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) # delete ds where data is none dsatasources = conn.execute(text('select * from datasource')).fetchall() for ds in dsatasources: if ds['data'] is None: conn.execute(text('delete from datasource where id = :id'), {'id': ds['id']}) continue ds_data = json.loads(ds['data']) creation_info = json.loads(ds['creation_info']) datasource_name = ds_data.get('source_type') if datasource_name == 'file': created_at = None if isinstance(ds['created_at'], str): created_at = datetime.datetime.fromisoformat(ds['created_at']) elif isinstance(ds['created_at'], [float, int]): created_at = datetime.fromtimestamp(ds['created_at']) updated_at = None if isinstance(ds['updated_at'], str): updated_at = datetime.datetime.fromisoformat(ds['updated_at']) elif isinstance(ds['updated_at'], [float, int]): updated_at = datetime.fromtimestamp(ds['updated_at']) file = mindsdb.interfaces.storage.db.File( name=ds['name'], company_id=ds['company_id'], source_file_path=ds_data['source'], file_path=creation_info['args'][0], row_count=ds_data['row_count'], columns=ds_data['columns'], created_at=created_at, updated_at=updated_at, analysis_id=ds['analysis_id'] ) session.add(file) conn.execute( text(), { 'datasource_name': datasource_name, 'company_id': ds['company_id'], 'ds_class': creation_info['class'], 'id': ds['id'] } ) session.commit() op.rename_table('datasource', 'dataset') with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_foreign_key('fk_integration_id', 'integration', ['integration_id'], ['id']) # NOTE two different 'batch' is necessary, in other way FK is not creating with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.alter_column('datasource_id', new_column_name='dataset_id') with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_foreign_key('fk_dataset_id', 'dataset', ['dataset_id'], ['id']) with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('integration', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id'])
76,511
260,810
310
sklearn/cluster/_bisect_k_means.py
95
24
def _predict_recursive(self, X, sample_weight, cluster_node): if cluster_node.left is None: # This cluster has no subcluster. Labels are just the label of the cluster. return np.full(X.shape[0], cluster_node.label, dtype=np.int32) # Determine if data points belong to the left or right subc
MAINT Remove `x_squared_norms` arg from `k_means_lloyd` signature (#24264) Co-authored-by: Thomas J. Fan <[email protected]>
_predict_recursive
60f16feaadaca28f9a1cc68d2f406201860d27e8
scikit-learn
_bisect_k_means.py
11
22
https://github.com/scikit-learn/scikit-learn.git
3
171
0
67
254
Python
{ "docstring": "Predict recursively by going down the hierarchical tree.\n\n Parameters\n ----------\n X : {ndarray, csr_matrix} of shape (n_samples, n_features)\n The data points, currently assigned to `cluster_node`, to predict between\n the subclusters of this node.\n\n sample_weight : ndarray of shape (n_samples,)\n The weights for each observation in X.\n\n cluster_node : _BisectingTree node object\n The cluster node of the hierarchical tree.\n\n Returns\n -------\n labels : ndarray of shape (n_samples,)\n Index of the cluster each sample belongs to.\n ", "language": "en", "n_whitespaces": 192, "n_words": 74, "vocab_size": 51 }
def _predict_recursive(self, X, sample_weight, cluster_node): if cluster_node.left is None: # This cluster has no subcluster. Labels are just the label of the cluster. return np.full(X.shape[0], cluster_node.label, dtype=np.int32) # Determine if data points belong to the left or right subcluster centers = np.vstack((cluster_node.left.center, cluster_node.right.center)) if hasattr(self, "_X_mean"): centers += self._X_mean cluster_labels = _labels_inertia_threadpool_limit( X, sample_weight, centers, self._n_threads, return_inertia=False, ) mask = cluster_labels == 0 # Compute the labels for each subset of the data points. labels = np.full(X.shape[0], -1, dtype=np.int32) labels[mask] = self._predict_recursive( X[mask], sample_weight[mask], cluster_node.left ) labels[~mask] = self._predict_recursive( X[~mask], sample_weight[~mask], cluster_node.right ) return labels
77,188
262,331
117
TTS/tts/models/vits.py
54
27
def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None): # com
Add Voice conversion inference support (#1337) * Add support for voice conversion inference * Cache d_vectors_by_speaker for fast inference using a bigger speakers.json * Rebase bug fix * Use the average d-vector for inference
inference_voice_conversion
dbe9da7f15544b83043f481a99e5bcb23e002dc9
TTS
vits.py
14
7
https://github.com/coqui-ai/TTS.git
3
128
0
42
187
Python
{ "docstring": "Inference for voice conversion\n\n Args:\n reference_wav (Tensor): Reference wavform. Tensor of shape [B, T]\n speaker_id (Tensor): speaker_id of the target speaker. Tensor of shape [B]\n d_vector (Tensor): d_vector embedding of target speaker. Tensor of shape `[B, C]`\n reference_speaker_id (Tensor): speaker_id of the reference_wav speaker. Tensor of shape [B]\n reference_d_vector (Tensor): d_vector embedding of the reference_wav speaker. Tensor of shape `[B, C]`\n ", "language": "en", "n_whitespaces": 130, "n_words": 61, "vocab_size": 25 }
def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None): # compute spectrograms y = wav_to_spec(reference_wav, self.config.audio.fft_size, self.config.audio.hop_length, self.config.audio.win_length, center=False).transpose(1, 2) y_lengths = torch.tensor([y.size(-1)]).to(y.device) speaker_cond_src = reference_speaker_id if reference_speaker_id is not None else reference_d_vector speaker_cond_tgt = speaker_id if speaker_id is not None else d_vector # print(y.shape, y_lengths.shape) wav, _, _ = self.voice_conversion(y, y_lengths, speaker_cond_src, speaker_cond_tgt) return wav
9,587
48,736
39
tests/test_routers.py
11
7
def test_nonconflicting_specified_basename(self): self.router.register(r'notes', NoteViewSet, basename='notes') self.router.register(r'notes_kwduplicate', KWa
raise ImproperlyConfigured exception if `basename` is not unique (#8438) * raise ImproperlyConfigured if basename already exists * rename already_registered function; return True/False * additional basename tests * additional basename tests * Update rest_framework/routers.py Co-authored-by: David Graves <[email protected]> Co-authored-by: Asif Saif Uddin <[email protected]>
test_nonconflicting_specified_basename
48a21aa0eb3a95d32456c2a927eff9552a04231e
django-rest-framework
test_routers.py
9
4
https://github.com/encode/django-rest-framework.git
1
51
0
10
85
Python
{ "docstring": "\n Ensure 2 routers with the same model, and a distinct basename specified\n on each does not throw an exception\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
def test_nonconflicting_specified_basename(self): self.router.register(r'notes', NoteViewSet, basename='notes') self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='notes_kwduplicate') self.router.register(r'notes_duplicate', NoteViewSet, basename='notes_duplicate')
@pytest.mark.parametrize("config_count", [(CONFIG_ATTRIBUTES, 1)])
112,947
314,340
1,649
tests/components/group/test_cover.py
389
35
async def test_state(hass, setup_comp): state = hass.states.get(COVER_GROUP) # No entity has a valid state -> group state unknown assert state.state == STATE_UNKNOWN assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME assert state.attributes[ATTR_ENTITY_ID] == [ DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT, ] assert ATTR_ASSUMED_STATE not in state.attributes assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0 assert ATTR_CURRENT_POSITION not in state.attributes assert ATTR_CURRENT_TILT_POSITION not in state.attributes # The group state is unknown if all group members are unknown or unavailable. for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNAVAILABLE, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNKNOWN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN # At least one member opening -> group opening for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPENING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPENING # At least one member closing -> group closing for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSING # At least one member open -> group open for state_1 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAIL
Improve group tests (#73630)
test_state
9b8c3e37bbee3dbaa949705c7ae7b29f521988e7
core
test_cover.py
15
120
https://github.com/home-assistant/core.git
19
807
1
94
1,196
Python
{ "docstring": "Test handling of state.\n\n The group state is unknown if all group members are unknown or unavailable.\n Otherwise, the group state is opening if at least one group member is opening.\n Otherwise, the group state is closing if at least one group member is closing.\n Otherwise, the group state is open if at least one group member is open.\n Otherwise, the group state is closed.\n ", "language": "en", "n_whitespaces": 83, "n_words": 65, "vocab_size": 28 }
async def test_state(hass, setup_comp): state = hass.states.get(COVER_GROUP) # No entity has a valid state -> group state unknown assert state.state == STATE_UNKNOWN assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME assert state.attributes[ATTR_ENTITY_ID] == [ DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT, ] assert ATTR_ASSUMED_STATE not in state.attributes assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0 assert ATTR_CURRENT_POSITION not in state.attributes assert ATTR_CURRENT_TILT_POSITION not in state.attributes # The group state is unknown if all group members are unknown or unavailable. for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNAVAILABLE, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNKNOWN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN # At least one member opening -> group opening for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPENING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPENING # At least one member closing -> group closing for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSING # At least one member open -> group open for state_1 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPEN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPEN # At least one member closed -> group closed for state_1 in (STATE_CLOSED, STATE_UNAVAILABLE, 
STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSED, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSED # All group members removed from the state machine -> unknown hass.states.async_remove(DEMO_COVER) hass.states.async_remove(DEMO_COVER_POS) hass.states.async_remove(DEMO_COVER_TILT) hass.states.async_remove(DEMO_TILT) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN @pytest.mark.parametrize("config_count", [(CONFIG_ATTRIBUTES, 1)])
23,787
109,877
128
lib/matplotlib/cbook/__init__.py
36
16
def connect(self, signal, func): if self._signals is not None: _api.check_in_list(self._signals, signal=signal) self._func_cid_map.setdefault(signal, {}) proxy = _weak_or_strong_ref(func, self._remove_proxy)
Remove miscellaneous deprecations from 3.5
connect
e199c3b819f66a56f49657de0a9b3fb60c745b94
matplotlib
__init__.py
10
15
https://github.com/matplotlib/matplotlib.git
4
137
0
25
173
Python
{ "docstring": "Register *func* to be called when signal *signal* is generated.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def connect(self, signal, func): if self._signals is not None: _api.check_in_list(self._signals, signal=signal) self._func_cid_map.setdefault(signal, {}) proxy = _weak_or_strong_ref(func, self._remove_proxy) if proxy in self._func_cid_map[signal]: return self._func_cid_map[signal][proxy] cid = next(self._cid_gen) self._func_cid_map[signal][proxy] = cid self.callbacks.setdefault(signal, {}) self.callbacks[signal][cid] = proxy return cid
14,551
67,548
43
erpnext/setup/setup_wizard/operations/taxes_setup.py
69
22
def get_or_create_account(company_name, account): default_root_type = "Liability" root_type = account.get("root_type", default_root_type) existing_accounts = frappe.get_all( "Account", filters={"company": company_name, "root_type": root_type}, or_filters={ "account_name": account.get("account_name"),
style: format code with black
get_or_create_account
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
taxes_setup.py
14
26
https://github.com/frappe/erpnext.git
2
168
0
51
294
Python
{ "docstring": "\n\tCheck if account already exists. If not, create it.\n\tReturn a tax account or None.\n\t", "language": "en", "n_whitespaces": 13, "n_words": 15, "vocab_size": 14 }
def get_or_create_account(company_name, account): default_root_type = "Liability" root_type = account.get("root_type", default_root_type) existing_accounts = frappe.get_all( "Account", filters={"company": company_name, "root_type": root_type}, or_filters={ "account_name": account.get("account_name"), "account_number": account.get("account_number"), }, ) if existing_accounts: return frappe.get_doc("Account", existing_accounts[0].name) tax_group = get_or_create_tax_group(company_name, root_type) account["doctype"] = "Account" account["company"] = company_name account["parent_account"] = tax_group account["report_type"] = "Balance Sheet" account["account_type"] = "Tax" account["root_type"] = root_type account["is_group"] = 0 doc = frappe.get_doc(account) doc.flags.ignore_links = True doc.flags.ignore_validate = True doc.insert(ignore_permissions=True, ignore_mandatory=True) return doc
20,926
101,515
47
lib/gui/utils.py
12
10
def set_default_options(self) -> None: default = self.cli_opts.get_option_values() logger.debug(default) self.
Bugfix: Preview for extract in batch mode
set_default_options
dc18c74eea0c7837a820d27628cb12b0824fa30e
faceswap
utils.py
9
12
https://github.com/deepfakes/faceswap.git
1
37
0
10
64
Python
{ "docstring": " Set the default options for :mod:`lib.gui.projects`\n\n The Default GUI options are stored on Faceswap startup.\n\n Exposed as the :attr:`_default_opts` for a project cannot be set until after the main\n Command Tabs have been loaded.\n ", "language": "en", "n_whitespaces": 63, "n_words": 34, "vocab_size": 30 }
def set_default_options(self) -> None: default = self.cli_opts.get_option_values() logger.debug(default) self._gui_objects.default_options = default self.project.set_default_options()
3,324
20,326
1,331
pipenv/patched/notpip/_vendor/pygments/formatters/html.py
244
42
def _format_lines(self, tokensource):
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
_format_lines
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
html.py
20
63
https://github.com/pypa/pipenv.git
26
453
0
124
751
Python
{ "docstring": "\n Just format the tokens, without any wrapping tags.\n Yield individual lines.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
def _format_lines(self, tokensource): nocls = self.noclasses lsep = self.lineseparator tagsfile = self.tagsfile lspan = '' line = [] for ttype, value in tokensource: try: cspan = self.span_element_openers[ttype] except KeyError: title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else '' if nocls: css_style = self._get_css_inline_styles(ttype) if css_style: css_style = self.class2style[css_style][0] cspan = '<span style="%s"%s>' % (css_style, title) else: cspan = '' else: css_class = self._get_css_classes(ttype) if css_class: cspan = '<span class="%s"%s>' % (css_class, title) else: cspan = '' self.span_element_openers[ttype] = cspan parts = self._translate_parts(value) if tagsfile and ttype in Token.Name: filename, linenumber = self._lookup_ctag(value) if linenumber: base, filename = os.path.split(filename) if base: base += '/' filename, extension = os.path.splitext(filename) url = self.tagurlformat % {'path': base, 'fname': filename, 'fext': extension} parts[0] = "<a href=\"%s#%s-%d\">%s" % \ (url, self.lineanchors, linenumber, parts[0]) parts[-1] = parts[-1] + "</a>" # for all but the last line for part in parts[:-1]: if line: if lspan != cspan: line.extend(((lspan and '</span>'), cspan, part, (cspan and '</span>'), lsep)) else: # both are the same line.extend((part, (lspan and '</span>'), lsep)) yield 1, ''.join(line) line = [] elif part: yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep)) else: yield 1, lsep # for the last line if line and parts[-1]: if lspan != cspan: line.extend(((lspan and '</span>'), cspan, parts[-1])) lspan = cspan else: line.append(parts[-1]) elif parts[-1]: line = [cspan, parts[-1]] lspan = cspan # else we neither have to open a new span nor set lspan if line: line.extend(((lspan and '</span>'), lsep)) yield 1, ''.join(line)
643
4,250
101
octavia-cli/octavia_cli/apply/resources.py
33
13
def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: search_results = self._search().get(f"{self.resource_type}s", []) if len(search_results) > 1: raise DuplicateResourceError("Two or more resources exist with the same name.") if len(search_results) == 1: return search_results[0] else: return None
๐Ÿ™ octavia-cli: `apply` connections (#10881)
_get_remote_resource
56bf982cb96f831fe04f5e44a92ee4a669b9e16a
airbyte
resources.py
11
16
https://github.com/airbytehq/airbyte.git
3
64
0
29
111
Python
{ "docstring": "Find the remote resource on the Airbyte instance associated with the current resource.\n\n Raises:\n DuplicateResourceError: raised if the search results return multiple resources.\n\n Returns:\n Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: The remote resource found.\n ", "language": "en", "n_whitespaces": 74, "n_words": 31, "vocab_size": 26 }
def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: search_results = self._search().get(f"{self.resource_type}s", []) if len(search_results) > 1: raise DuplicateResourceError("Two or more resources exist with the same name.") if len(search_results) == 1: return search_results[0] else: return None
80,817
271,592
228
keras/engine/training.py
51
16
def _get_compile_args(self, user_metrics=True): self._assert_compile_was_called() # pylint: disable=protected-a
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
_get_compile_args
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training.py
12
17
https://github.com/keras-team/keras.git
4
95
0
34
158
Python
{ "docstring": "Used for saving or cloning a Model.\n\n Args:\n user_metrics: Whether to return user-supplied metrics or `Metric` objects.\n Defaults to returning the user-supplied metrics.\n\n Returns:\n Dictionary of arguments that were used when compiling the model.\n ", "language": "en", "n_whitespaces": 84, "n_words": 34, "vocab_size": 30 }
def _get_compile_args(self, user_metrics=True): self._assert_compile_was_called() # pylint: disable=protected-access saved_metrics = self.compiled_metrics._user_metrics saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics if not user_metrics: if saved_metrics is not None: saved_metrics = self.compiled_metrics._metrics if saved_weighted_metrics is not None: saved_weighted_metrics = self.compiled_metrics._weighted_metrics compile_args = { "optimizer": self.optimizer, "loss": self.compiled_loss._user_losses, "metrics": saved_metrics, "weighted_metrics": saved_weighted_metrics, "loss_weights": self.compiled_loss._user_loss_weights, } # pylint: enable=protected-access return compile_args
38,542
160,170
93
numpy/f2py/tests/test_f2py2e.py
34
17
def test_norestexdoc(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv",
TST: Initialize f2py2e tests of the F2PY CLI (#20668) Increases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff. More importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.
test_norestexdoc
729ad4f92420231e2a7009b3223c6c7620b8b808
numpy
test_f2py2e.py
11
9
https://github.com/numpy/numpy.git
1
61
0
32
115
Python
{ "docstring": "Ensures that TeX documentation is written out\n\n CLI :: --no-rest-doc\n ", "language": "en", "n_whitespaces": 16, "n_words": 10, "vocab_size": 10 }
def test_norestexdoc(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --no-rest-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "ReST Documentation is saved to file" not in out
81,383
275,342
341
keras/optimizers/optimizer_v1.py
82
18
def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( "Length of the specified weight list (" + str(len(weights)) + ") does not match the number of weights " "of the optimizer (" + str(len(params)) + ")" ) weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError( "Optimizer weight shape " +
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
set_weights
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
optimizer_v1.py
17
21
https://github.com/keras-team/keras.git
4
125
0
56
212
Python
{ "docstring": "Sets the weights of the optimizer, from Numpy arrays.\n\n Should only be called after computing the gradients\n (otherwise the optimizer has no weights).\n\n Args:\n weights: a list of Numpy arrays. The number of arrays and their shape\n must match number of the dimensions of the weights of the optimizer\n (i.e. it should match the output of `get_weights`).\n\n Raises:\n ValueError: in case of incompatible weight shapes.\n ", "language": "en", "n_whitespaces": 148, "n_words": 65, "vocab_size": 45 }
def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( "Length of the specified weight list (" + str(len(weights)) + ") does not match the number of weights " "of the optimizer (" + str(len(params)) + ")" ) weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError( "Optimizer weight shape " + str(pv.shape) + " not compatible with " "provided weight shape " + str(w.shape) ) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples)
80,860
271,840
26
keras/engine/training_utils_v1.py
14
8
def extract_tensors_from_dataset(dataset): iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return inputs, targets, sample_weight
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
extract_tensors_from_dataset
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
training_utils_v1.py
8
4
https://github.com/keras-team/keras.git
1
28
0
10
46
Python
{ "docstring": "Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.\n\n Args:\n dataset: Dataset instance.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n ", "language": "en", "n_whitespaces": 48, "n_words": 29, "vocab_size": 26 }
def extract_tensors_from_dataset(dataset): iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return inputs, targets, sample_weight
50,895
204,808
56
django/db/backends/base/base.py
21
4
def _set_autocommit(self, autocommit): raise NotImplementedError( "subclasses of BaseDatabaseWrapper
Refs #33476 -- Reformatted code with Black.
_set_autocommit
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
base.py
8
4
https://github.com/django/django.git
1
13
0
20
26
Python
{ "docstring": "\n Backend-specific implementation to enable or disable autocommit.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
def _set_autocommit(self, autocommit): raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require a _set_autocommit() method" ) # ##### Generic transaction management methods #####
15,886
72,413
209
wagtail/admin/views/generic/multiple_upload.py
29
17
def get_edit_upload_form_context_data(self): edit_form_class = self.get_edit_form_class() return { self.context_upload_name: self.upload_object, "edit_action": reverse( self.edit_upload_url
Reformat with black
get_edit_upload_form_context_data
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
multiple_upload.py
14
16
https://github.com/wagtail/wagtail.git
1
100
0
25
155
Python
{ "docstring": "\n Return the context data necessary for rendering the HTML form for supplying the\n metadata to turn an upload object into a final object\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 19 }
def get_edit_upload_form_context_data(self): edit_form_class = self.get_edit_form_class() return { self.context_upload_name: self.upload_object, "edit_action": reverse( self.edit_upload_url_name, args=(self.upload_object.id,) ), "delete_action": reverse( self.delete_upload_url_name, args=(self.upload_object.id,) ), "form": edit_form_class( instance=self.object, prefix="%s-%d" % (self.edit_upload_form_prefix, self.upload_object.id), user=self.request.user, ), }
52,128
207,841
310
tests/admin_views/tests.py
79
24
def test_overriding_has_module_permission(self): articles = Article._meta.verbose_name_plural.title() sections = Section._meta.verbose_name_plural.title() index_url = reverse("admin7:index") self.client.force_login(self.superuser) response = self.client.get(index_url) self.assertContains(response, sections) self.assertNotContains(response, articles
Refs #33476 -- Reformatted code with Black.
test_overriding_has_module_permission
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
tests.py
13
31
https://github.com/django/django.git
1
280
0
39
459
Python
{ "docstring": "\n If has_module_permission() always returns False, the module shouldn't\n be displayed on the admin index page for any users.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
def test_overriding_has_module_permission(self): articles = Article._meta.verbose_name_plural.title() sections = Section._meta.verbose_name_plural.title() index_url = reverse("admin7:index") self.client.force_login(self.superuser) response = self.client.get(index_url) self.assertContains(response, sections) self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.viewuser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.adduser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.changeuser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.deleteuser) response = self.client.get(index_url) self.assertNotContains(response, articles) # The app list displays Sections but not Articles as the latter has # ModelAdmin.has_module_permission() = False. self.client.force_login(self.superuser) response = self.client.get(reverse("admin7:app_list", args=("admin_views",))) self.assertContains(response, sections) self.assertNotContains(response, articles)
51,299
205,960
51
django/forms/forms.py
8
7
def non_field_errors(self): return self.errors.get( NON_FIELD_ERRORS, self.error_class(error_class
Refs #33476 -- Reformatted code with Black.
non_field_errors
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
forms.py
11
5
https://github.com/django/django.git
1
31
0
8
51
Python
{ "docstring": "\n Return an ErrorList of errors that aren't associated with a particular\n field -- i.e., from Form.clean(). Return an empty ErrorList if there\n are none.\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 21 }
def non_field_errors(self): return self.errors.get( NON_FIELD_ERRORS, self.error_class(error_class="nonfield", renderer=self.renderer), )
@add_start_docstrings( "The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer.", VAN_START_DOCSTRING, )
6,577
36,152
59
src/transformers/models/van/modeling_van.py
40
10
def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, V
Visual Attention Network (VAN) (#16027) * encoder works * addded files * norm in stage * convertion script * tests * fix copies * make fix-copies * fixed __init__ * make fix-copies * fix * shapiro test needed * make fix-copie * minor changes * make style + quality * minor refactor conversion script * rebase + tests * removed unused variables * updated doc * toctree * CI * doc * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * resolved conversations * make fixup * config passed to modules * config passed to modules * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> * conversations * conversations * copyrights * normal test * tests Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: NielsRogge <[email protected]>
_set_gradient_checkpointing
0a057201a96565df29984d716f660fd8d634329a
transformers
modeling_van.py
9
3
https://github.com/huggingface/transformers.git
2
24
1
36
64
Python
{ "docstring": "\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`VanConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See\n [`AutoFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all stages. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 248, "n_words": 128, "vocab_size": 88 }
def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, VanModel): module.gradient_checkpointing = value VAN_START_DOCSTRING = r VAN_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer.", VAN_START_DOCSTRING, )
12,544
61,396
35
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
14
6
def update(self, dest, url, rev_options): # type: (str, HiddenText, RevOptions) -> None raise NotImplementedError
upd; format
update
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
versioncontrol.py
6
2
https://github.com/jindongwang/transferlearning.git
1
14
0
14
23
Python
{ "docstring": "\n Update an already-existing repo to the given ``rev_options``.\n\n Args:\n rev_options: a RevOptions object.\n ", "language": "en", "n_whitespaces": 44, "n_words": 13, "vocab_size": 13 }
def update(self, dest, url, rev_options): # type: (str, HiddenText, RevOptions) -> None raise NotImplementedError
42,482
177,721
92
label_studio/webhooks/utils.py
34
10
def get_nested_field(value, field): if field == '__self__': return value fields =
fix: DEV-1725: Add ANNOTATIONS_CREATED webhook action to predictions to annotations action (#2052) * fix: DEV-1725: Add ANNOTATIONS_CREATED webhook action to predictions to annotations action * Update predictions_to_annotations.py Co-authored-by: Max Tkachenko <[email protected]>
get_nested_field
b2aa62dc675036f7695c0b09dd509617ba9df90d
label-studio
utils.py
13
10
https://github.com/heartexlabs/label-studio.git
5
62
0
24
101
Python
{ "docstring": "\n Get nested field from list of objects or single instance\n :param value: Single instance or list to look up field\n :param field: Field to lookup\n :return: List or single instance of looked up field\n ", "language": "en", "n_whitespaces": 50, "n_words": 34, "vocab_size": 22 }
def get_nested_field(value, field): if field == '__self__': return value fields = field.split('__') for fld in fields: if isinstance(value, list): value = [getattr(v, fld) for v in value] else: value = getattr(value, fld) return value
44,362
183,841
45
tests/css/test_stylesheet.py
27
12
def test_stylesheet_many_classes_dont_overrule_id(): css = "#id {color: red;} .a.b.c.d {color: blue;}" stylesheet = _make_stylesheet(css) node = DOMNode(classes="a b c d", id="id") style
Add various additional tests around CSS specificity
test_stylesheet_many_classes_dont_overrule_id
4dd0d9fae43583638f34257f97d5749ca4f2c00c
textual
test_stylesheet.py
10
6
https://github.com/Textualize/textual.git
1
47
0
24
82
Python
{ "docstring": "#id is further to the left in the specificity tuple than class, and\n a selector containing multiple classes cannot take priority over even a\n single class.", "language": "en", "n_whitespaces": 31, "n_words": 26, "vocab_size": 24 }
def test_stylesheet_many_classes_dont_overrule_id(): css = "#id {color: red;} .a.b.c.d {color: blue;}" stylesheet = _make_stylesheet(css) node = DOMNode(classes="a b c d", id="id") stylesheet.apply(node) assert node.styles.color == Color(255, 0, 0)
3,272
20,220
21
pipenv/patched/notpip/_vendor/platformdirs/macos.py
7
4
def site_data_dir(self) -> str: return self._append_
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
site_data_dir
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
macos.py
8
3
https://github.com/pypa/pipenv.git
1
15
0
7
29
Python
{ "docstring": ":return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def site_data_dir(self) -> str: return self._append_app_name_and_version("/Library/Application Support")
43,550
181,764
113
tests/tpot_tests.py
37
16
def test_warm_start(): tpot_obj = TPOTClassifier( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0, config_dict='TPOT light', warm_start=True) tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop is not None assert tpot_obj._pareto_front is not None first_pop = tpot_obj._pop tpot_obj.random_state = 21 tpot_obj.fit(pretest_X, pret
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
test_warm_start
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
tpot_tests.py
10
16
https://github.com/EpistasisLab/tpot.git
1
83
0
25
126
Python
{ "docstring": "Assert that the TPOT warm_start flag stores the pop and pareto_front from the first run.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 13 }
def test_warm_start(): tpot_obj = TPOTClassifier( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0, config_dict='TPOT light', warm_start=True) tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop is not None assert tpot_obj._pareto_front is not None first_pop = tpot_obj._pop tpot_obj.random_state = 21 tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop == first_pop
37,381
158,212
85
d2l/mxnet.py
26
18
def load_data_wiki(batch_size, max_len): num_workers = d2l.get_dataloader_workers() data_dir = d2l.download_extract('wikitext-2', 'wikitext-2') paragraphs = _read_wiki(data_dir) train_set = _WikiTextDataset(paragraphs, max_len) train_iter = gluon.data.DataLoader(train_set, batch_size, s
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title '15.2. Sentiment Analysis: Using Recursive Neural Networks' to '15.2. Sentiment Analysis: Using Recurrent Neural Networks' * Revise some of the wording (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * line 94 typo: ("bert.mall")->("bert.small") (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) Revise some of the wording * Update self-attention-and-positional-encoding.md (#1133) Following this book's translation convention, render "pooling" as 汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) A typo # assume batch_size=2, num_pred_positions=3 # then batch_idx should be np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md For the "choose d from k" part, Chinese readers may find it easier to follow when phrased in terms of permutations and combinations. The sentence "given k variables, the number of orders is ..." is ambiguous and reads awkwardly in Chinese; it should say "the number of terms of order d is ...". Also added an explanation of "therefore even a small change in order, say from $2$ to $3$, significantly increases the complexity of our model", clarifying why the complexity grows and why fine-grained tools are needed. * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. Translation error * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md Awkward sentence * Update environment.md Word order is off * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that The original translation had also rendered "who" literally. * Update mlp.md (#1117) * Update mlp.md Revise some of the wording * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error.
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * ้‡ๅค่ฏญๅฅ (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyroๆฐธไธๆŠฝ้ฃŽ <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
load_data_wiki
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
d2l-zh
mxnet.py
9
8
https://github.com/d2l-ai/d2l-zh.git
1
65
0
22
105
Python
{ "docstring": "Load the WikiText-2 dataset.\n\n Defined in :numref:`subsec_prepare_mlm_data`", "language": "en", "n_whitespaces": 9, "n_words": 7, "vocab_size": 7 }
def load_data_wiki(batch_size, max_len): num_workers = d2l.get_dataloader_workers() data_dir = d2l.download_extract('wikitext-2', 'wikitext-2') paragraphs = _read_wiki(data_dir) train_set = _WikiTextDataset(paragraphs, max_len) train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True, num_workers=num_workers) return train_iter, train_set.vocab
7,483
42,087
60
seaborn/_core/plot.py
21
8
def save(self, loc, **kwargs) -> Plot: # TODO expose important keyword arguments in our signature? with theme_context(self._theme_with_defaults()): self._plot().save(loc, **kwargs) return self
Add rudimentary themeing support (#2929) * WIP Plot.theme * Add default values for theme to match set_theme() * Depend on matplotib style defaults and update rcParams more selectively * Fix lines test * Improve test coverage
save
762db897b52d16ab2f164d5103df4cc26c1d0503
seaborn
plot.py
11
16
https://github.com/mwaskom/seaborn.git
1
38
0
20
66
Python
{ "docstring": "\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n ", "language": "en", "n_whitespaces": 119, "n_words": 43, "vocab_size": 32 }
def save(self, loc, **kwargs) -> Plot: # TODO expose important keyword arguments in our signature? with theme_context(self._theme_with_defaults()): self._plot().save(loc, **kwargs) return self
17,908
85,028
97
zerver/tests/test_signup.py
32
10
def test_create_realm_no_creation_key(self) -> None: email = "[email protected]" with self.settings(OPEN_REALM_CREATION=False): # Create new realm with the email, but no creation key.
realm_creation: Rework error pages. The previous error page was inadequate for serving the two different scenarios where we show errors in realm_creations, in particular containing a misleading sentence about realm creation being disabled (even in the case where it was actually enabled and the user simply had an expired link).
test_create_realm_no_creation_key
582d5b0aa31ac79a5ee1af95b2e71c4bfc53d5aa
zulip
test_signup.py
13
10
https://github.com/zulip/zulip.git
1
53
0
29
96
Python
{ "docstring": "\n Trying to create a realm without a creation_key should fail when\n OPEN_REALM_CREATION is false.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
def test_create_realm_no_creation_key(self) -> None: email = "[email protected]" with self.settings(OPEN_REALM_CREATION=False): # Create new realm with the email, but no creation key. result = self.client_post("/new/", {"email": email}) self.assertEqual(result.status_code, 200) self.assert_in_response("Organization creation link required", result)
20,038
100,574
61
lib/gpu_stats/nvidia.py
17
11
def _get_device_names(self) -> List[str]: names = [pynvml.nvmlDeviceGetName(handle).d
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
_get_device_names
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
faceswap
nvidia.py
11
12
https://github.com/deepfakes/faceswap.git
2
43
0
16
77
Python
{ "docstring": " Obtain the list of names of connected Nvidia GPUs as identified in :attr:`_handles`.\n\n Returns\n -------\n list\n The list of connected Nvidia GPU names\n ", "language": "en", "n_whitespaces": 63, "n_words": 23, "vocab_size": 16 }
def _get_device_names(self) -> List[str]: names = [pynvml.nvmlDeviceGetName(handle).decode("utf-8") for handle in self._handles] self._log("debug", f"GPU Devices: {names}") return names
73,013
249,592
162
tests/push/test_push_rule_evaluator.py
71
17
def test_delayed_message(self) -> None: user1 = UserID.from_string(self.user_id1) # Send a message before user2 joins event_id1 = self.create_and_send_event(self.room_id, user1) # Have user2 join the room self.helper.join(self.room_id, self.user_id2, tok=self.tok2) # They start off with no notifications self.assertEqual(self.get_notif_count(self.user_id2), 0) # Send another message tha
Speed up calculating push actions in large rooms (#13973) We move the expensive check of visibility to after calculating push actions, avoiding the expensive check for users who won't get pushed anyway. I think this should have a big impact on rooms with large numbers of local users that have pushed disabled.
test_delayed_message
285b9e9b6c3558718e7d4f513062e277948ac35d
synapse
test_push_rule_evaluator.py
10
10
https://github.com/matrix-org/synapse.git
1
96
0
52
154
Python
{ "docstring": "Test that a delayed message that was from before a user joined\n doesn't cause a notification for the joined user.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 16 }
def test_delayed_message(self) -> None: user1 = UserID.from_string(self.user_id1) # Send a message before user2 joins event_id1 = self.create_and_send_event(self.room_id, user1) # Have user2 join the room self.helper.join(self.room_id, self.user_id2, tok=self.tok2) # They start off with no notifications self.assertEqual(self.get_notif_count(self.user_id2), 0) # Send another message that references the event before the join to # simulate a "delayed" event self.create_and_send_event(self.room_id, user1, prev_event_ids=[event_id1]) # user2 should not be notified about it, because they can't see it. self.assertEqual(self.get_notif_count(self.user_id2), 0)
@pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
76,233
260,409
777
sklearn/linear_model/_glm/tests/test_glm.py
314
49
def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): model, X, y, coef, _, _, _ = glm_dataset n_samples, n_features = X.shape alpha = 0 # unpenalized params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) if fit_intercept: intercept = coef[-1] coef = coef[:-1] if n_samples > n_features: X = X[:, :-1] # remove intercept X = 0.5 * np.concatenate((X, X), axis=1) else: # To know the minimum norm solution, we keep one intercept column and do # not divide by 2. Later on, we must take special care. X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] else: intercept = 0 X = 0.5 * np.concatenate((X, X), axis=1) assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) with warnings.catch_warnings(): if fit_intercept and n_samples <= n_features: # XXX: Investigate if the lack of convergence in this case should be # considered a bug or not. warnings.filterwarnings("ignore", category=ConvergenceWarning) model.fit(X, y) if fit_intercept and n_samples <= n_features: # Here we take special care. model_intercept = 2 * model.intercept_ model_coef = 2 * model.coef_[:-1] # exclude the ot
TST tight tests for GLMs (#23619) Co-authored-by: Olivier Grisel <[email protected]>
test_glm_regression_unpenalized_hstacked_X
9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f
scikit-learn
test_glm.py
16
48
https://github.com/scikit-learn/scikit-learn.git
9
414
1
170
664
Python
{ "docstring": "Test that unpenalized GLM converges for all solvers to correct solution.\n\n We work with a simple constructed data set with known solution.\n GLM fit on [X] is the same as fit on [X, X]/2.\n For long X, [X, X] is a singular matrix and we check against the minimum norm\n solution:\n min ||w||_2 subject to w = argmin deviance(X, y, w)\n ", "language": "en", "n_whitespaces": 83, "n_words": 61, "vocab_size": 51 }
def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): model, X, y, coef, _, _, _ = glm_dataset n_samples, n_features = X.shape alpha = 0 # unpenalized params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) if fit_intercept: intercept = coef[-1] coef = coef[:-1] if n_samples > n_features: X = X[:, :-1] # remove intercept X = 0.5 * np.concatenate((X, X), axis=1) else: # To know the minimum norm solution, we keep one intercept column and do # not divide by 2. Later on, we must take special care. X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] else: intercept = 0 X = 0.5 * np.concatenate((X, X), axis=1) assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) with warnings.catch_warnings(): if fit_intercept and n_samples <= n_features: # XXX: Investigate if the lack of convergence in this case should be # considered a bug or not. warnings.filterwarnings("ignore", category=ConvergenceWarning) model.fit(X, y) if fit_intercept and n_samples <= n_features: # Here we take special care. model_intercept = 2 * model.intercept_ model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: model_intercept = model.intercept_ model_coef = model.coef_ rtol = 6e-5 if n_samples > n_features: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) else: # As it is an underdetermined problem, prediction = y. The following shows that # we get a solution, i.e. a (non-unique) minimum of the objective function ... assert_allclose(model.predict(X), y, rtol=1e-6) if fit_intercept: # Same as in test_glm_regression_unpenalized. # But it is not the minimum norm solution. Otherwise the norms would be # equal. norm_solution = np.linalg.norm( 0.5 * np.r_[intercept, intercept, coef, coef] ) norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) assert norm_model > (1 + 1e-12) * norm_solution # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) @pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
73,022
249,612
43
tests/storage/test_event_push_actions.py
15
11
def test_count_aggregation_threads(self) -> None:
Track notification counts per thread (implement MSC3773). (#13776) When retrieving counts of notifications segment the results based on the thread ID, but choose whether to return them as individual threads or as a single summed field by letting the client opt-in via a sync flag. The summarization code is also updated to be per thread, instead of per room.
test_count_aggregation_threads
b4ec4f5e71a87d5bdc840a4220dfd9a34c54c847
synapse
test_event_push_actions.py
8
69
https://github.com/matrix-org/synapse.git
1
434
0
14
49
Python
{ "docstring": "\n This is essentially the same test as test_count_aggregation, but adds\n events to the main timeline and to a thread.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 17 }
def test_count_aggregation_threads(self) -> None: user_id, token, _, other_token, room_id = self._create_users_and_room() thread_id: str last_event_id: str
72,180
248,249
76
tests/config/test_cache.py
20
14
def test_global_instantiated_before_config_load(self): cache = LruCache(100) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.as
Reload cache factors from disk on SIGHUP (#12673)
test_global_instantiated_before_config_load
d38d242411b8910dfacde1e61fd3a0ec5cbcaa66
synapse
test_cache.py
11
8
https://github.com/matrix-org/synapse.git
1
76
0
18
130
Python
{ "docstring": "\n If a cache is instantiated before the config is read, it will be given\n the default cache size in the interim, and then resized to the new\n default cache size once the config is loaded.\n ", "language": "en", "n_whitespaces": 64, "n_words": 35, "vocab_size": 24 }
def test_global_instantiated_before_config_load(self): cache = LruCache(100) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 50) config = {"caches": {"global_factor": 4}} self.config.read_config(config, config_dir_path="", data_dir_path="") self.config.resize_all_caches() self.assertEqual(cache.max_size, 400)
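A hypothetical, self-contained sketch of the resize-callback pattern this test exercises; the class below is an illustrative stand-in, not Synapse's actual LruCache or config machinery. The cache first gets an interim size from the default factor and is resized once the real factor is read from config.

class ResizableCache:
    # Illustrative stand-in for a cache whose capacity is driven by a factor callback.
    def __init__(self, base_size):
        self.base_size = base_size
        self.max_size = base_size

    def set_cache_factor(self, factor):
        self.max_size = int(self.base_size * factor)

cache = ResizableCache(100)
cache.set_cache_factor(0.5)  # interim default factor, applied before the config is loaded
assert cache.max_size == 50
cache.set_cache_factor(4)    # factor taken from the loaded config ("global_factor": 4)
assert cache.max_size == 400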
80,086
269,448
15
keras/backend.py
10
8
def in_top_k(predictions, targets, k): return tf.compat.v1.math.in_top_k(p
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
in_top_k
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
backend.py
10
2
https://github.com/keras-team/keras.git
1
27
0
9
42
Python
{ "docstring": "Returns whether the `targets` are in the top `k` `predictions`.\n\n Args:\n predictions: A tensor of shape `(batch_size, classes)` and type `float32`.\n targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.\n k: An `int`, number of top elements to consider.\n\n Returns:\n A 1D tensor of length `batch_size` and type `bool`.\n `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`\n values of `predictions[i]`.\n ", "language": "en", "n_whitespaces": 115, "n_words": 64, "vocab_size": 46 }
def in_top_k(predictions, targets, k): return tf.compat.v1.math.in_top_k(predictions, targets, k) # CONVOLUTIONS
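A small usage sketch of the wrapped call (the tensor values are made-up samples). Note the TF1-style argument order, predictions before targets, which is what the compat.v1 wrapper above passes through.

import tensorflow as tf

predictions = tf.constant([[0.1, 0.7, 0.2],
                           [0.5, 0.3, 0.2]], dtype=tf.float32)
targets = tf.constant([1, 2], dtype=tf.int32)

# Row 0: target 1 (score 0.7) is within the top 2 -> True.
# Row 1: target 2 (score 0.2) is not within the top 2 -> False.
result = tf.compat.v1.math.in_top_k(predictions, targets, k=2)
print(result.numpy())  # [ True False]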
564
3,805
53
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py
14
9
def test_update_job(self, parent_job, grouped_jobs, api, batch): parent_job.update_job() # assert for job in grouped_jobs: j
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <[email protected]> Co-authored-by: Eugene Kulak <[email protected]>
test_update_job
a3aae8017a0a40ff2006e2567f71dccb04c997a5
airbyte
test_async_job.py
10
4
https://github.com/airbytehq/airbyte.git
2
34
0
14
54
Python
{ "docstring": "Checks jobs status in advance and restart if some failed.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def test_update_job(self, parent_job, grouped_jobs, api, batch): parent_job.update_job() # assert for job in grouped_jobs: job.update_job.assert_called_once_with(batch=batch)
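As a generic illustration of the mock-based assertion pattern in that test (the fixtures here are stand-ins, not the actual Airbyte parent_job/api objects), unittest.mock records each call so the test can verify it was forwarded with the expected batch:

from unittest.mock import MagicMock

batch = object()                        # stands in for the API batch object
grouped_jobs = [MagicMock(), MagicMock()]

# The parent job is expected to call update_job(batch=...) on every child job.
for job in grouped_jobs:
    job.update_job(batch=batch)

for job in grouped_jobs:
    job.update_job.assert_called_once_with(batch=batch)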
54,446
216,169
30
salt/modules/cp.py
14
6
def list_master_symlinks(saltenv=None, prefix=""): if not saltenv: salt
fixes saltstack/salt#61562 cp functions derive saltenv from config
list_master_symlinks
2bd6323ef5f87d871891a59917ee96f44ef55e75
salt
cp.py
11
4
https://github.com/saltstack/salt.git
3
35
0
14
63
Python
{ "docstring": "\n .. versionchanged:: 3005\n ``saltenv`` will use value from config if not explicitly set\n\n List all of the symlinks stored on the master\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.list_master_symlinks\n ", "language": "en", "n_whitespaces": 60, "n_words": 30, "vocab_size": 28 }
def list_master_symlinks(saltenv=None, prefix=""): if not saltenv: saltenv = __opts__["saltenv"] or "base" return _client().symlink_list(saltenv, prefix)
83,854
281,557
60
gamestonk_terminal/stocks/options/screener_controller.py
25
12
def print_help(self): has_screen_tickers_start = "" if self.screen_tickers else "[unvl]" has_screen_tickers_end = "" if self.screen_tic
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
print_help
82747072c511beb1b2672846ae2ee4aec53eb562
OpenBBTerminal
screener_controller.py
11
16
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
40
0
18
106
Python
{ "docstring": "Print help[cmds]\n view view available presets (or one in particular)\n set set one of the available presets\n[/cmds]\n[param]PRESET: [/param]{self.preset}[cmds]\n\n scr screen data from this preset[/cmds]\n{has_screen_tickers_start}\n[param]Last screened tickers: [/param]{', '.join(self.screen_tickers)}[menu]\n> ca take these to comparison analysis menu\n> po take these to portoflio optimization menu{has_screen_tickers_end}\n ", "language": "en", "n_whitespaces": 116, "n_words": 48, "vocab_size": 39 }
def print_help(self): has_screen_tickers_start = "" if self.screen_tickers else "[unvl]" has_screen_tickers_end = "" if self.screen_tickers else "[/unvl]" help_text = f console.print(text=help_text, menu="Stocks - Options - Screener")
49,586
200,292
1,749
sympy/testing/runtests.py
358
43
def _find(self, tests, obj, name, module, source_lines, globs, seen): if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return
runtests.py: Undo auto-formatting, re-add changes to blacklist for scipy, numpy
_find
6d2bbf80752549276a968fd4af78231c569d55c5
sympy
runtests.py
18
65
https://github.com/sympy/sympy.git
32
512
0
161
803
Python
{ "docstring": "\n Find tests for the given object and any contained objects, and\n add them to ``tests``.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
def _find(self, tests, obj, name, module, source_lines, globs, seen): if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return # Find a test for this object, and add it to the list of tests. test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) if not self._recurse: return # Look for tests in a module's contained objects. if inspect.ismodule(obj): for rawname, val in obj.__dict__.items(): # Recurse to functions & classes. if inspect.isfunction(val) or inspect.isclass(val): # Make sure we don't run doctests functions or classes # from different modules if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (rawname %s)" % (val, module, rawname) try: valname = '%s.%s' % (name, rawname) self._find(tests, val, valname, module, source_lines, globs, seen) except KeyboardInterrupt: raise # Look for tests in a module's __test__ dictionary. for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, str): raise ValueError("SymPyDocTestFinder.find: __test__ keys " "must be strings: %r" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, str)): raise ValueError("SymPyDocTestFinder.find: __test__ values " "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj): for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. if ((inspect.isfunction(unwrap(val)) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): # Make sure we don't run doctests functions or classes # from different modules if isinstance(val, property): if hasattr(val.fget, '__module__'): if val.fget.__module__ != module.__name__: continue else: if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (valname %s)" % ( val, module, valname) valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen)
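For orientation, the standard-library DocTestFinder that this method specializes can be driven directly; the short sketch below (the choice of module is arbitrary) shows the kind of collection the overridden _find performs, minus the extra module- and visibility-checks layered on top.

import doctest
import statistics  # an arbitrary stdlib module whose docstrings contain >>> examples

finder = doctest.DocTestFinder(recurse=True)
tests = finder.find(statistics)

# Each DocTest bundles the examples found in one docstring.
for test in sorted(tests, key=lambda t: t.name):
    print(test.name, len(test.examples))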