Dataset schema. Each column is listed with its dtype and its observed summary (value range for int64 columns, length range for string columns):

id: int64, 20 to 338k
vocab_size: int64, 2 to 671
ast_levels: int64, 4 to 32
nloc: int64, 1 to 451
n_ast_nodes: int64, 12 to 5.6k
n_identifiers: int64, 1 to 186
n_ast_errors: int64, 0 to 10
n_words: int64, 2 to 2.17k
n_whitespaces: int64, 2 to 13.8k
fun_name: string, lengths 2 to 73
commit_message: string, lengths 51 to 15.3k
url: string, lengths 31 to 59
code: string, lengths 51 to 31k
ast_errors: string, lengths 0 to 1.46k
token_counts: int64, 6 to 3.32k
file_name: string, lengths 5 to 56
language: string, 1 class (Python)
path: string, lengths 7 to 134
commit_id: string, lengths 40 to 40
repo: string, lengths 3 to 28
complexity: int64, 1 to 153
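The figures above are per-column summaries. As a rough sketch only (the file name, its JSON-lines format, and the loading step are assumptions, not part of this dump), the same kind of summary could be recomputed with pandas:

import pandas as pd

# Hypothetical export of the records shown below; name and format are assumed.
df = pd.read_json("code_records.jsonl", lines=True)

int_cols = [
    "id", "vocab_size", "ast_levels", "nloc", "n_ast_nodes",
    "n_identifiers", "n_ast_errors", "n_words", "n_whitespaces",
    "token_counts", "complexity",
]
str_cols = [
    "fun_name", "commit_message", "url", "code", "ast_errors",
    "file_name", "language", "path", "commit_id", "repo",
]

# int64 columns: observed value range.
print(df[int_cols].agg(["min", "max"]).T)

# string columns: observed length range (what "stringlengths" reports above).
print(df[str_cols].fillna("").apply(lambda s: s.str.len()).agg(["min", "max"]).T)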
id: 276,774 | vocab_size: 94 | ast_levels: 15 | nloc: 99 | n_ast_nodes: 961 | n_identifiers: 49 | n_ast_errors: 0 | n_words: 188 | n_whitespaces: 1,125
fun_name: test_get_file_and_validate_it
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def test_get_file_and_validate_it(self): dest_dir = self.get_temp_dir() orig_dir = self.get_temp_dir() text_file_path = os.path.join(orig_dir, "test.txt") zip_file_path = os.path.join(orig_dir, "test.zip") tar_file_path = os.path.join(orig_dir, "test.tar.gz") with open(text_file_path, "w") as text_file: text_file.write("Float like a butterfly, sting like a bee.") with tarfile.open(tar_file_path, "w:gz") as tar_file: tar_file.add(text_file_path) with zipfile.ZipFile(zip_file_path, "w") as zip_file: zip_file.write(text_file_path) origin = urllib.parse.urljoin( "file://", urllib.request.pathname2url(os.path.abspath(tar_file_path)), ) path = keras.utils.data_utils.get_file( "test.txt", origin, untar=True, cache_subdir=dest_dir ) filepath = path + ".tar.gz" hashval_sha256 = keras.utils.data_utils._hash_file(filepath) hashval_md5 = keras.utils.data_utils._hash_file( filepath, algorithm="md5" ) path = keras.utils.data_utils.get_file( "test.txt", origin, md5_hash=hashval_md5, untar=True, cache_subdir=dest_dir, ) path = keras.utils.data_utils.get_file( filepath, origin, file_hash=hashval_sha256, extract=True, cache_subdir=dest_dir, ) self.assertTrue(os.path.exists(filepath)) self.assertTrue( keras.utils.data_utils.validate_file(filepath, hashval_sha256) ) self.assertTrue( keras.utils.data_utils.validate_file(filepath, hashval_md5) ) os.remove(filepath) origin = urllib.parse.urljoin( "file://", urllib.request.pathname2url(os.path.abspath(zip_file_path)), ) hashval_sha256 = keras.utils.data_utils._hash_file(zip_file_path) hashval_md5 = keras.utils.data_utils._hash_file( zip_file_path, algorithm="md5" ) path = keras.utils.data_utils.get_file( "test", origin, md5_hash=hashval_md5, extract=True, cache_subdir=dest_dir, ) path = keras.utils.data_utils.get_file( "test", origin, file_hash=hashval_sha256, extract=True, cache_subdir=dest_dir, ) self.assertTrue(os.path.exists(path)) self.assertTrue( keras.utils.data_utils.validate_file(path, hashval_sha256) ) self.assertTrue(keras.utils.data_utils.validate_file(path, hashval_md5)) os.remove(path) for file_path, extract in [ (text_file_path, False), (tar_file_path, True), (zip_file_path, True), ]: origin = urllib.parse.urljoin( "file://", urllib.request.pathname2url(os.path.abspath(file_path)), ) hashval_sha256 = keras.utils.data_utils._hash_file(file_path) path = keras.utils.data_utils.get_file( origin=origin, file_hash=hashval_sha256, extract=extract, cache_subdir=dest_dir, ) self.assertTrue(os.path.exists(path)) self.assertTrue( keras.utils.data_utils.validate_file(path, hashval_sha256) ) os.remove(path) with self.assertRaisesRegexp( ValueError, 'Please specify the "origin".*' ): _ = keras.utils.data_utils.get_file()
token_counts: 608 | file_name: data_utils_test.py | language: Python | path: keras/utils/data_utils_test.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 2
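Each record lists its fields in the order given by the schema at the top. As an illustration only, the first record above maps onto those columns roughly like this (values copied from the record; the dict representation is an assumption, and the long code and commit_message fields are omitted for brevity):

# First record, keyed by the schema's column names (code/commit_message omitted).
record = {
    "id": 276774,
    "vocab_size": 94,
    "ast_levels": 15,
    "nloc": 99,
    "n_ast_nodes": 961,
    "n_identifiers": 49,
    "n_ast_errors": 0,
    "n_words": 188,
    "n_whitespaces": 1125,
    "fun_name": "test_get_file_and_validate_it",
    "url": "https://github.com/keras-team/keras.git",
    "ast_errors": "",
    "token_counts": 608,
    "file_name": "data_utils_test.py",
    "language": "Python",
    "path": "keras/utils/data_utils_test.py",
    "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf",
    "repo": "keras",
    "complexity": 2,
}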
id: 211,402 | vocab_size: 171 | ast_levels: 17 | nloc: 62 | n_ast_nodes: 961 | n_identifiers: 58 | n_ast_errors: 0 | n_words: 292 | n_whitespaces: 1,144
fun_name: get_pred
add flag skipping postprocess to support edgeboard hardware (#6719) * add flag skipping postprocess to support edgeboard hardware * add flag skipping postprocess to support edgeboard hardware * add flag skipping postprocess to support edgeboard hardware * add comment for the flag export_eb
https://github.com/PaddlePaddle/PaddleDetection.git
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor): if self.export_eb: # enable rcnn models for edgeboard hw to skip the following postprocess. return bboxes, bboxes, bbox_num if not self.export_onnx: bboxes_list = [] bbox_num_list = [] id_start = 0 fake_bboxes = paddle.to_tensor( np.array( [[0., 0.0, 0.0, 0.0, 1.0, 1.0]], dtype='float32')) fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32')) # add fake bbox when output is empty for each batch for i in range(bbox_num.shape[0]): if bbox_num[i] == 0: bboxes_i = fake_bboxes bbox_num_i = fake_bbox_num else: bboxes_i = bboxes[id_start:id_start + bbox_num[i], :] bbox_num_i = bbox_num[i] id_start += bbox_num[i] bboxes_list.append(bboxes_i) bbox_num_list.append(bbox_num_i) bboxes = paddle.concat(bboxes_list) bbox_num = paddle.concat(bbox_num_list) origin_shape = paddle.floor(im_shape / scale_factor + 0.5) if not self.export_onnx: origin_shape_list = [] scale_factor_list = [] # scale_factor: scale_y, scale_x for i in range(bbox_num.shape[0]): expand_shape = paddle.expand(origin_shape[i:i + 1, :], [bbox_num[i], 2]) scale_y, scale_x = scale_factor[i][0], scale_factor[i][1] scale = paddle.concat([scale_x, scale_y, scale_x, scale_y]) expand_scale = paddle.expand(scale, [bbox_num[i], 4]) origin_shape_list.append(expand_shape) scale_factor_list.append(expand_scale) self.origin_shape_list = paddle.concat(origin_shape_list) scale_factor_list = paddle.concat(scale_factor_list) else: # simplify the computation for bs=1 when exporting onnx scale_y, scale_x = scale_factor[0][0], scale_factor[0][1] scale = paddle.concat( [scale_x, scale_y, scale_x, scale_y]).unsqueeze(0) self.origin_shape_list = paddle.expand(origin_shape, [bbox_num[0], 2]) scale_factor_list = paddle.expand(scale, [bbox_num[0], 4]) # bboxes: [N, 6], label, score, bbox pred_label = bboxes[:, 0:1] pred_score = bboxes[:, 1:2] pred_bbox = bboxes[:, 2:] # rescale bbox to original image scaled_bbox = pred_bbox / scale_factor_list origin_h = self.origin_shape_list[:, 0] origin_w = self.origin_shape_list[:, 1] zeros = paddle.zeros_like(origin_h) # clip bbox to [0, original_size] x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros) y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros) x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros) y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros) pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1) # filter empty bbox keep_mask = nonempty_bbox(pred_bbox, return_mask=True) keep_mask = paddle.unsqueeze(keep_mask, [1]) pred_label = paddle.where(keep_mask, pred_label, paddle.ones_like(pred_label) * -1) pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1) return bboxes, pred_result, bbox_num
token_counts: 651 | file_name: post_process.py | language: Python | path: ppdet/modeling/post_process.py | commit_id: b41194eaed10a01409451e4d3ea7f8b4812cdd23 | repo: PaddleDetection | complexity: 7
id: 130,291 | vocab_size: 15 | ast_levels: 8 | nloc: 5 | n_ast_nodes: 61 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 19 | n_whitespaces: 58
fun_name: is_file
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def is_file(self, follow_links=None):
    if follow_links is None:
        follow_links = True
    node_stat = self._stat if follow_links else self._lstat
    return stat.S_ISREG(node_stat.st_mode)
token_counts: 38 | file_name: util.py | language: Python | path: python/ray/_private/thirdparty/pathspec/util.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 3
id: 178,342 | vocab_size: 170 | ast_levels: 18 | nloc: 99 | n_ast_nodes: 725 | n_identifiers: 65 | n_ast_errors: 0 | n_words: 273 | n_whitespaces: 1,045
fun_name: executePostProcessing
Windows: Fix, cmd files created for accelerated mode didn't forward arguments
https://github.com/Nuitka/Nuitka.git
def executePostProcessing(): result_filename = OutputDirectories.getResultFullpath(onefile=False) if not os.path.exists(result_filename): postprocessing_logger.sysexit( "Error, scons failed to create the expected file %r. " % result_filename ) if isWin32Windows(): if not Options.shallMakeModule(): if python_version < 0x300: # Copy the Windows manifest from the CPython binary to the created # executable, so it finds "MSCRT.DLL". This is needed for Python2 # only, for Python3 newer MSVC doesn't hide the C runtime. manifest = getWindowsExecutableManifest(sys.executable) else: manifest = None executePostProcessingResources(manifest=manifest, onefile=False) source_dir = OutputDirectories.getSourceDirectoryPath() # Attach the binary blob as a Windows resource. addResourceToFile( target_filename=result_filename, data=getFileContents(getConstantBlobFilename(source_dir), "rb"), resource_kind=RT_RCDATA, res_name=3, lang_id=0, logger=postprocessing_logger, ) # On macOS, we update the executable path for searching the "libpython" # library. if ( isMacOS() and not Options.shallMakeModule() and not Options.shallUseStaticLibPython() ): python_abi_version = python_version_str + getPythonABI() python_dll_filename = "libpython" + python_abi_version + ".dylib" python_lib_path = os.path.join(sys.prefix, "lib") # Note: For CPython and potentially others, the rpath for the Python # library needs to be set. callInstallNameTool( filename=result_filename, mapping=( ( python_dll_filename, os.path.join(python_lib_path, python_dll_filename), ), ( "@rpath/Python3.framework/Versions/%s/Python3" % python_version_str, os.path.join(python_lib_path, python_dll_filename), ), ), rpath=python_lib_path, ) if isMacOS() and Options.shallCreateAppBundle(): createPlistInfoFile(logger=postprocessing_logger, onefile=False) # Modules should not be executable, but Scons creates them like it, fix # it up here. if not isWin32Windows() and Options.shallMakeModule(): removeFileExecutablePermission(result_filename) if isWin32Windows() and Options.shallMakeModule(): candidate = os.path.join( os.path.dirname(result_filename), "lib" + os.path.basename(result_filename)[:-4] + ".a", ) if os.path.exists(candidate): os.unlink(candidate) if isWin32Windows() and Options.shallTreatUninstalledPython(): dll_directory = getExternalUsePath(os.path.dirname(getTargetPythonDLLPath())) cmd_filename = OutputDirectories.getResultRunFilename(onefile=False) cmd_contents = % { "dll_directory": dll_directory, "exe_filename": os.path.basename(result_filename), } putTextFileContents(cmd_filename, cmd_contents) # Create a ".pyi" file for created modules if Options.shallMakeModule() and Options.shallCreatePyiFile(): pyi_filename = OutputDirectories.getResultBasepath() + ".pyi" putTextFileContents( filename=pyi_filename, contents= % { "imports": "\n".join( "import %s" % module_name for module_name in getImportedNames() ) }, )
token_counts: 431 | file_name: PostProcessing.py | language: Python | path: nuitka/PostProcessing.py | commit_id: ccc9b2f5a305c76d05eaaf29f94c74e7b4ece7ff | repo: Nuitka | complexity: 20
id: 305,503 | vocab_size: 139 | ast_levels: 26 | nloc: 105 | n_ast_nodes: 798 | n_identifiers: 55 | n_ast_errors: 0 | n_words: 213 | n_whitespaces: 1,944
fun_name: test_sync_in_area
Remove area_id from entity_registry.async_get_or_create (#77700) * Remove area_id from entity_registry.async_get_or_create * Adjust tests * Fix lying comment in test
https://github.com/home-assistant/core.git
async def test_sync_in_area(area_on_device, hass, registries): area = registries.area.async_create("Living Room") device = registries.device.async_get_or_create( config_entry_id="1234", manufacturer="Someone", model="Some model", sw_version="Some Version", connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")}, ) registries.device.async_update_device( device.id, area_id=area.id if area_on_device else None ) entity = registries.entity.async_get_or_create( "light", "test", "1235", suggested_object_id="demo_light", device_id=device.id, ) entity = registries.entity.async_update_entity( entity.entity_id, area_id=area.id if not area_on_device else None ) light = DemoLight( None, "Demo Light", state=False, hs_color=(180, 75), effect_list=LIGHT_EFFECT_LIST, effect=LIGHT_EFFECT_LIST[0], ) light.hass = hass light.entity_id = entity.entity_id await light.async_update_ha_state() config = MockConfig(should_expose=lambda _: True, entity_config={}) events = async_capture_events(hass, EVENT_SYNC_RECEIVED) result = await sh.async_handle_message( hass, config, "test-agent", {"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]}, const.SOURCE_CLOUD, ) assert result == { "requestId": REQ_ID, "payload": { "agentUserId": "test-agent", "devices": [ { "id": "light.demo_light", "name": {"name": "Demo Light"}, "traits": [ trait.TRAIT_BRIGHTNESS, trait.TRAIT_ONOFF, trait.TRAIT_COLOR_SETTING, trait.TRAIT_MODES, ], "type": const.TYPE_LIGHT, "willReportState": False, "attributes": { "availableModes": [ { "name": "effect", "name_values": [ {"lang": "en", "name_synonym": ["effect"]} ], "ordered": False, "settings": [ { "setting_name": "rainbow", "setting_values": [ { "lang": "en", "setting_synonym": ["rainbow"], } ], }, { "setting_name": "none", "setting_values": [ {"lang": "en", "setting_synonym": ["none"]} ], }, ], } ], "colorModel": "hsv", "colorTemperatureRange": { "temperatureMinK": 2000, "temperatureMaxK": 6535, }, }, "deviceInfo": { "manufacturer": "Someone", "model": "Some model", "swVersion": "Some Version", }, "roomHint": "Living Room", } ], }, } await hass.async_block_till_done() assert len(events) == 1 assert events[0].event_type == EVENT_SYNC_RECEIVED assert events[0].data == {"request_id": REQ_ID, "source": "cloud"}
token_counts: 466 | file_name: test_smart_home.py | language: Python | path: tests/components/google_assistant/test_smart_home.py | commit_id: 1bc8770b51658f0dc1bd076b392d70be5a7433bc | repo: core | complexity: 3
id: 266,492 | vocab_size: 8 | ast_levels: 11 | nloc: 4 | n_ast_nodes: 42 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 8 | n_whitespaces: 33
fun_name: check_layout
ansible-test - Improve help for unsupported cwd. (#76866) * ansible-test - Improve help for unsupported cwd. * The `--help` option is now available when an unsupported cwd is in use. * The `--help` output now shows the same instructions about cwd as would be shown in error messages if the cwd is unsupported. * Add `--version` support to show the ansible-core version. * The explanation about cwd usage has been improved to explain more clearly what is required. Resolves https://github.com/ansible/ansible/issues/64523 Resolves https://github.com/ansible/ansible/issues/67551
https://github.com/ansible/ansible.git
def check_layout(self) -> None:
    if self.content.unsupported:
        raise ApplicationError(self.explain_working_directory())
token_counts: 24 | file_name: data.py | language: Python | path: test/lib/ansible_test/_internal/data.py | commit_id: de5f60e374524de13fe079b52282cd7a9eeabd5f | repo: ansible | complexity: 2
id: 21,982 | vocab_size: 115 | ast_levels: 20 | nloc: 42 | n_ast_nodes: 457 | n_identifiers: 48 | n_ast_errors: 0 | n_words: 159 | n_whitespaces: 1,081
fun_name: _yield_distributions
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
https://github.com/pypa/pipenv.git
def _yield_distributions(self): # We need to check if we've seen some resources already, because on # some Linux systems (e.g. some Debian/Ubuntu variants) there are # symlinks which alias other files in the environment. seen = set() for path in self.path: finder = resources.finder_for_path(path) if finder is None: continue r = finder.find('') if not r or not r.is_container: continue rset = sorted(r.resources) for entry in rset: r = finder.find(entry) if not r or r.path in seen: continue try: if self._include_dist and entry.endswith(DISTINFO_EXT): possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME] for metadata_filename in possible_filenames: metadata_path = posixpath.join(entry, metadata_filename) pydist = finder.find(metadata_path) if pydist: break else: continue with contextlib.closing(pydist.as_stream()) as stream: metadata = Metadata(fileobj=stream, scheme='legacy') logger.debug('Found %s', r.path) seen.add(r.path) yield new_dist_class(r.path, metadata=metadata, env=self) elif self._include_egg and entry.endswith(('.egg-info', '.egg')): logger.debug('Found %s', r.path) seen.add(r.path) yield old_dist_class(r.path, self) except Exception as e: msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s' logger.warning(msg, r.path, e) import warnings warnings.warn(msg % (r.path, e), stacklevel=2)
token_counts: 277 | file_name: database.py | language: Python | path: pipenv/patched/pip/_vendor/distlib/database.py | commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec | repo: pipenv | complexity: 15
id: 20,199 | vocab_size: 12 | ast_levels: 11 | nloc: 9 | n_ast_nodes: 55 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 15 | n_whitespaces: 54
fun_name: user_log_dir
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def user_log_dir(self) -> str:
    path = self.user_cache_dir
    if self.opinion:
        path = os.path.join(path, "log")
    return path
token_counts: 32 | file_name: android.py | language: Python | path: pipenv/patched/notpip/_vendor/platformdirs/android.py | commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3 | repo: pipenv | complexity: 2
id: 176,457 | vocab_size: 25 | ast_levels: 10 | nloc: 5 | n_ast_nodes: 54 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 35 | n_whitespaces: 72
fun_name: dict_to_numpy_array
Deprecate dict to numpy helpers (#5427) * Convert to private fns and add deprecated public wrappers. * Modify existing tests to use private fns. * Add test that dep warnings are raised. * Add dep note. * Add release note. * Add importorskip(numpy).
https://github.com/networkx/networkx.git
def dict_to_numpy_array(d, mapping=None):
    try:
        return _dict_to_numpy_array2(d, mapping)
    except (AttributeError, TypeError):
        # AttributeError is when no mapping was provided and v.keys() fails.
        # TypeError is when a mapping was provided and d[k1][k2] fails.
        return _dict_to_numpy_array1(d, mapping)
token_counts: 33 | file_name: misc.py | language: Python | path: networkx/utils/misc.py | commit_id: 72b1dca7d7d4d8bab519d55541d981d2f4f61365 | repo: networkx | complexity: 2
id: 279,606 | vocab_size: 45 | ast_levels: 11 | nloc: 10 | n_ast_nodes: 167 | n_identifiers: 18 | n_ast_errors: 1 | n_words: 52 | n_whitespaces: 105
fun_name: save_img
Add f-string format and lint with flynt on the whole codebase
https://github.com/keras-team/keras.git
def save_img(path, x, data_format=None, file_format=None, scale=True, **kwargs):
    if data_format is None:
        data_format = backend.image_data_format()
    img = array_to_img(x, data_format=data_format, scale=scale)
    if img.mode == "RGBA" and (file_format == "jpg" or file_format == "jpeg"):
        warnings.warn(
            "The JPG format does not support RGBA images, converting to RGB."
        )
        img = img.convert("RGB")
    img.save(path, format=file_format, **kwargs)


@keras_export("keras.utils.load_img", "keras.preprocessing.image.load_img")
ast_errors: @keras_export("keras.utils.load_img", "keras.preprocessing.image.load_img")
token_counts: 93 | file_name: image_utils.py | language: Python | path: keras/utils/image_utils.py | commit_id: be73ac1a1e25d9abd4d793cba9707098d7adf231 | repo: keras | complexity: 5
id: 83,030 | vocab_size: 26 | ast_levels: 11 | nloc: 9 | n_ast_nodes: 63 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 29 | n_whitespaces: 64
fun_name: test_subscriptions_exist_invalid_name
string_validation: Give a more specific message for empty stream names. Co-authored-by: Shlok Patel <[email protected]>
https://github.com/zulip/zulip.git
def test_subscriptions_exist_invalid_name(self) -> None:
    # currently, the only invalid stream name is the empty string
    invalid_stream_name = ""
    result = self.client_post("/json/subscriptions/exists", {"stream": invalid_stream_name})
    self.assert_json_error(result, "Stream name can't be empty!")
token_counts: 33 | file_name: test_subs.py | language: Python | path: zerver/tests/test_subs.py | commit_id: 94dbb540b149251068698ceca4591ad74b494686 | repo: zulip | complexity: 1
id: 111,253 | vocab_size: 57 | ast_levels: 11 | nloc: 31 | n_ast_nodes: 294 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 82 | n_whitespaces: 315
fun_name: test_displacy_parse_spans_with_kb_id_options
Add displacy support for overlapping Spans (#10332) * Fix docstring for EntityRenderer * Add warning in displacy if doc.spans are empty * Implement parse_spans converter One notable change here is that the default spans_key is sc, and it's set by the user through the options. * Implement SpanRenderer Here, I implemented a SpanRenderer that looks similar to the EntityRenderer except for some templates. The spans_key, by default, is set to sc, but can be configured in the options (see parse_spans). The way I rendered these spans is per-token, i.e., I first check if each token (1) belongs to a given span type and (2) a starting token of a given span type. Once I have this information, I render them into the markup. * Fix mypy issues on typing * Add tests for displacy spans support * Update colors from RGB to hex Co-authored-by: Ines Montani <[email protected]> * Remove unnecessary CSS properties * Add documentation for website * Remove unnecesasry scripts * Update wording on the documentation Co-authored-by: Sofie Van Landeghem <[email protected]> * Put typing dependency on top of file * Put back z-index so that spans overlap properly * Make warning more explicit for spans_key Co-authored-by: Ines Montani <[email protected]> Co-authored-by: Sofie Van Landeghem <[email protected]>
https://github.com/explosion/spaCy.git
def test_displacy_parse_spans_with_kb_id_options(en_vocab):
    doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"])
    doc.spans["sc"] = [
        Span(doc, 3, 6, "ORG", kb_id="Q790068"),
        Span(doc, 5, 6, "GPE", kb_id="Q148"),
    ]
    spans = displacy.parse_spans(
        doc, {"kb_url_template": "https://wikidata.org/wiki/{}"}
    )
    assert isinstance(spans, dict)
    assert spans["text"] == "Welcome to the Bank of China "
    assert spans["spans"] == [
        {
            "start": 15,
            "end": 28,
            "start_token": 3,
            "end_token": 6,
            "label": "ORG",
            "kb_id": "Q790068",
            "kb_url": "https://wikidata.org/wiki/Q790068",
        },
        {
            "start": 23,
            "end": 28,
            "start_token": 5,
            "end_token": 6,
            "label": "GPE",
            "kb_id": "Q148",
            "kb_url": "https://wikidata.org/wiki/Q148",
        },
    ]
token_counts: 165 | file_name: test_displacy.py | language: Python | path: spacy/tests/test_displacy.py | commit_id: a79cd3542b3dd667d8a97293462e22ed26a04ee5 | repo: spaCy | complexity: 1
id: 80,113 | vocab_size: 31 | ast_levels: 13 | nloc: 11 | n_ast_nodes: 106 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 34 | n_whitespaces: 114
fun_name: map_list_block_value
Add StreamField migration helpers from https://github.com/sandilsranasinghe/wagtail-streamfield-migration-toolkit/
https://github.com/wagtail/wagtail.git
def map_list_block_value(list_block_value, block_def, block_path, **kwargs):
    mapped_value = []

    # In case data is in old list format
    for child_block in formatted_list_child_generator(list_block_value):
        mapped_child_value = map_block_value(
            child_block["value"],
            block_def=block_def.child_block,
            block_path=block_path[1:],
            **kwargs,
        )
        mapped_value.append({**child_block, "value": mapped_child_value})

    return mapped_value
token_counts: 67 | file_name: utils.py | language: Python | path: wagtail/blocks/migrations/utils.py | commit_id: ec6229c23600ebae8ec0d5db6846b095a9468151 | repo: wagtail | complexity: 2
id: 209,490 | vocab_size: 23 | ast_levels: 12 | nloc: 4 | n_ast_nodes: 70 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 25 | n_whitespaces: 53
fun_name: get_if_addr6
Answering machines improvements (NBNS/DNS/LLMNR) (#3699) * Minor NBNS improvements * Improve Netbios/LLMNR/DNS answering machines * DNS_am: support IPv6 * More customization of some answering machines
https://github.com/secdev/scapy.git
def get_if_addr6(niff):
    # type: (NetworkInterface) -> Optional[str]
    iff = network_name(niff)
    return next((x[0] for x in in6_getifaddr() if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL), None)
token_counts: 44 | file_name: __init__.py | language: Python | path: scapy/arch/__init__.py | commit_id: dd7a5c97d68c00d1d03ecf8ac27c6c7038525065 | repo: scapy | complexity: 4
id: 176,095 | vocab_size: 11 | ast_levels: 10 | nloc: 7 | n_ast_nodes: 44 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 46
fun_name: test_edgeql_select_subqueries_09
Add a `bag` type that tells assert_query_result to ignore order (#3314) assert_query_result currently supports using sets to ignore order, but that doesn't work for objects, which can't be hashed or sorted. There is a system for specifying a sort key for internal data, but it is way clunkier than just saying we don't care about the order. I converted some places that were using sort= to use this.
https://github.com/edgedb/edgedb.git
async def test_edgeql_select_subqueries_09(self):
    await self.assert_query_result(
        r,
        {'11', '22', '33', '44'},
    )
token_counts: 24 | file_name: test_edgeql_select.py | language: Python | path: tests/test_edgeql_select.py | commit_id: 26be7d28bdb4eb96c888e373e08f46e6b85711e3 | repo: edgedb | complexity: 1
id: 111,250 | vocab_size: 23 | ast_levels: 11 | nloc: 7 | n_ast_nodes: 146 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 31 | n_whitespaces: 52
fun_name: test_doc_to_json_span
Add spans to doc.to_json (#10073) * Add spans to to_json * adjustments to_json * Change docstring * change doc key naming * Update spacy/tokens/doc.pyx Co-authored-by: Adriane Boyd <[email protected]> Co-authored-by: Adriane Boyd <[email protected]>
https://github.com/explosion/spaCy.git
def test_doc_to_json_span(doc):
    doc.spans["test"] = [Span(doc, 0, 2, "test"), Span(doc, 0, 1, "test")]
    json_doc = doc.to_json()
    assert "spans" in json_doc
    assert len(json_doc["spans"]) == 1
    assert len(json_doc["spans"]["test"]) == 2
    assert json_doc["spans"]["test"][0]["start"] == 0
token_counts: 86 | file_name: test_to_json.py | language: Python | path: spacy/tests/doc/test_to_json.py | commit_id: b68bf43f5bf07b78c062777f35240f031374fe00 | repo: spaCy | complexity: 1
id: 286,657 | vocab_size: 78 | ast_levels: 13 | nloc: 32 | n_ast_nodes: 286 | n_identifiers: 26 | n_ast_errors: 0 | n_words: 109 | n_whitespaces: 267
fun_name: functions_df
Audit SDK and View/Model functions (#3384) * Initial commit * Finalized functionality * update script * Allow using it without forecasting * Update gitignore * Update `sdk_audit.py` * Fixed issues, found more * Added fix for helper functions, and column for SDK type * Checked one more thing * Moved file * Move files ending with models/views * Added fix of name * Added file path fixes * Patch to fix sdk_audit for windows * fix Co-authored-by: Chavithra PARANA <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def functions_df() -> pd.DataFrame:
    modules = all_view_models()
    all_formatted = []
    for module in modules:
        if not FORECASTING and "forecast" in str(module):
            continue
        loaded = load_modules(module)
        # Gets all of a module's functions, but ignores imported functions
        func_list = [
            x[1]
            for x in getmembers(loaded, isfunction)
            if x[1].__module__ == loaded.__name__
        ]
        formatted_list = [format_function(x) for x in func_list]
        all_formatted.extend(formatted_list)
    func_df = pd.DataFrame()
    func_df["name"] = [x[0] for x in all_formatted]
    func_df["docstring"] = [x[1] for x in all_formatted]
    func_dups = len(func_df["name"]) - len(func_df["name"].drop_duplicates())
    if func_dups > 0:
        print(f"Number of duplicate functions found: {func_dups}")
        print(
            "This may indicate that functions are defined several times in the terminal.\n"
        )
    func_df = func_df.set_index("name")
    return func_df
token_counts: 169 | file_name: sdk_audit.py | language: Python | path: openbb_terminal/core/scripts/sdk_audit.py | commit_id: 963ca9b2b924d0514e0e65243dc8d9d7af023ad1 | repo: OpenBBTerminal | complexity: 10
id: 266,729 | vocab_size: 20 | ast_levels: 7 | nloc: 2 | n_ast_nodes: 23 | n_identifiers: 3 | n_ast_errors: 0 | n_words: 21 | n_whitespaces: 29
fun_name: format_line
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
https://github.com/ansible/ansible.git
def format_line(value):  # type: (int) -> str
    return str(value)  # putting this in a function keeps both pylint and mypy happy
token_counts: 11 | file_name: __init__.py | language: Python | path: test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py | commit_id: a06fa496d3f837cca3c437ab6e9858525633d147 | repo: ansible | complexity: 1
id: 301,457 | vocab_size: 17 | ast_levels: 9 | nloc: 7 | n_ast_nodes: 69 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 20 | n_whitespaces: 42
fun_name: _get_full_name
Fix memory leak when firing state_changed events (#72571)
https://github.com/home-assistant/core.git
def _get_full_name(obj) -> str:
    objtype = type(obj)
    name = objtype.__name__
    if module := getattr(objtype, "__module__", None):
        return f"{module}.{name}"
    return name
token_counts: 36 | file_name: test_core.py | language: Python | path: tests/test_core.py | commit_id: 049c06061ce92834b0c82b0e8b06ae7520322e54 | repo: core | complexity: 2
id: 249,078 | vocab_size: 26 | ast_levels: 12 | nloc: 18 | n_ast_nodes: 160 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 38 | n_whitespaces: 185
fun_name: test_update_display_name
Use literals in place of `HTTPStatus` constants in tests (#13469)
https://github.com/matrix-org/synapse.git
def test_update_display_name(self) -> None:
    # Set new display_name
    channel = self.make_request(
        "PUT",
        self.url,
        access_token=self.admin_user_tok,
        content={"display_name": "new displayname"},
    )
    self.assertEqual(200, channel.code, msg=channel.json_body)

    # Check new display_name
    channel = self.make_request(
        "GET",
        self.url,
        access_token=self.admin_user_tok,
    )
    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual("new displayname", channel.json_body["display_name"])
token_counts: 99 | file_name: test_device.py | language: Python | path: tests/rest/admin/test_device.py | commit_id: c97042f7eef3748e17c90e48a4122389a89c4735 | repo: synapse | complexity: 1
id: 32,882 | vocab_size: 27 | ast_levels: 15 | nloc: 9 | n_ast_nodes: 123 | n_identifiers: 18 | n_ast_errors: 0 | n_words: 34 | n_whitespaces: 82
fun_name: test_correct_head_class
`bitsandbytes` - `Linear8bitLt` integration into `transformers` models (#17901) * first commit * correct replace function * add final changes - works like charm! - cannot implement tests yet - tested * clean up a bit * add bitsandbytes dependencies * working version - added import function - added bitsandbytes utils file * small fix * small fix - fix import issue * fix import issues * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * refactor a bit - move bitsandbytes utils to utils - change comments on functions * reformat docstring - reformat docstring on init_empty_weights_8bit * Update src/transformers/__init__.py Co-authored-by: Sylvain Gugger <[email protected]> * revert bad formatting * change to bitsandbytes * refactor a bit - remove init8bit since it is useless * more refactoring - fixed init empty weights issue - added threshold param * small hack to make it work * Update src/transformers/modeling_utils.py * Update src/transformers/modeling_utils.py * revmoe the small hack * modify utils file * make style + refactor a bit * create correctly device map * add correct dtype for device map creation * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * apply suggestions - remove with torch.grad - do not rely on Python bool magic! * add docstring - add docstring for new kwargs * add docstring - comment `replace_8bit_linear` function - fix weird formatting * - added more documentation - added new utility function for memory footprint tracking - colab demo to add * few modifs - typo doc - force cast into float16 when load_in_8bit is enabled * added colab link * add test architecture + docstring a bit * refactor a bit testing class * make style + refactor a bit * enhance checks - add more checks - start writing saving test * clean up a bit * male style * add more details on doc * add more tests - still needs to fix 2 tests * replace by "or" - could not fix it from GitHub GUI Co-authored-by: Sylvain Gugger <[email protected]> * refactor a bit testing code + add readme * make style * fix import issue * Update src/transformers/modeling_utils.py Co-authored-by: Michael Benayoun <[email protected]> * add few comments * add more doctring + make style * more docstring * raise error when loaded in 8bit * make style * add warning if loaded on CPU * add small sanity check * fix small comment * add bitsandbytes on dockerfile * Improve documentation - improve documentation from comments * add few comments * slow tests pass on the VM but not on the CI VM * Fix merge conflict * make style * another test should pass on a multi gpu setup * fix bad import in testing file * Fix slow tests - remove dummy batches - no more CUDA illegal memory errors * odify dockerfile * Update docs/source/en/main_classes/model.mdx * Update Dockerfile * Update model.mdx * Update Dockerfile * Apply suggestions from code review * few modifications - lm head can stay on disk/cpu - change model name so that test pass * change test value - change test value to the correct output - torch bmm changed to baddmm in bloom modeling when merging * modify installation guidelines * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * replace `n`by `name` * merge `load_in_8bit` and `low_cpu_mem_usage` * first try - keep the lm head in full precision * better 
check - check the attribute `base_model_prefix` instead of computing the number of parameters * added more tests * Update src/transformers/utils/bitsandbytes.py Co-authored-by: Sylvain Gugger <[email protected]> * Merge branch 'integration-8bit' of https://github.com/younesbelkada/transformers into integration-8bit * improve documentation - fix typos for installation - change title in the documentation Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Michael Benayoun <[email protected]>
https://github.com/huggingface/transformers.git
def test_correct_head_class(self):
    r
    from bitsandbytes.nn import Int8Params

    # last param of a base model should be a linear8bit module
    self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params)

    # Other heads should be nn.Parameter
    self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter)
    self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
token_counts: 77 | file_name: test_mixed_int8.py | language: Python | path: tests/mixed_int8/test_mixed_int8.py | commit_id: 4a51075a96d2049f368b5f3dd6c0e9f08f599b62 | repo: transformers | complexity: 1
id: 246,261 | vocab_size: 11 | ast_levels: 8 | nloc: 16 | n_ast_nodes: 42 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 13 | n_whitespaces: 41
fun_name: test_overlapping_spans
Fixes for opentracing scopes (#11869) `start_active_span` was inconsistent as to whether it would activate the span immediately, or wait for `scope.__enter__` to happen (it depended on whether the current logcontext already had an associated scope). The inconsistency was rather confusing if you were hoping to set up a couple of separate spans before activating either. Looking at the other implementations of opentracing `ScopeManager`s, the intention is that it *should* be activated immediately, as the name implies. Indeed, the idea is that you don't have to use the scope as a contextmanager at all - you can just call `.close` on the result. Hence, our cleanup has to happen in `.close` rather than `.__exit__`. So, the main change here is to ensure that `start_active_span` does activate the span, and that `scope.close()` does close the scope. We also add some tests, which requires a `tracer` param so that we don't have to rely on the global variable in unit tests.
https://github.com/matrix-org/synapse.git
def test_overlapping_spans(self) -> None:
    reactor = MemoryReactorClock()
    clock = Clock(reactor)

    scopes = []
token_counts: 108 | file_name: test_opentracing.py | language: Python | path: tests/logging/test_opentracing.py | commit_id: 31b554c2976612ce5fd983517615906261c39cea | repo: synapse | complexity: 1
id: 287,950 | vocab_size: 39 | ast_levels: 11 | nloc: 17 | n_ast_nodes: 180 | n_identifiers: 21 | n_ast_errors: 1 | n_words: 51 | n_whitespaces: 146
fun_name: mock_smile_anna
Bump plugwise to v0.21.3, add related new features (#76610) Co-authored-by: Franck Nijhof <[email protected]>
https://github.com/home-assistant/core.git
def mock_smile_anna() -> Generator[None, MagicMock, None]:
    chosen_env = "anna_heatpump_heating"
    with patch(
        "homeassistant.components.plugwise.gateway.Smile", autospec=True
    ) as smile_mock:
        smile = smile_mock.return_value
        smile.gateway_id = "015ae9ea3f964e668e490fa39da3870b"
        smile.heater_id = "1cbf783bb11e4a7c8a6843dee3a86927"
        smile.smile_version = "4.0.15"
        smile.smile_type = "thermostat"
        smile.smile_hostname = "smile98765"
        smile.smile_name = "Anna"
        smile.connect.return_value = True
        smile.notifications = _read_json(chosen_env, "notifications")
        smile.async_update.return_value = _read_json(chosen_env, "all_data")
        yield smile


@pytest.fixture
ast_errors: @pytest.fixture
token_counts: 95 | file_name: conftest.py | language: Python | path: tests/components/plugwise/conftest.py | commit_id: 2667f0b792b1f936aeb5958cc40d5dee26350bf6 | repo: core | complexity: 1
id: 266,728 | vocab_size: 49 | ast_levels: 16 | nloc: 11 | n_ast_nodes: 266 | n_identifiers: 24 | n_ast_errors: 0 | n_words: 76 | n_whitespaces: 127
fun_name: make_report
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
https://github.com/ansible/ansible.git
def make_report(target_indexes, arcs, lines):  # type: (TargetIndexes, Arcs, Lines) -> t.Dict[str, t.Any]
    set_indexes = {}  # type: TargetSetIndexes
    arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items())
    line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items())

    report = dict(
        targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])],
        target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])],
        arcs=arc_refs,
        lines=line_refs,
    )

    return report
token_counts: 177 | file_name: __init__.py | language: Python | path: test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py | commit_id: a06fa496d3f837cca3c437ab6e9858525633d147 | repo: ansible | complexity: 7
id: 271,043 | vocab_size: 39 | ast_levels: 12 | nloc: 6 | n_ast_nodes: 62 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 48 | n_whitespaces: 101
fun_name: _maybe_broadcast_to_outputs
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _maybe_broadcast_to_outputs(self, outputs, objects):
    if not self._should_broadcast(objects):
        return objects

    # When there is more than one Model output, this is needed to keep
    # each Metric / Loss separate. When there is only one Model output,
    # the user-supplied object should be used.
    should_copy_objects = len(tf.nest.flatten(outputs)) > 1
token_counts: 54 | file_name: compile_utils.py | language: Python | path: keras/engine/compile_utils.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 2
id: 20,010 | vocab_size: 37 | ast_levels: 14 | nloc: 11 | n_ast_nodes: 101 | n_identifiers: 16 | n_ast_errors: 0 | n_words: 41 | n_whitespaces: 93
fun_name: _get_pyvenv_cfg_lines
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def _get_pyvenv_cfg_lines() -> Optional[List[str]]:
    pyvenv_cfg_file = os.path.join(sys.prefix, "pyvenv.cfg")
    try:
        # Although PEP 405 does not specify, the built-in venv module always
        # writes with UTF-8. (pypa/pip#8717)
        with open(pyvenv_cfg_file, encoding="utf-8") as f:
            return f.read().splitlines()  # avoids trailing newlines
    except OSError:
        return None
token_counts: 56 | file_name: virtualenv.py | language: Python | path: pipenv/patched/notpip/_internal/utils/virtualenv.py | commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3 | repo: pipenv | complexity: 2
id: 260,757 | vocab_size: 10 | ast_levels: 9 | nloc: 4 | n_ast_nodes: 53 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 39
fun_name: fit
MAINT Parameters validation for `ClassifierChain` and `RegressorChain` (#24112) Co-authored-by: jeremie du boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def fit(self, X, Y, **fit_params):
    self._validate_params()
    super().fit(X, Y, **fit_params)
    return self
token_counts: 33 | file_name: multioutput.py | language: Python | path: sklearn/multioutput.py | commit_id: 01944223c81617c24b9ce741356505fcfc0d6b44 | repo: scikit-learn | complexity: 1
id: 9,864 | vocab_size: 40 | ast_levels: 12 | nloc: 23 | n_ast_nodes: 204 | n_identifiers: 23 | n_ast_errors: 0 | n_words: 55 | n_whitespaces: 142
fun_name: update_runtime_cls
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
https://github.com/jina-ai/jina.git
def update_runtime_cls(args, copy=False) -> 'Namespace':
    _args = deepcopy(args) if copy else args
    gateway_runtime_dict = {
        GatewayProtocolType.GRPC: 'GRPCGatewayRuntime',
        GatewayProtocolType.WEBSOCKET: 'WebSocketGatewayRuntime',
        GatewayProtocolType.HTTP: 'HTTPGatewayRuntime',
    }
    if _args.runtime_cls == 'WorkerRuntime' and is_valid_huburi(_args.uses):
        _hub_args = deepcopy(_args)
        _hub_args.uri = _args.uses
        _hub_args.no_usage = True
        _args.uses = HubIO(_hub_args).pull()

    if hasattr(_args, 'protocol'):
        _args.runtime_cls = gateway_runtime_dict[_args.protocol]
    if _args.pea_role == PeaRoleType.HEAD:
        _args.runtime_cls = 'HeadRuntime'

    return _args
token_counts: 122 | file_name: helper.py | language: Python | path: jina/peapods/peas/helper.py | commit_id: 933415bfa1f9eb89f935037014dfed816eb9815d | repo: jina | complexity: 6
id: 262,743 | vocab_size: 160 | ast_levels: 16 | nloc: 54 | n_ast_nodes: 650 | n_identifiers: 40 | n_ast_errors: 0 | n_words: 283 | n_whitespaces: 840
fun_name: findLibrary
Bindepend: Add Termux-specific libraries search path. According to termux/termux-app#1595, this is all we need to change to faclitate using PyInstaller on Termux.
https://github.com/pyinstaller/pyinstaller.git
def findLibrary(name): assert compat.is_unix, "Current implementation for Unix only (Linux, Solaris, AIX, FreeBSD)" # Look in the LD_LIBRARY_PATH according to platform. if compat.is_aix: lp = compat.getenv('LIBPATH', '') elif compat.is_darwin: lp = compat.getenv('DYLD_LIBRARY_PATH', '') else: lp = compat.getenv('LD_LIBRARY_PATH', '') lib = _which_library(name, filter(None, lp.split(os.pathsep))) # Look in /etc/ld.so.cache # Solaris does not have /sbin/ldconfig. Just check if this file exists. if lib is None: utils.load_ldconfig_cache() lib = utils.LDCONFIG_CACHE.get(name) if lib: assert os.path.isfile(lib) # Look in the known safe paths. if lib is None: # Architecture independent locations. paths = ['/lib', '/usr/lib'] # Architecture dependent locations. if compat.architecture == '32bit': paths.extend(['/lib32', '/usr/lib32']) else: paths.extend(['/lib64', '/usr/lib64']) # Machine dependent locations. if compat.machine == 'intel': if compat.architecture == '32bit': paths.extend(['/usr/lib/i386-linux-gnu']) else: paths.extend(['/usr/lib/x86_64-linux-gnu']) # On Debian/Ubuntu /usr/bin/python is linked statically with libpython. Newer Debian/Ubuntu with multiarch # support puts the libpythonX.Y.so in paths like /usr/lib/i386-linux-gnu/. try: # Module available only in Python 2.7+ import sysconfig # 'multiarchsubdir' works on Debian/Ubuntu only in Python 2.7 and 3.3+. arch_subdir = sysconfig.get_config_var('multiarchsubdir') # Ignore if None is returned. if arch_subdir: arch_subdir = os.path.basename(arch_subdir) paths.append(os.path.join('/usr/lib', arch_subdir)) else: logger.debug('Multiarch directory not detected.') except ImportError: logger.debug('Multiarch directory not detected.') # Termux (a Ubuntu like subsystem for Android) has an additional libraries directory. if os.path.isdir('/data/data/com.termux/files/usr/lib'): paths.append('/data/data/com.termux/files/usr/lib') if compat.is_aix: paths.append('/opt/freeware/lib') elif compat.is_hpux: if compat.architecture == '32bit': paths.append('/usr/local/lib/hpux32') else: paths.append('/usr/local/lib/hpux64') elif compat.is_freebsd or compat.is_openbsd: paths.append('/usr/local/lib') lib = _which_library(name, paths) # Give up :( if lib is None: return None # Resolve the file name into the soname if compat.is_freebsd or compat.is_aix or compat.is_openbsd: # On FreeBSD objdump does not show SONAME, and on AIX objdump does not exist, so we just return the lib we # have found. return lib else: dir = os.path.dirname(lib) return os.path.join(dir, _get_so_name(lib))
token_counts: 360 | file_name: bindepend.py | language: Python | path: PyInstaller/depend/bindepend.py | commit_id: 57c520132b4d0ab7bfd5653383ec2602e40088af | repo: pyinstaller | complexity: 21
id: 249,827 | vocab_size: 7 | ast_levels: 8 | nloc: 5 | n_ast_nodes: 28 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 7 | n_whitespaces: 21
fun_name: test_gen_next_while_still_waiting_for_persistence
Reintroduce #14376, with bugfix for monoliths (#14468) * Add tests for StreamIdGenerator * Drive-by: annotate all defs * Revert "Revert "Remove slaved id tracker (#14376)" (#14463)" This reverts commit d63814fd736fed5d3d45ff3af5e6d3bfae50c439, which in turn reverted 36097e88c4da51fce6556a58c49bd675f4cf20ab. This restores the latter. * Fix StreamIdGenerator not handling unpersisted IDs Spotted by @erikjohnston. Closes #14456. * Changelog Co-authored-by: Nick Mills-Barrett <[email protected]> Co-authored-by: Erik Johnston <[email protected]>
https://github.com/matrix-org/synapse.git
def test_gen_next_while_still_waiting_for_persistence(self) -> None:
    id_gen = self._create_id_generator()
26
test_id_generators.py
Python
tests/storage/test_id_generators.py
115f0eb2334b13665e5c112bd87f95ea393c9047
synapse
1
297,971
50
13
22
124
14
0
61
315
connection_is_valid
String formatting and max line length - Part 5 (#84501) Co-authored-by: jjlawren <[email protected]>
https://github.com/home-assistant/core.git
def connection_is_valid(self): server = None try: server = self.connect() except (smtplib.socket.gaierror, ConnectionRefusedError): _LOGGER.exception( ( "SMTP server not found or refused connection (%s:%s). Please check" " the IP address, hostname, and availability of your SMTP server" ), self._server, self._port, ) except smtplib.SMTPAuthenticationError: _LOGGER.exception( "Login not possible. Please check your setting and/or your credentials" ) return False finally: if server: server.quit() return True
72
notify.py
Python
homeassistant/components/smtp/notify.py
f39f3b612a8c1a12504f2f1d54fb1c9872216d12
core
5
138,092
36
15
17
183
19
0
42
141
test_multi_trial_reuse_with_failing
[air/tune] Internal resource management 2 - Ray Tune to use new Ray AIR resource manager (#30016) Includes/depends on #30777 TLDR: This PR refactors Ray Tune's resource management to use a central AIR resource management package instead of the tightly coupled PlacementGroupManager. Ray Tune's resource management currently uses a tightly coupled placement group manager. This leads to a number of shortcomings: - The tight coupling on the manager side (e.g. PG manager keeps track of trials) prevents re-usability - The tight coupling on the trial executor side prevents using different resource management strategies (e.g. shared or budget-based) - It's hard to test independently. Tests for the resource management require a simulated tune setup. To improve stability, extensibility, and maintainability, this PR moves the resource management logic into a central `ray.air.execution.resources` subpackage. The resource management has a simple API that works with `ResourceRequest`s and `AllocatedResources` to manage requested and assigned resources, respectively. The actual resource management can then be anything - per default it is a placement group based manager, but this PR also introduces a PoC budget-based manager that can be plugged in. The PR does not substantially change existing tests, so we can be certain that the new resource model is a fully compatible replacement for the old placement group manager. Signed-off-by: Kai Fricke <[email protected]>
https://github.com/ray-project/ray.git
def test_multi_trial_reuse_with_failing(ray_start_4_cpus_extra): os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "2" register_trainable("foo2", MyResettableClass) [trial1, trial2, trial3, trial4] = tune.run( "foo2", config={ "fail": tune.grid_search([False, True, False, False]), "id": -1, "sleep": 2, }, reuse_actors=True, resources_per_trial={"cpu": 2}, raise_on_failed_trial=False, ).trials assert trial1.last_result["num_resets"] == 0 assert trial3.last_result["num_resets"] == 0 assert trial4.last_result["num_resets"] == 1
113
test_actor_reuse.py
Python
python/ray/tune/tests/test_actor_reuse.py
1510fb2cd631b2776092fb45ee4082e5e65f16f8
ray
1
270,862
19
13
112
102
15
1
21
70
check_graph_consistency
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def check_graph_consistency(tensor=None, method="add_loss", force_raise=False):
    if force_raise or (
        tf.compat.v1.executing_eagerly_outside_functions()
        and hasattr(tensor, "graph")
        and tensor.graph.is_control_flow_graph
    ):
        if method == "activity_regularizer":
            bad_example =
bad_example = """
142
base_layer_utils.py
Python
keras/engine/base_layer_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
8
268,859
5
9
2
32
4
0
5
7
multiply
Refactor `merge.py` into smaller logically organized files hosted under a `merging` directory. PiperOrigin-RevId: 424162837
https://github.com/keras-team/keras.git
def multiply(inputs, **kwargs):
    return Multiply(**kwargs)(inputs)
18
multiply.py
Python
keras/layers/merging/multiply.py
85ccb4e108551b7444213276ffb4c4c09f22f886
keras
1
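A small, hedged usage sketch of the functional multiply helper in the record above (it simply wraps the Multiply layer). The input shapes and variable names below are illustrative, not taken from the source.

# Sketch assuming TensorFlow/Keras is available; element-wise product of two symbolic inputs.
from tensorflow import keras

x1 = keras.Input(shape=(8,))
x2 = keras.Input(shape=(8,))
out = keras.layers.multiply([x1, x2])  # equivalent to Multiply()([x1, x2])
model = keras.Model([x1, x2], out)
model.summary()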
100,503
60
15
23
361
39
0
88
377
_add_actions
bugfix: Preview Tool, ensure all config items are written
https://github.com/deepfakes/faceswap.git
def _add_actions(self, parent, config_key): logger.debug("Adding util buttons") title = config_key.split(".")[1].replace("_", " ").title() btn_frame = ttk.Frame(self._action_frame) btn_frame.pack(padx=5, side=tk.BOTTOM, fill=tk.X) for utl in ("save", "clear", "reload"): logger.debug("Adding button: '%s'", utl) img = get_images().icons[utl] if utl == "save": text = _(f"Save {title} config") action = parent.config_tools.save_config elif utl == "clear": text = _(f"Reset {title} config to default values") action = parent.config_tools.reset_config_to_default elif utl == "reload": text = _(f"Reset {title} config to saved values") action = parent.config_tools.reset_config_to_saved btnutl = ttk.Button(btn_frame, image=img, command=lambda cmd=action: cmd(config_key)) btnutl.pack(padx=2, side=tk.RIGHT) Tooltip(btnutl, text=text, wrap_length=200) logger.debug("Added util buttons")
210
preview.py
Python
tools/preview/preview.py
71c20252c2e747f692289cdefe80ad0d5a456ea6
faceswap
5
336,635
44
12
27
199
22
0
60
100
downsample_2d
Rename variables from single letter to meaningful name fix (#395) Co-authored-by: Rashmi S <[email protected]>
https://github.com/huggingface/diffusers.git
def downsample_2d(x, kernel=None, factor=2, gain=1):
    r
    assert isinstance(factor, int) and factor >= 1
    if kernel is None:
        kernel = [1] * factor

    kernel = np.asarray(kernel, dtype=np.float32)
    if kernel.ndim == 1:
        kernel = np.outer(kernel, kernel)
    kernel /= np.sum(kernel)

    kernel = kernel * gain
    p = kernel.shape[0] - factor
    return upfirdn2d_native(x, torch.tensor(kernel, device=x.device), down=factor, pad=((p + 1) // 2, p // 2))
128
resnet.py
Python
src/diffusers/models/resnet.py
1a431ae886b516d13f49c1f8a1e1e68d3159eab4
diffusers
4
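A hedged, NumPy-only sketch of what the default kernel in downsample_2d above works out to: with kernel=None and factor=2 the code builds [1, 1], takes its outer product, and normalizes it, i.e. a 2x2 box filter, before delegating to upfirdn2d_native. This reproduces only the kernel-construction steps from the record and is not the diffusers API.

import numpy as np

factor, gain = 2, 1
kernel = np.asarray([1] * factor, dtype=np.float32)  # [1, 1]
kernel = np.outer(kernel, kernel)                    # 2x2 matrix of ones
kernel /= np.sum(kernel)                             # normalized box filter
kernel = kernel * gain
print(kernel)                      # [[0.25 0.25] [0.25 0.25]]
print(kernel.shape[0] - factor)    # p = 0, so pad = (0, 0) for factor 2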
135,744
30
11
13
123
16
1
37
147
test_cuda_visible_devices
[Jobs] Allow resource requests for driver (entrypoint command) (#28654) Allows num_cpus, num_gpus and resources to be reserved for the job entrypoint command. This is achieved by passing them in as .options() to the JobSupervisor actor. Adds a friendly error message if the JobSupervisor cannot be scheduled Update docs Add tests Bump API version number if needed and check backwards compatibility test Make CUDA_VISIBLE_DEVICES workaround opt-in only Add test
https://github.com/ray-project/ray.git
async def test_cuda_visible_devices(self, job_manager, resource_kwarg, env_vars): run_cmd = f"python {_driver_script_path('check_cuda_devices.py')}" runtime_env = {"env_vars": env_vars} if resource_kwarg: run_cmd = "RAY_TEST_RESOURCES_SPECIFIED=1 " + run_cmd job_id = await job_manager.submit_job( entrypoint=run_cmd, runtime_env=runtime_env, **resource_kwarg, ) await async_wait_for_condition_async_predicate( check_job_succeeded, job_manager=job_manager, job_id=job_id ) @pytest.mark.asyncio
@pytest.mark.asyncio
63
test_job_manager.py
Python
dashboard/modules/job/tests/test_job_manager.py
e4b211a921698b79452f8c2f4d3e6a34ff58b26b
ray
2
154,492
36
14
9
192
19
0
61
100
deploy_dask_func
FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Jonathan Shi <[email protected]>
https://github.com/modin-project/modin.git
def deploy_dask_func(deployer, axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs):
    result = deployer(axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs)
    ip = get_ip()
    if isinstance(result, pandas.DataFrame):
        return result, len(result), len(result.columns), ip
    elif all(isinstance(r, pandas.DataFrame) for r in result):
        return [i for r in result for i in [r, len(r), len(r.columns), ip]]
    else:
        return [i for r in result for i in [r, None, None, ip]]
136
virtual_partition.py
Python
modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py
d6d503ac7c3028d871c34d9e99e925ddb0746df6
modin
8
309,915
53
18
37
290
34
0
74
593
async_step_client_control
Orphaned MAC addresses breaks UniFi options flow (#64327)
https://github.com/home-assistant/core.git
async def async_step_client_control(self, user_input=None): errors = {} if user_input is not None: self.options.update(user_input) return await self.async_step_statistics_sensors() clients_to_block = {} for client in self.controller.api.clients.values(): clients_to_block[ client.mac ] = f"{client.name or client.hostname} ({client.mac})" selected_clients_to_block = [ client for client in self.options.get(CONF_BLOCK_CLIENT, []) if client in clients_to_block ] return self.async_show_form( step_id="client_control", data_schema=vol.Schema( { vol.Optional( CONF_BLOCK_CLIENT, default=selected_clients_to_block ): cv.multi_select(clients_to_block), vol.Optional( CONF_POE_CLIENTS, default=self.options.get(CONF_POE_CLIENTS, DEFAULT_POE_CLIENTS), ): bool, vol.Optional( CONF_DPI_RESTRICTIONS, default=self.options.get( CONF_DPI_RESTRICTIONS, DEFAULT_DPI_RESTRICTIONS ), ): bool, } ), errors=errors, last_step=False, )
178
config_flow.py
Python
homeassistant/components/unifi/config_flow.py
d4e509422a2d93764586dc9de0789f1deed4cfaa
core
5
81,815
13
12
6
71
9
0
18
84
test_non_job_config_complete
Add WorkflowJob.instance_groups and distinguish from char_prompts. This removes a loop that ran on import; the loop was giving the wrong behavior and initialized too many fields as char_prompts fields. With this, we now enumerate the char_prompts-type fields manually.
https://github.com/ansible/awx.git
def test_non_job_config_complete(self):
    for field_name in JobTemplate.get_ask_mapping().keys():
        if field_name in LaunchTimeConfigBase.SUBCLASS_FIELDS:
            assert not hasattr(LaunchTimeConfigBase, field_name)
        else:
            assert hasattr(LaunchTimeConfigBase, field_name)
43
test_job_launch_config.py
Python
awx/main/tests/functional/models/test_job_launch_config.py
68e11d2b81ccb9aa4189cdbc37739fbd1207b65f
awx
3
282,475
47
16
21
296
22
1
62
224
get_quote
Refactored terminal.py (#1312) * Seperated run_scripts function from main * Added scripts ability * Converted to endswith * Count success and failure * Fixed commands without -h * Handled edge case * Replaced prints with console.print * Fixed main function issue * Tried more things for output * Showed reason for error * Scripts runs successfully * Formatting updates * Made integrated printout look better * Added capturing of stderr * Fixed reset issue * Updated flags * off fix * Added Didier fix * Squashed bugs * Added tests * Allowed for script file to be sent to tests
https://github.com/OpenBB-finance/OpenBBTerminal.git
def get_quote(ticker) -> pd.DataFrame: try: df_fa = fa.quote(ticker, cfg.API_KEY_FINANCIALMODELINGPREP) except ValueError: df_fa = pd.DataFrame() if not df_fa.empty: clean_df_index(df_fa) df_fa.loc["Market cap"][0] = long_number_format(df_fa.loc["Market cap"][0]) df_fa.loc["Shares outstanding"][0] = long_number_format( df_fa.loc["Shares outstanding"][0] ) df_fa.loc["Volume"][0] = long_number_format(df_fa.loc["Volume"][0]) # Check if there is a valid earnings announcement if df_fa.loc["Earnings announcement"][0]: earning_announcement = datetime.strptime( df_fa.loc["Earnings announcement"][0][0:19], "%Y-%m-%dT%H:%M:%S" ) df_fa.loc["Earnings announcement"][ 0 ] = f"{earning_announcement.date()} {earning_announcement.time()}" return df_fa @log_start_end(log=logger)
@log_start_end(log=logger)
158
fmp_model.py
Python
gamestonk_terminal/stocks/fundamental_analysis/financial_modeling_prep/fmp_model.py
e22c48ac9fd518f28527e9f82e5b89741b750b8c
OpenBBTerminal
4
289,733
8
9
4
38
5
0
8
33
async_mqtt_connect
Add typing hints for MQTT mixins (#80702) * Add typing hints for MQTT mixins * Follow up comments * config_entry is always set * typing discovery_data - substate None assignment * Rename `config[CONF_DEVICE]` -> specifications
https://github.com/home-assistant/core.git
def async_mqtt_connect(self) -> None:
    if not self.hass.is_stopping:
        self.async_write_ha_state()
21
mixins.py
Python
homeassistant/components/mqtt/mixins.py
2f1138562720cd50343d2fedd4981913a9ef6bd9
core
2
20,054
10
7
2
23
2
0
10
19
distro_release_info
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def distro_release_info():
    # type: () -> Dict[str, str]
    return _distro.distro_release_info()
11
distro.py
Python
pipenv/patched/notpip/_vendor/distro.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
1
266,103
21
14
13
246
21
0
39
166
apply
Closes #10851: New staging mechanism (#10890) * WIP * Convert checkout() context manager to a class * Misc cleanup * Drop unique constraint from Change model * Extend staging tests * Misc cleanup * Incorporate M2M changes * Don't cancel wipe out creation records when an object is deleted * Rename Change to StagedChange * Add documentation for change staging
https://github.com/netbox-community/netbox.git
def apply(self): if self.action == ChangeActionChoices.ACTION_CREATE: instance = deserialize_object(self.model, self.data, pk=self.object_id) logger.info(f'Creating {self.model._meta.verbose_name} {instance}') instance.save() if self.action == ChangeActionChoices.ACTION_UPDATE: instance = deserialize_object(self.model, self.data, pk=self.object_id) logger.info(f'Updating {self.model._meta.verbose_name} {instance}') instance.save() if self.action == ChangeActionChoices.ACTION_DELETE: instance = self.model.objects.get(pk=self.object_id) logger.info(f'Deleting {self.model._meta.verbose_name} {instance}') instance.delete()
121
staging.py
Python
netbox/extras/models/staging.py
a5308ea28e851a4ddb65a4e7ca2297b641e5891f
netbox
4
267,150
14
9
3
75
9
0
14
80
find_plugin
expand ansible-doc coverage (#74963) * Expand ansible-doc to tests/filters and fix existing issues enable filter/test docs if in single file or companion yaml add docs for several filters/tests plugins allow .yml companion for docs for other plugins, must be colocated verify plugins are valid (not modules, cannot) fix 'per collection' filtering limit old style deprecation (_ prefix) to builtin/legacy start move to pathlib for saner path handling moved some funcitons, kept backwards compat shims with deprecation notice Co-authored-by: Abhijeet Kasurde <[email protected]> Co-authored-by: Felix Fontein <[email protected]> Co-authored-by: Sandra McCann <[email protected]>
https://github.com/ansible/ansible.git
def find_plugin(self, name, mod_type='', ignore_deprecated=False, check_aliases=False, collection_list=None):
    return super(Jinja2Loader, self).find_plugin(name, mod_type=mod_type, ignore_deprecated=ignore_deprecated,
                                                 check_aliases=check_aliases, collection_list=collection_list)
52
loader.py
Python
lib/ansible/plugins/loader.py
b439e41a915ccec0ccbabecc966919ea406db74e
ansible
1
285,679
10
10
13
36
6
0
11
30
get_anchor_yield_reserve
Next release : reports on steroids (#2349) * fix gov tests * refactor insider * new virtual path extraction * removed some symbol default params as they're considered critical * little adjustments * portfolio refactor * merge API factory * add helpers, stocks, crypto, forex * minor forex changes * include forex api paths * add 2 missing forex funcs * portfolio brokers refactor * display help on api func call * add econometrics virtual paths to api * add api unit test * fixed report for the new api * minor portfolio refactorings * added gdapps * anchor_yield path * some more crypto path fixes * small change * fixed wrong param * minor fixes * wip - inital commit for forex report * add bw as a model, we'll get better solution afterwards * added ema with dummy model as it adds great functionality to the report * minor fixes * wip - added functions to forex report * add feedparser news path * add new virtual paths to api * adding commands to equity report * revert to old paths, new ones were breaking * Add in very basic ETF report * Add candle chart to ETF report * add etf load * allow use of candle without data * add raw to candle * added forex report * ongoing equity report * equity report change * fix some portfolio bugs and add docstrings * include portfolio paths and coin class * add crypto paths * change event dates to str * starting economy report * window for limit * equity report and refactor newsapi * add helper to api * update on economy report * equity report * update economy report * refactor some docstrings * change maturities helper * refactor newsapi * refactor futures command * add some sauce to ycrv plot * black * update report * refactor alphavantage * refactor wsj * update economy report * ycrv tenor * map avaiable_indices * map economy helpers * fix econdb docstring * add plots on economy report * minor fixes * wip - crypto report * update economy report * added same default args as view * added view to explicity use chart=True when suing the api * adjustments - removed rich tables to use only df * final version economy report * change report name * equity report for review * linting * add etf symbols endpoint * incorporate feedback economy report * fix reports launch by adding tag to economy report * fix equity bug * remove analyst name * fix * fix news * make links hyperlinks for equity * click links * fixed arg name * improved news * small improves * Fix light terminal stylesheet that would prevent using it in notebooks (#2473) * improved report * run reports in installer * fix #2209 * minor ycrv refactoring * refactor portfolio/holdv virtual path * refactor benchmark trades * fix events args * adapt economy report to changes * fix portfolio controller bug * holdv refactor * refactor perf command * start portfolio report * remove perf view * refactor holp * add textwrap3 to poetry (doesn't solve the error) * fix equity after merge * add some rolling commands * fix equity after save button * improved crypto report, plus minor fixes * minor fixes on the reports * add maxdd and distr * refactor qa * var command * refactor qa expected shortfall * add es command * add es command * fix qa percentile bug * fix economy rendering * refactor qa omega * add om command * add summary command * add dret command * add mret command * add yret command * add metrics * add allocs to report * remove bro and po commands, add later * fixed some tests * adjustments to crypto report * Fix docstring for VSCode Added a note about installing Jupyter PowerToys extension for optimal API usage in 
Jupyter VSCode, in the API_README.md. * minor adjustment * remove nft calendar model virtual paths * Add in Portfolio report * fix external axes portfolio view * Update portfolio report with rolling plots * Details for ETF and Portfolio * fix economy report * change analyst to openbb * floppy * fixed unmatched axis in reports * Speed up tests * fix file and load on po * get_news output * add some po paths * Add integration tests for Reports menu * refactor maxsharpe * open maxsharpe * open minrisk * open maxutil * open maxret * Added fixes * black * remove useless views * Fixed small issue * refactor ef * open ef api * portfolio optimization report * Added fixes * unblock api loading * add more endpoints * update po report * unblock api loading * update po report * expose herc * expose property endpoint * Added fixes * More api fixes * flake8 * Fixed some mypy * news api model * flake8 * mypy fix * mypy * black * pylint * fix tests * markdown * markdown * Added fixes * fix economy report * merge * fix economy report * remove empty notebook * expose nco * remove jupyter notebook * expose plot endpoint * remove po report, just used for tests * api v paths plot * remove api_old * change loading msg Co-authored-by: montezdesousa <[email protected]> Co-authored-by: hjoaquim <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: Om Gupta <[email protected]> Co-authored-by: minhhoang1023 <[email protected]> Co-authored-by: JerBouma <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]> Co-authored-by: Om Gupta <[email protected]> Co-authored-by: Diogo Sousa <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: northern-64bit <[email protected]> Co-authored-by: colin99d <[email protected]> Co-authored-by: Minh Hoang <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def get_anchor_yield_reserve() -> pd.DataFrame:
    df = get_history_asset_from_terra_address(
        address="terra1tmnqgvg567ypvsvk6rwsga3srp7e3lg6u0elp8"
    )
    return df
19
terraengineer_model.py
Python
openbb_terminal/cryptocurrency/defi/terraengineer_model.py
72b0a9f1ee8b91ad9fd9e76d80d2ccab51ee6d21
OpenBBTerminal
1
267,385
22
11
2
59
7
0
24
31
get_type_associations
ansible-test - Code cleanup. This helps prepare for a future pylint upgrade.
https://github.com/ansible/ansible.git
def get_type_associations(base_type, generic_base_type):
    # type: (t.Type[TBase], t.Type[TValue]) -> t.List[t.Tuple[t.Type[TValue], t.Type[TBase]]]
    return [item for item in [(get_generic_type(sc_type, generic_base_type), sc_type) for sc_type in get_subclasses(base_type)] if item[1]]
39
util.py
Python
test/lib/ansible_test/_internal/util.py
86779cc90376ea70bafa7044b12ce5132409fd63
ansible
4
20,335
5
9
2
37
6
0
5
19
_translate_parts
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def _translate_parts(self, value):
    return value.translate(_escape_html_table).split('\n')
20
html.py
Python
pipenv/patched/notpip/_vendor/pygments/formatters/html.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
1
290,569
20
9
14
114
10
0
49
107
test_cover_positions
Implemented RestoreEntity for Dynalite (#73911) * Implemented RestoreEntity Merged commit conflict * removed accidental change * Update homeassistant/components/dynalite/dynalitebase.py Co-authored-by: Erik Montnemery <[email protected]> * added tests for the state * added tests for switch state * moved to ATTR_x and STATE_x instead of strings some fixes to test_cover * moved blind to DEVICE_CLASS_BLIND * used correct constant instead of deprecated * Implemented RestoreEntity * removed accidental change * added tests for the state * added tests for switch state * moved to ATTR_x and STATE_x instead of strings some fixes to test_cover * fixed isort issue from merge Co-authored-by: Erik Montnemery <[email protected]>
https://github.com/home-assistant/core.git
async def test_cover_positions(hass, mock_device): update_func = await create_entity_from_device(hass, mock_device) await check_cover_position( hass, update_func, mock_device, True, False, False, STATE_CLOSING ) await check_cover_position( hass, update_func, mock_device, False, True, False, STATE_OPENING ) await check_cover_position( hass, update_func, mock_device, False, False, True, STATE_CLOSED ) await check_cover_position( hass, update_func, mock_device, False, False, False, STATE_OPEN )
85
test_cover.py
Python
tests/components/dynalite/test_cover.py
b6c27585c74294fd4cc4d3a2640cf98ef6b4c343
core
1
48,376
21
10
11
84
10
0
24
81
_get_multiprocessing_start_method
Add typing for airflow/configuration.py (#23716) * Add typing for airflow/configuration.py The configuration.py did not have typing information, which made it rather difficult to reason about, especially as it went through a few changes in the past that made it rather complex to understand. This PR adds typing information all over the configuration file.
https://github.com/apache/airflow.git
def _get_multiprocessing_start_method(self) -> str:
    if conf.has_option('core', 'mp_start_method'):
        return conf.get_mandatory_value('core', 'mp_start_method')

    method = multiprocessing.get_start_method()
    if not method:
        raise ValueError("Failed to determine start method")
    return method
45
mixins.py
Python
airflow/utils/mixins.py
71e4deb1b093b7ad9320eb5eb34eca8ea440a238
airflow
3
34,981
17
12
4
50
6
0
18
50
get_subsampled_output_lengths
Add TFSpeech2Text (#15113) * Add wrapper classes * convert inner layers to tf * Add TF Encoder and Decoder layers * TFSpeech2Text models * Loadable model * TF model with same outputs as PT model * test skeleton * correct tests and run the fixup * correct attention expansion * TFSpeech2Text past_key_values with TF format
https://github.com/huggingface/transformers.git
def get_subsampled_output_lengths(self, input_lengths):
    for _ in range(self.num_conv_layers):
        input_lengths = (input_lengths - 1) // 2 + 1

    return input_lengths
28
test_modeling_tf_speech_to_text.py
Python
tests/test_modeling_tf_speech_to_text.py
8406fa6dd538c6e1b5a218b119e8efd771023112
transformers
2
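A short worked example of the length formula used by get_subsampled_output_lengths above: each stride-2 convolutional layer maps a sequence length L to (L - 1) // 2 + 1. The starting length (20) and layer count (2) below are illustrative.

input_lengths = 20
for _ in range(2):  # assume num_conv_layers = 2
    input_lengths = (input_lengths - 1) // 2 + 1
print(input_lengths)  # 20 -> 10 -> 5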
286,052
58
15
39
304
25
1
72
259
us_indices
Enhances error handling in economy menu (#2819) * Lots of bug fixes * Fixed issue
https://github.com/OpenBB-finance/OpenBBTerminal.git
def us_indices() -> pd.DataFrame: url = ( "https://www.wsj.com/market-data/stocks?id=%7B%22application%22%3A%22WSJ%22%2C%22instruments%22%3A%5B%7B" "%22symbol%22%3A%22INDEX%2FUS%2F%2FDJIA%22%2C%22name%22%3A%22DJIA%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F" "%2FCOMP%22%2C%22name%22%3A%22Nasdaq%20Composite%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FSPX%22%2C%22name" "%22%3A%22S%26P%20500%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FDWCF%22%2C%22name%22%3A%22DJ%20Total%20Stock" "%20Market%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FRUT%22%2C%22name%22%3A%22Russell%202000%22%7D%2C%7B" "%22symbol%22%3A%22INDEX%2FUS%2F%2FNYA%22%2C%22name%22%3A%22NYSE%20Composite%22%7D%2C%7B%22symbol%22%3A%22INDEX" "%2FUS%2F%2FB400%22%2C%22name%22%3A%22Barron%27s%20400%22%7D%2C%7B%22symbol%22%3A%22INDEX%2FUS%2F%2FVIX%22%2C%22" "name%22%3A%22CBOE%20Volatility%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FDJIA%20FUTURES%22%2C%22name%22%3A%" "22DJIA%20Futures%22%7D%2C%7B%22symbol%22%3A%22FUTURE%2FUS%2F%2FS%26P%20500%20FUTURES%22%2C%22name%22%3A%22S%26P" "%20500%20Futures%22%7D%5D%7D&type=mdc_quotes" ) try: response = requests.get( url, headers={"User-Agent": get_user_agent()}, ) except requests.exceptions.RequestException: console.print("[red]Could not retrieve data from wsj.[/red]\n") return pd.DataFrame() data = response.json() name, last_price, net_change, percent_change = [], [], [], [] for entry in data["data"]["instruments"]: name.append(entry["formattedName"]) last_price.append(entry["lastPrice"]) net_change.append(entry["priceChange"]) percent_change.append(entry["percentChange"]) indices = pd.DataFrame( {" ": name, "Price": last_price, "Chg": net_change, "%Chg": percent_change} ) return indices @log_start_end(log=logger)
@log_start_end(log=logger)
162
wsj_model.py
Python
openbb_terminal/economy/wsj_model.py
4304a5c664700cf083f1432fa7523f051492754c
OpenBBTerminal
3
176,384
32
11
9
182
19
0
37
76
test_to_numpy_array_structured_dtype_nonedge_ary
Add structured dtypes to `to_numpy_array` (#5324) * Add basic test for supporting multi-attr adjacency. * WIP: sloppy implementation of multiattr adjacency in to_numpy_array. Conditionals could be improved. * Reorg conditionals. * Test to_numpy_array raises with structured dtype for multigraphs. * Fix default value handling for structured types. * Add tests for dtypes with single field. * Parametrize field tests for directed/undirected inputs. * Handle ambiguous case: structured dtype + specified weight. * Add test for multiple fields that may/not have corresponding edge attrs. * Updated docstring. * Add tests with nonedge values + structured dtypes.
https://github.com/networkx/networkx.git
def test_to_numpy_array_structured_dtype_nonedge_ary(G):
    G.add_edge(0, 1, weight=10)
    dtype = np.dtype([("weight", float), ("cost", float)])
    nonedges = np.array([(0, np.inf)], dtype=dtype)
    A = nx.to_numpy_array(G, dtype=dtype, weight=None, nonedge=nonedges)
    for attr in dtype.names:
        nonedge = nonedges[attr]
        expected = nx.to_numpy_array(G, dtype=float, weight=attr, nonedge=nonedge)
        npt.assert_array_equal(A[attr], expected)
122
test_convert_numpy.py
Python
networkx/tests/test_convert_numpy.py
d2278b4c3402c735a31e266adde75ecc2eeb98eb
networkx
2
323,154
5
7
2
21
3
0
5
19
_new_epoch
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tasks. * add trainer prototype. * init version for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answering tasks. * fix as per reviews. Co-authored-by: Zeyu Chen <[email protected]>
https://github.com/PaddlePaddle/PaddleNLP.git
def _new_epoch(self):
    self.should_epoch_stop = False
11
trainer_callback.py
Python
paddlenlp/trainer/trainer_callback.py
44a290e94d1becd1f09fddc3d873f9e19c9d6919
PaddleNLP
1
100,333
18
13
9
119
23
0
18
139
add_option_save
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
https://github.com/deepfakes/faceswap.git
def add_option_save(self):
    logger.debug("Adding save option")
    btnsave = ttk.Button(self.optsframe,
                         image=get_images().icons["save"],
                         command=self.save_items)
    btnsave.pack(padx=2, side=tk.RIGHT)
    Tooltip(btnsave,
            text=_(f"Save {self.tabname}(s) to file"),
            wrap_length=200)
69
display_page.py
Python
lib/gui/display_page.py
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
faceswap
1
290,947
8
9
4
49
7
0
9
30
async_press
AVM Fritz!Box SmartHome: Integrate Templates (#81885)
https://github.com/home-assistant/core.git
async def async_press(self) -> None:
    await self.hass.async_add_executor_job(self.apply_template)
    await self.coordinator.async_refresh()
27
button.py
Python
homeassistant/components/fritzbox/button.py
3b783a85c30001d7a8cf60c54932cb2b6616de4d
core
1
261,025
40
12
13
199
17
0
55
120
softmax
ENH Adds Array API support to LinearDiscriminantAnalysis (#22554) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def softmax(X, copy=True):
    xp, is_array_api = get_namespace(X)
    if copy:
        X = xp.asarray(X, copy=True)
    max_prob = xp.reshape(xp.max(X, axis=1), (-1, 1))
    X -= max_prob

    if xp.__name__ in {"numpy", "numpy.array_api"}:
        # optimization for NumPy arrays
        np.exp(X, out=np.asarray(X))
    else:
        # array_api does not have `out=`
        X = xp.exp(X)

    sum_prob = xp.reshape(xp.sum(X, axis=1), (-1, 1))
    X /= sum_prob
    return X
126
extmath.py
Python
sklearn/utils/extmath.py
2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b
scikit-learn
3
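For reference, a NumPy-only restatement of the numerically stable softmax used in the record above: subtracting the row-wise maximum before exponentiating avoids overflow for large logits. This is a hedged sketch, not the scikit-learn API itself.

import numpy as np

def softmax_rows(X):
    X = X - X.max(axis=1, keepdims=True)  # same max-subtraction trick as the record
    e = np.exp(X)
    return e / e.sum(axis=1, keepdims=True)

print(softmax_rows(np.array([[1000.0, 1001.0]])))  # [[0.26894142 0.73105858]]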
64,677
39
15
17
186
23
1
47
29
get_delivered_serial_nos
fix(pos): remove returned sr. nos. from pos reserved sr. nos. list
https://github.com/frappe/erpnext.git
def get_delivered_serial_nos(serial_nos): from frappe.query_builder.functions import Coalesce SerialNo = frappe.qb.DocType("Serial No") serial_nos = get_serial_nos(serial_nos) query = ( frappe.qb .from_(SerialNo) .select(SerialNo.name) .where( (SerialNo.name.isin(serial_nos)) & (Coalesce(SerialNo.delivery_document_type, "") != "") ) ) result = query.run() if result and len(result) > 0: delivered_serial_nos = [row[0] for row in result] return delivered_serial_nos @frappe.whitelist()
@frappe.whitelist()
107
serial_no.py
Python
erpnext/stock/doctype/serial_no/serial_no.py
f2ae63cbfdc0262f45ccae5991927e49e5c38c4c
erpnext
4
42,246
12
9
3
45
7
0
12
21
crayon_palette
Convert color palette docstrings to notebooks (#3034) * Convert color palette docstrings to notebooks and rerun all with py310 kernel * Add v0.12.1 release notes to index * Improve failure mode when ipywidgets is not involved * Update palettes docstrings * Remove all other doctest-style examples * Remove doctest-oriented testing infrastructure * Mention in release notes * Skip colormap patch test on matplotlib's where it's not relevant * Use more robust approach to mpl backcompat
https://github.com/mwaskom/seaborn.git
def crayon_palette(colors):
    palette = [crayons[name] for name in colors]
    return color_palette(palette, len(palette))
28
palettes.py
Python
seaborn/palettes.py
e644793f0ac2b1be178425f20f529121f37f29de
seaborn
2
294,627
24
13
10
134
15
1
27
80
test_form_stream_invalidimage
Generic IP Camera configflow 2 (#52360) Co-authored-by: J. Nick Koston <[email protected]>
https://github.com/home-assistant/core.git
async def test_form_stream_invalidimage(hass, mock_av_open, user_flow): respx.get("http://127.0.0.1/testurl/1").respond(stream=b"invalid") with mock_av_open: result2 = await hass.config_entries.flow.async_configure( user_flow["flow_id"], TESTDATA, ) await hass.async_block_till_done() assert result2["type"] == "form" assert result2["errors"] == {"still_image_url": "invalid_still_image"} @respx.mock
@respx.mock
70
test_config_flow.py
Python
tests/components/generic/test_config_flow.py
c1a2be72fc8b76b55cfde1823c5688100e397369
core
1
42,167
59
10
5
537
46
0
65
206
plot
Cleanup and merge #2909 (#2955) * Sorting boxenplot * Boxenplot separate kws Removed `kwargs` which were used to draw the median lines and scatter plot of outliers previously. Added separate kwargs - `box_kws`, `line_kws` (drawing the median lines) and `flier_kws` (for the scatter of outliers). Updated the matching docstring. * In the previous commit most code on the categorical.py file was auto-reformatted. Here it is reverted and only the changes to `seaborn.categorical.boxenplot` and `seaborn.categorical._LVPlotter` are kept. * Reinserted blank lines in docstring. * - Removed redundant indention in `boxenplot` function - Removed commented out code in the `plot` function * Removed default kwargs from `plot` * Removing commented out code * Reverted to ternary expressions * Replaced default kwargs assignment to box_kws Disentangled the nested for loop for default kwargs assignment * Removed remaining `kwargs` item in docstring * Resolved incorrect reference in the box_kws item on the docstring. * Resolved incorrect descriptions for box_kws, line_kws and flier_kws. * Changed line_kws update to source arguments frmo box_kws if there is only a single data point. * Added line_kws test * Added flier_kws test, renamed line_kws test * Tests - further work is required in expanding the tests. Two current issues (a) most are not testing when multiple categories are used on the x-axis, but only a single one. (b) the tests for the box_kws functionality are very slim. * Fix lint issues * Fix pinned tests * Update release notes * Cleanup boxenplot colors test Co-authored-by: EitanHemed <[email protected]>
https://github.com/mwaskom/seaborn.git
def plot(self, ax, box_kws, flier_kws, line_kws): self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws) self.annotate_axes(ax) if self.orient == "h": ax.invert_yaxis() _categorical_docs = dict( # Shared narrative docs categorical_narrative=dedent(), new_categorical_narrative=dedent(), # Shared function parameters input_params=dedent(), string_input_params=dedent(), categorical_data=dedent(), long_form_data=dedent(), order_vars=dedent(), stat_api_params=dedent(), orient=dedent(), color=dedent(), palette=dedent(), hue_norm=dedent(), saturation=dedent(), capsize=dedent(), errwidth=dedent(), width=dedent(), dodge=dedent(), linewidth=dedent(), native_scale=dedent(), formatter=dedent(), legend=dedent(), ax_in=dedent(), ax_out=dedent(), # Shared see also boxplot=dedent(), violinplot=dedent(), stripplot=dedent(), swarmplot=dedent(), barplot=dedent(), countplot=dedent(), pointplot=dedent(), catplot=dedent(), boxenplot=dedent(), ) _categorical_docs.update(_facet_docs)
44
categorical.py
Python
seaborn/categorical.py
b1db0f72627e9fae8fda261514392d53906384cf
seaborn
2
263,869
81
12
7
110
17
0
111
266
exec_module
loader: remove pyimod01_os_path and replace its use with os.path Add os (and its dependencies) to compat.PY3_BASE_MODULES so that they are collected into base_library.zip. As this archive is available during the bootstrap, we can now use full functionality of os.path in our bootstrap scripts/modules instead of having to rely on our own pyimod01_os_path.
https://github.com/pyinstaller/pyinstaller.git
def exec_module(self, module): spec = module.__spec__ bytecode = self.get_code(spec.loader_state) # Set by the import machinery assert hasattr(module, '__file__') # If `submodule_search_locations` is not None, this is a package; set __path__. if spec.submodule_search_locations is not None: # Since PYTHONHOME is set in bootloader, 'sys.prefix' points to the correct path where PyInstaller should # find bundled dynamic libraries. In one-file mode it points to the tmp directory where bundled files are # extracted at execution time. # # __path__ cannot be empty list because 'wx' module prepends something to it. It cannot contain value # 'sys.prefix' because 'xml.etree.cElementTree' fails otherwise. # # Set __path__ to point to 'sys.prefix/package/subpackage'. module.__path__ = [os.path.dirname(module.__file__)] exec(bytecode, module.__dict__)
62
pyimod02_importers.py
Python
PyInstaller/loader/pyimod02_importers.py
c115bce4dd1eb43c3e1ad52ca6743ed6fd3f0a70
pyinstaller
2
200,399
29
11
7
95
13
0
34
94
perpendicular_segment
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
https://github.com/sympy/sympy.git
def perpendicular_segment(self, p):
    p = Point(p, dim=self.ambient_dimension)
    if p in self:
        return p
    l = self.perpendicular_line(p)
    # The intersection should be unique, so unpack the singleton
    p2, = Intersection(Line(self.p1, self.p2), l)
    return Segment(p, p2)
60
line.py
Python
sympy/geometry/line.py
24f1e7730119fe958cc8e28411f790c9a5ec04eb
sympy
2
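A hedged usage sketch of Line.perpendicular_segment from the record above, using public SymPy objects; the points are illustrative. Dropping a perpendicular from (1, 1) onto the x-axis should give the segment down to its foot at (1, 0).

from sympy import Line, Point

line = Line(Point(0, 0), Point(1, 0))          # the x-axis
seg = line.perpendicular_segment(Point(1, 1))
print(seg)  # expected: Segment2D(Point2D(1, 1), Point2D(1, 0))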
85,851
124
16
39
442
33
0
208
743
get_autoassign_owners
feat(codeowners): Add codeowners type to group owners (#38813)
https://github.com/getsentry/sentry.git
def get_autoassign_owners(cls, project_id, data, limit=2): from sentry.models import ProjectCodeOwners from sentry.models.groupowner import OwnerRuleType with metrics.timer("projectownership.get_autoassign_owners"): ownership = cls.get_ownership_cached(project_id) codeowners = ProjectCodeOwners.get_codeowners_cached(project_id) assigned_by_codeowners = False if not (ownership or codeowners): return False, [], assigned_by_codeowners, None, [] if not ownership: ownership = cls(project_id=project_id) ownership_rules = cls._matching_ownership_rules(ownership, project_id, data) codeowners_rules = ( cls._matching_ownership_rules(codeowners, project_id, data) if codeowners else [] ) if not (codeowners_rules or ownership_rules): return ownership.auto_assignment, [], assigned_by_codeowners, None, [] ownership_actors = cls._find_actors(project_id, ownership_rules, limit) codeowners_actors = cls._find_actors(project_id, codeowners_rules, limit) # Can happen if the ownership rule references a user/team that no longer # is assigned to the project or has been removed from the org. if not (ownership_actors or codeowners_actors): return ownership.auto_assignment, [], assigned_by_codeowners, None, [] # Ownership rules take precedence over codeowner rules. actors = [*ownership_actors, *codeowners_actors][:limit] actor_source = [ *([OwnerRuleType.OWNERSHIP_RULE.value] * len(ownership_actors)), *([OwnerRuleType.CODEOWNERS.value] * len(codeowners_actors)), ][:limit] # Only the first item in the list is used for assignment, the rest are just used to suggest suspect owners. # So if ownership_actors is empty, it will be assigned by codeowners_actors if len(ownership_actors) == 0: assigned_by_codeowners = True # The rule that would be used for auto assignment auto_assignment_rule = ( codeowners_rules[0] if assigned_by_codeowners else ownership_rules[0] ) from sentry.models import ActorTuple return ( ownership.auto_assignment, ActorTuple.resolve_many(actors), assigned_by_codeowners, auto_assignment_rule, actor_source, )
291
projectownership.py
Python
src/sentry/models/projectownership.py
56a4e17eb1a20773ef60d62a364daad4980bd643
sentry
11
337,589
20
13
11
130
18
0
24
76
test_load_states_by_steps
Refactor tests to use accelerate launch (#373) Co-authored-by: Sylvain Gugger <[email protected]>
https://github.com/huggingface/accelerate.git
def test_load_states_by_steps(self):
    testargs = f.split()
    output = subprocess.run(
        self._launch_args + testargs, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    ).stdout
    self.assertNotIn("epoch 0:", output)
    self.assertIn("epoch 1:", output)
    self.assertIn("epoch 2:", output)
67
test_examples.py
Python
tests/test_examples.py
23c0341262bd396a3ba9265614b3818d6e08a6c1
accelerate
1
118,546
37
10
18
153
26
0
40
217
_maybe_create_scriptrunner
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
https://github.com/streamlit/streamlit.git
def _maybe_create_scriptrunner(self): if ( self._state == AppSessionState.SHUTDOWN_REQUESTED or self._scriptrunner is not None or not self._script_request_queue.has_request ): return # Create the ScriptRunner, attach event handlers, and start it self._scriptrunner = ScriptRunner( session_id=self.id, session_data=self._session_data, enqueue_forward_msg=self.enqueue, client_state=self._client_state, request_queue=self._script_request_queue, session_state=self._session_state, uploaded_file_mgr=self._uploaded_file_mgr, ) self._scriptrunner.on_event.connect(self._on_scriptrunner_event) self._scriptrunner.start()
100
app_session.py
Python
lib/streamlit/app_session.py
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
4
133,741
32
15
15
217
20
0
41
150
multi_log_probs_from_logits_and_actions
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def multi_log_probs_from_logits_and_actions(policy_logits, actions, dist_class, model):
    log_probs = []
    for i in range(len(policy_logits)):
        p_shape = tf.shape(policy_logits[i])
        a_shape = tf.shape(actions[i])
        policy_logits_flat = tf.reshape(
            policy_logits[i], tf.concat([[-1], p_shape[2:]], axis=0)
        )
        actions_flat = tf.reshape(actions[i], tf.concat([[-1], a_shape[2:]], axis=0))
        log_probs.append(
            tf.reshape(
                dist_class(policy_logits_flat, model).logp(actions_flat), a_shape[:2]
            )
        )

    return log_probs
144
vtrace_tf.py
Python
rllib/agents/impala/vtrace_tf.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
2
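The record above relies on a flatten / evaluate / reshape-back pattern: [T, B, ...] logits are flattened to [T*B, ...], the action distribution's log-probability is computed per flattened element, and the result is reshaped back to [T, B]. Below is a hedged NumPy sketch of that shape round-trip only; the distribution call is replaced by a stand-in and the shapes are illustrative, so this is not the RLlib API.

import numpy as np

T, B, A = 4, 3, 5                    # time, batch, logit dimensions (illustrative)
logits = np.random.randn(T, B, A)
flat = logits.reshape(-1, A)         # [T*B, A], analogous to policy_logits_flat
per_elem = flat.max(axis=-1)         # stand-in for dist_class(...).logp(actions_flat)
back = per_elem.reshape(T, B)        # restored to the [T, B] layout
print(back.shape)                    # (4, 3)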
95,536
8
9
6
83
8
0
9
51
capture_screenshots
test(dashboard-layout): Add missing acceptance tests (#31099)
https://github.com/getsentry/sentry.git
def capture_screenshots(self, screenshot_name):
    self.page.wait_until_loaded()
    self.browser.snapshot(screenshot_name)
    self.browser.refresh()
    self.page.wait_until_loaded()
    self.browser.snapshot(f"{screenshot_name} (refresh)")
46
test_organization_dashboards.py
Python
tests/acceptance/test_organization_dashboards.py
cc156785c3f63b942597d84fe458c37bdcf92ec2
sentry
1
294,028
22
12
10
109
12
0
29
104
offset
Support multiple Plex servers in media browser (#68321)
https://github.com/home-assistant/core.git
def offset(self) -> int:
    if offset := self._params.get("offset", 0):
        return offset * 1000

    resume = self._params.get("resume", False)
    if isinstance(resume, str):
        resume = bool(strtobool(resume))
    if resume:
        return self.media.viewOffset
    return 0
66
models.py
Python
homeassistant/components/plex/models.py
653305b998dd033365576db303b32dd5df3a6c54
core
4
87,081
64
16
27
311
28
0
76
373
test_cannot_update_dynamic_sampling_config_on_am2_plan
feat(ds): Handle GET and PUT in project details for v2 dynamic sampling [TET-475] (#40181) Ensures that when new AM2 plan flag is enabled GET request does not return `dynamicSampling` data in response, and for PUT request guards against storing `dynamicSampling` data. Also, handles popping `dynamicSampling` data from response if a PUT request is made to update some other project fields
https://github.com/getsentry/sentry.git
def test_cannot_update_dynamic_sampling_config_on_am2_plan(self): dynamic_sampling = _dyn_sampling_data() project = self.project # force creation # Update project adding three rules project.update_option("sentry:dynamic_sampling", dynamic_sampling) self.login_as(self.user) token = ApiToken.objects.create(user=self.user, scope_list=["project:write"]) authorization = f"Bearer {token.token}" url = reverse( "sentry-api-0-project-details", kwargs={ "organization_slug": self.project.organization.slug, "project_slug": self.project.slug, }, ) data = { "dynamicSampling": { "rules": [ {**dynamic_sampling["rules"][0], "active": False}, dynamic_sampling["rules"][1], dynamic_sampling["rules"][2], ] } } with Feature({"organizations:dynamic-sampling-basic": True}): resp = self.client.put(url, format="json", HTTP_AUTHORIZATION=authorization, data=data) assert resp.status_code == 403 assert resp.json()["detail"] == ["dynamicSampling is not a valid field"]
180
test_project_details.py
Python
tests/sentry/api/endpoints/test_project_details.py
8c51b98545d71ed7ef0b3b924db13461e924023a
sentry
1
267,952
19
11
11
119
13
0
21
107
to_dict
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def to_dict(self) -> t.Dict[str, t.Any]:
    value: t.Dict[str, t.Any] = dict(
        host_ip=self.host_ip,
        names=self.names,
    )

    if self.ports:
        value.update(ports=self.ports)

    if self.forwards:
        value.update(forwards=self.forwards)

    return value
77
containers.py
Python
test/lib/ansible_test/_internal/containers.py
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
3
270,139
20
11
11
83
10
0
22
75
get_word_index
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def get_word_index(path="reuters_word_index.json"):
    origin_folder = (
        "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    )
    path = get_file(
        path,
        origin=origin_folder + "reuters_word_index.json",
        file_hash="4d44cc38712099c9e383dc6e5f11a921",
    )
    with open(path) as f:
        return json.load(f)
45
reuters.py
Python
keras/datasets/reuters.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
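A hedged usage sketch for get_word_index above: it downloads (or reuses a cached copy of) reuters_word_index.json and returns a plain dict mapping words to integer indices. The snippet assumes Keras is installed and the download location is reachable.

from keras.datasets import reuters

word_index = reuters.get_word_index()  # dict: word -> integer index
print(len(word_index), word_index.get("the"))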
20,414
120
24
53
468
29
0
195
1,392
get_tokens_unprocessed
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def get_tokens_unprocessed(self, text, stack=('root',)): pos = 0 tokendefs = self._tokens statestack = list(stack) statetokens = tokendefs[statestack[-1]] while 1: for rexmatch, action, new_state in statetokens: m = rexmatch(text, pos) if m: if action is not None: if type(action) is _TokenType: yield pos, action, m.group() else: yield from action(self, m) pos = m.end() if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': if len(statestack) > 1: statestack.pop() elif state == '#push': statestack.append(statestack[-1]) else: statestack.append(state) elif isinstance(new_state, int): # pop, but keep at least one state on the stack # (random code leading to unexpected pops should # not allow exceptions) if abs(new_state) >= len(statestack): del statestack[1:] else: del statestack[new_state:] elif new_state == '#push': statestack.append(statestack[-1]) else: assert False, "wrong state def: %r" % new_state statetokens = tokendefs[statestack[-1]] break else: # We are here only if all state tokens have been considered # and there was not a match on any of them. try: if text[pos] == '\n': # at EOL, reset state to "root" statestack = ['root'] statetokens = tokendefs['root'] yield pos, Text, '\n' pos += 1 continue yield pos, Error, text[pos] pos += 1 except IndexError: break
279
lexer.py
Python
pipenv/patched/notpip/_vendor/pygments/lexer.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
17
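The get_tokens_unprocessed record above is the core RegexLexer loop (here the copy vendored into pipenv): it walks the text with a state stack and yields (position, token type, matched text) triples. A hedged example against the standalone pygments package, which exposes the same method:

from pygments.lexers import PythonLexer

for pos, tok, text in PythonLexer().get_tokens_unprocessed("x = 1\n"):
    print(pos, tok, repr(text))  # (offset, token type, matched text) triples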
20,061
77
14
26
284
15
0
116
461
version
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def version(self, pretty=False, best=False): # type: (bool, bool) -> str versions = [ self.os_release_attr("version_id"), self.lsb_release_attr("release"), self.distro_release_attr("version_id"), self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( "version_id", "" ), self._parse_distro_release_content( self.lsb_release_attr("description") ).get("version_id", ""), self.uname_attr("release"), ] version = "" if best: # This algorithm uses the last version in priority order that has # the best precision. If the versions are not in conflict, that # does not matter; otherwise, using the last one instead of the # first one might be considered a surprise. for v in versions: if v.count(".") > version.count(".") or version == "": version = v else: for v in versions: if v != "": version = v break if pretty and version and self.codename(): version = "{0} ({1})".format(version, self.codename()) return version
160
distro.py
Python
pipenv/patched/notpip/_vendor/distro.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
10
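A standalone sketch of the best=True selection rule in the version record above: among the collected candidates, keep the last one with the most dots (i.e. the highest precision). The candidate strings below are illustrative.

versions = ["20.04", "20.04.6", "", "20"]
version = ""
for v in versions:
    if v.count(".") > version.count(".") or version == "":
        version = v
print(version)  # "20.04.6"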
168,206
63
12
27
152
24
0
73
219
to_perioddelta
PERF cache find_stack_level (#48023) cache stacklevel
https://github.com/pandas-dev/pandas.git
def to_perioddelta(self, freq) -> TimedeltaArray:
    # Deprecation GH#34853
    warnings.warn(
        "to_perioddelta is deprecated and will be removed in a "
        "future version. "
        "Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
        FutureWarning,
        # stacklevel chosen to be correct for when called from DatetimeIndex
        stacklevel=find_stack_level(inspect.currentframe()),
    )
    from pandas.core.arrays.timedeltas import TimedeltaArray

    if self._ndarray.dtype != "M8[ns]":
        raise NotImplementedError("Only supported for nanosecond resolution.")

    i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
    m8delta = i8delta.view("m8[ns]")
    return TimedeltaArray(m8delta)

# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
87
datetimes.py
Python
pandas/core/arrays/datetimes.py
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
pandas
2
159,520
38
12
26
250
17
0
56
291
_validate_configuration
Enable `union-attr` mypy check and fix issues (#10942) * first batch of mypy fixes * fix for failing tests * fix errors in channels and shared packages * add docstring, fix error in brokers and lock module * more fixes * fix more union-attr errors * add more fixes * fix errors in action module, undo a different change * more fixes in agent and diet classifier modules * fix more errors * fixes for failing tests * fix errors in migrate and policies modules * address review comments - first part * address review comments - part 2 * add fix in generator module * apply review suggestions - part 3 * fix error in domain module * fix final errors * address final review comments * add changelog entries * fix docstring
https://github.com/RasaHQ/rasa.git
def _validate_configuration(self) -> None: if self.assistant_voice not in self.SUPPORTED_VOICES: self._raise_invalid_voice_exception() try: int(self.speech_timeout) except ValueError: if self.speech_timeout.lower() != "auto": self._raise_invalid_speech_timeout_exception() if self.speech_model not in self.SUPPORTED_SPEECH_MODELS: self._raise_invalid_speech_model_exception() if self.enhanced.lower() not in [ "true", "false", ]: self._raise_invalid_enhanced_option_exception() if ( self.enhanced.lower() == "true" and self.speech_model.lower() != "phone_call" ): self._raise_invalid_enhanced_speech_model_exception() if ( self.speech_model.lower() != "numbers_and_commands" and self.speech_timeout.lower() == "auto" ): self._raise_invalid_speech_model_timeout_exception()
143
twilio_voice.py
Python
rasa/core/channels/twilio_voice.py
ec8de9bc19c20880bf681a686f776523b612cc27
rasa
10
1,493
23
14
5
123
11
0
34
69
forward
Moved all code from notebook to codebase Took 19 minutes
https://github.com/OpenMined/PySyft.git
def forward(self, outputs, targets):
    outputs = outputs.clip(self.epsilon, 1 - self.epsilon)
    log_loss = targets * dp_log(outputs) + ((targets * -1) + 1) * dp_log((outputs * -1) + 1)
    log_loss = log_loss.sum(axis=1) * -1
    return log_loss.mean()
76
loss.py
Python
packages/syft/src/syft/core/tensor/nn/loss.py
f3b8f6f1196e6f8a92620b4efc190715273fecab
PySyft
1
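The forward record above is binary cross-entropy written with PySyft's differentially private log helper: probabilities are clipped into [epsilon, 1 - epsilon], then the loss is -(y * log(p) + (1 - y) * log(1 - p)), summed over the class axis and averaged. Below is a plain-NumPy check of the same formula; dp_log and the PySyft tensor types are not reproduced, and the sample values are illustrative.

import numpy as np

eps = 1e-7
outputs = np.clip(np.array([[0.9], [0.2]]), eps, 1 - eps)
targets = np.array([[1.0], [0.0]])
log_loss = targets * np.log(outputs) + (1 - targets) * np.log(1 - outputs)
print((-log_loss.sum(axis=1)).mean())  # ~0.1643, the mean of -log(0.9) and -log(0.8)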
138,500
38
10
16
105
13
0
53
135
meta_count
[Datasets] [Out-of-Band Serialization: 2/3] Refactor `ExecutionPlan` to maintain complete lineage and eagerly unlink block references. (#23931) This PR refactors ExecutionPlan to maintain complete stage lineage, even for eagerly computed datasets, while ensuring that block references are unlinked as early as possible in order to more eagerly release block memory. This PR is the final precursor to adding the actual out-of-band serialization APIs (PR 3/3). The full lineage has to be maintained, even for eagerly computed datasets, since the lineage is needed for out-of-band serialization of datasets.
https://github.com/ray-project/ray.git
def meta_count(self) -> Optional[int]:
    if self._stages_after_snapshot:
        return None
    # Snapshot is now guaranteed to be the output of the final stage or None.
    blocks = self._snapshot_blocks
    metadata = blocks.get_metadata() if blocks else None
    if metadata and all(m.num_rows is not None for m in metadata):
        return sum(m.num_rows for m in metadata)
    else:
        return None
66
plan.py
Python
python/ray/data/impl/plan.py
9ee24530abf1b5e3239869b5257dd7b678337b90
ray
7
43,335
7
9
3
38
6
0
7
21
hook
Amazon appflow (#24057) * Add Amazon AppFlow hook. * Add Amazon AppFlow operators. * Add Amazon AppFlow examples. * Add Amazon Appflow docs. * Apply comments/docs patterns. * Removing the "private" attribute signal and more. * Fix task_ids for example_appflow. * Move datetime_to_epoch() to utils and more. * Fix the AppflowBaseOperator name. * Ignore AppflowBaseOperator during structure check. * test_short_circuit refactor. * Add get_airflow_version. * Update airflow/providers/amazon/aws/hooks/appflow.py Co-authored-by: Josh Fell <[email protected]> * Update airflow/providers/amazon/aws/operators/appflow.py Co-authored-by: Josh Fell <[email protected]> * Update airflow/providers/amazon/aws/operators/appflow.py Co-authored-by: Josh Fell <[email protected]> * Update airflow/providers/amazon/aws/operators/appflow.py Co-authored-by: Josh Fell <[email protected]> * Update airflow/providers/amazon/aws/operators/appflow.py Co-authored-by: Josh Fell <[email protected]> * Update airflow/providers/amazon/aws/operators/appflow.py Co-authored-by: Josh Fell <[email protected]> * Addressing Josh's requests. * Add cached_property to AppflowHook * Update airflow/providers/amazon/aws/hooks/appflow.py Co-authored-by: Josh Fell <[email protected]> * Update airflow/providers/amazon/aws/operators/appflow.py Co-authored-by: Josh Fell <[email protected]> * Update airflow/providers/amazon/aws/operators/appflow.py Co-authored-by: Josh Fell <[email protected]> * Update Josh's comment. * Update cached_property import. * Fix mypy. Co-authored-by: Josh Fell <[email protected]>
https://github.com/apache/airflow.git
def hook(self) -> AppflowHook: return AppflowHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
23
appflow.py
Python
airflow/providers/amazon/aws/operators/appflow.py
e477f4ba6cd15fabbfe5210c99947bcb70ddac4f
airflow
1
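The commit message above mentions adding `cached_property` to the hook, so the `hook` property is built once per operator instance instead of on every access. A simplified sketch of that pattern; the operator and hook classes here are made up purely to keep the example runnable:

from functools import cached_property

class _FakeHook:  # illustrative stand-in for AppflowHook
    def __init__(self, aws_conn_id, region_name):
        self.aws_conn_id = aws_conn_id
        self.region_name = region_name

class _FakeAppflowOperator:
    def __init__(self, aws_conn_id="aws_default", region="eu-west-1"):
        self.aws_conn_id = aws_conn_id
        self.region = region

    @cached_property
    def hook(self) -> _FakeHook:
        # Created on first access, then reused for the operator's lifetime.
        return _FakeHook(aws_conn_id=self.aws_conn_id, region_name=self.region)

op = _FakeAppflowOperator()
assert op.hook is op.hook  # the same cached instance is returned each time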
306,789
13
8
5
43
8
0
13
32
test_convert_from_gallons
Refactor distance, speed and volume utils (#77952) * Refactor distance util * Fix bmw connected drive tests * Adjust here travel time tests * Adjust waze travel time tests * Adjust test_distance * Adjust rounding values * Adjust more tests * Adjust volume conversions * Add tests
https://github.com/home-assistant/core.git
def test_convert_from_gallons(): gallons = 5 assert volume_util.convert(gallons, VOLUME_GALLONS, VOLUME_LITERS) == pytest.approx( 18.92706 )
28
test_volume.py
Python
tests/util/test_volume.py
9490771a8737892a7a86afd866a3520b836779fd
core
1
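The expected value in the test above follows from the definition of the US gallon: 1 US gallon is exactly 3.785411784 liters, so 5 gallons is 5 * 3.785411784 = 18.92705892 liters, which rounds to the 18.92706 used with pytest.approx. A one-line check of that arithmetic:

US_GALLON_TO_LITER = 3.785411784  # exact by definition
assert abs(5 * US_GALLON_TO_LITER - 18.92706) < 1e-4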
269,529
17
11
43
77
11
0
21
47
switch
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def switch(condition, then_expression, else_expression): if condition.dtype != tf.bool: condition = tf.cast(condition, "bool") cond_ndim = ndim(condition) if not cond_ndim: if not callable(then_expression):
239
backend.py
Python
keras/backend.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
9
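The `switch` snippet above is cut off in this dump, so only the argument validation survives. The sketch below is not the Keras implementation; it is a minimal illustration of what such a backend helper does in the scalar-condition case: pick one of two tensors (or callables) with tf.cond.

import tensorflow as tf

def simple_switch(condition, then_expression, else_expression):
    # Cast non-bool conditions to bool, as the truncated snippet above does.
    if condition.dtype != tf.bool:
        condition = tf.cast(condition, "bool")
    # tf.cond wants callables, so wrap plain tensors in lambdas.
    then_fn = then_expression if callable(then_expression) else (lambda: then_expression)
    else_fn = else_expression if callable(else_expression) else (lambda: else_expression)
    return tf.cond(condition, then_fn, else_fn)

x = simple_switch(tf.constant(True), tf.constant(1.0), tf.constant(0.0))
print(float(x))  # 1.0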
180,699
10
10
7
47
5
0
11
43
gather_data_and_broadcast_estimations
Release new queue beta (#1969) * queue-refactor-backend (#1489) * queue-refactor-backend - create a template for the new design * queue-refactor-backend - clean after the old queue * queue-refactor-backend - add basic test to websocket endpoint * queue-refactor-backend - small fix * queue-refactor-backend - debugs&fixes&finalizations - test the flow with postman * queue-refactor-backend - tweaks on websocket closing * queue-refactor-backend - cleanup * queue-refactor-backend - cleanup & tweaks * queue-refactor-backend - cleanup & tweaks * queue-refactor-backend - cleanup & tweaks - correct the exception handling * queue-refactor-backend - add websockets dependency * queue-refactor-backend - reformat * queue-refactor-backend - add single event test * queue-refactor-backend - tweaks - remove outdated tests * queue-refactor-backend - reformat * queue-refactor-backend - reformat * queue-refactor-backend - reformat * queue-refactor-backend - add Queue configurations to Blocks.launch() - add live_queue_update to send estimations whenever a job gets fetched from the Queue * queue-refactor-backend - add Queue configurations to Blocks.launch() - add live_queue_update to send estimations whenever a job gets fetched from the Queue * queue-refactor-backend - tweaks * queue-refactor-backend - make SLEEP_WHEN_FREE shorter Co-authored-by: Ali Abid <[email protected]> * Add estimation parameters to queue (#1889) * - tweaks on Estimation * version * Revert "version" This reverts commit bd1f4d7bfe3658a4967b93126859a62a511a70e2. * some fix and tweaks * implement queue frontend (#1950) * implement queue frontend * fix types * fix ws endpoint in build mode * cleanup * Queue tweaks (#1909) * tweaks on estimation payload * Queue keep ws connections open (#1910) * 1. keep ws connections open after the event process is completed 2. do not send estimations periodically if live queue updates is open * fix calculation * 1. tweaks on event_queue * fix issue - create new ws for each request * format * fix * fix tests * fix tests * tets * test * changes * changes * changes * change' * wtf * changes * changes * file perms * Release queue beta v1 (#1971) * - release the new queue * - bypass the issue in the tests - rewrite the lost part in the codebase * - add concurrent queue example (#1978) * rank_eta calc * Queue fixes (#1981) * change * format * - comment out queue tests as they dont work well * - reformat * Update gradio/event_queue.py Co-authored-by: Ömer Faruk Özdemir <[email protected]> * changes * changes * change * weird fix Co-authored-by: Ömer Faruk Özdemir <[email protected]> * release-queue-v3 (#1988) * Fix frontend queuing to target secure WSS (#1996) * change * format * changes * queue-concurrency-tweaks (#2002) 1. make gather_data and broadcast_estimation sequential instead of concurrent because they were deleting elements at the same time and raising expections which was lowering the performance * Update Queue API, documentation (#2026) * changes * changes * fixes * changes * change * fix Co-authored-by: Ömer Faruk Özdemir <[email protected]> Co-authored-by: pngwn <[email protected]>
https://github.com/gradio-app/gradio.git
async def gather_data_and_broadcast_estimations(cls) -> None: await cls.gather_data_for_first_ranks() if cls.LIVE_UPDATES: await cls.broadcast_estimations()
25
event_queue.py
Python
gradio/event_queue.py
b1dfc9a172440e9c9736566f326ba339ff559604
gradio
2
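The commit above ("queue-concurrency-tweaks") explains that gathering data and broadcasting estimations were made sequential because running them concurrently let both coroutines delete queue entries at the same time. A stripped-down asyncio sketch of the two orderings; the coroutine bodies are placeholders, not Gradio code:

import asyncio

async def gather_data():
    await asyncio.sleep(0.01)  # pretend to pull data for the first ranks

async def broadcast_estimations():
    await asyncio.sleep(0.01)  # pretend to push ETA updates to clients

async def concurrent_version():
    # Both coroutines may touch the shared queue at the same time.
    await asyncio.gather(gather_data(), broadcast_estimations())

async def sequential_version():
    # One finishes before the other starts, as in the method above.
    await gather_data()
    await broadcast_estimations()

asyncio.run(sequential_version())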
100,667
34
12
14
161
15
0
48
163
_check_folder
Alignments tool - Replace 'extract-large' with 'min-size'
https://github.com/deepfakes/faceswap.git
def _check_folder(self) -> None: err = None if not self._faces_dir: err = "ERROR: Output faces folder not provided." elif not os.path.isdir(self._faces_dir): logger.debug("Creating folder: '%s'", self._faces_dir) os.makedirs(self._faces_dir) elif os.listdir(self._faces_dir): err = f"ERROR: Output faces folder should be empty: '{self._faces_dir}'" if err: logger.error(err) sys.exit(0) logger.verbose("Creating output folder at '%s'", self._faces_dir)
90
jobs.py
Python
tools/alignments/jobs.py
a9908b46f77dc66ac7efe7100ea0eed4b1f2b460
faceswap
5
265,888
10
10
5
66
8
0
13
28
register_search
Closes #10560: New global search (#10676) * Initial work on new search backend * Clean up search backends * Return only the most relevant result per object * Clear any pre-existing cached entries on cache() * #6003: Implement global search functionality for custom field values * Tweak field weights & document guidance * Extend search() to accept a lookup type * Move get_registry() out of SearchBackend * Enforce object permissions when returning search results * Add indexers for remaining models * Avoid calling remove() on non-cacheable objects * Use new search backend by default * Extend search backend to filter by object type * Clean up search view form * Enable specifying lookup logic * Add indexes for value field * Remove object type selector from search bar * Introduce SearchTable and enable HTMX for results * Enable pagination * Remove legacy search backend * Cleanup * Use a UUID for CachedValue primary key * Refactoring search methods * Define max search results limit * Extend reindex command to support specifying particular models * Add clear() and size to SearchBackend * Optimize bulk caching performance * Highlight matched portion of field value * Performance improvements for reindexing * Started on search tests * Cleanup & docs * Documentation updates * Clean up SearchIndex * Flatten search registry to register by app_label.model_name * Clean up search backend classes * Clean up RestrictedGenericForeignKey and RestrictedPrefetch * Resolve migrations conflict
https://github.com/netbox-community/netbox.git
def register_search(cls): model = cls.model label = f'{model._meta.app_label}.{model._meta.model_name}' registry['search'][label] = cls return cls
26
__init__.py
Python
netbox/netbox/search/__init__.py
9628dead07ccef9608b32906aa8194bc948e5a09
netbox
1
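`register_search` above is a class decorator that files a search index class in a flat registry under an "app_label.model_name" key. A minimal usage sketch with fake registry, model, and index classes (all names below are invented for illustration):

registry = {'search': {}}

def register_search(cls):
    model = cls.model
    label = f'{model._meta.app_label}.{model._meta.model_name}'
    registry['search'][label] = cls
    return cls

class _Meta:  # stand-in for Django's Model._meta
    app_label = 'dcim'
    model_name = 'device'

class FakeDevice:
    _meta = _Meta()

@register_search
class FakeDeviceIndex:
    model = FakeDevice

print(registry['search'])  # {'dcim.device': <class '__main__.FakeDeviceIndex'>}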
271,874
66
12
25
250
20
0
108
284
get_metric_function
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def get_metric_function(metric, output_shape=None, loss_fn=None): if metric not in ["accuracy", "acc", "crossentropy", "ce"]: return metrics_module.get(metric) is_sparse_categorical_crossentropy = isinstance( loss_fn, losses.SparseCategoricalCrossentropy ) or ( isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.sparse_categorical_crossentropy ) is_binary_crossentropy = isinstance(loss_fn, losses.BinaryCrossentropy) or ( isinstance(loss_fn, losses.LossFunctionWrapper) and loss_fn.fn == losses.binary_crossentropy ) if metric in ["accuracy", "acc"]: if output_shape[-1] == 1 or is_binary_crossentropy: return metrics_module.binary_accuracy elif is_sparse_categorical_crossentropy: return metrics_module.sparse_categorical_accuracy # If the output_shape[-1] is not 1, then we know output is `categorical`. # We assume it is sparse categorical only if loss is explicitly given # as sparse categorical crossentropy loss. return metrics_module.categorical_accuracy else: if output_shape[-1] == 1 or is_binary_crossentropy: return metrics_module.binary_crossentropy elif is_sparse_categorical_crossentropy: return metrics_module.sparse_categorical_crossentropy return metrics_module.categorical_crossentropy
156
training_utils_v1.py
Python
keras/engine/training_utils_v1.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
13
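The branching in `get_metric_function` amounts to a small decision table: a one-unit output (or a binary cross-entropy loss) maps "accuracy" to binary accuracy, a sparse categorical cross-entropy loss maps it to sparse categorical accuracy, and everything else falls back to categorical accuracy, with the same pattern for "crossentropy". A toy restatement of that dispatch, independent of the Keras objects:

def pick_accuracy_metric(output_width: int, loss_name: str) -> str:
    # Plain strings stand in for the Keras loss and metric objects.
    if output_width == 1 or loss_name == "binary_crossentropy":
        return "binary_accuracy"
    if loss_name == "sparse_categorical_crossentropy":
        return "sparse_categorical_accuracy"
    return "categorical_accuracy"

print(pick_accuracy_metric(1, "binary_crossentropy"))               # binary_accuracy
print(pick_accuracy_metric(10, "sparse_categorical_crossentropy"))  # sparse_categorical_accuracy
print(pick_accuracy_metric(10, "categorical_crossentropy"))         # categorical_accuracy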
141,975
29
11
18
153
21
0
39
145
test_multi_sync_different_node
[tune] Refactor Syncer / deprecate Sync client (#25655) This PR includes / depends on #25709 The two concepts of Syncer and SyncClient are confusing, as is the current API for passing custom sync functions. This PR refactors Tune's syncing behavior. The Sync client concept is hard deprecated. Instead, we offer a well defined Syncer API that can be extended to provide own syncing functionality. However, the default will be to use Ray AIRs file transfer utilities. New API: - Users can pass `syncer=CustomSyncer` which implements the `Syncer` API - Otherwise our off-the-shelf syncing is used - As before, syncing to cloud disables syncing to driver Changes: - Sync client is removed - Syncer interface introduced - _DefaultSyncer is a wrapper around the URI upload/download API from Ray AIR - SyncerCallback only uses remote tasks to synchronize data - Rsync syncing is fully depracated and removed - Docker and kubernetes-specific syncing is fully deprecated and removed - Testing is improved to use `file://` URIs instead of mock sync clients
https://github.com/ray-project/ray.git
def test_multi_sync_different_node(ray_start_2_cpus, temp_data_dirs, num_workers): tmp_source, tmp_target = temp_data_dirs assert_file(True, tmp_source, "level0.txt") assert_file(True, tmp_source, "subdir/level1.txt") node_ip = ray.util.get_node_ip_address() futures = [ _sync_dir_between_different_nodes( source_ip=node_ip, source_path=tmp_source, target_ip=node_ip, target_path=tmp_target, return_futures=True, )[0] for _ in range(num_workers) ] ray.get(futures) assert_file(True, tmp_target, "level0.txt") assert_file(True, tmp_target, "subdir/level1.txt")
99
test_util_file_transfer.py
Python
python/ray/tune/tests/test_util_file_transfer.py
6313ddc47cf9df4df8c8907997df559850a1b874
ray
2
128,724
4
6
43
17
4
0
4
7
test_recover_from_latest
[tune] Try to recover from latest available checkpoint (#29099) Tune tracks the latest available checkpoint per trial in the trial metadata, which is stored on the driver. This data is periodically synced to cloud storage, but can get out of sync if, for example, the node the driver was on is terminated. When experiments are continued, this can lead to Tune not finding the checkpoints it wants to recover from (e.g. because we only keep a certain number of checkpoints). The current fallback behavior is to start the trials from scratch; however, we would usually prefer to restart the trial from the latest available checkpoint. This PR adds fully unit- and integration-tested functionality for this, and guards it behind a feature env flag that is enabled by default. Signed-off-by: Kai Fricke <[email protected]>
https://github.com/ray-project/ray.git
def test_recover_from_latest(tmpdir, upload_uri, fetch_from_cloud):
230
test_trainable.py
Python
python/ray/tune/tests/test_trainable.py
d7b2b49a962bf33dae7a50376f159ab15d80800f
ray
6
44,879
43
14
18
233
27
0
53
159
dag_bag_multiple
Fix test_clear_multiple_external_task_marker timing out (#21343) closes: #11443
https://github.com/apache/airflow.git
def dag_bag_multiple(): dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False) daily_dag = DAG("daily_dag", start_date=DEFAULT_DATE, schedule_interval="@daily") agg_dag = DAG("agg_dag", start_date=DEFAULT_DATE, schedule_interval="@daily") dag_bag.bag_dag(dag=daily_dag, root_dag=daily_dag) dag_bag.bag_dag(dag=agg_dag, root_dag=agg_dag) daily_task = DummyOperator(task_id="daily_tas", dag=daily_dag) begin = DummyOperator(task_id="begin", dag=agg_dag) for i in range(8): task = ExternalTaskMarker( task_id=f"{daily_task.task_id}_{i}", external_dag_id=daily_dag.dag_id, external_task_id=daily_task.task_id, execution_date="{{ macros.ds_add(ds, -1 * %s) }}" % i, dag=agg_dag, ) begin >> task yield dag_bag
139
test_external_task_sensor.py
Python
tests/sensors/test_external_task_sensor.py
0873ee7e847e67cf045d9fcc3da6f6422b1b7701
airflow
2
120,111
13
10
9
66
7
0
19
31
explicit_device_get_scope
Bump minimum jaxlib version to 0.3.2 and remove transfer guard compatibility code
https://github.com/google/jax.git
def explicit_device_get_scope() -> Iterator[None]: state = transfer_guard_lib.thread_local_state() prev = state.explicit_device_get state.explicit_device_get = True try: yield finally: state.explicit_device_get = prev
37
config.py
Python
jax/_src/config.py
36df8619d74672b0072e7880bcdd257c4a83e9f1
jax
2
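`explicit_device_get_scope` above flips a thread-local flag and restores the previous value on the way out, which is the usual try/finally generator pattern behind a context manager. A generic sketch of that pattern with a plain object in place of the real transfer-guard state:

from contextlib import contextmanager
from typing import Iterator

class _State:  # stand-in for the thread-local transfer-guard state
    explicit_device_get = False

state = _State()

@contextmanager
def explicit_device_get_scope() -> Iterator[None]:
    prev = state.explicit_device_get
    state.explicit_device_get = True
    try:
        yield
    finally:
        # Restore the previous value even if the body raised.
        state.explicit_device_get = prev

with explicit_device_get_scope():
    assert state.explicit_device_get is True
assert state.explicit_device_get is False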
102,400
38
12
10
195
14
0
46
92
no_batch_dim_reference_mha
[rnn/gru] no batch dim (#70442) Summary: Fixes https://github.com/pytorch/pytorch/issues/60585 TODO: * [x] Doc updates Pull Request resolved: https://github.com/pytorch/pytorch/pull/70442 Reviewed By: zou3519 Differential Revision: D33460427 Pulled By: jbschlosser fbshipit-source-id: c64d9624c305d90570c79d11a28557f9ec667b27
https://github.com/pytorch/pytorch.git
def no_batch_dim_reference_mha(m, p, *args, **kwargs): batch_dim = 0 if kwargs.get('batch_first', True) else 1 if 'batch_first' in kwargs: kwargs.pop('batch_first') if 'key_padding_mask' in kwargs and kwargs['key_padding_mask'] is not None: kwargs['key_padding_mask'] = kwargs['key_padding_mask'].unsqueeze(0) single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] with freeze_rng_state(): output = m(*single_batch_input_args, **kwargs) return (output[0].squeeze(batch_dim), output[1].squeeze(0))
117
common_modules.py
Python
torch/testing/_internal/common_modules.py
6eba936082a641be8ece156f70c0f5c435f7a7aa
pytorch
6
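The reference helper above checks a module that takes inputs without a batch dimension by unsqueezing every argument into a batch of one, running the module, and squeezing the batch dimension back out of the output. A short sketch of that unsqueeze/run/squeeze pattern, using a plain linear layer instead of multi-head attention to keep it small:

import torch
import torch.nn as nn

def no_batch_dim_reference(module, *args, batch_dim=0):
    # Add a singleton batch dimension, run the module, then drop it again.
    batched = [a.unsqueeze(batch_dim) for a in args]
    out = module(*batched)
    return out.squeeze(batch_dim)

layer = nn.Linear(4, 3)
x = torch.randn(4)                             # input with no batch dimension
print(no_batch_dim_reference(layer, x).shape)  # torch.Size([3])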
9,836
42
10
29
143
9
0
56
156
set_ping_parser
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
https://github.com/jina-ai/jina.git
def set_ping_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument( 'host', type=str, help='The host address of the target Pea, e.g. 0.0.0.0' ) parser.add_argument('port', type=int, help='The control port of the target pod/pea') parser.add_argument( '--timeout', type=int, default=3000, help=, ) parser.add_argument( '--retries', type=int, default=3, help='The max number of tried health checks before exit with exit code 1', ) return parser
104
ping.py
Python
jina/parsers/ping.py
933415bfa1f9eb89f935037014dfed816eb9815d
jina
2
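A quick usage sketch for the parser factory above. Two assumptions: the `--timeout` help value, which this dump shows stripped down to `help=,`, is restored to any string literal, and `set_base_parser()` is importable from Jina's parser package as in the original code base:

parser = set_ping_parser()
args = parser.parse_args(['0.0.0.0', '45678', '--timeout', '2000'])
print(args.host, args.port, args.timeout, args.retries)  # 0.0.0.0 45678 2000 3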
155,204
25
13
12
95
11
0
26
91
to_pickle_distributed
FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <[email protected]>
https://github.com/modin-project/modin.git
def to_pickle_distributed(cls, qc, **kwargs): if not ( isinstance(kwargs["filepath_or_buffer"], str) and "*" in kwargs["filepath_or_buffer"] ) or not isinstance(qc, PandasQueryCompiler): warnings.warn("Defaulting to Modin core implementation") return PandasOnUnidistIO.to_pickle(qc, **kwargs)
93
io.py
Python
modin/experimental/core/execution/unidist/implementations/pandas_on_unidist/io/io.py
193505fdf0c984743397ba3df56262f30aee13a8
modin
4
250,109
35
11
35
186
8
0
57
374
test_expiry_logic
Require types in tests.storage. (#14646) Adds missing type hints to `tests.storage` package and does not allow untyped definitions.
https://github.com/matrix-org/synapse.git
def test_expiry_logic(self) -> None: self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ "1" ] = 100000 self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ "2" ] = 200000 self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion[ "3" ] = 300000 self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion() # All entries within time frame self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 3, ) # Oldest room to expire self.pump(1.01) self.event_creator_handler._expire_rooms_to_exclude_from_dummy_event_insertion() self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 2, ) # All rooms to expire self.pump(2) self.assertEqual( len( self.event_creator_handler._rooms_to_exclude_from_dummy_event_insertion ), 0, )
114
test_cleanup_extrems.py
Python
tests/storage/test_cleanup_extrems.py
3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b
synapse
1
7,174
20
12
9
93
12
0
23
98
get_metrics
feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)
https://github.com/ludwig-ai/ludwig.git
def get_metrics(self): all_of_metrics = {} for of_name, of_obj in self.output_features.items(): all_of_metrics[of_name] = of_obj.get_metrics() all_of_metrics[COMBINED] = { LOSS: get_scalar_from_ludwig_metric(self.eval_loss_metric) + get_scalar_from_ludwig_metric(self.eval_additional_losses_metrics) } return all_of_metrics
57
base.py
Python
ludwig/models/base.py
aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173
ludwig
2
283,270
19
13
16
105
13
0
20
67
get_user_timezone
Updating some names (#1575) * quick econ fix * black * keys and feature flags * terminal name :eyes: * some more replacements * some more replacements * edit pyproject * gst -> openbb * add example portfolios back to git * Update api from gst * sorry. skipping some tests * another round of names * another round of test edits * Missed some .gst refs and update timezone * water mark stuff * Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS * fix more GST to OpenBB Terminal * Logging : merge conflicts with main * Revert wrong files Co-authored-by: Andrew <[email protected]> Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def get_user_timezone() -> str: filename = os.path.join( os.path.dirname(os.path.abspath(__file__)), "timezone.openbb", ) if os.path.isfile(filename): with open(filename) as f: return f.read() return ""
60
helper_funcs.py
Python
openbb_terminal/helper_funcs.py
b71abcfbf4d7e8ac1855522aff0378e13c8b5362
OpenBBTerminal
2
297,814
33
17
20
247
9
0
46
278
extra_state_attributes
String formatting and max line length - Part 1 (#84390) Co-authored-by: Erik Montnemery <[email protected]>
https://github.com/home-assistant/core.git
def extra_state_attributes(self): if self._attr_native_value is None or self._attrs is None: return None if ( self.entity_description.key == SENSOR_RANDOM_RECORD_TYPE and self._attr_native_value is not None ): return { "cat_no": self._attrs["labels"][0]["catno"], "cover_image": self._attrs["cover_image"], "format": ( f"{self._attrs['formats'][0]['name']} ({self._attrs['formats'][0]['descriptions'][0]})" ), "label": self._attrs["labels"][0]["name"], "released": self._attrs["year"], ATTR_IDENTITY: self._discogs_data["user"], } return { ATTR_IDENTITY: self._discogs_data["user"], }
118
sensor.py
Python
homeassistant/components/discogs/sensor.py
b0cee0bc46cbd7efe0e6421da18d91595c7a25ad
core
5
46,884
26
10
13
77
11
0
30
110
parse_time_mapped_ti_count
Expand mapped tasks at DagRun.verify_integrity (#22679) Create the necessary task instances for a mapped task at dagrun.verify_integrity. Co-authored-by: Ash Berlin-Taylor <[email protected]>
https://github.com/apache/airflow.git
def parse_time_mapped_ti_count(self) -> Optional[int]: total = 0 for value in self._get_expansion_kwargs().values(): if not isinstance(value, MAPPABLE_LITERAL_TYPES): # None literal type encountered, so give up return None total += len(value) return total
46
mappedoperator.py
Python
airflow/models/mappedoperator.py
91832a42d8124b040073481fd93c54e9e64c2609
airflow
3
312,477
23
13
11
102
11
0
25
139
_async_cancel_websocket_loop
Remove unnecessary `TYPE_CHECKING` declarations in SimpliSafe (#65750)
https://github.com/home-assistant/core.git
async def _async_cancel_websocket_loop(self) -> None: if self._websocket_reconnect_task: self._websocket_reconnect_task.cancel() try: await self._websocket_reconnect_task except asyncio.CancelledError: LOGGER.debug("Websocket reconnection task successfully canceled") self._websocket_reconnect_task = None assert self._api.websocket await self._api.websocket.async_disconnect()
58
__init__.py
Python
homeassistant/components/simplisafe/__init__.py
fbe4d4272912a2ac5e2783b90eb75c90a6d7e6f5
core
3
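`_async_cancel_websocket_loop` above uses the standard asyncio idiom for tearing down a background task: cancel it, await it so the cancellation propagates, and swallow the resulting CancelledError. A self-contained sketch of that idiom outside Home Assistant:

import asyncio

async def _run_forever():
    while True:
        await asyncio.sleep(1)

async def main():
    task = asyncio.create_task(_run_forever())
    await asyncio.sleep(0)      # give the task a chance to start
    task.cancel()
    try:
        await task              # let the cancellation propagate back to us
    except asyncio.CancelledError:
        print("background task successfully canceled")

asyncio.run(main())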
120,940
34
16
8
206
25
0
48
56
_complex_truncated_normal
[x64] make nn_test pass with strict dtype promotion
https://github.com/google/jax.git
def _complex_truncated_normal(key, upper, shape, dtype): key_r, key_theta = random.split(key) real_dtype = np.array(0, dtype).real.dtype dtype = dtypes._to_complex_dtype(real_dtype) t = (1 - jnp.exp(jnp.array(-(upper ** 2), dtype))) * random.uniform(key_r, shape, real_dtype).astype(dtype) r = jnp.sqrt(-jnp.log(1 - t)) theta = 2 * jnp.pi * random.uniform(key_theta, shape, real_dtype).astype(dtype) return r * jnp.exp(1j * theta)
134
initializers.py
Python
jax/_src/nn/initializers.py
80d814ab8933a29a82e5645490128e1412e01891
jax
1
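The sampler above draws a complex value whose magnitude is truncated at `upper` by inverse-transform sampling: for a unit complex normal the magnitude has CDF 1 - exp(-r^2), so a uniform draw scaled by 1 - exp(-upper^2) and pushed through the inverse gives r = sqrt(-log(1 - t)), and the phase is a uniform angle in [0, 2*pi). A NumPy sketch of the same recipe, with shapes and dtypes simplified relative to the JAX version:

import numpy as np

def complex_truncated_normal(rng, upper, shape):
    # Inverse-CDF sample of the magnitude, truncated at `upper`.
    t = (1 - np.exp(-upper ** 2)) * rng.uniform(size=shape)
    r = np.sqrt(-np.log(1 - t))
    theta = 2 * np.pi * rng.uniform(size=shape)
    return r * np.exp(1j * theta)

samples = complex_truncated_normal(np.random.default_rng(0), upper=2.0, shape=(5,))
print(bool(np.abs(samples).max() <= 2.0))  # True: magnitudes never exceed the bound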
298,130
33
15
16
149
19
0
42
215
_update_data
Improve DataUpdateCoordinator typing in integrations (2) (#84656)
https://github.com/home-assistant/core.git
def _update_data(self) -> CanaryData: locations_by_id: dict[str, Location] = {} readings_by_device_id: dict[str, ValuesView[Reading]] = {} for location in self.canary.get_locations(): location_id = location.location_id locations_by_id[location_id] = location for device in location.devices: if device.is_online: readings_by_device_id[ device.device_id ] = self.canary.get_latest_readings(device.device_id) return { "locations": locations_by_id, "readings": readings_by_device_id, }
95
coordinator.py
Python
homeassistant/components/canary/coordinator.py
06db5476e4e687f1034196eb04c5cad3cf6861ad
core
4