ast_errors (stringlengths 0-3.2k) | d_id (int64 44-121k) | id (int64 70-338k) | n_whitespaces (int64 3-14k) | path (stringlengths 8-134) | n_words (int64 4-4.82k) | n_identifiers (int64 1-131) | random_cut (stringlengths 16-15.8k) | commit_message (stringlengths 2-15.3k) | fun_name (stringlengths 1-84) | commit_id (stringlengths 40-40) | repo (stringlengths 3-28) | file_name (stringlengths 5-79) | ast_levels (int64 6-31) | nloc (int64 1-548) | url (stringlengths 31-59) | complexity (int64 1-66) | token_counts (int64 6-2.13k) | n_ast_errors (int64 0-28) | vocab_size (int64 4-1.11k) | n_ast_nodes (int64 15-19.2k) | language (stringclasses 1 value) | documentation (dict) | code (stringlengths 101-62.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
53,050 | 211,247 | 554 | deploy/pipeline/pipeline.py | 116 | 14 | def get_model_dir(cfg):
for key in cfg.keys():
if type(cfg[key]) == dict and \
("enable" in cfg[key].keys() and cfg[key]['enable']
or "enable" not in cfg[key].keys()):
if "model_dir" in cfg[key].keys():
model_dir = cfg[key]["model_dir"]
downloaded_model_dir = auto_download_model(model_dir)
if downloaded_model_dir:
model_dir = downloaded_model_dir
cfg[key]["model_dir"] = model_dir
print(key, " model dir: ", model_dir)
elif key == "VEHICLE_PLATE":
det_model_dir = cfg[key]["det_model_dir"]
downloaded_det_model_dir = auto_download_model(det_model_dir)
if downloaded_det_model_dir:
det_model_dir = downloaded_det_model_dir
cfg[key]["det_model_dir"] = det_model_dir
print("det_model_dir model dir: ", det_model_dir)
rec_model_dir = cfg[key]["rec_model_dir"]
downloaded_rec_ | move initialize part into class (#6621) | get_model_dir | ff8a7b1d090a2f57048d3e87892706a8407dcfe6 | PaddleDetection | pipeline.py | 17 | 32 | https://github.com/PaddlePaddle/PaddleDetection.git | 13 | 228 | 0 | 56 | 387 | Python | {
"docstring": " \n Auto download inference model if the model_path is a url link. \n Otherwise it will use the model_path directly.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 18,
"vocab_size": 16
} | def get_model_dir(cfg):
for key in cfg.keys():
if type(cfg[key]) == dict and \
("enable" in cfg[key].keys() and cfg[key]['enable']
or "enable" not in cfg[key].keys()):
if "model_dir" in cfg[key].keys():
model_dir = cfg[key]["model_dir"]
downloaded_model_dir = auto_download_model(model_dir)
if downloaded_model_dir:
model_dir = downloaded_model_dir
cfg[key]["model_dir"] = model_dir
print(key, " model dir: ", model_dir)
elif key == "VEHICLE_PLATE":
det_model_dir = cfg[key]["det_model_dir"]
downloaded_det_model_dir = auto_download_model(det_model_dir)
if downloaded_det_model_dir:
det_model_dir = downloaded_det_model_dir
cfg[key]["det_model_dir"] = det_model_dir
print("det_model_dir model dir: ", det_model_dir)
rec_model_dir = cfg[key]["rec_model_dir"]
downloaded_rec_model_dir = auto_download_model(rec_model_dir)
if downloaded_rec_model_dir:
rec_model_dir = downloaded_rec_model_dir
cfg[key]["rec_model_dir"] = rec_model_dir
print("rec_model_dir model dir: ", rec_model_dir)
elif key == "MOT": # for idbased and skeletonbased actions
model_dir = cfg[key]["model_dir"]
downloaded_model_dir = auto_download_model(model_dir)
if downloaded_model_dir:
model_dir = downloaded_model_dir
cfg[key]["model_dir"] = model_dir
print("mot_model_dir model_dir: ", model_dir)
|
|
4,398 | 22,669 | 72 | linear-algebra-python/src/lib.py | 29 | 8 | def component(self, x, y):
if x >= 0 and x < self.__height and y >= 0 and y < s | refactor: clean code
Signed-off-by: slowy07 <[email protected]> | component | f0af0c43340763724f139fa68aa1e5a9ffe458b4 | Python | lib.py | 11 | 5 | https://github.com/geekcomputers/Python.git | 5 | 48 | 0 | 22 | 77 | Python | {
"docstring": "\n returns the specified (x,y) component\n ",
"language": "en",
"n_whitespaces": 20,
"n_words": 5,
"vocab_size": 5
} | def component(self, x, y):
if x >= 0 and x < self.__height and y >= 0 and y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("changeComponent: indices out of bounds")
|
|
23,815 | 109,908 | 270 | lib/mpl_toolkits/axisartist/axis_artist.py | 79 | 14 | def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
if all:
_ticks, _ticklabels, _label = True, True, True
elif all is not None:
_ticks, _ticklabels, _label = False, False, False
else:
_ticks, _ticklabels, _label = None, None, None
if ticks is not Non | Improve mpl_toolkit documentation | toggle | df6f95703b60348e01603f98a439b133da2938a0 | matplotlib | axis_artist.py | 10 | 21 | https://github.com/matplotlib/matplotlib.git | 9 | 151 | 0 | 34 | 230 | Python | {
"docstring": "\n Toggle visibility of ticks, ticklabels, and (axis) label.\n To turn all off, ::\n\n axis.toggle(all=False)\n\n To turn all off but ticks on ::\n\n axis.toggle(all=False, ticks=True)\n\n To turn all on but (axis) label off ::\n\n axis.toggle(all=True, label=False)\n\n ",
"language": "en",
"n_whitespaces": 98,
"n_words": 35,
"vocab_size": 23
} | def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
if all:
_ticks, _ticklabels, _label = True, True, True
elif all is not None:
_ticks, _ticklabels, _label = False, False, False
else:
_ticks, _ticklabels, _label = None, None, None
if ticks is not None:
_ticks = ticks
if ticklabels is not None:
_ticklabels = ticklabels
if label is not None:
_label = label
if _ticks is not None:
self.major_ticks.set_visible(_ticks)
self.minor_ticks.set_visible(_ticks)
if _ticklabels is not None:
self.major_ticklabels.set_visible(_ticklabels)
self.minor_ticklabels.set_visible(_ticklabels)
if _label is not None:
self.label.set_visible(_label)
|
|
@pytest.mark.integration
@pytest.mark.elasticsearch | 75,030 | 257,136 | 148 | test/test_pipeline_yaml.py | 82 | 30 | def mock_json_schema(request, monkeypatch, tmp_path):
# Do not patch integration tests
if "integration" in request.keywords:
return
# Mock the subclasses list to make it very small, containing only mock nodes
monkeypatch.setattr(
haystack.nodes._json_schema,
"find_subclasses_in_modules",
lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)],
)
# Point the JSON schema path to tmp_path
monkeypatch.setattr(haystack.pipelines.config, "JSON_SCHEMAS_PATH", tmp_path)
# Generate mock schema in tmp_path
filename = f"haystack-pipeline-master.schema.json"
test_schema = _json_schema.get_json_schema(filename=filename, version="ignore")
with open(tm | Change YAML version exception into a warning (#2385)
* Change exception into warning, add strict_version param, and remove compatibility between schemas
* Simplify update_json_schema
* Rename unstable into master
* Prevent validate_config from changing the config to validate
* Fix version validation and add tests
* Rename master into ignore
* Complete parameter rename
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> | mock_json_schema | 4eec2dc45ee60e8b8780aa4f956aea8ad3624da3 | haystack | test_pipeline_yaml.py | 11 | 13 | https://github.com/deepset-ai/haystack.git | 2 | 114 | 1 | 68 | 206 | Python | {
"docstring": "\n JSON schema with the master version and only mocked nodes.\n ",
"language": "en",
"n_whitespaces": 17,
"n_words": 10,
"vocab_size": 10
} | def mock_json_schema(request, monkeypatch, tmp_path):
# Do not patch integration tests
if "integration" in request.keywords:
return
# Mock the subclasses list to make it very small, containing only mock nodes
monkeypatch.setattr(
haystack.nodes._json_schema,
"find_subclasses_in_modules",
lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)],
)
# Point the JSON schema path to tmp_path
monkeypatch.setattr(haystack.pipelines.config, "JSON_SCHEMAS_PATH", tmp_path)
# Generate mock schema in tmp_path
filename = f"haystack-pipeline-master.schema.json"
test_schema = _json_schema.get_json_schema(filename=filename, version="ignore")
with open(tmp_path / filename, "w") as schema_file:
json.dump(test_schema, schema_file, indent=4)
#
# Integration
#
@pytest.mark.integration
@pytest.mark.elasticsearch |
80,226 | 269,606 | 27 | keras/backend.py | 16 | 6 | def _has_nchw_support():
explicitly_on_cpu = _is_current_explicit_device("CPU")
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATI | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _has_nchw_support | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | backend.py | 10 | 4 | https://github.com/keras-team/keras.git | 2 | 24 | 0 | 13 | 47 | Python | {
"docstring": "Check whether the current scope supports NCHW ops.\n\n TensorFlow does not support NCHW on CPU. Therefore we check if we are not\n explicitly put on\n CPU, and have GPUs available. In this case there will be soft-placing on the\n GPU device.\n\n Returns:\n bool: if the current scope device placement would support nchw\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 52,
"vocab_size": 41
} | def _has_nchw_support():
explicitly_on_cpu = _is_current_explicit_device("CPU")
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
|
|
36,978 | 157,549 | 225 | ldm/modules/image_degradation/utils_image.py | 117 | 27 | def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
| release more models | tensor2img | ca86da3a30c4e080d4db8c25fca73de843663cb4 | stablediffusion | utils_image.py | 17 | 24 | https://github.com/Stability-AI/stablediffusion.git | 5 | 228 | 0 | 77 | 358 | Python | {
"docstring": "\n Converts a torch Tensor into an image Numpy array of BGR channel order\n Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\n Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\n \n# --------------------------------------------\n# Augmentation, flipe and/or rotate\n# --------------------------------------------\n# The following two are enough.\n# (1) augmet_img: numpy image of WxHxC or WxH\n# (2) augment_img_tensor4: tensor image 1xCxWxH\n# --------------------------------------------\n",
"language": "en",
"n_whitespaces": 68,
"n_words": 62,
"vocab_size": 46
} | def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp
tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
n_dim = tensor.dim()
if n_dim == 4:
n_img = len(tensor)
img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 3:
img_np = tensor.numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 2:
img_np = tensor.numpy()
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
if out_type == np.uint8:
img_np = (img_np * 255.0).round()
# Important. Unlike matlab, numpy.unit8() WILL NOT round by default.
return img_np.astype(out_type)
|
|
43,310 | 181,347 | 65 | gradio/utils.py | 21 | 11 | def get_local_ip_address() -> str:
try:
ip_address = requests.get(
"https://checkip.amazonaws.com/", timeout=3
).text.strip()
except (requests.ConnectionError, requests.exceptions.ReadTimeout):
ip_address = "No internet connection"
return ip_address
| Patching `test_get_ip` attempt 2 (#2810)
* ip-patch-2
* formatting
* patch 2 | get_local_ip_address | 51824608865b66ab04b018f55055124edbe603f3 | gradio | utils.py | 14 | 9 | https://github.com/gradio-app/gradio.git | 2 | 45 | 0 | 18 | 78 | Python | {
"docstring": "Gets the public IP address or returns the string \"No internet connection\" if unable to obtain it.",
"language": "en",
"n_whitespaces": 16,
"n_words": 17,
"vocab_size": 16
} | def get_local_ip_address() -> str:
try:
ip_address = requests.get(
"https://checkip.amazonaws.com/", timeout=3
).text.strip()
except (requests.ConnectionError, requests.exceptions.ReadTimeout):
ip_address = "No internet connection"
return ip_address
|
|
46,161 | 189,674 | 66 | manim/mobject/geometry/arc.py | 16 | 6 | def get_tip(self):
tips = self.get_tips()
if len(tips) == 0:
raise Excep | Improved structure of the :mod:`.mobject` module (#2476)
* group graphing and update its references
* group text and update its references
* group opengl and update its references
* group three_d and update its references
* group geometry and update (most) references
* move some chaning.py + updater files into animation
* refactor arc.py
* refactor line.py
* refactor polygram.py
* refactor tips.py
* black + isort
* import new files in __init__.py
* refactor places where geometry was used
* black + isort again
* remove unused imports
* update reference.rst
* add descriptions to files
* fix circular imports
* forgot ArrowTip
* fix tests
* fix doctests
* satisfy mypy?
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix ALL merge conflicts
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* one VMobject import slipped through
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* re-add imports to `manim/opengl/__init__.py`
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix reference manual
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* ignore unknown directive type
* fix arrow tip imports in docstrings
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Benjamin Hackl <[email protected]> | get_tip | e040bcacd38378386749db18aeba575b93f4ebca | manim | arc.py | 10 | 6 | https://github.com/ManimCommunity/manim.git | 2 | 33 | 0 | 16 | 59 | Python | {
"docstring": "Returns the TipableVMobject instance's (first) tip,\n otherwise throws an exception.",
"language": "en",
"n_whitespaces": 16,
"n_words": 10,
"vocab_size": 10
} | def get_tip(self):
tips = self.get_tips()
if len(tips) == 0:
raise Exception("tip not found")
else:
return tips[0]
|
|
35,652 | 153,839 | 95 | modin/core/dataframe/pandas/partitioning/axis_partition.py | 39 | 13 | def shuffle(self, func, lengths, **kwargs):
num_splits = len(lengths)
# We add these to kwargs and will pop them off before performing the operation.
kwargs["manual_partition"] = True
kwargs["_lengths"] = lengths
args = [self.axis, func, num_splits, False]
args.extend(self.list_of_blocks)
return self._wrap_partitions(self.d | FIX-#4464: Refactor Ray utils and quick fix groupby.count failing on virtual partitions (#4490)
Co-authored-by: Devin Petersohn <[email protected]>
Signed-off-by: jeffreykennethli <[email protected]> | shuffle | b22b93df20ad25ae7a11f0c89d32fb2f234d4641 | modin | axis_partition.py | 10 | 7 | https://github.com/modin-project/modin.git | 1 | 68 | 0 | 35 | 109 | Python | {
"docstring": "\n Shuffle the order of the data in this axis partition based on the `lengths`.\n\n Parameters\n ----------\n func : callable\n The function to apply before splitting.\n lengths : list\n The list of partition lengths to split the result into.\n **kwargs : dict\n Additional keywords arguments to be passed in `func`.\n\n Returns\n -------\n list\n A list of `PandasDataframePartition` objects split by `lengths`.\n ",
"language": "en",
"n_whitespaces": 175,
"n_words": 60,
"vocab_size": 42
} | def shuffle(self, func, lengths, **kwargs):
num_splits = len(lengths)
# We add these to kwargs and will pop them off before performing the operation.
kwargs["manual_partition"] = True
kwargs["_lengths"] = lengths
args = [self.axis, func, num_splits, False]
args.extend(self.list_of_blocks)
return self._wrap_partitions(self.deploy_axis_func(*args, **kwargs))
|
|
72,413 | 248,677 | 757 | tests/storage/databases/main/test_room.py | 136 | 29 | def test_background_add_room_type_column(self):
# Create a room without a type
room_id = self._generate_room()
# Get event_id of the m.room.create event
event_id = self.get_success(
self.store.db_pool.simple_select_one_onecol(
table="current_state_events",
keyvalues={
"room_id": room_id,
"type": "m.room.create",
| Implement MSC3827: Filtering of `/publicRooms` by room type (#13031)
Signed-off-by: Šimon Brandner <[email protected]> | test_background_add_room_type_column | 13e359aec8ae8be8dc56a036ae6d9f2bc1d07385 | synapse | test_room.py | 14 | 48 | https://github.com/matrix-org/synapse.git | 1 | 211 | 0 | 88 | 368 | Python | {
"docstring": "Test that the background update to populate the `room_type` column in\n `room_stats_state` works properly.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 14,
"vocab_size": 13
} | def test_background_add_room_type_column(self):
# Create a room without a type
room_id = self._generate_room()
# Get event_id of the m.room.create event
event_id = self.get_success(
self.store.db_pool.simple_select_one_onecol(
table="current_state_events",
keyvalues={
"room_id": room_id,
"type": "m.room.create",
},
retcol="event_id",
)
)
# Fake a room creation event with a room type
event = {
"content": {
"creator": "@user:server.org",
"room_version": "9",
"type": RoomTypes.SPACE,
},
"type": "m.room.create",
}
self.get_success(
self.store.db_pool.simple_update(
table="event_json",
keyvalues={"event_id": event_id},
updatevalues={"json": json.dumps(event)},
desc="test",
)
)
# Insert and run the background update
self.get_success(
self.store.db_pool.simple_insert(
"background_updates",
{
"update_name": _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN,
"progress_json": "{}",
},
)
)
# ... and tell the DataStore that it hasn't finished all updates yet
self.store.db_pool.updates._all_done = False
# Now let's actually drive the updates to completion
self.wait_for_background_updates()
# Make sure the background update filled in the room type
room_type_after = self.get_success(
self.store.db_pool.simple_select_one_onecol(
table="room_stats_state",
keyvalues={"room_id": room_id},
retcol="room_type",
allow_none=True,
)
)
self.assertEqual(room_type_after, RoomTypes.SPACE)
|
|
@router.delete("/feedback") | 74,903 | 256,622 | 16 | rest_api/controller/feedback.py | 8 | 6 | def get_feedback():
labels = DOCUMENT_ | Allow Linux CI to push changes to forks (#2182)
* Add explicit reference to repo name to allow CI to push code back
* Run test matrix only on tested code changes
* Isolate the bot to check if it works
* Clarify situation with a comment
* Simplify autoformat.yml
* Add code and docs check
* Add git pull to make sure to fetch changes if they were created
* Add cache to autoformat.yml too
* Add information on forks in CONTRIBUTING.md
* Add a not about code quality tools in CONTRIBUTING.md
* Add image file types to the CI exclusion list
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> | get_feedback | 4e940be85902dc93f3924662ba83111df72bb4d3 | haystack | feedback.py | 8 | 3 | https://github.com/deepset-ai/haystack.git | 1 | 14 | 1 | 7 | 41 | Python | {
"docstring": "\n This endpoint allows the API user to retrieve all the feedback that has been submitted\n through the `POST /feedback` endpoint.\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 20,
"vocab_size": 18
} | def get_feedback():
labels = DOCUMENT_STORE.get_all_labels()
return labels
@router.delete("/feedback") |
3,311 | 20,288 | 167 | pipenv/patched/notpip/_vendor/pygments/formatters/__init__.py | 52 | 17 | def get_formatter_for_filename(fn, **options):
fn = basename(fn)
for modname, name, _, filenames, _ in FORMATTERS.values():
for filename in filenames:
| check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | get_formatter_for_filename | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | __init__.py | 15 | 13 | https://github.com/pypa/pipenv.git | 8 | 99 | 0 | 37 | 155 | Python | {
"docstring": "Lookup and instantiate a formatter by filename pattern.\n\n Raises ClassNotFound if not found.\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 13
} | def get_formatter_for_filename(fn, **options):
fn = basename(fn)
for modname, name, _, filenames, _ in FORMATTERS.values():
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _formatter_cache:
_load_formatters(modname)
return _formatter_cache[name](**options)
for cls in find_plugin_formatters():
for filename in cls.filenames:
if _fn_matches(fn, filename):
return cls(**options)
raise ClassNotFound("no formatter found for file name %r" % fn)
|
|
31,214 | 137,676 | 91 | python/ray/util/spark/utils.py | 39 | 12 | def setup_sigterm_on_parent_death():
try:
import ctypes
import sig | Ray on spark implementation (#28771)
REP: ray-project/enhancements#14 | setup_sigterm_on_parent_death | e76ccee69aaa7583be1a9d81cf7b2aa72cf25647 | ray | utils.py | 14 | 8 | https://github.com/ray-project/ray.git | 2 | 41 | 0 | 34 | 86 | Python | {
"docstring": "\n Uses prctl to automatically send SIGTERM to the child process when its parent is\n dead. The child process itself should handle SIGTERM properly.\n ",
"language": "en",
"n_whitespaces": 33,
"n_words": 23,
"vocab_size": 19
} | def setup_sigterm_on_parent_death():
try:
import ctypes
import signal
libc = ctypes.CDLL("libc.so.6")
# Set the parent process death signal of the command process to SIGTERM.
libc.prctl(1, signal.SIGTERM) # PR_SET_PDEATHSIG, see prctl.h
except OSError as e:
_logger.warning(f"Setup libc.prctl PR_SET_PDEATHSIG failed, error {repr(e)}.")
|
|
33,111 | 144,090 | 50 | python/ray/data/dataset.py | 15 | 8 | def force_reads(self) -> "Dataset[T]":
blocks = self.get_internal_block_refs()
bar = ProgressBar("Force reads", len(blocks))
bar.block_until_complete(blocks)
return self
| Deflake occasional deadlock in test_dataset.py::test_basic_actors[True] (#21970) | force_reads | fe167c94b10c832071544d82e83b51e534526c5b | ray | dataset.py | 10 | 10 | https://github.com/ray-project/ray.git | 1 | 34 | 0 | 14 | 62 | Python | {
"docstring": "Force full evaluation of the blocks of this dataset.\n\n This can be used to read all blocks into memory. By default, Datasets\n doesn't read blocks from the datasource until the first transform.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 32,
"vocab_size": 26
} | def force_reads(self) -> "Dataset[T]":
blocks = self.get_internal_block_refs()
bar = ProgressBar("Force reads", len(blocks))
bar.block_until_complete(blocks)
return self
|
|
43,896 | 182,658 | 161 | src/textual/_compositor.py | 36 | 22 | def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]:
layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True)
intersection = Region.intersection
for widget, (region, _order, clip, virtual_size, container_size) in layers:
yield (
widget,
intersection(region, clip),
region,
virtual_size,
container | docstring | __iter__ | 1a20b9de7d4cef7f93e4500757d3fb42e680f40c | textual | _compositor.py | 12 | 17 | https://github.com/Textualize/textual.git | 2 | 90 | 0 | 32 | 126 | Python | {
"docstring": "Iterate map with information regarding each widget and is position\n\n Yields:\n Iterator[tuple[Widget, Region, Region, Size, Size]]: Iterates a tuple of\n Widget, clip region, region, virtual size, and container size.\n ",
"language": "en",
"n_whitespaces": 69,
"n_words": 29,
"vocab_size": 26
} | def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]:
layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True)
intersection = Region.intersection
for widget, (region, _order, clip, virtual_size, container_size) in layers:
yield (
widget,
intersection(region, clip),
region,
virtual_size,
container_size,
)
|
|
38,608 | 160,358 | 71 | numpy/lib/recfunctions.py | 24 | 9 | def get_names_flat(adtype):
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names is not None:
listnames.extend(ge | Fix docstring and examples for rfn.get_names* | get_names_flat | 569fc6a40ea53054409e00c7d1c0e7f5f53cb0ce | numpy | recfunctions.py | 13 | 9 | https://github.com/numpy/numpy.git | 3 | 54 | 0 | 22 | 89 | Python | {
"docstring": "\n Returns the field names of the input datatype as a tuple. Input datatype\n has to have fields otherwise error is raised.\n Nested structure are flattened beforehand.\n\n Parameters\n ----------\n adtype : dtype\n Input datatype\n\n Examples\n --------\n >>> from numpy.lib import recfunctions as rfn\n >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None\n False\n >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)\n ('A', 'B')\n >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])\n >>> rfn.get_names_flat(adtype)\n ('a', 'b', 'ba', 'bb')\n ",
"language": "en",
"n_whitespaces": 131,
"n_words": 72,
"vocab_size": 59
} | def get_names_flat(adtype):
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names is not None:
listnames.extend(get_names_flat(current))
return tuple(listnames)
|
|
39,623 | 164,932 | 512 | pandas/tests/plotting/frame/test_frame.py | 124 | 31 | def test_memory_leak(self):
import gc
import weakref
results = {}
for kind in plotting.PlotAccessor._all_kinds:
args = {}
if kind in ["hexbin", "scatter", "pie"]:
df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange | TST: Clean tests/plotting (#45992) | test_memory_leak | 03fef5f0e35200aa5828975b62782bcf11faa0d2 | pandas | test_frame.py | 18 | 26 | https://github.com/pandas-dev/pandas.git | 5 | 184 | 0 | 92 | 323 | Python | {
"docstring": "Check that every plot type gets properly collected.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def test_memory_leak(self):
import gc
import weakref
results = {}
for kind in plotting.PlotAccessor._all_kinds:
args = {}
if kind in ["hexbin", "scatter", "pie"]:
df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
args = {"x": "A", "y": "B"}
elif kind == "area":
df = tm.makeTimeDataFrame().abs()
else:
df = tm.makeTimeDataFrame()
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
results[kind] = weakref.proxy(df.plot(kind=kind, **args))
# have matplotlib delete all the figures
tm.close()
# force a garbage collection
gc.collect()
msg = "weakly-referenced object no longer exists"
for key in results:
# check that every plot was collected
with pytest.raises(ReferenceError, match=msg):
# need to actually access something to get an error
results[key].lines
|
|
117,242 | 320,637 | 93 | qutebrowser/utils/utils.py | 39 | 6 | def disabled_excepthook() -> Iterator[None]:
old_excepthook = sys.excepthook
sys.excepthook = sys.__excepthook__
try:
yield
finally:
# If the code we did run did change sys.excepthook, we leave it
# unchanged. Otherwise, we reset it.
if sys.excepthook is sys.__excepthook__:
sys.excepthook = old_excepthook
| Update code for latest mypy | disabled_excepthook | 60de9523ba42d35dc2bf8e0ed5c1521ffbc9b7f5 | qutebrowser | utils.py | 12 | 9 | https://github.com/qutebrowser/qutebrowser.git | 3 | 41 | 0 | 29 | 73 | Python | {
"docstring": "Run code with the exception hook temporarily disabled.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def disabled_excepthook() -> Iterator[None]:
old_excepthook = sys.excepthook
sys.excepthook = sys.__excepthook__
try:
yield
finally:
# If the code we did run did change sys.excepthook, we leave it
# unchanged. Otherwise, we reset it.
if sys.excepthook is sys.__excepthook__:
sys.excepthook = old_excepthook
|
|
36,057 | 154,536 | 120 | asv_bench/benchmarks/utils/common.py | 39 | 21 | def trigger_import(*dfs):
if ASV_USE_STORAGE_FORMAT != "hdk" or ASV_USE_IMPL == "pandas":
return
from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import (
DbWorker,
)
for df in dfs:
df.shape # to trigger real execution
df._query_compiler._modin_frame._partitions[0][
0
].frame_id = DbWorker().i | FEAT-#4946: Replace OmniSci with HDK (#4947)
Co-authored-by: Iaroslav Igoshev <[email protected]>
Signed-off-by: Andrey Pavlenko <[email protected]> | trigger_import | e5b1888cd932909e49194d58035da34b210b91c4 | modin | common.py | 17 | 13 | https://github.com/modin-project/modin.git | 4 | 86 | 0 | 33 | 134 | Python | {
"docstring": "\n Trigger import execution for DataFrames obtained by HDK engine.\n\n Parameters\n ----------\n *dfs : iterable\n DataFrames to trigger import.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 18,
"vocab_size": 17
} | def trigger_import(*dfs):
if ASV_USE_STORAGE_FORMAT != "hdk" or ASV_USE_IMPL == "pandas":
return
from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import (
DbWorker,
)
for df in dfs:
df.shape # to trigger real execution
df._query_compiler._modin_frame._partitions[0][
0
].frame_id = DbWorker().import_arrow_table(
df._query_compiler._modin_frame._partitions[0][0].get()
) # to trigger real execution
|
|
80,974 | 272,187 | 323 | keras/integration_test/forwardprop_test.py | 63 | 31 | def _jacfwd(f, primals):
jac_flat = []
flat_primals = tf.nest.flatten(primals)
tangent_mask = [tf.zeros_like(primal) for primal in flat_primals]
for primal_index, primal in enumerate(flat_primals):
primal_vector = tf.reshape(primal, [-1])
primal_vector_length = tf.size(primal_vector)
jac_columns = []
for element_index in tf.range(primal_vector_length):
mask = tf.one_hot(element_index, primal | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _jacfwd | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | forwardprop_test.py | 19 | 24 | https://github.com/keras-team/keras.git | 4 | 196 | 0 | 46 | 299 | Python | {
"docstring": "Compute the jacobian of `f` at `primals` using forward-mode autodiff.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def _jacfwd(f, primals):
jac_flat = []
flat_primals = tf.nest.flatten(primals)
tangent_mask = [tf.zeros_like(primal) for primal in flat_primals]
for primal_index, primal in enumerate(flat_primals):
primal_vector = tf.reshape(primal, [-1])
primal_vector_length = tf.size(primal_vector)
jac_columns = []
for element_index in tf.range(primal_vector_length):
mask = tf.one_hot(element_index, primal_vector_length)
tangent_mask[primal_index] = tf.reshape(mask, tf.shape(primal))
jac_columns.append(
tf.nest.map_structure(
functools.partial(tf.reshape, shape=[-1]),
_jvp(
f,
primals,
tf.nest.pack_sequence_as(primals, tangent_mask),
)[1],
)
)
jac_flat.append(tf.stack(jac_columns, axis=1))
tangent_mask[primal_index] = tf.zeros_like(primal)
return tf.nest.pack_sequence_as(primals, jac_flat)
|
|
7,329 | 40,162 | 76 | dash/_callback_context.py | 30 | 11 | def record_timing(name, duration=None, description=None):
timing_information = getattr(flask.g, "timing_information", {})
if name in timing_information:
raise KeyError(f'Duplicate resource name "{name}" found.')
timing_information[name] = {"dur": round(duration * 1000), "desc": description}
setattr(flask.g, "timing_information", timing_information)
| f-strings everywhere! fffff | record_timing | c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c | dash | _callback_context.py | 11 | 6 | https://github.com/plotly/dash.git | 2 | 67 | 0 | 27 | 114 | Python | {
"docstring": "Records timing information for a server resource.\n\n :param name: The name of the resource.\n :type name: string\n\n :param duration: The time in seconds to report. Internally, this\n is rounded to the nearest millisecond.\n :type duration: float or None\n\n :param description: A description of the resource.\n :type description: string or None\n ",
"language": "en",
"n_whitespaces": 110,
"n_words": 50,
"vocab_size": 33
} | def record_timing(name, duration=None, description=None):
timing_information = getattr(flask.g, "timing_information", {})
if name in timing_information:
raise KeyError(f'Duplicate resource name "{name}" found.')
timing_information[name] = {"dur": round(duration * 1000), "desc": description}
setattr(flask.g, "timing_information", timing_information)
|
|
@register.tag(name="admin_actions") | 50,409 | 203,493 | 19 | django/contrib/admin/templatetags/admin_list.py | 11 | 6 | def admin_actions(context):
context["action_index"] = context.get("action_index", -1) + 1
return context
@register.tag(name="admin_ac | Refs #33476 -- Reformatted code with Black. | admin_actions | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | admin_list.py | 10 | 3 | https://github.com/django/django.git | 1 | 24 | 1 | 11 | 61 | Python | {
"docstring": "\n Track the number of times the action field has been rendered on the page,\n so we know which value to use.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 21,
"vocab_size": 19
} | def admin_actions(context):
context["action_index"] = context.get("action_index", -1) + 1
return context
@register.tag(name="admin_actions") |
93,309 | 294,272 | 458 | tests/components/hue/test_light_v2.py | 264 | 22 | async def test_lights(hass, mock_bridge_v2, v2_resources_test_data):
await mock_bridge_v2.api.load_test_data(v2_resources_test_data)
await setup_platform(hass, mock_bridge_v2, "light")
# there shouldn't have been any requests at this point
assert len(mock_bridge_v2.mock_requests) == 0
# 6 entities should be created from test data (grouped_lights are disabled by default)
assert len(hass.states.async_all()) == 6
# test light which supports color and color temperature
light_1 = hass.states.get("light.hue_light_with_color_and_color_temperature_1")
assert light_1 is not None
assert (
light_1.attributes["friendly_name"]
== "Hue light with color and color temperature 1"
)
assert light_1.state == "on"
assert light_1.attributes["brightness"] == int(46.85 / 100 * 255)
assert light_1.attributes["mode"] == "normal"
assert light_1.attributes["color_mode"] == COLOR_MODE_XY
assert set(light_1.attributes["supported_color_modes"]) == {
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_XY,
}
assert light_1.attributes["xy_color"] == (0.5614, 0.4058)
assert light_1.attributes["min_mireds"] == 153
assert light_1.attributes["max_mireds"] == 500
assert light_1.attributes["dynamics"] == "dynamic_palette"
assert light_1.attributes["effect_list"] == ["None", "candle", "fire"]
assert light_1.attributes["effect"] == "None"
# test light which supports color temperature only
light_2 = hass.states.get("light.hue_light_with_color_temperature_only")
assert light_2 is not None
assert (
light_2.attributes["friendly_name"] == "Hue light with color temperature only"
)
assert light_2.state == "off"
assert light_2.attributes["mode"] == "normal"
assert light_2.attributes["supported_color_modes"] == [COLOR_MODE_COLOR_TEMP]
assert light_2.attributes["min_mireds"] == 153
assert light_2.attributes["max_mireds"] == 454
assert light_2.attributes["dynamics"] == "none"
assert light_2.attributes["effect_list"] == ["None", "candle", "sunrise"]
# test light which supports color only
light_3 = hass.states.get("light.hue_light_with_color_only")
assert light_3 is not None
assert light_3.attributes["friendly_name"] == "Hue light with color only"
assert light_3.state == "on"
assert light_3.attributes["brightness"] == 128
assert light_3.attributes["mode"] == "normal"
assert light_3.attributes["supported_color_modes"] == [COLOR_MOD | Add effects feature to Hue lights (#68567) | test_lights | dbef90654f3693401a2df88fa00afbbffbdffcd2 | core | test_light_v2.py | 11 | 52 | https://github.com/home-assistant/core.git | 1 | 423 | 0 | 124 | 729 | Python | {
"docstring": "Test if all v2 lights get created with correct features.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | async def test_lights(hass, mock_bridge_v2, v2_resources_test_data):
await mock_bridge_v2.api.load_test_data(v2_resources_test_data)
await setup_platform(hass, mock_bridge_v2, "light")
# there shouldn't have been any requests at this point
assert len(mock_bridge_v2.mock_requests) == 0
# 6 entities should be created from test data (grouped_lights are disabled by default)
assert len(hass.states.async_all()) == 6
# test light which supports color and color temperature
light_1 = hass.states.get("light.hue_light_with_color_and_color_temperature_1")
assert light_1 is not None
assert (
light_1.attributes["friendly_name"]
== "Hue light with color and color temperature 1"
)
assert light_1.state == "on"
assert light_1.attributes["brightness"] == int(46.85 / 100 * 255)
assert light_1.attributes["mode"] == "normal"
assert light_1.attributes["color_mode"] == COLOR_MODE_XY
assert set(light_1.attributes["supported_color_modes"]) == {
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_XY,
}
assert light_1.attributes["xy_color"] == (0.5614, 0.4058)
assert light_1.attributes["min_mireds"] == 153
assert light_1.attributes["max_mireds"] == 500
assert light_1.attributes["dynamics"] == "dynamic_palette"
assert light_1.attributes["effect_list"] == ["None", "candle", "fire"]
assert light_1.attributes["effect"] == "None"
# test light which supports color temperature only
light_2 = hass.states.get("light.hue_light_with_color_temperature_only")
assert light_2 is not None
assert (
light_2.attributes["friendly_name"] == "Hue light with color temperature only"
)
assert light_2.state == "off"
assert light_2.attributes["mode"] == "normal"
assert light_2.attributes["supported_color_modes"] == [COLOR_MODE_COLOR_TEMP]
assert light_2.attributes["min_mireds"] == 153
assert light_2.attributes["max_mireds"] == 454
assert light_2.attributes["dynamics"] == "none"
assert light_2.attributes["effect_list"] == ["None", "candle", "sunrise"]
# test light which supports color only
light_3 = hass.states.get("light.hue_light_with_color_only")
assert light_3 is not None
assert light_3.attributes["friendly_name"] == "Hue light with color only"
assert light_3.state == "on"
assert light_3.attributes["brightness"] == 128
assert light_3.attributes["mode"] == "normal"
assert light_3.attributes["supported_color_modes"] == [COLOR_MODE_XY]
assert light_3.attributes["color_mode"] == COLOR_MODE_XY
assert light_3.attributes["dynamics"] == "dynamic_palette"
# test light which supports on/off only
light_4 = hass.states.get("light.hue_on_off_light")
assert light_4 is not None
assert light_4.attributes["friendly_name"] == "Hue on/off light"
assert light_4.state == "off"
assert light_4.attributes["mode"] == "normal"
assert light_4.attributes["supported_color_modes"] == []
|
|
47,233 | 195,246 | 102 | projects/bb3/holistic_bias/scripts/eval_175b_model.py | 29 | 6 | def setup_data(self, path):
for message, new_episode in super().setup_data(path):
assert (
message['text'] == '__SILENCE__'
), 'The expected original context string is not found!'
message['text'] = 'Person | Patch 8322 (#4709)
* add dafetymix teacher
* safety_mix teacher
* safety_mix teacher pos and neg teachers
* add tests for teacher
* add license info
* improvement
* add task list
* add task list and lint
* add init.py
* adding some patch to director
* seeker changes
* th
* 3
* jing
* changes
* z and r
* remove .opts
* fix docs
* add contrractions
* lint
Co-authored-by: Dexter Ju <[email protected]>
Co-authored-by: Jing Xu <[email protected]> | setup_data | b1acb681207559da56a787ba96e16f0e23697d92 | ParlAI | eval_175b_model.py | 11 | 7 | https://github.com/facebookresearch/ParlAI.git | 2 | 43 | 0 | 26 | 78 | Python | {
"docstring": "\n Modify each output message to add in an OPT-compatible context string.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 11
} | def setup_data(self, path):
for message, new_episode in super().setup_data(path):
assert (
message['text'] == '__SILENCE__'
), 'The expected original context string is not found!'
message['text'] = 'Person 1:'
yield message, new_episode
|
|
20,890 | 101,477 | 138 | scripts/extract.py | 40 | 20 | def _get_input_locations(self) -> List[str]:
if not self._args.batch_mode or os.path.isfile(self._args.input_dir):
return [self._args.input_dir] # Not batch mode or a single file
retval = [os.path.join(self._args.input_dir, fname)
for fname in os.listdir(self._args.input_dir)
if os.path.isdir(os.path.join(self._args.input_dir, fname))
or os.p | extract: Add batch processing mode | _get_input_locations | 13cfb3f39e72e9ca181f173b7b3db2a048db0d08 | faceswap | extract.py | 16 | 17 | https://github.com/deepfakes/faceswap.git | 6 | 122 | 0 | 34 | 192 | Python | {
"docstring": " Obtain the full path to input locations. Will be a list of locations if batch mode is\n selected, or a containing a single location if batch mode is not selected.\n\n Returns\n -------\n list:\n The list of input location paths\n ",
"language": "en",
"n_whitespaces": 86,
"n_words": 39,
"vocab_size": 29
} | def _get_input_locations(self) -> List[str]:
if not self._args.batch_mode or os.path.isfile(self._args.input_dir):
return [self._args.input_dir] # Not batch mode or a single file
retval = [os.path.join(self._args.input_dir, fname)
for fname in os.listdir(self._args.input_dir)
if os.path.isdir(os.path.join(self._args.input_dir, fname))
or os.path.splitext(fname)[-1].lower() in _video_extensions]
logger.debug("Input locations: %s", retval)
return retval
|
|
13,801 | 65,129 | 22 | erpnext/accounts/party.py | 32 | 11 | def get_party_gle_currency(party_type, party, company):
def generator():
existing_gle_currency = frappe.db.sql(
,
{"company": company, "party_type": party_type, "party": party},
)
return existing_gle_currency[0][0] if existing_gle_currency else None
return frappe.local_cache(
"party_gle_currency", (party_type, pa | style: format code with black | get_party_gle_currency | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | party.py | 13 | 5 | https://github.com/frappe/erpnext.git | 1 | 32 | 0 | 27 | 109 | Python | {
"docstring": "select account_currency from `tabGL Entry`\n\t\t\twhere docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s\n\t\t\tlimit 1",
"language": "en",
"n_whitespaces": 12,
"n_words": 15,
"vocab_size": 13
} | def get_party_gle_currency(party_type, party, company):
def generator():
existing_gle_currency = frappe.db.sql(
,
{"company": company, "party_type": party_type, "party": party},
)
return existing_gle_currency[0][0] if existing_gle_currency else None
return frappe.local_cache(
"party_gle_currency", (party_type, party, company), generator, regenerate_if_none=True
)
|
|
75,348 | 258,644 | 610 | sklearn/datasets/_base.py | 125 | 21 | def load_breast_cancer(*, return_X_y=False, as_frame=False):
data_file_name = "breast_cancer.csv"
data, target, target_names, fdescr = load_csv_data(
data_file_name=data_file_name, descr_file_name="breast_cancer.rst"
)
feature_names = np.array(
[
"mean radius",
"mean texture",
"mean perimeter",
"mean area",
"mean smoothness",
"mean compactness",
"mean concavity",
"mean concave points",
"mean symmetry",
"mean fractal dimension",
"radius error",
"texture error",
"perimeter error",
"area error",
"smoothness error",
"compactness error",
"concavity error",
"concave points error",
"symmetry error",
"fractal dimension error",
"worst radius",
"worst texture",
"worst perimeter",
"worst area",
| DOC Ensures that sklearn.datasets._base.load_breast_cancer passes numpydoc validation (#22346)
Co-authored-by: Guillaume Lemaitre <[email protected]>
Co-authored-by: Arturo Amor <[email protected]> | load_breast_cancer | a793c1f0ad7dd63b2a896d2e84087089a11e7fca | scikit-learn | _base.py | 11 | 59 | https://github.com/scikit-learn/scikit-learn.git | 3 | 177 | 0 | 68 | 297 | Python | {
"docstring": "Load and return the breast cancer wisconsin dataset (classification).\n\n The breast cancer dataset is a classic and very easy binary classification\n dataset.\n\n ================= ==============\n Classes 2\n Samples per class 212(M),357(B)\n Samples total 569\n Dimensionality 30\n Features real, positive\n ================= ==============\n\n The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is\n downloaded from:\n https://goo.gl/U2Uwz2\n\n Read more in the :ref:`User Guide <breast_cancer_dataset>`.\n\n Parameters\n ----------\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object.\n See below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.18\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric). The target is\n a pandas DataFrame or Series depending on the number of target columns.\n If `return_X_y` is True, then (`data`, `target`) will be pandas\n DataFrames or Series as described below.\n\n .. versionadded:: 0.23\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (569, 30)\n The data matrix. If `as_frame=True`, `data` will be a pandas\n DataFrame.\n target : {ndarray, Series} of shape (569,)\n The classification target. If `as_frame=True`, `target` will be\n a pandas Series.\n feature_names : list\n The names of the dataset columns.\n target_names : list\n The names of target classes.\n frame : DataFrame of shape (569, 31)\n Only present when `as_frame=True`. DataFrame with `data` and\n `target`.\n\n .. versionadded:: 0.23\n DESCR : str\n The full description of the dataset.\n filename : str\n The path to the location of the data.\n\n .. versionadded:: 0.20\n\n (data, target) : tuple if ``return_X_y`` is True\n A tuple of two ndarrays by default. The first contains a 2D ndarray of\n shape (569, 30) with each row representing one sample and each column\n representing the features. The second ndarray of shape (569,) contains\n the target samples. If `as_frame=True`, both arrays are pandas objects,\n i.e. `X` a dataframe and `y` a series.\n\n .. versionadded:: 0.18\n\n Examples\n --------\n Let's say you are interested in the samples 10, 50, and 85, and want to\n know their class name.\n\n >>> from sklearn.datasets import load_breast_cancer\n >>> data = load_breast_cancer()\n >>> data.target[[10, 50, 85]]\n array([0, 1, 0])\n >>> list(data.target_names)\n ['malignant', 'benign']\n ",
"language": "en",
"n_whitespaces": 823,
"n_words": 356,
"vocab_size": 205
} | def load_breast_cancer(*, return_X_y=False, as_frame=False):
data_file_name = "breast_cancer.csv"
data, target, target_names, fdescr = load_csv_data(
data_file_name=data_file_name, descr_file_name="breast_cancer.rst"
)
feature_names = np.array(
[
"mean radius",
"mean texture",
"mean perimeter",
"mean area",
"mean smoothness",
"mean compactness",
"mean concavity",
"mean concave points",
"mean symmetry",
"mean fractal dimension",
"radius error",
"texture error",
"perimeter error",
"area error",
"smoothness error",
"compactness error",
"concavity error",
"concave points error",
"symmetry error",
"fractal dimension error",
"worst radius",
"worst texture",
"worst perimeter",
"worst area",
"worst smoothness",
"worst compactness",
"worst concavity",
"worst concave points",
"worst symmetry",
"worst fractal dimension",
]
)
frame = None
target_columns = [
"target",
]
if as_frame:
frame, data, target = _convert_data_dataframe(
"load_breast_cancer", data, target, feature_names, target_columns
)
if return_X_y:
return data, target
return Bunch(
data=data,
target=target,
frame=frame,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names,
filename=data_file_name,
data_module=DATA_MODULE,
)
|
|
3,307 | 20,279 | 44 | pipenv/patched/notpip/_vendor/pygments/filters/__init__.py | 18 | 6 | def get_filter_by_name(filtername, **options):
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filte | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | get_filter_by_name | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | __init__.py | 12 | 6 | https://github.com/pypa/pipenv.git | 2 | 33 | 0 | 18 | 59 | Python | {
"docstring": "Return an instantiated filter.\n\n Options are passed to the filter initializer if wanted.\n Raise a ClassNotFound if not found.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 19,
"vocab_size": 18
} | def get_filter_by_name(filtername, **options):
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
|
|
10,081 | 50,301 | 23 | modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/resnet.py | 14 | 5 | def wide_resnet50_2(pretrained=False, **kwargs):
kwargs['width'] = 64 * 2
return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs)
| add disco_diffusion_ernievil_base | wide_resnet50_2 | ffcde21305c61d950a9f93e57e6180c9a9665b87 | PaddleHub | resnet.py | 8 | 3 | https://github.com/PaddlePaddle/PaddleHub.git | 1 | 33 | 0 | 14 | 54 | Python | {
"docstring": "Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.vision.models import wide_resnet50_2\n\n # build model\n model = wide_resnet50_2()\n\n # build model and load imagenet pretrained weight\n # model = wide_resnet50_2(pretrained=True)\n\n x = paddle.rand([1, 3, 224, 224])\n out = model(x)\n\n print(out.shape)\n ",
"language": "en",
"n_whitespaces": 182,
"n_words": 57,
"vocab_size": 43
} | def wide_resnet50_2(pretrained=False, **kwargs):
kwargs['width'] = 64 * 2
return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs)
|
|
71,110 | 246,216 | 54 | tests/rest/admin/test_username_available.py | 19 | 14 | def test_username_available(self) -> None:
url = "%s?username=%s" % (self.url, "allowed")
channel = self.make_request("GET", url, access_token=self.admin_user_tok)
self.asser | Add type hints to `tests/rest/admin` (#11851) | test_username_available | 901b264c0c88f39cbfb8b2229e0dc57968882658 | synapse | test_username_available.py | 10 | 8 | https://github.com/matrix-org/synapse.git | 1 | 64 | 0 | 18 | 106 | Python | {
"docstring": "\n The endpoint should return a HTTPStatus.OK response if the username does not exist\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 13
} | def test_username_available(self) -> None:
url = "%s?username=%s" % (self.url, "allowed")
channel = self.make_request("GET", url, access_token=self.admin_user_tok)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertTrue(channel.json_body["available"])
|
|
113,349 | 314,745 | 151 | tests/helpers/test_entityfilter.py | 84 | 9 | def test_with_include_glob_filtering_case4a_include_strong():
incl_dom = {}
incl_glob = {"*working"}
incl_ent = {"binary_sensor.specificly_included"}
excl_dom = {}
excl_glob = {"*broken", "*notworking", "binary_sensor.*"}
excl_ent = {"light.ignoreme"}
testfilter = generate_filter(
incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob
)
assert testfilter("sensor.working") is True
assert testfilter("sensor.notworking") is True # include is stronger
assert testfilter("sensor.broken") is False
assert testfilter("light.test") is False
assert testfilter("light.notworking") is True # include is stronger
assert testfilter("light.ignoreme") is False
assert testfilter("binary_sensor.not_working") is True # include is stronger
assert t | Adjust entity filters to make includes stronger than excludes (#74080)
* Adjust entity filters to make includes stronger than excludes
Fixes #59080
* adjust test for stronger entity glob includes
* sync with docs | test_with_include_glob_filtering_case4a_include_strong | a8349a4866d22cddbca9ac9367d4affae39a8325 | core | test_entityfilter.py | 9 | 20 | https://github.com/home-assistant/core.git | 1 | 123 | 0 | 41 | 227 | Python | {
"docstring": "Test case 4 - include and exclude specified, both have globs, and a specifically included entity.",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 15
} | def test_with_include_glob_filtering_case4a_include_strong():
incl_dom = {}
incl_glob = {"*working"}
incl_ent = {"binary_sensor.specificly_included"}
excl_dom = {}
excl_glob = {"*broken", "*notworking", "binary_sensor.*"}
excl_ent = {"light.ignoreme"}
testfilter = generate_filter(
incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob
)
assert testfilter("sensor.working") is True
assert testfilter("sensor.notworking") is True # include is stronger
assert testfilter("sensor.broken") is False
assert testfilter("light.test") is False
assert testfilter("light.notworking") is True # include is stronger
assert testfilter("light.ignoreme") is False
assert testfilter("binary_sensor.not_working") is True # include is stronger
assert testfilter("binary_sensor.another") is False
assert testfilter("binary_sensor.specificly_included") is True
assert testfilter("sun.sun") is False
|
|
@frappe.whitelist() | 14,183 | 66,418 | 67 | erpnext/manufacturing/doctype/production_plan/production_plan.py | 93 | 20 | def get_sales_orders(self):
so_filter = item_filter = ""
bom_item = "bom.item = so_item.item_code"
date_field_mapper = {
"from_date": (">=", "so.transaction_date"),
"to_date": ("<=", "so.transaction_date"),
"from_delivery_date": (">=", "so_item.delivery_date"),
"to_delivery_date": ("<=", "so_item.delivery_date"),
}
for | style: format code with black | get_sales_orders | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | production_plan.py | 13 | 38 | https://github.com/frappe/erpnext.git | 9 | 158 | 1 | 59 | 329 | Python | {
"docstring": "\n\t\tselect distinct so.name, so.transaction_date, so.customer, so.base_grand_total\n\t\tfrom `tabSales Order` so, `tabSales Order Item` so_item\n\t\twhere so_item.parent = so.name\n\t\t\tand so.docstatus = 1 and so.status not in (\"Stopped\", \"Closed\")\n\t\t\tand so.company = %(company)s\n\t\t\tand so_item.qty > so_item.work_order_qty {so_filter} {item_filter}\n\t\t\tand (exists (select name from `tabBOM` bom where {bom_item}\n\t\t\t\t\tand bom.is_active = 1)\n\t\t\t\tor exists (select name from `tabPacked Item` pi\n\t\t\t\t\twhere pi.parent = so.name and pi.parent_item = so_item.item_code\n\t\t\t\t\t\tand exists (select name from `tabBOM` bom where bom.item=pi.item_code\n\t\t\t\t\t\t\tand bom.is_active = 1)))\n\t\t",
"language": "en",
"n_whitespaces": 68,
"n_words": 80,
"vocab_size": 49
} | def get_sales_orders(self):
so_filter = item_filter = ""
bom_item = "bom.item = so_item.item_code"
date_field_mapper = {
"from_date": (">=", "so.transaction_date"),
"to_date": ("<=", "so.transaction_date"),
"from_delivery_date": (">=", "so_item.delivery_date"),
"to_delivery_date": ("<=", "so_item.delivery_date"),
}
for field, value in date_field_mapper.items():
if self.get(field):
so_filter += f" and {value[1]} {value[0]} %({field})s"
for field in ["customer", "project", "sales_order_status"]:
if self.get(field):
so_field = "status" if field == "sales_order_status" else field
so_filter += f" and so.{so_field} = %({field})s"
if self.item_code and frappe.db.exists("Item", self.item_code):
bom_item = self.get_bom_item() or bom_item
item_filter += " and so_item.item_code = %(item_code)s"
open_so = frappe.db.sql(
f,
self.as_dict(),
as_dict=1,
)
return open_so
@frappe.whitelist() |
16,251 | 74,366 | 39 | wagtail/core/tests/test_page_model.py | 11 | 9 | def test_copy_published_emits_signal(self):
christmas_page = EventPage.objects.get(url_path="/h | Reformat with black | test_copy_published_emits_signal | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_page_model.py | 10 | 11 | https://github.com/wagtail/wagtail.git | 1 | 65 | 0 | 9 | 44 | Python | {
"docstring": "Test that copying of a published page emits a page_published signal.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | def test_copy_published_emits_signal(self):
christmas_page = EventPage.objects.get(url_path="/home/events/christmas/")
signal_fired = False
signal_page = None
|
|
6,971 | 38,418 | 98 | utils/tests_fetcher.py | 55 | 17 | def get_all_tests():
test_root_dir = os.path.join(PATH_TO_TRANFORMERS, "tests")
# test folders/files directly under `tests` folder
tests = os.listdir(test_root_dir)
tests = sorted(
list(filter(lambda x: os.path.isdir(x) or x.startswith("tests/test_"), [f"tests/{x}" for x in tests]))
)
# model specific test folders
model_tests_folders = os.listdir(os.path.join(test_root_dir, "models"))
model_test_folders = sorted(list(filter(os.path.isdir, [f"tests/models/{x}" for x in model_tests | Update self-push workflow (#17177)
* update push ci
* install git-python
* update comment
* update deepspeed jobs
* fix report
* skip 2 more tests that require fairscale
* Fix changes in test_fetcher.py (to deal with `setup.py` is changed)
* set RUN_PT_TF_CROSS_TESTS=1 and final clean-up
* remove SIGOPT_API_TOKEN
* remove echo "$matrix_folders"
Co-authored-by: ydshieh <[email protected]> | get_all_tests | 38043d8453b82a9c712f8d5c98323150fbee7503 | transformers | tests_fetcher.py | 17 | 11 | https://github.com/huggingface/transformers.git | 4 | 118 | 0 | 40 | 205 | Python | {
"docstring": "\n Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`.\n\n - folders under `tests`: `tokenization`, `pipelines`, etc. The folder `models` is excluded.\n - folders under `tests/models`: `bert`, `gpt2`, etc.\n - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.\n ",
"language": "en",
"n_whitespaces": 62,
"n_words": 46,
"vocab_size": 32
} | def get_all_tests():
test_root_dir = os.path.join(PATH_TO_TRANFORMERS, "tests")
# test folders/files directly under `tests` folder
tests = os.listdir(test_root_dir)
tests = sorted(
list(filter(lambda x: os.path.isdir(x) or x.startswith("tests/test_"), [f"tests/{x}" for x in tests]))
)
# model specific test folders
model_tests_folders = os.listdir(os.path.join(test_root_dir, "models"))
model_test_folders = sorted(list(filter(os.path.isdir, [f"tests/models/{x}" for x in model_tests_folders])))
tests.remove("tests/models")
tests = model_test_folders + tests
return tests
|
|
30,090 | 133,739 | 433 | rllib/agents/impala/tests/test_vtrace.py | 150 | 30 | def test_vtrace(self):
seq_len = 5
batch_size = 10
# Create log_rhos such that rho will span from near-zero to above the
# clipping thresholds. In particular, calculate log_rhos in
# [-2.5, 2.5),
# so that rho is in approx [0.08, 12.2).
space_w_time | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | test_vtrace | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_vtrace.py | 15 | 25 | https://github.com/ray-project/ray.git | 6 | 230 | 0 | 109 | 344 | Python | {
"docstring": "Tests V-trace against ground truth data calculated in python.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_vtrace(self):
seq_len = 5
batch_size = 10
# Create log_rhos such that rho will span from near-zero to above the
# clipping thresholds. In particular, calculate log_rhos in
# [-2.5, 2.5),
# so that rho is in approx [0.08, 12.2).
space_w_time = Box(-1.0, 1.0, (seq_len, batch_size), np.float32)
space_only_batch = Box(-1.0, 1.0, (batch_size,), np.float32)
log_rhos = space_w_time.sample() / (batch_size * seq_len)
log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5).
values = {
"log_rhos": log_rhos,
# T, B where B_i: [0.9 / (i+1)] * T
"discounts": np.array(
[[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)]
),
"rewards": space_w_time.sample(),
"values": space_w_time.sample() / batch_size,
"bootstrap_value": space_only_batch.sample() + 1.0,
"clip_rho_threshold": 3.7,
"clip_pg_rho_threshold": 2.2,
}
for fw, sess in framework_iterator(frameworks=("torch", "tf"), session=True):
vtrace = vtrace_tf if fw != "torch" else vtrace_torch
output = vtrace.from_importance_weights(**values)
if sess:
output = sess.run(output)
ground_truth_v = _ground_truth_calculation(vtrace, **values)
check(output, ground_truth_v)
|
|
52,821 | 209,854 | 205 | scapy/base_classes.py | 45 | 24 | def pdfdump(self, filename=None, **kargs):
# type: (Optional[str], **Any) -> None
from scapy.config import conf
from scapy.utils import get_temp_file, ContextManagerSubprocess
canvas = self.canvas_dump(**kargs)
if filename is None:
fname = get_temp_file(autoext=kargs.get("suffix", ".pdf"))
canvas.writePDFfile(fname)
if WINDOWS and not conf.prog.pdfreader:
os.startfile(fname)
else:
with ContextManagerSubprocess(conf.prog.pdfreader):
subprocess.Popen([conf.prog.pdfreader, fname])
else:
canvas.writePDFfile(filename)
print | [Hinty] Core typing: windows (#3684)
* Core typing: windows
Co-authored-by: Pierre <[email protected]> | pdfdump | a2b7a28faff1db058dd22ce097a268e0ad5d1d33 | scapy | base_classes.py | 17 | 15 | https://github.com/secdev/scapy.git | 4 | 115 | 0 | 40 | 193 | Python | {
"docstring": "\n pdfdump(filename=None, layer_shift=0, rebuild=1)\n\n Creates a PDF file describing a packet. If filename is not provided a\n temporary file is created and xpdf is called.\n\n :param filename: the file's filename\n ",
"language": "en",
"n_whitespaces": 65,
"n_words": 29,
"vocab_size": 23
} | def pdfdump(self, filename=None, **kargs):
# type: (Optional[str], **Any) -> None
from scapy.config import conf
from scapy.utils import get_temp_file, ContextManagerSubprocess
canvas = self.canvas_dump(**kargs)
if filename is None:
fname = get_temp_file(autoext=kargs.get("suffix", ".pdf"))
canvas.writePDFfile(fname)
if WINDOWS and not conf.prog.pdfreader:
os.startfile(fname)
else:
with ContextManagerSubprocess(conf.prog.pdfreader):
subprocess.Popen([conf.prog.pdfreader, fname])
else:
canvas.writePDFfile(filename)
print()
|
|
118,072 | 322,143 | 190 | examples/dependency_parsing/ddparser/utils.py | 105 | 22 | def index_sample(x, index):
x_s = x.shape
dim = len(index.shape) - 1
assert x_s[:dim] == index.shape[:dim]
if len(x_s) == 3 and dim == 1:
r_x = paddle.reshape(x, shape=[-1, x_s[1], x_s[-1]])
else:
r_x = paddle.reshape(x, shape=[-1, x_s[-1]])
index = paddle.reshape(index, shape=[len(r_x), -1, 1])
# Generate arange index, shape like index
| Update neural search readme and Add Paddle Serving Support (#1558)
* add recall inference similarity
* update examples
* updatea readme
* update dir name
* update neural search readme
* update milvus readme
* update domain adaptive pretraining readme
* fix the mistakes
* update readme
* add recall Paddle Serving Support
* update readme
* update readme and format the code
* reformat the files
* move the files
* reformat the code
* remove redundant code
Co-authored-by: Zeyu Chen <[email protected]>
Co-authored-by: tianxin <[email protected]> | index_sample | 621357338437ee420eabbbf5ab19065bc85e73a5 | PaddleNLP | utils.py | 15 | 20 | https://github.com/PaddlePaddle/PaddleNLP.git | 5 | 272 | 0 | 64 | 410 | Python | {
"docstring": "\n Select input value according to index\n \n Arags:\n input: input matrix\n index: index matrix\n Returns:\n output\n >>> input\n [\n [1, 2, 3],\n [4, 5, 6]\n ]\n >>> index\n [\n [1, 2],\n [0, 1]\n ]\n >>> index_sample(input, index)\n [\n [2, 3],\n [4, 5]\n ]\n ",
"language": "en",
"n_whitespaces": 149,
"n_words": 42,
"vocab_size": 28
} | def index_sample(x, index):
x_s = x.shape
dim = len(index.shape) - 1
assert x_s[:dim] == index.shape[:dim]
if len(x_s) == 3 and dim == 1:
r_x = paddle.reshape(x, shape=[-1, x_s[1], x_s[-1]])
else:
r_x = paddle.reshape(x, shape=[-1, x_s[-1]])
index = paddle.reshape(index, shape=[len(r_x), -1, 1])
# Generate arange index, shape like index
arr_index = paddle.arange(start=0, end=len(index), dtype=index.dtype)
arr_index = paddle.unsqueeze(arr_index, axis=[1, 2])
arr_index = paddle.expand(arr_index, index.shape)
    # Generate new index
new_index = paddle.concat((arr_index, index), -1)
new_index = paddle.reshape(new_index, (-1, 2))
# Get output
out = paddle.gather_nd(r_x, new_index)
if len(x_s) == 3 and dim == 2:
out = paddle.reshape(out, shape=[x_s[0], x_s[1], -1])
else:
out = paddle.reshape(out, shape=[x_s[0], -1])
return out
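For readers without Paddle installed, the 2-D case from the docstring above can be reproduced with NumPy's take_along_axis; this is an illustrative equivalent, not part of the PaddleNLP source.

import numpy as np

# Per-row gather: pick, for each row of x, the columns named by index.
x = np.array([[1, 2, 3],
              [4, 5, 6]])
index = np.array([[1, 2],
                  [0, 1]])
print(np.take_along_axis(x, index, axis=-1))  # [[2 3]
                                              #  [4 5]]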
|
|
48,334 | 197,094 | 923 | sympy/diffgeom/diffgeom.py | 188 | 50 | def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):
if not isinstance(name, Str):
name = Str(name)
        # canonicalize the symbols
if symbols is None:
names = kwargs.get('names', None)
if names is None:
symbols = Tuple(
*[Symbol('%s_%s' % (name.name, i), real=True)
for i in range(patch.dim)]
)
else:
sympy_deprecation_warning(
f,
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
symbols = Tuple(
*[Symbol(n, real=True) for n in names]
| Update the sympy.diffgeom mutability deprecations | __new__ | f8674bfe4988332e7ce60ceb36b365ce9aff662a | sympy | diffgeom.py | 21 | 73 | https://github.com/sympy/sympy.git | 15 | 399 | 0 | 109 | 681 | Python | {
"docstring": "\nThe 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That\nis, replace\n\n CoordSystem(..., names={names})\n\nwith\n\n CoordSystem(..., symbols=[{', '.join([\"Symbol(\" + repr(n) + \", real=True)\" for n in names])}])\n \n\nPassing a string as the coordinate symbol name to CoordSystem is deprecated.\nPass a Symbol with the appropriate name and assumptions instead.\n\nThat is, replace {s} with Symbol({s!r}, real=True).\n \n CoordSystem.transforms is deprecated. The CoordSystem class is now\n immutable. Use the 'relations' keyword argument to the\n CoordSystems() constructor to specify relations.\n ",
"language": "en",
"n_whitespaces": 167,
"n_words": 78,
"vocab_size": 52
} | def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):
if not isinstance(name, Str):
name = Str(name)
        # canonicalize the symbols
if symbols is None:
names = kwargs.get('names', None)
if names is None:
symbols = Tuple(
*[Symbol('%s_%s' % (name.name, i), real=True)
for i in range(patch.dim)]
)
else:
sympy_deprecation_warning(
f,
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
symbols = Tuple(
*[Symbol(n, real=True) for n in names]
)
else:
syms = []
for s in symbols:
if isinstance(s, Symbol):
syms.append(Symbol(s.name, **s._assumptions.generator))
elif isinstance(s, str):
sympy_deprecation_warning(
f,
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
syms.append(Symbol(s, real=True))
symbols = Tuple(*syms)
        # canonicalize the relations
rel_temp = {}
for k,v in relations.items():
s1, s2 = k
if not isinstance(s1, Str):
s1 = Str(s1)
if not isinstance(s2, Str):
s2 = Str(s2)
key = Tuple(s1, s2)
# Old version used Lambda as a value.
if isinstance(v, Lambda):
v = (tuple(v.signature), tuple(v.expr))
else:
v = (tuple(v[0]), tuple(v[1]))
rel_temp[key] = v
relations = Dict(rel_temp)
# construct the object
obj = super().__new__(cls, name, patch, symbols, relations)
# Add deprecated attributes
obj.transforms = _deprecated_dict(
, {})
obj._names = [str(n) for n in symbols]
obj.patch.coord_systems.append(obj) # deprecated
obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated
obj._dummy = Dummy()
return obj
|
|
117,222 | 320,610 | 99 | qutebrowser/completion/models/miscmodels.py | 43 | 12 | def tab_focus(*, info):
m | Fixes qutebrowser/qutebrowser#6967 by adding win id param in _tabs & using it in delete_tabs
As delete_tab was assuming that completion column contains window ID, it was showing
exception in case of tab-focus, as it doesn't have the window ID in completion column.
So instead a new parameter named current_win_id is used in _tabs which is also passed
in all uses of the function. | tab_focus | 57155e329ada002245ab3fac45d906f6707c14cf | qutebrowser | miscmodels.py | 12 | 10 | https://github.com/qutebrowser/qutebrowser.git | 1 | 70 | 0 | 34 | 118 | Python | {
"docstring": "A model to complete on open tabs in the current window.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def tab_focus(*, info):
model = _tabs(win_id_filter=lambda win_id: win_id == info.win_id,
add_win_id=False, current_win_id=info.win_id)
special = [
("last", "Focus the last-focused tab"),
("stack-next", "Go forward through a stack of focused tabs"),
("stack-prev", "Go backward through a stack of focused tabs"),
]
model.add_category(listcategory.ListCategory("Special", special))
return model
|
|
14,907 | 68,836 | 49 | erpnext/accounts/report/sales_payment_summary/sales_payment_summary.py | 80 | 22 | def get_mode_of_payment_details(filters):
mode_of_payment_details = {}
invoice_list = get_invoices(filters)
invoice_list_names = ",".join("'" + invoice["name"] + "'" for invoice in invoice_list)
if invoice_list:
inv_mop_detail = frappe.db.sq | refactor: DB independent quoting and truthy/falsy values (#31358)
* refactor: DB independent quoting and truthy/falsy values
* style: reformat to black spec
* fix: ifnull -> coalesce
* fix: coalesce -> Coalesce
* fix: revert pypika comparison
* refactor: convert queries to QB
* fix: incorrect value types for query
`=` query makes no sense with list of values
* fix: remove warehouse docstatus condition
* fix: keep using base rate as rate
Co-authored-by: Ankush Menat <[email protected]> | get_mode_of_payment_details | 74a782d81d8f8c4a4d9214a9c06377e5e6e464dd | erpnext | sales_payment_summary.py | 18 | 71 | https://github.com/frappe/erpnext.git | 9 | 181 | 0 | 50 | 304 | Python | {
"docstring": "\n\t\t\tselect t.owner,\n\t\t\t t.posting_date,\n\t\t\t\t t.mode_of_payment,\n\t\t\t\t sum(t.paid_amount) as paid_amount\n\t\t\tfrom (\n\t\t\t\tselect a.owner, a.posting_date,\n\t\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(b.base_amount) as paid_amount\n\t\t\t\tfrom `tabSales Invoice` a, `tabSales Invoice Payment` b\n\t\t\t\twhere a.name = b.parent\n\t\t\t\tand a.docstatus = 1\n\t\t\t\tand a.name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t\tunion\n\t\t\t\tselect a.owner,a.posting_date,\n\t\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(c.allocated_amount) as paid_amount\n\t\t\t\tfrom `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c\n\t\t\t\twhere a.name = c.reference_name\n\t\t\t\tand b.name = c.parent\n\t\t\t\tand b.docstatus = 1\n\t\t\t\tand a.name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t\tunion\n\t\t\t\tselect a.owner, a.posting_date,\n\t\t\t\tifnull(a.voucher_type,'') as mode_of_payment, sum(b.credit)\n\t\t\t\tfrom `tabJournal Entry` a, `tabJournal Entry Account` b\n\t\t\t\twhere a.name = b.parent\n\t\t\t\tand a.docstatus = 1\n\t\t\t\tand b.reference_type = 'Sales Invoice'\n\t\t\t\tand b.reference_name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t) t\n\t\t\tgroup by t.owner, t.posting_date, t.mode_of_payment\n\t\t\tselect a.owner, a.posting_date,\n\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(a.base_change_amount) as change_amount\n\t\t\tfrom `tabSales Invoice` a, `tabSales Invoice Payment` b\n\t\t\twhere a.name = b.parent\n\t\t\tand a.name in ({invoice_list_names})\n\t\t\tand b.type = 'Cash'\n\t\t\tand a.base_change_amount > 0\n\t\t\tgroup by a.owner, a.posting_date, mode_of_payment",
"language": "en",
"n_whitespaces": 142,
"n_words": 169,
"vocab_size": 64
} | def get_mode_of_payment_details(filters):
mode_of_payment_details = {}
invoice_list = get_invoices(filters)
invoice_list_names = ",".join("'" + invoice["name"] + "'" for invoice in invoice_list)
if invoice_list:
inv_mop_detail = frappe.db.sql(
.format(
invoice_list_names=invoice_list_names
),
as_dict=1,
)
inv_change_amount = frappe.db.sql(
.format(
invoice_list_names=invoice_list_names
),
as_dict=1,
)
for d in inv_change_amount:
for det in inv_mop_detail:
if (
det["owner"] == d["owner"]
and det["posting_date"] == d["posting_date"]
and det["mode_of_payment"] == d["mode_of_payment"]
):
paid_amount = det["paid_amount"] - d["change_amount"]
det["paid_amount"] = paid_amount
for d in inv_mop_detail:
mode_of_payment_details.setdefault(d["owner"] + cstr(d["posting_date"]), []).append(
(d.mode_of_payment, d.paid_amount)
)
return mode_of_payment_details
|
|
78,013 | 265,164 | 207 | netbox/extras/models/configcontexts.py | 85 | 12 | def get_config_context(self):
data = {}
if not hasattr(self, 'config_context_data'):
# The annotation is not available, so we fall back to manually querying for the config context objects
config_context_data = ConfigContext.objects.get_for_object(self, aggregate_data=True)
else:
# The attribute may exist, but the annotated value could be None if there is no config context data
config_context_data = self.config_context_data or []
for context in config_context_data:
data = deepmerge(data, | Closes #9582: Enable assigning config contexts based on device location | get_config_context | 379880cd8431da6cc39753a8b3a7c8bfcd8f9cc1 | netbox | configcontexts.py | 11 | 11 | https://github.com/netbox-community/netbox.git | 5 | 73 | 0 | 59 | 122 | Python | {
"docstring": "\n Compile all config data, overwriting lower-weight values with higher-weight values where a collision occurs.\n Return the rendered configuration context for a device or VM.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 24,
"vocab_size": 22
} | def get_config_context(self):
data = {}
if not hasattr(self, 'config_context_data'):
# The annotation is not available, so we fall back to manually querying for the config context objects
config_context_data = ConfigContext.objects.get_for_object(self, aggregate_data=True)
else:
# The attribute may exist, but the annotated value could be None if there is no config context data
config_context_data = self.config_context_data or []
for context in config_context_data:
data = deepmerge(data, context)
# If the object has local config context data defined, merge it last
if self.local_context_data:
data = deepmerge(data, self.local_context_data)
return data
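The deepmerge() helper is not shown in this record; a minimal sketch of the merge semantics implied above (later, higher-weight dictionaries overwrite earlier ones, recursing into nested mappings) could look like the following. The function name and exact behaviour here are assumptions for illustration only.

def deepmerge_sketch(base: dict, extra: dict) -> dict:
    # Values from `extra` win; nested dicts are merged recursively.
    merged = dict(base)
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deepmerge_sketch(merged[key], value)
        else:
            merged[key] = value
    return merged

print(deepmerge_sketch({"ntp": {"server": "a"}, "dns": "x"},
                       {"ntp": {"server": "b"}}))
# {'ntp': {'server': 'b'}, 'dns': 'x'}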
|
|
72,899 | 249,408 | 159 | tests/rest/admin/test_server_notice.py | 32 | 14 | def test_displayname_is_set_avatar_is_none(self) -> None:
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={
"user_id": self.other_user,
"content": {"msgtype": "m.text", "body": "test msg"},
},
)
self.assertEqual(200, channel.code, msg=channel.json_body)
# user has one inv | Fix that sending server notices fail if avatar is `None` (#13566)
Indroduced in #11846. | test_displayname_is_set_avatar_is_none | 37f329c9adf6ed02df15661850f999edd9e5fd93 | synapse | test_server_notice.py | 14 | 17 | https://github.com/matrix-org/synapse.git | 1 | 78 | 0 | 32 | 129 | Python | {
"docstring": "\n Tests that sending a server notices is successfully,\n if a display_name is set, avatar_url is `None` and\n \"check avatar size and mime type\" is set.\n ",
"language": "en",
"n_whitespaces": 54,
"n_words": 25,
"vocab_size": 20
} | def test_displayname_is_set_avatar_is_none(self) -> None:
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={
"user_id": self.other_user,
"content": {"msgtype": "m.text", "body": "test msg"},
},
)
self.assertEqual(200, channel.code, msg=channel.json_body)
# user has one invite
self._check_invite_and_join_status(self.other_user, 1, 0)
|
|
29,712 | 132,250 | 57 | python/ray/tune/schedulers/hyperband.py | 14 | 9 | def cur_iter_done(self) -> bool:
return all(
self._get_result_time(result) > | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | cur_iter_done | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | hyperband.py | 11 | 8 | https://github.com/ray-project/ray.git | 2 | 32 | 0 | 14 | 53 | Python | {
"docstring": "Checks if all iterations have completed.\n\n TODO(rliaw): also check that `t.iterations == self._r`",
"language": "en",
"n_whitespaces": 19,
"n_words": 13,
"vocab_size": 13
} | def cur_iter_done(self) -> bool:
return all(
self._get_result_time(result) >= self._cumul_r
for result in self._live_trials.values()
)
|
|
14,065 | 65,946 | 12 | erpnext/education/report/student_monthly_attendance_sheet/student_monthly_attendance_sheet.py | 18 | 9 | def get_attendance_years():
year_list = frappe.db.sql_list(
)
if not year_list:
year_list | style: format code with black | get_attendance_years | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | student_monthly_attendance_sheet.py | 12 | 7 | https://github.com/frappe/erpnext.git | 3 | 41 | 0 | 16 | 72 | Python | {
"docstring": "select distinct YEAR(date) from `tabStudent Attendance` ORDER BY YEAR(date) DESC",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | def get_attendance_years():
year_list = frappe.db.sql_list(
)
if not year_list:
year_list = [getdate().year]
return "\n".join(str(year) for year in year_list)
|
|
115,022 | 316,444 | 22 | tests/test_config_entries.py | 10 | 9 | async def test_discovery_notification(hass):
mock_integration(hass, MockModule("test"))
mock_entity_platform(hass, "config_flow.test", None)
with patch.dict(config_entries. | Search/replace RESULT_TYPE_* by FlowResultType enum (#74642) | test_discovery_notification | 7cd68381f1d4f58930ffd631dfbfc7159d459832 | core | test_config_entries.py | 10 | 28 | https://github.com/home-assistant/core.git | 1 | 223 | 0 | 10 | 61 | Python | {
"docstring": "Test that we create/dismiss a notification when source is discovery.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | async def test_discovery_notification(hass):
mock_integration(hass, MockModule("test"))
mock_entity_platform(hass, "config_flow.test", None)
with patch.dict(config_entries.HANDLERS):
|
|
13,468 | 63,674 | 22 | .venv/lib/python3.8/site-packages/pip/_vendor/resolvelib/providers.py | 8 | 7 | def get_preference(self, identifier, resolutions, candidates, information):
raise NotImplementedError
| upd; format | get_preference | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | providers.py | 6 | 2 | https://github.com/jindongwang/transferlearning.git | 1 | 16 | 0 | 8 | 24 | Python | {
"docstring": "Produce a sort key for given requirement based on preference.\n\n The preference is defined as \"I think this requirement should be\n resolved first\". The lower the return value is, the more preferred\n this group of arguments is.\n\n :param identifier: An identifier as returned by ``identify()``. This\n identifies the dependency matches of which should be returned.\n :param resolutions: Mapping of candidates currently pinned by the\n resolver. Each key is an identifier, and the value a candidate.\n The candidate may conflict with requirements from ``information``.\n :param candidates: Mapping of each dependency's possible candidates.\n Each value is an iterator of candidates.\n :param information: Mapping of requirement information of each package.\n Each value is an iterator of *requirement information*.\n\n A *requirement information* instance is a named tuple with two members:\n\n * ``requirement`` specifies a requirement contributing to the current\n list of candidates.\n * ``parent`` specifies the candidate that provides (dependend on) the\n requirement, or ``None`` to indicate a root requirement.\n\n The preference could depend on a various of issues, including (not\n necessarily in this order):\n\n * Is this package pinned in the current resolution result?\n * How relaxed is the requirement? Stricter ones should probably be\n worked on first? (I don't know, actually.)\n * How many possibilities are there to satisfy this requirement? Those\n with few left should likely be worked on first, I guess?\n * Are there any known conflicts for this requirement? We should\n probably work on those with the most known conflicts.\n\n A sortable value should be returned (this will be used as the ``key``\n parameter of the built-in sorting function). The smaller the value is,\n the more preferred this requirement is (i.e. the sorting function\n is called with ``reverse=False``).\n ",
"language": "en",
"n_whitespaces": 526,
"n_words": 279,
"vocab_size": 160
} | def get_preference(self, identifier, resolutions, candidates, information):
raise NotImplementedError
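Since the abstract method above only documents the sort key, here is a hedged sketch of what a concrete provider might return — pinned identifiers first, then the most constrained ones. This is not pip's or resolvelib's actual implementation.

def example_get_preference(identifier, resolutions, candidates, information):
    # Materialize the candidate iterator to count it (acceptable for a sketch).
    remaining = len(list(candidates[identifier]))
    # Sort key: already-pinned identifiers first, then fewest remaining candidates.
    return (identifier not in resolutions, remaining)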
|
|
3,155 | 19,946 | 517 | pipenv/patched/notpip/_internal/req/req_install.py | 72 | 23 | def prepare_metadata(self) -> None:
assert self.source_dir
details = self.name or f"from {self.link}"
if self.use_pep517:
assert self.pep517_backend is not None
if (
self.editable
and self.permit_editable_wheels
and self.supports_pyproject_editable()
):
self.metadata_directory = generate_editable_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
details=details,
)
else:
self.metadata_directory = generate_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
details=details,
)
else:
self.metadata_directory = generate_metadata_legacy(
build_env=self.build_env,
setup_py_path=self.setup_py_path,
source | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | prepare_metadata | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | req_install.py | 15 | 39 | https://github.com/pypa/pipenv.git | 7 | 157 | 0 | 50 | 252 | Python | {
"docstring": "Ensure that project metadata is available.\n\n Under PEP 517 and PEP 660, call the backend hook to prepare the metadata.\n Under legacy processing, call setup.py egg-info.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 26,
"vocab_size": 22
} | def prepare_metadata(self) -> None:
assert self.source_dir
details = self.name or f"from {self.link}"
if self.use_pep517:
assert self.pep517_backend is not None
if (
self.editable
and self.permit_editable_wheels
and self.supports_pyproject_editable()
):
self.metadata_directory = generate_editable_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
details=details,
)
else:
self.metadata_directory = generate_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
details=details,
)
else:
self.metadata_directory = generate_metadata_legacy(
build_env=self.build_env,
setup_py_path=self.setup_py_path,
source_dir=self.unpacked_source_directory,
isolated=self.isolated,
details=details,
)
# Act on the newly generated metadata, based on the name and version.
if not self.name:
self._set_requirement()
else:
self.warn_on_mismatching_name()
self.assert_source_matches_version()
|
|
16,381 | 75,204 | 34 | wagtail/images/tests/test_blocks.py | 13 | 14 | def get_image_filename(self, image, filterspec):
name, ext = os.path.splitext(os.path.basename(i | Reformat with black | get_image_filename | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_blocks.py | 12 | 3 | https://github.com/wagtail/wagtail.git | 1 | 48 | 0 | 12 | 75 | Python | {
"docstring": "\n Get the generated filename for a resized image\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | def get_image_filename(self, image, filterspec):
name, ext = os.path.splitext(os.path.basename(image.file.name))
return "{}images/{}.{}{}".format(settings.MEDIA_URL, name, filterspec, ext)
|
|
55,678 | 219,648 | 340 | python3.10.4/Lib/_pydecimal.py | 95 | 17 | def min(self, other, context=None):
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans | add python 3.10.4 for windows | min | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | _pydecimal.py | 13 | 21 | https://github.com/XX-net/XX-Net.git | 12 | 143 | 0 | 51 | 232 | Python | {
"docstring": "Returns the smaller value.\n\n Like min(self, other) except if one is not a number, returns\n NaN (and signals if one is sNaN). Also rounds.\n ",
"language": "en",
"n_whitespaces": 46,
"n_words": 24,
"vocab_size": 21
} | def min(self, other, context=None):
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
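The NaN handling described above can be seen through the public decimal module (behaviour follows the General Decimal Arithmetic specification):

from decimal import Decimal

# A quiet NaN paired with a number: the number is returned.
print(Decimal("NaN").min(Decimal("3")))   # 3
# Two ordinary numbers: the smaller one, rounded to the current context.
print(Decimal("7.5").min(Decimal("3")))   # 3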
|
|
51,886 | 207,160 | 70 | tests/admin_filters/tests.py | 21 | 17 | def test_simplelistfilter_with_none_returning_lookups(self):
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_f | Refs #33476 -- Reformatted code with Black. | test_simplelistfilter_with_none_returning_lookups | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 9 | 7 | https://github.com/django/django.git | 1 | 64 | 0 | 17 | 105 | Python | {
"docstring": "\n A SimpleListFilter lookups method can return None but disables the\n filter completely.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 12,
"vocab_size": 12
} | def test_simplelistfilter_with_none_returning_lookups(self):
modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site)
request = self.request_factory.get("/", {})
request.user = self.alfred
changelist = modeladmin.get_changelist_instance(request)
filterspec = changelist.get_filters(request)[0]
self.assertEqual(len(filterspec), 0)
|
|
1,099 | 6,991 | 450 | ludwig/data/preprocessing.py | 157 | 32 | def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend):
missing_value_strategy = preprocessing_parameters["missing_value_strategy"]
if missing_value_strategy == FILL_WITH_CONST:
return preprocessing_parameters["fill_value"]
elif missing_value_strategy == FILL_WITH_MODE:
return dataset_cols[feature[COLUMN]].value_counts().index[0]
elif missing_value_strategy == FILL_WITH_MEAN:
if feature[TYPE] != NUMBER:
raise ValueError(
f"Filling missing values with mean is supported "
f"only for number types, not for type {feature[TYPE]}.",
)
return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean())
elif missing_value_strategy == FILL_WITH_FALSE:
distinct_values = backend.df_engine.compute(
dataset_cols[feature[COLUMN]].drop_duplicates().dropna()
).values.tolist()
if len(distinct_values) > 2:
raise ValueError(
f"Missing value strategy `fill_with_false` "
f"for column {feature[COLUMN]} expects 2 distinct values, "
f"found: {len(distinct_values)} (ex: {distinct_values[:10]})"
)
# Determine the False label.
# Distinct values are sorted in reverse to mirror the selection of the default fallback_true_label (in
# binary_feature.get_feature_meta) for binary columns with unconventional boolean values, "human"/"bot".
for v in sorted(distinct_values, reverse=True):
fallback_true_label = preprocessing_parameters.get("fallback_true_label", "true")
if strings_utils.str2bool(v, fallback_true_label) is False:
return v
raise ValueError(
f"Unable to determine False value for column {feature[COLUMN]} with distinct values: {distin | Fixes NaN handling in boolean dtypes (#2058)
* reorganizes cast_columns and handle_missing_values
* fix failing tests with logs
* remove logs
* re-added deflaked test
* cleanup
* refactor to avoid calling handle missing values twice
* refactored build preprocessing and metadata to separate fns
* improve style with metadata
* preserves outputs as booleans for binary output feature
* remove extraneous casting
* fixes related to manual boolean casting
* leaving a note comment in read_xsv for prosperity
* updates wording
* cast changed from np fixed length str (modin) to object
* cleanup
* cleanup
* unit tests
* revert back to str type again
* add backwards compatible behavior in torchscript
* add comment in precompute_fill_value to remind devs of NaNs
* revert changes to test_class_imbalance_feature::test_imbalance_ray
* cleanup | precompute_fill_value | 3030fc2f7d414d54a9aaa0f7b47ccf8d4f54b12c | ludwig | preprocessing.py | 20 | 31 | https://github.com/ludwig-ai/ludwig.git | 9 | 188 | 0 | 105 | 351 | Python | {
"docstring": "Precomputes the fill value for a feature.\n\n NOTE: this is called before NaNs are removed from the dataset. Modifications here must handle NaNs gracefully.\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 24,
"vocab_size": 22
} | def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend):
missing_value_strategy = preprocessing_parameters["missing_value_strategy"]
if missing_value_strategy == FILL_WITH_CONST:
return preprocessing_parameters["fill_value"]
elif missing_value_strategy == FILL_WITH_MODE:
return dataset_cols[feature[COLUMN]].value_counts().index[0]
elif missing_value_strategy == FILL_WITH_MEAN:
if feature[TYPE] != NUMBER:
raise ValueError(
f"Filling missing values with mean is supported "
f"only for number types, not for type {feature[TYPE]}.",
)
return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean())
elif missing_value_strategy == FILL_WITH_FALSE:
distinct_values = backend.df_engine.compute(
dataset_cols[feature[COLUMN]].drop_duplicates().dropna()
).values.tolist()
if len(distinct_values) > 2:
raise ValueError(
f"Missing value strategy `fill_with_false` "
f"for column {feature[COLUMN]} expects 2 distinct values, "
f"found: {len(distinct_values)} (ex: {distinct_values[:10]})"
)
# Determine the False label.
# Distinct values are sorted in reverse to mirror the selection of the default fallback_true_label (in
# binary_feature.get_feature_meta) for binary columns with unconventional boolean values, "human"/"bot".
for v in sorted(distinct_values, reverse=True):
fallback_true_label = preprocessing_parameters.get("fallback_true_label", "true")
if strings_utils.str2bool(v, fallback_true_label) is False:
return v
raise ValueError(
f"Unable to determine False value for column {feature[COLUMN]} with distinct values: {distinct_values}."
)
# Otherwise, we cannot precompute the fill value for this dataset
return None
|
|
6,129 | 33,645 | 284 | src/transformers/models/deformable_detr/modeling_deformable_detr.py | 84 | 36 | def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
if "logits" not in outputs:
raise ValueError("No logits were found in the outputs")
source_logits = outputs["logits"]
idx = self._get_source_permutation_idx(indices)
target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
source_logits.shape[:2], self.num_classes, dtype=torch.int64, device | Add Deformable DETR (#17281)
* First draft
* More improvements
* Improve model, add custom CUDA code
* Import torch before
* Add script that imports custom layer
* Add everything in new ops directory
* Import custom layer in modeling file
* Fix ARCHIVE_MAP typo
* Creating the custom kernel on the fly.
* Import custom layer in modeling file
* More improvements
* Fix CUDA loading
* More improvements
* Improve conversion script
* Improve conversion script
* Make it work until encoder_outputs
* Make forward pass work
* More improvements
* Make logits match original implementation
* Make implementation also support single_scale model
* Add support for single_scale and dilation checkpoint
* Add support for with_box_refine model
* Support also two stage model
* Improve tests
* Fix more tests
* Make more tests pass
* Upload all models to the hub
* Clean up some code
* Improve decoder outputs
* Rename intermediate hidden states and reference points
* Improve model outputs
* Move tests to dedicated folder
* Improve model outputs
* Fix retain_grad test
* Improve docs
* Clean up and make test_initialization pass
* Improve variable names
* Add copied from statements
* Improve docs
* Fix style
* Improve docs
* Improve docs, move tests to model folder
* Fix rebase
* Remove DetrForSegmentation from auto mapping
* Apply suggestions from code review
* Improve variable names and docstrings
* Apply some more suggestions from code review
* Apply suggestion from code review
* better docs and variables names
* hint to num_queries and two_stage confusion
* remove asserts and code refactor
* add exception if two_stage is True and with_box_refine is False
* use f-strings
* Improve docs and variable names
* Fix code quality
* Fix rebase
* Add require_torch_gpu decorator
* Add pip install ninja to CI jobs
* Apply suggestion of @sgugger
* Remove DeformableDetrForObjectDetection from auto mapping
* Remove DeformableDetrModel from auto mapping
* Add model to toctree
* Add model back to mappings, skip model in pipeline tests
* Apply @sgugger's suggestion
* Fix imports in the init
* Fix copies
* Add CPU implementation
* Comment out GPU function
* Undo previous change
* Apply more suggestions
* Remove require_torch_gpu annotator
* Fix quality
* Add logger.info
* Fix logger
* Fix variable names
* Fix initializaztion
* Add missing initialization
* Update checkpoint name
* Add model to doc tests
* Add CPU/GPU equivalence test
* Add Deformable DETR to pipeline tests
* Skip model for object detection pipeline
Co-authored-by: Nicolas Patry <[email protected]>
Co-authored-by: Nouamane Tazi <[email protected]>
Co-authored-by: Sylvain Gugger <[email protected]> | loss_labels | 59407bbeb31fff8340938768051c9daabd38d7a7 | transformers | modeling_deformable_detr.py | 12 | 24 | https://github.com/huggingface/transformers.git | 3 | 226 | 0 | 68 | 337 | Python | {
"docstring": "Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 16,
"vocab_size": 16
} | def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
if "logits" not in outputs:
raise ValueError("No logits were found in the outputs")
source_logits = outputs["logits"]
idx = self._get_source_permutation_idx(indices)
target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros(
[source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1],
dtype=source_logits.dtype,
layout=source_logits.layout,
device=source_logits.device,
)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:, :, :-1]
loss_ce = (
sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2)
* source_logits.shape[1]
)
losses = {"loss_ce": loss_ce}
return losses
|
|
76,567 | 260,920 | 129 | sklearn/ensemble/tests/test_stacking.py | 62 | 29 | def test_stacking_classifier_multilabel_predict_proba(estimator):
X_train, X_test, y_train, y_test = train_test_split(
X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
)
n_outputs = 3
estimators = [("est", estimator)]
stacker = StackingClassifier(
estimators=estimators,
final_estimator=KNeighborsClassifier(),
stack_method="predict_proba",
).fit(X_train, y_train)
X_trans = stacker.transform(X_test)
assert X_trans.shape == (X_test.shape[0], n_outputs)
# we should not have any collinear classes and thus nothing should sum to 1
assert not any(np.isclose(X_trans.sum(axis=1), 1.0))
y_pred = stac | EHN Add multilabel classification support for `StackingClassifier` (#24146)
* Add stacking multilabel functionality
* Add underscore to a class attr
* Remove model from base estimator in test_stacking
* Remove scale in train/test split in test_stacking_classifier_multilabel
* Add stack_method as a test parameter, change RandomForestClassifier to KNeighborsClassifier in test
* Update Changelog
* fix doc typos
* predict_proba output will be concatenate this list in an array of shape n_samples, n_outputs * n_classes - 1. Update test.
* Update sklearn/ensemble/_stacking.py
Co-authored-by: Guillaume Lemaitre <[email protected]>
* Update doc/whats_new/v1.0.rst
Co-authored-by: Guillaume Lemaitre <[email protected]>
* update whats_new
* add passthrough test
* update whats_new with current PR
* Apply suggestions from code review
Co-authored-by: Julien Jerphanion <[email protected]>
* update tests
* Apply suggestion to update comments on `concatenate`
Co-authored-by: Julien Jerphanion <[email protected]>
* parametrized the two tests into one
* parametrized the two tests into one
* strip the mysterious trailing _r
* fix multilabel list scenario
* add Guillaume's recommendations
* add test for
* some fix
* split tests
* fix flake8
* add suggestions
* Trigger CI
* remove multiclass-multioutput from comments and docstrings
Co-authored-by: Nicolas <[email protected]>
Co-authored-by: Nestor Navarro <[email protected]>
Co-authored-by: Nestor Navarro <[email protected]>
Co-authored-by: Guillaume Lemaitre <[email protected]>
Co-authored-by: Julien Jerphanion <[email protected]> | test_stacking_classifier_multilabel_predict_proba | c18460f78441f11b3e6c15c12238695fcfe3c872 | scikit-learn | test_stacking.py | 13 | 16 | https://github.com/scikit-learn/scikit-learn.git | 1 | 127 | 0 | 52 | 193 | Python | {
"docstring": "Check the behaviour for the multilabel classification case and the\n `predict_proba` stacking method.\n\n Estimators are not consistent with the output arrays and we need to ensure that\n we handle all cases.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 31,
"vocab_size": 26
} | def test_stacking_classifier_multilabel_predict_proba(estimator):
X_train, X_test, y_train, y_test = train_test_split(
X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
)
n_outputs = 3
estimators = [("est", estimator)]
stacker = StackingClassifier(
estimators=estimators,
final_estimator=KNeighborsClassifier(),
stack_method="predict_proba",
).fit(X_train, y_train)
X_trans = stacker.transform(X_test)
assert X_trans.shape == (X_test.shape[0], n_outputs)
# we should not have any collinear classes and thus nothing should sum to 1
assert not any(np.isclose(X_trans.sum(axis=1), 1.0))
y_pred = stacker.predict(X_test)
assert y_pred.shape == y_test.shape
|
|
12,842 | 62,033 | 1,086 | .venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py | 199 | 57 | def get_page(self, url):
# http: | upd; format | get_page | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | locators.py | 22 | 49 | https://github.com/jindongwang/transferlearning.git | 14 | 362 | 0 | 111 | 624 | Python | {
"docstring": "\n Get the HTML for an URL, possibly from an in-memory cache.\n\n XXX TODO Note: this cache is never actually cleared. It's assumed that\n the data won't get stale over the lifetime of a locator instance (not\n necessarily true for the default_locator).\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 41,
"vocab_size": 36
} | def get_page(self, url):
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
|
|
3,429 | 20,567 | 15 | pipenv/patched/notpip/_vendor/pyparsing/core.py | 11 | 3 | def enable_all_warnings() -> None:
__diag__.enable_all_warnings()
# hide abstract class
del | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | enable_all_warnings | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | core.py | 7 | 5 | https://github.com/pypa/pipenv.git | 1 | 12 | 0 | 11 | 28 | Python | {
"docstring": "\n Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).\n ",
"language": "en",
"n_whitespaces": 15,
"n_words": 8,
"vocab_size": 8
} | def enable_all_warnings() -> None:
__diag__.enable_all_warnings()
# hide abstract class
del __config_flags
|
|
77,672 | 264,300 | 32 | netbox/netbox/views/generic/bulk_views.py | 11 | 7 | def export_yaml(self):
yaml_da | Refactor generic views; add plugins dev documentation | export_yaml | 54834c47f8870e7faabcd847c3270da0bd3d2884 | netbox | bulk_views.py | 9 | 3 | https://github.com/netbox-community/netbox.git | 2 | 28 | 0 | 11 | 50 | Python | {
"docstring": "\n Export the queryset of objects as concatenated YAML documents.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def export_yaml(self):
yaml_data = [obj.to_yaml() for obj in self.queryset]
return '---\n'.join(yaml_data)
|
|
51,643 | 206,700 | 150 | django/utils/http.py | 79 | 10 | def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return _url_has_allowed_host_and_scheme(
url, allowed_hosts, require_https=require_https
) and _url_has_allowed_host_and_sche | Refs #33476 -- Reformatted code with Black. | url_has_allowed_host_and_scheme | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | http.py | 11 | 14 | https://github.com/django/django.git | 6 | 83 | 0 | 60 | 136 | Python | {
"docstring": "\n Return ``True`` if the url uses an allowed host and a safe scheme.\n\n Always return ``False`` on an empty url.\n\n If ``require_https`` is ``True``, only 'https' will be considered a valid\n scheme, as opposed to 'http' and 'https' with the default, ``False``.\n\n Note: \"True\" doesn't entail that a URL is \"safe\". It may still be e.g.\n quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri()\n on the path component of untrusted URLs.\n ",
"language": "en",
"n_whitespaces": 95,
"n_words": 70,
"vocab_size": 59
} | def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return _url_has_allowed_host_and_scheme(
url, allowed_hosts, require_https=require_https
) and _url_has_allowed_host_and_scheme(
url.replace("\\", "/"), allowed_hosts, require_https=require_https
)
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function. |
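Typical usage when validating a post-login ?next= redirect target (requires Django installed; the expected results below follow the docstring above):

from django.utils.http import url_has_allowed_host_and_scheme

# Relative paths are accepted; foreign hosts are rejected.
assert url_has_allowed_host_and_scheme("/accounts/profile/", allowed_hosts={"example.com"})
assert not url_has_allowed_host_and_scheme("https://evil.com/", allowed_hosts={"example.com"})
# With require_https=True, plain http is rejected even for an allowed host.
assert not url_has_allowed_host_and_scheme(
    "http://example.com/", allowed_hosts={"example.com"}, require_https=True
)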
|
78,455 | 266,523 | 161 | lib/ansible/module_utils/service.py | 60 | 15 | def get_ps(module, pattern):
found = False
if platform.system() == 'SunOS':
flags = '-ef'
else:
flags = 'auxww'
psbin = module.get_bin_path('ps', True)
(rc, psout, pserr) = module.run_comm | Misc typo fixes in module_utils (#76564) | get_ps | fee90b15a25b588bfb8a9ff047e851d43e78511f | ansible | service.py | 12 | 14 | https://github.com/ansible/ansible.git | 5 | 81 | 0 | 47 | 138 | Python | {
"docstring": "\n Last resort to find a service by trying to match pattern to programs in memory\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 15,
"vocab_size": 13
} | def get_ps(module, pattern):
found = False
if platform.system() == 'SunOS':
flags = '-ef'
else:
flags = 'auxww'
psbin = module.get_bin_path('ps', True)
(rc, psout, pserr) = module.run_command([psbin, flags])
if rc == 0:
for line in psout.splitlines():
if pattern in line:
# FIXME: should add logic to prevent matching 'self', though that should be extremely rare
found = True
break
return found
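Outside Ansible's module API, the same last-resort check can be sketched with the standard library alone (POSIX ps flags assumed; this is not the module_utils implementation):

import subprocess

def process_matches(pattern: str) -> bool:
    # Scan `ps auxww` output for the pattern, mirroring the fallback above.
    output = subprocess.run(["ps", "auxww"], capture_output=True, text=True).stdout
    return any(pattern in line for line in output.splitlines())

print(process_matches("python"))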
|
|
@register_op | 53,060 | 211,300 | 161 | ppdet/data/transform/operators.py | 47 | 17 | def apply(self, sample, context=None):
| [dev] add ppyoloe_plus configs and alter NormalizeImage (#6675)
* [dev] add ppyoloe_plus configs and alter NormalizeImage
* alter other NormalizeImage
* alter cpp NormalizeImage | apply | 34d7832946145006083b602d5d090f7f104e661e | PaddleDetection | operators.py | 12 | 13 | https://github.com/PaddlePaddle/PaddleDetection.git | 3 | 114 | 1 | 30 | 176 | Python | {
"docstring": "Normalize the image.\n Operators:\n 1.(optional) Scale the pixel to [0,1]\n 2.(optional) Each pixel minus mean and is divided by std\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 20,
"vocab_size": 18
} | def apply(self, sample, context=None):
im = sample['image']
im = im.astype(np.float32, copy=False)
if self.is_scale:
scale = 1.0 / 255.0
im *= scale
if self.norm_type == 'mean_std':
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
im -= mean
im /= std
sample['image'] = im
return sample
@register_op |
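The same preprocessing written as a standalone NumPy snippet; the ImageNet-style mean/std values are assumed defaults here, not taken from the PaddleDetection config:

import numpy as np

im = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
im *= 1.0 / 255.0                               # optional scale to [0, 1]
mean = np.array([0.485, 0.456, 0.406])[np.newaxis, np.newaxis, :]
std = np.array([0.229, 0.224, 0.225])[np.newaxis, np.newaxis, :]
im = (im - mean) / std                          # per-channel mean/std normalization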
51,119 | 205,412 | 460 | django/db/models/base.py | 88 | 21 | def _check_m2m_through_same_relationship(cls):
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, Mod | Refs #33476 -- Reformatted code with Black. | _check_m2m_through_same_relationship | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | base.py | 18 | 26 | https://github.com/django/django.git | 7 | 136 | 0 | 53 | 215 | Python | {
"docstring": "Check if no relationship model is used by more than one m2m field.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def _check_m2m_through_same_relationship(cls):
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (
f.remote_field.model,
cls,
f.remote_field.through,
f.remote_field.through_fields,
)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two identical many-to-many relations "
"through the intermediate model '%s'."
% f.remote_field.through._meta.label,
obj=cls,
id="models.E003",
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
|
|
20,765 | 101,350 | 32 | plugins/extract/pipeline.py | 11 | 3 | def image(self) -> "np.ndarray":
assert self. | Bugfix: convert - Gif Writer
- Fix non-launch error on Gif Writer
- convert plugins - linting
- convert/fs_media/preview/queue_manager - typing
- Change convert items from dict to Dataclass | image | 1022651eb8a7741014f5d2ec7cbfe882120dfa5f | faceswap | pipeline.py | 7 | 4 | https://github.com/deepfakes/faceswap.git | 1 | 19 | 0 | 10 | 34 | Python | {
"docstring": " :class:`numpy.ndarray`: The source frame for this object. ",
"language": "en",
"n_whitespaces": 8,
"n_words": 7,
"vocab_size": 7
} | def image(self) -> "np.ndarray":
assert self._image is not None
return self._image
|
|
18,055 | 85,880 | 512 | tests/sentry/notifications/test_notifications.py | 113 | 36 | def test_sends_deployment_notification(self, record_analytics):
release = self.create_release()
version_parsed = self.version_parsed = parse_release(release.version)["description"]
url | chore(notification): Pass User ID into notification analytics (#38924)
We pass in the actor_id to notification analytics events but we should
also include a user_id if the recipient is a user | test_sends_deployment_notification | afbf9a3334ce9cad1a62fced372d7fcee40a3133 | sentry | test_notifications.py | 15 | 42 | https://github.com/getsentry/sentry.git | 1 | 211 | 0 | 67 | 433 | Python | {
"docstring": "\n Test that an email AND Slack notification are sent with\n the expected values when a release is deployed.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 18
} | def test_sends_deployment_notification(self, record_analytics):
release = self.create_release()
version_parsed = self.version_parsed = parse_release(release.version)["description"]
url = f"/api/0/organizations/{self.organization.slug}/releases/{release.version}/deploys/"
with self.tasks():
response = self.client.post(
url, format="json", data={"environment": self.environment.name}
)
assert response.status_code == 201, response.content
msg = mail.outbox[0]
# check the txt version
assert f"Version {version_parsed} was deployed to {self.environment.name} on" in msg.body
# check the html version
assert (
f"Version {version_parsed} was deployed to {self.environment.name}\n </h2>\n"
in msg.alternatives[0][0]
)
attachment, text = get_attachment()
assert (
text
== f"Release {version_parsed} was deployed to {self.environment.name} for this project"
)
assert (
attachment["actions"][0]["url"]
== f"http://testserver/organizations/{self.organization.slug}/releases/{release.version}/?project={self.project.id}&unselectedSeries=Healthy/"
)
assert (
attachment["footer"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/deploy/?referrer=release_activity-slack-user|Notification Settings>"
)
assert analytics_called_with_args(
record_analytics,
"integrations.email.notification_sent",
user_id=self.user.id,
actor_id=self.user.actor_id,
organization_id=self.organization.id,
)
assert analytics_called_with_args(
record_analytics,
"integrations.slack.notification_sent",
user_id=self.user.id,
actor_id=self.user.actor_id,
organization_id=self.organization.id,
)
|
|
13,781 | 65,049 | 3 | erpnext/accounts/doctype/sales_invoice/sales_invoice.py | 9 | 7 | def get_all_mode_of_payments(doc):
return frappe.db.sql(
,
{"compa | style: format code with black | get_all_mode_of_payments | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | sales_invoice.py | 10 | 9 | https://github.com/frappe/erpnext.git | 1 | 27 | 0 | 9 | 44 | Python | {
"docstring": "\n\t\tselect mpa.default_account, mpa.parent, mp.type as type\n\t\tfrom `tabMode of Payment Account` mpa,`tabMode of Payment` mp\n\t\twhere mpa.parent = mp.name and mpa.company = %(company)s and mp.enabled = 1",
"language": "en",
"n_whitespaces": 24,
"n_words": 27,
"vocab_size": 23
} | def get_all_mode_of_payments(doc):
return frappe.db.sql(
,
{"company": doc.company},
as_dict=1,
)
|
|
23,728 | 109,744 | 615 | lib/mpl_toolkits/mplot3d/axes3d.py | 203 | 44 | def _on_move(self, event):
if not self.button_pressed:
return
if self.get_navigate_mode() is not None:
# we don't want to rotate if we are zooming/panning
# from the toolbar
return
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
if x is None or event.inaxes != self:
return
dx, dy = x - self._sx, y - self._sy
w = self._pseudo_w
h = self._pseudo_h
# Rotation
if self.button_pressed in self._rotate_btn:
# rotate viewing point
# get the x and y pixel coords
if dx == 0 and dy == 0:
return
roll = np.deg2rad(self.roll)
delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll)
dazim = -(d | Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449)
* ENH: Add pan and zoom toolbar handling to 3D Axes
1) This moves the pan logic that was already in the mouse move handler
into the "drag_pan" method to make it available from the toolbar.
2) This expands upon the panning logic to enable a zoom-to-box feature.
The zoom-to-box is done relative to the Axes, so it shrinks/expands
the box as a fraction of each delta, from lower-left Axes to lower-left
zoom-box. Thus, it tries to handle non-centered zooms, which adds more
cases to handle versus the current right-click zoom only scaling from
the center of the projection.
* Rewrite zooming with bounding box
* Rewrite 3d panning to work with a roll angle
* Whats new for zoom and pan buttons
* Make pan button configurable
* Do not jump when zooming and mouse goes over other subplot
* Rework zooming for 3d plots
* Handle x/y lock when zooming and panning
* Update tests
* Docstrings
* Dont assume a scale_z
* Limit zoom box
* Test zoom pan key modifiers
* Save some calculation by saving view axes
* Deprecation warnings for Axes3D.eye, .vvec
* Remove Axes3D._prepare_view_from_bbox for now
* Comments and docstrings
* Switch from uvn to uvw
* Save aspect to axes
* Constrain zooming with mouse when one of the equal aspect ratios is set
* Cleanup
* Cleanup
* Consolidate finding equal aspect axis indices
* linting
* More intuitive scaling
* Box zoom keeps existing aspect ratios
* Linting
* Code review comments
* Revert parameters for view_transformation
* Fix new 3d pan/zoom view going on view stack twice
* Better clipping
* Test 3d toolbar navigation
* Privatize helper functions
* Deprecations
* Code review changes
* Deprecation note
* Undeprecate proj3d.view_transformation
* Undeprecate proj3d.view_transformation
* Update doc/api/next_api_changes/deprecations/23449-SS.rst
Co-authored-by: Greg Lucas <[email protected]>
Co-authored-by: Scott Shambaugh <[email protected]>
Co-authored-by: Oscar Gustafsson <[email protected]> | _on_move | 4896ec1a2cfb8c454e385632d8df213c915ced52 | matplotlib | axes3d.py | 14 | 32 | https://github.com/matplotlib/matplotlib.git | 11 | 306 | 0 | 125 | 490 | Python | {
"docstring": "\n Mouse moving.\n\n By default, button-1 rotates, button-2 pans, and button-3 zooms;\n these buttons can be modified via `mouse_init`.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 18,
"vocab_size": 18
} | def _on_move(self, event):
if not self.button_pressed:
return
if self.get_navigate_mode() is not None:
# we don't want to rotate if we are zooming/panning
# from the toolbar
return
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
if x is None or event.inaxes != self:
return
dx, dy = x - self._sx, y - self._sy
w = self._pseudo_w
h = self._pseudo_h
# Rotation
if self.button_pressed in self._rotate_btn:
# rotate viewing point
# get the x and y pixel coords
if dx == 0 and dy == 0:
return
roll = np.deg2rad(self.roll)
delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll)
dazim = -(dy/h)*180*np.sin(roll) - (dx/w)*180*np.cos(roll)
self.elev = self.elev + delev
self.azim = self.azim + dazim
self.stale = True
elif self.button_pressed in self._pan_btn:
# Start the pan event with pixel coordinates
px, py = self.transData.transform([self._sx, self._sy])
self.start_pan(px, py, 2)
# pan view (takes pixel coordinate input)
self.drag_pan(2, None, event.x, event.y)
self.end_pan()
# Zoom
elif self.button_pressed in self._zoom_btn:
# zoom view (dragging down zooms in)
scale = h/(h - dy)
self._scale_axis_limits(scale, scale, scale)
# Store the event coordinates for the next time through.
self._sx, self._sy = x, y
# Always request a draw update at the end of interaction
self.figure.canvas.draw_idle()
|
|
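As a quick numeric check of the rotation formulas in the _on_move record above (an illustrative sketch with made-up drag deltas, not part of the original record): with zero roll, a purely horizontal drag changes only the azimuth.

    import numpy as np

    w = h = 1.0                       # pseudo width/height used by the handler
    dx, dy = 0.25, 0.0                # drag a quarter of the axes width to the right
    roll = np.deg2rad(0)
    delev = -(dy / h) * 180 * np.cos(roll) + (dx / w) * 180 * np.sin(roll)
    dazim = -(dy / h) * 180 * np.sin(roll) - (dx / w) * 180 * np.cos(roll)
    assert delev == 0.0 and dazim == -45.0   # elevation untouched, azimuth swings 45 degrees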
53,793 | 215,075 | 998 | salt/modules/aixpkg.py | 248 | 46 | def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets)
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Install the fileset (normally ends with bff or rte) or rpm package(s)
errors = []
for target in targets:
filename = os.path.basename(target)
if filename.endswith(".bff") or filename.endswith(".rte"):
if _is_installed(target):
continue
cmd = "/usr/sbin/installp -acYXg"
if test:
cmd += "p"
cmd += " -d "
dirpath = os.path.dirname(target)
cmd += dirpath + " " + filename
out = __salt__["cmd.run_all"](cmd, python_shell=False)
else:
if _is_installed_rpm(filename.split(".aix")[0]):
continue
# assume use dnf or yum
cmdflags = " install --allowerasing "
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdexe = "/opt/freeware/bin/dnf"
if test:
cmdflags += " --assumeno"
else:
cmdflags += " --assumeyes"
| work in progress while resolve issue of python3_32 usage by dnf and yum | install | fbcc707e76f11770712e6828155258ac61e00ff8 | salt | aixpkg.py | 18 | 66 | https://github.com/saltstack/salt.git | 22 | 371 | 0 | 129 | 663 | Python | {
"docstring": "\n Install the named fileset(s)/rpm package(s).\n\n .. versionadded:: 3005\n\n preference to install rpm packages are to use in the following order:\n /opt/freeware/bin/dnf\n /opt/freeware/bin/yum\n /usr/bin/yum\n /usr/bin/rpm\n\n Note: use of rpm to install implies that rpm's dependencies must have been previously installed.\n dnf and yum automatically install rpm's dependencies as part of the install process\n\n name\n The name of the fileset or rpm package to be installed.\n\n refresh\n Whether or not to update the yum database before executing.\n\n\n Multiple Package Installation Options:\n\n pkgs\n A list of filesets and/or rpm packages to install.\n Must be passed as a python list. The ``name`` parameter will be\n ignored if this option is passed.\n\n version\n Install a specific version of a fileset/rpm package.\n (Unused at present).\n\n test\n Verify that command functions correctly:\n\n Returns a dict containing the new fileset(s)/rpm package(s) names and versions:\n\n {'<package>': {'old': '<old-version>',\n 'new': '<new-version>'}}\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm\n salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True\n salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff\n salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte\n salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base\n salt '*' pkg.install pkgs='[\"foo\", \"bar\"]'\n ",
"language": "en",
"n_whitespaces": 405,
"n_words": 172,
"vocab_size": 115
} | def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets)
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Install the fileset (normally ends with bff or rte) or rpm package(s)
errors = []
for target in targets:
filename = os.path.basename(target)
if filename.endswith(".bff") or filename.endswith(".rte"):
if _is_installed(target):
continue
cmd = "/usr/sbin/installp -acYXg"
if test:
cmd += "p"
cmd += " -d "
dirpath = os.path.dirname(target)
cmd += dirpath + " " + filename
out = __salt__["cmd.run_all"](cmd, python_shell=False)
else:
if _is_installed_rpm(filename.split(".aix")[0]):
continue
# assume use dnf or yum
cmdflags = " install --allowerasing "
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdexe = "/opt/freeware/bin/dnf"
if test:
cmdflags += " --assumeno"
else:
cmdflags += " --assumeyes"
if refresh:
cmdflags += " --refresh"
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdexe = "/opt/freeware/bin/yum"
if test:
cmdflags += " --assumeno"
else:
cmdflags += " --assumeyes"
if refresh:
cmdflags += " --refresh"
elif pathlib.Path("/usr/bin/yum").is_file():
cmdexe = "/usr/bin/yum"
if test:
cmdflags += " --assumeno"
else:
cmdflags += " --assumeyes"
else:
cmdexe = "/usr/bin/rpm"
cmdflags = " -Uivh "
if test:
cmdflags += " --test"
cmd = [cmdexe, cmdflags, target]
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if 0 != out["retcode"]:
errors.append(out["stderr"])
# Get a list of the packages after the uninstall
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problems encountered installing filesets(s)/package(s)",
info={"changes": ret, "errors": errors},
)
# No error occurred
if test:
return "Test succeeded."
return ret
|
|
121,108 | 337,705 | 36 | src/accelerate/utils/deepspeed.py | 15 | 6 | def is_false(self, ds_key_long):
value = self.get_value(ds_key_long)
return False if value is None else not bool(value)
| Migrate HFDeepSpeedConfig from trfrs to accelerate (#432)
* Migrate HFDeepSpeedConfig from trfrs to accelerate
* update state.py to resolve comments
1. Adds static method to have a simple API for integrating deepspeed config in transformers trainer.
* reverting changes and addressing comments
* Marking DepSpeed and FSDP as experimental in accelerate | is_false | 873dcc63a461558152eec20af991482204e8248f | accelerate | deepspeed.py | 9 | 3 | https://github.com/huggingface/accelerate.git | 2 | 28 | 0 | 14 | 46 | Python | {
    "docstring": "\n        Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very\n        specific question of whether the value is set to `False` (and it's not set to `True` or isn't set).\n        ",
"language": "en",
"n_whitespaces": 60,
"n_words": 38,
"vocab_size": 30
} | def is_false(self, ds_key_long):
value = self.get_value(ds_key_long)
return False if value is None else not bool(value)
|
|
1,042 | 6,647 | 253 | scripts/extract_schema.py | 62 | 30 | def extract_pytorch_structures():
for opt in lmo.optimizer_registry:
# Get the torch class:
optimizer_class = lmo.optimizer_registry[opt][0]
# Parse and clean the class structure:
path = get_fully_qualified_class_name(optimizer_class)
opt_struct = get_pytkdocs_structure_for_path(path, "google")["objects"][0]
prune_pytorch_structures(opt_struct)
# Write it to a file:
| fix: Naming scheme cleanup that includes: renaming `ludwig.marshmallow` module to `ludwig.validation` to avoid implicit import errors, and moving `ludwig.utils.schema` into this new module. (#1936)
* Rename marshmallow/ folder to marshmallow_schema_utils/, marshmallow_schema_utils.py to utils.py (under folder), update all refs.
* Rename marshmallow/ folder to marshmallow_schema_utils/, marshmallow_schema_utils.py to utils.py (under folder), update all refs.
* update extract_schema
* update generated files.
* update manifest
* rename using validation/schema_utils naming
* update generated files
* new naming scheme
* fix imports.
* rerun extract_schema | extract_pytorch_structures | a95f611d582a724740af772ead1fa439b3713124 | ludwig | extract_schema.py | 14 | 18 | https://github.com/ludwig-ai/ludwig.git | 2 | 136 | 0 | 55 | 229 | Python | {
"docstring": "Extracts and saves the parsed structure of all pytorch classes referenced in\n `ludwig.modules.optimization_modules.optimizer_registry` as JSON files under\n `ludwig/validation/generated/torch/`.",
"language": "en",
"n_whitespaces": 23,
"n_words": 18,
"vocab_size": 18
} | def extract_pytorch_structures():
for opt in lmo.optimizer_registry:
# Get the torch class:
optimizer_class = lmo.optimizer_registry[opt][0]
# Parse and clean the class structure:
path = get_fully_qualified_class_name(optimizer_class)
opt_struct = get_pytkdocs_structure_for_path(path, "google")["objects"][0]
prune_pytorch_structures(opt_struct)
# Write it to a file:
parent_dir = str(Path(__file__).parent.parent)
filename = os.path.join(parent_dir, "ludwig/validation/generated/torch/", optimizer_class.__name__) + ".json"
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as outfile:
json.dump(
opt_struct,
outfile,
indent=4,
sort_keys=True,
separators=(",", ": "),
)
outfile.write("\n")
|
|
25,830 | 116,777 | 53 | tests/unit/test_ml_handlers.py | 17 | 7 | def test_hf_classification_bin(self, mock_handler):
# create predictor
create_sql =
model_name = 'spam_classifier'
predict_sql =
self.hf_test_run(mock_handler, model_ | huggingface handler in new ml handler api
- permanent is property of handler | test_hf_classification_bin | 4e12722621c12ca2b2b075421f30e5ae8a58ebe8 | mindsdb | test_ml_handlers.py | 7 | 17 | https://github.com/mindsdb/mindsdb.git | 1 | 28 | 0 | 15 | 49 | Python | {
"docstring": "\n CREATE PREDICTOR huggingface.spam_classifier\n predict PRED\n USING\n task='text-classification',\n model_name= \"mrm8488/bert-tiny-finetuned-sms-spam-detection\",\n input_column = 'text_spammy',\n labels=['ham','spam']\n \n SELECT h.*\n FROM pg.df as t \n JOIN huggingface.spam_classifier as h\n ",
"language": "en",
"n_whitespaces": 166,
"n_words": 23,
"vocab_size": 21
} | def test_hf_classification_bin(self, mock_handler):
# create predictor
create_sql =
model_name = 'spam_classifier'
predict_sql =
self.hf_test_run(mock_handler, model_name, create_sql, predict_sql)
|
|
23,586 | 109,439 | 1,147 | lib/matplotlib/_constrained_layout.py | 190 | 33 | def match_submerged_margins(layoutgrids, fig):
for sfig in fig.subfigs:
match_submerged_margins(layoutgrids, sfig)
axs = [a for a in fig.get_axes()
if a.get_subplotspec() is not None and a.get_in_layout()]
for ax1 in axs:
ss1 = ax1.get_subplotspec()
if ss1.get_gridspec() not in layoutgrids:
axs.remove(ax1)
continue
lg1 = layoutgrids[ss1.get_gridspec()]
# interior columns:
if len(ss1.colspan) > 1:
maxsubl = np.max(
lg1.margin_vals['left'][ss1.colspan[1:]] +
lg1.margin_vals['leftcb'][ss1.colspan[1:]]
)
maxsubr = np.max(
lg1.margin_vals['right'][ss1.colspan[:-1]] +
lg1.margin_vals['rightcb'][ss1.colspan[:-1]]
)
for ax2 in axs:
ss2 = ax2.get_subplotspec()
lg2 = layoutgrids[ss2.get_gridspec()]
if lg2 is not None and len(ss2.colspan) > 1:
maxsubl2 = np.max(
lg2.margin_vals['left'][ss2.colspan[1:]] +
lg2.margin_vals['leftcb'][ss2.colspan[1:]])
if maxsubl2 > maxsubl:
| Merge SubplotBase into AxesBase. | match_submerged_margins | c73f4c455514cf5422d27bf38c93250de8316b21 | matplotlib | _constrained_layout.py | 26 | 64 | https://github.com/matplotlib/matplotlib.git | 21 | 623 | 0 | 91 | 986 | Python | {
"docstring": "\n Make the margins that are submerged inside an Axes the same size.\n\n This allows axes that span two columns (or rows) that are offset\n from one another to have the same size.\n\n This gives the proper layout for something like::\n fig = plt.figure(constrained_layout=True)\n axs = fig.subplot_mosaic(\"AAAB\\nCCDD\")\n\n Without this routine, the axes D will be wider than C, because the\n margin width between the two columns in C has no width by default,\n whereas the margins between the two columns of D are set by the\n width of the margin between A and B. However, obviously the user would\n like C and D to be the same size, so we need to add constraints to these\n \"submerged\" margins.\n\n This routine makes all the interior margins the same, and the spacing\n between the three columns in A and the two column in C are all set to the\n margins between the two columns of D.\n\n See test_constrained_layout::test_constrained_layout12 for an example.\n ",
"language": "en",
"n_whitespaces": 218,
"n_words": 158,
"vocab_size": 87
} | def match_submerged_margins(layoutgrids, fig):
for sfig in fig.subfigs:
match_submerged_margins(layoutgrids, sfig)
axs = [a for a in fig.get_axes()
if a.get_subplotspec() is not None and a.get_in_layout()]
for ax1 in axs:
ss1 = ax1.get_subplotspec()
if ss1.get_gridspec() not in layoutgrids:
axs.remove(ax1)
continue
lg1 = layoutgrids[ss1.get_gridspec()]
# interior columns:
if len(ss1.colspan) > 1:
maxsubl = np.max(
lg1.margin_vals['left'][ss1.colspan[1:]] +
lg1.margin_vals['leftcb'][ss1.colspan[1:]]
)
maxsubr = np.max(
lg1.margin_vals['right'][ss1.colspan[:-1]] +
lg1.margin_vals['rightcb'][ss1.colspan[:-1]]
)
for ax2 in axs:
ss2 = ax2.get_subplotspec()
lg2 = layoutgrids[ss2.get_gridspec()]
if lg2 is not None and len(ss2.colspan) > 1:
maxsubl2 = np.max(
lg2.margin_vals['left'][ss2.colspan[1:]] +
lg2.margin_vals['leftcb'][ss2.colspan[1:]])
if maxsubl2 > maxsubl:
maxsubl = maxsubl2
maxsubr2 = np.max(
lg2.margin_vals['right'][ss2.colspan[:-1]] +
lg2.margin_vals['rightcb'][ss2.colspan[:-1]])
if maxsubr2 > maxsubr:
maxsubr = maxsubr2
for i in ss1.colspan[1:]:
lg1.edit_margin_min('left', maxsubl, cell=i)
for i in ss1.colspan[:-1]:
lg1.edit_margin_min('right', maxsubr, cell=i)
# interior rows:
if len(ss1.rowspan) > 1:
maxsubt = np.max(
lg1.margin_vals['top'][ss1.rowspan[1:]] +
lg1.margin_vals['topcb'][ss1.rowspan[1:]]
)
maxsubb = np.max(
lg1.margin_vals['bottom'][ss1.rowspan[:-1]] +
lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]]
)
for ax2 in axs:
ss2 = ax2.get_subplotspec()
lg2 = layoutgrids[ss2.get_gridspec()]
if lg2 is not None:
if len(ss2.rowspan) > 1:
maxsubt = np.max([np.max(
lg2.margin_vals['top'][ss2.rowspan[1:]] +
lg2.margin_vals['topcb'][ss2.rowspan[1:]]
), maxsubt])
maxsubb = np.max([np.max(
lg2.margin_vals['bottom'][ss2.rowspan[:-1]] +
lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]]
), maxsubb])
for i in ss1.rowspan[1:]:
lg1.edit_margin_min('top', maxsubt, cell=i)
for i in ss1.rowspan[:-1]:
lg1.edit_margin_min('bottom', maxsubb, cell=i)
|
|
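The motivating layout from the docstring above can be reproduced in a few lines (a minimal sketch; it only builds the mosaic that exercises the submerged-margin matching and does not call the private helper directly):

    import matplotlib.pyplot as plt

    fig = plt.figure(constrained_layout=True)
    # A spans three columns and D spans two; without this routine D would come out
    # wider than C, so constrained_layout equalizes the interior ("submerged") margins.
    axs = fig.subplot_mosaic("AAAB\nCCDD")
    fig.canvas.draw()   # triggers the constrained-layout pass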
70,805 | 245,473 | 95 | mmdet/version.py | 26 | 12 | def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_ | [Enhance] Update mmdet, mmcv, and mmdet version in MMDetection (#8417)
* Update dev-3.x circleci (#8396)
* update circleci
* update test config
* tmp delete github action
* update
* tmp reduce the coverage requirement
* update branch
* update branch
* [Fix] Fix metafile 3.x (#8404)
* update reppoints readme and metafile
* update openimages metafile
* update faster rcnn readme and metafile
* update convnext readme and metafile
* update guided_anchoring metafile
* update groie metafile and readme
* update reppoints readme and metafile
* update metafile
* update metafile
* release ld and mask_rcnn models
* update metafile
* update regnet metafile
* fix markdown format
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* update md format
* release lad
* rename
* rename
* update solov2 metafile
* update cascase rcnn metafile
* [Doc]: fix markdown version (#8408)
* [Enhance] Update mmdet, mmcv, and mmdet version in MMDetection
* Fix anchor_free load_from_state_dict
Co-authored-by: RangiLyu <[email protected]>
Co-authored-by: Cedric Luo <[email protected]>
Co-authored-by: Wenwei Zhang <[email protected]> | parse_version_info | 035b915983ace07533f1a718a983315d126f3a40 | mmdetection | version.py | 15 | 10 | https://github.com/open-mmlab/mmdetection.git | 4 | 79 | 0 | 23 | 156 | Python | {
"docstring": "Parse a version string into a tuple.\n\n Args:\n version_str (str): The version string.\n Returns:\n tuple[int | str]: The version info, e.g., \"1.3.0\" is parsed into\n (1, 3, 0), and \"2.0.0rc1\" is parsed into (2, 0, 0, 'rc1').\n ",
"language": "en",
"n_whitespaces": 71,
"n_words": 37,
"vocab_size": 28
} | def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
|
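The docstring examples in the record above translate directly into assertions (a minimal sketch exercising parse_version_info as defined in this record):

    assert parse_version_info('1.3.0') == (1, 3, 0)
    assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')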
@pytest.fixture | 19,398 | 97,275 | 173 | src/sentry/utils/pytest/relay.py | 40 | 8 | def adjust_settings_for_relay_tests(settings):
settings.ALLOWED_HOSTS = [
"localhost",
"testserver",
"host.docker | feat: Improve relay debug in CI (#32625) | adjust_settings_for_relay_tests | 8429cf33623b759a3ff7bddcf13d251b0dab9b8e | sentry | relay.py | 13 | 19 | https://github.com/getsentry/sentry.git | 1 | 65 | 1 | 34 | 132 | Python | {
"docstring": "\n Adjusts the application settings to accept calls from a Relay instance running inside a\n docker container.\n\n :param settings: the app settings\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 21,
"vocab_size": 18
} | def adjust_settings_for_relay_tests(settings):
settings.ALLOWED_HOSTS = [
"localhost",
"testserver",
"host.docker.internal",
"0.0.0.0",
"127.0.0.1",
]
settings.KAFKA_CLUSTERS = {
"default": {
"common": {"bootstrap.servers": "127.0.0.1:9092"},
"producers": {
"compression.type": "lz4",
"message.max.bytes": 50000000, # 50MB, default is 1MB
},
}
}
settings.SENTRY_RELAY_WHITELIST_PK = ["SMSesqan65THCV6M4qs4kBzPai60LzuDn-xNsvYpuP8"]
settings.SENTRY_USE_RELAY = True
@pytest.fixture |
27,047 | 121,234 | 65 | jax/_src/api.py | 31 | 19 | def clear_backends():
if xc._version < 79:
rais | Introduce jax.experimental.clear_backends to delete all JAX runtime backends.
In cases like unit tests, users may want to clean up all the backends, along with the resources they use, at the end of a test, and reinitialize them in the next test.
PiperOrigin-RevId: 462239974 | clear_backends | c0ec3b33e687ce37b431906109d4a2bc4655285f | jax | api.py | 11 | 10 | https://github.com/google/jax.git | 2 | 59 | 0 | 30 | 108 | Python | {
"docstring": "\n Clear all backend clients so that new backend clients can be created later.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 13,
"vocab_size": 11
} | def clear_backends():
if xc._version < 79:
    raise RuntimeError("clear_backends is not supported in the jaxlib used. "
"Please update your jaxlib package.")
xb._clear_backends()
jax.lib.xla_bridge._backends = {}
dispatch.xla_callable.cache_clear() # type: ignore
dispatch.xla_primitive_callable.cache_clear()
_cpp_jit_cache.clear()
jax_jit.CompiledFunctionCache.clear_all()
|
|
78,027 | 265,205 | 190 | netbox/dcim/models/racks.py | 54 | 23 | def get_power_utilization(self):
powerfeeds = PowerFeed.objects.filter(rack=self)
available_power_total = sum(pf.available_power for pf in powerfeeds)
print(f'available_power_total: {available_power_total}')
if not available_power_total:
return 0
pow | Update power utilization calculations for new cabling model | get_power_utilization | fcd1daaf798d62023f999c3e09e035f7b3f47c8f | netbox | racks.py | 13 | 16 | https://github.com/netbox-community/netbox.git | 7 | 103 | 0 | 39 | 175 | Python | {
"docstring": "\n Determine the utilization rate of power in the rack and return it as a percentage.\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 15,
"vocab_size": 14
} | def get_power_utilization(self):
powerfeeds = PowerFeed.objects.filter(rack=self)
available_power_total = sum(pf.available_power for pf in powerfeeds)
print(f'available_power_total: {available_power_total}')
if not available_power_total:
return 0
powerports = []
for powerfeed in powerfeeds:
powerports.extend([
peer for peer in powerfeed.link_peers if isinstance(peer, PowerPort)
])
allocated_draw = 0
for powerport in powerports:
allocated_draw += powerport.get_power_draw()['allocated']
print(f'allocated_draw: {allocated_draw}')
return int(allocated_draw / available_power_total * 100)
|
|
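For intuition, the calculation in get_power_utilization above boils down to simple arithmetic (a worked sketch; the feed sizes and allocated draw are hypothetical):

    available_power_total = 3 * 2400      # e.g. three power feeds of 2400 VA each
    allocated_draw = 1200                 # summed allocated draw of the rack's power ports
    utilization = int(allocated_draw / available_power_total * 100)
    assert utilization == 16              # 1200 / 7200 -> 16 %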
17,238 | 81,631 | 1,121 | awx/main/dispatch/pool.py | 270 | 52 | def cleanup(self):
orphaned = []
for w in self.workers[::]:
if not w.alive:
# the worker process has exited
# 1. take the task it was running and enqueue the error
# callbacks
# 2. take any pending tasks delivered to its queue and
# send them to another worker
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
if w.current_task:
if w.current_task != 'QUIT':
try:
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
reaper.reap_job(j, 'failed')
except Exception:
logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
orphaned.extend(w.orphaned_tasks)
self.workers.remove(w)
elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down:
# the process has an empty queue (it's idle) and we have
# more processes in the pool than we need (> min)
# send this process a message so it will exit gracefully
# at the next opportunity
logger | Add back in cleanup call | cleanup | 67190100500819eb1237c4572accafa72816ae54 | awx | pool.py | 22 | 36 | https://github.com/ansible/awx.git | 18 | 330 | 0 | 171 | 602 | Python | {
"docstring": "\n Perform some internal account and cleanup. This is run on\n every cluster node heartbeat:\n\n 1. Discover worker processes that exited, and recover messages they\n were handling.\n 2. Clean up unnecessary, idle workers.\n\n IMPORTANT: this function is one of the few places in the dispatcher\n (aside from setting lookups) where we talk to the database. As such,\n if there's an outage, this method _can_ throw various\n django.db.utils.Error exceptions. Act accordingly.\n ",
"language": "en",
"n_whitespaces": 149,
"n_words": 69,
"vocab_size": 64
} | def cleanup(self):
orphaned = []
for w in self.workers[::]:
if not w.alive:
# the worker process has exited
# 1. take the task it was running and enqueue the error
# callbacks
# 2. take any pending tasks delivered to its queue and
# send them to another worker
logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode))
if w.current_task:
if w.current_task != 'QUIT':
try:
for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']):
reaper.reap_job(j, 'failed')
except Exception:
logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid']))
orphaned.extend(w.orphaned_tasks)
self.workers.remove(w)
elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down:
# the process has an empty queue (it's idle) and we have
# more processes in the pool than we need (> min)
# send this process a message so it will exit gracefully
# at the next opportunity
logger.info(f'scaling down worker pid:{w.pid} prior total:{len(self.workers)}')
w.quit()
self.workers.remove(w)
if w.alive:
# if we discover a task manager invocation that's been running
# too long, reap it (because otherwise it'll just hold the postgres
# advisory lock forever); the goal of this code is to discover
# deadlocks or other serious issues in the task manager that cause
# the task manager to never do more work
current_task = w.current_task
if current_task and isinstance(current_task, dict):
endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager']
current_task_name = current_task.get('task', '')
if any(current_task_name.endswith(e) for e in endings):
if 'started' not in current_task:
w.managed_tasks[current_task['uuid']]['started'] = time.time()
age = time.time() - current_task['started']
w.managed_tasks[current_task['uuid']]['age'] = age
if age > self.task_manager_timeout:
logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}')
os.kill(w.pid, signal.SIGTERM)
for m in orphaned:
# if all the workers are dead, spawn at least one
if not len(self.workers):
self.up()
idx = random.choice(range(len(self.workers)))
self.write(idx, m)
|
|
50,084 | 202,366 | 87 | tests/csrf_tests/tests.py | 24 | 16 | def test_token_node_empty_csrf_cookie(self):
req = self._get_request(cookie="")
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
csrf_secret = _unmask_cipher_token(token)
| Refs #33476 -- Reformatted code with Black. | test_token_node_empty_csrf_cookie | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 10 | 9 | https://github.com/django/django.git | 1 | 68 | 0 | 20 | 113 | Python | {
"docstring": "\n A new token is sent if the csrf_cookie is the empty string.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 10
} | def test_token_node_empty_csrf_cookie(self):
req = self._get_request(cookie="")
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
csrf_secret = _unmask_cipher_token(token)
self._check_token_present(resp, csrf_secret)
|
|
22,483 | 106,865 | 143 | py/visdom/__init__.py | 34 | 9 | def save(self, envs):
assert isinstance(envs, list), "envs should be a list"
if le | apply black py to all python files | save | 5b8b7f267cfaf76a2a39a727ef31a62b3909a093 | visdom | __init__.py | 11 | 11 | https://github.com/fossasia/visdom.git | 3 | 52 | 0 | 30 | 86 | Python | {
"docstring": "\n This function allows the user to save envs that are alive on the\n Tornado server. The envs can be specified as a list of env ids.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 26,
"vocab_size": 24
} | def save(self, envs):
assert isinstance(envs, list), "envs should be a list"
if len(envs) > 0:
for env in envs:
assert isstr(env), "env should be a string"
return self._send(
{
"data": envs,
},
"save",
)
|
|
52,148 | 207,885 | 76 | tests/admin_views/tests.py | 20 | 16 | def test_has_related_field_in_list_display_o2o(self):
media = Media.objects.create(name="Foo")
Vodcast.objects.create(media=media)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"), {})
response.context["cl"].list_display = ["media"]
sel | Refs #33476 -- Reformatted code with Black. | test_has_related_field_in_list_display_o2o | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 11 | 8 | https://github.com/django/django.git | 1 | 102 | 0 | 15 | 177 | Python | {
"docstring": "Joins shouldn't be performed for <O2O>_id fields in list display.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_has_related_field_in_list_display_o2o(self):
media = Media.objects.create(name="Foo")
Vodcast.objects.create(media=media)
response = self.client.get(reverse("admin:admin_views_vodcast_changelist"), {})
response.context["cl"].list_display = ["media"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), True)
response.context["cl"].list_display = ["media_id"]
self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
|
|
@keras_export("keras.backend.binary_focal_crossentropy")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs | 80,219 | 269,598 | 421 | keras/backend.py | 176 | 37 | def binary_crossentropy(target, output, from_logits=False):
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
# Use logits whenever they are available. `softmax` and `sigmoid`
# activations cache logits on the `output` Tensor.
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | binary_crossentropy | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | backend.py | 14 | 31 | https://github.com/keras-team/keras.git | 7 | 222 | 1 | 121 | 387 | Python | {
"docstring": "Binary crossentropy between an output tensor and a target tensor.\n\n Args:\n target: A tensor with the same shape as `output`.\n output: A tensor.\n from_logits: Whether `output` is expected to be a logits tensor.\n By default, we consider that `output`\n encodes a probability distribution.\n\n Returns:\n A tensor.\n ",
"language": "en",
"n_whitespaces": 105,
"n_words": 46,
"vocab_size": 37
} | def binary_crossentropy(target, output, from_logits=False):
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
# Use logits whenever they are available. `softmax` and `sigmoid`
# activations cache logits on the `output` Tensor.
if hasattr(output, "_keras_logits"):
output = output._keras_logits # pylint: disable=protected-access
if from_logits:
warnings.warn(
'"`binary_crossentropy` received `from_logits=True`, but the `output`'
" argument was produced by a sigmoid or softmax activation and thus "
'does not represent logits. Was this intended?"',
stacklevel=2,
)
from_logits = True
if from_logits:
return tf.nn.sigmoid_cross_entropy_with_logits(
labels=target, logits=output
)
if (
not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable))
and output.op.type == "Sigmoid"
) and not hasattr(output, "_keras_history"):
# When sigmoid activation function is used for output operation, we
# use logits from the sigmoid function directly to compute loss in order
# to prevent collapsing zero when training.
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return tf.nn.sigmoid_cross_entropy_with_logits(
labels=target, logits=output
)
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)
# Compute cross entropy from probabilities.
bce = target * tf.math.log(output + epsilon())
bce += (1 - target) * tf.math.log(1 - output + epsilon())
return -bce
@keras_export("keras.backend.binary_focal_crossentropy")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs |
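A minimal usage sketch of the backend function above, assuming the public tf.keras.backend entry point (the target and logit values are made up); passing logits with from_logits=True should agree with passing the sigmoid probabilities, up to the epsilon clipping applied on the probability path:

    import numpy as np
    import tensorflow as tf
    from tensorflow.keras import backend as K

    target = tf.constant([[1.0, 0.0]])
    logits = tf.constant([[2.0, -1.0]])
    loss_from_logits = K.binary_crossentropy(target, logits, from_logits=True)
    loss_from_probs = K.binary_crossentropy(target, tf.sigmoid(logits))
    np.testing.assert_allclose(loss_from_logits.numpy(), loss_from_probs.numpy(), rtol=1e-5)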
71,960 | 247,858 | 20 | tests/handlers/test_federation_event.py | 6 | 3 | def test_process_pulled_event_with_missing_state(self) -> None:
return self._test_process_pulled_event_with_missing_state(False)
| Optimise `_get_state_after_missing_prev_event`: use `/state` (#12040)
If we're missing most of the events in the room state, then we may as well call the /state endpoint, instead of individually requesting each and every event. | test_process_pulled_event_with_missing_state | 9b43df1f7b2977431563b3cda8fed1ed879651ba | synapse | test_federation_event.py | 7 | 12 | https://github.com/matrix-org/synapse.git | 1 | 15 | 0 | 6 | 27 | Python | {
"docstring": "Ensure that we correctly handle pulled events with lots of missing state\n\n In this test, we pretend we are processing a \"pulled\" event (eg, via backfill\n or get_missing_events). The pulled event has a prev_event we haven't previously\n seen, so the server requests the state at that prev_event. There is a lot\n of state we don't have, so we expect the server to make a /state request.\n\n We check that the pulled event is correctly persisted, and that the state is\n as we expect.\n ",
"language": "en",
"n_whitespaces": 132,
"n_words": 83,
"vocab_size": 54
} | def test_process_pulled_event_with_missing_state(self) -> None:
return self._test_process_pulled_event_with_missing_state(False)
|
|
19,664 | 99,586 | 238 | tests/sentry/integrations/slack/notifications/test_resolved_in_release.py | 45 | 22 | def test_resolved_in_release(self, mock_func):
notification = ResolvedInReleaseActivityNotification(
Activity(
project=self.project,
group=self.group,
user=self.user,
type=ActivityType.SET_RESOLVED_IN_RELEASE,
data={"version": "meow"},
)
)
with self.tasks():
notification.send()
attachment, text = get_attachment()
release_name = notification.activity.data["version"]
assert text == f"Issue marked as resolved in {release_name} by {self.name}"
assert (
attachment["footer"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=r | fix(notifications): Use `metrics_key` (#34572) | test_resolved_in_release | 1730c481f1a8a71446326fa1ff72e10663016385 | sentry | test_resolved_in_release.py | 14 | 19 | https://github.com/getsentry/sentry.git | 1 | 92 | 0 | 38 | 174 | Python | {
"docstring": "\n Test that a Slack message is sent with the expected payload when an issue is resolved in a release\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 19,
"vocab_size": 17
} | def test_resolved_in_release(self, mock_func):
notification = ResolvedInReleaseActivityNotification(
Activity(
project=self.project,
group=self.group,
user=self.user,
type=ActivityType.SET_RESOLVED_IN_RELEASE,
data={"version": "meow"},
)
)
with self.tasks():
notification.send()
attachment, text = get_attachment()
release_name = notification.activity.data["version"]
assert text == f"Issue marked as resolved in {release_name} by {self.name}"
assert (
attachment["footer"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_in_release_activity-slack-user|Notification Settings>"
)
|
|
82,104 | 277,596 | 302 | keras/layers/preprocessing/index_lookup.py | 100 | 25 | def get_vocabulary(self, include_special_tokens=True):
# The lookup table data will not be sorted, so we will create a inverted
# lookup here, and us | reduce layers line-too-long | get_vocabulary | 8401e08334d4b1f102a6ee9479738bacfee0600c | keras | index_lookup.py | 12 | 19 | https://github.com/keras-team/keras.git | 7 | 158 | 0 | 70 | 249 | Python | {
"docstring": "Returns the current vocabulary of the layer.\n\n Args:\n include_special_tokens: If True, the returned vocabulary will include\n mask and OOV tokens, and a term's index in the vocabulary will equal\n the term's index when calling the layer. If False, the returned\n vocabulary will not include any mask or OOV tokens.\n ",
"language": "en",
"n_whitespaces": 105,
"n_words": 49,
"vocab_size": 29
} | def get_vocabulary(self, include_special_tokens=True):
# The lookup table data will not be sorted, so we will create a inverted
# lookup here, and use that to lookup a range of indices [0,
# vocab_size).
if self.lookup_table.size() == 0:
vocab, indices = [], []
else:
keys, values = self.lookup_table.export()
vocab, indices = (values, keys) if self.invert else (keys, values)
vocab, indices = (
self._tensor_vocab_to_numpy(vocab),
indices.numpy(),
)
lookup = collections.defaultdict(
lambda: self.oov_token, zip(indices, vocab)
)
vocab = [lookup[x] for x in range(self.vocabulary_size())]
if self.mask_token is not None and self.output_mode == INT:
vocab[0] = self.mask_token
if not include_special_tokens:
vocab = vocab[self._token_start_index() :]
return vocab
|
|
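The behaviour described in the record's docstring is easiest to see through one of the layer's public subclasses (a sketch assuming tf.keras.layers.StringLookup, which builds on this IndexLookup base):

    import tensorflow as tf

    layer = tf.keras.layers.StringLookup(vocabulary=["a", "b", "c"])
    print(layer.get_vocabulary())                               # ['[UNK]', 'a', 'b', 'c']
    print(layer.get_vocabulary(include_special_tokens=False))   # ['a', 'b', 'c']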
@pytest.mark.parametrize(
"val_dl",
[
DataLoader(dataset=RandomDataset(32, 64), shuffle=True),
CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)),
CombinedLoader(
[DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)]
),
CombinedLoader(
{
"dl1": DataLoader(dataset=RandomDataset(32, 64)),
"dl2": DataLoader(dataset=RandomDataset(32, 64), shuffle=True),
}
),
],
) | 69,640 | 241,648 | 220 | tests/trainer/test_data_loading.py | 63 | 25 | def test_error_raised_with_float_limited_eval_batches():
model = BoringModel()
dl_size = len(model.val_dataloader())
limit_val_batches = 1 / (dl_size + 2)
trainer = Trainer(limit_val_batches=limit_val_batches)
trainer._data_connector.attach_data(model)
with pytest.raises(
MisconfigurationException,
match=fr"{limit_val_batches} \* {dl_size} < 1. Please increase the `limit_val_batches`",
):
trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model)
@py | Deprecate `TrainerDataLoadingMixin` and move logic to `DataConnector` (#11282)
Co-authored-by: Rohit Gupta <[email protected]>
Co-authored-by: Aki Nitta <[email protected]>
Co-authored-by: Carlos Mocholí <[email protected]> | test_error_raised_with_float_limited_eval_batches | 5b59c951e28ddc8bb884f044b1f46fb54c23a8b8 | lightning | test_data_loading.py | 16 | 11 | https://github.com/Lightning-AI/lightning.git | 1 | 71 | 1 | 50 | 303 | Python | {
"docstring": "Test that an error is raised if there are not enough batches when passed with float value of\n limit_eval_batches.",
"language": "en",
"n_whitespaces": 21,
"n_words": 19,
"vocab_size": 19
} | def test_error_raised_with_float_limited_eval_batches():
model = BoringModel()
dl_size = len(model.val_dataloader())
limit_val_batches = 1 / (dl_size + 2)
trainer = Trainer(limit_val_batches=limit_val_batches)
trainer._data_connector.attach_data(model)
with pytest.raises(
MisconfigurationException,
match=fr"{limit_val_batches} \* {dl_size} < 1. Please increase the `limit_val_batches`",
):
trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model)
@pytest.mark.parametrize(
"val_dl",
[
DataLoader(dataset=RandomDataset(32, 64), shuffle=True),
CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)),
CombinedLoader(
[DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)]
),
CombinedLoader(
{
"dl1": DataLoader(dataset=RandomDataset(32, 64)),
"dl2": DataLoader(dataset=RandomDataset(32, 64), shuffle=True),
}
),
],
) |
27,096 | 121,997 | 177 | jax/_src/dispatch.py | 130 | 18 | def not_none_device_or_backend_on_jit(backend, device, num_ins):
# TODO(yashkatariya): Remove this entire function when backend and device are
# removed as arguments on jit.
from jax.experimental import sharding
if device is not None and backend is not None:
raise ValueError("can't specify both a device and a backend for jit, "
"got device={} and backend={}".format(device, backend))
if backend | Minimally support `device` argument on `jit` in the `jax.Array` path
This means that only a single device is allowed to flow through this path. This is a compromise, i.e. it will support the existing code paths but won't allow sharded arrays to go through this path, encouraging users to use other well-supported techniques, such as calling device_put explicitly, instead of relying on `jit` to do it for them.
PiperOrigin-RevId: 473373822 | not_none_device_or_backend_on_jit | 980aa318fbe1e3653906465788e919027cf4d680 | jax | dispatch.py | 14 | 14 | https://github.com/google/jax.git | 4 | 106 | 0 | 85 | 174 | Python | {
"docstring": "This is to support the backend and device argument on jit. It's a feature\n that's deprecated but needs to be supported for feature parity and so that we\n can delete the non-Array paths when Array is switched on.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 38,
"vocab_size": 33
} | def not_none_device_or_backend_on_jit(backend, device, num_ins):
# TODO(yashkatariya): Remove this entire function when backend and device are
# removed as arguments on jit.
from jax.experimental import sharding
if device is not None and backend is not None:
raise ValueError("can't specify both a device and a backend for jit, "
"got device={} and backend={}".format(device, backend))
if backend is not None:
da = [xb.get_backend(backend).get_default_device_assignment(1)[0]]
else:
assert device is not None
da = [device]
assert len(da) == 1
# Set committed to True for this path because it simulates a device_put on
# behalf of a user.
committed = True
# in_shardings will be marked as replicated regardless of whatever the input
# had. Given that only a single device is allowed above, this is correct.
in_shardings = [sharding.OpShardingSharding.get_replicated(da)] * num_ins
return committed, da, in_shardings
|
|
46,969 | 194,435 | 25 | kivy/input/motionevent.py | 11 | 5 | def is_mouse_scrolling(self, *args):
return 'button' in self.profile and 'scroll' in self.button
| Feature: EventManagerBase (#7658)
* Added EventManagerBase class and event_managers attribute to WindowBase class.
* Added on_motion event to Widget class.
* Updated post_dispatch_input in EventLoopBase to skip non-touch events.
* Using type ids in MouseMotionEventProvider.
* Added on_motion method to Widget subclasses.
* Updated Widget.on_motion method to dispatch to filtered widgets if 'pos' is not in me.profile.
* Changed motion_filter property in Widget to store key to list values.
* Updated Widget.on_motion to not dispatch event to children if widget is disabled.
* Widget: Using flags to control dispatching in on_motion method.
* Widget: Don't dispatch on_motion to children if only self is registered.
* Widget: Removed collision on disabled check from on_motion method.
* Widget: Added docstrings for motion_filter and related methods.
* EventManager: Moved motion event flags to eventmanager/__init__.py module.
* ScreenManager: Overrode the on_motion method.
* WindowBase: Using attributes event_managers and event_managers_dict.
* WindowBase: Added doc for register_event_manager and unregister_event_manager methods.
* Widget: Improved default dispatch to stop after the last registered widgets.
* EventManagerBase: Added initial docs class and module.
* Widget: Added experimental warnings to motion_filter property and to on_motion and (un)register_for_motion_event methods.
* WindowBase: Added docs for event_managers and event_managers_dict attributes.
* MotionEvent: Added type_id and flags to push_attrs list.
* EventManagerBase: Added versionadded tag on all flags.
* EventManagerBase: Use dispatch modes instead of flags. | is_mouse_scrolling | 1830123ba3edf7290b7c6cb1c6f406ccf1d0e5d4 | kivy | motionevent.py | 8 | 2 | https://github.com/kivy/kivy.git | 2 | 21 | 0 | 10 | 39 | Python | {
"docstring": "Returns True if the touch event is a mousewheel scrolling\n\n .. versionadded:: 1.6.0\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 13,
"vocab_size": 13
} | def is_mouse_scrolling(self, *args):
return 'button' in self.profile and 'scroll' in self.button
|
|
53,492 | 212,886 | 223 | PySimpleGUI.py | 60 | 27 | def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False,
no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None,
erase_all=False, resizable=True, blocking=None):
if _DebugWin.debug_window is None:
_DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar,
no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top,
do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking)
txt_color, bg_color = _parse_colors_parm(c or colors)
_DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color,
erase_all=erase_all, font=font, blocking=blocking)
| Addition of blocking parameter to debug printing. IF True, then execution of your code is stopped until the "Quit" button / "X" is clicked on the Debug Window. | easy_print | 935e430420f5ac18df67233040ba86359d98a579 | PySimpleGUI | PySimpleGUI.py | 11 | 3 | https://github.com/PySimpleGUI/PySimpleGUI.git | 1 | 94 | 0 | 51 | 279 | Python | {
"docstring": "\n Works like a \"print\" statement but with windowing options. Routes output to the \"Debug Window\"\n\n In addition to the normal text and background colors, you can use a \"colors\" tuple/string\n The \"colors\" or \"c\" parameter defines both the text and background in a single parm.\n It can be a tuple or a single single. Both text and background colors need to be specified\n colors -(str, str) or str. A combined text/background color definition in a single parameter\n c - (str, str) - Colors tuple has format (foreground, backgrouned)\n c - str - can also be a string of the format \"foreground on background\" (\"white on red\")\n\n :param *args: stuff to output\n :type *args: (Any)\n :param size: (w,h) w=characters-wide, h=rows-high\n :type size: (int, int)\n :param end: end character\n :type end: (str)\n :param sep: separator character\n :type sep: (str)\n :param location: Location of upper left corner of the window\n :type location: (int, int)\n :param relative_location: (x,y) location relative to the default location of the window, in pixels. Normally the window centers. This location is relative to the location the window would be created. Note they can be negative.\n :type relative_location: (int, int)\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike\n :type font: (str or (str, int[, str]) or None)\n :param no_titlebar: If True no titlebar will be shown\n :type no_titlebar: (bool)\n :param no_button: don't show button\n :type no_button: (bool)\n :param grab_anywhere: If True: can grab anywhere to move the window (Default = False)\n :type grab_anywhere: (bool)\n :param background_color: color of background\n :type background_color: (str)\n :param text_color: color of the text\n :type text_color: (str)\n :param keep_on_top: If True the window will remain above all current windows\n :type keep_on_top: (bool)\n :param location: Location of upper left corner of the window\n :type location: (int, int)\n :param do_not_reroute_stdout: do not reroute stdout and stderr. If False, both stdout and stderr will reroute to here\n :type do_not_reroute_stdout: (bool)\n :param echo_stdout: If True stdout is sent to both the console and the debug window\n :type echo_stdout: (bool)\n :param colors: Either a tuple or a string that has both the text and background colors\n :type colors: (str) or (str, str)\n :param c: Either a tuple or a string that has both the text and background colors\n :type c: (str) or (str, str)\n :param resizable: if True, the user can resize the debug window. Default is True\n :type resizable: (bool)\n :param erase_all: If True when erase the output before printing\n :type erase_all: (bool)\n :param blocking: if True, makes the window block instead of returning immediately. The \"Quit\" button changers to \"More\"\n :type blocking: (bool | None)\n :return:\n :rtype:\n ",
"language": "en",
"n_whitespaces": 1135,
"n_words": 444,
"vocab_size": 200
} | def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False,
no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None,
erase_all=False, resizable=True, blocking=None):
if _DebugWin.debug_window is None:
_DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar,
no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top,
do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking)
txt_color, bg_color = _parse_colors_parm(c or colors)
_DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color,
erase_all=erase_all, font=font, blocking=blocking)
|
|
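A usage sketch of easy_print as defined above, assuming the usual module-level import (the text and colour string are invented for illustration); blocking=True, the option added in this record's commit, keeps execution paused until the Debug Window is dismissed:

    import PySimpleGUI as sg

    sg.easy_print("starting run...", c="white on red", blocking=True)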
1,978 | 10,901 | 119 | jina/orchestrate/pods/__init__.py | 22 | 11 | def wait_start_success(self):
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= | refactor: rename pod to deployment (#4230)
* refactor: rename pod to deployment
* style: fix overload and cli autocomplete
* fix: undo daemon mistake
* refactor: leftover cleanup
* fix: more test fixes
* fix: more fixes
* fix: more fixes
* fix: more fixes
* fix: more tests
* fix: fix more tests
* refactor: fix more tests
* refactor: more tests fixes
* refactor: rename pea to pod
* refactor: adjust docs
* refactor: complete pea renaming
* refactor: more fixes
* fix: pea_type in k8s yamls
* fix: adjust pod args name
* refactor: rename peapods parser folder
* fix: da init
Co-authored-by: Jina Dev Bot <[email protected]> | wait_start_success | 13edc16d806fb5d77a6849551178ccc75937f25f | jina | __init__.py | 10 | 11 | https://github.com/jina-ai/jina.git | 3 | 56 | 0 | 16 | 95 | Python | {
"docstring": "Block until all pods starts successfully.\n\n If not success, it will raise an error hoping the outer function to catch it\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 21,
"vocab_size": 20
} | def wait_start_success(self):
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
if self._wait_for_ready_or_shutdown(_timeout):
self._check_failed_to_start()
self.logger.debug(__ready_msg__)
else:
self._fail_start_timeout(_timeout)
|
|
47,872 | 196,372 | 53 | sympy/matrices/decompositions.py | 25 | 15 | def _rank_decomposition(M, iszerofunc=_iszero, simplify=False):
r
F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc,
pivots=True)
rank = len(pivot_cols)
C = M.extract(range(M.rows), pivot_cols)
F = F[:rank, :]
return C, | Moved imports to higher level | _rank_decomposition | 59d22b6bb7287613d598611027f640d068ca5748 | sympy | decompositions.py | 11 | 91 | https://github.com/sympy/sympy.git | 1 | 69 | 0 | 21 | 105 | Python | {
"docstring": "Returns a pair of matrices (`C`, `F`) with matching rank\n such that `A = C F`.\n\n Parameters\n ==========\n\n iszerofunc : Function, optional\n A function used for detecting whether an element can\n act as a pivot. ``lambda x: x.is_zero`` is used by default.\n\n simplify : Bool or Function, optional\n A function used to simplify elements when looking for a\n pivot. By default SymPy's ``simplify`` is used.\n\n Returns\n =======\n\n (C, F) : Matrices\n `C` and `F` are full-rank matrices with rank as same as `A`,\n whose product gives `A`.\n\n See Notes for additional mathematical details.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> A = Matrix([\n ... [1, 3, 1, 4],\n ... [2, 7, 3, 9],\n ... [1, 5, 3, 1],\n ... [1, 2, 0, 8]\n ... ])\n >>> C, F = A.rank_decomposition()\n >>> C\n Matrix([\n [1, 3, 4],\n [2, 7, 9],\n [1, 5, 1],\n [1, 2, 8]])\n >>> F\n Matrix([\n [1, 0, -2, 0],\n [0, 1, 1, 0],\n [0, 0, 0, 1]])\n >>> C * F == A\n True\n\n Notes\n =====\n\n Obtaining `F`, an RREF of `A`, is equivalent to creating a\n product\n\n .. math::\n E_n E_{n-1} ... E_1 A = F\n\n where `E_n, E_{n-1}, \\dots, E_1` are the elimination matrices or\n permutation matrices equivalent to each row-reduction step.\n\n The inverse of the same product of elimination matrices gives\n `C`:\n\n .. math::\n C = \\left(E_n E_{n-1} \\dots E_1\\right)^{-1}\n\n It is not necessary, however, to actually compute the inverse:\n the columns of `C` are those from the original matrix with the\n same column indices as the indices of the pivot columns of `F`.\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Rank_factorization\n\n .. [2] Piziak, R.; Odell, P. L. (1 June 1999).\n \"Full Rank Factorization of Matrices\".\n Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882\n\n See Also\n ========\n\n sympy.matrices.matrices.MatrixReductions.rref\n ",
"language": "en",
"n_whitespaces": 543,
"n_words": 291,
"vocab_size": 172
} | def _rank_decomposition(M, iszerofunc=_iszero, simplify=False):
r
F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc,
pivots=True)
rank = len(pivot_cols)
C = M.extract(range(M.rows), pivot_cols)
F = F[:rank, :]
return C, F
|
|
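The docstring's example in the record above doubles as a quick self-check (a minimal sketch via the public Matrix.rank_decomposition method, which is backed by this helper):

    from sympy import Matrix

    A = Matrix([[1, 3, 1, 4], [2, 7, 3, 9], [1, 5, 3, 1], [1, 2, 0, 8]])
    C, F = A.rank_decomposition()
    assert C * F == A                                 # the full-rank factors reproduce A
    assert C.shape == (4, 3) and F.shape == (3, 4)    # rank of A is 3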
75,623 | 259,172 | 542 | sklearn/preprocessing/_data.py | 172 | 37 | def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False):
if norm not in ("l1", "l2", "max"):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = "csc"
elif axis == 1:
sparse_format = "csr"
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(
X,
accept_sparse=sparse_format,
copy=copy,
estimator="the normalize function",
dtype=FLOAT_DTYPES,
)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ("l1", "l2"):
raise NotImplementedError(
"return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'"
)
if norm == "l1":
inplace_csr_row_normalize_l1(X)
elif norm == "l2":
inplace_csr_row_normalize_l2(X)
elif norm == "max":
mins, maxes = min_max_axis(X, 1)
norms = np.maximum(abs(mins), maxes)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == "l1":
norms = np.abs(X).sum(axis=1)
elif norm == "l2":
norms = row_norms(X)
elif norm == "max":
norms = np.max(abs(X), axis=1)
norms = _handle_zeros_in_scale(no | fix docstrings on preprocessing._data.normalize (#22795)
Co-authored-by: ducanne <[email protected]> | normalize | 6d36596c4d724cb1354db9eb824bc84b8e2ce512 | scikit-learn | _data.py | 16 | 50 | https://github.com/scikit-learn/scikit-learn.git | 16 | 300 | 0 | 91 | 501 | Python | {
"docstring": "Scale input vectors individually to unit norm (vector length).\n\n Read more in the :ref:`User Guide <preprocessing_normalization>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data to normalize, element by element.\n scipy.sparse matrices should be in CSR format to avoid an\n un-necessary copy.\n\n norm : {'l1', 'l2', 'max'}, default='l2'\n The norm to use to normalize each non zero sample (or each non-zero\n feature if axis is 0).\n\n axis : {0, 1}, default=1\n axis used to normalize the data along. If 1, independently normalize\n each sample, otherwise (if 0) normalize each feature.\n\n copy : bool, default=True\n Set to False to perform inplace row normalization and avoid a\n copy (if the input is already a numpy array or a scipy.sparse\n CSR matrix and if axis is 1).\n\n return_norm : bool, default=False\n Whether to return the computed norms.\n\n Returns\n -------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Normalized input X.\n\n norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, )\n An array of norms along given axis for X.\n When X is sparse, a NotImplementedError will be raised\n for norm 'l1' or 'l2'.\n\n See Also\n --------\n Normalizer : Performs normalization using the Transformer API\n (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`).\n\n Notes\n -----\n For a comparison of the different scalers, transformers, and normalizers,\n see :ref:`examples/preprocessing/plot_all_scaling.py\n <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.\n ",
"language": "en",
"n_whitespaces": 395,
"n_words": 220,
"vocab_size": 142
} | def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False):
if norm not in ("l1", "l2", "max"):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = "csc"
elif axis == 1:
sparse_format = "csr"
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(
X,
accept_sparse=sparse_format,
copy=copy,
estimator="the normalize function",
dtype=FLOAT_DTYPES,
)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ("l1", "l2"):
raise NotImplementedError(
"return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'"
)
if norm == "l1":
inplace_csr_row_normalize_l1(X)
elif norm == "l2":
inplace_csr_row_normalize_l2(X)
elif norm == "max":
mins, maxes = min_max_axis(X, 1)
norms = np.maximum(abs(mins), maxes)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == "l1":
norms = np.abs(X).sum(axis=1)
elif norm == "l2":
norms = row_norms(X)
elif norm == "max":
norms = np.max(abs(X), axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
|
|
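
Because the docstring above spells out the norm, axis and return_norm options, a short usage sketch may help (assumes NumPy and scikit-learn are installed):

import numpy as np
from sklearn.preprocessing import normalize

X = np.array([[3.0, 4.0], [1.0, 0.0]])
print(normalize(X, norm="l2"))             # each row scaled to unit Euclidean length
print(normalize(X, norm="l1", axis=0))     # each column scaled to unit L1 norm
X_scaled, norms = normalize(X, norm="l2", return_norm=True)
print(norms)                               # [5. 1.]: the row norms divided out above
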
3,075 | 19,712 | 255 | pipenv/installers.py | 41 | 16 | def find_version_to_install(self, name):
version = Version.parse(name)
if version.patch is not None:
return name
try:
best_match = max(
(
inst_version
for inst_version in self.iter_installable_versions()
if inst_version.matches_minor(version)
),
key=operator.attrgetter("cmpkey"),
)
except ValueError:
| Issue 4993 Add standard pre commit hooks and apply linting. (#4994)
* Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else. | find_version_to_install | 9a3b3ce70621af6f9adaa9eeac9cf83fa149319c | pipenv | installers.py | 14 | 18 | https://github.com/pypa/pipenv.git | 5 | 73 | 0 | 33 | 123 | Python | {
"docstring": "Find a version in the installer from the version supplied.\n\n A ValueError is raised if a matching version cannot be found.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 21,
"vocab_size": 17
} | def find_version_to_install(self, name):
version = Version.parse(name)
if version.patch is not None:
return name
try:
best_match = max(
(
inst_version
for inst_version in self.iter_installable_versions()
if inst_version.matches_minor(version)
),
key=operator.attrgetter("cmpkey"),
)
except ValueError:
raise ValueError(
f"no installable version found for {name!r}",
)
return best_match
|
|
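
The pipenv helper above returns the newest installable version that shares the requested major.minor pair, and lets max() raise ValueError when nothing matches. Version, matches_minor and cmpkey are pipenv internals, so the self-contained sketch below substitutes a toy stand-in for them:

import operator
from dataclasses import dataclass
from typing import Optional

@dataclass
class Version:                         # toy stand-in for pipenv's Version
    major: int
    minor: int
    patch: Optional[int] = None

    @property
    def cmpkey(self):
        return (self.major, self.minor, self.patch or 0)

    def matches_minor(self, other):
        return (self.major, self.minor) == (other.major, other.minor)

installable = [Version(3, 9, 1), Version(3, 9, 7), Version(3, 10, 2)]
requested = Version(3, 9)              # "3.9" with no patch: pick the newest 3.9.x

# max() raises ValueError on an empty generator, which the real method re-raises
# with a friendlier message.
best = max((v for v in installable if v.matches_minor(requested)),
           key=operator.attrgetter("cmpkey"))
print(best)                            # Version(major=3, minor=9, patch=7)
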
78,322 | 266,161 | 138 | netbox/utilities/utils.py | 45 | 16 | def copy_safe_request(request):
meta = {
k: request.META[k]
for k in HTTP_REQUEST_META_SAFE_COPY
if k in request.META and | Closes #10920: Include request cookies when queuing a custom script | copy_safe_request | 540bba4544d9f31c126571cc1a45a6783b3b6a89 | netbox | utils.py | 13 | 16 | https://github.com/netbox-community/netbox.git | 4 | 97 | 0 | 43 | 158 | Python | {
"docstring": "\n Copy selected attributes from a request object into a new fake request object. This is needed in places where\n thread safe pickling of the useful request data is needed.\n ",
"language": "en",
"n_whitespaces": 39,
"n_words": 29,
"vocab_size": 25
} | def copy_safe_request(request):
meta = {
k: request.META[k]
for k in HTTP_REQUEST_META_SAFE_COPY
if k in request.META and isinstance(request.META[k], str)
}
return NetBoxFakeRequest({
'META': meta,
'COOKIES': request.COOKIES,
'POST': request.POST,
'GET': request.GET,
'FILES': request.FILES,
'user': request.user,
'path': request.path,
'id': getattr(request, 'id', None), # UUID assigned by middleware
})
|
|
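
The idea above is a safelist copy: only known-safe, string-valued META keys survive, so the resulting plain dict can be pickled and queued. HTTP_REQUEST_META_SAFE_COPY and NetBoxFakeRequest are NetBox internals; the key list and helper below are illustrative stand-ins only:

SAFE_META_KEYS = ("HTTP_HOST", "HTTP_USER_AGENT", "REMOTE_ADDR")   # stand-in safelist

def copy_safe_meta(meta):
    # Keep only safelisted keys, and drop non-string values (e.g. wsgi.* handles)
    # that would break pickling.
    return {k: meta[k] for k in SAFE_META_KEYS if k in meta and isinstance(meta[k], str)}

print(copy_safe_meta({"HTTP_HOST": "netbox.local", "wsgi.input": object()}))
# {'HTTP_HOST': 'netbox.local'}
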
47,190 | 195,091 | 168 | projects/director/director_agent.py | 38 | 13 | def batchify(self, obs_batch, sort=False):
batch = super().batchify(obs_batch, sort=sort)
if batc | Added director agent and safety experiment commands. (#4602)
* Added director agent and safety.
* ran autoformat.sh | batchify | 2ef5586ed0d644abe18cd3ff45ef9fa01981e87c | ParlAI | director_agent.py | 13 | 14 | https://github.com/facebookresearch/ParlAI.git | 4 | 98 | 0 | 27 | 153 | Python | {
"docstring": "\n This method calls the parent class's batchify method and then add\n classifier_label and is_ltr property to the the batch.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 15
} | def batchify(self, obs_batch, sort=False):
batch = super().batchify(obs_batch, sort=sort)
if batch.valid_indices is None:
return batch
batch.classifier_label = torch.tensor(
[
[obs_batch[i].get('classifier_label_idx', -1)]
for i in batch.valid_indices
]
)
batch.is_ltr = torch.tensor(
[[obs_batch[i].get('is_ltr', False)] for i in batch.valid_indices]
)
return batch
|
|
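
The two tensors are built by pulling an optional per-example field out of each observation dict, with a default for examples that lack the annotation, and stacking the results into column tensors. A stand-alone version of that step (assumes PyTorch; the field names follow the record above):

import torch

obs_batch = [
    {"text": "good reply", "classifier_label_idx": 1, "is_ltr": True},
    {"text": "bad reply", "classifier_label_idx": 0, "is_ltr": True},
    {"text": "plain reply"},                         # no classifier annotation
]

classifier_label = torch.tensor(
    [[obs.get("classifier_label_idx", -1)] for obs in obs_batch]
)
is_ltr = torch.tensor([[obs.get("is_ltr", False)] for obs in obs_batch])
print(classifier_label.shape, is_ltr.shape)          # torch.Size([3, 1]) twice
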
77,229 | 262,467 | 57 | TTS/tts/layers/tacotron/capacitron_layers.py | 25 | 8 | def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs):
| Capacitron (#977)
* new CI config
* initial Capacitron implementation
* delete old unused file
* fix empty formatting changes
* update losses and training script
* fix previous commit
* fix commit
* Add Capacitron test and first round of test fixes
* revert formatter change
* add changes to the synthesizer
* add stepwise gradual lr scheduler and changes to the recipe
* add inference script for dev use
* feat: add posterior inference arguments to synth methods
- added reference wav and text args for posterior inference
- some formatting
* fix: add espeak flag to base_tts and dataset APIs
- use_espeak_phonemes flag was not implemented in those APIs
- espeak is now able to be utilised for phoneme generation
- necessary phonemizer for the Capacitron model
* chore: update training script and style
- training script includes the espeak flag and other hyperparams
- made style
* chore: fix linting
* feat: add Tacotron 2 support
* leftover from dev
* chore:rename parser args
* feat: extract optimizers
- created a separate optimizer class to merge the two optimizers
* chore: revert arbitrary trainer changes
* fmt: revert formatting bug
* formatting again
* formatting fixed
* fix: log func
* fix: update optimizer
- Implemented load_state_dict for continuing training
* fix: clean optimizer init for standard models
* improvement: purge espeak flags and add training scripts
* Delete capacitronT2.py
delete old training script, new one is pushed
* feat: capacitron trainer methods
- extracted capacitron specific training operations from the trainer into custom
methods in taco1 and taco2 models
* chore: renaming and merging capacitron and gst style args
* fix: bug fixes from the previous commit
* fix: implement state_dict method on CapacitronOptimizer
* fix: call method
* fix: inference naming
* Delete train_capacitron.py
* fix: synthesize
* feat: update tests
* chore: fix style
* Delete capacitron_inference.py
* fix: fix train tts t2 capacitron tests
* fix: double forward in T2 train step
* fix: double forward in T1 train step
* fix: run make style
* fix: remove unused import
* fix: test for T1 capacitron
* fix: make lint
* feat: add blizzard2013 recipes
* make style
* fix: update recipes
* chore: make style
* Plot test sentences in Tacotron
* chore: make style and fix import
* fix: call forward first before problematic floordiv op
* fix: update recipes
* feat: add min_audio_len to recipes
* aux_input["style_mel"]
* chore: make style
* Make capacitron T2 recipe more stable
* Remove T1 capacitron Ljspeech
* feat: implement new grad clipping routine and update configs
* make style
* Add pretrained checkpoints
* Add default vocoder
* Change trainer package
* Fix grad clip issue for tacotron
* Fix scheduler issue with tacotron
Co-authored-by: Eren Gölge <[email protected]>
Co-authored-by: WeberJulian <[email protected]>
Co-authored-by: Eren Gölge <[email protected]> | calculate_post_conv_height | 8be21ec38734e780e787d07d7e979392d7d63f24 | TTS | capacitron_layers.py | 13 | 4 | https://github.com/coqui-ai/TTS.git | 2 | 36 | 0 | 23 | 59 | Python | {
"docstring": "Height of spec after n convolutions with fixed kernel/stride/pad.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs):
for _ in range(n_convs):
height = (height - kernel_size + 2 * pad) // stride + 1
return height
|
|
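
The helper above just iterates the usual convolution output-size formula, h = (h - k + 2*p) // s + 1, n_convs times. A quick worked example with plausible settings (height 80, kernel 3, stride 2, padding 1):

def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs):
    for _ in range(n_convs):
        height = (height - kernel_size + 2 * pad) // stride + 1
    return height

assert calculate_post_conv_height(80, 3, 2, 1, 1) == 40   # (80 - 3 + 2) // 2 + 1
assert calculate_post_conv_height(80, 3, 2, 1, 3) == 10   # 80 -> 40 -> 20 -> 10
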
89,645 | 290,529 | 164 | tests/components/stream/test_hls.py | 76 | 34 | async def test_hls_playlist_view(hass, setup_component, hls_stream, stream_worker_sync):
stream = create_stream(hass, STREAM_SOURCE, {}, dynamic_stream_settings())
stream_worker_sync.pause()
hls = stream.add_provider(HLS_PROVIDER)
for i in range(2):
segment = Segment(sequence=i, duration=SEGMENT_DURATION)
hls.put(segment)
await hass.async_block_till_done()
hls_client = await hls_stream(stream)
resp = await hls_client.get("/playlist.m3u8")
assert resp.status == HTTPStatus.OK
assert await resp.text() == make_playlist(
sequence=0, segments=[make_segment(0), make_segment(1)]
)
segment = Segment(sequence=2, duration=SEGMENT_DURATI | Refactor camera stream settings (#81663) | test_hls_playlist_view | ee910bd0e41391e00ccd521fe7d605e494d33046 | core | test_hls.py | 13 | 24 | https://github.com/home-assistant/core.git | 2 | 209 | 0 | 44 | 337 | Python | {
"docstring": "Test rendering the hls playlist with 1 and 2 output segments.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | async def test_hls_playlist_view(hass, setup_component, hls_stream, stream_worker_sync):
stream = create_stream(hass, STREAM_SOURCE, {}, dynamic_stream_settings())
stream_worker_sync.pause()
hls = stream.add_provider(HLS_PROVIDER)
for i in range(2):
segment = Segment(sequence=i, duration=SEGMENT_DURATION)
hls.put(segment)
await hass.async_block_till_done()
hls_client = await hls_stream(stream)
resp = await hls_client.get("/playlist.m3u8")
assert resp.status == HTTPStatus.OK
assert await resp.text() == make_playlist(
sequence=0, segments=[make_segment(0), make_segment(1)]
)
segment = Segment(sequence=2, duration=SEGMENT_DURATION)
hls.put(segment)
await hass.async_block_till_done()
resp = await hls_client.get("/playlist.m3u8")
assert resp.status == HTTPStatus.OK
assert await resp.text() == make_playlist(
sequence=0, segments=[make_segment(0), make_segment(1), make_segment(2)]
)
stream_worker_sync.resume()
await stream.stop()
|
|
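
make_playlist and make_segment are Home Assistant test helpers, so the exact strings asserted above are project-specific; the sketch below only illustrates the general shape of the HLS media playlist being compared (the tags are standard HLS, the segment URI pattern is made up):

def sketch_playlist(sequence, durations):
    lines = ["#EXTM3U", "#EXT-X-VERSION:7", f"#EXT-X-MEDIA-SEQUENCE:{sequence}"]
    for i, duration in enumerate(durations, start=sequence):
        lines += [f"#EXTINF:{duration:.3f},", f"./segment/{i}.m4s"]
    return "\n".join(lines)

print(sketch_playlist(0, [6, 6]))   # two segments, as in the first assertion above
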
32,355 | 141,415 | 107 | python/ray/train/_internal/backend_executor.py | 28 | 15 | def _create_local_rank_map(self) -> Dict:
rank_mapping = {}
ip_dict = defaultdict(int)
for world_rank in range(len(self.worker_group)):
worker = self.worker_group.workers[world_rank]
node_ip = worker.metadata.node_ip
rank_mapping[world_rank] = ip_dict[node_ip]
| [Train] Clean up `ray.train` package (#25566) | _create_local_rank_map | 80ae651f259e1ea13c21b285d6bfcc7fd834ef9c | ray | backend_executor.py | 11 | 30 | https://github.com/ray-project/ray.git | 2 | 65 | 0 | 22 | 104 | Python | {
"docstring": "Create mapping from worker world_rank to local_rank.\n\n Example:\n Worker 0: 0.0.0.0\n Worker 1: 0.0.0.0\n Worker 2: 0.0.0.1\n Worker 3: 0.0.0.0\n Worker 4: 0.0.0.1\n\n Workers 0, 1, 3 are on 0.0.0.0.\n Workers 2, 4 are on 0.0.0.1.\n\n Expected Output:\n {\n 0 -> 0,\n 1 -> 1,\n 2 -> 0,\n 3 -> 2,\n 4 -> 1\n }\n ",
"language": "en",
"n_whitespaces": 254,
"n_words": 55,
"vocab_size": 34
} | def _create_local_rank_map(self) -> Dict:
rank_mapping = {}
ip_dict = defaultdict(int)
for world_rank in range(len(self.worker_group)):
worker = self.worker_group.workers[world_rank]
node_ip = worker.metadata.node_ip
rank_mapping[world_rank] = ip_dict[node_ip]
ip_dict[node_ip] += 1
return rank_mapping
|
|
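
The docstring's worked example can be reproduced without Ray by feeding the per-worker node IPs straight into the same counting logic (worker and metadata objects are Ray internals, so plain strings stand in for them here):

from collections import defaultdict

def local_rank_map(node_ips):
    rank_mapping, ip_dict = {}, defaultdict(int)
    for world_rank, node_ip in enumerate(node_ips):
        rank_mapping[world_rank] = ip_dict[node_ip]   # next free local rank on that node
        ip_dict[node_ip] += 1
    return rank_mapping

# Matches the docstring example: workers 0, 1, 3 on 0.0.0.0 and workers 2, 4 on 0.0.0.1.
assert local_rank_map(["0.0.0.0", "0.0.0.0", "0.0.0.1", "0.0.0.0", "0.0.0.1"]) == {
    0: 0, 1: 1, 2: 0, 3: 2, 4: 1,
}
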
21,013 | 101,605 | 503 | tools/sort/sort.py | 124 | 46 | def _output_groups(self) -> None:
is_rename = self._args.sort_method != "none"
logger.info("Creating %s group folders in '%s'.",
len(self._sorter.binned), self._args.output_dir)
bin_names = [f"_{b}" for b in self._sorter.bin_names]
if is_rename:
| Overhaul sort:
- Standardize image data reading and writing
- Optimize loading (just one pass required)
- Make all sort groups binnable (to greater or lesser results)
- Add sort by pitch
- Deprecate multiple options
- linting, docs + locales | _output_groups | 98d01760e469fd2108eed8d0b0a1ba6297c3177c | faceswap | sort.py | 15 | 37 | https://github.com/deepfakes/faceswap.git | 11 | 260 | 0 | 88 | 486 | Python | {
"docstring": " Move the files to folders.\n\n Obtains the bins and original filenames from :attr:`_sorter` and outputs into appropriate\n bins in the output location\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 18
} | def _output_groups(self) -> None:
is_rename = self._args.sort_method != "none"
logger.info("Creating %s group folders in '%s'.",
len(self._sorter.binned), self._args.output_dir)
bin_names = [f"_{b}" for b in self._sorter.bin_names]
if is_rename:
bin_names = [f"{name}_by_{self._args.sort_method}" for name in bin_names]
for name in bin_names:
folder = os.path.join(self._args.output_dir, name)
if os.path.exists(folder):
rmtree(folder)
os.makedirs(folder)
description = f"{'Copying' if self._args.keep_original else 'Moving'} into groups"
description += " and renaming" if is_rename else ""
pbar = tqdm(range(len(self._sorter.sorted_filelist)),
desc=description,
file=sys.stdout,
leave=False)
idx = 0
for bin_id, bin_ in enumerate(self._sorter.binned):
pbar.set_description(f"{description}: Bin {bin_id + 1} of {len(self._sorter.binned)}")
output_path = os.path.join(self._args.output_dir, bin_names[bin_id])
if not bin_:
logger.debug("Removing empty bin: %s", output_path)
os.rmdir(output_path)
for source in bin_:
basename = os.path.basename(source)
dst_name = f"{idx:06d}_{basename}" if is_rename else basename
dest = os.path.join(output_path, dst_name)
self._sort_file(source, dest)
idx += 1
pbar.update(1)
# Output methods |
|
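
A dry-run sketch of the destination naming used above: when renaming is enabled, files are numbered consecutively across all bins with a six-digit prefix (the paths and bin names below are made up):

import os

binned = [["/faces/a.png", "/faces/b.png"], ["/faces/c.png"]]
bin_names = ["_blurry_by_blur", "_sharp_by_blur"]
output_dir = "/tmp/sorted"

idx = 0
for bin_id, bin_ in enumerate(binned):
    for source in bin_:
        dst_name = f"{idx:06d}_{os.path.basename(source)}"
        print(os.path.join(output_dir, bin_names[bin_id], dst_name))
        idx += 1
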
42,710 | 178,485 | 125 | nuitka/utils/SharedLibraries.py | 53 | 11 | def _setSharedLibraryRPATHElf(filename, rpath):
# TODO: Might write something that makes a shell script replacement
# in case no rpath is present, or use patchelf, for now our use
# case seems to use rpaths for executables.
# patchelf --set-rpath "$ORIGIN/path/to/library" <executable>
with withEnvironmentVarOverriden("LANG", "C"):
execut | macOS: Make sure to check exit code and output problematic command | _setSharedLibraryRPATHElf | e399c9cade448a8dd0018dc5484613782fcabf63 | Nuitka | SharedLibraries.py | 12 | 10 | https://github.com/Nuitka/Nuitka.git | 1 | 42 | 0 | 46 | 75 | Python | {
"docstring": "\\\nError, needs 'patchelf' on your system, due to 'RPATH' settings that need to be\nset.",
"language": "en",
"n_whitespaces": 13,
"n_words": 16,
"vocab_size": 15
} | def _setSharedLibraryRPATHElf(filename, rpath):
# TODO: Might write something that makes a shell script replacement
# in case no rpath is present, or use patchelf, for now our use
# case seems to use rpaths for executables.
# patchelf --set-rpath "$ORIGIN/path/to/library" <executable>
with withEnvironmentVarOverriden("LANG", "C"):
executeToolChecked(
logger=postprocessing_logger,
command=("patchelf", "--set-rpath", rpath, filename),
stderr_filter=_filterPatchelfErrorOutput,
            absence_message="Error, needs 'patchelf' on your system, due to 'RPATH' settings that need to be set.",
)
|
|
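
executeToolChecked is a Nuitka wrapper that adds logging, stderr filtering and a friendly message when the tool is missing; a rough standard-library equivalent of the same patchelf --set-rpath call might look like this:

import os
import subprocess

def set_rpath_elf(filename, rpath):
    env = dict(os.environ, LANG="C")   # keep tool output stable and parseable
    try:
        subprocess.run(
            ["patchelf", "--set-rpath", rpath, filename],
            check=True, capture_output=True, env=env,
        )
    except FileNotFoundError:
        raise SystemExit(
            "Error, needs 'patchelf' on your system, due to 'RPATH' settings "
            "that need to be set."
        )
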
35,802 | 154,137 | 105 | modin/core/dataframe/pandas/dataframe/dataframe.py | 21 | 6 | def _get_columns(self):
| FEAT-#4725: Make index and columns lazy in Modin DataFrame (#4726)
Co-authored-by: Mahesh Vashishtha <[email protected]>
Co-authored-by: Yaroslav Igoshev <[email protected]>
Signed-off-by: Vasily Litvinov <[email protected]> | _get_columns | adb16a17f721048005520388080627975c6852d8 | modin | dataframe.py | 11 | 8 | https://github.com/modin-project/modin.git | 3 | 41 | 0 | 14 | 68 | Python | {
"docstring": "\n Get the columns from the cache object.\n\n Returns\n -------\n pandas.Index\n An index object containing the column labels.\n ",
"language": "en",
"n_whitespaces": 64,
"n_words": 17,
"vocab_size": 15
} | def _get_columns(self):
if self._columns_cache is None:
self._columns_cache, column_widths = self._compute_axis_labels_and_lengths(
1
)
if self._column_widths_cache is None:
self._column_widths_cache = column_widths
return self._columns_cache
|
|
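
The method above is the usual compute-once-then-cache pattern; a self-contained version of the same idea (the real implementation defers to Modin's partition metadata, which the placeholder below only simulates):

class LazyColumns:
    def __init__(self):
        self._columns_cache = None

    def _compute_columns(self):
        return ["a", "b", "c"]             # placeholder for the expensive pass over partitions

    def columns(self):
        if self._columns_cache is None:    # first access pays the cost
            self._columns_cache = self._compute_columns()
        return self._columns_cache         # later accesses reuse the cached labels
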
103,934 | 305,142 | 301 | homeassistant/components/intesishome/climate.py | 90 | 41 | async def async_update(self) -> None:
# Update values from controller's device dictionary
self._connected = self._ | Improve entity type hints [i] (#77529) | async_update | 23090cb8a268b3f268aefa8477f30af88bf46051 | core | climate.py | 9 | 24 | https://github.com/home-assistant/core.git | 1 | 243 | 0 | 63 | 393 | Python | {
"docstring": "Copy values from controller dictionary to climate device.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | async def async_update(self) -> None:
# Update values from controller's device dictionary
self._connected = self._controller.is_connected
self._current_temp = self._controller.get_temperature(self._device_id)
self._fan_speed = self._controller.get_fan_speed(self._device_id)
self._power = self._controller.is_on(self._device_id)
self._min_temp = self._controller.get_min_setpoint(self._device_id)
self._max_temp = self._controller.get_max_setpoint(self._device_id)
self._rssi = self._controller.get_rssi(self._device_id)
self._run_hours = self._controller.get_run_hours(self._device_id)
self._target_temp = self._controller.get_setpoint(self._device_id)
self._outdoor_temp = self._controller.get_outdoor_temperature(self._device_id)
# Operation mode
mode = self._controller.get_mode(self._device_id)
self._hvac_mode = MAP_IH_TO_HVAC_MODE.get(mode)
# Preset mode
preset = self._controller.get_preset_mode(self._device_id)
self._preset = MAP_IH_TO_PRESET_MODE.get(preset)
# Swing mode
# Climate module only supports one swing setting.
self._vvane = self._controller.get_vertical_swing(self._device_id)
self._hvane = self._controller.get_horizontal_swing(self._device_id)
# Power usage
self._power_consumption_heat = self._controller.get_heat_power_consumption(
self._device_id
)
self._power_consumption_cool = self._controller.get_cool_power_consumption(
self._device_id
)
|
|
24,423 | 111,524 | 157 | spacy/tests/pipeline/test_entity_linker.py | 94 | 19 | def test_kb_valid_entities(nlp):
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[8, 4, 3])
mykb.add_entity(entity="Q2", freq=5, entity_vector=[2, 1, 0])
mykb.add_entity(entity="Q3", freq=25, entity_vector=[-1, -6, 5])
# adding aliases
mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.2])
mykb.add_alias(a | Refactor KB for easier customization (#11268)
* Add implementation of batching + backwards compatibility fixes. Tests indicate issue with batch disambiguation for custom singular entity lookups.
* Fix tests. Add distinction w.r.t. batch size.
* Remove redundant and add new comments.
* Adjust comments. Fix variable naming in EL prediction.
* Fix mypy errors.
* Remove KB entity type config option. Change return types of candidate retrieval functions to Iterable from Iterator. Fix various other issues.
* Update spacy/pipeline/entity_linker.py
Co-authored-by: Paul O'Leary McCann <[email protected]>
* Update spacy/pipeline/entity_linker.py
Co-authored-by: Paul O'Leary McCann <[email protected]>
* Update spacy/kb_base.pyx
Co-authored-by: Paul O'Leary McCann <[email protected]>
* Update spacy/kb_base.pyx
Co-authored-by: Paul O'Leary McCann <[email protected]>
* Update spacy/pipeline/entity_linker.py
Co-authored-by: Paul O'Leary McCann <[email protected]>
* Add error messages to NotImplementedErrors. Remove redundant comment.
* Fix imports.
* Remove redundant comments.
* Rename KnowledgeBase to InMemoryLookupKB and BaseKnowledgeBase to KnowledgeBase.
* Fix tests.
* Update spacy/errors.py
Co-authored-by: Sofie Van Landeghem <[email protected]>
* Move KB into subdirectory.
* Adjust imports after KB move to dedicated subdirectory.
* Fix config imports.
* Move Candidate + retrieval functions to separate module. Fix other, small issues.
* Fix docstrings and error message w.r.t. class names. Fix typing for candidate retrieval functions.
* Update spacy/kb/kb_in_memory.pyx
Co-authored-by: Sofie Van Landeghem <[email protected]>
* Update spacy/ml/models/entity_linker.py
Co-authored-by: Sofie Van Landeghem <[email protected]>
* Fix typing.
* Change typing of mentions to be Span instead of Union[Span, str].
* Update docs.
* Update EntityLinker and _architecture docs.
* Update website/docs/api/entitylinker.md
Co-authored-by: Paul O'Leary McCann <[email protected]>
* Adjust message for E1046.
* Re-add section for Candidate in kb.md, add reference to dedicated page.
* Update docs and docstrings.
* Re-add section + reference for KnowledgeBase.get_alias_candidates() in docs.
* Update spacy/kb/candidate.pyx
* Update spacy/kb/kb_in_memory.pyx
* Update spacy/pipeline/legacy/entity_linker.py
* Remove canididate.md. Remove mistakenly added config snippet in entity_linker.py.
Co-authored-by: Paul O'Leary McCann <[email protected]>
Co-authored-by: Sofie Van Landeghem <[email protected]> | test_kb_valid_entities | 1f23c615d7a7326ca5a38a7d768b8b70caaa0e17 | spaCy | test_entity_linker.py | 11 | 16 | https://github.com/explosion/spaCy.git | 1 | 275 | 0 | 67 | 423 | Python | {
"docstring": "Test the valid construction of a KB with 3 entities and two aliases",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def test_kb_valid_entities(nlp):
mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)
# adding entities
mykb.add_entity(entity="Q1", freq=19, entity_vector=[8, 4, 3])
mykb.add_entity(entity="Q2", freq=5, entity_vector=[2, 1, 0])
mykb.add_entity(entity="Q3", freq=25, entity_vector=[-1, -6, 5])
# adding aliases
mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.2])
mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9])
# test the size of the corresponding KB
assert mykb.get_size_entities() == 3
assert mykb.get_size_aliases() == 2
# test retrieval of the entity vectors
assert mykb.get_vector("Q1") == [8, 4, 3]
assert mykb.get_vector("Q2") == [2, 1, 0]
assert mykb.get_vector("Q3") == [-1, -6, 5]
# test retrieval of prior probabilities
assert_almost_equal(mykb.get_prior_prob(entity="Q2", alias="douglas"), 0.8)
assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglas"), 0.2)
assert_almost_equal(mykb.get_prior_prob(entity="Q342", alias="douglas"), 0.0)
assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglassssss"), 0.0)
|
|
38,596 | 160,327 | 183 | numpy/lib/twodim_base.py | 104 | 17 | def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
if like is not None:
return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
# Ensure M and k are integers, so we don't get any surprise casting
# result | BUG: lib: Allow type uint64 for eye() arguments.
Closes gh-9982.
(Plus a few small PEP 8 fixes.) | eye | f9355942f6ef7c5d27691c4571096234efb67a2b | numpy | twodim_base.py | 12 | 16 | https://github.com/numpy/numpy.git | 5 | 146 | 0 | 73 | 239 | Python | {
"docstring": "\n Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n order : {'C', 'F'}, optional\n Whether the output should be stored in row-major (C-style) or\n column-major (Fortran-style) order in memory.\n\n .. versionadded:: 1.14.0\n ${ARRAY_FUNCTION_LIKE}\n\n .. versionadded:: 1.20.0\n\n Returns\n -------\n I : ndarray of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n\n See Also\n --------\n identity : (almost) equivalent function\n diag : diagonal 2-D array from a 1-D array specified by the user.\n\n Examples\n --------\n >>> np.eye(2, dtype=int)\n array([[1, 0],\n [0, 1]])\n >>> np.eye(3, k=1)\n array([[0., 1., 0.],\n [0., 0., 1.],\n [0., 0., 0.]])\n\n ",
"language": "en",
"n_whitespaces": 350,
"n_words": 176,
"vocab_size": 120
} | def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
if like is not None:
return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like)
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
# Ensure M and k are integers, so we don't get any surprise casting
# results in the expressions `M-k` and `M+1` used below. This avoids
# a problem with inputs with type (for example) np.uint64.
M = operator.index(M)
k = operator.index(k)
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
_eye_with_like = array_function_dispatch(
_eye_dispatcher
)(eye)
|
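
The commit above is about accepting NumPy unsigned integers for the size and offset arguments, which works because M and k are normalised with operator.index before the slice arithmetic; a quick check:

import numpy as np

print(np.eye(np.uint64(3), k=np.uint64(1)))   # ones on the first super-diagonal
print(np.eye(4, 3, k=-1, dtype=int))          # rectangular, ones on the first sub-diagonal
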