ast_errors (stringlengths 0-3.2k) | d_id (int64 44-121k) | id (int64 70-338k) | n_whitespaces (int64 3-14k) | path (stringlengths 8-134) | n_words (int64 4-4.82k) | n_identifiers (int64 1-131) | random_cut (stringlengths 16-15.8k) | commit_message (stringlengths 2-15.3k) | fun_name (stringlengths 1-84) | commit_id (stringlengths 40-40) | repo (stringlengths 3-28) | file_name (stringlengths 5-79) | ast_levels (int64 6-31) | nloc (int64 1-548) | url (stringlengths 31-59) | complexity (int64 1-66) | token_counts (int64 6-2.13k) | n_ast_errors (int64 0-28) | vocab_size (int64 4-1.11k) | n_ast_nodes (int64 15-19.2k) | language (stringclasses 1 value) | documentation (dict) | code (stringlengths 101-62.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
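The header above describes one record per Python function drawn from a repository commit: identifiers and counts (`d_id`, `id`, `nloc`, `complexity`, `token_counts`, ...), provenance (`repo`, `path`, `commit_id`, `url`), the function source (`code`), a truncated variant (`random_cut`), and a `documentation` dict holding the docstring and its statistics. A minimal sketch of loading and inspecting records with this schema follows; the JSON Lines file name and the use of the Hugging Face `datasets` library are assumptions, since the dump does not state how the rows are distributed.

```python
# Sketch only: assumes the records are stored as JSON Lines in "train.jsonl"
# and that the Hugging Face `datasets` library is available.
from datasets import load_dataset

ds = load_dataset("json", data_files="train.jsonl", split="train")

record = ds[0]
# Provenance and identifiers for the mined function.
print(record["repo"], record["path"], record["fun_name"], record["url"])
# The docstring lives inside the nested `documentation` dict.
print(record["documentation"]["docstring"][:80])
# Per-function size and static-analysis style counts.
print("nloc:", record["nloc"], "complexity:", record["complexity"],
      "token_counts:", record["token_counts"])
```

The records themselves follow, one field block per column in the header.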
13,236 | 63,296 | 207 | .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 141 | 30 | def replaceHTMLEntity(t):
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form ``<!-- ... -->``"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form ``// ... (to end of line)``"
cppStyleComment = Combine(R | upd; format | replaceHTMLEntity | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | pyparsing.py | 21 | 2 | https://github.com/jindongwang/transferlearning.git | 1 | 15 | 0 | 91 | 347 | Python | {
"docstring": "Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or\nquoted strings, separated by commas.\n\nThis expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.\n",
"language": "en",
"n_whitespaces": 31,
"n_words": 34,
"vocab_size": 31
} | def replaceHTMLEntity(t):
return _htmlEntityMap.get(t.entity)
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form ``<!-- ... -->``"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form ``// ... (to end of line)``"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment")
"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
javaStyleComment = cppStyleComment
"Same as :class:`cppStyleComment`"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form ``# ... (to end of line)``"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')
+ Optional(Word(" \t")
+ ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")
commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList")
# some other useful expressions - using lower-case class name since we are really using this as a namespace |
|
1,072 | 6,800 | 81 | tests/integration_tests/utils.py | 29 | 19 | def read_csv_with_nan(path, nan_percent=0.0):
df = pd.read_csv(path)
if nan_percent > 0:
num_rows = len(df)
for col in df.columns:
for row in random.sampl | Adds regression tests for #2020 (#2021)
* fixes nans in dask df engine
* adds tests
* fixes with logs
* fixes
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* cleanup
* checking accuracy closeness
* investigates ray batcher dropping samples with logs
* clean up for PR review
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* cleanup
* add missing test param
* updated sampling to nan_percent% of rows in each col
* cleanup
Co-authored-by: Geoffrey Angus <[email protected]>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> | read_csv_with_nan | 9ae57a93ee4011c3d56cb0284ec07a861f3c88ff | ludwig | utils.py | 16 | 8 | https://github.com/ludwig-ai/ludwig.git | 4 | 76 | 0 | 24 | 116 | Python | {
"docstring": "Converts `nan_percent` of samples in each row of the CSV at `path` to NaNs.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def read_csv_with_nan(path, nan_percent=0.0):
df = pd.read_csv(path)
if nan_percent > 0:
num_rows = len(df)
for col in df.columns:
for row in random.sample(range(num_rows), int(round(nan_percent * num_rows))):
df[col].iloc[row] = np.nan
return df
|
|
10,203 | 50,719 | 586 | modules/image/text_to_image/stable_diffusion/diffusers/models/resnet.py | 219 | 36 | def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1):
assert isinstance(factor, int) and factor >= 1
# Setup filter kernel.
if k is None:
k = [1] * factor
# setup kernel
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
k = k * (gain * (factor**2))
if self.use_conv:
convH = w.shape[2]
convW = w.shape[3]
inC = w.shape[1]
p = (k.shape[0] - factor) - (convW - 1)
stride = (factor, factor)
# Determine data dimensions.
stride = [1, 1, factor, factor]
output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW)
output_padding = (
output_shape[0] - (x.shape[2] - 1) * stride[0] - convH,
output_shape[1] - (x.shape[3] - 1) * stride[1] - convW,
)
assert output_padding[0] >= 0 and output_padding[1] >= 0
inC = w.shape[1]
num_groups = x.shape[1] // inC
# Transpose weights.
w = paddle.reshape(w, (num_groups, -1, inC, convH, convW))
w = w[..., ::-1, ::-1].transpose([0, 2, 1, 3, 4])
w = paddle.reshape(w, (num_groups * inC, -1, convH, convW))
x = F.conv2d_transpose(x, w, st | Add stable diffusion module | _upsample_2d | a6790a651a12eb391060e533868bf0ba197f6f7e | PaddleHub | resnet.py | 18 | 33 | https://github.com/PaddlePaddle/PaddleHub.git | 6 | 434 | 0 | 112 | 670 | Python | {
"docstring": "Fused `upsample_2d()` followed by `Conv2d()`.\n\n Args:\n Padding is performed only once at the beginning, not between the operations. The fused op is considerably more\n efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary:\n order.\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,\n C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as\n `x`.\n ",
"language": "en",
"n_whitespaces": 256,
"n_words": 139,
"vocab_size": 102
} | def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1):
assert isinstance(factor, int) and factor >= 1
# Setup filter kernel.
if k is None:
k = [1] * factor
# setup kernel
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
k = k * (gain * (factor**2))
if self.use_conv:
convH = w.shape[2]
convW = w.shape[3]
inC = w.shape[1]
p = (k.shape[0] - factor) - (convW - 1)
stride = (factor, factor)
# Determine data dimensions.
stride = [1, 1, factor, factor]
output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW)
output_padding = (
output_shape[0] - (x.shape[2] - 1) * stride[0] - convH,
output_shape[1] - (x.shape[3] - 1) * stride[1] - convW,
)
assert output_padding[0] >= 0 and output_padding[1] >= 0
inC = w.shape[1]
num_groups = x.shape[1] // inC
# Transpose weights.
w = paddle.reshape(w, (num_groups, -1, inC, convH, convW))
w = w[..., ::-1, ::-1].transpose([0, 2, 1, 3, 4])
w = paddle.reshape(w, (num_groups * inC, -1, convH, convW))
x = F.conv2d_transpose(x, w, stride=stride, output_padding=output_padding, padding=0)
x = upfirdn2d_native(x, paddle.to_tensor(k), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))
else:
p = k.shape[0] - factor
x = upfirdn2d_native(x, paddle.to_tensor(k), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2))
return x
|
|
79,255 | 267,980 | 44 | test/lib/ansible_test/_internal/docker_util.py | 12 | 8 | def get_network_names(self) -> t.Optional[t.List[str]]:
i | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annotation type comments to native type hints.
* ansible-test - Use more native type hints.
Conversion of single-line function annotation type comments with default values to native type hints.
* ansible-test - Use more native type hints.
Manual conversion of type annotation comments for functions which have pylint directives. | get_network_names | 3eb0485dd92c88cc92152d3656d94492db44b183 | ansible | docker_util.py | 8 | 5 | https://github.com/ansible/ansible.git | 2 | 34 | 0 | 11 | 55 | Python | {
"docstring": "Return a list of the network names the container is attached to.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def get_network_names(self) -> t.Optional[t.List[str]]:
if self.networks is None:
return None
return sorted(self.networks)
|
|
14,413 | 67,035 | 16 | erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py | 31 | 12 | def get_delivered_items_cost():
dn_items = frappe.db.sql(
,
as_dict=1,
)
si_items = frappe.db.sql(
,
as_dict=1,
)
dn_item_map = {}
for item in dn_items:
dn_item_map.setdefault(item.project, item.amount)
for item in si_items: | style: format code with black | get_delivered_items_cost | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | project_wise_stock_tracking.py | 10 | 22 | https://github.com/frappe/erpnext.git | 3 | 74 | 0 | 19 | 116 | Python | {
"docstring": "select dn.project, sum(dn_item.base_net_amount) as amount\n\t\tfrom `tabDelivery Note` dn, `tabDelivery Note Item` dn_item\n\t\twhere dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != ''\n\t\tgroup by dn.projectselect si.project, sum(si_item.base_net_amount) as amount\n\t\tfrom `tabSales Invoice` si, `tabSales Invoice Item` si_item\n\t\twhere si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1\n\t\tand si.is_pos = 1 and ifnull(si.project, '') != ''\n\t\tgroup by si.project",
"language": "en",
"n_whitespaces": 57,
"n_words": 65,
"vocab_size": 40
} | def get_delivered_items_cost():
dn_items = frappe.db.sql(
,
as_dict=1,
)
si_items = frappe.db.sql(
,
as_dict=1,
)
dn_item_map = {}
for item in dn_items:
dn_item_map.setdefault(item.project, item.amount)
for item in si_items:
dn_item_map.setdefault(item.project, item.amount)
return dn_item_map
|
|
49,709 | 200,574 | 508 | sympy/tensor/tensor.py | 186 | 61 | def __new__(cls, *args, **kw_args):
is_canon_bp = kw_args.get('is_canon_bp', False)
args = list(map(_sympify, args))
free = [get_free_indices(arg) for arg in args]
free = set(itertools.chain(*free)) #flatten free
newargs = []
for arg in args:
dum_this = set(get_dummy_indices(arg))
dum_other = [get_dummy_indices(a) for a in newargs]
dum_other = set(itertools.chain(*dum_other)) #flatten dum_other
free_this = set(get_free_indices(arg))
if len(dum_this.intersect | move dummy index deduping to TensMul.__new__
Also removed _eval_subs and _xreplace. All tests pass. | __new__ | 6c55ca197b0f795047d8f8ee0d871ab36600d560 | sympy | tensor.py | 13 | 37 | https://github.com/sympy/sympy.git | 10 | 348 | 0 | 107 | 648 | Python | {
"docstring": "\n If the internal dummy indices in one arg conflict with the free indices of the remaining args, we need to rename those internal dummy indices.\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 25,
"vocab_size": 20
} | def __new__(cls, *args, **kw_args):
is_canon_bp = kw_args.get('is_canon_bp', False)
args = list(map(_sympify, args))
free = [get_free_indices(arg) for arg in args]
free = set(itertools.chain(*free)) #flatten free
newargs = []
for arg in args:
dum_this = set(get_dummy_indices(arg))
dum_other = [get_dummy_indices(a) for a in newargs]
dum_other = set(itertools.chain(*dum_other)) #flatten dum_other
free_this = set(get_free_indices(arg))
if len(dum_this.intersection(free)) > 0:
exclude = free_this.union(free, dum_other)
newarg = TensMul._dedupe_indices(arg, exclude, arg._index_structure)
else:
newarg = arg
newargs.append(newarg)
args = newargs
# Flatten:
args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])]
args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False)
# Data for indices:
index_types = [i.tensor_index_type for i in indices]
index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp)
obj = TensExpr.__new__(cls, *args)
obj._indices = indices
obj._index_types = index_types[:]
obj._index_structure = index_structure
obj._free = index_structure.free[:]
obj._dum = index_structure.dum[:]
obj._free_indices = {x[0] for x in obj.free}
obj._rank = len(obj.free)
obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum)
obj._coeff = S.One
obj._is_canon_bp = is_canon_bp
return obj
index_types = property(lambda self: self._index_types)
free = property(lambda self: self._free)
dum = property(lambda self: self._dum)
free_indices = property(lambda self: self._free_indices)
rank = property(lambda self: self._rank)
ext_rank = property(lambda self: self._ext_rank)
|
|
19,308 | 96,402 | 178 | tests/sentry/incidents/test_action_handlers.py | 38 | 19 | def test_context_for_crash_rate_alert(self):
status = TriggerStatus.ACTIVE
incident = self.create_incident()
alert_rule = self.create_alert_rule(
aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
)
alert_rule_trigger = self.create_alert_rule_trigger(alert_rule)
action = self.create_alert_rule_trigger_action(
alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident
)
assert (
generate_incident_trigger_email_context(
self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CRITICAL
)["aggregate"]
== "percentage(sessions_crashed, sessions)"
) | fix(metric_alerts): Make sure critical triggers resolve properly when no action is set on a warning trigger (#31883)
### Problem
If we have an alert set up like:
- Warning: 50. Action: None
- Critical: 100. Action: Slack
Then if we go from critical -> warning state the slack resolve action will fail to fire.
### Cause
The reason this happens is related to a previous fix. For an alert like
- Warning: 50. Action: Slack
- Critical: 100. Action: Slack
When going from critical -> warning the critical action would be marked as resolved. This would
cause a slack notification with `Resolved` to be sent to the channel. This is misleading, because
the alert is still active, just in the warning state. What we want here is to fire a warning
notification instead.
The initial fix for this was that when we resolved a critical trigger, we’d check and see whether
there was an active warning trigger. If so, we’d send a warning trigger fire to our actions, rather
than a critical trigger resolve. This works ok for many cases, but fails when the actions on the
warning trigger are different to those on the critical trigger.
### Fix
Substituting the warning trigger for the critical trigger causes us subtle bugs. So, instead of
this, when triggering fires/resolves on our action handlers we will also pass along the incident
state change that the trigger/resolve caused the incident to go into.
So if a critical trigger resolves, we check what state it would have put the incident in. If
there’s a warning trigger, then the state is warning. If no warning trigger, the state is closed.
This state is then used to appropriately generate the messages that we send to users via our
various actions.
So now, If we have an alert set up like:
- Warning: 50. Action: None
- Critical: 100. Action: Slack
If this goes from
- critical -> warning OR critical -> resolved we will send `IncidentStatus.WARNING` to any actions
related to the critical trigger.
- warning -> resolved We do nothing since there are no actions on the warning trigger
If we have an alert set up like:
- Warning: 50. Action: Slack
- Critical: 100. Action: Slack
If this goes from:
- critical -> warning: critical trigger, `IncidentStatus.Warning`
- warning -> resolved: warning trigger, `IncidentStatus.Closed`
- critical -> resolved: Since we de-dupe triggers to avoid spamming the user, we will select the
warning trigger here, and send `IncidentStatus.closed`
If we have an alert set up like:
- Warning: 50. Action: Slack
- Critical: 100. Action: Pagerduty
If this goes from:
- critical -> warning: critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Nothing sent
to Slack
- warning -> resolved: warning trigger, `IncidentStatus.Closed` sent to Slack. Nothing sent to
Pagerduty
- critical -> resolved: Critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Warning
trigger, `IncidentStatus.Closed` sent to Slack. We don’t de-dupe here since the actions are
different. | test_context_for_crash_rate_alert | 146fba432a32568be7d0b884dae0c39a6c33a11f | sentry | test_action_handlers.py | 11 | 16 | https://github.com/getsentry/sentry.git | 1 | 76 | 0 | 32 | 124 | Python | {
"docstring": "\n Test that ensures the metric name for Crash rate alerts excludes the alias\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | def test_context_for_crash_rate_alert(self):
status = TriggerStatus.ACTIVE
incident = self.create_incident()
alert_rule = self.create_alert_rule(
aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
)
alert_rule_trigger = self.create_alert_rule_trigger(alert_rule)
action = self.create_alert_rule_trigger_action(
alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident
)
assert (
generate_incident_trigger_email_context(
self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CRITICAL
)["aggregate"]
== "percentage(sessions_crashed, sessions)"
)
|
|
51,471 | 206,293 | 302 | django/template/loaders/base.py | 49 | 13 | def get_template(self, template_name, skip=None):
tried = []
for origin in self.get_template_sources(template_name):
if skip is n | Refs #33476 -- Reformatted code with Black. | get_template | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | base.py | 14 | 19 | https://github.com/django/django.git | 5 | 98 | 0 | 43 | 155 | Python | {
"docstring": "\n Call self.get_template_sources() and return a Template object for\n the first template matching template_name. If skip is provided, ignore\n template origins in skip. This is used to avoid recursion during\n template extending.\n ",
"language": "en",
"n_whitespaces": 67,
"n_words": 31,
"vocab_size": 28
} | def get_template(self, template_name, skip=None):
tried = []
for origin in self.get_template_sources(template_name):
if skip is not None and origin in skip:
tried.append((origin, "Skipped to avoid recursion"))
continue
try:
contents = self.get_contents(origin)
except TemplateDoesNotExist:
tried.append((origin, "Source does not exist"))
continue
else:
return Template(
contents,
origin,
origin.template_name,
self.engine,
)
raise TemplateDoesNotExist(template_name, tried=tried)
|
|
@keras_export("keras.optimizers.experimental.Optimizer", v1=[]) | 81,373 | 275,289 | 103 | keras/optimizers/optimizer_experimental/optimizer.py | 24 | 10 | def from_config(cls, config):
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"]
)
return cls( | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | from_config | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | optimizer.py | 14 | 7 | https://github.com/keras-team/keras.git | 3 | 44 | 1 | 21 | 104 | Python | {
"docstring": "Creates an optimizer from its config.\n\n This method is the reverse of `get_config`, capable of instantiating the\n same optimizer from the config dictionary.\n\n Args:\n config: A Python dictionary, typically the output of get_config.\n\n Returns:\n An optimizer instance.\n name: String. The name to use\n for momentum accumulator weights created by\n the optimizer.\n clipnorm: Float. If set, the gradient of each weight is individually\n clipped so that its norm is no higher than this value.\n clipvalue: Float. If set, the gradient of each weight is clipped to be no\n higher than this value.\n global_clipnorm: Float. If set, the gradient of all weights is clipped so\n that their global norm is no higher than this value.\n use_ema: Boolean, defaults to False. If True, exponential moving average\n (EMA) is applied. EMA consists of computing an exponential moving\n average of the weights of the model (as the weight values change after\n each training batch), and periodically overwriting the weights with\n their moving average.\n ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is\n the momentum to use when computing the EMA of the model's weights:\n `new_average = ema_momentum * old_average + (1 - ema_momentum) *\n current_variable_value`.\n ema_overwrite_frequency: Int or None, defaults to None. Only used if\n `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we\n overwrite the model variable by its moving average. If None, the optimizer\n does not overwrite model variables in the middle of training, and you\n need to explicitly overwrite the variables at the end of training\n by calling `optimizer.finalize_variable_values()` (which updates the model\n variables in-place). When using the built-in `fit()` training loop, this\n happens automatically after the last epoch, and you don't need to do\n anything.\n jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA\n compilation. If no GPU device is found, this flag will be ignored.\n **kwargs: keyword arguments only used for backward compatibility.",
"language": "en",
"n_whitespaces": 492,
"n_words": 306,
"vocab_size": 166
} | def from_config(cls, config):
if "learning_rate" in config:
if isinstance(config["learning_rate"], dict):
config["learning_rate"] = learning_rate_schedule.deserialize(
config["learning_rate"]
)
return cls(**config)
base_optimizer_keyword_args =
# pylint: disable=g-classes-have-attributes
@keras_export("keras.optimizers.experimental.Optimizer", v1=[]) |
7,726 | 42,764 | 238 | airflow/providers/amazon/aws/log/s3_task_handler.py | 92 | 19 | def close(self):
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
log = pathlib.Path(local_loc).read_text()
self.s3_write(log, remote_loc)
# Mark closed so we don't double write if close is called twice
self.clo | Light Refactor and Clean-up AWS Provider (#23907) | close | 595981c8ad3cfeb4ad7a4514d00060e978aa9d81 | airflow | s3_task_handler.py | 12 | 12 | https://github.com/apache/airflow.git | 4 | 93 | 0 | 68 | 158 | Python | {
"docstring": "Close and upload local log file to remote storage S3.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def close(self):
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc):
# read log and remove old logs to get just the latest additions
log = pathlib.Path(local_loc).read_text()
self.s3_write(log, remote_loc)
# Mark closed so we don't double write if close is called twice
self.closed = True
|
|
50,242 | 203,167 | 41 | tests/view_tests/views.py | 15 | 10 | def safestring_in_template_exception(request):
template = Template('{% extends "<script>alert(1);</script>" %}')
try:
template.render(Cont | Fixed #33461 -- Escaped template errors in the technical 500 debug page. | safestring_in_template_exception | c5c7a15b09368a58340d3a65ba9d1f1441e92dc8 | django | views.py | 13 | 6 | https://github.com/django/django.git | 2 | 37 | 0 | 15 | 67 | Python | {
"docstring": "\n Trigger an exception in the template machinery which causes a SafeString\n to be inserted as args[0] of the Exception.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 19,
"vocab_size": 18
} | def safestring_in_template_exception(request):
template = Template('{% extends "<script>alert(1);</script>" %}')
try:
template.render(Context())
except Exception:
return technical_500_response(request, *sys.exc_info())
|
|
115,017 | 316,439 | 18 | tests/test_config_entries.py | 9 | 6 | async def test_unique_id_in_progress(hass, manager):
mock_integration(hass, Mo | Search/replace RESULT_TYPE_* by FlowResultType enum (#74642) | test_unique_id_in_progress | 7cd68381f1d4f58930ffd631dfbfc7159d459832 | core | test_config_entries.py | 10 | 17 | https://github.com/home-assistant/core.git | 1 | 127 | 0 | 9 | 45 | Python | {
"docstring": "Test that we abort if there is already a flow in progress with same unique id.",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 16
} | async def test_unique_id_in_progress(hass, manager):
mock_integration(hass, MockModule("comp"))
mock_entity_platform(hass, "config_flow.comp", None)
|
|
73,285 | 250,140 | 2,689 | tests/storage/test_event_chain.py | 338 | 55 | def test_simple(self) -> None:
event_factory = self.hs.get_event_builder_factory()
bob = "@creator:test"
alice = "@alice:test"
room_id = "!room:test"
# Ensure that we have a rooms entry so that we generate the chain index.
self.get_success(
self.store.store_room(
room_id=room_id,
room_creator_user_id="",
is_public=True,
room_version=RoomVersions.V6,
)
)
create = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Create,
"state_key": "",
"sender": bob,
"room_id": room_id,
"content": {"tag": "create"},
},
).build(prev_event_ids=[], auth_event_ids=[])
)
bob_join = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": bob,
"sender": bob,
"room_id": room_id,
"content": {"tag": "bob_join"},
},
).build(prev_event_ids=[], auth_event_ids=[create.event_id])
)
power = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.PowerLevels,
"state_key": "",
"sender": bob,
"room_id": room_id,
"content": {"tag": "power"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, bob_join.event_id],
)
)
alice_invite = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": alice,
"sender": bob,
"room_id": room_id,
"content": {"tag": "alice_invite"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],
)
)
alice_join = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": alice,
"sender": alice,
"room_id": room_id,
"content": {"tag": "alice_join"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id],
)
)
power_2 = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.PowerLevels,
"state_key": "",
"sender": bob,
"room_id": room_id,
"content": {"tag": "power_2"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],
)
)
bob_join_2 = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": bob,
"sender": bob,
"room_id": room_id,
"content": {"tag": "bob_join_2"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],
)
)
alice_join2 = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": alice,
"sender": alice,
"room_id": room_id,
"content": {"tag": "alice_join2"},
},
).build(
prev_event_ids=[],
auth_event_ids=[
create.event_id,
alice_join.event_id,
power_2.event_id,
],
)
)
events = [
create,
bob_join,
power,
alice_invite,
alice_join,
bob_join_2,
power_2,
alice_join2,
]
expected_links = [
(bob_join, create),
(power, create),
(power, bob_join),
(alice_invite, create),
(alice_invite, power),
(alice_invite, bob_join),
(bob_join_2, power),
(alice_join2, power_2),
]
self.persist(events)
chain_map, link_map = self.fetch_chains(events)
# Check that the expected links and only the expected links have been
# added.
self.assertEqual(len(expected_links), len(list(link_map.get_additions())))
for start, end in expected_links:
start_id, start_seq = chain_map[start.event_id]
end_id, end_seq = chain_map[end.event_id]
| Require types in tests.storage. (#14646)
Adds missing type hints to `tests.storage` package
and does not allow untyped definitions. | test_simple | 3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b | synapse | test_event_chain.py | 17 | 175 | https://github.com/matrix-org/synapse.git | 3 | 808 | 0 | 148 | 1,276 | Python | {
"docstring": "Test that the example in `docs/auth_chain_difference_algorithm.md`\n works.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 7,
"vocab_size": 7
} | def test_simple(self) -> None:
event_factory = self.hs.get_event_builder_factory()
bob = "@creator:test"
alice = "@alice:test"
room_id = "!room:test"
# Ensure that we have a rooms entry so that we generate the chain index.
self.get_success(
self.store.store_room(
room_id=room_id,
room_creator_user_id="",
is_public=True,
room_version=RoomVersions.V6,
)
)
create = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Create,
"state_key": "",
"sender": bob,
"room_id": room_id,
"content": {"tag": "create"},
},
).build(prev_event_ids=[], auth_event_ids=[])
)
bob_join = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": bob,
"sender": bob,
"room_id": room_id,
"content": {"tag": "bob_join"},
},
).build(prev_event_ids=[], auth_event_ids=[create.event_id])
)
power = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.PowerLevels,
"state_key": "",
"sender": bob,
"room_id": room_id,
"content": {"tag": "power"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, bob_join.event_id],
)
)
alice_invite = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": alice,
"sender": bob,
"room_id": room_id,
"content": {"tag": "alice_invite"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],
)
)
alice_join = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": alice,
"sender": alice,
"room_id": room_id,
"content": {"tag": "alice_join"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id],
)
)
power_2 = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.PowerLevels,
"state_key": "",
"sender": bob,
"room_id": room_id,
"content": {"tag": "power_2"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],
)
)
bob_join_2 = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": bob,
"sender": bob,
"room_id": room_id,
"content": {"tag": "bob_join_2"},
},
).build(
prev_event_ids=[],
auth_event_ids=[create.event_id, bob_join.event_id, power.event_id],
)
)
alice_join2 = self.get_success(
event_factory.for_room_version(
RoomVersions.V6,
{
"type": EventTypes.Member,
"state_key": alice,
"sender": alice,
"room_id": room_id,
"content": {"tag": "alice_join2"},
},
).build(
prev_event_ids=[],
auth_event_ids=[
create.event_id,
alice_join.event_id,
power_2.event_id,
],
)
)
events = [
create,
bob_join,
power,
alice_invite,
alice_join,
bob_join_2,
power_2,
alice_join2,
]
expected_links = [
(bob_join, create),
(power, create),
(power, bob_join),
(alice_invite, create),
(alice_invite, power),
(alice_invite, bob_join),
(bob_join_2, power),
(alice_join2, power_2),
]
self.persist(events)
chain_map, link_map = self.fetch_chains(events)
# Check that the expected links and only the expected links have been
# added.
self.assertEqual(len(expected_links), len(list(link_map.get_additions())))
for start, end in expected_links:
start_id, start_seq = chain_map[start.event_id]
end_id, end_seq = chain_map[end.event_id]
self.assertIn(
(start_seq, end_seq), list(link_map.get_links_between(start_id, end_id))
)
# Test that everything can reach the create event, but the create event
# can't reach anything.
for event in events[1:]:
self.assertTrue(
link_map.exists_path_from(
chain_map[event.event_id], chain_map[create.event_id]
),
)
self.assertFalse(
link_map.exists_path_from(
chain_map[create.event_id],
chain_map[event.event_id],
),
)
|
|
94,400 | 295,382 | 273 | homeassistant/components/withings/common.py | 69 | 14 | async def _do_retry(self, func, attempts=3) -> Any:
# pylint: disable=no-self-use
exception = None
for attempt in range(1, attempts + 1):
_LOGGER.debug("Attempt %s of %s", attempt, attempts)
try:
return await func()
except Exception as exception1: # pylint: disable=broad-except
_LOGGER.debug(
"Failed attempt %s of %s (%s)", attempt, attempts, exce | Fix withings race condition for access token (#69107) | _do_retry | ccd5ada3414b8b51835a7a29b2e5c2a70464987f | core | common.py | 14 | 19 | https://github.com/home-assistant/core.git | 4 | 83 | 0 | 56 | 135 | Python | {
"docstring": "Retry a function call.\n\n Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 17,
"vocab_size": 17
} | async def _do_retry(self, func, attempts=3) -> Any:
# pylint: disable=no-self-use
exception = None
for attempt in range(1, attempts + 1):
_LOGGER.debug("Attempt %s of %s", attempt, attempts)
try:
return await func()
except Exception as exception1: # pylint: disable=broad-except
_LOGGER.debug(
"Failed attempt %s of %s (%s)", attempt, attempts, exception1
)
# Make each backoff pause a little bit longer
await asyncio.sleep(0.5 * attempt)
exception = exception1
continue
if exception:
raise exception
|
|
55,526 | 218,881 | 343 | python3.10.4/Lib/lib2to3/refactor.py | 97 | 30 | def refactor_doctest(self, block, lineno, indent, filename):
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
| add python 3.10.4 for windows | refactor_doctest | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | refactor.py | 17 | 20 | https://github.com/XX-net/XX-Net.git | 8 | 195 | 0 | 66 | 313 | Python | {
"docstring": "Refactors one doctest.\n\n A doctest is given as a block of lines, the first of which starts\n with \">>>\" (possibly indented), while the remaining lines start\n with \"...\" (identically indented).\n\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 30,
"vocab_size": 27
} | def refactor_doctest(self, block, lineno, indent, filename):
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
|
|
25,567 | 115,819 | 41 | mindsdb/integrations/handlers/bigquery_handler/bigquery_handler.py | 13 | 8 | def get_tables(self, dataset_id) -> Response:
client = self.connect()
| Add handler | get_tables | cbe6767de6152a78348a8047244e5e3305b24e04 | mindsdb | bigquery_handler.py | 8 | 7 | https://github.com/mindsdb/mindsdb.git | 1 | 27 | 0 | 11 | 46 | Python | {
"docstring": "\n Get a list with all of the tabels in BigQuery\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def get_tables(self, dataset_id) -> Response:
client = self.connect()
result = client.list_tables(dataset_id)
return result
|
|
77,401 | 262,841 | 130 | PyInstaller/depend/dylib.py | 76 | 17 | def mac_set_relative_dylib_deps(libname, distname):
from macholib import util
from macholib.MachO import MachO
# Ignore bootloader; otherwise PyInstaller fails with exception like
# 'ValueError: total_size > low_offset (288 > 0)'
if os.path.basename(libname) in _BOOTLOADER_FNAMES:
return
# Determine how many directories up ('../') is the directory with shared dynamic libraries.
# E.g., ./qt4_plugins/images/ | Fix typos (#6782) [skip ci] | mac_set_relative_dylib_deps | 1a7d704ffbabb433007e3ba04750c2f13ade48e5 | pyinstaller | dylib.py | 15 | 21 | https://github.com/pyinstaller/pyinstaller.git | 5 | 141 | 0 | 61 | 132 | Python | {
"docstring": "\n On Mac OS set relative paths to dynamic library dependencies of `libname`.\n\n Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH. There are known some issues with\n DYLD_LIBRARY_PATH. Relative paths is more flexible mechanism.\n\n Current location of dependent libraries is derived from the location of the library path (paths start with\n '@loader_path').\n\n 'distname' path of the library relative to dist directory of frozen executable. We need this to determine the level\n of directory level for @loader_path of binaries not found in dist directory.\n\n For example, Qt5 plugins are not in the same directory as Qt*.dylib files. Without using\n '@loader_path/../..' for Qt plugins, Mac OS would not be able to resolve shared library dependencies,\n and Qt plugins will not be loaded.\n ",
"language": "en",
"n_whitespaces": 203,
"n_words": 120,
"vocab_size": 78
} | def mac_set_relative_dylib_deps(libname, distname):
from macholib import util
from macholib.MachO import MachO
# Ignore bootloader; otherwise PyInstaller fails with exception like
# 'ValueError: total_size > low_offset (288 > 0)'
if os.path.basename(libname) in _BOOTLOADER_FNAMES:
return
# Determine how many directories up ('../') is the directory with shared dynamic libraries.
# E.g., ./qt4_plugins/images/ -> ./../../
parent_dir = ''
# Check if distname is not only base filename.
if os.path.dirname(distname):
parent_level = len(os.path.dirname(distname).split(os.sep))
parent_dir = parent_level * (os.pardir + os.sep)
|
|
81,643 | 276,381 | 90 | keras/testing_infra/test_utils.py | 23 | 9 | def get_v2_optimizer(name, **kwargs):
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | get_v2_optimizer | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | test_utils.py | 16 | 9 | https://github.com/keras-team/keras.git | 2 | 42 | 0 | 22 | 73 | Python | {
"docstring": "Get the v2 optimizer requested.\n\n This is only necessary until v2 are the default, as we are testing in Eager,\n and Eager + v1 optimizers fail tests. When we are in v2, the strings alone\n should be sufficient, and this mapping can theoretically be removed.\n\n Args:\n name: string name of Keras v2 optimizer.\n **kwargs: any kwargs to pass to the optimizer constructor.\n\n Returns:\n Initialized Keras v2 optimizer.\n\n Raises:\n ValueError: if an unknown name was passed.\n ",
"language": "en",
"n_whitespaces": 116,
"n_words": 75,
"vocab_size": 58
} | def get_v2_optimizer(name, **kwargs):
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
"Could not find requested v2 optimizer: {}\nValid choices: {}".format(
name, list(_V2_OPTIMIZER_MAP.keys())
)
)
|
|
55,366 | 218,525 | 197 | python3.10.4/Lib/ipaddress.py | 52 | 14 | def _prefix_from_ip_int(cls, ip_int):
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
| add python 3.10.4 for windows | _prefix_from_ip_int | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | ipaddress.py | 11 | 12 | https://github.com/XX-net/XX-Net.git | 2 | 74 | 0 | 40 | 120 | Python | {
"docstring": "Return prefix length from the bitwise netmask.\n\n Args:\n ip_int: An integer, the netmask in expanded bitwise format\n\n Returns:\n An integer, the prefix length.\n\n Raises:\n ValueError: If the input intermingles zeroes & ones\n ",
"language": "en",
"n_whitespaces": 93,
"n_words": 32,
"vocab_size": 25
} | def _prefix_from_ip_int(cls, ip_int):
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = ip_int.to_bytes(byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
|
|
24,361 | 111,228 | 102 | spacy/pipeline/entity_linker.py | 21 | 11 | def batch_has_learnable_example(self, examples):
for eg in examples:
for ent in eg.predicted.ents:
candidates = list(self.get_candidates(self.kb, ent))
if candidates:
return True
return False
| Fix entity linker batching (#9669)
* Partial fix of entity linker batching
* Add import
* Better name
* Add `use_gold_ents` option, docs
* Change to v2, create stub v1, update docs etc.
* Fix error type
Honestly no idea what the right type to use here is.
ConfigValidationError seems wrong. Maybe a NotImplementedError?
* Make mypy happy
* Add hacky fix for init issue
* Add legacy pipeline entity linker
* Fix references to class name
* Add __init__.py for legacy
* Attempted fix for loss issue
* Remove placeholder V1
* formatting
* slightly more interesting train data
* Handle batches with no usable examples
This adds a test for batches that have docs but not entities, and a
check in the component that detects such cases and skips the update step
as thought the batch were empty.
* Remove todo about data verification
Check for empty data was moved further up so this should be OK now - the
case in question shouldn't be possible.
* Fix gradient calculation
The model doesn't know which entities are not in the kb, so it generates
embeddings for the context of all of them.
However, the loss does know which entities aren't in the kb, and it
ignores them, as there's no sensible gradient.
This has the issue that the gradient will not be calculated for some of
the input embeddings, which causes a dimension mismatch in backprop.
That should have caused a clear error, but with numpyops it was causing
nans to happen, which is another problem that should be addressed
separately.
This commit changes the loss to give a zero gradient for entities not in
the kb.
* add failing test for v1 EL legacy architecture
* Add nasty but simple working check for legacy arch
* Clarify why init hack works the way it does
* Clarify use_gold_ents use case
* Fix use gold ents related handling
* Add tests for no gold ents and fix other tests
* Use aligned ents function (not working)
This doesn't actually work because the "aligned" ents are gold-only. But
if I have a different function that returns the intersection, *then*
this will work as desired.
* Use proper matching ent check
This changes the process when gold ents are not used so that the
intersection of ents in the pred and gold is used.
* Move get_matching_ents to Example
* Use model attribute to check for legacy arch
* Rename flag
* bump spacy-legacy to lower 3.0.9
Co-authored-by: svlandeg <[email protected]> | batch_has_learnable_example | 91acc3ea75d219ad07ed2b106e7b8bdcb01516dd | spaCy | entity_linker.py | 15 | 7 | https://github.com/explosion/spaCy.git | 4 | 44 | 0 | 18 | 69 | Python | {
"docstring": "Check if a batch contains a learnable example.\n\n If one isn't present, then the update step needs to be skipped.\n ",
"language": "en",
"n_whitespaces": 34,
"n_words": 20,
"vocab_size": 19
} | def batch_has_learnable_example(self, examples):
for eg in examples:
for ent in eg.predicted.ents:
candidates = list(self.get_candidates(self.kb, ent))
if candidates:
return True
return False
|
|
17,165 | 81,164 | 56 | awx/main/tasks/callback.py | 17 | 8 | def get_delayed_update_fields(self):
self.extra_update_fields['emitted_events'] = self.event_ct
if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):
self.delay_up | Delay update of artifacts and error fields until final job save (#11832)
* Delay update of artifacts until final job save
Save tracebacks from receptor module to callback object
Move receptor traceback check up to be more logical
Use new mock_me fixture to avoid DB call with me method
Update the special runner message to the delay_update pattern
* Move special runner message into post-processing of callback fields | get_delayed_update_fields | 452744b67e02823879e722fe574984a2d760ed60 | awx | callback.py | 10 | 5 | https://github.com/ansible/awx.git | 2 | 42 | 0 | 17 | 76 | Python | {
"docstring": "Return finalized dict of all fields that should be saved along with the job status change",
"language": "en",
"n_whitespaces": 15,
"n_words": 16,
"vocab_size": 16
} | def get_delayed_update_fields(self):
self.extra_update_fields['emitted_events'] = self.event_ct
if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):
self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)
return self.extra_update_fields
|
|
15,508 | 70,442 | 89 | wagtail/search/tests/test_indexed_class.py | 20 | 7 | def get_checks_result(warning_id=None):
checks_result = checks.run_checks()
if warning_id:
return [
warning for warning in
| add check for correct search_fields on pages
- fixes #4940 | get_checks_result | d964675ee8fcb7ea58681ac8869733a86d58e4ec | wagtail | test_indexed_class.py | 11 | 7 | https://github.com/wagtail/wagtail.git | 4 | 34 | 0 | 15 | 56 | Python | {
"docstring": "Run Django checks on any with the 'search' tag used when registering the check",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def get_checks_result(warning_id=None):
checks_result = checks.run_checks()
if warning_id:
return [
warning for warning in
checks_result if warning.id == warning_id]
return checks_result
|
|
40,077 | 167,693 | 16 | pandas/core/config_init.py | 11 | 9 | def use_bottleneck_cb(key) -> None:
| TYP: return values in core/*.py (#47587)
* TYP: return values in core/*.py
* fix test
* to_html
* to_html part 2
* DataFrame.query
* more overloads
* fix query?
* increase stacklevel by one
* fix rename_axis
* and an overload for DataFrame.eval
* address comments
* fix typevar | use_bottleneck_cb | 9612375ca28ade056f15d4338f1bfde5d045c9fc | pandas | config_init.py | 9 | 3 | https://github.com/pandas-dev/pandas.git | 1 | 24 | 0 | 11 | 46 | Python | {
"docstring": "\n: bool\n Use the numexpr library to accelerate computation if it is installed,\n the default is True\n Valid values: False,True\n",
"language": "en",
"n_whitespaces": 28,
"n_words": 20,
"vocab_size": 18
} | def use_bottleneck_cb(key) -> None:
from pandas.core import nanops
nanops.set_use_bottleneck(cf.get_option(key))
use_numexpr_doc =
|
|
39,578 | 164,647 | 92 | pandas/tests/io/test_stata.py | 19 | 10 | def test_repeated_column_labels(self, datapath):
# GH 13923, 2577 | TST: Don't use autouse fixture in test_stata (#45831) | test_repeated_column_labels | c055dc4e6be9fc1b68d873a1ace286322dadd5e1 | pandas | test_stata.py | 12 | 16 | https://github.com/pandas-dev/pandas.git | 1 | 40 | 0 | 19 | 73 | Python | {
"docstring": "\nValue labels for column ethnicsn are not unique. These cannot be converted to\npandas categoricals.\n\nEither read the file with `convert_categoricals` set to False or use the\nlow level interface in `StataReader` to separately read the values and the\nvalue_labels.\n\nThe repeated labels are:\\n-+\\nwolof\n",
"language": "en",
"n_whitespaces": 38,
"n_words": 44,
"vocab_size": 37
} | def test_repeated_column_labels(self, datapath):
# GH 13923, 25772
msg =
with pytest.raises(ValueError, match=msg):
read_stata(
datapath("io", "data", "stata", "stata15.dta"),
convert_categoricals=True,
)
|
|
53,306 | 212,643 | 190 | PySimpleGUI.py | 70 | 17 | def string_width_in_pixels(cls, font, string):
# if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the meas | Enable Text class methods to be called prior to any windows being created: string_width_in_pixels, char_height_in_pixels, char_width_in_pixels. Removed destruction of hidden master root from popup_get_file & popup_get_folder (was old code) | string_width_in_pixels | acaae54a1ade24b2e55f7274ae4db747160a38db | PySimpleGUI | PySimpleGUI.py | 13 | 13 | https://github.com/PySimpleGUI/PySimpleGUI.git | 4 | 75 | 0 | 56 | 128 | Python | {
"docstring": "\n Get the with of the supplied string in pixels for the font being passed in.\n If an error occurs, 0 will be returned\n :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike, to be measured\n :type font: (str or (str, int[, str]) or None)\n :param string: the string to measure\n :type string: str\n :return: Width in pixels of string\n :rtype: (int)\n ",
"language": "en",
"n_whitespaces": 160,
"n_words": 76,
"vocab_size": 57
} | def string_width_in_pixels(cls, font, string):
# if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the measurement can happen
if Window.NumOpenWindows == 0:
root = tk.Tk()
else:
root = None
size = 0
try:
size = tkinter.font.Font(font=font).measure(string) # string's width
except Exception as e:
_error_popup_with_traceback('Exception retrieving string width in pixels', e)
if root is not None:
root.destroy()
return size
|
|
81,601 | 276,243 | 41 | keras/saving/saving_utils.py | 19 | 7 | def model_call_inputs(model, keep_original_batch_size=False):
input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)
if input_specs is None:
return None, None
input_specs = | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | model_call_inputs | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | saving_utils.py | 10 | 6 | https://github.com/keras-team/keras.git | 2 | 38 | 0 | 14 | 63 | Python | {
"docstring": "Inspect model to get its input signature.\n\n The model's input signature is a list with a single (possibly-nested) object.\n This is due to the Keras-enforced restriction that tensor inputs must be\n passed in as the first argument.\n\n For example, a model with input {'feature1': <Tensor>, 'feature2': <Tensor>}\n will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}]\n\n Args:\n model: Keras Model object.\n keep_original_batch_size: A boolean indicating whether we want to keep using\n the original batch size or set it to None. Default is `False`, which means\n that the batch dim of the returned input signature will always be set to\n `None`.\n\n Returns:\n A tuple containing `(args, kwargs)` TensorSpecs of the model call function\n inputs.\n `kwargs` does not contain the `training` argument.\n ",
"language": "en",
"n_whitespaces": 189,
"n_words": 119,
"vocab_size": 87
} | def model_call_inputs(model, keep_original_batch_size=False):
input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)
if input_specs is None:
return None, None
input_specs = _enforce_names_consistency(input_specs)
return input_specs
|
|
42,862 | 178,927 | 595 | nuitka/plugins/standard/DataFileCollectorPlugin.py | 139 | 40 | def _getSubDirectoryFolders(self, module, sub_dirs):
module_dir = module.getCompileTimeDirectory()
file_list = []
data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs]
# Gather the full file list, probably makes no sense to include bytecode files
file_list = sum(
(
getFileList(
data_dir, ignore_dirs=("__pycache__",), ignore_suffixes=(".pyc",)
)
for data_dir in data_dirs
),
[],
)
if not file_list:
msg = "No files or folders found for '%s' in subfolder(s) %r (%r)." % (
module.getFullName(),
sub_dirs,
data_dirs,
)
self.warning(msg)
is_package = (
module.isCompiledPythonPackage() or module.isUncompiledPythonPackage()
)
# We need to preserve the package target path in the dist folder.
if is_package:
package_part = module.getFullName().asPath()
else:
package = module.getFullName().getPackageName()
if package is None:
package_part = ""
else:
package_part = package.asPath()
item_set = OrderedSet()
for f in file_list:
target = os.path.join(package_part, os.path.relpath(f, module_dir))
dir_name = os.path.dirname(target)
item_set. | Plugins: Massive cleanup of data file handling
* Move data file handling out of standalone only, allowing support
for other modes as well.
* Attach logger and tags to data file objects. | _getSubDirectoryFolders | abfb99b0a05dd76d2ecc6ebc20732a271857c6c8 | Nuitka | DataFileCollectorPlugin.py | 14 | 42 | https://github.com/Nuitka/Nuitka.git | 8 | 232 | 0 | 95 | 376 | Python | {
"docstring": "Get dirnames in given subdirs of the module.\n\n Notes:\n All dirnames in folders below one of the sub_dirs are recursively\n retrieved and returned shortened to begin with the string of subdir.\n Args:\n module: module object\n sub_dirs: sub folder name(s) - tuple\n Returns:\n makeIncludedEmptyDirectories of found dirnames.\n ",
"language": "en",
"n_whitespaces": 129,
"n_words": 46,
"vocab_size": 39
} | def _getSubDirectoryFolders(self, module, sub_dirs):
module_dir = module.getCompileTimeDirectory()
file_list = []
data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs]
# Gather the full file list, probably makes no sense to include bytecode files
file_list = sum(
(
getFileList(
data_dir, ignore_dirs=("__pycache__",), ignore_suffixes=(".pyc",)
)
for data_dir in data_dirs
),
[],
)
if not file_list:
msg = "No files or folders found for '%s' in subfolder(s) %r (%r)." % (
module.getFullName(),
sub_dirs,
data_dirs,
)
self.warning(msg)
is_package = (
module.isCompiledPythonPackage() or module.isUncompiledPythonPackage()
)
# We need to preserve the package target path in the dist folder.
if is_package:
package_part = module.getFullName().asPath()
else:
package = module.getFullName().getPackageName()
if package is None:
package_part = ""
else:
package_part = package.asPath()
item_set = OrderedSet()
for f in file_list:
target = os.path.join(package_part, os.path.relpath(f, module_dir))
dir_name = os.path.dirname(target)
item_set.add(dir_name)
return self.makeIncludedEmptyDirectories(
source_path=module_dir,
dest_paths=item_set,
reason="Subdirectories of module %s" % module.getFullName(),
tags="config",
)
|
|
18,045 | 85,795 | 233 | tests/sentry/api/endpoints/test_organization_metric_data.py | 33 | 23 | def test_orderby_percentile_with_many_fields_one_entity_no_data(self):
for metric in [
TransactionMRI.MEASUREMENTS_FCP.value,
"transaction",
]:
perf_indexer_record(self.organization.id, metric)
response = self.get_success_response(
self.organization.slug,
field=[
f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})",
f"p50({TransactionMetricKey.MEASUREMENTS_FCP.value})",
],
statsPeriod="1h",
interval="1h",
groupBy=["project_id", "transaction"],
orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})",
useCase="performance",
)
groups = response.data["groups"]
assert | feat(metrics): Support rate for derived metric [TET-129 TET-127] (#38792)
Adds support for operation `rate` to be able to compute performance
related metrics such as tpm, tps, epm, eps
This PR achieves this by:
- Defining rate as a derived operation that produces its own SnQL rather
than trying to compute the data sketch aggregate and using that directly
- Replaces `filter_conditions_func` that used to just produce a snql
condition to be used a conditional aggregate with `snql_func` that
instead produces a SnQL function
- Replaces the logic in `get_entity` on MetricsExpression to determine
the entity from the MRI rather than from the aggregate applied | test_orderby_percentile_with_many_fields_one_entity_no_data | 35ec251212b82e5d9468062a3ab5945d8e739002 | sentry | test_organization_metric_data.py | 14 | 20 | https://github.com/getsentry/sentry.git | 2 | 94 | 0 | 32 | 181 | Python | {
"docstring": "\n Test that ensures that when metrics data is available then an empty response is returned\n gracefully\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 14
} | def test_orderby_percentile_with_many_fields_one_entity_no_data(self):
for metric in [
TransactionMRI.MEASUREMENTS_FCP.value,
"transaction",
]:
perf_indexer_record(self.organization.id, metric)
response = self.get_success_response(
self.organization.slug,
field=[
f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})",
f"p50({TransactionMetricKey.MEASUREMENTS_FCP.value})",
],
statsPeriod="1h",
interval="1h",
groupBy=["project_id", "transaction"],
orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})",
useCase="performance",
)
groups = response.data["groups"]
assert len(groups) == 0
|
|
20,973 | 101,563 | 133 | lib/training/preview_tk.py | 20 | 15 | def _add_save_button(self) -> None:
logger.debug("Adding save button")
button = tk.Button(self,
text="Save",
cursor="hand2",
command=lambda: self.save_var.set(True))
button.pack(side=tk.LEFT)
logger.debug("Added save burron: '%s'", button)
| Training - Use custom preview pop-out | _add_save_button | 7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5 | faceswap | preview_tk.py | 13 | 9 | https://github.com/deepfakes/faceswap.git | 1 | 61 | 0 | 19 | 104 | Python | {
"docstring": " Add a save button for saving out original preview ",
"language": "en",
"n_whitespaces": 10,
"n_words": 9,
"vocab_size": 9
} | def _add_save_button(self) -> None:
logger.debug("Adding save button")
button = tk.Button(self,
text="Save",
cursor="hand2",
command=lambda: self.save_var.set(True))
button.pack(side=tk.LEFT)
logger.debug("Added save burron: '%s'", button)
|
|
10,804 | 53,433 | 145 | src/prefect/context.py | 38 | 11 | def temporary_environ_defaults(**kwargs):
old_env = os.environ.copy()
try:
for var in kwargs:
# TODO: Consider warning on conflicts
os.environ.setdefault(var, str(kwargs[var]))
yield {var: os.environ[var] for var in kwargs}
finally:
for var in kwargs:
if old_env.get(var):
os.environ[var] = old_env[var]
else:
os.environ.pop(var, None)
| Introduce basic profile context management | temporary_environ_defaults | a9e67e2311c1e4a056b9e740cc739360896aab92 | prefect | context.py | 16 | 12 | https://github.com/PrefectHQ/prefect.git | 6 | 92 | 0 | 29 | 147 | Python | {
"docstring": "\n Temporarily override default values in os.environ.\n\n Yields a dictionary of the key/value pairs matching the provided keys.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 17,
"vocab_size": 16
} | def temporary_environ_defaults(**kwargs):
old_env = os.environ.copy()
try:
for var in kwargs:
# TODO: Consider warning on conflicts
os.environ.setdefault(var, str(kwargs[var]))
yield {var: os.environ[var] for var in kwargs}
finally:
for var in kwargs:
if old_env.get(var):
os.environ[var] = old_env[var]
else:
os.environ.pop(var, None)
|
|
18,790 | 91,706 | 264 | tests/sentry/api/endpoints/test_organization_metric_details.py | 53 | 22 | def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics):
mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2
_indexer_record(self.organization.id, "metric_foo_doe")
self.store_sessi | feat(metrics): make indexer more configurable (#35604)
This makes the sentry_metrics indexer more configurable in the following ways, to enable indexing on the ingest-performance-metrics topic:
- configurable input Kafka topic
- configurable output Kafka topic
- configurable model from which to pull index results
- tags for internal metrics to distinguish between the two modes operationally | test_same_entity_multiple_metric_ids_missing_data | 7f60db924ea37f34e0cfe6856777239e2a2ffe13 | sentry | test_organization_metric_details.py | 15 | 21 | https://github.com/getsentry/sentry.git | 1 | 97 | 0 | 43 | 169 | Python | {
"docstring": "\n Test when not requested metrics have data in the dataset\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics):
mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2
_indexer_record(self.organization.id, "metric_foo_doe")
self.store_session(
self.build_session(
project_id=self.project.id,
started=(time.time() // 60) * 60,
status="ok",
release="[email protected]",
errors=2,
)
)
response = self.get_response(
self.organization.slug,
"derived_metric.multiple_metrics",
)
assert response.status_code == 404
assert response.json()["detail"] == (
"Not all the requested metrics or the constituent metrics in "
"['derived_metric.multiple_metrics'] have data in the dataset"
)
|
|
81,784 | 276,932 | 126 | keras/utils/kernelized_utils.py | 62 | 15 | def _align_matrices(x, y):
x_matrix = _to_matrix(x)
y_matrix = _to_matrix(y)
x_shape = x_matrix.shape
y_shape = y_matrix.shape
if y_shape[1] != x_shape[1]: # dimensions do | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _align_matrices | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | kernelized_utils.py | 13 | 13 | https://github.com/keras-team/keras.git | 2 | 104 | 0 | 51 | 176 | Python | {
"docstring": "Aligns x and y tensors to allow computations over pairs of their rows.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def _align_matrices(x, y):
x_matrix = _to_matrix(x)
y_matrix = _to_matrix(y)
x_shape = x_matrix.shape
y_shape = y_matrix.shape
if y_shape[1] != x_shape[1]: # dimensions do not match.
raise ValueError(
"The outermost dimensions of the input tensors should match. "
f"Received y = {y_shape[1]} vs x = {x_shape[1]}."
)
x_tile = tf.tile(tf.expand_dims(x_matrix, 1), [1, y_shape[0], 1])
y_tile = tf.tile(tf.expand_dims(y_matrix, 0), [x_shape[0], 1, 1])
return x_tile, y_tile
|
|
72,243 | 248,366 | 454 | tests/storage/test_events.py | 131 | 32 | def test_prune_gap_if_dummy_local(self):
body = self.helper.send(self.room_id, body="Test", tok=self.token)
body = self.helper.send_event(
self.room_id, type=EventTypes.Dummy, content={}, tok=self.token
)
local_message_event_id = body["event_id"]
self.assert_extremities([local_message_event_id])
# Advance the clock for many days to make the old extremity "old". We
# also set the depth to "lots".
self.reactor.advance(7 * 24 * 60 * 60)
# Fudge a second event which points to an event we don't have. This is a
# state event so that the state changes (otherwise we won't prune the
# extremity as they'll have the same state group).
remote_event_2 = event_from_pdu_json(
{
"type": EventTypes.Member,
"state_key": "@user:other2",
"content": {"membership": Membership.JOIN},
"room_id": self.room_id,
"sender": "@user:other2",
"depth": 10000,
"prev_events": ["$some_unknown_message"],
"auth | Pull out less state when handling gaps mk2 (#12852) | test_prune_gap_if_dummy_local | b83bc5fab57b37f75a79d02213d6032c586fd36e | synapse | test_events.py | 13 | 27 | https://github.com/matrix-org/synapse.git | 1 | 191 | 0 | 96 | 320 | Python | {
"docstring": "Test that we don't drop extremities after a gap when the previous\n extremity is a local dummy event and points to local events.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 23,
"vocab_size": 21
} | def test_prune_gap_if_dummy_local(self):
body = self.helper.send(self.room_id, body="Test", tok=self.token)
body = self.helper.send_event(
self.room_id, type=EventTypes.Dummy, content={}, tok=self.token
)
local_message_event_id = body["event_id"]
self.assert_extremities([local_message_event_id])
# Advance the clock for many days to make the old extremity "old". We
# also set the depth to "lots".
self.reactor.advance(7 * 24 * 60 * 60)
# Fudge a second event which points to an event we don't have. This is a
# state event so that the state changes (otherwise we won't prune the
# extremity as they'll have the same state group).
remote_event_2 = event_from_pdu_json(
{
"type": EventTypes.Member,
"state_key": "@user:other2",
"content": {"membership": Membership.JOIN},
"room_id": self.room_id,
"sender": "@user:other2",
"depth": 10000,
"prev_events": ["$some_unknown_message"],
"auth_events": [],
"origin_server_ts": self.clock.time_msec(),
},
RoomVersions.V6,
)
state_before_gap = self.get_success(
self.state.get_current_state_ids(self.room_id)
)
self.persist_event(remote_event_2, state=state_before_gap)
# Check the new extremity is just the new remote event.
self.assert_extremities([remote_event_2.event_id, local_message_event_id])
|
|
79,929 | 269,147 | 64 | keras/saving/saved_model/save_impl.py | 27 | 9 | def _reset_layer_losses(parent_layer):
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {
'losses': layer._losses[:],
'eager_losses | Support Keras saving/loading for ShardedVariables with arbitrary partitions.
PiperOrigin-RevId: 439837516 | _reset_layer_losses | e61cbc52fd3b0170769c120e9b8dabc8c4205322 | keras | save_impl.py | 12 | 11 | https://github.com/keras-team/keras.git | 2 | 66 | 0 | 22 | 113 | Python | {
"docstring": "Resets losses of layer and its sublayers, and returns original losses.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | def _reset_layer_losses(parent_layer):
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {
'losses': layer._losses[:],
'eager_losses': layer._eager_losses[:]
}
with utils.no_automatic_dependency_tracking_scope(layer):
layer._losses = []
layer._eager_losses = []
return losses_dict
|
|
51,815 | 206,971 | 438 | tests/admin_changelist/tests.py | 139 | 40 | def test_result_list_editable_html(self):
new_parent = Parent.objects.create(name="parent")
new_child = Child.objects.create(name="name", parent=new_parent)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ["id", "name", "parent"]
m.list_display_links = ["id"]
m.list_editable = ["name"]
cl = m.get_changelist_instance(request)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = (
'<div class="hiddenfields">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
"</div>"
) % new_child.id
self.assertInHTML(
| Refs #33476 -- Reformatted code with Black. | test_result_list_editable_html | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 11 | 34 | https://github.com/django/django.git | 1 | 186 | 0 | 99 | 330 | Python | {
"docstring": "\n Regression tests for #11791: Inclusion tag result_list generates a\n table and this checks that the items are nested within the table\n element tags.\n Also a regression test for #13599, verifies that hidden fields\n when list_editable is enabled are rendered in a div outside the\n table.\n ",
"language": "en",
"n_whitespaces": 95,
"n_words": 45,
"vocab_size": 37
} | def test_result_list_editable_html(self):
new_parent = Parent.objects.create(name="parent")
new_child = Child.objects.create(name="name", parent=new_parent)
request = self.factory.get("/child/")
request.user = self.superuser
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ["id", "name", "parent"]
m.list_display_links = ["id"]
m.list_editable = ["name"]
cl = m.get_changelist_instance(request)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template(
"{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}"
)
context = Context({"cl": cl, "opts": Child._meta})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = (
'<div class="hiddenfields">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
"</div>"
) % new_child.id
self.assertInHTML(
hiddenfields_div, table_output, msg_prefix="Failed to find hidden fields"
)
# make sure that list editable fields are rendered in divs correctly
editable_name_field = (
'<input name="form-0-name" value="name" class="vTextField" '
'maxlength="30" type="text" id="id_form-0-name">'
)
self.assertInHTML(
'<td class="field-name">%s</td>' % editable_name_field,
table_output,
msg_prefix='Failed to find "name" list_editable field',
)
|
|
16,677 | 77,580 | 337 | wagtail/admin/tests/ui/test_tables.py | 51 | 27 | def test_title_column(self):
root_page = Page.objects.filter(depth=2).first()
blog = Site.objects.create(
| Allow passing arbitrary link attributes to TitleColumn | test_title_column | 5994cc43dfc5cc1ed891ab78eff3a3bcf56f6830 | wagtail | test_tables.py | 15 | 51 | https://github.com/wagtail/wagtail.git | 1 | 136 | 0 | 40 | 223 | Python | {
"docstring": "\n <table class=\"listing\">\n <thead>\n <tr><th>Hostname</th><th>Site name</th></tr>\n </thead>\n <tbody>\n <tr>\n <td class=\"title\">\n <div class=\"title-wrapper\">\n <a href=\"/admin/sites/%d/\" class=\"choose-site\" data-chooser=\"yes\">blog.example.com</a>\n </div>\n </td>\n <td>My blog</td>\n </tr>\n <tr>\n <td class=\"title\">\n <div class=\"title-wrapper\">\n <a href=\"/admin/sites/%d/\" class=\"choose-site\" data-chooser=\"yes\">gallery.example.com</a>\n </div>\n </td>\n <td>My gallery</td>\n </tr>\n </tbody>\n </table>\n ",
"language": "en",
"n_whitespaces": 530,
"n_words": 37,
"vocab_size": 25
} | def test_title_column(self):
root_page = Page.objects.filter(depth=2).first()
blog = Site.objects.create(
hostname="blog.example.com", site_name="My blog", root_page=root_page
)
gallery = Site.objects.create(
hostname="gallery.example.com", site_name="My gallery", root_page=root_page
)
data = [blog, gallery]
table = Table(
[
TitleColumn(
"hostname",
url_name="wagtailsites:edit",
link_classname="choose-site",
link_attrs={"data-chooser": "yes"},
),
Column("site_name", label="Site name"),
],
data,
)
html = self.render_component(table)
self.assertHTMLEqual(
html,
% (blog.pk, gallery.pk),
)
|
|
6,233 | 34,270 | 149 | src/transformers/models/realm/tokenization_realm.py | 32 | 11 | def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append | Add FastTokenizer to REALM (#15211)
* Remove BertTokenizer abstraction
* Add FastTokenizer to REALM
* Fix config archive map
* Fix copies
* Update realm.mdx
* Apply suggestions from code review | _clean_text | 841d979190319098adc8101f9820a02ee3be4c8b | transformers | tokenization_realm.py | 12 | 11 | https://github.com/huggingface/transformers.git | 6 | 65 | 0 | 26 | 112 | Python | {
"docstring": "Performs invalid character removal and whitespace cleanup on text.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def _clean_text(self, text):
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
|
|
84,941 | 284,723 | 258 | openbb_terminal/cryptocurrency/crypto_controller.py | 27 | 20 | def call_candle(self, other_args):
if self.symbol:
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="candle",
description=,
)
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
plot_chart(
| refactoring load, changed chart to candle (#1838)
* refactoring load, changed chart to candle
* updating load
* refactor done, missing tests
* fixed chart
* refactor
* linting
* tests failing
* fix minh issues
* auto completion for load
* linting
* Tests : cryptocurrency/controller ; remove mocking of functions which are not used anymore
* Cryptocurrency/Controller : call_headlines ; fix bug
* Tests : cryptocurrency/controller ; mock function
* Tests : cryptocurrency/due_diligence ; fix expected output
* cryptocurrency/due_diligence ; mock functions
Co-authored-by: Chavithra <[email protected]>
Co-authored-by: minhhoang1023 <[email protected]>
Co-authored-by: James Maslek <[email protected]> | call_candle | 0e03b9e9e41aaa61cdec5d674a9f2c64ab8d3394 | OpenBBTerminal | crypto_controller.py | 13 | 18 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 3 | 72 | 0 | 23 | 111 | Python | {
"docstring": "Process candle commandDisplay chart for loaded coin. You can specify currency vs which you want\n to show chart and also number of days to get data for.",
"language": "en",
"n_whitespaces": 41,
"n_words": 27,
"vocab_size": 25
} | def call_candle(self, other_args):
if self.symbol:
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="candle",
description=,
)
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES
)
if ns_parser:
plot_chart(
symbol=self.symbol,
currency=self.current_currency,
prices_df=self.current_df,
)
|
|
88,940 | 289,805 | 355 | tests/components/bayesian/test_binary_sensor.py | 72 | 14 | async def test_load_values_when_added_to_hass(hass):
config = {
"binary_sensor": {
| Bayesian - support `unique_id:` (#79879)
* support unique_id
* adds test for unique_ids | test_load_values_when_added_to_hass | fe7402375d2f899a7edd6ac326d2c1998b4c43da | core | test_binary_sensor.py | 14 | 33 | https://github.com/home-assistant/core.git | 1 | 183 | 0 | 55 | 319 | Python | {
"docstring": "Test that sensor initializes with observations of relevant entities.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | async def test_load_values_when_added_to_hass(hass):
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"unique_id": "3b4c9563-5e84-4167-8fe7-8f507e796d72",
"device_class": "connectivity",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
entity_registry = async_get_entities(hass)
assert (
entity_registry.entities["binary_sensor.test_binary"].unique_id
== "bayesian-3b4c9563-5e84-4167-8fe7-8f507e796d72"
)
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("device_class") == "connectivity"
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
|
|
9,308 | 47,940 | 157 | tests/providers/databricks/operators/test_databricks_sql.py | 24 | 16 | def test_copy_with_target_credential(self):
expression = "col1, col2"
op = DatabricksCopyIntoOperator(
file_location=COPY_FILE_LOCATION,
file_format='CSV',
table_name='test',
task_id=TASK_ID,
expression_list=expression,
storage_credential='abc',
credential={'AZURE_SAS_TOKEN': 'abc'},
)
asse | Update to the released version of DBSQL connector
Also added additional parameters for further customization of connection
if it's required | test_copy_with_target_credential | 6a3d6cc32b4e3922d259c889460fe82e0ebf3663 | airflow | test_databricks_sql.py | 12 | 18 | https://github.com/apache/airflow.git | 1 | 60 | 0 | 22 | 109 | Python | {
"docstring": "COPY INTO test WITH (CREDENTIAL abc)\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') ))\nFILEFORMAT = CSV\n",
"language": "en",
"n_whitespaces": 17,
"n_words": 20,
"vocab_size": 16
} | def test_copy_with_target_credential(self):
expression = "col1, col2"
op = DatabricksCopyIntoOperator(
file_location=COPY_FILE_LOCATION,
file_format='CSV',
table_name='test',
task_id=TASK_ID,
expression_list=expression,
storage_credential='abc',
credential={'AZURE_SAS_TOKEN': 'abc'},
)
assert (
op._create_sql_query()
== f.strip()
)
|
|
12,219 | 60,607 | 189 | .venv/lib/python3.8/site-packages/pip/_internal/commands/debug.py | 58 | 12 | def show_actual_vendor_versions(vendor_txt_versions):
# type: (Dict[str, str]) -> None
for module_name, expected_version in vendor_txt_versions.items():
extra_message = ''
actual_version = get_vendor_version_from_module(module_name)
if not actual_version:
extra_message = ' (Un | upd; format | show_actual_vendor_versions | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | debug.py | 15 | 12 | https://github.com/jindongwang/transferlearning.git | 4 | 71 | 0 | 45 | 126 | Python | {
"docstring": "Log the actual version and print extra info if there is\n a conflict or if the actual version could not be imported.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 22,
"vocab_size": 18
} | def show_actual_vendor_versions(vendor_txt_versions):
# type: (Dict[str, str]) -> None
for module_name, expected_version in vendor_txt_versions.items():
extra_message = ''
actual_version = get_vendor_version_from_module(module_name)
if not actual_version:
extra_message = ' (Unable to locate actual module version, using'\
' vendor.txt specified version)'
actual_version = expected_version
elif parse_version(actual_version) != parse_version(expected_version):
extra_message = ' (CONFLICT: vendor.txt suggests version should'\
' be {})'.format(expected_version)
logger.info('%s==%s%s', module_name, actual_version, extra_message)
|
|
22,383 | 106,467 | 45 | youtube_dl/utils.py | 26 | 9 | def escape_rfc3986(s):
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf- | [utils] Ensure RFC3986 encoding result is unicode | escape_rfc3986 | 556862bc911bb54435b7b0b01451789b884b0390 | youtube-dl | utils.py | 11 | 4 | https://github.com/ytdl-org/youtube-dl.git | 3 | 45 | 0 | 26 | 75 | Python | {
"docstring": "Escape non-ASCII characters as suggested by RFC 3986",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def escape_rfc3986(s):
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
# ensure unicode: after quoting, it can always be converted
return compat_str(compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]"))
|
|
83,534 | 281,122 | 158 | gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py | 24 | 18 | def call_social(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="social",
description=,
)
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
pycoingecko_view.display_social(
self.coin_map_df["CoinGecko" | Crypto menu refactor (#1119)
* enabled some crypto commands in dd to be called independent of source loaded
* support for coin_map_df in all dd functions + load ta and plot chart refactor
* updated tests and removed coingecko scrapping where possible
* removed ref of command from hugo
* updated pycoingecko version
* refactoring load
* refactored load to fetch prices; pred can run independent of source now
* load by default usd on cp/cg and usdt on cb/bin
* updated to rich for formatting and updated dependencies
* fixed changes requested
* update docs
* revert discord requirements
* removed absolute from calculate change for price
* fixing pr issues
* fix loading issue when similar coins exist, move coins to home, fill n/a
* update docs for coins
* adds load to ta and pred menu | call_social | ea964109d654394cc0a5237e6ec5510ba6404097 | OpenBBTerminal | dd_controller.py | 11 | 15 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2 | 63 | 0 | 21 | 100 | Python | {
"docstring": "Process social commandShows social media corresponding to loaded coin. You can find there name of\n telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.",
"language": "en",
"n_whitespaces": 35,
"n_words": 25,
"vocab_size": 23
} | def call_social(self, other_args):
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="social",
description=,
)
ns_parser = parse_known_args_and_warn(
parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
)
if ns_parser:
pycoingecko_view.display_social(
self.coin_map_df["CoinGecko"], export=ns_parser.export
)
|
|
17,999 | 85,476 | 174 | tests/snuba/api/endpoints/test_project_event_details.py | 36 | 21 | def test_ignores_different_group(self):
url = reverse(
"sentry-api-0-project-event-details",
kwargs={
"event_id": self.next_transaction_event.event_id,
"project_slug": self.next_transaction_event.project.slug,
"organization_slug": self.next_transacti | feat(perf issues): Return prev/next for transaction events (#38274)
* feat(perf issues): Return prev/next for transaction events | test_ignores_different_group | 6d7681529f68a87e41d4c11a4aa1e6732cb15ade | sentry | test_project_event_details.py | 15 | 14 | https://github.com/getsentry/sentry.git | 1 | 117 | 0 | 32 | 195 | Python | {
"docstring": "Test that a different group's events aren't attributed to the one that was passed",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def test_ignores_different_group(self):
url = reverse(
"sentry-api-0-project-event-details",
kwargs={
"event_id": self.next_transaction_event.event_id,
"project_slug": self.next_transaction_event.project.slug,
"organization_slug": self.next_transaction_event.project.organization.slug,
},
)
with self.feature("organizations:performance-issues"):
response = self.client.get(url, format="json", data={"group_id": self.group.id})
assert response.status_code == 200, response.content
assert response.data["id"] == str(self.next_transaction_event.event_id)
assert response.data["nextEventID"] is None
|
|
49,706 | 200,571 | 258 | sympy/tensor/tensor.py | 67 | 26 | def _dedupe_indices(new, exclude, index_structure):
inds_self = set(exclude)
dums_new = set(get_dummy_indices(new))
conflicts = dums_new.intersection(inds_self)
if len(conflicts) == 0:
return None
inds_self.update(dums_new)
self_args_free = [(i, None) for i in inds_self]
gen = index_structure._get_generator_for_dummy_indices(self_args_free)
repl = {}
for d in conflicts:
if -d in repl.keys():
continue
newname = gen(d.te | _dedupe_indices: convert to staticmethod
index_structure is now an additional argument | _dedupe_indices | b5f5ec455e7d003fa214a215475a3fa2407760cc | sympy | tensor.py | 13 | 26 | https://github.com/sympy/sympy.git | 6 | 150 | 0 | 45 | 242 | Python | {
"docstring": "\n exclude: set\n new: TensExpr\n index_structure: _IndexStructure (required to generate new dummy indices)\n\n If ``new`` has any dummy indices that are in ``exclude``, return a version\n of new with those indices replaced. If no replacements are needed,\n return None\n \n ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``.\n Since the latter does not use the index position for anything, we just\n set it as ``None`` here.\n ",
"language": "en",
"n_whitespaces": 142,
"n_words": 63,
"vocab_size": 53
} | def _dedupe_indices(new, exclude, index_structure):
inds_self = set(exclude)
dums_new = set(get_dummy_indices(new))
conflicts = dums_new.intersection(inds_self)
if len(conflicts) == 0:
return None
inds_self.update(dums_new)
self_args_free = [(i, None) for i in inds_self]
gen = index_structure._get_generator_for_dummy_indices(self_args_free)
repl = {}
for d in conflicts:
if -d in repl.keys():
continue
newname = gen(d.tensor_index_type)
new_d = d.func(newname, *d.args[1:])
repl[d] = new_d
repl[-d] = -new_d
if len(repl) == 0:
return None
new_renamed = new._replace_indices(repl)
return new_renamed
|
|
23,916 | 110,082 | 711 | lib/matplotlib/spines.py | 174 | 39 | def get_window_extent(self, renderer=None):
# make sure the location is updated so that transforms etc are correct:
self._adjust_location()
bb = super().get_window_extent(renderer=renderer)
if self.axis is None or not self.axis.get_visible():
return bb
bboxes = [bb]
drawn_ticks = self.axis._update_ticks()
major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None)
minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None)
for tick in [major_tick, minor_tick]:
if tick is None:
continue
bb0 = bb.frozen()
tickl = tick._size
tickdir = tick._tickdir
if tickdir == 'out':
padout = 1
padin = 0
elif tickdir == 'in':
padout = 0
padin = 1
else:
padout = 0.5
padin = 0.5
padout = padout * tickl / 72 * self.figure.dpi
padin = padin * tickl / 72 * self.figure.dpi
if tick.tick1line.get_visible():
if self.spine_type == 'left':
bb0.x0 = bb0.x0 - padout
bb0.x1 = bb0.x1 + padin
elif self.spine_type == 'bottom':
bb0.y0 = bb0.y0 - padout
bb0.y1 = bb0.y1 + padin
if tick.tick2line.get_visible():
if self.spine_type == 'right':
bb0.x1 = bb0.x1 + padout
bb0.x0 = bb0.x0 - padin
elif self.spine_type == 'top':
bb0.y1 = bb0.y1 + padout
bb0.y0 = bb0.y0 - padout
| Fix issue with space allocated for single tick that should not be there
Co-authored-by: Antony Lee <[email protected]> | get_window_extent | 3804cdd8f1771065f9c8616c57357c2b190c3a05 | matplotlib | spines.py | 15 | 42 | https://github.com/matplotlib/matplotlib.git | 13 | 330 | 0 | 80 | 534 | Python | {
"docstring": "\n Return the window extent of the spines in display space, including\n padding for ticks (but not their labels)\n\n See Also\n --------\n matplotlib.axes.Axes.get_tightbbox\n matplotlib.axes.Axes.get_window_extent\n ",
"language": "en",
"n_whitespaces": 73,
"n_words": 23,
"vocab_size": 22
} | def get_window_extent(self, renderer=None):
# make sure the location is updated so that transforms etc are correct:
self._adjust_location()
bb = super().get_window_extent(renderer=renderer)
if self.axis is None or not self.axis.get_visible():
return bb
bboxes = [bb]
drawn_ticks = self.axis._update_ticks()
major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None)
minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None)
for tick in [major_tick, minor_tick]:
if tick is None:
continue
bb0 = bb.frozen()
tickl = tick._size
tickdir = tick._tickdir
if tickdir == 'out':
padout = 1
padin = 0
elif tickdir == 'in':
padout = 0
padin = 1
else:
padout = 0.5
padin = 0.5
padout = padout * tickl / 72 * self.figure.dpi
padin = padin * tickl / 72 * self.figure.dpi
if tick.tick1line.get_visible():
if self.spine_type == 'left':
bb0.x0 = bb0.x0 - padout
bb0.x1 = bb0.x1 + padin
elif self.spine_type == 'bottom':
bb0.y0 = bb0.y0 - padout
bb0.y1 = bb0.y1 + padin
if tick.tick2line.get_visible():
if self.spine_type == 'right':
bb0.x1 = bb0.x1 + padout
bb0.x0 = bb0.x0 - padin
elif self.spine_type == 'top':
bb0.y1 = bb0.y1 + padout
bb0.y0 = bb0.y0 - padout
bboxes.append(bb0)
return mtransforms.Bbox.union(bboxes)
|
|
48,985 | 198,533 | 157 | sympy/physics/continuum_mechanics/truss.py | 41 | 11 | def apply_support(self, location, type):
if location not in self._node_labels:
raise ValueError("Support must be added on a known node")
else:
self._supports[location] = type
if type == "pinned":
self._loads['R_'+str(location)+'_x']= []
self._loads['R_'+ | remove_load method added along with other changes | apply_support | 99ede53223eafb56b2c2b4ab7b8a6764b628c9d9 | sympy | truss.py | 16 | 12 | https://github.com/sympy/sympy.git | 5 | 123 | 0 | 32 | 221 | Python | {
"docstring": "\n This method adds a pinned or roller support at a particular node\n\n Parameters\n ==========\n\n location: String or Symbol\n Label of the Node at which support is added.\n\n type: String\n Type of the support being provided at the node.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.apply_support('A', 'pinned')\n >>> t.supports\n {'A': 'pinned', 'B': 'none'}\n ",
"language": "en",
"n_whitespaces": 194,
"n_words": 66,
"vocab_size": 50
} | def apply_support(self, location, type):
if location not in self._node_labels:
raise ValueError("Support must be added on a known node")
else:
self._supports[location] = type
if type == "pinned":
self._loads['R_'+str(location)+'_x']= []
self._loads['R_'+str(location)+'_y']= []
elif type == "roller":
self._loads['R_'+str(location)+'_y']= []
if 'R_'+str(location)+'_x' in list(self._loads):
self._loads.pop('R_'+str(location)+'_x')
|
|
102,508 | 303,692 | 119 | homeassistant/components/xiaomi_miio/vacuum.py | 20 | 13 | def timers(self) -> list[dict[str, Any]]:
retu | Improve type hints in xiaomi_miio vacuum entities (#76563)
Co-authored-by: Teemu R. <[email protected]> | timers | 54fc17e10de0752c03d6b95153c3d8168f76ea44 | core | vacuum.py | 11 | 10 | https://github.com/home-assistant/core.git | 2 | 52 | 0 | 20 | 83 | Python | {
"docstring": "Get the list of added timers of the vacuum cleaner.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 8
} | def timers(self) -> list[dict[str, Any]]:
return [
{
"enabled": timer.enabled,
"cron": timer.cron,
"next_schedule": as_utc(timer.next_schedule),
}
for timer in self.coordinator.data.timers
]
|
|
77,142 | 262,205 | 110 | TTS/tts/utils/visual.py | 51 | 27 | def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False):
old_fig_size = plt.rcParams["figure.figsize"]
if fig_size is not None:
plt.rcParams["figure.figsize"] = fig_size
fig, ax = plt.subplots()
x = np.array(range(len(chars)))
| Plot pitch over input characters | plot_avg_pitch | 5169d4eb32407ca0278046aaffc56ca6f9e9ef32 | TTS | visual.py | 12 | 26 | https://github.com/coqui-ai/TTS.git | 4 | 142 | 0 | 39 | 233 | Python | {
"docstring": "Plot pitch curves on top of the input characters.\n\n Args:\n pitch (np.array): Pitch values.\n chars (str): Characters to place to the x-axis.\n\n Shapes:\n pitch: :math:`(T,)`\n ",
"language": "en",
"n_whitespaces": 55,
"n_words": 25,
"vocab_size": 22
} | def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False):
old_fig_size = plt.rcParams["figure.figsize"]
if fig_size is not None:
plt.rcParams["figure.figsize"] = fig_size
fig, ax = plt.subplots()
x = np.array(range(len(chars)))
my_xticks = [c for c in chars]
plt.xticks(x, my_xticks)
ax.set_xlabel("characters")
ax.set_ylabel("freq")
ax2 = ax.twinx()
ax2.plot(pitch, linewidth=5.0, color="red")
ax2.set_ylabel("F0")
plt.rcParams["figure.figsize"] = old_fig_size
if not output_fig:
plt.close()
return fig
|
|
48,304 | 197,047 | 402 | sympy/ntheory/generate.py | 170 | 23 | def composite(nth):
n = as_int(nth)
if n < 1:
raise ValueError("nth must be a positive integer; composite(1) == 4")
composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18]
if n <= 10:
return composite_arr[n - 1]
a, b = 4, sieve._list[-1]
if n <= b - primepi(b) - 1:
while a < b - 1:
mid = (a + b) >> 1
| Refactored import ordering in functions | composite | e0dc14eca132f37c5f49369eb4051eae37c9b119 | sympy | generate.py | 15 | 36 | https://github.com/sympy/sympy.git | 12 | 250 | 0 | 76 | 389 | Python | {
"docstring": " Return the nth composite number, with the composite numbers indexed as\n composite(1) = 4, composite(2) = 6, etc....\n\n Examples\n ========\n\n >>> from sympy import composite\n >>> composite(36)\n 52\n >>> composite(1)\n 4\n >>> composite(17737)\n 20000\n\n See Also\n ========\n\n sympy.ntheory.primetest.isprime : Test if n is prime\n primerange : Generate all primes in a given range\n primepi : Return the number of primes less than or equal to n\n prime : Return the nth prime\n compositepi : Return the number of positive composite numbers less than or equal to n\n ",
"language": "en",
"n_whitespaces": 210,
"n_words": 87,
"vocab_size": 53
} | def composite(nth):
n = as_int(nth)
if n < 1:
raise ValueError("nth must be a positive integer; composite(1) == 4")
composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18]
if n <= 10:
return composite_arr[n - 1]
a, b = 4, sieve._list[-1]
if n <= b - primepi(b) - 1:
while a < b - 1:
mid = (a + b) >> 1
if mid - primepi(mid) - 1 > n:
b = mid
else:
a = mid
if isprime(a):
a -= 1
return a
from sympy.functions.elementary.exponential import log
from sympy.functions.special.error_functions import li
a = 4 # Lower bound for binary search
b = int(n*(log(n) + log(log(n)))) # Upper bound for the search.
while a < b:
mid = (a + b) >> 1
if mid - li(mid) - 1 > n:
b = mid
else:
a = mid + 1
n_composites = a - primepi(a) - 1
while n_composites > n:
if not isprime(a):
n_composites -= 1
a -= 1
if isprime(a):
a -= 1
return a
|
|
@set_module('numpy') | 38,645 | 160,496 | 122 | numpy/lib/twodim_base.py | 64 | 19 | def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
from numpy import histogramdd
if len(x) != len(y):
raise ValueError('x and y must have the same length.')
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, density, weights)
return hist, edges[0], edges[1]
@set_module('numpy') | DEP: Remove `normed=` keyword argument from histogroms
The normed keyword argument has been deprecated for a long time.
This removes it, replacing its position with the new density
argument. | histogram2d | 2215054472616df563faa4613734426c790d4217 | numpy | twodim_base.py | 11 | 13 | https://github.com/numpy/numpy.git | 5 | 114 | 1 | 50 | 184 | Python | {
"docstring": "\n Compute the bi-dimensional histogram of two data samples.\n\n Parameters\n ----------\n x : array_like, shape (N,)\n An array containing the x coordinates of the points to be\n histogrammed.\n y : array_like, shape (N,)\n An array containing the y coordinates of the points to be\n histogrammed.\n bins : int or array_like or [int, int] or [array, array], optional\n The bin specification:\n\n * If int, the number of bins for the two dimensions (nx=ny=bins).\n * If array_like, the bin edges for the two dimensions\n (x_edges=y_edges=bins).\n * If [int, int], the number of bins in each dimension\n (nx, ny = bins).\n * If [array, array], the bin edges in each dimension\n (x_edges, y_edges = bins).\n * A combination [int, array] or [array, int], where int\n is the number of bins and array is the bin edges.\n\n range : array_like, shape(2,2), optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range\n will be considered outliers and not tallied in the histogram.\n density : bool, optional\n If False, the default, returns the number of samples in each bin.\n If True, returns the probability *density* function at the bin,\n ``bin_count / sample_count / bin_area``.\n weights : array_like, shape(N,), optional\n An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.\n Weights are normalized to 1 if `density` is True. If `density` is\n False, the values of the returned histogram are equal to the sum of\n the weights belonging to the samples falling into each bin.\n\n Returns\n -------\n H : ndarray, shape(nx, ny)\n The bi-dimensional histogram of samples `x` and `y`. Values in `x`\n are histogrammed along the first dimension and values in `y` are\n histogrammed along the second dimension.\n xedges : ndarray, shape(nx+1,)\n The bin edges along the first dimension.\n yedges : ndarray, shape(ny+1,)\n The bin edges along the second dimension.\n\n See Also\n --------\n histogram : 1D histogram\n histogramdd : Multidimensional histogram\n\n Notes\n -----\n When `density` is True, then the returned histogram is the sample\n density, defined such that the sum over bins of the product\n ``bin_value * bin_area`` is 1.\n\n Please note that the histogram does not follow the Cartesian convention\n where `x` values are on the abscissa and `y` values on the ordinate\n axis. Rather, `x` is histogrammed along the first dimension of the\n array (vertical), and `y` along the second dimension of the array\n (horizontal). This ensures compatibility with `histogramdd`.\n\n Examples\n --------\n >>> from matplotlib.image import NonUniformImage\n >>> import matplotlib.pyplot as plt\n\n Construct a 2-D histogram with variable bin width. First define the bin\n edges:\n\n >>> xedges = [0, 1, 3, 5]\n >>> yedges = [0, 2, 3, 4, 6]\n\n Next we create a histogram H with random bin content:\n\n >>> x = np.random.normal(2, 1, 100)\n >>> y = np.random.normal(1, 1, 100)\n >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n >>> # Histogram does not follow Cartesian convention (see Notes),\n >>> # therefore transpose H for visualization purposes.\n >>> H = H.T\n\n :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:\n\n >>> fig = plt.figure(figsize=(7, 3))\n >>> ax = fig.add_subplot(131, title='imshow: square bins')\n >>> plt.imshow(H, interpolation='nearest', origin='lower',\n ... 
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])\n <matplotlib.image.AxesImage object at 0x...>\n\n :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:\n\n >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',\n ... aspect='equal')\n >>> X, Y = np.meshgrid(xedges, yedges)\n >>> ax.pcolormesh(X, Y, H)\n <matplotlib.collections.QuadMesh object at 0x...>\n\n :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to\n display actual bin edges with interpolation:\n\n >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',\n ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])\n >>> im = NonUniformImage(ax, interpolation='bilinear')\n >>> xcenters = (xedges[:-1] + xedges[1:]) / 2\n >>> ycenters = (yedges[:-1] + yedges[1:]) / 2\n >>> im.set_data(xcenters, ycenters, H)\n >>> ax.images.append(im)\n >>> plt.show()\n\n It is also possible to construct a 2-D histogram without specifying bin\n edges:\n\n >>> # Generate non-symmetric test data\n >>> n = 10000\n >>> x = np.linspace(1, 100, n)\n >>> y = 2*np.log(x) + np.random.rand(n) - 0.5\n >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges\n >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)\n\n Now we can plot the histogram using\n :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a\n :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.\n\n >>> # Plot histogram using pcolormesh\n >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)\n >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')\n >>> ax1.plot(x, 2*np.log(x), 'k-')\n >>> ax1.set_xlim(x.min(), x.max())\n >>> ax1.set_ylim(y.min(), y.max())\n >>> ax1.set_xlabel('x')\n >>> ax1.set_ylabel('y')\n >>> ax1.set_title('histogram2d')\n >>> ax1.grid()\n\n >>> # Create hexbin plot for comparison\n >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')\n >>> ax2.plot(x, 2*np.log(x), 'k-')\n >>> ax2.set_title('hexbin')\n >>> ax2.set_xlim(x.min(), x.max())\n >>> ax2.set_xlabel('x')\n >>> ax2.grid()\n\n >>> plt.show()\n ",
"language": "en",
"n_whitespaces": 1295,
"n_words": 747,
"vocab_size": 356
} | def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
from numpy import histogramdd
if len(x) != len(y):
raise ValueError('x and y must have the same length.')
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, density, weights)
return hist, edges[0], edges[1]
@set_module('numpy') |
91,534 | 292,448 | 25 | homeassistant/components/dlna_dms/dms.py | 11 | 5 | def available(self) -> bool:
return self._device is not None and self._device.profile_device.available
| Add dlna_dms integration to support DLNA Digital Media Servers (#66437) | available | b19bf9b147f4321e89d1f7f01e68337f2102f460 | core | dms.py | 9 | 3 | https://github.com/home-assistant/core.git | 2 | 23 | 0 | 11 | 38 | Python | {
"docstring": "Device is available when we have a connection to it.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def available(self) -> bool:
return self._device is not None and self._device.profile_device.available
|
|
80,431 | 270,328 | 215 | keras/distribute/distributed_training_utils_v1.py | 115 | 16 | def _get_input_from_iterator(iterator, model):
next_element = iterator.get_next()
# `len(nest.flatten(x))` is going to not count empty elements such as {}.
# len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is
# going to get flattened in `_prepare_feed_values` to work around that. Empty
# elements are going to get filtered out as part of the flattening.
if len(tf.nest.flatten(next_element)) == len(model.inputs):
x = next_element
y = None
sample_weights = None
elif len(tf.nest.flatten(next_element)) == (
len(model.inputs) + len(model.outputs)
):
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate that all the elements in x and y are of the same type and shape.
validate_distributed_dataset_inputs(
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _get_input_from_iterator | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | distributed_training_utils_v1.py | 12 | 17 | https://github.com/keras-team/keras.git | 3 | 108 | 0 | 67 | 176 | Python | {
"docstring": "Get elements from the iterator and verify the input shape and type.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 10
} | def _get_input_from_iterator(iterator, model):
next_element = iterator.get_next()
# `len(nest.flatten(x))` is going to not count empty elements such as {}.
# len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is
# going to get flattened in `_prepare_feed_values` to work around that. Empty
# elements are going to get filtered out as part of the flattening.
if len(tf.nest.flatten(next_element)) == len(model.inputs):
x = next_element
y = None
sample_weights = None
elif len(tf.nest.flatten(next_element)) == (
len(model.inputs) + len(model.outputs)
):
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate that all the elements in x and y are of the same type and shape.
validate_distributed_dataset_inputs(
model._distribution_strategy, x, y, sample_weights
)
return x, y, sample_weights
|
|
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) | 38,647 | 160,501 | 12 | numpy/core/multiarray.py | 7 | 6 | def inner(a, b):
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) | DIC: Misc RST reformatting.
This contains various RST reformatting.
One, moving `(C)` one line up, is specific to a bug in tree-sitter-rst
that mis parses this section. Another is adding one black line for a
similar reason where `..` is seen as section underline by
tree-sitter-rst.
This is some shuffling of section underline: try to be consitant,
`=`, then `-`, then `~`, with this refactor there is also no more
section that use backticks as underline.
Note in particular that non-consitency of underline lead to a problem in
datetime64 section where "weekmasks" (underlined with `-`) were actually
a level-4 heading instead of a level 2 or 3 I guess, and thus were
nested under the `busday_count()` section.
You'll note also 2 formulas that are under double-quotes as they are not
references. | inner | 84eeca630ec9c5bf580bc456035c87d8591c1389 | numpy | multiarray.py | 7 | 2 | https://github.com/numpy/numpy.git | 1 | 14 | 1 | 7 | 35 | Python | {
"docstring": "\n inner(a, b, /)\n\n Inner product of two arrays.\n\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : array_like\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n If `a` and `b` are both\n scalars or both 1-D arrays then a scalar is returned; otherwise\n an array is returned.\n ``out.shape = (*a.shape[:-1], *b.shape[:-1])``\n\n Raises\n ------\n ValueError\n If both `a` and `b` are nonscalar and their last dimensions have\n different sizes.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n einsum : Einstein summation convention.\n\n Notes\n -----\n For vectors (1-D arrays) it computes the ordinary inner-product::\n\n np.inner(a, b) = sum(a[:]*b[:])\n\n More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``::\n\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n\n or explicitly::\n\n np.inner(a, b)[i0,...,ir-2,j0,...,js-2]\n = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])\n\n In addition `a` or `b` may be scalars, in which case::\n\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n 2\n\n Some multidimensional examples:\n\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> c = np.inner(a, b)\n >>> c.shape\n (2, 3)\n >>> c\n array([[ 14, 38, 62],\n [ 86, 110, 134]])\n\n >>> a = np.arange(2).reshape((1,1,2))\n >>> b = np.arange(6).reshape((3,2))\n >>> c = np.inner(a, b)\n >>> c.shape\n (1, 1, 3)\n >>> c\n array([[[1, 3, 5]]])\n\n An example where `b` is a scalar:\n\n >>> np.inner(np.eye(2), 7)\n array([[7., 0.],\n [0., 7.]])\n\n ",
"language": "en",
"n_whitespaces": 521,
"n_words": 260,
"vocab_size": 162
} | def inner(a, b):
return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) |
22,539 | 106,987 | 347 | lib/matplotlib/collections.py | 135 | 29 | def contains(self, mouseevent):
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
if not self.get_visible():
return False, {}
pickradius = (
float(self._picker)
if isinstance(self._picker, Number) and
self._picker is not True # the bool, not just nonzero or 1
else self._pickradius)
if self.axes:
self.axes._unstale_viewLim()
transform, offset_trf, offsets, paths = self._prepare_points()
# Tests if the point is contained on one of the polygons formed
# by the control points of each of the paths. A point is considered
| Switch transOffset to offset_transform.
Note that most APIs *previously* already accepted *offset_transform* as
kwarg, due to the presence of the `set_offset_transform` setter. Prefer
that name (shortening it to `offset_trf` for local variables).
Backcompat for the old `transOffset` name is kept in most places by
introducing a property alias. | contains | c25cf96cfb7e6fc9ad75347cb2a32193c501e82c | matplotlib | collections.py | 12 | 19 | https://github.com/matplotlib/matplotlib.git | 6 | 148 | 0 | 89 | 231 | Python | {
"docstring": "\n Test whether the mouse event occurred in the collection.\n\n Returns ``bool, dict(ind=itemlist)``, where every item in itemlist\n contains the event.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 20,
"vocab_size": 17
} | def contains(self, mouseevent):
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
if not self.get_visible():
return False, {}
pickradius = (
float(self._picker)
if isinstance(self._picker, Number) and
self._picker is not True # the bool, not just nonzero or 1
else self._pickradius)
if self.axes:
self.axes._unstale_viewLim()
transform, offset_trf, offsets, paths = self._prepare_points()
# Tests if the point is contained on one of the polygons formed
# by the control points of each of the paths. A point is considered
# "on" a path if it would lie within a stroke of width 2*pickradius
# following the path. If pickradius <= 0, then we instead simply check
# if the point is *inside* of the path instead.
ind = _path.point_in_path_collection(
mouseevent.x, mouseevent.y, pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, offset_trf, pickradius <= 0)
return len(ind) > 0, dict(ind=ind)
|
|
1,949 | 10,849 | 103 | jina/orchestrate/deployments/__init__.py | 15 | 7 | def deployments(self) -> List[Dict]:
return [
{
'name': self.name,
| refactor: rename pod to deployment (#4230)
* refactor: rename pod to deployment
* style: fix overload and cli autocomplete
* fix: undo daemon mistake
* refactor: leftover cleanup
* fix: more test fixes
* fix: more fixes
* fix: more fixes
* fix: more fixes
* fix: more tests
* fix: fix more tests
* refactor: fix more tests
* refactor: more tests fixes
* refactor: rename pea to pod
* refactor: adjust docs
* refactor: complete pea renaming
* refactor: more fixes
* fix: pea_type in k8s yamls
* fix: adjust pod args name
* refactor: rename peapods parser folder
* fix: da init
Co-authored-by: Jina Dev Bot <[email protected]> | deployments | 13edc16d806fb5d77a6849551178ccc75937f25f | jina | __init__.py | 9 | 12 | https://github.com/jina-ai/jina.git | 1 | 34 | 0 | 15 | 58 | Python | {
"docstring": "Get deployments of the deployment. The BaseDeployment just gives one deployment.\n\n :return: list of deployments\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 15,
"vocab_size": 12
} | def deployments(self) -> List[Dict]:
return [
{
'name': self.name,
'head_host': self.head_host,
'head_port_in': self.head_port_in,
}
]
|
|
117,279 | 320,685 | 251 | qutebrowser/browser/webkit/http.py | 100 | 23 | def parse_content_disposition(reply):
is_inline = True
filename = None
content_disposition_header = b'Content-Disposition'
# First check if the Content-Disposition header has a filename
# attribute.
if reply.hasRawHeader(content_disposition_header):
# We use the unsafe variant of the filename as we sanitize it via
# os.path.basename later.
try:
value = bytes(reply.rawHeader(content_disposition_header))
log.network.debug("Parsing Content-Disposition: {value!r}")
content_disposition = ContentDisposition.parse(value)
filename = content_disposition.filename()
except ContentDispositionError as e:
log.network.error(f"Error while parsing filenam | Simplify some syntax
Found via pyupgrade | parse_content_disposition | bd8c940320b7d8476b422edd9c417703db64f603 | qutebrowser | http.py | 14 | 19 | https://github.com/qutebrowser/qutebrowser.git | 5 | 121 | 0 | 68 | 222 | Python | {
"docstring": "Parse a content_disposition header.\n\n Args:\n reply: The QNetworkReply to get a filename for.\n\n Return:\n A (is_inline, filename) tuple.\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 18,
"vocab_size": 17
} | def parse_content_disposition(reply):
is_inline = True
filename = None
content_disposition_header = b'Content-Disposition'
# First check if the Content-Disposition header has a filename
# attribute.
if reply.hasRawHeader(content_disposition_header):
# We use the unsafe variant of the filename as we sanitize it via
# os.path.basename later.
try:
value = bytes(reply.rawHeader(content_disposition_header))
log.network.debug("Parsing Content-Disposition: {value!r}")
content_disposition = ContentDisposition.parse(value)
filename = content_disposition.filename()
except ContentDispositionError as e:
log.network.error(f"Error while parsing filename: {e}")
else:
is_inline = content_disposition.is_inline()
# Then try to get filename from url
if not filename:
filename = reply.url().path().rstrip('/')
# If that fails as well, use a fallback
if not filename:
filename = 'qutebrowser-download'
return is_inline, os.path.basename(filename)
|
|
7,413 | 41,391 | 415 | seaborn/_core/properties.py | 124 | 32 | def _get_categorical_mapping(self, scale, data):
levels = categorical_order(data, scale.order)
n = len(levels)
values = scale.values
if isinstance(values, dict):
self._check_dict_entries(levels, values)
# TODO where to ensure that dict values have consistent representation?
colors = [values[x] for x in levels]
elif isinstance(values, list):
colors = self._check_list_length(levels, scale.values)
elif isinstance(values, tuple):
colors = blend_palette(values, n)
elif isinstance(values, str):
colors = color_palette(values, n)
elif values is None:
if n <= len(get_color_cycle()):
# Use current (global) default palette
colors = color_palette(n_colors=n)
else:
colors = color_palette("husl", n)
else:
scale_class = scale.__class__.__name__
msg = " ".join([
f"Scale values for {self.variable} with a {scale_class} mapping",
f"must be string, lis | Transition mappings->properties, leaving a few loose ends | _get_categorical_mapping | a07ef69882ed76e09a0ed43d6f3ea33780c1b2be | seaborn | properties.py | 17 | 28 | https://github.com/mwaskom/seaborn.git | 8 | 184 | 0 | 89 | 311 | Python | {
"docstring": "Define mapping as lookup in list of discrete color values.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def _get_categorical_mapping(self, scale, data):
levels = categorical_order(data, scale.order)
n = len(levels)
values = scale.values
if isinstance(values, dict):
self._check_dict_entries(levels, values)
# TODO where to ensure that dict values have consistent representation?
colors = [values[x] for x in levels]
elif isinstance(values, list):
colors = self._check_list_length(levels, scale.values)
elif isinstance(values, tuple):
colors = blend_palette(values, n)
elif isinstance(values, str):
colors = color_palette(values, n)
elif values is None:
if n <= len(get_color_cycle()):
# Use current (global) default palette
colors = color_palette(n_colors=n)
else:
colors = color_palette("husl", n)
else:
scale_class = scale.__class__.__name__
msg = " ".join([
f"Scale values for {self.variable} with a {scale_class} mapping",
f"must be string, list, tuple, or dict; not {type(scale.values)}."
])
raise TypeError(msg)
# If color specified here has alpha channel, it will override alpha property
colors = self._standardize_colors(colors)
|
|
22,712 | 107,367 | 28 | lib/matplotlib/colorbar.py | 7 | 6 | def minorlocator(self, loc):
self._long_axis().set_minor_locator(loc) | MNT: make colorbars locators and formatters properties | minorlocator | 6010bb43ed01c48c7c403569dd210490b236a853 | matplotlib | colorbar.py | 9 | 3 | https://github.com/matplotlib/matplotlib.git | 1 | 23 | 0 | 7 | 40 | Python | {
"docstring": "\n Set minor locator being used for colorbar\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def minorlocator(self, loc):
self._long_axis().set_minor_locator(loc)
self._minorlocator = loc
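# A minimal usage sketch for the property setter above (assumes Matplotlib >= 3.5,
# where Colorbar.minorlocator is exposed as a property; illustration only, not part of this record):
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator

fig, ax = plt.subplots()
cbar = fig.colorbar(ax.imshow([[0.0, 1.0], [2.0, 3.0]]))
cbar.minorlocator = AutoMinorLocator(2)  # routed to the colorbar's long axis via the setter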
|
|
@pytest.mark.parametrize(
"time_zone", ["Europe/Berlin", "America/Chicago", "US/Hawaii", "UTC"]
) | 99,327 | 300,467 | 71 | tests/components/recorder/test_models.py | 30 | 16 | def test_process_datetime_to_timestamp_freeze_time(time_zone, hass):
hass.config.set_time_zone(time_zone)
utc_now = dt_util.utcnow()
with freeze_time(utc_now):
epoch = utc_now.timestamp()
assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch
now = dt_util.now()
assert process_datetime_to_timestamp(now) == epoch
@pytest.mark.parametrize(
"time_zone", ["Europe/Berlin", "America/Chicago", "US/Hawaii | Fix process_datetime_to_timestamp and add test coverage (#71755) | test_process_datetime_to_timestamp_freeze_time | 1d9fb4bca871f97109684419f0f9526a0c151f2d | core | test_models.py | 12 | 8 | https://github.com/home-assistant/core.git | 1 | 61 | 1 | 24 | 141 | Python | {
"docstring": "Test we can handle processing database datatimes to timestamps.\n\n This test freezes time to make sure everything matches.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 18,
"vocab_size": 17
} | def test_process_datetime_to_timestamp_freeze_time(time_zone, hass):
hass.config.set_time_zone(time_zone)
utc_now = dt_util.utcnow()
with freeze_time(utc_now):
epoch = utc_now.timestamp()
assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch
now = dt_util.now()
assert process_datetime_to_timestamp(now) == epoch
@pytest.mark.parametrize(
"time_zone", ["Europe/Berlin", "America/Chicago", "US/Hawaii", "UTC"]
) |
21,127 | 101,723 | 306 | tools/alignments/jobs.py | 68 | 19 | def _spatially_filter(self) -> np.ndarray:
logger.debug("Spatially Filter")
assert self._shapes_model is not None
landmarks_norm = self._normalized["l | Alignments Tool - Typing, Documentation + Re-org | _spatially_filter | e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1 | faceswap | jobs.py | 12 | 21 | https://github.com/deepfakes/faceswap.git | 1 | 126 | 0 | 53 | 203 | Python | {
"docstring": " interpret the shapes using our shape model (project and reconstruct)\n\n Returns\n -------\n :class:`numpy.ndarray`\n The filtered landmarks in original coordinate space\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 20,
"vocab_size": 20
} | def _spatially_filter(self) -> np.ndarray:
logger.debug("Spatially Filter")
assert self._shapes_model is not None
landmarks_norm = self._normalized["landmarks"]
# Convert to matrix form
landmarks_norm_table = np.reshape(landmarks_norm, [68 * 2, landmarks_norm.shape[2]]).T
# Project onto shapes model and reconstruct
landmarks_norm_table_rec = self._shapes_model.inverse_transform(
self._shapes_model.transform(landmarks_norm_table))
# Convert back to shapes (numKeypoint, num_dims, numFrames)
landmarks_norm_rec = np.reshape(landmarks_norm_table_rec.T,
[68, 2, landmarks_norm.shape[2]])
# Transform back to image co-ordinates
retval = self._normalized_to_original(landmarks_norm_rec,
self._normalized["scale_factors"],
self._normalized["mean_coords"])
logger.debug("Spatially Filtered: %s", retval)
return retval
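# Generic sketch of the same project-and-reconstruct filtering with a plain PCA
# model (illustration only; the array shapes and names below are assumed, not taken from the tool above):
import numpy as np
from sklearn.decomposition import PCA

tracks = np.random.rand(100, 68 * 2)                 # 100 frames of 68 (x, y) points
shapes_model = PCA(n_components=10).fit(tracks)
filtered = shapes_model.inverse_transform(shapes_model.transform(tracks))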
|
|
47,587 | 196,087 | 48 | sympy/combinatorics/free_groups.py | 15 | 7 | def letter_form(self):
return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)
for i, j in se | Updated import locations | letter_form | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | free_groups.py | 14 | 3 | https://github.com/sympy/sympy.git | 3 | 44 | 0 | 14 | 69 | Python | {
"docstring": "\n The letter representation of a ``FreeGroupElement`` is a tuple\n of generator symbols, with each entry corresponding to a group\n generator. Inverses of the generators are represented by\n negative generator symbols.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> f, a, b, c, d = free_group(\"a b c d\")\n >>> (a**3).letter_form\n (a, a, a)\n >>> (a**2*d**-2*a*b**-4).letter_form\n (a, a, -d, -d, a, -b, -b, -b, -b)\n >>> (a**-2*b**3*d).letter_form\n (-a, -a, b, b, b, d)\n\n See Also\n ========\n\n array_form\n\n ",
"language": "en",
"n_whitespaces": 203,
"n_words": 76,
"vocab_size": 56
} | def letter_form(self):
return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j)
for i, j in self.array_form]))
|
|
34,858 | 150,868 | 26 | freqtrade/rpc/rpc.py | 12 | 6 | def _handle_default_message(self, type, data):
logger.debug(f"Received message from Leader | Refactoring, minor improvements, data provider improvements | _handle_default_message | 2b5f0678772bea0abaf4abe93efc55de43ea3e0e | freqtrade | rpc.py | 9 | 2 | https://github.com/freqtrade/freqtrade.git | 1 | 17 | 0 | 12 | 37 | Python | {
"docstring": "\n Default leader message handler, just logs it. We should never have to\n run this unless the leader sends us some weird message.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 22,
"vocab_size": 21
} | def _handle_default_message(self, type, data):
logger.debug(f"Received message from Leader of type {type}: {data}")
|
|
24,885 | 113,318 | 170 | nni/nas/oneshot/pytorch/base_lightning.py | 37 | 14 | def export_probs(self) -> dict[str, Any]:
result = {}
for module in self.nas_modules:
try:
result.update(module.export_probs(memo=result))
except NotImplementedE | Enhancement of one-shot NAS (v2.9) (#5049) | export_probs | f77db747d07d5c90a3a9f70bb17f71d4573f329e | nni | base_lightning.py | 14 | 22 | https://github.com/microsoft/nni.git | 3 | 52 | 0 | 36 | 86 | Python | {
"docstring": "\n Export the probability of every choice in the search space got chosen.\n\n .. note:: If such method of some modules is not implemented, they will be simply ignored.\n\n Returns\n -------\n dict\n In most cases, keys are names of ``nas_modules`` suffixed with ``/`` and choice name.\n Values are the probability / logits depending on the implementation.\n ",
"language": "en",
"n_whitespaces": 120,
"n_words": 55,
"vocab_size": 47
} | def export_probs(self) -> dict[str, Any]:
result = {}
for module in self.nas_modules:
try:
result.update(module.export_probs(memo=result))
except NotImplementedError:
warnings.warn(
'Some super-modules you have used did not implement export_probs. You might find some logs are missing.',
UserWarning
)
return result
|
|
@region_silo_endpoint | 18,323 | 87,624 | 636 | src/sentry/api/endpoints/organization_releases.py | 265 | 48 | def debounce_update_release_health_data(organization, project_ids):
# Figure out which projects need to get updates from the snuba.
should_update = {}
cache_keys = ["debounce-health:%d" % id for id in project_ids]
cache_data = cache.get_many(cache_keys)
for project_id, cache_key in zip(project_ids, cache_keys):
if cache_data.get(cache_key) is None:
should_update[project_id] = cache_key
if not should_update:
return
projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())}
# This gives us updates for all release-projects which have seen new
# health data over the last days. It will miss releases where the last
# date is longer than what `get_changed_project_release_model_adoptions`
# considers recent.
project_releases = release_health.get_changed_project_release_model_adoptions(
should_update.keys()
)
# Check which we already have rows for.
existing = set(
ReleaseProject.objects.filter(
project_id__in=[x[0] for x in project_releases],
release__version__in=[x[1] for x in project_releases],
).values_list("project_id", "release__version")
)
to_upsert = []
for key in project_releases:
if key not in existing:
to_upsert.append(key)
if to_upsert:
dates = release_health.get_oldest_health_data_for_releases(to_upsert)
for project_id, version in to_upsert:
project = projects.get(project_id)
if project is None:
# should not happen
continue
# Ignore versions that were saved with an empty string before validation was added
| fix(releases): Use Release.is_valid_version on adding releases (#40930) | debounce_update_release_health_data | 0cfac5c8bd83bcc7b96f3294c41a96524b883786 | sentry | organization_releases.py | 16 | 36 | https://github.com/getsentry/sentry.git | 14 | 268 | 1 | 161 | 440 | Python | {
"docstring": "This causes a flush of snuba health data to the postgres tables once\n per minute for the given projects.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 19,
"vocab_size": 18
} | def debounce_update_release_health_data(organization, project_ids):
# Figure out which projects need to get updates from the snuba.
should_update = {}
cache_keys = ["debounce-health:%d" % id for id in project_ids]
cache_data = cache.get_many(cache_keys)
for project_id, cache_key in zip(project_ids, cache_keys):
if cache_data.get(cache_key) is None:
should_update[project_id] = cache_key
if not should_update:
return
projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())}
# This gives us updates for all release-projects which have seen new
# health data over the last days. It will miss releases where the last
# date is longer than what `get_changed_project_release_model_adoptions`
# considers recent.
project_releases = release_health.get_changed_project_release_model_adoptions(
should_update.keys()
)
# Check which we already have rows for.
existing = set(
ReleaseProject.objects.filter(
project_id__in=[x[0] for x in project_releases],
release__version__in=[x[1] for x in project_releases],
).values_list("project_id", "release__version")
)
to_upsert = []
for key in project_releases:
if key not in existing:
to_upsert.append(key)
if to_upsert:
dates = release_health.get_oldest_health_data_for_releases(to_upsert)
for project_id, version in to_upsert:
project = projects.get(project_id)
if project is None:
# should not happen
continue
# Ignore versions that were saved with an empty string before validation was added
if not Release.is_valid_version(version):
continue
# We might have never observed the release. This for instance can
# happen if the release only had health data so far. For these cases
# we want to create the release the first time we observed it on the
# health side.
release = Release.get_or_create(
project=project, version=version, date_added=dates.get((project_id, version))
)
# Make sure that the release knows about this project. Like we had before
# the project might not have been associated with this release yet.
release.add_project(project)
# Debounce updates for a minute
cache.set_many(dict(zip(should_update.values(), [True] * len(should_update))), 60)
@region_silo_endpoint |
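The one-minute debounce above hinges on short-lived cache keys. A framework-free sketch of the same pattern (names here are illustrative, not Sentry's):

import time

_last_run = {}

def should_process(key, period=60):
    """Return True for a key at most once per `period` seconds."""
    now = time.monotonic()
    if now - _last_run.get(key, 0.0) < period:
        return False
    _last_run[key] = now
    return True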
56,613 | 222,520 | 95 | python3.10.4/Lib/dis.py | 96 | 19 | def show_code(co, *, file=None):
print(code_info(co), file=file)
_Instruction = collections.namedtuple("_Instruction",
"opname opcode arg argval argrepr offset starts_line is_jump_target")
_Instruction.opname.__doc__ = "Human readable name for operation"
_Instruction.opcode.__doc__ = "Numeric co | add python 3.10.4 for windows | show_code | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | dis.py | 9 | 2 | https://github.com/XX-net/XX-Net.git | 1 | 23 | 0 | 67 | 171 | Python | {
"docstring": "Print details of methods, functions, or code to *file*.\n\n If *file* is not provided, the output is printed on stdout.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 20,
"vocab_size": 19
} | def show_code(co, *, file=None):
print(code_info(co), file=file)
_Instruction = collections.namedtuple("_Instruction",
"opname opcode arg argval argrepr offset starts_line is_jump_target")
_Instruction.opname.__doc__ = "Human readable name for operation"
_Instruction.opcode.__doc__ = "Numeric code for operation"
_Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None"
_Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg"
_Instruction.argrepr.__doc__ = "Human readable description of operation argument"
_Instruction.offset.__doc__ = "Start index of operation within bytecode sequence"
_Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None"
_Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False"
_OPNAME_WIDTH = 20
_OPARG_WIDTH = 5
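# Usage sketch: show_code is exposed as dis.show_code in the standard library.
import dis

def square(x):
    return x * x

dis.show_code(square)            # prints name, argument count, constants, names, ...
dis.show_code(square.__code__)   # a raw code object works as well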
|
|
@_copy_docstring_and_deprecators(matplotlib.image.imread) | 23,562 | 109,381 | 38 | lib/matplotlib/pyplot.py | 17 | 11 | def set_cmap(cmap):
cmap = colormaps[cmap]
rc('image', cmap=cmap.name)
im = gci()
if im is not None:
im.set_cmap(cmap)
| MNT: convert tests and internal usage way from using mpl.cm.get_cmap | set_cmap | a17f4f3bd63e3ca3754f96d7db4ce5197720589b | matplotlib | pyplot.py | 9 | 6 | https://github.com/matplotlib/matplotlib.git | 2 | 39 | 1 | 15 | 82 | Python | {
"docstring": "\n Set the default colormap, and applies it to the current image if any.\n\n Parameters\n ----------\n cmap : `~matplotlib.colors.Colormap` or str\n A colormap instance or the name of a registered colormap.\n\n See Also\n --------\n colormaps\n matplotlib.cm.register_cmap\n matplotlib.cm.get_cmap\n ",
"language": "en",
"n_whitespaces": 74,
"n_words": 36,
"vocab_size": 33
} | def set_cmap(cmap):
cmap = colormaps[cmap]
rc('image', cmap=cmap.name)
im = gci()
if im is not None:
im.set_cmap(cmap)
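# Usage sketch for the pyplot helper above (illustration only):
import matplotlib.pyplot as plt

plt.imshow([[0, 1], [2, 3]])
plt.set_cmap("viridis")   # becomes the default colormap and recolors the current image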
@_copy_docstring_and_deprecators(matplotlib.image.imread) |
117,343 | 320,776 | 608 | qutebrowser/completion/completionwidget.py | 114 | 38 | def completion_item_focus(self, which, history=False):
if history:
if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or
not self._active):
if which == 'next':
self._cmd.command_history_next()
return
| mypy: Upgrade to PyQt5-stubs 5.15.6.0
For some unknown reason, those new stubs cause a *lot* of things now to be
checked by mypy which formerly probably got skipped due to Any being implied
somewhere.
The stubs themselves mainly improved, with a couple of regressions too.
In total, there were some 337 (!) new mypy errors. This commit fixes almost all
of them, and the next commit improves a fix to get things down to 0 errors
again.
Overview of the changes:
==== qutebrowser/app.py
- Drop type ignore due to improved stubs.
==== qutebrowser/browser/browsertab.py
- Specify the type of _widget members more closely than just QWidget.
This is debatable: I suppose the abstract stuff shouldn't need to know
anything about the concrete backends at all. But it seems like we cut some
corners when initially implementing things, and put some code in browsertab.py
just because the APIs of both backends happened to be compatible. Perhaps
something to reconsider once we drop QtWebKit and hopefully implement a dummy
backend.
- Add an additional assertion in AbstractAction.run_string. This is already
covered by the isinstance(member, self.action_base) above it, but that's too
dynamic for mypy to understand.
- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x
and y components), not a single int.
- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x
and y components), not a single int.
- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass
fractional percentages too.
- Specify the type for AbstractHistoryPrivate._history. See above (_widget) re
this being debatable.
- Fix the return type of AbstractTabPrivate.event_target(), which can be None
(see #3888).
- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS
return value), not None.
- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can
be None to use the last used position.
- Declare the type of sub-objects of AbstractTab.
- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.
==== qutebrowser/browser/commands.py
- Make sure the active window is a MainWindow (with a .win_id attribute).
==== qutebrowser/browser/downloadview.py
- Add _model() which makes sure that self.model() is a DownloadModel, not None
or any other model. This is needed because other methods access a variety of
custom attributes on it, e.g. last_index().
==== qutebrowser/browser/greasemonkey.py
- Add an ignore for AbstractDownload.requested_url which we patch onto the
downloads. Probably would be nicer to add it as a proper attribute which always
gets set by the DownloadManager.
==== qutebrowser/browser/hints.py
- Remove type ignores for QUrl.toString().
- Add a new type ignore for combining different URL flags (which works, but is
not exactly type safe... still probably a regression in the stubs).
- Make sure the things we get back from self._get_keyparser are what we actually
expect. Probably should introduce a TypedDict (and/or overloads for
_get_keyparser with typing.Literal) to teach mypy about the exact return value.
See #7098.
This is needed because we access Hint/NormalKeyParser-specific attributes such
as .set_inhibited_timout() or .update_bindings().
==== qutebrowser/browser/inspector.py
- Similar changes than in browsertab.py to make some types where we share API
(e.g. .setPage()) more concrete. Didn't work out unfortunately, see next
commit.
==== qutebrowser/browser/network/pac.py
- Remove now unneeded type ignore for signal.
==== qutebrowser/browser/qtnetworkdownloads.py
- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an
AbstractDownload), so that we can call ._uses_nam() on it.
==== qutebrowser/browser/qutescheme.py
- Remove now unneeded type ignore for QUrl flags.
==== qutebrowser/browser/urlmarks.py
- Specify the type of UrlMarkManager._lineparser, as those only get initialized
in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.
==== qutebrowser/browser/webelem.py
- New casts to turn single KeyboardModifier (enum) entries into
KeyboardModifiers (flags). Might not be needed anymore with Qt 6.
- With that, casting the final value is now unneeded.
==== qutebrowser/browser/webengine/notification.py
- Remove now unneeded type ignore for signal.
- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()
is a QProcess, not just any QObject.
==== qutebrowser/browser/webengine/webenginedownloads.py
- Remove now unneeded type ignores for signals.
==== qutebrowser/browser/webengine/webengineelem.py
- Specify the type of WebEngineElement._tab.
- Remove now unneeded type ignore for mixed flags.
==== qutebrowser/browser/webengine/webengineinspector.py
- See changes to inspector.py and next commit.
- Remove now unneeded type ignore for signal.
==== qutebrowser/browser/webengine/webenginequtescheme.py
- Remove now unneeded type ignore for mixed flags.
==== qutebrowser/browser/webengine/webenginesettings.py
- Ignore access of .setter attribute which we patch onto QWebEngineProfile.
Would be nice to have a subclass or wrapper-class instead.
==== qutebrowser/browser/webengine/webenginetab.py
- Specified the type of _widget members more closely than just QWidget.
See browsertab.py changes for details.
- Remove some now-unneeded type ignores for creating FindFlags.
- Specify more concrete types for WebEngineTab members where we actually need to
access WebEngine-specific attributes.
- Make sure the page we get is our custom WebEnginePage subclass, not just any
QWebEnginePage. This is needed because we access custom attributes on it.
==== qutebrowser/browser/webengine/webview.py
- Make sure the page we get is our custom WebEnginePage subclass, not just any
QWebEnginePage. This is needed because we access custom attributes on it.
==== qutebrowser/browser/webkit/network/networkreply.py
- Remove now unneeded type ignores for signals.
==== qutebrowser/browser/webkit/webkitinspector.py
- See changes to inspector.py and next commit.
==== qutebrowser/browser/webkit/webkittab.py
- Specify the type of _widget members more closely than just QWidget.
See browsertab.py changes for details.
- Add a type ignore for WebKitAction because our workaround needs to
treat them as ints (which is allowed by PyQt, even if not type-safe).
- Add new ignores for findText calls: The text is a QString and can be None; the
flags are valid despite mypy thinking they aren't (stubs regression?).
- Specify the type for WebKitHistoryPrivate._history, because we access
WebKit-specific attributes. See above (_widget) re this being debatable.
- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs
regression?).
- Make sure the .page() and .page().networkAccessManager() are our subclasses
rather than the more generic QtWebKit objects, as we use custom attributes.
- Add new type ignores for signals (stubs regression!)
==== qutebrowser/browser/webkit/webpage.py
- Make sure the .networkAccessManager() is our subclass rather than the more
generic QtWebKit object, as we use custom attributes.
- Replace a cast by a type ignore. The cast didn't work anymore.
==== qutebrowser/browser/webkit/webview.py
- Make sure the .page() is our subclass rather than the more generic QtWebKit
object, as we use custom attributes.
==== qutebrowser/commands/userscripts.py
- Remove now unneeded type ignore for signal.
==== qutebrowser/completion/completer.py
- Add a new _completion() getter (which ensures it actually gets the completion
view) rather than accessing the .parent() directly (which could be any QObject).
==== qutebrowser/completion/completiondelegate.py
- Make sure self.parent() is a CompletionView (no helper method as there is only
one instance).
- Remove a now-unneeded type ignore for adding QSizes.
==== qutebrowser/completion/completionwidget.py
- Add a ._model() getter which ensures that we get a CompletionModel (with
custom attributes) rather than Qt's .model() which can be any QAbstractItemModel
(or None).
- Removed a now-unneeded type ignore for OR-ing flags.
==== qutebrowser/completion/models/completionmodel.py
- Remove now unneeded type ignores for signals.
- Ignore a complaint about .set_pattern() not being defined. Completion
categories don't share any common parent class, so it would be good to introduce
a typing.Protocol for this. See #7098.
==== qutebrowser/components/misccommands.py
- Removed a now-unneeded type ignore for OR-ing flags.
==== qutebrowser/components/readlinecommands.py
- Make sure QApplication.instance() is a QApplication (and not just a
QCoreApplication). This includes the former "not None" check.
==== qutebrowser/components/scrollcommands.py
- Add basic annotation for "funcs" dict. Could have a callable protocol to
specify it needs a count kwarg, see #7098.
==== qutebrowser/config/stylesheet.py
- Correctly specify that stylesheet apply to QWidgets, not any QObject.
- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy
about this with overloads and protocols (stylesheet for set_register being None
=> STYLESHEET needs to be defined, otherwise anything goes), but perhaps not
worth the trouble. See #7098.
==== qutebrowser/keyinput/keyutils.py
- Remove some now-unneeded type ignores and add a cast for using a single enum
value as flags. Might need to look at this again with Qt 6 support.
==== qutebrowser/keyinput/modeman.py
- Add a FIXME for using a TypedDict, see comments for hints.py above.
==== qutebrowser/mainwindow/mainwindow.py
- Remove now-unneeded type ignores for calling with OR-ed flags.
- Improve where we cast from WindowType to WindowFlags, no int needed
- Use new .tab_bar() getter, see below.
==== qutebrowser/mainwindow/prompt.py
- Remove now-unneeded type ignores for calling with OR-ed flags.
==== qutebrowser/mainwindow/statusbar/bar.py
- Adjust type ignores around @pyqtProperty. The fact one is still needed seems
like a stub regression.
==== qutebrowser/mainwindow/statusbar/command.py
- Fix type for setText() override (from QLineEdit): text can be None
(QString in C++).
==== qutebrowser/mainwindow/statusbar/url.py
- Adjust type ignores around @pyqtProperty. The fact one is still needed seems
like a stub regression.
==== qutebrowser/mainwindow/tabbedbrowser.py
- Specify that TabDeque manages browser tabs, not any QWidgets. It accesses
AbstractTab-specific attributes.
- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access
.maybe_hide.
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.
- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and
.widget(), which ensures that the return values are valid AbstractTabs (or None
for _tab_by_idx). This is needed because we access AbstractTab-specific
attributes.
- For some places, where the tab can be None, continue using .currentTab() but
add asserts.
- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None
possibility now.
==== qutebrowser/mainwindow/tabwidget.py
- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and
.widget() are of type TabBar and AbstractTab, respectively.
- Add additional assertions where we expect ._tab_by_idx() to never be None.
- Remove dead code in get_tab_fields for handling a None y scroll position. I
was unable to find any place in the code where this could be set to None.
- Remove some now-unneeded type ignores and casts, as mypy now knows that
_type_by_idx() could be None.
- Work around a strange instance where mypy complains about not being able to
find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,
despite it clearly being shown as a bool *inside* that class without any
annotation.
- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in
fact a TabWidget.
==== qutebrowser/misc/crashsignal.py
- Remove now unneeded type ignores for signals.
==== qutebrowser/misc/editor.py
- Remove now unneeded type ignores for signals.
==== qutebrowser/misc/ipc.py
- Remove now unneeded type ignores for signals.
- Add new type ignores for .error() which is both a signal and a getter
(stub regression?). Won't be relevant for Qt 6 anymore, as the signal was
renamed to errorOccurred in 5.15.
==== qutebrowser/misc/objects.py
- Make sure mypy knows that objects.app is our custom Application (with custom
attributes) rather than any QApplication.
==== qutebrowser/utils/objreg.py
- Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol,
but ideally, the whole objreg stuff should die one day anyways.
==== tests/unit/completion/test_completer.py
- Make CompletionWidgetStub inherit from CompletionView so that it passes the
new isinstance() asserts in completer.py (see above). | completion_item_focus | a20bb67a878b2e68abf8268c1b0a27f018d01352 | qutebrowser | completionwidget.py | 18 | 41 | https://github.com/qutebrowser/qutebrowser.git | 14 | 292 | 0 | 78 | 494 | Python | {
"docstring": "Shift the focus of the completion menu to another item.\n\n Args:\n which: 'next', 'prev',\n 'next-category', 'prev-category',\n 'next-page', or 'prev-page'.\n history: Navigate through command history if no text was typed.\n ",
"language": "en",
"n_whitespaces": 101,
"n_words": 29,
"vocab_size": 28
} | def completion_item_focus(self, which, history=False):
if history:
if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or
not self._active):
if which == 'next':
self._cmd.command_history_next()
return
elif which == 'prev':
self._cmd.command_history_prev()
return
else:
raise cmdutils.CommandError("Can't combine --history with "
"{}!".format(which))
if not self._active:
return
selmodel = self.selectionModel()
indices = {
'next': lambda: self._next_idx(upwards=False),
'prev': lambda: self._next_idx(upwards=True),
'next-category': lambda: self._next_category_idx(upwards=False),
'prev-category': lambda: self._next_category_idx(upwards=True),
'next-page': lambda: self._next_page(upwards=False),
'prev-page': lambda: self._next_page(upwards=True),
}
idx = indices[which]()
if not idx.isValid():
return
selmodel.setCurrentIndex(
idx,
QItemSelectionModel.ClearAndSelect |
QItemSelectionModel.Rows)
# if the last item is focused, try to fetch more
next_idx = self.indexBelow(idx)
if not self.visualRect(next_idx).isValid():
self.expandAll()
count = self._model().count()
if count == 0:
self.hide()
elif count == 1 and config.val.completion.quick:
self.hide()
elif config.val.completion.show == 'auto':
self.show()
|
|
20,679 | 101,259 | 575 | tools/manual/detected_faces.py | 65 | 58 | def _background_extract(self, output_folder, progress_queue):
_io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True),
loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))
for frame_idx, (filename, image) in enumerate(_io["loader"].load()):
logger.trace("Outputting frame: %s: %s", frame_idx, filename)
src_filename = os.path.basename(filename)
frame_name = os.path.splitext(src_filename)[0]
progress_queue.put(1)
for face_idx, face in enumerate(self._frame_faces[frame_idx]):
output = f"{frame_name}_{face_idx}.png"
aligned = AlignedFace(face.landmarks_xy,
image=image,
centering="head",
size=512) # TODO user selectable size
meta = dict(alignments=face.to_png_meta(),
source=dict(alignments_version=self._alignments.version,
original_filename=output,
face_index=face_idx,
source_filename=src_filename,
source_is_video=self._globals.is_video,
source_frame_dims=image.shape[:2]))
| lib.align updates:
- alignments.py
- Add typed dicts for imported alignments
- Explicitly check for presence of thumb value in alignments dict
- linting
- detected_face.py
- Typing
- Linting
- Legacy support for pre-aligned face
- Update dependencies to new property names | _background_extract | 5e73437be47f2410439a3c6716de96354e6a0c94 | faceswap | detected_faces.py | 18 | 24 | https://github.com/deepfakes/faceswap.git | 3 | 232 | 0 | 56 | 366 | Python | {
"docstring": " Perform the background extraction in a thread so GUI doesn't become unresponsive.\n\n Parameters\n ----------\n output_folder: str\n The location to save the output faces to\n progress_queue: :class:`queue.Queue`\n The queue to place incremental counts to for updating the GUI's progress bar\n ",
"language": "en",
"n_whitespaces": 97,
"n_words": 39,
"vocab_size": 33
} | def _background_extract(self, output_folder, progress_queue):
_io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True),
loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))
for frame_idx, (filename, image) in enumerate(_io["loader"].load()):
logger.trace("Outputting frame: %s: %s", frame_idx, filename)
src_filename = os.path.basename(filename)
frame_name = os.path.splitext(src_filename)[0]
progress_queue.put(1)
for face_idx, face in enumerate(self._frame_faces[frame_idx]):
output = f"{frame_name}_{face_idx}.png"
aligned = AlignedFace(face.landmarks_xy,
image=image,
centering="head",
size=512) # TODO user selectable size
meta = dict(alignments=face.to_png_meta(),
source=dict(alignments_version=self._alignments.version,
original_filename=output,
face_index=face_idx,
source_filename=src_filename,
source_is_video=self._globals.is_video,
source_frame_dims=image.shape[:2]))
b_image = encode_image(aligned.face, ".png", metadata=meta)
_io["saver"].save(output, b_image)
_io["saver"].close()
|
|
117,149 | 320,361 | 40 | src/paperless_tesseract/tests/test_checks.py | 12 | 8 | def test_multi_part_language(self, m):
m.return_value = ["chi_sim", "eng"]
msgs = check_default_language_available(None)
self.assertEqual(len | Fixes language code checks around two part languages | test_multi_part_language | 55ef0d4a1b62c3abe8500cad97ddeecf9f746b84 | paperless-ngx | test_checks.py | 9 | 4 | https://github.com/paperless-ngx/paperless-ngx.git | 1 | 34 | 0 | 11 | 59 | Python | {
"docstring": "\n GIVEN:\n - An OCR language which is multi part (ie chi-sim)\n - The language is correctly formatted\n WHEN:\n - Installed packages are checked\n THEN:\n - No errors are reported\n ",
"language": "en",
"n_whitespaces": 102,
"n_words": 29,
"vocab_size": 23
} | def test_multi_part_language(self, m):
m.return_value = ["chi_sim", "eng"]
msgs = check_default_language_available(None)
self.assertEqual(len(msgs), 0)
|
|
17,072 | 80,512 | 84 | awx/main/tasks/callback.py | 16 | 12 | def finished_callback(self, runner_obj):
| Decoupled callback functions from BaseTask Class
--- Removed all callback functions from 'jobs.py' and put them in a new file '/awx/main/tasks/callback.py'
--- Modified Unit tests unit moved
--- Moved 'update_model' from jobs.py to /awx/main/utils/update_model.py | finished_callback | 443bdc1234682dd0004bae372078512fcf37cce9 | awx | callback.py | 9 | 8 | https://github.com/ansible/awx.git | 1 | 50 | 0 | 16 | 85 | Python | {
"docstring": "\n Ansible runner callback triggered on finished run\n ",
"language": "en",
"n_whitespaces": 22,
"n_words": 7,
"vocab_size": 7
} | def finished_callback(self, runner_obj):
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
|
|
@pytest.mark.parametrize(
"sensitive_fields",
[
{"NonExistingType": {}},
{"Product": {"nonExistingField"}},
{"Node": {"id"}},
],
) | 5,117 | 27,579 | 79 | saleor/webhook/observability/tests/test_obfuscation.py | 33 | 11 | def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory):
query =
result = {"data": "result"}
sensitive_fields = {" | Observability reporter (#9803)
* Initial commit
* Add observability celery beat task
* Add observability_reporter_task and observability_send_events
* Convert payload to camel case
* Add fakeredis to dev dependencies
* Add redis buffer tests
* Refactor buffer
* Update
* Optimize buffer
* Add tests
* Add types-redis to dev dependencies
* Refactor
* Fix after rebase
* Refactor opentracing
* Add opentracing to observability tasks
* Add more tests
* Fix buffer fixtures
* Report dropped events
* Fix buffer tests
* Refactor get_buffer
* Refactor unit tests
* Set Redis connection client_name
* Refactor redis tests
* Fix test_get_or_create_connection_pool
* Fix JsonTruncText comparison
* Add more generate_event_delivery_attempt_payload tests | test_anonymize_gql_operation_response_with_fragment_spread | 7ea7916c65357741c3911e307acb58d547a5e91a | saleor | test_obfuscation.py | 11 | 23 | https://github.com/saleor/saleor.git | 1 | 49 | 1 | 29 | 152 | Python | {
"docstring": "\n fragment ProductFragment on Product {\n id\n name\n }\n query products($first: Int){\n products(channel: \"channel-pln\", first:$first){\n edges{\n node{\n ... ProductFragment\n variants {\n variantName: name\n }\n }\n }\n }\n }",
"language": "en",
"n_whitespaces": 139,
"n_words": 27,
"vocab_size": 19
} | def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory):
query =
result = {"data": "result"}
sensitive_fields = {"Product": {"name"}}
operation_result = gql_operation_factory(query, result=result)
anonymize_gql_operation_response(operation_result, sensitive_fields)
assert operation_result.result["data"] == MASK
@pytest.mark.parametrize(
"sensitive_fields",
[
{"NonExistingType": {}},
{"Product": {"nonExistingField"}},
{"Node": {"id"}},
],
) |
@py_random_state(3) | 42,003 | 176,620 | 130 | networkx/algorithms/bipartite/generators.py | 74 | 23 | def complete_bipartite_graph(n1, n2, create_using=None):
G = nx.empty_g | Adjust the usage of nodes_or_number decorator (#5599)
* recorrect typo in decorators.py
* Update tests to show troubles in current code
* fix troubles with usage of nodes_or_number
* fix typo
* remove nodes_or_number where that makes sense
* Reinclude nodes_or_numbers and add some tests for nonstandard usage
* fix typo
* hopefully final tweaks (no behavior changes)
* Update test_classic.py
Co-authored-by: Jarrod Millman <[email protected]> | complete_bipartite_graph | de1d00f20e0bc14f1cc911b3486e50225a8fa168 | networkx | generators.py | 10 | 15 | https://github.com/networkx/networkx.git | 8 | 148 | 1 | 54 | 250 | Python | {
"docstring": "Returns the complete bipartite graph `K_{n_1,n_2}`.\n\n The graph is composed of two partitions with nodes 0 to (n1 - 1)\n in the first and nodes n1 to (n1 + n2 - 1) in the second.\n Each node in the first is connected to each node in the second.\n\n Parameters\n ----------\n n1, n2 : integer or iterable container of nodes\n If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`.\n If a container, the elements are the nodes.\n create_using : NetworkX graph instance, (default: nx.Graph)\n Return graph of this type.\n\n Notes\n -----\n Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are\n containers of nodes. If only one of n1 or n2 are integers, that\n integer is replaced by `range` of that integer.\n\n The nodes are assigned the attribute 'bipartite' with the value 0 or 1\n to indicate which bipartite set the node belongs to.\n\n This function is not imported in the main namespace.\n To use it use nx.bipartite.complete_bipartite_graph\n ",
"language": "en",
"n_whitespaces": 237,
"n_words": 166,
"vocab_size": 93
} | def complete_bipartite_graph(n1, n2, create_using=None):
G = nx.empty_graph(0, create_using)
if G.is_directed():
raise nx.NetworkXError("Directed Graph not supported")
n1, top = n1
n2, bottom = n2
if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral):
bottom = [n1 + i for i in bottom]
G.add_nodes_from(top, bipartite=0)
G.add_nodes_from(bottom, bipartite=1)
if len(G) != len(top) + len(bottom):
raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes")
G.add_edges_from((u, v) for u in top for v in bottom)
G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})"
return G
@py_random_state(3) |
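A short usage sketch for the generator above, reached through the bipartite namespace as the docstring notes:

import networkx as nx

G = nx.bipartite.complete_bipartite_graph(2, 3)
sorted(G.edges())        # [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4)]
[n for n, d in G.nodes(data="bipartite") if d == 0]   # [0, 1]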
56,088 | 220,698 | 130 | python3.10.4/Lib/asyncio/sslproto.py | 45 | 13 | def shutdown(self, callback=None):
if self._state == _UNWRAPPED:
raise RuntimeError('no security layer present')
if self._state == _SHUTDOWN:
raise RuntimeError('shutdown in progress')
assert self._state in (_WRAPPED, _DO_HANDSHAKE)
self._state = _SHUTDOWN
self._shutdown_cb = callback
ssldata, ap | add python 3.10.4 for windows | shutdown | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | sslproto.py | 10 | 11 | https://github.com/XX-net/XX-Net.git | 4 | 79 | 0 | 31 | 130 | Python | {
"docstring": "Start the SSL shutdown sequence.\n\n Return a list of ssldata. A ssldata element is a list of buffers\n\n The optional *callback* argument can be used to install a callback that\n will be called when the shutdown is complete. The callback will be\n called without arguments.\n ",
"language": "en",
"n_whitespaces": 80,
"n_words": 45,
"vocab_size": 32
} | def shutdown(self, callback=None):
if self._state == _UNWRAPPED:
raise RuntimeError('no security layer present')
if self._state == _SHUTDOWN:
raise RuntimeError('shutdown in progress')
assert self._state in (_WRAPPED, _DO_HANDSHAKE)
self._state = _SHUTDOWN
self._shutdown_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
return ssldata
|
|
54,359 | 216,053 | 154 | salt/cloud/clouds/proxmox.py | 46 | 18 | def preferred_ip(vm_, ips):
proto = config.get_cloud_config_value(
"protocol", vm_, __opts__, default="ipv4", search_global=False
)
family = socket.AF_INET
if proto == "ipv6":
family = socket.AF_INET6
for ip in ips:
ignore_ip = ignore_cidr(vm_, ip)
if ignore_ip:
continue
try:
socket.inet_pton(family, ip)
return ip
except Exception: # pylint: disable=broad-except
continue
return False
| Add support for get IP-address from agent | preferred_ip | a5679caf65c7c79cd72841b6e5793b9b693744c9 | salt | proxmox.py | 11 | 17 | https://github.com/saltstack/salt.git | 5 | 78 | 0 | 36 | 128 | Python | {
"docstring": "\n Return either an 'ipv4' (default) or 'ipv6' address depending on 'protocol' option.\n The list of 'ipv4' IPs is filtered by ignore_cidr() to remove any unreachable private addresses.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 27,
"vocab_size": 26
} | def preferred_ip(vm_, ips):
proto = config.get_cloud_config_value(
"protocol", vm_, __opts__, default="ipv4", search_global=False
)
family = socket.AF_INET
if proto == "ipv6":
family = socket.AF_INET6
for ip in ips:
ignore_ip = ignore_cidr(vm_, ip)
if ignore_ip:
continue
try:
socket.inet_pton(family, ip)
return ip
except Exception: # pylint: disable=broad-except
continue
return False
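# The loop above keeps only addresses that socket.inet_pton accepts for the
# requested family; a minimal, Salt-independent illustration of that check:
import socket

socket.inet_pton(socket.AF_INET, "192.0.2.10")        # parses, so it would be kept as IPv4
try:
    socket.inet_pton(socket.AF_INET, "2001:db8::1")   # not IPv4 -> raises OSError
except OSError:
    pass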
|
|
14,866 | 68,785 | 17 | erpnext/accounts/doctype/account/account.py | 23 | 11 | def get_parent_account(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql(
% ("%s", searchfield, "%s", "%s", "%s"),
(filters["company"], "%%%s%%" % txt, page_len, start),
as_list=1,
| refactor: use db independent offset syntax (#31345)
* chore: use db independent offset syntax
* fix: typo
* style: reformat code to black spec
Co-authored-by: Ankush Menat <[email protected]> | get_parent_account | 00ef499739959630cd7cf97419fbb6ca59be05f2 | erpnext | account.py | 10 | 9 | https://github.com/frappe/erpnext.git | 1 | 56 | 0 | 18 | 85 | Python | {
"docstring": "select name from tabAccount\n\t\twhere is_group = 1 and docstatus != 2 and company = %s\n\t\tand %s like %s order by name limit %s offset %s",
"language": "en",
"n_whitespaces": 24,
"n_words": 27,
"vocab_size": 19
} | def get_parent_account(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql(
% ("%s", searchfield, "%s", "%s", "%s"),
(filters["company"], "%%%s%%" % txt, page_len, start),
as_list=1,
)
|
|
81,783 | 276,930 | 30 | keras/utils/kernelized_utils.py | 19 | 13 | def exact_laplacian_kernel(x, y, stddev):
r
x_aligned, y_aligned = _align_matrices(x, y)
diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | exact_laplacian_kernel | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | kernelized_utils.py | 12 | 27 | https://github.com/keras-team/keras.git | 1 | 53 | 0 | 18 | 81 | Python | {
"docstring": "Computes exact Laplacian kernel value(s) for tensors x and y using stddev.\n\n The Laplacian kernel for vectors u, v is defined as follows:\n K(u, v) = exp(-||u-v|| / stddev)\n where the norm is the l1-norm. x, y can be either vectors or matrices. If they\n are vectors, they must have the same dimension. If they are matrices, they\n must have the same number of columns. In the latter case, the method returns\n (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and\n v is a row from y.\n\n Args:\n x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim].\n y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim].\n stddev: The width of the Gaussian kernel.\n\n Returns:\n A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix\n of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for\n all (u,v) pairs where u, v are rows from x and y respectively.\n\n Raises:\n ValueError: if the shapes of x, y are not compatible.\n ",
"language": "en",
"n_whitespaces": 269,
"n_words": 195,
"vocab_size": 106
} | def exact_laplacian_kernel(x, y, stddev):
r
x_aligned, y_aligned = _align_matrices(x, y)
diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)
return tf.exp(-diff_l1_norm / stddev)
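# Worked example of K(u, v) = exp(-||u - v||_1 / stddev) in plain NumPy
# (an independent check of the formula, not the TF implementation above):
import numpy as np

u, v, stddev = np.array([1.0, 2.0]), np.array([2.0, 4.0]), 1.0
k = np.exp(-np.abs(u - v).sum() / stddev)   # exp(-3.0), roughly 0.0498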
|
|
75,534 | 259,028 | 232 | sklearn/manifold/_isomap.py | 93 | 23 | def transform(self, X):
check_is_fitted(self)
if self.n_neighbors is not None:
distances, indices = self.nbrs_.kneighbors(X, return_dis | ENH Isomap supports radius-based neighbors (#19794)
Co-authored-by: Tom Dupré la Tour <[email protected]>
Co-authored-by: Olivier Grisel <[email protected]>
Co-authored-by: Thomas J. Fan <[email protected]>
Co-authored-by: Julien Jerphanion <[email protected]> | transform | 71656844586f212324678804ace73f7a266deb00 | scikit-learn | _isomap.py | 13 | 14 | https://github.com/scikit-learn/scikit-learn.git | 3 | 140 | 0 | 75 | 213 | Python | {
"docstring": "Transform X.\n\n This is implemented by linking the points X into the graph of geodesic\n distances of the training data. First the `n_neighbors` nearest\n neighbors of X are found in the training data, and from these the\n shortest geodesic distances from each point in X to each point in\n the training data are computed in order to construct the kernel.\n The embedding of X is the projection of this kernel onto the\n embedding vectors of the training set.\n\n Parameters\n ----------\n X : array-like, shape (n_queries, n_features)\n If neighbors_algorithm='precomputed', X is assumed to be a\n distance matrix or a sparse graph of shape\n (n_queries, n_samples_fit).\n\n Returns\n -------\n X_new : array-like, shape (n_queries, n_components)\n X transformed in the new space.\n ",
"language": "en",
"n_whitespaces": 260,
"n_words": 118,
"vocab_size": 69
} | def transform(self, X):
check_is_fitted(self)
if self.n_neighbors is not None:
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
else:
distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)
# Create the graph of shortest distances from X to
# training data via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
n_samples_fit = self.nbrs_.n_samples_fit_
n_queries = distances.shape[0]
G_X = np.zeros((n_queries, n_samples_fit))
for i in range(n_queries):
G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
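# Usage sketch for transform(): embed unseen samples with an Isomap fitted on
# training data (standard scikit-learn API; toy data chosen for illustration):
from sklearn.datasets import load_digits
from sklearn.manifold import Isomap

X, _ = load_digits(return_X_y=True)
embedding = Isomap(n_components=2).fit(X[:100])
X_new = embedding.transform(X[100:110])   # shape (10, 2)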
|
|
46,283 | 189,989 | 212 | manim/mobject/svg/svg_mobject.py | 48 | 12 | def generate_config_style_dict(self) -> dict[str, str]:
keys_converting_dict = {
"fill": ("color", "fill_color"),
"fill-opacity": ("opacity", "fill_opacity"),
"stroke": ("color", "stroke_color"),
"stroke-opacity": ("opacity", "stroke_opacity"),
"stroke-width": ("stroke_width",),
}
svg_default_dict = self.svg_default
result = {}
for | Ported improved implementation of :class:`.SVGMobject` from 3b1b/manim (#2898)
* port SVGMobject from 3b1b/manim
* added svgelements as dependency
* revert change of default values
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* set default stroke_width of svg elements to 0 if not set
* fix handling of circles with different rx/ry
* turn more methods into staticmethods
* removed duplicated method
* set/adapt stroke-width of some test SVGs
* updated control data
* forgot some control data
* fixed init_colors in tex_mobject and text_mobject
* minor changes, added docstrings
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* module docstring, removed import
* vector_to_coords changed again
* nail sphinx version to below 5.1 to fix rtd (?)
* update test_text control data for science
* changed Brace to use VMobjectFromSVGPath
* remove unused classes and methods depending on old SVG path implementation
* remove style_utils and svg_path modules
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* change test_text to use monospace font
* restore geometry.polygram
* added get_mobject_type_class auxiliary method; changed polyline implementation to ad-hoc approach
* restore test_text to previous version
* skip Use tags as svgelements already populates them
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> | generate_config_style_dict | 309c9d41eb734ca85a7aea5533f88a6d4ee7c944 | manim | svg_mobject.py | 13 | 17 | https://github.com/ManimCommunity/manim.git | 4 | 104 | 0 | 40 | 180 | Python | {
"docstring": "Generate a dictionary holding the default style information.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def generate_config_style_dict(self) -> dict[str, str]:
keys_converting_dict = {
"fill": ("color", "fill_color"),
"fill-opacity": ("opacity", "fill_opacity"),
"stroke": ("color", "stroke_color"),
"stroke-opacity": ("opacity", "stroke_opacity"),
"stroke-width": ("stroke_width",),
}
svg_default_dict = self.svg_default
result = {}
for svg_key, style_keys in keys_converting_dict.items():
for style_key in style_keys:
if svg_default_dict[style_key] is None:
continue
result[svg_key] = str(svg_default_dict[style_key])
return result
|
|
78,524 | 266,709 | 110 | test/lib/ansible_test/_internal/bootstrap.py | 22 | 13 | def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]]
return dict(
| ansible-test - Code cleanup and refactoring. (#77169)
* Remove unnecessary PyCharm ignores.
* Ignore intentional undefined attribute usage.
* Add missing type hints. Fix existing type hints.
* Fix docstrings and comments.
* Use function to register completion handler.
* Pass strings to display functions.
* Fix CompositeAction handling of dest argument.
* Use consistent types in expressions/assignments.
* Use custom function to keep linters happy.
* Add missing raise for custom exception.
* Clean up key/value type handling in cloud plugins.
* Use dataclass instead of dict for results.
* Add custom type_guard function to check lists.
* Ignore return type that can't be checked (yet).
* Avoid changing types on local variables. | get_variables | a06fa496d3f837cca3c437ab6e9858525633d147 | ansible | bootstrap.py | 10 | 9 | https://github.com/ansible/ansible.git | 2 | 56 | 0 | 22 | 87 | Python | {
"docstring": "The variables to template in the bootstrapping script.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]]
return dict(
bootstrap_type=self.bootstrap_type,
controller='yes' if self.controller else '',
python_versions=self.python_versions,
ssh_key_type=self.ssh_key.KEY_TYPE,
ssh_private_key=self.ssh_key.key_contents,
ssh_public_key=self.ssh_key.pub_contents,
)
|
|
54,718 | 217,321 | 224 | python3.10.4/Lib/enum.py | 71 | 16 | def _create_pseudo_member_(cls, value):
pseudo_member = cls._value2member_map_.get(value, None)
if pseudo_member is None:
| add python 3.10.4 for windows | _create_pseudo_member_ | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | enum.py | 14 | 11 | https://github.com/XX-net/XX-Net.git | 3 | 83 | 0 | 54 | 136 | Python | {
"docstring": "\n Create a composite member iff value contains only members.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def _create_pseudo_member_(cls, value):
pseudo_member = cls._value2member_map_.get(value, None)
if pseudo_member is None:
# verify all bits are accounted for
_, extra_flags = _decompose(cls, value)
if extra_flags:
raise ValueError("%r is not a valid %s" % (value, cls.__qualname__))
# construct a singleton enum pseudo-member
pseudo_member = object.__new__(cls)
pseudo_member._name_ = None
pseudo_member._value_ = value
# use setdefault in case another thread already created a composite
# with this value
pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member)
return pseudo_member
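# Combining Flag members is what produces these composite pseudo-members:
from enum import Flag

class Perm(Flag):
    R = 4
    W = 2
    X = 1

combo = Perm.R | Perm.W   # value 6 has no named member, so a pseudo-member is built
print(combo)              # e.g. Perm.R|W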
|
|
75,639 | 259,201 | 237 | sklearn/preprocessing/_encoders.py | 78 | 26 | def _identify_infrequent(self, category_count, n_samples, col_idx):
if isinstance(self.min_frequency, numbers.Integral):
infrequent_mask = category_count < self.min_frequency
elif isinstance(self.min_frequency, numbers.Real):
min_frequency_abs = n_samples * self.min_frequency
infrequent_mask = category_count < min_frequency_abs
else:
infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)
n_current_features = category_count.size - infrequent_mask.sum() + 1
if self.max_categories is not None and self.max_categories < n_current_features:
# stable sort to preserve original co | ENH Adds infrequent categories to OneHotEncoder (#16018)
* ENH Completely adds infrequent categories
* STY Linting
* STY Linting
* DOC Improves wording
* DOC Lint
* BUG Fixes
* CLN Address comments
* CLN Address comments
* DOC Uses math to description float min_frequency
* DOC Adds comment regarding drop
* BUG Fixes method name
* DOC Clearer docstring
* TST Adds more tests
* FIX Fixes merge
* CLN More pythonic
* CLN Address comments
* STY Flake8
* CLN Address comments
* DOC Fix
* MRG
* WIP
* ENH Address comments
* STY Fix
* ENH Use function call instead of property
* ENH Adds counts feature
* CLN Rename variables
* DOC More details
* CLN Remove unneeded line
* CLN Less lines is less complicated
* CLN Less diffs
* CLN Improves readability
* BUG Fix
* CLN Address comments
* TST Fix
* CLN Address comments
* CLN Address comments
* CLN Move docstring to userguide
* DOC Better wrapping
* TST Adds test to handle_unknown='error'
* ENH Spelling error in docstring
* BUG Fixes counter with nan values
* BUG Removes unneeded test
* BUG Fixes issue
* ENH Sync with main
* DOC Correct settings
* DOC Adds docstring
* DOC Improve user guide
* DOC Move to 1.0
* DOC Update docs
* TST Remove test
* DOC Update docstring
* STY Linting
* DOC Address comments
* ENH Neater code
* DOC Update explanation for auto
* Update sklearn/preprocessing/_encoders.py
Co-authored-by: Roman Yurchak <[email protected]>
* TST Uses docstring instead of comments
* TST Remove call to fit
* TST Spelling error
* ENH Adds support for drop + infrequent categories
* ENH Adds infrequent_if_exist option
* DOC Address comments for user guide
* DOC Address comments for whats_new
* DOC Update docstring based on comments
* CLN Update test with suggestions
* ENH Adds computed property infrequent_categories_
* DOC Adds where the infrequent column is located
* TST Adds more test for infrequent_categories_
* DOC Adds docstring for _compute_drop_idx
* CLN Moves _convert_to_infrequent_idx into its own method
* TST Increases test coverage
* TST Adds failing test
* CLN Careful consideration of dropped and inverse_transform
* STY Linting
* DOC Adds docstring about dropping infrequent
* DOC Uses only
* DOC Numpydoc
* TST Includes test for get_feature_names_out
* DOC Move whats new
* DOC Address docstring comments
* DOC Docstring changes
* TST Better comments
* TST Adds check for handle_unknown='ignore' for infrequent
* CLN Make _infrequent_indices private
* CLN Change min_frequency default to None
* DOC Adds comments
* ENH adds support for max_categories=1
* ENH Describe lexicon ordering for ties
* DOC Better docstring
* STY Fix
* CLN Error when explicitly dropping an infrequent category
* STY Grammar
Co-authored-by: Joel Nothman <[email protected]>
Co-authored-by: Roman Yurchak <[email protected]>
Co-authored-by: Guillaume Lemaitre <[email protected]> | _identify_infrequent | 7f0006c8aad1a09621ad19c3db19c3ff0555a183 | scikit-learn | _encoders.py | 13 | 16 | https://github.com/scikit-learn/scikit-learn.git | 6 | 146 | 0 | 56 | 227 | Python | {
"docstring": "Compute the infrequent indices.\n\n Parameters\n ----------\n category_count : ndarray of shape (n_cardinality,)\n Category counts.\n\n n_samples : int\n Number of samples.\n\n col_idx : int\n Index of the current category. Only used for the error message.\n\n Returns\n -------\n output : ndarray of shape (n_infrequent_categories,) or None\n If there are infrequent categories, indices of infrequent\n categories. Otherwise None.\n ",
"language": "en",
"n_whitespaces": 173,
"n_words": 55,
"vocab_size": 41
} | def _identify_infrequent(self, category_count, n_samples, col_idx):
if isinstance(self.min_frequency, numbers.Integral):
infrequent_mask = category_count < self.min_frequency
elif isinstance(self.min_frequency, numbers.Real):
min_frequency_abs = n_samples * self.min_frequency
infrequent_mask = category_count < min_frequency_abs
else:
infrequent_mask = np.zeros(category_count.shape[0], dtype=bool)
n_current_features = category_count.size - infrequent_mask.sum() + 1
if self.max_categories is not None and self.max_categories < n_current_features:
# stable sort to preserve original count order
smallest_levels = np.argsort(category_count, kind="mergesort")[
: -self.max_categories + 1
]
infrequent_mask[smallest_levels] = True
output = np.flatnonzero(infrequent_mask)
return output if output.size > 0 else None
|
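A quick usage sketch of the public behaviour this helper feeds into, assuming scikit-learn >= 1.1 (where OneHotEncoder exposes min_frequency, max_categories and infrequent_categories_); the toy data below is illustrative only.

import numpy as np
from sklearn.preprocessing import OneHotEncoder

# "c" and "d" each appear once, below min_frequency=2, so they are grouped
# into a single infrequent output column.
X = np.array([["a"], ["a"], ["a"], ["b"], ["b"], ["c"], ["d"]])
enc = OneHotEncoder(min_frequency=2, handle_unknown="infrequent_if_exist")
encoded = enc.fit_transform(X).toarray()
print(encoded.shape)               # (7, 3): one column each for "a", "b" and the infrequent bucket
print(enc.infrequent_categories_)  # [array(['c', 'd'], dtype=object)]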
|
8,405 | 44,892 | 76 | airflow/providers/google/cloud/hooks/kubernetes_engine.py | 27 | 9 | def get_conn(self) -> container_v1.ClusterManagerClient:
if self._client is None:
credentials = self._get_credentials()
self._client = container_v1.ClusterManagerClient(credentials=credent | Extract ClientInfo to module level (#21554) | get_conn | 1b568d73e1dfb838a3a0446e3a6063b9f27f04b8 | airflow | kubernetes_engine.py | 11 | 10 | https://github.com/apache/airflow.git | 2 | 44 | 0 | 23 | 74 | Python | {
"docstring": "\n Returns ClusterManagerCLinet object.\n\n :rtype: google.cloud.container_v1.ClusterManagerClient\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 5,
"vocab_size": 5
} | def get_conn(self) -> container_v1.ClusterManagerClient:
if self._client is None:
credentials = self._get_credentials()
self._client = container_v1.ClusterManagerClient(credentials=credentials, client_info=CLIENT_INFO)
return self._client
# To preserve backward compatibility
# TODO: remove one day |
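The hook builds the Cloud client once and then reuses it on every call. A generic, self-contained sketch of that lazy-initialization pattern (the factory argument is a stand-in for the google-cloud client constructor, not part of the Airflow API):

class CachedClientHook:
    """Builds an expensive client on first use and reuses it afterwards."""

    def __init__(self, factory):
        self._factory = factory
        self._client = None

    def get_conn(self):
        if self._client is None:
            self._client = self._factory()
        return self._client

hook = CachedClientHook(dict)  # `dict` stands in for a real client constructor
assert hook.get_conn() is hook.get_conn()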
|
55,985 | 220,414 | 60 | python3.10.4/Lib/asyncio/coroutines.py | 28 | 15 | def iscoroutinefunction(func):
return (inspect.isco | add python 3.10.4 for windows | iscoroutinefunction | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | coroutines.py | 11 | 3 | https://github.com/XX-net/XX-Net.git | 2 | 26 | 0 | 26 | 81 | Python | {
"docstring": "Return True if func is a decorated coroutine function.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def iscoroutinefunction(func):
return (inspect.iscoroutinefunction(func) or
getattr(func, '_is_coroutine', None) is _is_coroutine)
# Prioritize native coroutine check to speed-up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
collections.abc.Coroutine, CoroWrapper)
_iscoroutine_typecache = set()
|
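A short illustration of what this check reports for a native coroutine function versus a plain callable (standard library only, no extra assumptions):

import asyncio

async def fetch():
    return 42

def plain():
    return 42

print(asyncio.iscoroutinefunction(fetch))  # True: defined with `async def`
print(asyncio.iscoroutinefunction(plain))  # False: ordinary function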
|
115,428 | 316,852 | 49 | homeassistant/helpers/storage.py | 17 | 7 | async def async_load(self) -> _T | None:
if self._load_task is None:
self._load_task = self.hass.async_create_task(self._async_load())
return await self._load_task
| Make Store a generic class (#74617) | async_load | 16900dcef15bdb9016feabd12bfec94d61ed4df6 | core | storage.py | 12 | 12 | https://github.com/home-assistant/core.git | 2 | 38 | 0 | 14 | 65 | Python | {
"docstring": "Load data.\n\n If the expected version and minor version do not match the given versions, the\n migrate function will be invoked with migrate_func(version, minor_version, config).\n\n Will ensure that when a call comes in while another one is in progress,\n the second call will wait and return the result of the first call.\n ",
"language": "en",
"n_whitespaces": 87,
"n_words": 52,
"vocab_size": 42
} | async def async_load(self) -> _T | None:
if self._load_task is None:
self._load_task = self.hass.async_create_task(self._async_load())
return await self._load_task
|
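The method shares a single in-flight load among concurrent callers by caching the task. A standalone sketch of the same idea with plain asyncio (Loader and the sleep are stand-ins for Home Assistant's Store and its disk I/O):

import asyncio

class Loader:
    def __init__(self):
        self._load_task = None
        self._loads = 0

    async def _do_load(self):
        self._loads += 1
        await asyncio.sleep(0.05)  # stands in for slow disk I/O
        return {"data": 1}

    async def load(self):
        if self._load_task is None:
            self._load_task = asyncio.create_task(self._do_load())
        return await self._load_task  # awaiting a finished task simply returns its result

async def main():
    loader = Loader()
    a, b = await asyncio.gather(loader.load(), loader.load())
    assert a is b and loader._loads == 1  # both callers shared one load

asyncio.run(main())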
|
108,897 | 310,210 | 81 | homeassistant/components/vera/lock.py | 28 | 13 | def extra_state_attributes(self) -> dict[str, Any] | None:
data = super().extra | Remove vera from mypy ignore list (#64474)
* Remove vera from mypy ignore list
* Fix pylint | extra_state_attributes | 03bf2cdd56eb9a0a9ed56d7afb700d5f7d9cf75e | core | lock.py | 10 | 13 | https://github.com/home-assistant/core.git | 3 | 63 | 0 | 22 | 101 | Python | {
"docstring": "Who unlocked the lock and did a low battery alert fire.\n\n Reports on the previous poll cycle.\n changed_by_name is a string like 'Bob'.\n low_battery is 1 if an alert fired, 0 otherwise.\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 32,
"vocab_size": 28
} | def extra_state_attributes(self) -> dict[str, Any] | None:
data = super().extra_state_attributes or {}
last_user = self.vera_device.get_last_user_alert()
if last_user is not None:
data[ATTR_LAST_USER_NAME] = last_user[1]
data[ATTR_LOW_BATTERY] = self.vera_device.get_low_battery_alert()
return data
|
|
52,421 | 208,620 | 153 | IPython/core/interactiveshell.py | 51 | 16 | def magic(self, arg_s):
warnings.warn(
"`magic(...)` is deprecated since IPython 0.13 (warning added in "
"8.1), use run_line_magic(magic_name, parameter_s).",
DeprecationWarning,
stacklevel=2,
)
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Th | Update old deprecation | magic | e306c9d3f707de42b47a1e7c4c8034d6862fba5f | ipython | interactiveshell.py | 9 | 10 | https://github.com/ipython/ipython.git | 1 | 57 | 0 | 48 | 97 | Python | {
"docstring": "\n DEPRECATED\n\n Deprecated since IPython 0.13 (warning added in\n 8.1), use run_line_magic(magic_name, parameter_s).\n\n Call a magic function by name.\n\n Input: a string containing the name of the magic function to call and\n any additional arguments to be passed to the magic.\n\n magic('name -opt foo bar') is equivalent to typing at the ipython\n prompt:\n\n In[1]: %name -opt foo bar\n\n To call a magic without arguments, simply use magic('name').\n\n This provides a proper Python function to call IPython's magics in any\n valid Python code you can type at the interpreter, including loops and\n compound statements.\n ",
"language": "en",
"n_whitespaces": 191,
"n_words": 92,
"vocab_size": 67
} | def magic(self, arg_s):
warnings.warn(
"`magic(...)` is deprecated since IPython 0.13 (warning added in "
"8.1), use run_line_magic(magic_name, parameter_s).",
DeprecationWarning,
stacklevel=2,
)
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
|
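The shim warns with stacklevel=2 so the DeprecationWarning points at the caller rather than at the wrapper itself. A minimal generic sketch of that pattern (old_api and new_api are made-up names, not IPython APIs):

import warnings

def new_api(x):
    return x * 2

def old_api(x):
    """Deprecated shim kept for backwards compatibility."""
    warnings.warn(
        "`old_api(...)` is deprecated, use new_api(x).",
        DeprecationWarning,
        stacklevel=2,  # attribute the warning to the caller, not this shim
    )
    return new_api(x)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert old_api(3) == 6
assert caught and caught[0].category is DeprecationWarning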
|
12,211 | 60,572 | 49 | .venv/lib/python3.8/site-packages/pip/_internal/commands/__init__.py | 28 | 13 | def create_command(name, **kwargs):
# type: (str, **Any) -> Command
module_path, class_name, summary = commands_dict[name]
module = importlib.import_module(module_path)
command_class = getattr(module, class_name)
command = command_cl | upd; format | create_command | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | __init__.py | 9 | 6 | https://github.com/jindongwang/transferlearning.git | 1 | 52 | 0 | 24 | 82 | Python | {
"docstring": "\n Create an instance of the Command class with the given name.\n ",
"language": "en",
"n_whitespaces": 18,
"n_words": 11,
"vocab_size": 10
} | def create_command(name, **kwargs):
# type: (str, **Any) -> Command
module_path, class_name, summary = commands_dict[name]
module = importlib.import_module(module_path)
command_class = getattr(module, class_name)
command = command_class(name=name, summary=summary, **kwargs)
return command
|
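A compact sketch of the same lazy lookup create_command performs: resolve a (module_path, class_name) pair from a registry with importlib and instantiate it. The registry contents here are illustrative, not pip's actual command table.

import importlib

REGISTRY = {"counter": ("collections", "Counter")}

def create(name, *args, **kwargs):
    module_path, class_name = REGISTRY[name]
    module = importlib.import_module(module_path)
    cls = getattr(module, class_name)
    return cls(*args, **kwargs)

print(create("counter", "banana"))  # Counter({'a': 3, 'n': 2, 'b': 1})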
|
29,316 | 130,580 | 34 | python/ray/data/dataset.py | 13 | 11 | def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]:
block_to_df = cached_remote_fn(_block_to_df)
return [block_to_df.remote(b | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | to_pandas_refs | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | dataset.py | 10 | 15 | https://github.com/ray-project/ray.git | 2 | 39 | 0 | 13 | 65 | Python | {
"docstring": "Convert this dataset into a distributed set of Pandas dataframes.\n\n This is only supported for datasets convertible to Arrow records.\n This function induces a copy of the data. For zero-copy access to the\n underlying data, consider using ``.to_arrow()`` or\n ``.get_internal_block_refs()``.\n\n Time complexity: O(dataset size / parallelism)\n\n Returns:\n A list of remote Pandas dataframes created from this dataset.\n ",
"language": "en",
"n_whitespaces": 117,
"n_words": 57,
"vocab_size": 49
} | def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]:
block_to_df = cached_remote_fn(_block_to_df)
return [block_to_df.remote(block) for block in self._blocks.get_blocks()]
|
|
@array_function_dispatch(_hvdsplit_dispatcher) | 38,466 | 160,008 | 64 | numpy/lib/shape_base.py | 32 | 9 | def hsplit(ary, indices_or_sections):
if _nx.ndim(ary) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if ary.ndim > 1:
return split(ary, indices_or_sections, 1)
else:
retur | DOC: Include special case in `hsplit` doc (#20974) | hsplit | 1cacb2ffb1113167a4995f4f4c183f9a8356c2f0 | numpy | shape_base.py | 10 | 7 | https://github.com/numpy/numpy.git | 3 | 50 | 1 | 28 | 89 | Python | {
"docstring": "\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. `hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis except for 1-D arrays, where it is split at ``axis=0``.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float64)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[0., 1.]],\n [[4., 5.]]]),\n array([[[2., 3.]],\n [[6., 7.]]])]\n\n With a 1-D array, the split is along axis 0.\n\n >>> x = np.array([0, 1, 2, 3, 4, 5])\n >>> np.hsplit(x, 2)\n [array([0, 1, 2]), array([3, 4, 5])]\n\n ",
"language": "en",
"n_whitespaces": 562,
"n_words": 203,
"vocab_size": 116
} | def hsplit(ary, indices_or_sections):
if _nx.ndim(ary) == 0:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
if ary.ndim > 1:
return split(ary, indices_or_sections, 1)
else:
return split(ary, indices_or_sections, 0)
@array_function_dispatch(_hvdsplit_dispatcher) |
13,748 | 64,884 | 36 | erpnext/accounts/doctype/loyalty_program/test_loyalty_program.py | 59 | 27 | def get_points_earned(self):
def get_returned_amount():
returned_amount = frappe.db.sql(
,
self.name,
)
return abs(flt(returned_amount[0][0])) if returned_amount else 0
lp_details = get_loyalty_program_details_with_points(
self.customer,
company=self.company,
loyalty_program=self.loyalty_program,
expiry_date=self.posting_date,
include_expired_entry=True,
)
if (
lp_details
and getdate(lp_details. | style: format code with black | get_points_earned | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | test_loyalty_program.py | 14 | 18 | https://github.com/frappe/erpnext.git | 6 | 114 | 0 | 42 | 231 | Python | {
"docstring": "\n\t\t\tselect sum(grand_total)\n\t\t\tfrom `tabSales Invoice`\n\t\t\twhere docstatus=1 and is_return=1 and ifnull(return_against, '')=%s\n\t\t",
"language": "en",
"n_whitespaces": 9,
"n_words": 12,
"vocab_size": 11
} | def get_points_earned(self):
def get_returned_amount():
returned_amount = frappe.db.sql(
,
self.name,
)
return abs(flt(returned_amount[0][0])) if returned_amount else 0
lp_details = get_loyalty_program_details_with_points(
self.customer,
company=self.company,
loyalty_program=self.loyalty_program,
expiry_date=self.posting_date,
include_expired_entry=True,
)
if (
lp_details
and getdate(lp_details.from_date) <= getdate(self.posting_date)
and (not lp_details.to_date or getdate(lp_details.to_date) >= getdate(self.posting_date))
):
returned_amount = get_returned_amount()
eligible_amount = flt(self.grand_total) - cint(self.loyalty_amount) - returned_amount
points_earned = cint(eligible_amount / lp_details.collection_factor)
return points_earned or 0
|
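A toy walk-through of the points arithmetic above, with made-up figures (every number below is an assumption, not a value from the record):

grand_total = 1500.0     # invoice total
loyalty_amount = 100     # portion already settled with loyalty points
returned_amount = 200.0  # credited back via return invoices
collection_factor = 50   # currency units required per point

eligible_amount = grand_total - loyalty_amount - returned_amount  # 1200.0
points_earned = int(eligible_amount / collection_factor)          # 24
print(points_earned)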
|
20,326 | 100,875 | 119 | lib/model/losses_plaid.py | 49 | 17 | def _get_kernel(self) -> plaidml.tile.Value:
coords = np.arange(self._filter_size, dtype="float32")
coords -= (self._filter_size - 1) / 2.
kernel = np.square(coords)
kernel *= -0.5 / np.square(self._filter_sigma)
kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1))
kernel = K.constant(np.reshape(kernel, (1, -1)))
kernel = K.softmax(kernel)
kernel = K.reshape(kernel, ( | SSIM Updates
- Standardize DSSIM Function
- Implement MSSIM function for AMD | _get_kernel | 04337e0c5efd442c1ce3e2da193dd8749f1e30d8 | faceswap | losses_plaid.py | 12 | 17 | https://github.com/deepfakes/faceswap.git | 1 | 143 | 0 | 33 | 214 | Python | {
"docstring": " Obtain the base kernel for performing depthwise convolution.\n\n Returns\n -------\n :class:`plaidml.tile.Value`\n The gaussian kernel based on selected size and sigma\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 20,
"vocab_size": 19
} | def _get_kernel(self) -> plaidml.tile.Value:
coords = np.arange(self._filter_size, dtype="float32")
coords -= (self._filter_size - 1) / 2.
kernel = np.square(coords)
kernel *= -0.5 / np.square(self._filter_sigma)
kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1))
kernel = K.constant(np.reshape(kernel, (1, -1)))
kernel = K.softmax(kernel)
kernel = K.reshape(kernel, (self._filter_size, self._filter_size, 1, 1))
return kernel
|
|
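The same Gaussian-kernel construction can be followed in plain NumPy (an outer sum of squared, scaled offsets pushed through a softmax) without the Keras/PlaidML tensor wrappers. This is a sketch for clarity, not the backend-specific implementation:

import numpy as np

def gaussian_kernel(size=5, sigma=1.5):
    coords = np.arange(size, dtype="float32") - (size - 1) / 2.0
    logits = -0.5 * np.square(coords) / sigma**2
    logits = logits.reshape(1, -1) + logits.reshape(-1, 1)  # 2-D log-weights
    weights = np.exp(logits - logits.max())
    weights /= weights.sum()                                # softmax over all taps
    return weights.reshape(size, size, 1, 1)                # depthwise-conv layout

kernel = gaussian_kernel()
print(kernel.shape)         # (5, 5, 1, 1)
print(float(kernel.sum()))  # ~1.0: the softmax normalises the taps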
15,499 | 70,351 | 55 | wagtail/core/tests/test_blocks.py | 13 | 13 | def test_default_default(self):
block = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
self.assertEqual(list(block.get_default()), ['chocolate'])
block.set_name('test_shoppinglistblock')
| Implement a ListValue type for ListBlocks | test_default_default | 4a848bfb4e3ec1a84a3d36fda577c1ed784de498 | wagtail | test_blocks.py | 12 | 6 | https://github.com/wagtail/wagtail.git | 1 | 65 | 0 | 12 | 115 | Python | {
"docstring": "\n if no explicit 'default' is set on the ListBlock, it should fall back on\n a single instance of the child block in its default state.\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 25,
"vocab_size": 23
} | def test_default_default(self):
block = blocks.ListBlock(blocks.CharBlock(default='chocolate'))
self.assertEqual(list(block.get_default()), ['chocolate'])
block.set_name('test_shoppinglistblock')
js_args = ListBlockAdapter().js_args(block)
self.assertEqual(js_args[2], 'chocolate')
|
|
39,674 | 165,521 | 39 | pandas/io/formats/xml.py | 14 | 5 | def convert_empty_str_key(self) -> None:
| CLN/DOC: typos (#46328)
* fix typos
* fix typo
* fix typo
* fix typo | convert_empty_str_key | a72fa1b400234d3a05342f17c3c0b1e3993a6bd8 | pandas | xml.py | 11 | 9 | https://github.com/pandas-dev/pandas.git | 3 | 40 | 0 | 14 | 71 | Python | {
"docstring": "\n Replace zero-length string in `namespaces`.\n\n This method will replace '' with None to align to `lxml`\n requirement that empty string prefixes are not allowed.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 24,
"vocab_size": 22
} | def convert_empty_str_key(self) -> None:
if self.namespaces and "" in self.namespaces.keys():
self.namespaces[None] = self.namespaces.pop("", "default")
|
|
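What that rename looks like on a plain dict: lxml expects the default namespace to be keyed by None rather than by an empty string (the URIs below are placeholders):

namespaces = {"": "http://example.com/default", "x": "http://example.com/extra"}
if "" in namespaces:
    namespaces[None] = namespaces.pop("")
print(namespaces)  # {'x': 'http://example.com/extra', None: 'http://example.com/default'}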
16,399 | 75,353 | 149 | wagtail/images/tests/tests.py | 30 | 13 | def test_get_with_custom_key_using_default_key(self):
# Generate signature
signature = generate_signature(self.image.id, "fill-800x600")
# Get the image
response = self.client.get(
reverse(
"wagtailimages_serve_custom_key",
args=(signature, self.image.id, "fill-800x600"),
| Reformat with black | test_get_with_custom_key_using_default_key | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | tests.py | 15 | 10 | https://github.com/wagtail/wagtail.git | 1 | 58 | 0 | 24 | 100 | Python | {
"docstring": "\n Test that that the key can be changed on the view\n\n This tests that the default key no longer works when the key is changed on the view\n ",
"language": "en",
"n_whitespaces": 50,
"n_words": 28,
"vocab_size": 17
} | def test_get_with_custom_key_using_default_key(self):
# Generate signature
signature = generate_signature(self.image.id, "fill-800x600")
# Get the image
response = self.client.get(
reverse(
"wagtailimages_serve_custom_key",
args=(signature, self.image.id, "fill-800x600"),
)
+ "test.png"
)
# Check response
self.assertEqual(response.status_code, 403)
|
|
11,033 | 54,348 | 22 | tests/test_engine.py | 8 | 7 | async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client):
started = anyio.Event()
| Fix engine tests; move function to other crash handleres | test_timeouts_do_not_hide_crashes | 3e657b429b967fa532d2f97ed7e6809112db3107 | prefect | test_engine.py | 8 | 24 | https://github.com/PrefectHQ/prefect.git | 1 | 121 | 0 | 8 | 30 | Python | {
"docstring": "\n Since timeouts capture anyio cancellations, we want to ensure that something\n still ends up in a 'Crashed' state if it is cancelled independently from our\n timeout cancellation.\n ",
"language": "en",
"n_whitespaces": 56,
"n_words": 27,
"vocab_size": 27
} | async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client):
started = anyio.Event()
|
|
24,437 | 111,556 | 258 | spacy/tests/pipeline/test_pipe_methods.py | 72 | 17 | def test_enable_disable_conflict_with_config():
nlp = English()
nlp.add_pipe("tagger")
nlp.add_pipe("senter")
nlp.add_pipe("sentencizer")
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
# Expected to fail, as config and arguments conflict.
with | Simplify and clarify enable/disable behavior of spacy.load() (#11459)
* Change enable/disable behavior so that arguments take precedence over config options. Extend error message on conflict. Add warning message in case of overwriting config option with arguments.
* Fix tests in test_serialize_pipeline.py to reflect changes to handling of enable/disable.
* Fix type issue.
* Move comment.
* Move comment.
* Issue UserWarning instead of printing wasabi message. Adjust test.
* Added pytest.warns(UserWarning) for expected warning to fix tests.
* Update warning message.
* Move type handling out of fetch_pipes_status().
* Add global variable for default value. Use id() to determine whether used values are default value.
* Fix default value for disable.
* Rename DEFAULT_PIPE_STATUS to _DEFAULT_EMPTY_PIPES. | test_enable_disable_conflict_with_config | aea16719be04d4d6ab889cd20fe0e323b2c7ffee | spaCy | test_pipe_methods.py | 18 | 19 | https://github.com/explosion/spaCy.git | 1 | 127 | 0 | 49 | 235 | Python | {
"docstring": "Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_enable_disable_conflict_with_config():
nlp = English()
nlp.add_pipe("tagger")
nlp.add_pipe("senter")
nlp.add_pipe("sentencizer")
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
# Expected to fail, as config and arguments conflict.
with pytest.raises(ValueError):
spacy.load(
tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}}
)
# Expected to succeed without warning due to the lack of a conflicting config option.
spacy.load(tmp_dir, enable=["tagger"])
# Expected to succeed with a warning, as disable=[] should override the config setting.
with pytest.warns(UserWarning):
spacy.load(
tmp_dir,
enable=["tagger"],
disable=[],
config={"nlp": {"disabled": ["senter"]}},
)
|
|
@pytest.mark.asyncio | 28,233 | 126,689 | 51 | dashboard/modules/job/tests/test_job_manager.py | 23 | 13 | async def test_logs_streaming(job_manager):
stream_logs_script =
stream_logs_cmd = f'python -c "{stream_logs_script}"'
job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd)
await async_wait_for_condition(
lambda: "STREAMED" i | Convert job_manager to be async (#27123)
Updates jobs api
Updates snapshot api
Updates state api
Increases jobs api version to 2
Signed-off-by: Alan Guo [email protected]
Why are these changes needed?
follow-up for #25902 (comment) | test_logs_streaming | 326b5bd1acc6d3d00ab0546e4ae45da6bed501f7 | ray | test_job_manager.py | 12 | 13 | https://github.com/ray-project/ray.git | 1 | 44 | 1 | 20 | 95 | Python | {
"docstring": "Test that logs are streamed during the job, not just at the end.\nimport time\nprint('STREAMED')\nwhile True:\n time.sleep(1)\n",
"language": "en",
"n_whitespaces": 18,
"n_words": 19,
"vocab_size": 18
} | async def test_logs_streaming(job_manager):
stream_logs_script =
stream_logs_cmd = f'python -c "{stream_logs_script}"'
job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd)
await async_wait_for_condition(
lambda: "STREAMED" in job_manager.get_job_logs(job_id)
)
job_manager.stop_job(job_id)
@pytest.mark.asyncio |
80,978 | 272,209 | 159 | keras/integration_test/gradient_checkpoint_test.py | 60 | 31 | def _train_no_recompute(n_steps):
img_dim, n_channels, batch_size = 256, 1, 4
x, y = _get_dummy_data(img_dim, n_channels, batch_size)
model = _get_big_cnn_model(
img_dim, n_channels, num_partitions=3, blocks_per_partition=2
)
optimizer = optimizers.SGD()
losses = []
tr_vars = model.traina | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _train_no_recompute | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | gradient_checkpoint_test.py | 12 | 18 | https://github.com/keras-team/keras.git | 2 | 123 | 0 | 46 | 195 | Python | {
"docstring": "Trains a single large model without gradient checkpointing.",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | def _train_no_recompute(n_steps):
img_dim, n_channels, batch_size = 256, 1, 4
x, y = _get_dummy_data(img_dim, n_channels, batch_size)
model = _get_big_cnn_model(
img_dim, n_channels, num_partitions=3, blocks_per_partition=2
)
optimizer = optimizers.SGD()
losses = []
tr_vars = model.trainable_variables
for _ in range(n_steps):
with tf.GradientTape() as tape:
logits = model(x)
loss = _compute_loss(logits, y)
losses.append(loss)
grads = tape.gradient(loss, tr_vars) # tr_vars
optimizer.apply_gradients(zip(grads, tr_vars))
del grads
return losses
|
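A self-contained miniature of the same GradientTape training loop, assuming TensorFlow 2.x. A toy dense model replaces the large partitioned CNN, and the gradient-checkpointed counterpart would wrap model blocks with tf.recompute_grad:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.SGD()
loss_fn = tf.keras.losses.MeanSquaredError()
x = tf.random.normal((4, 8))
y = tf.random.normal((4, 1))

losses = []
for _ in range(3):
    with tf.GradientTape() as tape:
        loss = loss_fn(y, model(x))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    losses.append(float(loss))
print(losses)  # one loss value per step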
|
80,457 | 270,367 | 44 | keras/distribute/distributed_training_utils_v1.py | 18 | 9 | def _make_replica_execution_function(model, mode):
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _make_replica_execution_function | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | distributed_training_utils_v1.py | 10 | 11 | https://github.com/keras-team/keras.git | 4 | 60 | 0 | 14 | 57 | Python | {
"docstring": "A single step of the distributed execution on a replica.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def _make_replica_execution_function(model, mode):
if mode == ModeKeys.TRAIN:
func = model.train_on_batch
elif mode == ModeKeys.TEST:
func = model.test_on_batch
else:
|