code | docstring
---|---|
def find_slot(self, wanted, slots=None):
"""
Searches the given slots or, if not given,
active hotbar slot, hotbar, inventory, open window in this order.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
Returns:
Optional[Slot]: The first slot containing the item
or None if not found.
"""
for slot in self.find_slots(wanted, slots):
return slot
return None | Searches the given slots or, if not given,
active hotbar slot, hotbar, inventory, open window in this order.
Args:
wanted: function(Slot) or Slot or itemID or (itemID, metadata)
Returns:
Optional[Slot]: The first slot containing the item
or None if not found. |
def on_open(self):
"""
Shows an open file dialog and opens the file if the dialog was
accepted.
"""
filename, filter = QtWidgets.QFileDialog.getOpenFileName(
self, _('Open'))
if filename:
self.open_file(filename) | Shows an open file dialog and opens the file if the dialog was
accepted. |
def setColumnCount( self, count ):
"""
Sets the number of columns used for this tree widget, updating the \
column resizing modes to stretch the first column.
:param count | <int>
"""
super(XColorTreeWidget, self).setColumnCount(count)
header = self.header()
header.setResizeMode(0, header.Stretch)
for i in range(1, count):
header.setResizeMode(i, header.Fixed) | Sets the number of columns used for this tree widget, updating the \
column resizing modes to stretch the first column.
:param count | <int> |
def add_unique_rule(self, rule, opname, arg_count, customize):
"""Add rule to grammar, but only if it hasn't been added previously
opname and arg_count are used in the customize() semantic
actions to add the semantic action rule. arg_count is
used in custom opcodes like MAKE_FUNCTION to indicate how
many arguments it has. Often it is not used.
"""
if rule not in self.new_rules:
# print("XXX ", rule) # debug
self.new_rules.add(rule)
self.addRule(rule, nop_func)
customize[opname] = arg_count
pass
return | Add rule to grammar, but only if it hasn't been added previously
opname and arg_count are used in the customize() semantic
actions to add the semantic action rule. arg_count is
used in custom opcodes like MAKE_FUNCTION to indicate how
many arguments it has. Often it is not used. |
def ParseFromUnicode(self, value):
"""Parse a string into a client URN.
Convert case so that all URNs are of the form C.[0-9a-f].
Args:
value: string value to parse
"""
precondition.AssertType(value, Text)
value = value.strip()
super(ClientURN, self).ParseFromUnicode(value)
match = self.CLIENT_ID_RE.match(self._string_urn)
if not match:
raise type_info.TypeValueError("Client urn malformed: %s" % value)
clientid = match.group("clientid")
clientid_correctcase = "".join((clientid[0].upper(), clientid[1:].lower()))
self._string_urn = self._string_urn.replace(clientid, clientid_correctcase,
1) | Parse a string into a client URN.
Convert case so that all URNs are of the form C.[0-9a-f].
Args:
value: string value to parse |
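A minimal standalone sketch of the case normalization above; the regular expression used here is an assumption standing in for CLIENT_ID_RE, not the actual GRR pattern, and the URN value is made up.
import re
# Assumed stand-in for CLIENT_ID_RE; the real pattern lives elsewhere in the codebase.
CLIENT_ID_RE = re.compile(r"^(aff4:)?/*(?P<clientid>[cC]\.[0-9a-fA-F]{16})")
urn = "aff4:/c.21A043EB0BEEF114"
match = CLIENT_ID_RE.match(urn)
clientid = match.group("clientid")
clientid_correctcase = clientid[0].upper() + clientid[1:].lower()
print(urn.replace(clientid, clientid_correctcase, 1))  # aff4:/C.21a043eb0beef114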
def get_matrix(self, x1, x2=None):
"""
Get the covariance matrix at a given set or two of independent
coordinates.
:param x1: ``(nsamples,)`` or ``(nsamples, ndim)``
A list of samples.
:param x2: ``(nsamples,)`` or ``(nsamples, ndim)`` (optional)
A second list of samples. If this is given, the cross covariance
matrix is computed. Otherwise, the auto-covariance is evaluated.
"""
x1 = self.parse_samples(x1)
if x2 is None:
return self.kernel.get_value(x1)
x2 = self.parse_samples(x2)
return self.kernel.get_value(x1, x2) | Get the covariance matrix at a given set or two of independent
coordinates.
:param x1: ``(nsamples,)`` or ``(nsamples, ndim)``
A list of samples.
:param x2: ``(nsamples,)`` or ``(nsamples, ndim)`` (optional)
A second list of samples. If this is given, the cross covariance
matrix is computed. Otherwise, the auto-covariance is evaluated. |
def update(self, model_alias, code='general', name=None, order=None, display_filter=None):
"""
Update given tab
:param model_alias:
:param code:
:param name:
:param order:
:param display_filter:
:return:
"""
model_alias = self.get_model_alias(model_alias)
for item in self.tabs[model_alias]:
if item.code != code:
continue
if name:
item.name = name
if order:
item.order = order
if display_filter:
item.display_filter = display_filter
break
self.tabs[model_alias] = sorted(self.tabs[model_alias], key=lambda item: item.code if item.code else 999) | Update given tab
:param model_alias:
:param code:
:param name:
:param order:
:param display_filter:
:return: |
def _warn_deprecated_outside_JSONField(self): # pylint: disable=invalid-name
"""Certain methods will be moved to JSONField.
This warning marks calls when the object is not
derived from that class.
"""
if not isinstance(self, JSONField) and not self.warned:
warnings.warn(
"Deprecated. JSONifiable fields should derive from JSONField ({name})".format(name=self.name),
DeprecationWarning,
stacklevel=3
)
self.warned = True | Certain methods will be moved to JSONField.
This warning marks calls when the object is not
derived from that class. |
def delete(self, dict_name):
'''Delete an entire dictionary.
This operation on its own is atomic and does not require a
session lock, but a session lock is honored.
:param str dict_name: name of the dictionary to delete
:raises rejester.exceptions.LockError: if called with a session
lock, but the system does not currently have that lock; or if
called without a session lock but something else holds it
'''
conn = redis.Redis(connection_pool=self.pool)
script = conn.register_script('''
if redis.call("get", KEYS[1]) == ARGV[1]
then
redis.call("del", KEYS[2], KEYS[3])
return 0
else
return -1
end
''')
res = script(keys=[self._lock_name,
self._namespace(dict_name),
self._namespace(dict_name) + 'keys'],
args=[self._session_lock_identifier])
if res == -1:
raise LockError() | Delete an entire dictionary.
This operation on its own is atomic and does not require a
session lock, but a session lock is honored.
:param str dict_name: name of the dictionary to delete
:raises rejester.exceptions.LockError: if called with a session
lock, but the system does not currently have that lock; or if
called without a session lock but something else holds it |
async def sign_url(self, url, method=HASH):
"""
Sign a URL with this request's auth token
"""
token = await self.get_token()
if method == self.QUERY:
return patch_qs(url, {
settings.WEBVIEW_TOKEN_KEY: token,
})
elif method == self.HASH:
hash_id = 5
p = list(urlparse(url))
p[hash_id] = quote(token)
return urlunparse(p)
else:
raise ValueError(f'Invalid signing method "{method}"') | Sign a URL with this request's auth token |
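A self-contained sketch of the HASH branch above: the token is placed in the URL fragment, which is index 5 of the urlparse tuple. The URL and token values are made up for illustration.
from urllib.parse import urlparse, urlunparse, quote
url = "https://example.com/webview?page=1"
token = "abc/123"                 # made-up token
parts = list(urlparse(url))
parts[5] = quote(token)           # index 5 is the fragment component
print(urlunparse(parts))          # https://example.com/webview?page=1#abc%2F123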
def startProducing(self, consumer):
"""
Start a cooperative task which will read bytes from the input file and
write them to C{consumer}. Return a L{Deferred} which fires after all
bytes have been written.
@param consumer: Any L{IConsumer} provider
"""
self._task = self._cooperate(self._writeloop(consumer))
d = self._task.whenDone()
def maybeStopped(reason):
# IBodyProducer.startProducing's Deferred isn't supposed to fire if
# stopProducing is called.
reason.trap(task.TaskStopped)
return defer.Deferred()
d.addCallbacks(lambda ignored: None, maybeStopped)
return d | Start a cooperative task which will read bytes from the input file and
write them to C{consumer}. Return a L{Deferred} which fires after all
bytes have been written.
@param consumer: Any L{IConsumer} provider |
def _load_features_from_array(self, features):
""" Load feature data from a 2D ndarray on disk. """
self.feature_images = np.load(features)
self.feature_names = range(self.feature_images.shape[1]) | Load feature data from a 2D ndarray on disk. |
def register_seo_admin(admin_site, metadata_class):
"""Register the backends specified in Meta.backends with the admin."""
if metadata_class._meta.use_sites:
path_admin = SitePathMetadataAdmin
model_instance_admin = SiteModelInstanceMetadataAdmin
model_admin = SiteModelMetadataAdmin
view_admin = SiteViewMetadataAdmin
else:
path_admin = PathMetadataAdmin
model_instance_admin = ModelInstanceMetadataAdmin
model_admin = ModelMetadataAdmin
view_admin = ViewMetadataAdmin
def get_list_display():
return tuple(
name for name, obj in metadata_class._meta.elements.items()
if obj.editable)
backends = metadata_class._meta.backends
if 'model' in backends:
class ModelAdmin(model_admin):
form = get_model_form(metadata_class)
list_display = model_admin.list_display + get_list_display()
_register_admin(admin_site, metadata_class._meta.get_model('model'),
ModelAdmin)
if 'view' in backends:
class ViewAdmin(view_admin):
form = get_view_form(metadata_class)
list_display = view_admin.list_display + get_list_display()
_register_admin(admin_site, metadata_class._meta.get_model('view'),
ViewAdmin)
if 'path' in backends:
class PathAdmin(path_admin):
form = get_path_form(metadata_class)
list_display = path_admin.list_display + get_list_display()
_register_admin(admin_site, metadata_class._meta.get_model('path'),
PathAdmin)
if 'modelinstance' in backends:
class ModelInstanceAdmin(model_instance_admin):
form = get_modelinstance_form(metadata_class)
list_display = (model_instance_admin.list_display +
get_list_display())
_register_admin(admin_site,
metadata_class._meta.get_model('modelinstance'),
ModelInstanceAdmin) | Register the backends specified in Meta.backends with the admin. |
def draw_to_notebook(layers, **kwargs):
"""
Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options
"""
from IPython.display import Image
layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
else layers)
dot = make_pydot_graph(layers, **kwargs)
return Image(dot.create_png()) | Draws a network diagram in an IPython notebook
:parameters:
- layers : list or NeuralNet instance
List of layers or the neural net to draw.
- **kwargs : see the docstring of make_pydot_graph for other options |
def is_integer(self, value, strict=False):
"""Check whether value is an integer."""
if value is not None:
if isinstance(value, numbers.Number):
return
value = stringify(value)
if value is not None and value.isnumeric():
return
self.shout('value %r is not an integer', strict, value) | Check whether value is an integer. |
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Create Hipersocket (requires DPM mode)."""
assert wait_for_completion is True
cpc_oid = uri_parms[0]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
except KeyError:
raise InvalidResourceError(method, uri)
if not cpc.dpm_enabled:
raise CpcNotInDpmError(method, uri, cpc)
check_required_fields(method, uri, body, ['name'])
# We need to emulate the behavior of this POST to always create a
# hipersocket, but the add() method is used for adding all kinds of
# faked adapters to the faked HMC. So we need to specify the adapter
# type, but because the behavior of the Adapter resource object is
# that it only has its input properties set, we add the 'type'
# property on a copy of the input properties.
body2 = body.copy()
body2['type'] = 'hipersockets'
try:
new_adapter = cpc.adapters.add(body2)
except InputError as exc:
raise BadRequestError(method, uri, reason=5, message=str(exc))
return {'object-uri': new_adapter.uri} | Operation: Create Hipersocket (requires DPM mode). |
def sqlalch_datetime(dt):
"""Convert a SQLAlchemy datetime string to a datetime object."""
if isinstance(dt, str):
return datetime.strptime(dt, "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=UTC)
if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:
return dt.astimezone(UTC)
return dt.replace(tzinfo=UTC) | Convert a SQLAlchemy datetime string to a datetime object. |
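A usage sketch for the helper above, assuming the function is in scope and that the module-level UTC it references is datetime.timezone.utc.
from datetime import datetime, timezone
UTC = timezone.utc  # assumed definition of the UTC object used above
print(sqlalch_datetime("2021-06-01 12:30:00.123456"))
# 2021-06-01 12:30:00.123456+00:00
print(sqlalch_datetime(datetime(2021, 6, 1, 12, 30)))
# 2021-06-01 12:30:00+00:00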
def is_parent_of_gradebook(self, id_, gradebook_id):
"""Tests if an ``Id`` is a direct parent of a gradebook.
arg: id (osid.id.Id): an ``Id``
arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
return: (boolean) - ``true`` if this ``id`` is a parent of
``gradebook_id,`` ``false`` otherwise
raise: NotFound - ``gradebook_id`` is not found
raise: NullArgument - ``id`` or ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_parent_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_parent_of_catalog(id_=id_, catalog_id=gradebook_id)
return self._hierarchy_session.is_parent(id_=gradebook_id, parent_id=id_) | Tests if an ``Id`` is a direct parent of a gradebook.
arg: id (osid.id.Id): an ``Id``
arg: gradebook_id (osid.id.Id): the ``Id`` of a gradebook
return: (boolean) - ``true`` if this ``id`` is a parent of
``gradebook_id,`` ``false`` otherwise
raise: NotFound - ``gradebook_id`` is not found
raise: NullArgument - ``id`` or ``gradebook_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. |
def _normal_model(self, beta):
""" Creates the structure of the model (model matrices etc) for
a Normal family ARIMA model.
Parameters
----------
beta : np.ndarray
Contains untransformed starting values for the latent variables
Returns
----------
mu : np.ndarray
Contains the predicted values (location) for the time series
Y : np.ndarray
Contains the length-adjusted time series (accounting for lags)
"""
Y = np.array(self.data[self.max_lag:])
# Transform latent variables
z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
# Constant and AR terms
if self.ar != 0:
mu = np.matmul(np.transpose(self.X),z[:-self.family_z_no-self.ma])
else:
mu = np.ones(Y.shape[0])*z[0]
# MA terms
if self.ma != 0:
mu = arima_recursion_normal(z, mu, Y, self.max_lag, Y.shape[0], self.ar, self.ma)
return mu, Y | Creates the structure of the model (model matrices etc) for
a Normal family ARIMA model.
Parameters
----------
beta : np.ndarray
Contains untransformed starting values for the latent variables
Returns
----------
mu : np.ndarray
Contains the predicted values (location) for the time series
Y : np.ndarray
Contains the length-adjusted time series (accounting for lags) |
def find_period(data,
min_period=0.2, max_period=32.0,
coarse_precision=1e-5, fine_precision=1e-9,
periodogram=Lomb_Scargle,
period_jobs=1):
"""find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1)
Returns the period of *data* according to the given *periodogram*,
searching first with a coarse precision, and then a fine precision.
**Parameters**
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Array containing columns *time*, *mag*, and (optional) *error*.
min_period : number
Minimum period in search-space.
max_period : number
Maximum period in search-space.
coarse_precision : number
Distance between contiguous frequencies in search-space during first
sweep.
fine_precision : number
Distance between contiguous frequencies in search-space during second
sweep.
periodogram : function
A function with arguments *data*, *precision*, *min_period*,
*max_period*, and *period_jobs*, and return value *period*.
period_jobs : int, optional
Number of simultaneous processes to use while searching (default 1).
**Returns**
period : number
The period of *data*.
"""
if min_period >= max_period:
return min_period
coarse_period = periodogram(data, coarse_precision, min_period, max_period,
period_jobs=period_jobs)
return coarse_period if coarse_precision <= fine_precision else \
periodogram(data, fine_precision,
coarse_period - coarse_precision,
coarse_period + coarse_precision,
period_jobs=period_jobs) | find_period(data, min_period=0.2, max_period=32.0, coarse_precision=1e-5, fine_precision=1e-9, periodogram=Lomb_Scargle, period_jobs=1)
Returns the period of *data* according to the given *periodogram*,
searching first with a coarse precision, and then a fine precision.
**Parameters**
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Array containing columns *time*, *mag*, and (optional) *error*.
min_period : number
Minimum period in search-space.
max_period : number
Maximum period in search-space.
coarse_precision : number
Distance between contiguous frequencies in search-space during first
sweep.
fine_precision : number
Distance between contiguous frequencies in search-space during second
sweep.
periodogram : function
A function with arguments *data*, *precision*, *min_period*,
*max_period*, and *period_jobs*, and return value *period*.
period_jobs : int, optional
Number of simultaneous processes to use while searching (default 1).
**Returns**
period : number
The period of *data*. |
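To illustrate the coarse-then-fine refinement, here is a hedged sketch with a toy periodogram that simply returns the grid period closest to a known target; it stands in for Lomb_Scargle, ignores the data argument, and the target period is made up.
import numpy as np
TRUE_PERIOD = 2.73  # made-up target period
def toy_periodogram(data, precision, min_period, max_period, period_jobs=1):
    # Stand-in periodogram: build a frequency grid with the given spacing and
    # return the trial period closest to TRUE_PERIOD.
    freqs = np.arange(1.0 / max_period, 1.0 / min_period, precision)
    periods = 1.0 / freqs
    return periods[np.argmin(np.abs(periods - TRUE_PERIOD))]
# Assumes find_period above is in scope; the second, fine-grained pass narrows
# the search to a small window around the coarse estimate.
print(find_period(None, periodogram=toy_periodogram))  # ~2.73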
def resolve_compound_variable_fields(dbg, thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
"""
var = getVariable(dbg, thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(var)
except:
pydev_log.exception('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s.',
thread_id, frame_id, scope, attrs) | Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields |
def neural_gpu_body(inputs, hparams, name=None):
"""The core Neural GPU."""
with tf.variable_scope(name, "neural_gpu"):
def step(state, inp): # pylint: disable=missing-docstring
x = tf.nn.dropout(state, 1.0 - hparams.dropout)
for layer in range(hparams.num_hidden_layers):
x = common_layers.conv_gru(
x, (hparams.kernel_height, hparams.kernel_width),
hparams.hidden_size,
name="cgru_%d" % layer)
# Padding input is zeroed-out in the modality, we check this by summing.
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
new_state = tf.where(padding_inp, state, x) # No-op where inp is padding.
return new_state
return tf.foldl(
step,
tf.transpose(inputs, [1, 0, 2, 3]),
initializer=inputs,
parallel_iterations=1,
swap_memory=True) | The core Neural GPU. |
def create_asset_browser(self, ):
"""Create the asset browser
This creates a list browser for assets
and adds it to the ui
:returns: the created browser
:rtype: :class:`jukeboxcore.gui.widgets.browser.ListBrowser`
:raises: None
"""
assetbrws = ListBrowser(4, headers=['Assettype', 'Asset', 'Task', 'Descriptor'])
self.asset_browser_vbox.insertWidget(0, assetbrws)
return assetbrws | Create the asset browser
This creates a list browser for assets
and adds it to the ui
:returns: the created browser
:rtype: :class:`jukeboxcore.gui.widgets.browser.ListBrowser`
:raises: None |
def drop_namespace_by_url(self, url: str) -> None:
"""Drop the namespace at the given URL.
Won't work if the edge store is in use.
:param url: The URL of the namespace to drop
"""
namespace = self.get_namespace_by_url(url)
self.session.query(NamespaceEntry).filter(NamespaceEntry.namespace == namespace).delete()
self.session.delete(namespace)
self.session.commit() | Drop the namespace at the given URL.
Won't work if the edge store is in use.
:param url: The URL of the namespace to drop |
def bayesfactor_pearson(r, n):
"""
Bayes Factor of a Pearson correlation.
Parameters
----------
r : float
Pearson correlation coefficient
n : int
Sample size
Returns
-------
bf : str
Bayes Factor (BF10).
The Bayes Factor quantifies the evidence in favour of the alternative
hypothesis.
Notes
-----
Adapted from a Matlab code found at
https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/corrbf.m
If you would like to compute the Bayes Factor directly from the raw data
instead of from the correlation coefficient, use the
:py:func:`pingouin.corr` function.
The JZS Bayes Factor is approximated using the formula described in
ref [1]_:
.. math::
BF_{10} = \\frac{\\sqrt{n/2}}{\\Gamma(1/2)}
\\int_{0}^{\\infty} \\exp\\left(\\frac{n-2}{2}\\log(1+g)
- \\frac{n-1}{2}\\log\\left(1+(1-r^2)g\\right)
- \\frac{3}{2}\\log(g) - \\frac{n}{2g}\\right) dg
where **n** is the sample size and **r** is the Pearson correlation
coefficient.
References
----------
.. [1] Wetzels, R., Wagenmakers, E.-J., 2012. A default Bayesian
hypothesis test for correlations and partial correlations.
Psychon. Bull. Rev. 19, 1057–1064.
https://doi.org/10.3758/s13423-012-0295-x
Examples
--------
Bayes Factor of a Pearson correlation
>>> from pingouin import bayesfactor_pearson
>>> bf = bayesfactor_pearson(0.6, 20)
>>> print("Bayes Factor: %s" % bf)
Bayes Factor: 8.221
"""
from scipy.special import gamma
# Function to be integrated
def fun(g, r, n):
return np.exp(((n - 2) / 2) * np.log(1 + g) + (-(n - 1) / 2)
* np.log(1 + (1 - r**2) * g) + (-3 / 2)
* np.log(g) + - n / (2 * g))
# JZS Bayes factor calculation
integr = quad(fun, 0, np.inf, args=(r, n))[0]
bf10 = np.sqrt((n / 2)) / gamma(1 / 2) * integr
return _format_bf(bf10) | Bayes Factor of a Pearson correlation.
Parameters
----------
r : float
Pearson correlation coefficient
n : int
Sample size
Returns
-------
bf : str
Bayes Factor (BF10).
The Bayes Factor quantifies the evidence in favour of the alternative
hypothesis.
Notes
-----
Adapted from a Matlab code found at
https://github.com/anne-urai/Tools/blob/master/stats/BayesFactors/corrbf.m
If you would like to compute the Bayes Factor directly from the raw data
instead of from the correlation coefficient, use the
:py:func:`pingouin.corr` function.
The JZS Bayes Factor is approximated using the formula described in
ref [1]_:
.. math::
BF_{10} = \\frac{\\sqrt{n/2}}{\\Gamma(1/2)}
\\int_{0}^{\\infty} \\exp\\left(\\frac{n-2}{2}\\log(1+g)
- \\frac{n-1}{2}\\log\\left(1+(1-r^2)g\\right)
- \\frac{3}{2}\\log(g) - \\frac{n}{2g}\\right) dg
where **n** is the sample size and **r** is the Pearson correlation
coefficient.
References
----------
.. [1] Wetzels, R., Wagenmakers, E.-J., 2012. A default Bayesian
hypothesis test for correlations and partial correlations.
Psychon. Bull. Rev. 19, 1057–1064.
https://doi.org/10.3758/s13423-012-0295-x
Examples
--------
Bayes Factor of a Pearson correlation
>>> from pingouin import bayesfactor_pearson
>>> bf = bayesfactor_pearson(0.6, 20)
>>> print("Bayes Factor: %s" % bf)
Bayes Factor: 8.221 |
def find_by_any(self, identifier, how):
"""
how should be a string with any or all of the characters "ilc"
"""
if "i" in how:
match = self.find_by_id(identifier)
if match:
return match
if "l" in how:
match = self.find_by_localpath(identifier)
if match:
return match
if "c" in how:
match = self.find_by_canonical(identifier)
if match:
return match | how should be a string with any or all of the characters "ilc" |
def initialize_repository(path, spor_dir='.spor'):
"""Initialize a spor repository in `path` if one doesn't already exist.
Args:
path: Path to any file or directory within the repository.
spor_dir: The name of the directory containing spor data.
Returns: A `Repository` instance.
Raises:
ValueError: A repository already exists at `path`.
"""
path = pathlib.Path(path)
spor_path = path / spor_dir
if spor_path.exists():
raise ValueError('spor directory already exists: {}'.format(spor_path))
spor_path.mkdir()
return Repository(path, spor_dir) | Initialize a spor repository in `path` if one doesn't already exist.
Args:
path: Path to any file or directory within the repository.
spor_dir: The name of the directory containing spor data.
Returns: A `Repository` instance.
Raises:
ValueError: A repository already exists at `path`. |
def close(self):
"""Shut down an SOL session.
"""
if self.ipmi_session:
self.ipmi_session.unregister_keepalive(self.keepaliveid)
if self.activated:
try:
self.ipmi_session.raw_command(netfn=6, command=0x49,
data=(1, 1, 0, 0, 0, 0))
except exc.IpmiException:
# if underlying ipmi session is not working, then
# run with the implicit success
pass | Shut down an SOL session. |
def func_str(func, args=[], kwargs={}, type_aliases=[], packed=False,
packkw=None, truncate=False):
"""
string representation of function definition
Returns:
str: a representation of func with args, kwargs, and type_aliases
Args:
func (function):
args (list): argument values (default = [])
kwargs (dict): kwargs values (default = {})
type_aliases (list): (default = [])
packed (bool): (default = False)
packkw (None): (default = None)
Returns:
str: func_str
CommandLine:
python -m utool.util_str --exec-func_str
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> func = byte_str
>>> args = [1024, 'MB']
>>> kwargs = dict(precision=2)
>>> type_aliases = []
>>> packed = False
>>> packkw = None
>>> _str = func_str(func, args, kwargs, type_aliases, packed, packkw)
>>> result = _str
>>> print(result)
byte_str(1024, 'MB', precision=2)
"""
import utool as ut
# if truncate:
# truncatekw = {'maxlen': 20}
# else:
truncatekw = {}
argrepr_list = ([] if args is None else
ut.get_itemstr_list(args, nl=False, truncate=truncate,
truncatekw=truncatekw))
kwrepr_list = ([] if kwargs is None else
ut.dict_itemstr_list(kwargs, explicit=True, nl=False,
truncate=truncate,
truncatekw=truncatekw))
repr_list = argrepr_list + kwrepr_list
argskwargs_str = ', '.join(repr_list)
_str = '%s(%s)' % (meta_util_six.get_funcname(func), argskwargs_str)
if packed:
packkw_ = dict(textwidth=80, nlprefix=' ', break_words=False)
if packkw is not None:
packkw_.update(packkw)
_str = packstr(_str, **packkw_)
return _str | string representation of function definition
Returns:
str: a representation of func with args, kwargs, and type_aliases
Args:
func (function):
args (list): argument values (default = [])
kwargs (dict): kwargs values (default = {})
type_aliases (list): (default = [])
packed (bool): (default = False)
packkw (None): (default = None)
Returns:
str: func_str
CommandLine:
python -m utool.util_str --exec-func_str
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> func = byte_str
>>> args = [1024, 'MB']
>>> kwargs = dict(precision=2)
>>> type_aliases = []
>>> packed = False
>>> packkw = None
>>> _str = func_str(func, args, kwargs, type_aliases, packed, packkw)
>>> result = _str
>>> print(result)
byte_str(1024, 'MB', precision=2) |
def valid_flows_array(catchment):
"""
Return array of valid flows (i.e. excluding rejected years etc)
:param catchment: gauged catchment with amax_records set
:type catchment: :class:`floodestimation.entities.Catchment`
:return: 1D array of flow values
:rtype: :class:`numpy.ndarray`
"""
return np.array([record.flow for record in catchment.amax_records if record.flag == 0]) | Return array of valid flows (i.e. excluding rejected years etc)
:param catchment: gauged catchment with amax_records set
:type catchment: :class:`floodestimation.entities.Catchment`
:return: 1D array of flow values
:rtype: :class:`numpy.ndarray` |
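A hedged sketch using stand-in records that only carry the two attributes the function reads (flow and flag); the real Catchment and AmaxRecord entities come from floodestimation, and the numbers are made up. It assumes the function above and numpy (as np) are in scope.
from types import SimpleNamespace
records = [SimpleNamespace(flow=12.3, flag=0),
           SimpleNamespace(flow=45.6, flag=1),   # rejected year, excluded
           SimpleNamespace(flow=7.8, flag=0)]
catchment = SimpleNamespace(amax_records=records)
print(valid_flows_array(catchment))  # [12.3  7.8]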
def vars(self):
"""
:return: Returns a list of independent, dependent and sigma variables, in that order.
"""
return self.independent_vars + self.dependent_vars + [self.sigmas[var] for var in self.dependent_vars] | :return: Returns a list of independent, dependent and sigma variables, in that order. |
def add_directories(self, directories, except_blacklisted=True):
"""
Adds `directories` to the set of plugin directories.
`directories` may be either a single object or an iterable.
`directories` can be relative paths, but will be converted into
absolute paths based on the current working directory.
if `except_blacklisted` is `True` all `directories` in
`self.blacklisted_directories` will be removed
"""
directories = util.to_absolute_paths(directories)
if except_blacklisted:
directories = self._remove_blacklisted(directories)
self.plugin_directories.update(directories) | Adds `directories` to the set of plugin directories.
`directories` may be either a single object or an iterable.
`directories` can be relative paths, but will be converted into
absolute paths based on the current working directory.
if `except_blacklisted` is `True` all `directories` in
`self.blacklisted_directories` will be removed |
def stream(self, model, position):
"""Create a :class:`~bloop.stream.Stream` that provides approximate chronological ordering.
.. code-block:: pycon
# Create a user so we have a record
>>> engine = Engine()
>>> user = User(id=3, email="[email protected]")
>>> engine.save(user)
>>> user.email = "[email protected]"
>>> engine.save(user)
# First record lacks an "old" value since it's an insert
>>> stream = engine.stream(User, "trim_horizon")
>>> next(stream)
{'key': None,
'old': None,
'new': User(email='[email protected]', id=3, verified=None),
'meta': {
'created_at': datetime.datetime(2016, 10, 23, ...),
'event': {
'id': '3fe6d339b7cb19a1474b3d853972c12a',
'type': 'insert',
'version': '1.1'},
'sequence_number': '700000000007366876916'}
}
:param model: The model to stream records from.
:param position: "trim_horizon", "latest", a stream token, or a :class:`datetime.datetime`.
:return: An iterator for records in all shards.
:rtype: :class:`~bloop.stream.Stream`
:raises bloop.exceptions.InvalidStream: if the model does not have a stream.
"""
validate_not_abstract(model)
if not model.Meta.stream or not model.Meta.stream.get("arn"):
raise InvalidStream("{!r} does not have a stream arn".format(model))
stream = Stream(model=model, engine=self)
stream.move_to(position=position)
return stream | Create a :class:`~bloop.stream.Stream` that provides approximate chronological ordering.
.. code-block:: pycon
# Create a user so we have a record
>>> engine = Engine()
>>> user = User(id=3, email="[email protected]")
>>> engine.save(user)
>>> user.email = "[email protected]"
>>> engine.save(user)
# First record lacks an "old" value since it's an insert
>>> stream = engine.stream(User, "trim_horizon")
>>> next(stream)
{'key': None,
'old': None,
'new': User(email='[email protected]', id=3, verified=None),
'meta': {
'created_at': datetime.datetime(2016, 10, 23, ...),
'event': {
'id': '3fe6d339b7cb19a1474b3d853972c12a',
'type': 'insert',
'version': '1.1'},
'sequence_number': '700000000007366876916'}
}
:param model: The model to stream records from.
:param position: "trim_horizon", "latest", a stream token, or a :class:`datetime.datetime`.
:return: An iterator for records in all shards.
:rtype: :class:`~bloop.stream.Stream`
:raises bloop.exceptions.InvalidStream: if the model does not have a stream. |
def keypoint_scale(keypoint, scale_x, scale_y, **params):
"""Scales a keypoint by scale_x and scale_y."""
x, y, a, s = keypoint
return [x * scale_x, y * scale_y, a, s * max(scale_x, scale_y)] | Scales a keypoint by scale_x and scale_y. |
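A quick usage sketch, assuming the function above is in scope and the keypoint layout (x, y, angle, scale) it uses; the values are made up.
kp = (10.0, 20.0, 0.0, 4.0)   # x, y, angle, scale
print(keypoint_scale(kp, scale_x=2.0, scale_y=0.5))
# [20.0, 10.0, 0.0, 8.0] -- the keypoint scale is multiplied by max(scale_x, scale_y)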
def get_backend(alias):
"""
Returns ``Repository`` class identified by the given alias or raises
VCSError if alias is not recognized or backend class cannot be imported.
"""
if alias not in settings.BACKENDS:
raise VCSError("Given alias '%s' is not recognized! Allowed aliases:\n"
"%s" % (alias, pformat(settings.BACKENDS.keys())))
backend_path = settings.BACKENDS[alias]
klass = import_class(backend_path)
return klass | Returns ``Repository`` class identified by the given alias or raises
VCSError if alias is not recognized or backend class cannot be imported. |
def is_fnmatch_regex(string):
"""
Returns True if the given string is considered an fnmatch
regular expression, False otherwise.
It will look for the characters ``!``, ``*`` and ``$`` in the string.
:param string: str
"""
is_regex = False
regex_chars = ['!', '*', '$']
for c in regex_chars:
if string.find(c) > -1:
return True
return is_regex | Returns True if the given string is considered an fnmatch
regular expression, False otherwise.
It will look for the characters ``!``, ``*`` and ``$`` in the string.
:param string: str |
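A short sketch of what the check above flags, with made-up inputs; it assumes the function above is in scope.
for s in ["report-*.txt", "plain_name.txt", "total$"]:
    print(s, is_fnmatch_regex(s))
# report-*.txt True
# plain_name.txt False
# total$ True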
def get_instance(self, payload):
"""
Build an instance of SessionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.proxy.v1.service.session.SessionInstance
:rtype: twilio.rest.proxy.v1.service.session.SessionInstance
"""
return SessionInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | Build an instance of SessionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.proxy.v1.service.session.SessionInstance
:rtype: twilio.rest.proxy.v1.service.session.SessionInstance |
async def main():
redis = await create_pool(RedisSettings())
job = await redis.enqueue_job('the_task')
# get the job's id
print(job.job_id)
"""
> 68362958a244465b9be909db4b7b5ab4 (or whatever)
"""
# get information about the job, will include results if the job has finished, but
# doesn't await the job's result
debug(await job.info())
"""
> docs/examples/job_results.py:23 main
JobDef(
function='the_task',
args=(),
kwargs={},
job_try=None,
enqueue_time=datetime.datetime(2019, 4, 23, 13, 58, 56, 781000),
score=1556027936781
) (JobDef)
"""
# get the Job's status
print(await job.status())
"""
> JobStatus.queued
"""
# poll redis for the job result, if the job raised an exception,
# it will be raised here
# (You'll need the worker running at the same time to get a result here)
print(await job.result(timeout=5))
"""
> 42
""" | > 68362958a244465b9be909db4b7b5ab4 (or whatever) |
def _endCodeIfNeeded(line, inCodeBlock):
"""Simple routine to append end code marker if needed."""
assert isinstance(line, str)
if inCodeBlock:
line = '# @endcode{0}{1}'.format(linesep, line.rstrip())
inCodeBlock = False
return line, inCodeBlock | Simple routine to append end code marker if needed. |
def configure(self, cfg, handler, path=""):
"""
Start configuration process for the provided handler
Args:
cfg (dict): config container
handler (config.Handler class): config handler to use
path (str): current path in the configuration progress
"""
# configure simple value attributes (str, int etc.)
for name, attr in handler.attributes():
if cfg.get(name) is not None:
continue
if attr.expected_type not in [list, dict]:
cfg[name] = self.set(handler, attr, name, path, cfg)
elif attr.default is None and not hasattr(handler, "configure_%s" % name):
self.action_required.append(("%s.%s: %s" % (path, name, attr.help_text)).strip("."))
# configure attributes that have complex handlers defined
# on the config Handler class (class methods prefixed by
# configure_ prefix
for name, attr in handler.attributes():
if cfg.get(name) is not None:
continue
if hasattr(handler, "configure_%s" % name):
fn = getattr(handler, "configure_%s" % name)
fn(self, cfg, "%s.%s"% (path, name))
if attr.expected_type in [list, dict] and not cfg.get(name):
try:
del cfg[name]
except KeyError:
pass | Start configuration process for the provided handler
Args:
cfg (dict): config container
handler (config.Handler class): config handler to use
path (str): current path in the configuration progress |
def keep_entry_range(entry, lows, highs, converter, regex):
"""
Check if an entry falls into a desired range.
Every number in the entry will be extracted using *regex*,
if any are within a given low to high range the entry will
be kept.
Parameters
----------
entry : str
lows : iterable
Collection of low values against which to compare the entry.
highs : iterable
Collection of high values against which to compare the entry.
converter : callable
Function to convert a string to a number.
regex : regex object
Regular expression to locate numbers in a string.
Returns
-------
True if the entry should be kept, False otherwise.
"""
return any(
low <= converter(num) <= high
for num in regex.findall(entry)
for low, high in zip(lows, highs)
) | Check if an entry falls into a desired range.
Every number in the entry will be extracted using *regex*,
if any are within a given low to high range the entry will
be kept.
Parameters
----------
entry : str
lows : iterable
Collection of low values against which to compare the entry.
highs : iterable
Collection of high values against which to compare the entry.
converter : callable
Function to convert a string to a number.
regex : regex object
Regular expression to locate numbers in a string.
Returns
-------
True if the entry should be kept, False otherwise. |
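A usage sketch with an illustrative number-matching regex and made-up values, assuming the function above is in scope.
import re
num_re = re.compile(r"\d+(?:\.\d+)?")
print(keep_entry_range("run 42 of 100", lows=[40], highs=[50],
                       converter=float, regex=num_re))   # True, since 42 falls in [40, 50]
print(keep_entry_range("run 7 of 8", lows=[40], highs=[50],
                       converter=float, regex=num_re))   # False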
def card_names_and_ids(self):
"""Returns [(name, id), ...] pairs of cards from current board"""
b = Board(self.client, self.board_id)
cards = b.getCards()
card_names_and_ids = [(unidecode(c.name), c.id) for c in cards]
return card_names_and_ids | Returns [(name, id), ...] pairs of cards from current board |
def close(self):
"""Flush data, write 28 bytes BGZF EOF marker, and close BGZF file.
samtools will look for a magic EOF marker, just a 28 byte empty BGZF
block, and if it is missing warns the BAM file may be truncated. In
addition to samtools writing this block, so too does bgzip - so this
implementation does too.
"""
if self._buffer:
self.flush()
self._handle.write(_bgzf_eof)
self._handle.flush()
self._handle.close() | Flush data, write 28 bytes BGZF EOF marker, and close BGZF file.
samtools will look for a magic EOF marker, just a 28 byte empty BGZF
block, and if it is missing warns the BAM file may be truncated. In
addition to samtools writing this block, so too does bgzip - so this
implementation does too. |
def get_sds_by_ip(self,ip):
"""
Get ScaleIO SDS object by its ip address
:param ip: IP address of SDS
:return: ScaleIO SDS object
:raise KeyError: No SDS with specified ip found
:rtype: SDS object
"""
if self.conn.is_ip_addr(ip):
for sds in self.sds:
for sdsIp in sds.ipList:
if sdsIp == ip:
return sds
raise KeyError("SDS of that name not found")
else:
raise ValueError("Malformed IP address - get_sds_by_ip()") | Get ScaleIO SDS object by its ip address
:param ip: IP address of SDS
:return: ScaleIO SDS object
:raise KeyError: No SDS with specified ip found
:rtype: SDS object |
def mass_fraction_within_radius(self, kwargs_lens, center_x, center_y, theta_E, numPix=100):
"""
computes the mean convergence of all the different lens model components within a spherical aperture
:param kwargs_lens: lens model keyword argument list
:param center_x: center of the aperture
:param center_y: center of the aperture
:param theta_E: radius of aperture
:return: list of average convergences for all the model components
"""
x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=2.*theta_E / numPix)
x_grid += center_x
y_grid += center_y
mask = mask_util.mask_sphere(x_grid, y_grid, center_x, center_y, theta_E)
kappa_list = []
for i in range(len(kwargs_lens)):
kappa = self.LensModel.kappa(x_grid, y_grid, kwargs_lens, k=i)
kappa_mean = np.sum(kappa * mask) / np.sum(mask)
kappa_list.append(kappa_mean)
return kappa_list | computes the mean convergence of all the different lens model components within a spherical aperture
:param kwargs_lens: lens model keyword argument list
:param center_x: center of the aperture
:param center_y: center of the aperture
:param theta_E: radius of aperture
:return: list of average convergences for all the model components |
def isometric_load(script, AbsName="TEMP3D.abs"):
"""Isometric parameterization: Load Abstract Domain
"""
filter_xml = ''.join([
' <filter name="Iso Parametrization Load Abstract Domain">\n',
' <Param name="AbsName" ',
'value="%s" ' % AbsName,
'description="Abstract Mesh file" ',
'type="RichString" ',
'tooltip="The filename of the abstract mesh that has to be loaded"',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | Isometric parameterization: Load Abstract Domain |
def patches(self, dwn, install, comp_sum, uncomp_sum):
"""Separates packages from patches/ directory
"""
dwnp, installp, comp_sump, uncomp_sump = ([] for i in range(4))
for d, i, c, u in zip(dwn, install, comp_sum, uncomp_sum):
if "_slack" + slack_ver() in i:
dwnp.append(d)
dwn.remove(d)
installp.append(i)
install.remove(i)
comp_sump.append(c)
comp_sum.remove(c)
uncomp_sump.append(u)
uncomp_sum.remove(u)
if "--patches" in self.flag:
return dwnp, installp, comp_sump, uncomp_sump
return dwn, install, comp_sum, uncomp_sum | Separates packages from patches/ directory |
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values) | Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7 |
async def forget(request, response):
"""Forget previously remembered identity.
Usually it clears cookie or server-side storage to forget user
session.
"""
identity_policy = request.config_dict.get(IDENTITY_KEY)
if identity_policy is None:
text = ("Security subsystem is not initialized, "
"call aiohttp_security.setup(...) first")
# in order to see meaningful exception message both: on console
# output and rendered page we add same message to *reason* and
# *text* arguments.
raise web.HTTPInternalServerError(reason=text, text=text)
await identity_policy.forget(request, response) | Forget previously remembered identity.
Usually it clears cookie or server-side storage to forget user
session. |
def _parse_array(stream):
"""Parse an array, stream should be passed the initial [
returns:
Parsed array
"""
logger.debug("parsing array")
arr = []
while True:
c = stream.read(1)
if c in _GDB_MI_VALUE_START_CHARS:
stream.seek(-1)
val = _parse_val(stream)
arr.append(val)
elif c in _WHITESPACE:
pass
elif c == ",":
pass
elif c == "]":
# Stop when this array has finished. Note
# that elements of this array can also be arrays.
break
logger.debug("parsed array:")
logger.debug("%s", fmt_green(arr))
return arr | Parse an array, stream should be passed the initial [
returns:
Parsed array |
def _classify_move_register(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify move-register gadgets.
"""
matches = []
regs_init_inv = self._invert_dictionary(regs_init)
# Check for "dst_reg <- src_reg" pattern.
for dst_reg, dst_val in regs_fini.items():
# Make sure the *dst* register was written.
if dst_reg not in written_regs:
continue
for src_reg in regs_init_inv.get(dst_val, []):
# Make sure the *src* register was read.
if src_reg not in read_regs:
continue
# Check restrictions...
if self._arch_regs_size[src_reg] != self._arch_regs_size[dst_reg]:
continue
if src_reg == dst_reg:
continue
if regs_init[dst_reg] == regs_init[src_reg]:
continue
src_reg_ir = ReilRegisterOperand(src_reg, self._arch_regs_size[src_reg])
dst_reg_ir = ReilRegisterOperand(dst_reg, self._arch_regs_size[dst_reg])
matches.append({
"src": [src_reg_ir],
"dst": [dst_reg_ir]
})
return matches | Classify move-register gadgets. |
def parse(cls, args):
"""
Parse command line arguments to construct a dictionary of command
parameters that can be used to create a command
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used in create method
Raises:
ParseError: when the arguments are not correct
"""
try:
(options, args) = cls.optparser.parse_args(args)
if options.db_tap_id is None:
raise ParseError("db_tap_id is required",
cls.optparser.format_help())
if options.query is None and options.script_location is None:
raise ParseError("query or script location is required",
cls.optparser.format_help())
if options.script_location is not None:
if options.query is not None:
raise ParseError(
"Both query and script_location cannot be specified",
cls.optparser.format_help())
if ((options.script_location.find("s3://") != 0) and
(options.script_location.find("s3n://") != 0)):
# script location is local file
try:
q = open(options.script_location).read()
except IOError as e:
raise ParseError("Unable to open script location: %s" %
str(e),
cls.optparser.format_help())
options.script_location = None
options.query = q
except OptionParsingError as e:
raise ParseError(e.msg, cls.optparser.format_help())
except OptionParsingExit as e:
return None
if options.macros is not None:
options.macros = json.loads(options.macros)
v = vars(options)
v["command_type"] = "DbTapQueryCommand"
return v | Parse command line arguments to construct a dictionary of command
parameters that can be used to create a command
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used in create method
Raises:
ParseError: when the arguments are not correct |
def dump_artifact(obj, path, filename=None):
'''
Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated
'''
p_sha1 = None
if not os.path.exists(path):
os.makedirs(path, mode=0o700)
else:
p_sha1 = hashlib.sha1()
p_sha1.update(obj.encode(encoding='UTF-8'))
if filename is None:
fd, fn = tempfile.mkstemp(dir=path)
else:
fn = os.path.join(path, filename)
if os.path.exists(fn):
c_sha1 = hashlib.sha1()
with open(fn) as f:
contents = f.read()
c_sha1.update(contents.encode(encoding='UTF-8'))
if not os.path.exists(fn) or p_sha1.hexdigest() != c_sha1.hexdigest():
lock_fp = os.path.join(path, '.artifact_write_lock')
lock_fd = os.open(lock_fp, os.O_RDWR | os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR)
fcntl.lockf(lock_fd, fcntl.LOCK_EX)
try:
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR)
f.write(str(obj))
finally:
fcntl.lockf(lock_fd, fcntl.LOCK_UN)
os.close(lock_fd)
os.remove(lock_fp)
return fn | Write the artifact to disk at the specified path
Args:
obj (string): The string object to be dumped to disk in the specified
path. The artifact filename will be automatically created
path (string): The full path to the artifacts data directory.
filename (string, optional): The name of file to write the artifact to.
If the filename is not provided, then one will be generated.
Returns:
string: The full path filename for the artifact that was generated |
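A usage sketch on a throwaway directory, assuming the function above is in scope and a POSIX system (it relies on fcntl); the payload and filename are made up.
import tempfile
artifact_dir = tempfile.mkdtemp()
path = dump_artifact('{"status": "ok"}', artifact_dir, filename="result.json")
print(path)                # .../result.json
print(open(path).read())   # {"status": "ok"}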
def parse_args(self, argv=None):
""" Return an argparse.Namespace of the argv string or sys.argv if
argv is None. """
arg_input = shlex.split(argv) if argv is not None else None
self.get_or_create_session()
return self.argparser.parse_args(arg_input) | Return an argparse.Namespace of the argv string or sys.argv if
argv is None. |
def _get_base_url(request):
"""
Construct a base URL, given a request object.
This comprises the protocol prefix (http:// or https://) and the host,
which can include the port number. For example:
http://www.openquake.org or https://www.openquake.org:8000.
"""
if request.is_secure():
base_url = 'https://%s'
else:
base_url = 'http://%s'
base_url %= request.META['HTTP_HOST']
return base_url | Construct a base URL, given a request object.
This comprises the protocol prefix (http:// or https://) and the host,
which can include the port number. For example:
http://www.openquake.org or https://www.openquake.org:8000. |
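A hedged sketch using a stand-in for the Django request that provides only the two members the helper touches; the host value is taken from the docstring example.
from types import SimpleNamespace
fake_request = SimpleNamespace(is_secure=lambda: False,
                               META={'HTTP_HOST': 'www.openquake.org:8000'})
print(_get_base_url(fake_request))   # http://www.openquake.org:8000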
def split_certificate(certificate_path, destination_folder, password=None):
"""Splits a PKCS12 certificate into Base64-encoded DER certificate and key.
This method splits a potentially password-protected
`PKCS12 <https://en.wikipedia.org/wiki/PKCS_12>`_ certificate
(format ``.p12`` or ``.pfx``) into one certificate and one key part, both in
`pem <https://en.wikipedia.org/wiki/X.509#Certificate_filename_extensions>`_
format.
:returns: Tuple of certificate and key string data.
:rtype: tuple
"""
try:
# Attempt Linux and Darwin call first.
p = subprocess.Popen(
["openssl", "version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
sout, serr = p.communicate()
openssl_executable_version = sout.decode().lower()
if not (
openssl_executable_version.startswith("openssl")
or openssl_executable_version.startswith("libressl")
):
raise BankIDError(
"OpenSSL executable could not be found. "
"Splitting cannot be performed."
)
openssl_executable = "openssl"
except Exception:
# Attempt to call on standard Git for Windows path.
p = subprocess.Popen(
["C:\\Program Files\\Git\\mingw64\\bin\\openssl.exe", "version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
sout, serr = p.communicate()
if not sout.decode().lower().startswith("openssl"):
raise BankIDError(
"OpenSSL executable could not be found. "
"Splitting cannot be performed."
)
openssl_executable = "C:\\Program Files\\Git\\mingw64\\bin\\openssl.exe"
if not os.path.exists(os.path.abspath(os.path.expanduser(destination_folder))):
os.makedirs(os.path.abspath(os.path.expanduser(destination_folder)))
# Paths to output files.
out_cert_path = os.path.join(
os.path.abspath(os.path.expanduser(destination_folder)), "certificate.pem"
)
out_key_path = os.path.join(
os.path.abspath(os.path.expanduser(destination_folder)), "key.pem"
)
# Use openssl for converting to pem format.
pipeline_1 = [
openssl_executable,
"pkcs12",
"-in",
"{0}".format(certificate_path),
"-passin" if password is not None else "",
"pass:{0}".format(password) if password is not None else "",
"-out",
"{0}".format(out_cert_path),
"-clcerts",
"-nokeys",
]
p = subprocess.Popen(
list(filter(None, pipeline_1)), stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
p.communicate()
pipeline_2 = [
openssl_executable,
"pkcs12",
"-in",
"{0}".format(certificate_path),
"-passin" if password is not None else "",
"pass:{0}".format(password) if password is not None else "",
"-out",
"{0}".format(out_key_path),
"-nocerts",
"-nodes",
]
p = subprocess.Popen(
list(filter(None, pipeline_2)), stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
p.communicate()
# Return path tuples.
return out_cert_path, out_key_path | Splits a PKCS12 certificate into Base64-encoded DER certificate and key.
This method splits a potentially password-protected
`PKCS12 <https://en.wikipedia.org/wiki/PKCS_12>`_ certificate
(format ``.p12`` or ``.pfx``) into one certificate and one key part, both in
`pem <https://en.wikipedia.org/wiki/X.509#Certificate_filename_extensions>`_
format.
:returns: Tuple of certificate and key string data.
:rtype: tuple |
def map_sprinkler(self, sx, sy, watered_crop='^', watered_field='_', dry_field=' ', dry_crop='x'):
"""
Return a version of the ASCII map showing reached crop cells.
"""
# convert strings (rows) to lists of characters for easier map editing
maplist = [list(s) for s in self.maplist]
for y, row in enumerate(maplist):
for x, cell in enumerate(row):
if sprinkler_reaches_cell(x, y, sx, sy, self.r):
if cell == 'x':
cell = watered_crop
else:
cell = watered_field
else:
cell = dry_crop if cell == 'x' else dry_field
maplist[y][x] = cell
maplist[sy][sx] = 'O' # sprinkler
return '\n'.join([''.join(row) for row in maplist]) | Return a version of the ASCII map showing reached crop cells. |
def search(self, cond):
"""
Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element]
"""
if cond in self._query_cache:
return self._query_cache.get(cond, [])[:]
docs = [doc for doc in self.all() if cond(doc)]
self._query_cache[cond] = docs
return docs[:] | Search for all documents matching a 'where' cond.
:param cond: the condition to check against
:type cond: Query
:returns: list of matching documents
:rtype: list[Element] |
def diff_archives(archive1, archive2, verbosity=0, interactive=True):
"""Print differences between two archives."""
util.check_existing_filename(archive1)
util.check_existing_filename(archive2)
if verbosity >= 0:
util.log_info("Comparing %s with %s ..." % (archive1, archive2))
res = _diff_archives(archive1, archive2, verbosity=verbosity, interactive=interactive)
if res == 0 and verbosity >= 0:
util.log_info("... no differences found.") | Print differences between two archives. |
def get_question_mdata():
"""Return default mdata map for Question"""
return {
'item': {
'element_label': {
'text': 'item',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'instructions': {
'text': 'accepts an osid.id.Id object',
'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
'formatTypeId': str(DEFAULT_FORMAT_TYPE),
},
'required': False,
'read_only': False,
'linked': False,
'array': False,
'default_id_values': [''],
'syntax': 'ID',
'id_set': [],
},
} | Return default mdata map for Question |
def _remove_call(self, real_time, call):
"""
Internal helper. Removes a (possibly still pending) call from a
bucket. It is *not* an error if the bucket is gone (e.g. the
call has already happened).
"""
try:
(delayed_call, calls) = self._buckets[real_time]
except KeyError:
# no such bucket ... error? swallow?
return
# remove call; if we're empty, cancel underlying
# bucket-timeout IDelayedCall
calls.remove(call)
if not calls:
del self._buckets[real_time]
delayed_call.cancel() | Internal helper. Removes a (possibly still pending) call from a
bucket. It is *not* an error if the bucket is gone (e.g. the
call has already happened). |
def install_python_package(self, arch, name=None, env=None, is_dir=True):
'''Automate the installation of a Python package (or a cython
package where the cython components are pre-built).'''
# arch = self.filtered_archs[0] # old kivy-ios way
if name is None:
name = self.name
if env is None:
env = self.get_recipe_env(arch)
info('Installing {} into site-packages'.format(self.name))
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.hostpython_location)
if self.ctx.python_recipe.name != 'python2legacy':
hpenv = env.copy()
shprint(hostpython, 'setup.py', 'install', '-O2',
'--root={}'.format(self.ctx.get_python_install_dir()),
'--install-lib=.',
_env=hpenv, *self.setup_extra_args)
elif self.call_hostpython_via_targetpython:
shprint(hostpython, 'setup.py', 'install', '-O2', _env=env,
*self.setup_extra_args)
else: # python2legacy
hppath = join(dirname(self.hostpython_location), 'Lib', 'site-packages')
hpenv = env.copy()
if 'PYTHONPATH' in hpenv:
hpenv['PYTHONPATH'] = ':'.join([hppath] + hpenv['PYTHONPATH'].split(':'))
else:
hpenv['PYTHONPATH'] = hppath
shprint(hostpython, 'setup.py', 'install', '-O2',
'--root={}'.format(self.ctx.get_python_install_dir()),
'--install-lib=lib/python2.7/site-packages',
_env=hpenv, *self.setup_extra_args)
# If asked, also install in the hostpython build dir
if self.install_in_hostpython:
self.install_hostpython_package(arch) | Automate the installation of a Python package (or a cython
package where the cython components are pre-built). |
def walk_links(directory, prefix='', linkbase=None):
""" Return all links contained in directory (or any sub directory).
"""
links = {}
try:
for child in os.listdir(directory):
fullname = os.path.join(directory, child)
if os.path.islink(fullname):
link_path = os.path.normpath(os.path.join(directory, os.readlink(fullname)))
if linkbase:
link_path = os.path.relpath(link_path, linkbase)
links[os.path.join(prefix, child)] = link_path
elif os.path.isdir(fullname):
links.update(walk_links(fullname,
prefix=os.path.join(prefix, child),
linkbase=linkbase))
except OSError as err:
if err.errno != 2: # Ignore unknown directory error
raise
return links | Return all links contained in directory (or any sub directory). |
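A usage sketch on a throwaway directory, assuming the function above is in scope and a platform with symlink support; the filenames are made up.
import os
import tempfile
base = tempfile.mkdtemp()
open(os.path.join(base, "target.txt"), "w").close()
os.symlink(os.path.join(base, "target.txt"), os.path.join(base, "alias.txt"))
print(walk_links(base, linkbase=base))   # {'alias.txt': 'target.txt'}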
def _add_cytomine_cli_args(argparse):
"""
Add cytomine CLI args to the ArgumentParser object: cytomine_host, cytomine_public_key, cytomine_private_key and
cytomine_verbose.
Parameters
----------
argparse: ArgumentParser
The argument parser
Return
------
argparse: ArgumentParser
The argument parser (same object as parameter)
"""
argparse.add_argument(*_cytomine_parameter_name_synonyms("host"),
dest="host", help="The Cytomine host (without protocol).", required=True)
argparse.add_argument(*_cytomine_parameter_name_synonyms("public_key"),
dest="public_key", help="The Cytomine public key.", required=True)
argparse.add_argument(*_cytomine_parameter_name_synonyms("private_key"),
dest="private_key", help="The Cytomine private key.", required=True)
argparse.add_argument("--verbose", "--cytomine_verbose",
dest="verbose", type=int, default=logging.INFO,
help="The verbosity level of the client (as an integer value).")
argparse.add_argument("-l", "--log_level", "--cytomine_log_level",
dest="log_level", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help="The logging level of the client (as a string value)")
return argparse | Add cytomine CLI args to the ArgumentParser object: cytomine_host, cytomine_public_key, cytomine_private_key and
cytomine_verbose.
Parameters
----------
argparse: ArgumentParser
The argument parser
Return
------
argparse: ArgumentParser
The argument parser (same object as parameter) |
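A hypothetical usage sketch, assuming the synonym helper registers plain --host, --public_key and --private_key flags; the key values are placeholders:
from argparse import ArgumentParser

parser = _add_cytomine_cli_args(ArgumentParser())
# flag names assume the synonym helper exposes the plain forms
params, _ = parser.parse_known_args([
    '--host', 'demo.cytomine.local',
    '--public_key', 'PUBLIC_KEY',
    '--private_key', 'PRIVATE_KEY',
])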
def clean_conf_folder(self, locale):
"""Remove the configuration directory for `locale`"""
dirname = self.configuration.get_messages_dir(locale)
dirname.removedirs_p() | Remove the configuration directory for `locale` |
def create_user(username):
"Create a new user."
password = prompt_pass("Enter password")
user = User(username=username, password=password)
db.session.add(user)
db.session.commit() | Create a new user. |
def admin_tools_render_menu_css(context, menu=None):
"""
Template tag that renders the menu css files. It takes an optional
``Menu`` instance as unique argument, if not given, the menu will be
retrieved with the ``get_admin_menu`` function.
"""
if menu is None:
menu = get_admin_menu(context)
context.update({
'template': 'admin_tools/menu/css.html',
'css_files': menu.Media.css,
})
return context | Template tag that renders the menu css files. It takes an optional
``Menu`` instance as unique argument, if not given, the menu will be
retrieved with the ``get_admin_menu`` function. |
def isConnected(self, signal, slot):
"""
Returns if the given signal is connected to the inputted slot.
:param signal | <variant>
slot | <callable>
:return <bool> | is connected
"""
sig_calls = self._callbacks.get(signal, [])
for callback in sig_calls:
if callback == slot:
return True
return False | Returns if the given signal is connected to the inputted slot.
:param signal | <variant>
slot | <callable>
:return <bool> | is connected |
def community_post_comment_down_create(self, post_id, id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/votes#create-vote"
api_path = "/api/v2/community/posts/{post_id}/comments/{id}/down.json"
api_path = api_path.format(post_id=post_id, id=id)
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/help_center/votes#create-vote |
def put_multi(entities):
"""Persist a set of entities to Datastore.
Note:
This uses the adapter that is tied to the first Entity in the
list. If the entities have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of entities multiple times.
Parameters:
entities(list[Model]): The list of entities to persist.
Raises:
RuntimeError: If the given set of models use a disparate set of
adapters.
Returns:
list[Model]: The list of persisted entities.
"""
if not entities:
return []
adapter, requests = None, []
for entity in entities:
if adapter is None:
adapter = entity._adapter
entity.pre_put_hook()
requests.append(PutRequest(entity.key, entity.unindexed_properties, entity))
keys = adapter.put_multi(requests)
for key, entity in zip(keys, entities):
entity.key = key
entity.post_put_hook()
return entities | Persist a set of entities to Datastore.
Note:
This uses the adapter that is tied to the first Entity in the
list. If the entities have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of entities multiple times.
Parameters:
entities(list[Model]): The list of entities to persist.
Raises:
RuntimeError: If the given set of models use a disparate set of
adapters.
Returns:
list[Model]: The list of persisted entities. |
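A hypothetical usage sketch; Person stands in for any Model subclass (with a name property) defined elsewhere:
# `Person` is a hypothetical Model subclass defined elsewhere
people = [Person(name="Ada"), Person(name="Grace")]
people = put_multi(people)                 # pass a real list, not a generator
print([person.key for person in people])   # keys are filled in after the put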
def get_bel_resource_hash(location, hash_function=None):
"""Get a BEL resource file and returns its semantic hash.
:param str location: URL of a resource
:param hash_function: A hash function or list of hash functions, like :func:`hashlib.md5` or :code:`hashlib.sha512`
:return: The hexadecimal digest of the hash of the values in the resource
:rtype: str
:raises: pybel.resources.exc.ResourceError
"""
resource = get_bel_resource(location)
return hash_names(
resource['Values'],
hash_function=hash_function
) | Get a BEL resource file and return its semantic hash.
:param str location: URL of a resource
:param hash_function: A hash function or list of hash functions, like :func:`hashlib.md5` or :code:`hashlib.sha512`
:return: The hexadecimal digest of the hash of the values in the resource
:rtype: str
:raises: pybel.resources.exc.ResourceError |
def installed(name,
pkgs=None,
pip_bin=None,
requirements=None,
bin_env=None,
use_wheel=False,
no_use_wheel=False,
log=None,
proxy=None,
timeout=None,
repo=None,
editable=None,
find_links=None,
index_url=None,
extra_index_url=None,
no_index=False,
mirrors=None,
build=None,
target=None,
download=None,
download_cache=None,
source=None,
upgrade=False,
force_reinstall=False,
ignore_installed=False,
exists_action=None,
no_deps=False,
no_install=False,
no_download=False,
install_options=None,
global_options=None,
user=None,
cwd=None,
pre_releases=False,
cert=None,
allow_all_external=False,
allow_external=None,
allow_unverified=None,
process_dependency_links=False,
env_vars=None,
use_vt=False,
trusted_host=None,
no_cache_dir=False,
cache_dir=None,
no_binary=None,
extra_args=None,
**kwargs):
'''
Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:password@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/
'''
if pip_bin and not bin_env:
bin_env = pip_bin
# If pkgs is present, ignore name
if pkgs:
if not isinstance(pkgs, list):
return {'name': name,
'result': False,
'changes': {},
'comment': 'pkgs argument must be formatted as a list'}
else:
pkgs = [name]
# Assumption: If `pkg` is not an `string`, it's a `collections.OrderedDict`
# prepro = lambda pkg: pkg if type(pkg) == str else \
# ' '.join((pkg.items()[0][0], pkg.items()[0][1].replace(',', ';')))
# pkgs = ','.join([prepro(pkg) for pkg in pkgs])
prepro = lambda pkg: pkg if isinstance(pkg, six.string_types) else \
' '.join((six.iteritems(pkg)[0][0], six.iteritems(pkg)[0][1]))
pkgs = [prepro(pkg) for pkg in pkgs]
ret = {'name': ';'.join(pkgs), 'result': None,
'comment': '', 'changes': {}}
try:
cur_version = __salt__['pip.version'](bin_env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = None
ret['comment'] = 'Error installing \'{0}\': {1}'.format(name, err)
return ret
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Get the packages parsed name and version from the pip library.
# This only is done when there is no requirements or editable parameter.
pkgs_details = []
if pkgs and not (requirements or editable):
comments = []
for pkg in iter(pkgs):
out = _check_pkg_version_format(pkg)
if out['result'] is False:
ret['result'] = False
comments.append(out['comment'])
elif out['result'] is True:
pkgs_details.append((out['prefix'], pkg, out['version_spec']))
if ret['result'] is False:
ret['comment'] = '\n'.join(comments)
return ret
# If a requirements file is specified, only install the contents of the
# requirements file. Similarly, using the --editable flag with pip should
# also ignore the "name" and "pkgs" parameters.
target_pkgs = []
already_installed_comments = []
if requirements or editable:
comments = []
# Append comments if this is a dry run.
if __opts__['test']:
ret['result'] = None
if requirements:
# TODO: Check requirements file against currently-installed
# packages to provide more accurate state output.
comments.append('Requirements file \'{0}\' will be '
'processed.'.format(requirements))
if editable:
comments.append(
'Package will be installed in editable mode (i.e. '
'setuptools "develop mode") from {0}.'.format(editable)
)
ret['comment'] = ' '.join(comments)
return ret
# No requirements case.
# Check pre-existence of the requested packages.
else:
# Attempt to pre-cache the current pip list
try:
pip_list = __salt__['pip.list'](bin_env=bin_env, user=user, cwd=cwd)
# If we fail, then just send False, and we'll try again in the next function call
except Exception as exc:
log.exception(exc)
pip_list = False
for prefix, state_pkg_name, version_spec in pkgs_details:
if prefix:
state_pkg_name = state_pkg_name
version_spec = version_spec
out = _check_if_installed(prefix, state_pkg_name, version_spec,
ignore_installed, force_reinstall,
upgrade, user, cwd, bin_env, env_vars,
index_url, extra_index_url, pip_list,
**kwargs)
# If _check_if_installed result is None, something went wrong with
# the command running. This way we keep stateful output.
if out['result'] is None:
ret['result'] = False
ret['comment'] = out['comment']
return ret
else:
out = {'result': False, 'comment': None}
result = out['result']
# The package is not present. Add it to the pkgs to install.
if result is False:
# Replace commas (used for version ranges) with semicolons
# (which are not supported) in name so it does not treat
# them as multiple packages.
target_pkgs.append((prefix, state_pkg_name.replace(',', ';')))
# Append comments if this is a dry run.
if __opts__['test']:
msg = 'Python package {0} is set to be installed'
ret['result'] = None
ret['comment'] = msg.format(state_pkg_name)
return ret
# The package is already present and will not be reinstalled.
elif result is True:
# Append comment stating its presence
already_installed_comments.append(out['comment'])
# The command pip.list failed. Abort.
elif result is None:
ret['result'] = None
ret['comment'] = out['comment']
return ret
# No packages to install.
if not target_pkgs:
ret['result'] = True
aicomms = '\n'.join(already_installed_comments)
last_line = 'All specified packages are already installed' + (' and up-to-date' if upgrade else '')
ret['comment'] = aicomms + ('\n' if aicomms else '') + last_line
return ret
# Construct the string that will get passed to the install call
pkgs_str = ','.join([state_name for _, state_name in target_pkgs])
# Call to install the package. Actual installation takes place here
pip_install_call = __salt__['pip.install'](
pkgs='{0}'.format(pkgs_str) if pkgs_str else '',
requirements=requirements,
bin_env=bin_env,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
log=log,
proxy=proxy,
timeout=timeout,
editable=editable,
find_links=find_links,
index_url=index_url,
extra_index_url=extra_index_url,
no_index=no_index,
mirrors=mirrors,
build=build,
target=target,
download=download,
download_cache=download_cache,
source=source,
upgrade=upgrade,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
exists_action=exists_action,
no_deps=no_deps,
no_install=no_install,
no_download=no_download,
install_options=install_options,
global_options=global_options,
user=user,
cwd=cwd,
pre_releases=pre_releases,
cert=cert,
allow_all_external=allow_all_external,
allow_external=allow_external,
allow_unverified=allow_unverified,
process_dependency_links=process_dependency_links,
saltenv=__env__,
env_vars=env_vars,
use_vt=use_vt,
trusted_host=trusted_host,
no_cache_dir=no_cache_dir,
extra_args=extra_args,
**kwargs
)
if pip_install_call and pip_install_call.get('retcode', 1) == 0:
ret['result'] = True
if requirements or editable:
comments = []
if requirements:
PIP_REQUIREMENTS_NOCHANGE = [
'Requirement already satisfied',
'Requirement already up-to-date',
'Requirement not upgraded',
'Collecting',
'Cloning',
'Cleaning up...',
]
for line in pip_install_call.get('stdout', '').split('\n'):
if not any(
[
line.strip().startswith(x)
for x in PIP_REQUIREMENTS_NOCHANGE
]
):
ret['changes']['requirements'] = True
if ret['changes'].get('requirements'):
comments.append('Successfully processed requirements file '
'{0}.'.format(requirements))
else:
comments.append('Requirements were already installed.')
if editable:
comments.append('Package successfully installed from VCS '
'checkout {0}.'.format(editable))
ret['changes']['editable'] = True
ret['comment'] = ' '.join(comments)
else:
# Check that the packages set to be installed were installed.
# Create comments reporting success and failures
pkg_404_comms = []
already_installed_packages = set()
for line in pip_install_call.get('stdout', '').split('\n'):
# Output for already installed packages:
# 'Requirement already up-to-date: jinja2 in /usr/local/lib/python2.7/dist-packages\nCleaning up...'
if line.startswith('Requirement already up-to-date: '):
package = line.split(':', 1)[1].split()[0]
already_installed_packages.add(package.lower())
for prefix, state_name in target_pkgs:
# Case for packages that are not an URL
if prefix:
pipsearch = salt.utils.data.CaseInsensitiveDict(
__salt__['pip.list'](prefix, bin_env,
user=user, cwd=cwd,
env_vars=env_vars,
**kwargs)
)
# If we didn't find the package in the system after
# installing it report it
if not pipsearch:
pkg_404_comms.append(
'There was no error installing package \'{0}\' '
'although it does not show when calling '
'\'pip.freeze\'.'.format(prefix)
)
else:
if prefix in pipsearch \
and prefix.lower() not in already_installed_packages:
ver = pipsearch[prefix]
ret['changes']['{0}=={1}'.format(prefix, ver)] = 'Installed'
# Case for packages that are an URL
else:
ret['changes']['{0}==???'.format(state_name)] = 'Installed'
# Set comments
aicomms = '\n'.join(already_installed_comments)
succ_comm = 'All packages were successfully installed'\
if not pkg_404_comms else '\n'.join(pkg_404_comms)
ret['comment'] = aicomms + ('\n' if aicomms else '') + succ_comm
return ret
elif pip_install_call:
ret['result'] = False
if 'stdout' in pip_install_call:
error = 'Error: {0} {1}'.format(pip_install_call['stdout'],
pip_install_call['stderr'])
else:
error = 'Error: {0}'.format(pip_install_call['comment'])
if requirements or editable:
comments = []
if requirements:
comments.append('Unable to process requirements file '
'"{0}".'.format(requirements))
if editable:
comments.append('Unable to install from VCS checkout '
'{0}.'.format(editable))
comments.append(error)
ret['comment'] = ' '.join(comments)
else:
pkgs_str = ', '.join([state_name for _, state_name in target_pkgs])
aicomms = '\n'.join(already_installed_comments)
error_comm = ('Failed to install packages: {0}. '
'{1}'.format(pkgs_str, error))
ret['comment'] = aicomms + ('\n' if aicomms else '') + error_comm
else:
ret['result'] = False
ret['comment'] = 'Could not install package'
return ret | Make sure the package is installed
name
The name of the python package to install. You can also specify version
numbers here using the standard operators ``==, >=, <=``. If
``requirements`` is given, this parameter will be ignored.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- require:
- pkg: python-pip
This will install the latest Django version greater than 1.6 but less
than 1.7.
requirements
Path to a pip requirements file. If the path begins with salt://
the file will be transferred from the master file server.
user
The user under which to run pip
use_wheel : False
Prefer wheel archives (requires pip>=1.4)
no_use_wheel : False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
Example:
.. code-block:: yaml
django:
pip.installed:
- no_binary: ':all:'
flask:
pip.installed:
- no_binary:
- itsdangerous
- click
log
Log file where a complete (maximum verbosity) record will be kept
proxy
Specify a proxy in the form
user:password@proxy.server:port. Note that the
user:password@ is optional and required only if you
are behind an authenticated proxy. If you provide
user@proxy.server:port then you will be prompted for a
password.
timeout
Set the socket timeout (default 15 seconds)
editable
install something editable (i.e.
git+https://github.com/worldcompany/djangoembed.git#egg=djangoembed)
find_links
URL to look for packages at
index_url
Base URL of Python Package Index
extra_index_url
Extra URLs of package indexes to use in addition to ``index_url``
no_index
Ignore package index
mirrors
Specific mirror URL(s) to query (automatically adds --use-mirrors)
build
Unpack packages into ``build`` dir
target
Install packages into ``target`` dir
download
Download packages into ``download`` instead of installing them
download_cache
Cache downloaded packages in ``download_cache`` dir
source
Check out ``editable`` packages into ``source`` dir
upgrade
Upgrade all packages to the newest available version
force_reinstall
When upgrading, reinstall all packages even if they are already
up-to-date.
ignore_installed
Ignore the installed packages (reinstalling instead)
exists_action
Default action when a path already exists: (s)witch, (i)gnore, (w)ipe,
(b)ackup
no_deps
Ignore package dependencies
no_install
Download and unpack all packages, but don't actually install them
no_cache_dir:
Disable the cache.
cwd
Current working directory to run pip from
pre_releases
Include pre-releases in the available versions
cert
Provide a path to an alternate CA bundle
allow_all_external
Allow the installation of all externally hosted files
allow_external
Allow the installation of externally hosted files (comma separated list)
allow_unverified
Allow the installation of insecure and unverifiable files (comma separated list)
process_dependency_links
Enable the processing of dependency links
bin_env : None
Absolute path to a virtual environment directory or absolute path to
a pip executable. The example below assumes a virtual environment
has been created at ``/foo/.virtualenvs/bar``.
env_vars
Add or modify environment variables. Useful for tweaking build steps,
such as specifying INCLUDE or LIBRARY paths in Makefiles, build scripts or
compiler calls. This must be in the form of a dictionary or a mapping.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django_app
- env_vars:
CUSTOM_PATH: /opt/django_app
VERBOSE: True
use_vt
Use VT terminal emulation (see output while installing)
trusted_host
Mark this host as trusted, even though it does not have valid or any
HTTPS.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar
- require:
- pkg: python-pip
Or
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django >= 1.6, <= 1.7
- bin_env: /foo/.virtualenvs/bar/bin/pip
- require:
- pkg: python-pip
.. admonition:: Attention
The following arguments are deprecated, do not use.
pip_bin : None
Deprecated, use ``bin_env``
.. versionchanged:: 0.17.0
``use_wheel`` option added.
install_options
Extra arguments to be supplied to the setup.py install command.
If you are using an option with a directory path, be sure to use
absolute path.
Example:
.. code-block:: yaml
django:
pip.installed:
- name: django
- install_options:
- --prefix=/blah
- require:
- pkg: python-pip
global_options
Extra global options to be supplied to the setup.py call before the
install command.
.. versionadded:: 2014.1.3
.. admonition:: Attention
As of Salt 0.17.0 the pip state **needs** an importable pip module.
This usually means having the system's pip package installed or running
Salt from an active `virtualenv`_.
The reason for this requirement is because ``pip`` already does a
pretty good job parsing its own requirements. It makes no sense for
Salt to do ``pip`` requirements parsing and validation before passing
them to the ``pip`` library. It's functionality duplication and it's
more error prone.
.. admonition:: Attention
Please set ``reload_modules: True`` to have the salt minion
import this module after installation.
Example:
.. code-block:: yaml
pyopenssl:
pip.installed:
- name: pyOpenSSL
- reload_modules: True
- exists_action: i
extra_args
pip keyword and positional arguments not yet implemented in salt
.. code-block:: yaml
pandas:
pip.installed:
- name: pandas
- extra_args:
- --latest-pip-kwarg: param
- --latest-pip-arg
.. warning::
If unsupported options are passed here that are not supported in a
minion's version of pip, a `No such option error` will be thrown.
.. _`virtualenv`: http://www.virtualenv.org/en/latest/ |
def _create_bv_circuit(self, bit_map: Dict[str, str]) -> Program:
"""
Implementation of the Bernstein-Vazirani Algorithm.
Given a list of input qubits and an ancilla bit, all initially in the
:math:`\\vert 0\\rangle` state, create a program that can find :math:`\\vec{a}` with one
query to the given oracle.
:param Dict[String, String] bit_map: truth-table of a function for Bernstein-Vazirani with
the keys being all possible bit vectors strings and the values being the function values
:rtype: Program
"""
unitary, _ = self._compute_unitary_oracle_matrix(bit_map)
full_bv_circuit = Program()
full_bv_circuit.defgate("BV-ORACLE", unitary)
# Put ancilla bit into minus state
full_bv_circuit.inst(X(self.ancilla), H(self.ancilla))
full_bv_circuit.inst([H(i) for i in self.computational_qubits])
full_bv_circuit.inst(
tuple(["BV-ORACLE"] + sorted(self.computational_qubits + [self.ancilla], reverse=True)))
full_bv_circuit.inst([H(i) for i in self.computational_qubits])
return full_bv_circuit | Implementation of the Bernstein-Vazirani Algorithm.
Given a list of input qubits and an ancilla bit, all initially in the
:math:`\\vert 0\\rangle` state, create a program that can find :math:`\\vec{a}` with one
query to the given oracle.
:param Dict[String, String] bit_map: truth-table of a function for Bernstein-Vazirani with
the keys being all possible bit vectors strings and the values being the function values
:rtype: Program |
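As an illustration of the expected bit_map argument, here is a hand-built truth table for the hidden string a = '101' (with zero bias), where each value is the dot product a . x mod 2:
bit_map = {
    '000': '0', '001': '1', '010': '0', '011': '1',
    '100': '1', '101': '0', '110': '1', '111': '0',
}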
def register(self, model_cls):
"""Register model(s) with app"""
assert issubclass(model_cls, peewee.Model)
assert not hasattr(model_cls._meta, 'database_manager')
if model_cls in self:
raise RuntimeError("Model already registered")
self.append(model_cls)
model_cls._meta.database = self.dbm
return model_cls | Register model(s) with app |
def DisplayGetter(accessor, *args, **kwargs):
"""
Returns a Getter that gets the display name for a model field with choices.
"""
short_description = get_pretty_name(accessor)
accessor = 'get_%s_display' % accessor
getter = Getter(accessor, *args, **kwargs)
getter.short_description = short_description
return getter | Returns a Getter that gets the display name for a model field with choices. |
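A hypothetical usage sketch; status is assumed to be a Django model field declared with choices, so the generated accessor is get_status_display:
status_column = DisplayGetter('status')
print(status_column.short_description)   # pretty name derived from 'status'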
def start_state_manager_watches(self):
"""
Receive updates to the packing plan from the statemgrs and update processes as needed.
"""
Log.info("Start state manager watches")
statemgr_config = StateMgrConfig()
statemgr_config.set_state_locations(configloader.load_state_manager_locations(
self.cluster, state_manager_config_file=self.state_manager_config_file,
overrides={"heron.statemgr.connection.string": self.state_manager_connection}))
try:
self.state_managers = statemanagerfactory.get_all_state_managers(statemgr_config)
for state_manager in self.state_managers:
state_manager.start()
except Exception as ex:
Log.error("Found exception while initializing state managers: %s. Bailing out..." % ex)
traceback.print_exc()
sys.exit(1)
# pylint: disable=unused-argument
def on_packing_plan_watch(state_manager, new_packing_plan):
Log.debug("State watch triggered for PackingPlan update on shard %s. Existing: %s, New: %s" %
(self.shard, str(self.packing_plan), str(new_packing_plan)))
if self.packing_plan != new_packing_plan:
Log.info("PackingPlan change detected on shard %s, relaunching effected processes."
% self.shard)
self.update_packing_plan(new_packing_plan)
Log.info("Updating executor processes")
self.launch()
else:
Log.info(
"State watch triggered for PackingPlan update but plan not changed so not relaunching.")
for state_manager in self.state_managers:
# The callback function with the bound
# state_manager as first variable.
onPackingPlanWatch = functools.partial(on_packing_plan_watch, state_manager)
state_manager.get_packing_plan(self.topology_name, onPackingPlanWatch)
Log.info("Registered state watch for packing plan changes with state manager %s." %
str(state_manager)) | Receive updates to the packing plan from the statemgrs and update processes as needed. |
def merge_arena(self, mujoco_arena):
"""Adds arena model to the MJCF model."""
self.arena = mujoco_arena
self.bin_offset = mujoco_arena.bin_abs
self.bin_size = mujoco_arena.table_full_size
self.bin2_body = mujoco_arena.bin2_body
self.merge(mujoco_arena) | Adds arena model to the MJCF model. |
def ParseTable(table):
"""Parses table of osquery output.
Args:
table: A table in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryTable` instance.
"""
precondition.AssertIterableType(table, dict)
result = rdf_osquery.OsqueryTable()
result.header = ParseHeader(table)
for row in table:
result.rows.append(ParseRow(result.header, row))
return result | Parses table of osquery output.
Args:
table: A table in a "parsed JSON" representation.
Returns:
A parsed `rdf_osquery.OsqueryTable` instance. |
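A minimal sketch of the expected input, a list of row dictionaries as produced by parsing osquery's JSON output (column names are illustrative):
table = [
    {'pid': '1', 'name': 'systemd'},
    {'pid': '42', 'name': 'sshd'},
]
result = ParseTable(table)   # OsqueryTable with a header and two rows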
def bestfit(self):
"""
Returns a series with the bestfit values.
Example:
Series.bestfit()
Returns: series
The returned series contains a parameter
called 'formula' which includes the string representation
of the bestfit line.
"""
# statsmodels cannot be included in requirements.txt
# see https://github.com/scikit-learn/scikit-learn/issues/4164
# which shares the same issue as statsmodel
try:
import statsmodels.api as sm
except:
raise Exception("statsmodels is required: " \
"please run " \
"pip install statsmodels" )
x=pd.Series(list(range(1,len(self)+1)),index=self.index)
x=sm.add_constant(x)
model=sm.OLS(self,x)
fit=model.fit()
vals=fit.params.values
best_fit=fit.fittedvalues
# the below methods have been deprecated in Pandas
# model=pd.ols(x=x,y=self,intercept=True)
# best_fit=model.y_fitted
best_fit.formula='%.2f*x+%.2f' % (vals[0],vals[1])
return best_fit | Returns a series with the bestfit values.
Example:
Series.bestfit()
Returns: series
The returned series contains a parameter
called 'formula' which includes the string representation
of the bestfit line. |
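A minimal usage sketch, assuming statsmodels is installed and this method has been bound to pandas Series by the surrounding library:
import pandas as pd

prices = pd.Series([10.0, 10.8, 11.9, 13.1, 13.9])
fit = prices.bestfit()
print(fit.formula)   # string form of the fitted line, e.g. "a*x+b"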
def rebuild_system(self, override=False, **kwargs):
"""
Rebuild molecules in molecular system.
Parameters
----------
override : :class:`bool`, optional (default=False)
If False the rebuilt molecular system is returned as a new
:class:`MolecularSystem`, if True, the current
:class:`MolecularSystem` is modified.
"""
# First we create a 3x3x3 supercell with the initial unit cell in the
# centre and the 26 unit cell translations around to provide all the
# atom positions necessary for the molecules passing through periodic
# boundary reconstruction step.
supercell_333 = create_supercell(self.system, **kwargs)
# smolsys = self.load_system(supercell_333, self.system_id + '_311')
# smolsys.dump_system(override=True)
discrete = discrete_molecules(self.system, rebuild=supercell_333)
# This function overrides the initial data for 'coordinates',
# 'atom_ids', and 'elements' instances in the 'system' dictionary.
coordinates = np.array([], dtype=np.float64).reshape(0, 3)
atom_ids = np.array([])
elements = np.array([])
for i in discrete:
coordinates = np.concatenate(
[coordinates, i['coordinates']], axis=0
)
atom_ids = np.concatenate([atom_ids, i['atom_ids']], axis=0)
elements = np.concatenate([elements, i['elements']], axis=0)
rebuild_system = {
'coordinates': coordinates,
'atom_ids': atom_ids,
'elements': elements
}
if override is True:
self.system.update(rebuild_system)
return None
else:
return self.load_system(rebuild_system) | Rebuild molecules in molecular system.
Parameters
----------
override : :class:`bool`, optional (default=False)
If False the rebuild molecular system is returned as a new
:class:`MolecularSystem`, if True, the current
:class:`MolecularSystem` is modified. |
def render_exception_js(self, exception):
"""
Return a response with the body containing a JSON-formatted version of the exception.
"""
from .http import JsonResponse
response = {}
response["error"] = exception.error
response["error_description"] = exception.reason
return JsonResponse(response, status=getattr(exception, 'code', 400)) | Return a response with the body containing a JSON-formatted version of the exception.
def set_temperature(self, temp):
"""Set current goal temperature / setpoint"""
self.set_service_value(
self.thermostat_setpoint,
'CurrentSetpoint',
'NewCurrentSetpoint',
temp)
self.set_cache_value('setpoint', temp) | Set current goal temperature / setpoint |
def _update_records(self, records, data):
"""Insert or update a list of DNS records, specified in the netcup API
convention.
The fields ``hostname``, ``type``, and ``destination`` are mandatory
and must be provided either in the record dict or through ``data``!
"""
data = {k: v for k, v in data.items() if v}
records = [dict(record, **data) for record in records]
return self._apicall(
'updateDnsRecords',
domainname=self.domain,
dnsrecordset={'dnsrecords': records},
).get('dnsrecords', []) | Insert or update a list of DNS records, specified in the netcup API
convention.
The fields ``hostname``, ``type``, and ``destination`` are mandatory
and must be provided either in the record dict or through ``data``! |
def get_input_shape(sym, proto_obj):
"""Helper function to obtain the shape of an array"""
arg_params = proto_obj.arg_dict
aux_params = proto_obj.aux_dict
model_input_shape = [data[1] for data in proto_obj.model_metadata.get('input_tensor_data')]
data_names = [data[0] for data in proto_obj.model_metadata.get('input_tensor_data')]
# creating dummy inputs
inputs = []
for in_shape in model_input_shape:
inputs.append(nd.ones(shape=in_shape))
data_shapes = []
for idx, input_name in enumerate(data_names):
data_shapes.append((input_name, inputs[idx].shape))
ctx = context.cpu()
# create a module
mod = module.Module(symbol=sym, data_names=data_names, context=ctx, label_names=None)
mod.bind(for_training=False, data_shapes=data_shapes, label_shapes=None)
mod.set_params(arg_params=arg_params, aux_params=aux_params)
data_forward = []
for idx, input_name in enumerate(data_names):
val = inputs[idx]
data_forward.append(val)
mod.forward(io.DataBatch(data_forward))
result = mod.get_outputs()[0].asnumpy()
return result.shape | Helper function to obtain the shape of an array |
def get_program(self, program_resource_name: str) -> Dict:
"""Returns the previously created quantum program.
Params:
program_resource_name: A string of the form
`projects/project_id/programs/program_id`.
Returns:
A dictionary containing the metadata and the program.
"""
return self.service.projects().programs().get(
name=program_resource_name).execute() | Returns the previously created quantum program.
Params:
program_resource_name: A string of the form
`projects/project_id/programs/program_id`.
Returns:
A dictionary containing the metadata and the program. |
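A hypothetical usage sketch, where client is an instance of the surrounding class and the resource name is a placeholder:
program = client.get_program('projects/my-project/programs/my-program')
print(sorted(program.keys()))   # metadata fields plus the program body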
def _execute_callback_async(self, callback, data):
"""Execute the callback asynchronously.
If the callback is not a coroutine, convert it.
Note: The WebClient passed into the callback is running in "async" mode.
This means all responses will be futures.
"""
if asyncio.iscoroutine(callback):
asyncio.ensure_future(
callback(rtm_client=self, web_client=self._web_client, data=data)
)
else:
asyncio.ensure_future(
asyncio.coroutine(callback)(
rtm_client=self, web_client=self._web_client, data=data
)
) | Execute the callback asynchronously.
If the callback is not a coroutine, convert it.
Note: The WebClient passed into the callback is running in "async" mode.
This means all responses will be futures. |
def remove_image_info_cb(self, viewer, channel, image_info):
"""Almost the same as remove_image_cb().
"""
return self.remove_image_cb(viewer, channel.name,
image_info.name, image_info.path) | Almost the same as remove_image_cb(). |
def rect(self, x, y, width, height, roundness=0.0, draw=True, **kwargs):
'''
Draw a rectangle from x, y of width, height.
:param x: top left x-coordinate
:param y: top left y-coordinate
:param width: width of the rectangle
:param height: height of the rectangle
:param roundness: Corner roundness, defaults to 0.0 (a right-angle).
:param draw: If True draws immediately.
:param fill: Optionally pass a fill color.
:return: path representing the rectangle.
'''
path = self.BezierPath(**kwargs)
path.rect(x, y, width, height, roundness, self.rectmode)
if draw:
path.draw()
return path | Draw a rectangle from x, y of width, height.
:param x: top left x-coordinate
:param y: top left y-coordinate
:param width: width of the rectangle
:param height: height of the rectangle
:param roundness: Corner roundness, defaults to 0.0 (a right-angle).
:param draw: If True draws immediately.
:param fill: Optionally pass a fill color.
:return: path representing the rectangle. |
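A hypothetical usage sketch, where bot is the object exposing this method; the coordinates and fill colour are illustrative:
path = bot.rect(10, 10, 100, 50, roundness=0.2, fill=(1, 0, 0, 1))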
def infer(self, **options):
"""https://github.com/frictionlessdata/datapackage-py#resource
"""
descriptor = deepcopy(self.__current_descriptor)
# Blank -> Stop
if self.__source_inspection.get('blank'):
return descriptor
# Name
if not descriptor.get('name'):
descriptor['name'] = self.__source_inspection['name']
# Only for non inline/storage
if not self.inline and not self.__storage:
# Format
if not descriptor.get('format'):
descriptor['format'] = self.__source_inspection['format']
# Mediatype
if not descriptor.get('mediatype'):
descriptor['mediatype'] = 'text/%s' % descriptor['format']
# Encoding
if not descriptor.get('encoding'):
contents = b''
with self.raw_iter(stream=True) as stream:
for chunk in stream:
contents += chunk
if len(contents) > 1000: break
encoding = cchardet.detect(contents)['encoding']
if encoding is not None:
encoding = encoding.lower()
descriptor['encoding'] = 'utf-8' if encoding == 'ascii' else encoding
# Schema
if not descriptor.get('schema'):
if self.tabular:
descriptor['schema'] = self.__get_table().infer(**options)
# Profile
if descriptor.get('profile') == config.DEFAULT_RESOURCE_PROFILE:
if self.tabular:
descriptor['profile'] = 'tabular-data-resource'
# Save descriptor
self.__current_descriptor = descriptor
self.__build()
return descriptor | https://github.com/frictionlessdata/datapackage-py#resource |
def get(self):
"""Returns a requests.Session object.
Gets Session from sqlite3 cache or creates a new Session.
"""
if not HAS_SQL: # pragma: nocover
return requests.session()
try:
conn, c = self.connect()
except:
log.traceback(logging.DEBUG)
return requests.session()
session = None
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT value FROM {0} LIMIT 1'.format(self.table_name))
row = c.fetchone()
if row is not None:
session = pickle.loads(row[0])
except: # pragma: nocover
log.traceback(logging.DEBUG)
try:
conn.close()
except: # pragma: nocover
log.traceback(logging.DEBUG)
return session if session is not None else requests.session() | Returns a requests.Session object.
Gets Session from sqlite3 cache or creates a new Session. |
def update(self, instance, condition):
"""Update the instance to the database
:param instance: an instance of modeled data object
:param condition: condition evaluated to determine record(s) to update
:returns: record id updated or None
:rtype: int
"""
item = self.dbi.get(condition)
if item is None:
return None
item.update(instance.as_dict())
self.dbi.update(item, condition)
return item.eid | Update the instance in the database
:param instance: an instance of modeled data object
:param condition: condition evaluated to determine record(s) to update
:returns: record id updated or None
:rtype: int |
def get_instance(self, payload):
"""
Build an instance of TranscriptionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.recording.transcription.TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.recording.transcription.TranscriptionInstance
"""
return TranscriptionInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
recording_sid=self._solution['recording_sid'],
) | Build an instance of TranscriptionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.recording.transcription.TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.recording.transcription.TranscriptionInstance |
def content(self):
"""
:return: string (unicode) with Dockerfile content
"""
if self.cache_content and self.cached_content:
return self.cached_content
try:
with self._open_dockerfile('rb') as dockerfile:
content = b2u(dockerfile.read())
if self.cache_content:
self.cached_content = content
return content
except (IOError, OSError) as ex:
logger.error("Couldn't retrieve content of dockerfile: %r", ex)
raise | :return: string (unicode) with Dockerfile content |
def decode_offset_commit_response(cls, data):
"""
Decode bytes to an OffsetCommitResponse
Arguments:
data: bytes to decode
"""
((correlation_id,), cur) = relative_unpack('>i', data, 0)
((num_topics,), cur) = relative_unpack('>i', data, cur)
for _ in xrange(num_topics):
(topic, cur) = read_short_string(data, cur)
((num_partitions,), cur) = relative_unpack('>i', data, cur)
for _ in xrange(num_partitions):
((partition, error), cur) = relative_unpack('>ih', data, cur)
yield OffsetCommitResponse(topic, partition, error) | Decode bytes to an OffsetCommitResponse
Arguments:
data: bytes to decode |
def create_from_fitsfile(cls, fitsfile):
""" Read a fits file and use it to make a mapping
"""
from fermipy.skymap import Map
index_map = Map.create_from_fits(fitsfile)
mult_map = Map.create_from_fits(fitsfile, hdu=1)
ff = fits.open(fitsfile)
hpx = HPX.create_from_hdu(ff[0])
mapping_data = dict(ipixs=index_map.counts,
mult_val=mult_map.counts,
npix=mult_map.counts.shape)
return cls(hpx, index_map.wcs, mapping_data) | Read a fits file and use it to make a mapping |
def add_raw_code(self, string_or_list):
"""Add raw Gmsh code.
"""
if _is_string(string_or_list):
self._GMSH_CODE.append(string_or_list)
else:
assert isinstance(string_or_list, list)
for string in string_or_list:
self._GMSH_CODE.append(string)
return | Add raw Gmsh code. |
def dump_dict_to_file(dictionary, filepath):
"""Dump @dictionary as a line to @filepath."""
create_dirs(
os.path.dirname(filepath)
)
with open(filepath, 'a') as outfile:
json.dump(dictionary, outfile)
outfile.write('\n') | Dump @dictionary as a line to @filepath. |
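A minimal usage sketch; each call appends one JSON object per line (the path is illustrative):
dump_dict_to_file({'event': 'start', 'ok': True}, '/tmp/run/events.jsonl')
dump_dict_to_file({'event': 'stop', 'ok': True}, '/tmp/run/events.jsonl')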
def get_globals(self):
"""Get enriched globals"""
if self.shell:
globals_ = dict(_initial_globals)
else:
globals_ = dict(self.current_frame.f_globals)
globals_['_'] = self.db.last_obj
if cut is not None:
globals_.setdefault('cut', cut)
# For meta-debugging purposes
globals_['___wdb'] = self.db
# Hack for function scope eval
globals_.update(self.current_locals)
for var, val in self.db.extra_vars.items():
globals_[var] = val
self.db.extra_items = {}
return globals_ | Get enriched globals |
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(levels=new_levels, codes=new_codes,
verify_integrity=False) | Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex |
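A minimal sketch on a small pandas MultiIndex; the labels are illustrative:
import pandas as pd

mi = pd.MultiIndex.from_product([['a', 'b', 'c'], [1, 2]])
print(mi.truncate(before='b'))             # drops the 'a' block
print(mi.truncate(before='a', after='b'))  # keeps 'a' and 'b'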
def launch_frozen(in_name, out_name, script_path, frozen_tar_path=None,
temp_path='_hadoopy_temp', cache=True, check_script=False,
**kw):
"""Freezes a script and then launches it.
This function will freeze your python program, and place it on HDFS
in 'temp_path'. It will not remove it afterwards as they are typically
small, you can easily reuse/debug them, and to avoid any risks involved
with removing the file.
:param in_name: Input path (string or list)
:param out_name: Output path
:param script_path: Path to the script (e.g., script.py)
:param frozen_tar_path: If not None, use this path to a previously frozen archive. You can get such a path from the return value of this function, it is particularly helpful in iterative programs.
:param cache: If True (default) then use previously frozen scripts. Cache is stored in memory (not persistent).
:param temp_path: HDFS path that we can use to store temporary files (default to _hadoopy_temp)
:param partitioner: If True, the partitioner is the value.
:param wait: If True, wait till the process is completed (default True) this is useful if you want to run multiple jobs concurrently by using the 'process' entry in the output.
:param files: Extra files (other than the script) (iterator). NOTE: Hadoop copies the files into working directory
:param jobconfs: Extra jobconf parameters (iterator)
:param cmdenvs: Extra cmdenv parameters (iterator)
:param hstreaming: The full hadoop streaming path to call.
:param name: Set the job name to this (default None, job name is the script name)
:param use_typedbytes: If True (default), use typedbytes IO.
:param use_seqoutput: True (default), output sequence file. If False, output is text.
:param use_autoinput: If True (default), sets the input format to auto.
:param config: If a string, set the hadoop config path
:param pipe: If true (default) then call user code through a pipe to isolate it and stop bugs when printing to stdout. See project docs.
:param python_cmd: The python command to use. The default is "python". Can be used to override the system default python, e.g. python_cmd = "python2.6"
:param num_mappers: The number of mappers to use, i.e. the argument given to 'numMapTasks'. If None, then do not specify this argument to hadoop streaming.
:param num_reducers: The number of reducers to use, i.e. the argument given to 'numReduceTasks'. If None, then do not specify this argument to hadoop streaming.
:param check_script: If True, then copy script and .py(c) files to a temporary directory and verify that it can be executed. This catches the majority of errors related to not included locally imported files. The default is False when using launch_frozen as the freeze process packages local files.
:rtype: Dictionary with some of the following entries (depending on options)
:returns: freeze_cmds: Freeze command(s) ran
:returns: frozen_tar_path: HDFS path to frozen file
:returns: hadoop_cmds: Hadoopy command(s) ran
:returns: process: subprocess.Popen object
:returns: output: Iterator of (key, value) pairs
:raises: subprocess.CalledProcessError: Hadoop error.
:raises: OSError: Hadoop streaming not found.
:raises: TypeError: Input types are not correct.
:raises: ValueError: Script not found
"""
if (('files' in kw and isinstance(kw['files'], (str, unicode))) or
('jobconfs' in kw and isinstance(kw['jobconfs'], (str, unicode))) or
('cmdenvs' in kw and isinstance(kw['cmdenvs'], (str, unicode)))):
raise TypeError('files, jobconfs, and cmdenvs must be iterators of strings and not strings!')
if 'jobconfs' in kw:
kw['jobconfs'] = _listeq_to_dict(kw['jobconfs'])
if 'cmdenvs' in kw:
kw['cmdenvs'] = _listeq_to_dict(kw['cmdenvs'])
cmds = []
if not frozen_tar_path:
freeze_out = hadoopy.freeze_script(script_path, temp_path=temp_path, cache=cache)
frozen_tar_path = freeze_out['frozen_tar_path']
cmds += freeze_out['cmds']
jobconfs = kw.get('jobconfs', {})
jobconfs['mapred.cache.archives'] = '%s#_frozen' % frozen_tar_path
jobconfs['mapreduce.job.cache.archives'] = '%s#_frozen' % frozen_tar_path
kw['copy_script'] = False
kw['add_python'] = False
kw['jobconfs'] = jobconfs
out = launch(in_name, out_name, script_path,
script_dir='_frozen', remove_ext=True, check_script=check_script,
make_executable=False, **kw)
out['freeze_cmds'] = cmds
out['frozen_tar_path'] = frozen_tar_path
return out | Freezes a script and then launches it.
This function will freeze your python program, and place it on HDFS
in 'temp_path'. It will not remove it afterwards as they are typically
small, you can easily reuse/debug them, and to avoid any risks involved
with removing the file.
:param in_name: Input path (string or list)
:param out_name: Output path
:param script_path: Path to the script (e.g., script.py)
:param frozen_tar_path: If not None, use this path to a previously frozen archive. You can get such a path from the return value of this function, it is particularly helpful in iterative programs.
:param cache: If True (default) then use previously frozen scripts. Cache is stored in memory (not persistent).
:param temp_path: HDFS path that we can use to store temporary files (default to _hadoopy_temp)
:param partitioner: If True, the partitioner is the value.
:param wait: If True, wait till the process is completed (default True) this is useful if you want to run multiple jobs concurrently by using the 'process' entry in the output.
:param files: Extra files (other than the script) (iterator). NOTE: Hadoop copies the files into working directory
:param jobconfs: Extra jobconf parameters (iterator)
:param cmdenvs: Extra cmdenv parameters (iterator)
:param hstreaming: The full hadoop streaming path to call.
:param name: Set the job name to this (default None, job name is the script name)
:param use_typedbytes: If True (default), use typedbytes IO.
:param use_seqoutput: True (default), output sequence file. If False, output is text.
:param use_autoinput: If True (default), sets the input format to auto.
:param config: If a string, set the hadoop config path
:param pipe: If true (default) then call user code through a pipe to isolate it and stop bugs when printing to stdout. See project docs.
:param python_cmd: The python command to use. The default is "python". Can be used to override the system default python, e.g. python_cmd = "python2.6"
:param num_mappers: The number of mappers to use, i.e. the argument given to 'numMapTasks'. If None, then do not specify this argument to hadoop streaming.
:param num_reducers: The number of reducers to use, i.e. the argument given to 'numReduceTasks'. If None, then do not specify this argument to hadoop streaming.
:param check_script: If True, then copy script and .py(c) files to a temporary directory and verify that it can be executed. This catches the majority of errors related to not included locally imported files. The default is False when using launch_frozen as the freeze process packages local files.
:rtype: Dictionary with some of the following entries (depending on options)
:returns: freeze_cmds: Freeze command(s) ran
:returns: frozen_tar_path: HDFS path to frozen file
:returns: hadoop_cmds: Hadoopy command(s) ran
:returns: process: subprocess.Popen object
:returns: output: Iterator of (key, value) pairs
:raises: subprocess.CalledProcessError: Hadoop error.
:raises: OSError: Hadoop streaming not found.
:raises: TypeError: Input types are not correct.
:raises: ValueError: Script not found |
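A hypothetical usage sketch; the HDFS paths and script name are placeholders:
out = launch_frozen('/user/me/input', '/user/me/output', 'wordcount.py',
                    num_reducers=4)
for key, value in out['output']:
    print(key, value)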