blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 5
261
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
45
| license_type
stringclasses 2
values | repo_name
stringlengths 8
111
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 72
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 530k
616M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
24.6k
| gha_license_id
stringclasses 9
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 40
values | src_encoding
stringclasses 10
values | language
stringclasses 1
value | is_vendor
bool 1
class | is_generated
bool 2
classes | length_bytes
int64 11
4.05M
| extension
stringclasses 25
values | content
stringlengths 10
4.04M
| authors
sequencelengths 1
1
| author_id
stringclasses 578
values |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb2c64c0218df858e821204c4c485f29f4b33c74 | e0527bce5c53a196752d3a16adf50cb60754de5f | /10-How to Stop Programs Crashing Demos/3-is_square.py | 8bf01fcece7fa35279f95d25ece62fa140398965 | [] | no_license | ARWA-ALraddadi/python-tutorial-for-beginners | ddeb657f419fbc176bea273bc9fb6b88d1894191 | 21cedfc47871ca4d25c2382464c60ab0a2121205 | refs/heads/master | 2023-06-30T20:24:30.688800 | 2021-08-08T08:22:29 | 2021-08-08T08:22:29 | 193,094,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,066 | py | ################################################################
##
## As a demonstration of a function which applies defensive
## programming in different ways, consider a predicate
## which is intended to return True if a given natural
## number (i.e., a non-negative integer) is a square of
## another natural number.
##
## From this description the function could be "misused" in
## three ways:
##
## 1) It could be given a negative number.
## 2) It could be given a floating point number.
## 3) It could be given a value which is not a number at
## all.
##
## By adding some "defensive" code we can make a naive
## implementation more robust by responding appropriately
## to each of these cases:
##
## 1) A negative number can never be a square of another
## number, so we can always return False in this case.
## Here we choose to do so "silently", not drawing
## attention to the unexpected value at all, since the
## answer returned is still "correct" mathematically.
## 2) A positive floating point number could be a square of
## a natural number so, even though we're not required
## to handle floating point numbers we can still do so,
## but choose to generate a "warning" message in this
## case.
## 3) If the function is given a non-numerical value it
## is reasonable to assume that something is seriously
## wrong with the calling code, so in this case we
## generate an "error" message and return the special
## value None.
#---------------------------------------------------------
# Return True if the given natural number is the square of
# some other natural number
def is_square(natural_number):
    """Return True if natural_number is the square of a natural number.

    Defensive behaviour (as described in the header comments above):
    * non-numeric input -> print an error message and return None
    * negative number   -> silently return False (mathematically correct)
    * float input       -> tolerated, but a warning message is printed
    """
    from math import sqrt

    # Check that the parameter is a number at all; anything else is a
    # programming error in the caller, so report it and return None.
    if not (isinstance(natural_number, int) or isinstance(natural_number, float)):
        print('ERROR - parameter must be numeric, given:', repr(natural_number))
        return None

    # A negative number can never be the square of a natural number,
    # so answer False silently - the result is still "correct".
    if natural_number < 0:
        return False

    # Floats were not required, but a positive float can still be a
    # square, so handle it and emit a warning.
    if isinstance(natural_number, float):
        print('Warning - expected natural, given float:', natural_number)

    # Return True if the number's square root is a whole number
    return sqrt(natural_number) % 1 == 0
#---------------------------------------------------------
# Some tests
#
# The first of these tests is a "valid" one, but the remaining
# three all provide unexpected inputs. Uncommenting the
# "defensive" checks above will cause the function to respond
# appropriately. (It will crash until the defensive code is
# uncommented. Why?)
print(is_square(36)) # expected input
print()
print(is_square(-1)) # unexpected input, but handled silently
print()
print(is_square(225.0)) # unexpected input, handled with warning
print()
print(is_square('August')) # unexpected input, handled as an error
| [
"[email protected]"
] | |
d44bbb217114c0831167824d694d57c29ab86665 | e3f3f911019ac126d01c056eafc7c3183107a5af | /Traffic Sign Detection/all_signs_combined/src/predict.py | 19ed9a428015b625610be9930dfee35938fb451b | [] | no_license | uncctrafficsigndetection/Traffic-Sign-Detection | 595258766f865c4b3c628b002d7b93a774168a9b | 3ff4be52357f4b6340fef94124f8c835ab66fd8a | refs/heads/master | 2020-04-09T20:28:33.910961 | 2018-12-05T21:29:50 | 2018-12-05T21:29:50 | 160,574,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | import numpy as np
# Prediction driver: loads a trained traffic-sign model checkpoint and runs
# inference on one hard-coded image file.
import time
from sample_model import Model
from data_loader import data_loader
from generator import Generator
# Directory holding the TensorFlow checkpoint produced by training.
checkpoint_dir='tf_data/sample_model'
# Absolute path of the single input image to classify.
# NOTE(review): Windows-specific, user-specific path - parameterize before reuse.
X='C:/Users/Karthick/Desktop/cvproject/data/5/00000_00000.ppmspeed_2_.ppm'
# Build the model in inference mode and run prediction from the checkpoint.
M = Model(mode = 'test')
yhat = M.predict(X = X, checkpoint_dir = checkpoint_dir)
# The block below is an abandoned manual checkpoint-restore attempt kept for
# reference; Model.predict() above already handles restoring the session.
# save_dir="C:/Users/Karthick/Desktop/cvproject/speedlimitckp/"
# #saver = tf.train.Saver()
# sess = tf.Session()
# saver = tf.train.import_meta_graph('C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model/model_epoch70.ckpt.meta')
# saver.restore(sess,tf.train.latest_checkpoint('C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model/'))
# #checkpoint_name = tf.train.latest_checkpoint(save_dir)
# #saver.restore(sess, checkpoint_name)
# yhat_numpy = sess.run(yhat, feed_dict = {X : X, keep_prob: 1.0})
# print(yhat_numpy)
# #C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model | [
"[email protected]"
] | |
948e7570c22e3a814efc70ef208bb5769a7b3ba1 | f2568af5aacdb3045f8dd20ec2fd91e395ba57d1 | /createmylvm.py | a2bcdb8ebcc803398d9d04adf65faf10eb88ceef | [] | no_license | shubhambhardwaj007/lvm-automation | e93d9efe61e9951710dc5ee6579ef01d139304e3 | e446f794fc05d1f3dac8e152d428cfc9657b817e | refs/heads/master | 2023-03-26T02:07:51.421192 | 2021-03-27T19:51:46 | 2021-03-27T19:51:46 | 352,161,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py |
import subprocess
def createmylv():
    """Interactively create and mount an LVM logical volume.

    Workflow (all via shell commands, so root privileges are required):
      1. show block devices (lsblk) and ask which to turn into PVs
      2. pvcreate each chosen device, reporting success/failure
      3. vgcreate a volume group over all chosen devices
      4. lvcreate a logical volume of the requested name/size
      5. mkfs.ext4 the new LV and mount it at the requested mountpoint
    """
    print(subprocess.getoutput('lsblk'))
    device = input("Choose the devices for PV separated by space in between : ").split(" ")
    # Create a physical volume on every requested device.
    for i in device:
        pvcreate = subprocess.getstatusoutput("pvcreate {0}".format(i))
        if pvcreate[0] == 0:
            print("{0} pv created".format(i))
        else:
            print("{0} pv failed".format(i))
    vgname = input("Enter VG name: ")
    x = ' '.join(device)
    vgcreate = subprocess.getstatusoutput("vgcreate {0} {1}".format(vgname, x))
    lvname = input("Enter LV name: ")
    size = input("Enter Size of LV: ")
    lvcreate = subprocess.getstatusoutput("lvcreate --size {0} --name {1} {2}".format(size, lvname, vgname))
    mount = input("Enter the mountpoint: ")
    # Format the new LV and mount it; only the mount result is reported.
    formating = subprocess.getstatusoutput("mkfs.ext4 /dev/{0}/{1}".format(vgname, lvname))
    mount_path = subprocess.getstatusoutput("mount /dev/{0}/{1} {2}".format(vgname, lvname, mount))
    if mount_path[0] == 0:
        print("Done")
    else:
        print("Can't mount")


# BUG FIX: the module previously called the undefined name `createlv()`,
# which raised NameError on import. Call the defined function instead, and
# only when executed as a script.
if __name__ == "__main__":
    createmylv()
| [
"[email protected]"
] | |
c7e2d80388cbe425136e01a06bdb2ea24fa604c6 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj9-10_163557.64+341427.0/sdB_sdssj9-10_163557.64+341427.0_coadd.py | 39e21f206956741881cd664d37e0bb5ecdba667f | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from gPhoton.gMap import gMap
def main():
    """Run a gPhoton gMap NUV coadd for target sdssj9-10_163557.64+341427.0.

    Produces a 30 s step count-rate movie cube and a coadded count image
    for a 2'x2' box around the target coordinates.
    """
    target_skypos = [248.990167, 34.240833]
    box_degrees = 0.0333333333333  # 2 arcmin on a side
    movie_file = "/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sdssj9-10_163557.64+341427.0/sdB_sdssj9-10_163557.64+341427.0_movie_count.fits"
    coadd_file = "/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sdssj9-10_163557.64+341427.0/sdB_sdssj9-10_163557.64+341427.0_count_coadd.fits"
    gMap(band="NUV",
         skypos=target_skypos,
         skyrange=[box_degrees, box_degrees],
         stepsz=30.,
         cntfile=movie_file,
         cntcoaddfile=coadd_file,
         overwrite=True,
         verbose=3)

if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
f82a7850addf3773f1ce92a89e4d51f96cf3f763 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_tkdkop_pancake.py | 259ec04a68548d92ceed7f438162fc6b46baa760 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 286 | py | #!/usr/bin/env python
# NOTE: Python 2 script (uses the `print` statement).
# Google Code Jam "Revenge of the Pancakes": each line is a stack of
# happy ('+') / blank ('-') sides; the answer is the minimum number of
# flips to make everything '+'.
import sys
import itertools
# First stdin line is the number of test cases; the loop keeps its own
# counter, so the value itself is only consumed, never used.
m = sys.stdin.readline()
i = 0
for line in sys.stdin.readlines():
    line = line.strip()
    i += 1
    out_str = "Case #%d: " % i
    # Append the goal symbol so a trailing run of '-' still creates one
    # final group boundary; the minimum number of flips equals the number
    # of adjacent-group transitions, i.e. (number of groups) - 1.
    line += '+'
    k = itertools.groupby(line)
    out_str += str(len(list(k))-1)
    print out_str
| [
"[[email protected]]"
] | |
4723c6f7c093e3989d133cebab10e0c13bf512a0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03997/s926418877.py | acd277945016fcae9d48adcc8806653b1aeeec5f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | a,b,c,d=eval('['+'int(input()),'*3+'0]');print((a+b)*c//2) | [
"[email protected]"
] | |
9567422e1472a65046cf8160b1bdae8fbcf7dcd3 | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/types/internal.py | c56c7aa6d7790b4c36d248603f2282e60af08a39 | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Types internal to TensorFlow.
These types should not be exported. External code should not rely on these.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(mdan): Is this strictly needed? Only ops.py really uses it.
class NativeObject(object):
  """Marker base class for types natively supported by various TF operations.

  Subclassing (rather than instantiating) is how a type declares itself
  native. The most notable example of NativeObject is Tensor.
  """
| [
"[email protected]"
] | |
91781778b2e281bd6402914cfd6ee441e7a46194 | fe17c327916695ca3f21c0f9bb85396237be3125 | /DSA_in_python/DSA_BST.py | a8068494dc2cb5cb28703d631d7f490f052d2915 | [] | no_license | tanmay6414/Python | d222255f3b4e60b42c7bed7613f11ef449ebc00e | 54659aebe0ed15f722cd469d10a42cea82f6c7f6 | refs/heads/master | 2021-07-12T18:26:59.590813 | 2020-08-20T08:15:11 | 2020-08-20T08:15:11 | 195,175,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,732 | py | class Node:
    def __init__(self,value):
        """Create a leaf tree node holding *value*; both children start empty."""
        self.left = None    # left child (Node or None)
        self.right = None   # right child (Node or None)
        self.value = value  # payload/key stored in this node
class BST:
    """Binary search tree over Node objects.

    The caller keeps hold of the tree root returned by insert(); self.root
    is only consulted by search(). Duplicate values go to the right subtree.
    """
    def __init__(self):
        self.root = None

    def insert(self, node, value):
        """Insert value into the subtree rooted at node.

        Returns the (possibly new) subtree root, so callers must use
        `node = tree.insert(node, value)` style updates.
        """
        # If the tree is empty, return a new node
        if node is None:
            return Node(value)

        # Otherwise recur down the tree
        if value < node.value:
            node.left = self.insert(node.left, value)
        else:
            node.right = self.insert(node.right, value)

        # return the (unchanged) node pointer
        return node

    def inorder(self, root):
        """Print values in ascending order (left, node, right)."""
        if root:
            self.inorder(root.left)
            print(root.value)
            self.inorder(root.right)

    def preorder(self, root):
        """Print values node-first (node, left, right)."""
        if root:
            print(root.value)
            self.preorder(root.left)
            self.preorder(root.right)

    def postorder(self, root):
        """Print values children-first (left, right, node)."""
        # BUG FIX: the original recursed with preorder() on the right
        # subtree and never visited the left one in postorder order.
        if root:
            self.postorder(root.left)
            self.postorder(root.right)
            print(root.value)

    def minval_node(self, node):
        """Return the node with the smallest value in the given subtree."""
        current = node
        while current.left is not None:
            current = current.left
        return current

    def deleteNode(self, root, value):
        """Delete value from the subtree rooted at root; return new root."""
        if root is None:
            return root
        if value < root.value:
            root.left = self.deleteNode(root.left, value)
        elif value > root.value:
            root.right = self.deleteNode(root.right, value)
        else:
            # Node with a single child (or none): splice in the other side.
            if root.left is None:
                temp = root.right
                root = None
                return temp
            elif root.right is None:
                # BUG FIX: was `temp = root.right`, which discarded the
                # entire left subtree when deleting this node.
                temp = root.left
                root = None
                return temp
            # Two children: replace value with the in-order successor and
            # delete that successor from the right subtree.
            temp = self.minval_node(root.right)
            root.value = temp.value
            root.right = self.deleteNode(root.right, temp.value)
        print(value, " deleted")
        return root

    def search(self, value):
        """Return True if value is stored under self.root, else False."""
        if self.root is not None:
            return self._search(value, self.root)
        else:
            return False

    def _search(self, value, node):
        # BUG FIX: the original discarded the recursive call results, so
        # any value not at the root reported False.
        if value == node.value:
            return True
        elif value < node.value and node.left is not None:
            return self._search(value, node.left)
        elif value > node.value and node.right is not None:
            return self._search(value, node.right)
        return False
# ---- Demo / smoke test for the Node + BST classes defined above ----
print("*"*25, "Delete Node BST", "*"*25)
# The tree root is held in a local variable and threaded through insert().
# NOTE(review): s.root is never assigned, so s.search() always inspects an
# empty tree - confirm whether `s.root = root` was intended here.
root = Node(50)
s = BST()
s.insert(root,40)
s.insert(root,30)
s.insert(root,4)
s.insert(root,78)
# Show the three traversal orders of the freshly built tree.
print("\nInorder :")
s.inorder(root)
print("\nPostorder :")
s.postorder(root)
print("\nPreorder :")
s.preorder(root)
print("\n\tSearch Result :",s.search(50))
print("\n")
# Delete node 30 and print the preorder traversal again to verify removal.
s.deleteNode(root,30)
print("\n")
s.preorder(root)
| [
"[email protected]"
] | |
808ac7632e66327e3f8d1fe634dab41d619f065e | 786de89be635eb21295070a6a3452f3a7fe6712c | /CorAna/tags/V00-00-04/src/ConfigParametersCorAna.py | 8baf5f326ca6758d621cc3f9f8cf43ac75c28720 | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,606 | py | #--------------------------------------------------------------------------
# File and Version Information:
# $Id$
#
# Description:
# Module ConfigParametersCorAna...
#
#------------------------------------------------------------------------
"""Is intended as a storage for configuration parameters for CorAna project.
This software was developed for the LCLS project. If you use all or
part of it, please give an appropriate acknowledgment.
@version $Id: template!python!py 4 2008-10-08 19:27:36Z salnikov $
@author Mikhail S. Dubrovin
"""
#------------------------------
# Module's version from CVS --
#------------------------------
__version__ = "$Revision: 4 $"
# $Source$
#--------------------------------
# Imports of standard modules --
#--------------------------------
import sys
import os
from copy import deepcopy
#-----------------------------
# Imports for other modules --
#-----------------------------
#import ConfigParameters as cpbase
from ConfigParameters import * # ConfigParameters
from Logger import logger
from PyQt4 import QtGui # for icons only...
import AppDataPath as apputils # for icons
#---------------------
# Class definition --
#---------------------
class ConfigParametersCorAna ( ConfigParameters ) :
"""Is intended as a storage for configuration parameters for CorAna project.
#@see BaseClass ConfigParameters
#@see OtherClass Parameters
"""
list_pars = []
    def __init__ ( self, fname=None ) :
        """Constructor.
        @param fname the file name with configuration parameters, if not specified then it will be set to the default value at declaration.
        """
        ConfigParameters.__init__(self)
        # Order matters: parameters must be declared (with defaults) before
        # the file is read, so the file can override the declared values.
        self.declareCorAnaParameters()
        self.readParametersFromFile ( fname )
        # Session-only state and GUI styles are not persisted in the file.
        self.initRunTimeParameters()
        self.defineStyles()
    def initRunTimeParameters( self ) :
        """Initialize session-only state that is never saved to the config file."""
        self.char_expand = u' \u25BE' # down-head triangle
        self.iconsAreLoaded  = False  # lazy-load guard used by setIcons()
        self.plotarray_is_on = False  # True while the array plot window is open
        self.plotg2_is_on    = False  # True while the g2 plot window is open
        self.autoRunStatus   = 0      # 0=inctive, 1=split, 2=process, 3=merge

        #self.plotimgspe = None
        self.plotimgspe_g = None      # handle of the image/spectrum plot widget
#-----------------------------
def setIcons(self) :
if self.iconsAreLoaded : return
self.iconsAreLoaded = True
path_icon_contents = apputils.AppDataPath('CorAna/icons/contents.png').path()
path_icon_mail_forward = apputils.AppDataPath('CorAna/icons/mail-forward.png').path()
path_icon_button_ok = apputils.AppDataPath('CorAna/icons/button_ok.png').path()
path_icon_button_cancel = apputils.AppDataPath('CorAna/icons/button_cancel.png').path()
path_icon_exit = apputils.AppDataPath('CorAna/icons/exit.png').path()
path_icon_home = apputils.AppDataPath('CorAna/icons/home.png').path()
path_icon_redo = apputils.AppDataPath('CorAna/icons/redo.png').path()
path_icon_undo = apputils.AppDataPath('CorAna/icons/undo.png').path()
path_icon_reload = apputils.AppDataPath('CorAna/icons/reload.png').path()
path_icon_save = apputils.AppDataPath('CorAna/icons/save.png').path()
path_icon_save_cfg = apputils.AppDataPath('CorAna/icons/fileexport.png').path()
path_icon_edit = apputils.AppDataPath('CorAna/icons/edit.png').path()
path_icon_browser = apputils.AppDataPath('CorAna/icons/fileopen.png').path()
path_icon_monitor = apputils.AppDataPath('CorAna/icons/icon-monitor.png').path()
path_icon_unknown = apputils.AppDataPath('CorAna/icons/icon-unknown.png').path()
path_icon_logviewer = apputils.AppDataPath('CorAna/icons/logviewer.png').path()
path_icon_locked = apputils.AppDataPath('CorAna/icons/locked-icon.png').path()
path_icon_unlocked = apputils.AppDataPath('CorAna/icons/unlocked-icon.png').path()
self.icon_contents = QtGui.QIcon(path_icon_contents )
self.icon_mail_forward = QtGui.QIcon(path_icon_mail_forward)
self.icon_button_ok = QtGui.QIcon(path_icon_button_ok)
self.icon_button_cancel = QtGui.QIcon(path_icon_button_cancel)
self.icon_exit = QtGui.QIcon(path_icon_exit )
self.icon_home = QtGui.QIcon(path_icon_home )
self.icon_redo = QtGui.QIcon(path_icon_redo )
self.icon_undo = QtGui.QIcon(path_icon_undo )
self.icon_reload = QtGui.QIcon(path_icon_reload )
self.icon_save = QtGui.QIcon(path_icon_save )
self.icon_save_cfg = QtGui.QIcon(path_icon_save_cfg )
self.icon_edit = QtGui.QIcon(path_icon_edit )
self.icon_browser = QtGui.QIcon(path_icon_browser )
self.icon_monitor = QtGui.QIcon(path_icon_monitor )
self.icon_unknown = QtGui.QIcon(path_icon_unknown )
self.icon_logviewer = QtGui.QIcon(path_icon_logviewer)
self.icon_lock = QtGui.QIcon(path_icon_locked )
self.icon_unlock = QtGui.QIcon(path_icon_unlocked )
#base_dir = '/usr/share/icons/Bluecurve/24x24/'
#self.icon_contents = QtGui.QIcon(base_dir + 'actions/contents.png')
#self.icon_mail_forward = QtGui.QIcon(base_dir + '../../gnome/24x24/actions/mail-forward.png')
#self.icon_button_ok = QtGui.QIcon(base_dir + 'actions/button_ok.png')
#self.icon_button_cancel = QtGui.QIcon(base_dir + 'actions/button_cancel.png')
#self.icon_exit = QtGui.QIcon(base_dir + 'actions/exit.png')
#self.icon_home = QtGui.QIcon(base_dir + 'actions/gohome.png')
#self.icon_redo = QtGui.QIcon(base_dir + 'actions/redo.png')
#self.icon_undo = QtGui.QIcon(base_dir + 'actions/undo.png')
#self.icon_reload = QtGui.QIcon(base_dir + 'actions/reload.png')
#self.icon_stop = QtGui.QIcon(base_dir + 'actions/stop.png')
#self.icon_save_cfg = QtGui.QIcon(base_dir + 'actions/fileexport.png')
#self.icon_save = QtGui.QIcon(base_dir + 'stock/stock-save.png')
#self.icon_edit = QtGui.QIcon(base_dir + 'actions/edit.png')
#self.icon_browser = QtGui.QIcon(base_dir + 'actions/fileopen.png')
#self.icon_monitor = QtGui.QIcon(base_dir + 'apps/icon-monitor.png')
#self.icon_unknown = QtGui.QIcon(base_dir + 'apps/icon-unknown.png')
#self.icon_logviewer = QtGui.QIcon(base_dir + '../32x32/apps/logviewer.png')
self.icon_logger = self.icon_edit
self.icon_help = self.icon_unknown
self.icon_reset = self.icon_reload
#-----------------------------
def declareCorAnaParameters( self ) :
# Possible typs for declaration : 'str', 'int', 'long', 'float', 'bool'
# GUIInstrExpRun.py.py
# self.fname_cp = self.declareParameter( name='FNAME_CONFIG_PARS', val_def='confpars.txt', type='str' )
# self.fname_ped = self.declareParameter( name='FNAME_PEDESTALS', val_def='my_ped.txt', type='str' )
# self.fname_dat = self.declareParameter( name='FNAME_DATA', val_def='my_dat.txt', type='str' )
# self.instr_dir = self.declareParameter( name='INSTRUMENT_DIR', val_def='/reg/d/psdm', type='str' )
# self.instr_name = self.declareParameter( name='INSTRUMENT_NAME', val_def='XCS', type='str' )
# self.exp_name = self.declareParameter( name='EXPERIMENT_NAME', val_def='xcsi0112', type='str' )
# self.str_run_number = self.declareParameter( name='RUN_NUMBER', val_def='0015', type='str' )
# self.str_run_number_dark= self.declareParameter( name='RUN_NUMBER_DARK', val_def='0014', type='str' )
# GUIMainTB.py
# GUIMainSplit.py
self.current_tab = self.declareParameter( name='CURRENT_TAB' , val_def='Files', type='str' )
# GUILogger.py
self.log_level = self.declareParameter( name='LOG_LEVEL_OF_MSGS', val_def='info', type='str' )
# GUIFiles.py
self.current_file_tab = self.declareParameter( name='CURRENT_FILE_TAB' , val_def='Work/Results', type='str' )
# GUIRun.py
self.current_run_tab = self.declareParameter( name='CURRENT_RUN_TAB' , val_def='Input', type='str' )
# GUIWorkResDirs.py
self.dir_work = self.declareParameter( name='DIRECTORY_WORK', val_def='./work', type='str' )
self.dir_results = self.declareParameter( name='DIRECTORY_RESULTS', val_def='./results', type='str' )
self.fname_prefix = self.declareParameter( name='FILE_NAME_PREFIX', val_def='cora-', type='str' )
self.fname_prefix_cora = self.declareParameter( name='FILE_NAME_PREFIX_CORA', val_def='cora-proc', type='str' )
# GUIDark.py
self.use_dark_xtc_all = self.declareParameter( name='USE_DARK_XTC_ALL_CHUNKS', val_def=True, type='bool' )
self.in_dir_dark = self.declareParameter( name='IN_DIRECTORY_DARK', val_def='/reg/d/psdm/XCS/xcsi0112/xtc',type='str' )
self.in_file_dark = self.declareParameter( name='IN_FILE_NAME_DARK', val_def='e167-r0020-s00-c00.xtc',type='str' )
# GUIFlatField.py
self.ccdcorr_flatfield = self.declareParameter( name='CCD_CORRECTION_FLATFIELD', val_def=False, type='bool' )
self.dname_flat = self.declareParameter( name='DIRECTORY_FLAT', val_def='.',type='str' )
self.fname_flat = self.declareParameter( name='FILE_NAME_FLAT', val_def='flat_field.txt',type='str' )
#self.in_dir_flat = self.declareParameter( name='IN_DIRECTORY_FLAT', val_def='/reg/d/psdm/XCS/xcsi0112/xtc',type='str' )
#self.in_file_flat = self.declareParameter( name='IN_FILE_NAME_FLAT', val_def='e167-r0020-s00-c00.xtc',type='str' )
# GUIBlemish.py
self.ccdcorr_blemish = self.declareParameter( name='CCD_CORRECTION_BLEMISH', val_def=False, type='bool' )
self.dname_blem = self.declareParameter( name='DIRECTORY_BLEM', val_def='.',type='str' )
self.fname_blem = self.declareParameter( name='FILE_NAME_BLEM', val_def='blemish.txt',type='str' )
#self.in_dir_blem = self.declareParameter( name='IN_DIRECTORY_BLEM', val_def='/reg/d/psdm/XCS/xcsi0112/xtc',type='str' )
#self.in_file_blem = self.declareParameter( name='IN_FILE_NAME_BLEM', val_def='e167-r0020-s00-c00.xtc',type='str' )
# GUIData.py
self.use_data_xtc_all = self.declareParameter( name='USE_DATA_XTC_ALL_CHUNKS', val_def=True, type='bool' )
self.is_active_data_gui = self.declareParameter( name='IS_ACTIVE_DATA_GUI', val_def=True, type='bool' )
self.in_dir_data = self.declareParameter( name='IN_DIRECTORY_DATA', val_def='/reg/d/psdm/XCS/xcsi0112/xtc',type='str' )
self.in_file_data = self.declareParameter( name='IN_FILE_NAME_DATA', val_def='e167-r0020-s00-c00.xtc',type='str' )
# GUISetupBeamZero.py
self.x_coord_beam0 = self.declareParameter( name='X_COORDINATE_BEAM_ZERO', val_def=1234.5, type='float' )
self.y_coord_beam0 = self.declareParameter( name='Y_COORDINATE_BEAM_ZERO', val_def=1216.5, type='float' )
self.x0_pos_in_beam0 = self.declareParameter( name='X_CCD_POS_IN_BEAM_ZERO', val_def=-59, type='float' )
self.y0_pos_in_beam0 = self.declareParameter( name='Y_CCD_POS_IN_BEAM_ZERO', val_def=175, type='float' )
# GUISetupSpecular.py
self.x_coord_specular = self.declareParameter( name='X_COORDINATE_SPECULAR', val_def=-1, type='float' )
self.y_coord_specular = self.declareParameter( name='Y_COORDINATE_SPECULAR', val_def=-2, type='float' )
self.x0_pos_in_specular = self.declareParameter( name='X_CCD_POS_IN_SPECULAR', val_def=-3, type='float' )
self.y0_pos_in_specular = self.declareParameter( name='Y_CCD_POS_IN_SPECULAR', val_def=-4, type='float' )
# GUISetupData.py
self.x0_pos_in_data = self.declareParameter( name='X_CCD_POS_IN_DATA', val_def=-51, type='float' )
self.y0_pos_in_data = self.declareParameter( name='Y_CCD_POS_IN_DATA', val_def=183, type='float' )
# GUISetupInfoLeft.py
self.sample_det_dist = self.declareParameter( name='SAMPLE_TO_DETECTOR_DISTANCE', val_def=4000.1, type='float' )
self.exp_setup_geom = self.declareParameter( name='EXP_SETUP_GEOMETRY', val_def='Baem Zero', type='str' )
self.photon_energy = self.declareParameter( name='PHOTON_ENERGY', val_def=7.6543, type='float' )
self.nominal_angle = self.declareParameter( name='NOMINAL_ANGLE', val_def=-1, type='float' )
self.real_angle = self.declareParameter( name='REAL_ANGLE', val_def=-1, type='float' )
# GUIImgSizePosition.py
self.col_begin = self.declareParameter( name='IMG_COL_BEGIN', val_def=0, type='int' )
self.col_end = self.declareParameter( name='IMG_COL_END', val_def=1339, type='int' )
self.row_begin = self.declareParameter( name='IMG_ROW_BEGIN', val_def=1, type='int' )
self.row_end = self.declareParameter( name='IMG_ROW_END', val_def=1299, type='int' )
# GUIKineticMode.py
self.kin_mode = self.declareParameter( name='KINETICS_MODE', val_def='Non-Kinetics',type='str' )
self.kin_win_size = self.declareParameter( name='KINETICS_WIN_SIZE', val_def=1, type='int' )
self.kin_top_row = self.declareParameter( name='KINETICS_TOP_ROW', val_def=2, type='int' )
self.kin_slice_first = self.declareParameter( name='KINETICS_SLICE_FIRST', val_def=3, type='int' )
self.kin_slice_last = self.declareParameter( name='KINETICS_SLICE_LAST', val_def=4, type='int' )
# GUISetupPars.py
self.bat_num = self.declareParameter( name='BATCH_NUM', val_def= 1, type='int' )
self.bat_num_max = self.declareParameter( name='BATCH_NUM_MAX', val_def= 9, type='int' )
#self.bat_data_is_used = self.declareParameter( name='BATCH_DATA_IS_USED', val_def=True, type='bool' )
self.bat_data_start = self.declareParameter( name='BATCH_DATA_START', val_def= 1, type='int' )
self.bat_data_end = self.declareParameter( name='BATCH_DATA_END' , val_def=-1, type='int' )
self.bat_data_total = self.declareParameter( name='BATCH_DATA_TOTAL', val_def=-1, type='int' )
self.bat_data_time = self.declareParameter( name='BATCH_DATA_TIME' , val_def=-1.0, type='float' )
self.bat_data_dt_ave = self.declareParameter( name='BATCH_DATA_DT_AVE', val_def=-1.0, type='float' )
self.bat_data_dt_rms = self.declareParameter( name='BATCH_DATA_DT_RMS', val_def=0.0, type='float' )
self.bat_dark_is_used = self.declareParameter( name='BATCH_DARK_IS_USED', val_def=True, type='bool' )
self.bat_dark_start = self.declareParameter( name='BATCH_DARK_START', val_def= 1, type='int' )
self.bat_dark_end = self.declareParameter( name='BATCH_DARK_END' , val_def=-1, type='int' )
self.bat_dark_total = self.declareParameter( name='BATCH_DARK_TOTAL', val_def=-1, type='int' )
self.bat_dark_time = self.declareParameter( name='BATCH_DARK_TIME' , val_def=-1.0, type='float' )
self.bat_dark_dt_ave = self.declareParameter( name='BATCH_DARK_DT_AVE', val_def=-1.0, type='float' )
self.bat_dark_dt_rms = self.declareParameter( name='BATCH_DARK_DT_RMS', val_def=0.0, type='float' )
#self.bat_flat_is_used = self.declareParameter( name='BATCH_FLAT_IS_USED', val_def=True, type='bool' )
self.bat_flat_start = self.declareParameter( name='BATCH_FLAT_START', val_def= 1, type='int' )
self.bat_flat_end = self.declareParameter( name='BATCH_FLAT_END' , val_def=-1, type='int' )
self.bat_flat_total = self.declareParameter( name='BATCH_FLAT_TOTAL', val_def=-1, type='int' )
self.bat_flat_time = self.declareParameter( name='BATCH_FLAT_TIME' , val_def=-1.0, type='float' )
self.bat_queue = self.declareParameter( name='BATCH_QUEUE', val_def='psfehq', type='str' )
self.bat_det_info = self.declareParameter( name='BATCH_DET_INFO', val_def='DetInfo(:Princeton)', type='str' )
#self.bat_det_info = self.declareParameter( name='BATCH_DET_INFO', val_def='DetInfo(XcsBeamline.0:Princeton.0)', type='str' )
self.bat_img_rec_mod = self.declareParameter( name='BATCH_IMG_REC_MODULE', val_def='ImgAlgos.PrincetonImageProducer', type='str' )
# BatchLogParser.py
self.bat_img_rows = self.declareParameter( name='BATCH_IMG_ROWS', val_def= -1, type='int' )
self.bat_img_cols = self.declareParameter( name='BATCH_IMG_COLS', val_def= -1, type='int' )
self.bat_img_size = self.declareParameter( name='BATCH_IMG_SIZE', val_def= -1, type='int' )
self.bat_img_nparts = self.declareParameter( name='BATCH_IMG_NPARTS', val_def= 8, type='int' )
# GUIAnaSettingsLeft.py
self.ana_type = self.declareParameter( name='ANA_TYPE', val_def='Static',type='str' )
self.ana_stat_meth_q = self.declareParameter( name='ANA_STATIC_METHOD_Q', val_def='evenly-spaced',type='str' )
self.ana_stat_meth_phi = self.declareParameter( name='ANA_STATIC_METHOD_PHI', val_def='evenly-spaced',type='str' )
self.ana_dyna_meth_q = self.declareParameter( name='ANA_DYNAMIC_METHOD_Q', val_def='evenly-spaced',type='str' )
self.ana_dyna_meth_phi = self.declareParameter( name='ANA_DYNAMIC_METHOD_PHI', val_def='evenly-spaced',type='str' )
self.ana_stat_part_q = self.declareParameter( name='ANA_STATIC_PARTITION_Q', val_def='1',type='str' )
self.ana_stat_part_phi = self.declareParameter( name='ANA_STATIC_PARTITION_PHI', val_def='2',type='str' )
self.ana_dyna_part_q = self.declareParameter( name='ANA_DYNAMIC_PARTITION_Q', val_def='3',type='str' )
self.ana_dyna_part_phi = self.declareParameter( name='ANA_DYNAMIC_PARTITION_PHI', val_def='4',type='str' )
self.ana_mask_type = self.declareParameter( name='ANA_MASK_TYPE', val_def='no-mask',type='str' )
self.ana_mask_fname = self.declareParameter( name='ANA_MASK_FILE', val_def='./roi-mask.txt',type='str' )
self.ana_mask_dname = self.declareParameter( name='ANA_MASK_DIRECTORY', val_def='.',type='str' )
# GUIAnaSettingsRight.py
self.ana_ndelays = self.declareParameter( name='ANA_NDELAYS_PER_MTAU_LEVEL', val_def=4, type='int' )
self.ana_nslice_delays = self.declareParameter( name='ANA_NSLICE_DELAYS_PER_MTAU_LEVEL', val_def=4, type='int' )
self.ana_npix_to_smooth= self.declareParameter( name='ANA_NPIXELS_TO_SMOOTH', val_def=100, type='int' )
self.ana_smooth_norm = self.declareParameter( name='ANA_SMOOTH_SYM_NORM', val_def=False, type='bool' )
self.ana_two_corfuns = self.declareParameter( name='ANA_TWO_TIME_CORFUNS_CONTROL', val_def=False, type='bool' )
self.ana_spec_stab = self.declareParameter( name='ANA_CHECK_SPECKLE_STABILITY', val_def=False, type='bool' )
self.lld_type = self.declareParameter( name='LOW_LEVEL_DISC_TYPE', val_def='NONE',type='str' )
self.lld_adu = self.declareParameter( name='LOW_LEVEL_DISC_ADU', val_def=15, type='float' )
self.lld_rms = self.declareParameter( name='LOW_LEVEL_DISC_RMS', val_def=4, type='float' )
self.res_ascii_out = self.declareParameter( name='RES_ASCII_OUTPUT', val_def=True, type='bool' )
self.res_fit1 = self.declareParameter( name='RES_PERFORM_FIT1', val_def=False, type='bool' )
self.res_fit2 = self.declareParameter( name='RES_PERFORM_FIT1', val_def=False, type='bool' )
self.res_fit_cust = self.declareParameter( name='RES_PERFORM_FIT_CUSTOM', val_def=False, type='bool' )
self.res_png_out = self.declareParameter( name='RES_PNG_FILES', val_def=False, type='bool' )
self.res_save_log = self.declareParameter( name='RES_SAVE_LOG_FILE', val_def=False, type='bool' )
# GUILoadResults.py
self.res_load_mode = self.declareParameter( name='RES_LOAD_MODE', val_def='NONE',type='str' )
self.res_fname = self.declareParameter( name='RES_LOAD_FNAME', val_def='NONE',type='str' )
# GUISystemSettingsRight.py
self.thickness_type = self.declareParameter( name='THICKNESS_TYPE', val_def='NONORM',type='str' )
self.thickness_sample = self.declareParameter( name='THICKNESS_OF_SAMPLE', val_def=-1, type='float' )
self.thickness_attlen = self.declareParameter( name='THICKNESS_ATTENUATION_LENGTH', val_def=-2, type='float' )
self.ccd_orient = self.declareParameter( name='CCD_ORIENTATION', val_def='180', type='str' )
self.y_is_flip = self.declareParameter( name='Y_IS_FLIPPED', val_def='True', type='bool' )
# GUICCDSettings.py
self.ccdset_pixsize = self.declareParameter( name='CCD_SETTINGS_PIXEL_SIZE', val_def=0.1, type='float' )
self.ccdset_adcsatu = self.declareParameter( name='CCD_SETTINGS_ADC_SATTURATION', val_def=12345, type='int' )
self.ccdset_aduphot = self.declareParameter( name='CCD_SETTINGS_ADU_PER_PHOTON', val_def=123, type='float' )
self.ccdset_ccdeff = self.declareParameter( name='CCD_SETTINGS_EFFICIENCY', val_def=0.55, type='float' )
self.ccdset_ccdgain = self.declareParameter( name='CCD_SETTINGS_GAIN', val_def=0.8, type='float' )
# GUIELogPostingDialog.py
# GUIELogPostingFields.py
#self.elog_post_cbx_state = self.declareParameter( name='ELOG_POST_CBX_STATE', val_def=True, type='bool' )
self.elog_post_rad = self.declareParameter( name='ELOG_POST_RAD_STATE', val_def='Default', type='str' )
self.elog_post_ins = self.declareParameter( name='ELOG_POST_INSTRUMENT', val_def='AMO', type='str' )
self.elog_post_exp = self.declareParameter( name='ELOG_POST_EXPERIMENT', val_def='amodaq09', type='str' )
self.elog_post_run = self.declareParameter( name='ELOG_POST_RUN', val_def='825', type='str' )
self.elog_post_tag = self.declareParameter( name='ELOG_POST_TAG', val_def='TAG1', type='str' )
self.elog_post_res = self.declareParameter( name='ELOG_POST_RESPONCE', val_def='None', type='str' )
self.elog_post_msg = self.declareParameter( name='ELOG_POST_MESSAGE', val_def='EMPTY MSG', type='str' )
self.elog_post_att = self.declareParameter( name='ELOG_POST_ATTACHED_FILE', val_def='None', type='str' )
#GUIViewControl.py
self.vc_cbx_show_more = self.declareParameter( name='SHOW_MORE_BUTTONS', val_def=True, type='bool' )
#-----------------------------
imon_names = [ ('BldInfo(FEEGasDetEnergy)', None ,'str'), \
('BldInfo(XCS-IPM-02)', None ,'str'), \
('BldInfo(XCS-IPM-mono)', None ,'str'), \
('DetInfo(XcsBeamline.1:Ipimb.4)', None ,'str'), \
('DetInfo(XcsBeamline.1:Ipimb.5)', None ,'str') ]
self.imon_name_list = self.declareListOfPars( 'IMON_NAMES', imon_names )
#-----------------------------
imon_short_names = [ ('FEEGasDetEnergy', None ,'str'), \
('XCS-IPM-02', None ,'str'), \
('XCS-IPM-mono', None ,'str'), \
('Ipimb.4', None ,'str'), \
('Ipimb.5', None ,'str') ]
self.imon_short_name_list = self.declareListOfPars( 'IMON_SHORT_NAMES', imon_short_names )
#-----------------------------
imon_cbxs = [ (True, True ,'bool'), \
(True, True ,'bool'), \
(True, True ,'bool'), \
(True, True ,'bool'), \
(True, True ,'bool') ]
self.imon_ch1_list = self.declareListOfPars( 'IMON_CH1', deepcopy(imon_cbxs) )
self.imon_ch2_list = self.declareListOfPars( 'IMON_CH2', deepcopy(imon_cbxs) )
self.imon_ch3_list = self.declareListOfPars( 'IMON_CH3', deepcopy(imon_cbxs) )
self.imon_ch4_list = self.declareListOfPars( 'IMON_CH4', deepcopy(imon_cbxs) )
#-----------------------------
imon_norm_cbx = [ (False, False ,'bool'), \
(False, False ,'bool'), \
(False, False ,'bool'), \
(False, False ,'bool'), \
(False, False ,'bool') ]
self.imon_norm_cbx_list = self.declareListOfPars( 'IMON_NORM_CBX', imon_norm_cbx )
#-----------------------------
imon_sele_cbx = [ (False, False ,'bool'), \
(False, False ,'bool'), \
(False, False ,'bool'), \
(False, False ,'bool'), \
(False, False ,'bool') ]
self.imon_sele_cbx_list = self.declareListOfPars( 'IMON_SELE_CBX', imon_sele_cbx )
#-----------------------------
imon_sele_min = [ (-1., -1. ,'float'), \
(-1., -1. ,'float'), \
(-1., -1. ,'float'), \
(-1., -1. ,'float'), \
(-1., -1. ,'float') ]
self.imon_sele_min_list = self.declareListOfPars( 'IMON_SELE_MIN', imon_sele_min )
#-----------------------------
imon_sele_max = [ (-1., -1. ,'float'), \
(-1., -1. ,'float'), \
(-1., -1. ,'float'), \
(-1., -1. ,'float'), \
(-1., -1. ,'float') ]
self.imon_sele_max_list = self.declareListOfPars( 'IMON_SELE_MAX', imon_sele_max )
#-----------------------------
self.imon_pars_list = zip( self.imon_name_list,
self.imon_ch1_list,
self.imon_ch2_list,
self.imon_ch3_list,
self.imon_ch4_list,
self.imon_norm_cbx_list,
self.imon_sele_cbx_list,
self.imon_sele_min_list,
self.imon_sele_max_list,
self.imon_short_name_list )
#print self.imon_pars_list
#-----------------------------
def defineStyles( self ) :
self.styleYellowish = "background-color: rgb(255, 255, 220); color: rgb(0, 0, 0);" # Yellowish
self.stylePink = "background-color: rgb(255, 200, 220); color: rgb(0, 0, 0);" # Pinkish
self.styleYellowBkg = "background-color: rgb(255, 255, 120); color: rgb(0, 0, 0);" # Pinkish
self.styleGray = "background-color: rgb(230, 240, 230); color: rgb(0, 0, 0);" # Gray
self.styleGreenish = "background-color: rgb(100, 255, 200); color: rgb(0, 0, 0);" # Greenish
self.styleGreenPure = "background-color: rgb(150, 255, 150); color: rgb(0, 0, 0);" # Green
self.styleBluish = "background-color: rgb(200, 200, 255); color: rgb(0, 0, 0);" # Bluish
self.styleWhite = "background-color: rgb(255, 255, 255); color: rgb(0, 0, 0);"
self.styleRedBkgd = "background-color: rgb(255, 0, 0); color: rgb(0, 0, 0);" # Red background
#self.styleTitle = "background-color: rgb(239, 235, 231, 255); color: rgb(100, 160, 100);" # Gray bkgd
#self.styleTitle = "color: rgb(150, 160, 100);"
self.styleBlue = "color: rgb(000, 000, 255);"
self.styleBuriy = "color: rgb(150, 100, 50);"
self.styleRed = "color: rgb(255, 0, 0);"
self.styleGreen = "color: rgb(0, 150, 0);"
self.styleYellow = "color: rgb(0, 150, 150);"
self.styleBkgd = self.styleYellowish
self.styleTitle = self.styleBuriy
self.styleLabel = self.styleBlue
self.styleEdit = self.styleWhite
self.styleEditInfo = self.styleGreenish
self.styleEditBad = self.styleRedBkgd
self.styleButton = self.styleGray
self.styleButtonOn = self.styleBluish
self.styleButtonClose = self.stylePink
self.styleButtonWarning= self.styleYellowBkg
self.styleButtonGood = self.styleGreenPure
self.styleButtonBad = self.stylePink
self.styleBox = self.styleGray
self.styleCBox = self.styleYellowish
self.styleStatusGood = self.styleGreen
self.styleStatusWarning= self.styleYellow
self.styleStatusAlarm = self.styleRed
self.styleTitleBold = self.styleTitle + 'font-size: 18pt; font-family: Courier; font-weight: bold;'
self.styleWhiteFixed = self.styleWhite + 'font-family: Fixed;'
self.colorEditInfo = QtGui.QColor(100, 255, 200)
self.colorEditBad = QtGui.QColor(255, 0, 0)
self.colorEdit = QtGui.QColor('white')
def printParsDirectly( self ) :
logger.info('Direct use of parameter:' + self.fname_cp .name() + ' ' + self.fname_cp .value(), __name__ )
logger.info('Direct use of parameter:' + self.fname_ped.name() + ' ' + self.fname_ped.value(), __name__ )
logger.info('Direct use of parameter:' + self.fname_dat.name() + ' ' + self.fname_dat.value(), __name__ )
#-----------------------------
confpars = ConfigParametersCorAna (fname=getConfigFileFromInput())
#-----------------------------
#
# In case someone decides to run this module
#
if __name__ == "__main__" :
confpars.printParameters()
#confpars.printParsDirectly()
confpars.saveParametersInFile()
confpars.printListOfPars('IMON_NAMES')
sys.exit ( 'End of test for ConfigParametersCorAna' )
#-----------------------------
| [
"[email protected]@b967ad99-d558-0410-b138-e0f6c56caec7"
] | [email protected]@b967ad99-d558-0410-b138-e0f6c56caec7 |
becaebfd57de87517f83fb188ffe1860ee44300a | f08c79663074bfd104135e1347f3228b29620d24 | /csrt.py | 6da5c8ba236a0d1428f0aadc2f3e058f81921930 | [] | no_license | battcheeks/Computer-Vision | 140e3d0a3b20cba637b275dc6d7ebc5f413a2e31 | ffa8f277312fc4553e25db09a6f53a107d7f4d41 | refs/heads/master | 2022-11-10T19:33:31.721963 | 2020-06-27T09:54:15 | 2020-06-27T09:54:15 | 275,339,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2
global a,b
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
help="OpenCV object tracker type")
args = vars(ap.parse_args())
(major, minor) = cv2.__version__.split(".")[:2]
if int(major) == 3 and int(minor) < 3:
tracker = cv2.Tracker_create(args["tracker"].upper())
else:
OPENCV_OBJECT_TRACKERS = {
"csrt": cv2.TrackerCSRT_create,
"kcf": cv2.TrackerKCF_create,
"boosting": cv2.TrackerBoosting_create,
"mil": cv2.TrackerMIL_create,
"tld": cv2.TrackerTLD_create,
"medianflow": cv2.TrackerMedianFlow_create,
"mosse": cv2.TrackerMOSSE_create
}
tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
initBB = None
if not args.get("video", False):
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(1.0)
else:
vs = cv2.VideoCapture(args["video"])
fps = None
# loop over frames from the video stream
while True:
# grab the current frame, then handle if we are using a
# VideoStream or VideoCapture object
frame = vs.read()
frame = frame[1] if args.get("video", False) else frame
# check to see if we have reached the end of the stream
if frame is None:
break
frame = imutils.resize(frame, width=500)
(H, W) = frame.shape[:2]
# check to see if we are currently tracking an object
if initBB is not None:
(success, box) = tracker.update(frame)
if success:
(x, y, w, h) = [int(v) for v in box]
cv2.rectangle(frame, (x, y), (x + w, y + h),
(0, 255, 0), 2)
print(str(x+w/2)+","+str(y+h/2))
a=str(x+w/2)
b=str(y+h/2)
# update the FPS counter
fps.update()
fps.stop()
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("s"):
initBB = cv2.selectROI("Frame", frame, fromCenter=False,
showCrosshair=True)
tracker.init(frame, initBB)
fps = FPS().start()
elif key == ord("q"):
break
if not args.get("video", False):
vs.stop()
else:
vs.release()
cv2.destroyAllWindows() | [
"[email protected]"
] | |
28e7dee0700c6fe42c004b939fcaa2b9ff69d27e | eb64b799ff1d7ef3a244bf8e6f9f4e9118d5cfcd | /homeassistant/components/trafikverket_weatherstation/const.py | 7bb53dc5356a0b8a392104982912658806275659 | [
"Apache-2.0"
] | permissive | JeffLIrion/home-assistant | 53966b81b5d5816679f12fc761f79e8777c738d6 | 8f4ec89be6c2505d8a59eee44de335abe308ac9f | refs/heads/dev | 2023-08-22T09:42:02.399277 | 2022-02-16T01:26:13 | 2022-02-16T01:26:13 | 136,679,169 | 5 | 2 | Apache-2.0 | 2023-09-13T06:59:25 | 2018-06-09T00:58:35 | Python | UTF-8 | Python | false | false | 466 | py | """Adds constants for Trafikverket Weather integration."""
from homeassistant.const import Platform
DOMAIN = "trafikverket_weatherstation"
CONF_STATION = "station"
PLATFORMS = [Platform.SENSOR]
ATTRIBUTION = "Data provided by Trafikverket"
ATTR_MEASURE_TIME = "measure_time"
ATTR_ACTIVE = "active"
NONE_IS_ZERO_SENSORS = {
"air_temp",
"road_temp",
"wind_direction",
"wind_speed",
"wind_speed_max",
"humidity",
"precipitation_amount",
}
| [
"[email protected]"
] | |
d3e3b20b1ce012f78bbc61c3eb7dc31075d016ca | c9094a4ed256260bc026514a00f93f0b09a5d60c | /tests/components/accuweather/test_system_health.py | 749f516e44c748caf05503460e8a72ec34d085d3 | [
"Apache-2.0"
] | permissive | turbokongen/home-assistant | 824bc4704906ec0057f3ebd6d92788e096431f56 | 4ab0151fb1cbefb31def23ba850e197da0a5027f | refs/heads/dev | 2023-03-12T05:49:44.508713 | 2021-02-17T14:06:16 | 2021-02-17T14:06:16 | 50,231,140 | 4 | 1 | Apache-2.0 | 2023-02-22T06:14:30 | 2016-01-23T08:55:09 | Python | UTF-8 | Python | false | false | 1,785 | py | """Test AccuWeather system health."""
import asyncio
from unittest.mock import Mock
from aiohttp import ClientError
from homeassistant.components.accuweather.const import COORDINATOR, DOMAIN
from homeassistant.setup import async_setup_component
from tests.common import get_system_health_info
async def test_accuweather_system_health(hass, aioclient_mock):
"""Test AccuWeather system health."""
aioclient_mock.get("https://dataservice.accuweather.com/", text="")
hass.config.components.add(DOMAIN)
assert await async_setup_component(hass, "system_health", {})
hass.data[DOMAIN] = {}
hass.data[DOMAIN]["0123xyz"] = {}
hass.data[DOMAIN]["0123xyz"][COORDINATOR] = Mock(
accuweather=Mock(requests_remaining="42")
)
info = await get_system_health_info(hass, DOMAIN)
for key, val in info.items():
if asyncio.iscoroutine(val):
info[key] = await val
assert info == {
"can_reach_server": "ok",
"remaining_requests": "42",
}
async def test_accuweather_system_health_fail(hass, aioclient_mock):
"""Test AccuWeather system health."""
aioclient_mock.get("https://dataservice.accuweather.com/", exc=ClientError)
hass.config.components.add(DOMAIN)
assert await async_setup_component(hass, "system_health", {})
hass.data[DOMAIN] = {}
hass.data[DOMAIN]["0123xyz"] = {}
hass.data[DOMAIN]["0123xyz"][COORDINATOR] = Mock(
accuweather=Mock(requests_remaining="0")
)
info = await get_system_health_info(hass, DOMAIN)
for key, val in info.items():
if asyncio.iscoroutine(val):
info[key] = await val
assert info == {
"can_reach_server": {"type": "failed", "error": "unreachable"},
"remaining_requests": "0",
}
| [
"[email protected]"
] | |
1b32ea37e4c7f6126f63d235f5bc196330d2dc7e | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /dimensions_of_motion/geometry.py | d7a317cb08a95e69785f8cd0af032ae5db8a1f29 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 7,466 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Functions for sampling and warping images.
We use texture coordinates to represent points and offsets in images. They go
from (0,0) in the top-left corner of an image to (1,1) in the bottom right. It
is convenient to work with these coordinates rather than counts of pixels,
because they are resolution-independent.
"""
import tensorflow as tf
import tensorflow_addons as tfa
import utils
def check_input_shape(name, tensor, axis, value):
"""Utility function for checking tensor shapes."""
shape = tensor.shape.as_list()
if shape[axis] != value:
raise ValueError('Input "%s": dimension %d should be %s. Shape = %s' %
(name, axis, value, shape))
def pixel_center_grid(height, width):
"""Produce a grid of (x,y) texture-coordinate pairs of pixel centers.
Args:
height: (integer) height, not a tensor
width: (integer) width, not a tensor
Returns:
A tensor of shape [height, width, 2] where each entry gives the (x,y)
texture coordinates of the corresponding pixel center. For example, for
pixel_center_grid(2, 3) the result is:
[[[1/6, 1/4], [3/6, 1/4], [5/6, 1/4]],
[[1/6, 3/4], [3/6, 3/4], [5/6, 3/4]]]
"""
height_float = tf.cast(height, dtype=tf.float32)
width_float = tf.cast(width, dtype=tf.float32)
ys = tf.linspace(0.5 / height_float, 1.0 - 0.5 / height_float, height)
xs = tf.linspace(0.5 / width_float, 1.0 - 0.5 / width_float, width)
xs, ys = tf.meshgrid(xs, ys)
grid = tf.stack([xs, ys], axis=-1)
assert grid.shape.as_list() == [height, width, 2]
return grid
def sample_image(image, coords):
"""Sample points from an image, using bilinear filtering.
Args:
image: [B0, ..., Bn-1, height, width, channels] image data
coords: [B0, ..., Bn-1, ..., 2] (x,y) texture coordinates
Returns:
[B0, ..., Bn-1, ..., channels] image data, in which each value is sampled
with bilinear interpolation from the image at position indicated by the
(x,y) texture coordinates. The image and coords parameters must have
matching batch dimensions B0, ..., Bn-1.
Raises:
ValueError: if shapes are incompatible.
"""
check_input_shape('coords', coords, -1, 2)
tfshape = tf.shape(image)[-3:-1]
height = tf.cast(tfshape[0], dtype=tf.float32)
width = tf.cast(tfshape[1], dtype=tf.float32)
# Resampler expects coordinates where (0,0) is the center of the top-left
# pixel and (width-1, height-1) is the center of the bottom-right pixel.
pixel_coords = coords * [width, height] - 0.5
# tfa.image.resampler only works with exactly one batch dimension, i.e. it
# expects image to be [batch, height, width, channels] and pixel_coords to be
# [batch, ..., 2]. So we need to reshape, perform the resampling, and then
# reshape back to what we had.
batch_dims = len(image.shape.as_list()) - 3
assert (image.shape.as_list()[:batch_dims] == pixel_coords.shape.as_list()
[:batch_dims])
batched_image, _ = utils.flatten_batch(image, batch_dims)
batched_coords, unflatten_coords = utils.flatten_batch(
pixel_coords, batch_dims)
resampled = tfa.image.resampler(batched_image, batched_coords)
# Convert back to the right shape to return
resampled = unflatten_coords(resampled)
return resampled
def bilinear_forward_warp(image, coords, weights=None):
"""Forward warp each point in an image using bilinear filtering.
This is a sort of reverse of sample_image, in the sense that scatter is the
reverse of gather. A new image is generated of the same size as the input, in
which each pixel has been splatted onto the 2x2 block containing the
corresponding coordinates, using bilinear weights (multiplied with the input
per-pixel weights, if supplied). Thus if two or more pixels warp to the same
point, the result will be a blend of the their values. If no pixels warp to a
location, the result at that location will be zero.
Args:
image: [B0, ..., Bn-1, height, width, channels] image data
coords: [B0, ..., Bn-1, height, width, 2] (x,y) texture coordinates
weights: [B0, ... ,Bn-1, height, width] weights for each point. If omitted,
all points are weighed equally. Use this to implement, for example, soft
z-buffering.
Returns:
[B0, ..., Bn-1, ..., channels] image data, in which each point in the
input image has been moved to the position indicated by the corresponding
(x,y) texture coordinates. The image and coords parameters must have
matching batch dimensions B0, ..., Bn-1.
"""
# Forward-warp computed using the gradient of reverse-warp. We use a dummy
# image of the right size for reverse-warping. An extra channel is used to
# accumulate the total weight for each pixel which we'll then divide by.
image_and_ones = tf.concat([image, tf.ones_like(image[Ellipsis, -1:])], axis=-1)
dummy = tf.zeros_like(image_and_ones)
if weights is None:
weighted_image = image_and_ones
else:
weighted_image = image_and_ones * weights[Ellipsis, tf.newaxis]
with tf.GradientTape(watch_accessed_variables=False) as g:
g.watch(dummy)
reverse = tf.reduce_sum(
sample_image(dummy, coords) * weighted_image, [-3, -2])
grads = g.gradient(reverse, dummy)
rgb = grads[Ellipsis, :-1]
total = grads[Ellipsis, -1:]
result = tf.math.divide_no_nan(rgb, total)
return result
def flow_warp(image, flow):
"""Warp images by resampling according to flow vectors.
Args:
image: [..., H, W, C] images
flow: [..., H, W, 2] (x, y) texture offsets
Returns:
[..., H, W, C] resampled images. Each pixel in each output image has been
bilinearly sampled from the corresponding pixel in its input image plus
the (x, y) flow vector. The flow vectors are texture coordinate offsets,
e.g. (1, 1) is an offset of the whole width and height of the image.
Sampling outside the image yields zero values.
"""
width = image.shape.as_list()[-2]
height = image.shape.as_list()[-3]
grid = pixel_center_grid(height, width)
coords = grid + flow
return sample_image(image, coords)
def flow_forward_warp(image, flow):
"""Forward-warp images according to flow vectors.
Args:
image: [..., H, W, C] images
flow: [..., H, W, 2] (x, y) texture offsets
Returns:
[..., H, W, C] warped images. Each pixel in each image is offset according
to the corresponding value in the flow, and splatted onto a 2x2 pixel block.
(See bilinear_forward_warp for details.) If no points warp to a location,
the result will be zero. The flow vectors are texture coordinate offsets,
e.g. (1, 1) is an offset of the whole width and height of the image.
"""
width = image.shape.as_list()[-2]
height = image.shape.as_list()[-3]
grid = pixel_center_grid(height, width)
coords = grid + flow
return bilinear_forward_warp(image, coords)
| [
"[email protected]"
] | |
6c67dfbe348126447354bd125a22c8c109b0ab15 | a6bd7d3c2dfd6f22b22b7390a2230651e1f3febd | /1.py | 412b8d7720f095722caac5fb02499d4d2a29fbb3 | [] | no_license | NicolasQueiroga/Resolucao_PF_DesSoft--2020.1 | 5c9e8b8a19045763c5af1e32426fa4e2c1891096 | fcafa170b0cec6dcaa658c3c72746d51ed8acc88 | refs/heads/master | 2022-11-06T14:15:31.544152 | 2020-06-22T21:02:12 | 2020-06-22T21:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | with open('criptografado.txt', 'r') as arquivo:
conteudo = arquivo.readlines()
for i in conteudo:
i = i.strip()
letra = [char for char in i]
for e in range(len(letra)):
if letra[e] == 's':
letra[e] = 'z'
elif letra[e] == 'a':
letra[e] = 'e'
elif letra[e] == 'r':
letra[e] = 'b'
elif letra[e] == 'b':
letra[e] = 'r'
elif letra[e] == 'e':
letra[e] = 'a'
elif letra[e] == 'z':
letra[e] = 's'
new = ''.join(letra)
print(new)
| [
"[email protected]"
] | |
85ef73de5c1fceffd5aff452e2b9902d1718602f | 5ca6730fa1178582d5f5875155f340ec0f406294 | /practice_problem-16.py | 44785ae4df282d5b7cc6f83173866d825eb41375 | [] | no_license | MahadiRahman262523/Python_Code_Part-1 | 9740d5ead27209d69af4497eea410f2faef50ff3 | e2f08e3d0564a003400743ae6050fd687c280639 | refs/heads/main | 2023-07-25T09:10:53.649082 | 2021-09-05T19:39:14 | 2021-09-05T19:39:14 | 403,396,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # Write a program to count the number of zeros in the following tuple:
# a = (7,0,8,0,0,9)
a = (7,0,8,0,0,9)
print(a.count(0)) | [
"[email protected]"
] | |
a5235c799186a4e9446f729d5748ae459dd5f73e | 4870960bc25aa9264d3ead399f1662bda3880e19 | /Create_video.py | cdf7329a51f8592ae582ad5bbc39b6293f031836 | [] | no_license | Megapixel-code/Video-maker-with-Reddit | 5fff90a2241298044c8c567dcc39fc4e60218285 | 0f69670fce22e0de652448ee59236dfad29aee7b | refs/heads/main | 2023-03-21T02:47:58.804567 | 2021-03-06T09:44:39 | 2021-03-06T09:44:39 | 344,571,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,952 | py | import glob
import os
import praw
import requests
import shutil
import json
import moviepy.editor as mp
import moviepy.video as mpv
import moviepy.video.fx.all as vfx
from gtts import gTTS
from PIL import Image, ImageDraw, ImageFont
from unidecode import unidecode
from os.path import isfile, join
def delete_all_folder():
directory = 'reddit'
os.chdir(directory)
files = glob.glob('*')
for file_name in files:
os.unlink(file_name)
os.chdir('..')
def deemojify(input_str):
return_output = ''
for car in input_str:
try:
car.encode('ascii')
return_output += car
except UnicodeEncodeError:
replaced = unidecode(str(car))
if replaced != '':
return_output += replaced
return " ".join(return_output.split())
def get_images():
directory = 'reddit'
# https://www.reddit.com/r/mildlyinteresting/top/?t=week
with open('credentials.json') as c:
params = json.load(c)
reddit = praw.Reddit(
client_id=params['client_id'],
client_secret=params['api_key'],
password=params['password'],
user_agent='<reddit_top> accessAPI:v0.0.1 (by/u/Megapixel_YTB)',
username=params['username']
)
subreddit = reddit.subreddit('mildlyinteresting')
name = 0
for submitions in subreddit.top("week", limit=50):
name += 1
url = submitions.url
file_name = str(name)
if url.endswith('.jpg'):
file_name += '.jpg'
found = True
else:
found = False
if found:
r = requests.get(url)
with open(file_name, 'wb') as f:
f.write(r.content)
shutil.move(file_name, directory)
caption = submitions.title
title = str(name)
title += '.txt'
with open(title, 'wt') as c:
c.write(deemojify(caption))
c.close()
shutil.move(title, directory)
def resize(im, fill_color=(0, 0, 0, 0)):
img = Image.open(im)
x, y = img.size
sizex = int(y / 1080 * 1920)
sizey = y
new_im = Image.new('RGB', (sizex, sizey), fill_color)
new_im.paste(img, (int((sizex - x) / 2), int((sizey - y) / 2)))
new_im = new_im.resize((1920, 1080), Image.LANCZOS)
f = open(im[:-4] + '.txt', 'r')
content = f.read()
draw = ImageDraw.Draw(new_im)
draw.rectangle(((0, 0), (1920, 25)), fill=(0, 0, 0))
font = ImageFont.truetype('arialbd.ttf', size=18)
txt_size = draw.textsize(content, font=font)[0]
draw.text((int((1920 - txt_size) / 2), 0), content, fill=(255, 255, 255), font=font)
f.close()
os.remove(im)
new_im.save(im)
def create_tts():
for file in [f for f in os.listdir('reddit/') if isfile(join('reddit/', f)) and f.endswith('.txt')]:
f = open('reddit/' + file, 'r')
my_txt = f.read()
f.close()
out = gTTS(text=my_txt, lang='en', slow=False)
out.save('reddit/' + file[:-4] + '.mp3')
def finish_video():
all_clips = []
for file in [f for f in os.listdir('reddit/') if isfile(join('reddit/', f)) and f.endswith('.mp3')]:
sound = mp.AudioFileClip('reddit/' + file)
sound = mp.concatenate_audioclips([sound, mp.AudioClip(lambda t: 0, duration=3)])
all_clips.append(sound)
all_video_clips = []
x = 0
for file in [f for f in os.listdir('reddit/') if isfile(join('reddit/', f)) and f.endswith('.jpg')]:
resize('reddit/' + file)
vid = mp.ImageClip('reddit/' + file, duration=all_clips[x].duration)
all_video_clips.append(vid)
x += 1
sound = mp.concatenate_audioclips(all_clips)
video = mp.concatenate_videoclips(all_video_clips)
video.audio = sound
video.fps = 60
background = mp.VideoFileClip('space.mpeg')
masked_clip = mpv.fx.all.mask_color(video, color=[0, 0, 0], thr=0, s=0)
midle_video = mp.CompositeVideoClip([background, masked_clip]).set_duration(video.duration)
intro = mp.VideoFileClip('Intro.mpeg')
outro = mp.VideoFileClip('Outro.mpeg')
final_video = mp.concatenate_videoclips([intro, midle_video, outro])
os.remove('ma_video.mp4')
final_video.write_videofile('ma_video.mp4')
def create():
print()
delete_all_folder()
print('Importing the images .....', end='')
get_images()
print(' done !')
print('creating tts .............', end='')
create_tts()
print(' done !')
print('Making the video .........')
print('===============================================================================================')
finish_video()
print('===============================================================================================')
| [
"[email protected]"
] | |
1b406b2dc38004db14248af19fb7f7be9b8e7f6c | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/BuildLinks1.10/test_input/CJ_16_1/16_1_1_FreeTShirt_a.py | 0207b362ff64f55d6e7a49f758c368374d2c5dc1 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 404 | py | def argmax(s):
z = max(s)
return [(idx, c) for idx, c in enumerate(s) if c == z]
def last(s):
if len(s) <= 1:
return s
return max([s[idx]+last(s[:idx])+s[idx+1:] for idx, c in argmax(s)])
fw = open('a-o', 'w')
for idx, line in enumerate(open('A-small-i')):
if idx == 0:
continue
s = line.strip()
print(s)
fw.write('Case #{0}: {1}\n'.format(idx,last(s)))
| [
"[[email protected]]"
] | |
87476fc48dcc81c8407d184dc2ba254400452b87 | c5389783a234bc755571f84e619ac296cff4aa4b | /views.py | f9c5a4ac7104989c4e658990236b9aeb89d4533d | [] | no_license | abhishekmajhi42/the_weather_app | 0f5381b2f832077334bb6597c2f55eca6c4b7709 | e52cf4a218c0464fbe542cf47a94b70aa103a796 | refs/heads/master | 2022-12-24T09:08:56.809596 | 2020-09-27T17:15:26 | 2020-09-27T17:15:26 | 299,080,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | from django.shortcuts import render
import requests
# Create your views here.
from weatherapp.forms import CityForm
from weatherapp.models import City
def index(request):
url='http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=271d1234d3f497eed5b1d80a07b3fcd1'
if request.method=="POST":
form=CityForm(request.POST)
form.save()
#city='Las Vegas'
form = CityForm()
cities=City.objects.all()
weather_data=[]
for city in cities:
r=requests.get(url.format(city)).json()
city_weather={'city':city,'temperature':r['main']["temp"],'description':r["weather"][0]["description"],'icon':r["weather"][0]["icon"],}
weather_data.append(city_weather)
context={'weather_data':weather_data,'form':form}
return render(request,'weather.html',context)
| [
"[email protected]"
] | |
aaebcd30e1283732990421e052eb0d5cecb7a098 | f2abbeb892780b584feb2fd94e7ec5da8ecdc763 | /exporter/opentelemetry-exporter-otlp-proto-http/setup.py | 510eceba6c5abfb14c1de8ec0b03b368df4c4f0c | [
"Apache-2.0"
] | permissive | SigNoz/opentelemetry-python | 6fa5fd92584d2fb3ca71c958004cd56332c764a7 | 9e397c895797891b709a9f1c68345e9a1c357ad8 | refs/heads/main | 2023-07-15T10:43:17.064286 | 2021-09-02T12:25:18 | 2021-09-02T12:25:18 | 401,617,913 | 1 | 0 | Apache-2.0 | 2021-08-31T07:49:24 | 2021-08-31T07:49:24 | null | UTF-8 | Python | false | false | 943 | py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
BASE_DIR = os.path.dirname(__file__)
VERSION_FILENAME = os.path.join(
BASE_DIR,
"src",
"opentelemetry",
"exporter",
"otlp",
"proto",
"http",
"version.py",
)
PACKAGE_INFO = {}
with open(VERSION_FILENAME) as f:
exec(f.read(), PACKAGE_INFO)
setuptools.setup(version=PACKAGE_INFO["__version__"])
| [
"[email protected]"
] | |
4f3a8886eb59966fc5887dccc5604e3f38aad5d6 | 89e21b0c761d450ef8381bc4575e16d29244fb79 | /rr.py | 70882a541779763715acdbdd1f495fc1d98a7fe4 | [] | no_license | BnkColon/operating-systems | 0669b2368cc98b363fdaaf1fd67e134ecdcce7d6 | bf3b60f96f37e727e576e339520659ba5e7f8edd | refs/heads/master | 2021-01-18T07:30:17.331414 | 2016-10-07T15:31:21 | 2016-10-07T15:31:21 | 68,657,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,584 | py | # Bianca I. Colon Rosado
# $ python rr.py quanta input.txt
from string import *
from sys import argv
class Process:
"""docstring for Process"""
def __init__(self, pid, ptime):
self.id = pid # Take the ID of that instance
self.time = int(ptime) # Take the time of that instance
self.qConsumption = 0 # Initialize the consumption time to 0
def __str__(self): # Return the string version of the instance
return str(self.id) + str(self.qConsumption)
def setTime(self, ptime): # Set the time
self.time = ptime
def getTime(self): # Return the time
return self.time
def getID(self): # Return the ID
return self.id
def setQuanta(self, qConsumption): # Set the Quanta
self.qConsumption = qConsumption
def getQuanta(self): # Return the Quanta
return self.qConsumption
def main():
if (len(argv) == 3): # If recive $ python rr.py quanta input.txt
quanta = int(argv[1]) # Save the quanta number gived in the command line
# print type(quanta) / <type 'int'>
fileInput = argv[2] # Save the file input gived in the command line
# print type(fileInput) / <type 'str'>
else: # If not recieve this $ python rr.py quanta input.txt
quanta = 3 # Assing quanta = 3
fileInput = 'input.txt' # Search for a file named input.txt [10,2,3,4]
f = open(fileInput) # Open the file in read mode
# print f / <open file 'input.txt', mode 'r' at 0x2b366f908e40>
lists = f.readlines() # Read all the file
f.close() # Close the file
results = [None] * len(lists) # Create a empty list with the maxsize of the processes
for i in range(len(lists)): # Iterate throught lists, to create the processes (instances)
lists[i] = Process(i, int(lists[i].strip())) # Process('P'+str(i+i)+':')
quantaTotal = 0 # Variable "Global" to get the quantum time of all processes
average = 0 # Variable that save the average of all the processes
while lists: # While lists is not empty
finished_processes = [] # Empty list to save the index of the processes that finished
for i in range(len(lists)): # Iterate all processes
if (lists[i].getTime() <= quanta): # If the time of the process is minor or equal to the quantum
if (lists[i].getTime() == quanta): # If is equal to the quantum
quantaTotal += quanta # Save the quantum
else: # If the time of the process is minor to the quantum
quantaTotal += lists[i].getTime() # Save time of the process
lists[i].setQuanta(quantaTotal) # Set the quantum to the process
lists[i].setTime(0) # When finished set the time to 0
results[lists[i].getID()] = lists[i] # Insert the index to remove
finished_processes.insert(0, i) # Insert to the list of finished processes
#print i, lists[i].getQuanta()
else: # If the time of the process is bigger to the quantum
lists[i].setTime(int(lists[i].getTime()) - quanta) # To the time rest quanta
quantaTotal += quanta # Save the quantum
lists[i].setQuanta(quantaTotal) # Set the quantum to the process
# print i, lists[i].getQuanta()
for i in finished_processes: # Iterate the list of finished processes
lists.pop(i) # Delete from the list of processes
# Close While
for i in range(len(results)): # Iterate the list of results
print 'P%d:%d' %(results[i].getID() + 1,results[i].getQuanta()) # Print P(ID):Time spended
average += results[i].getQuanta() # Save all the time spended
average = float(average)/ len(results) # to calculate the average
print 'Avg:%1.2f' % (average) # print Average
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
35c9dd19ef1d0bbdfd5644a92542771a5c6fbf58 | 10659041996f62d28cebf9ba92dcad2d6d5ecb26 | /factors of cofficent in counting.py | 02e97f638b7ca1550ec438f04c2c5d2c91a83ad3 | [] | no_license | shailajaBegari/loops | 4e92d04ee55d0564de417b7b126d4b77dc5a8816 | 173d5a2b6c3a921efe5a38e2d763dd59759b05c4 | refs/heads/main | 2023-07-13T23:37:39.642374 | 2021-08-28T10:48:31 | 2021-08-28T10:48:31 | 400,764,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | n=int(input('enter number'))
i=1
fact=1
count=0
while i<=n:
if n%i==0:
print(i)
count=count+1
i=i+1
print(count,'count') | [
"[email protected]"
] | |
d3e7e9dae606fe6dc77d9c43997e9c592fbcd477 | 982bc95ab762829c8b6913e44504415cdd77241a | /account_easy_reconcile/base_reconciliation.py | b50c06b9eed699d96da272f0fb9dd9613177c235 | [] | no_license | smart-solution/natuurpunt-finance | 6b9eb65be96a4e3261ce46d7f0c31de3589e1e0d | 6eeb48468792e09d46d61b89499467a44d67bc79 | refs/heads/master | 2021-01-23T14:42:05.017263 | 2020-11-03T15:56:35 | 2020-11-03T15:56:35 | 39,186,046 | 0 | 1 | null | 2020-11-03T15:56:37 | 2015-07-16T08:36:54 | Python | UTF-8 | Python | false | false | 7,776 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2012-2013 Camptocamp SA (Guewen Baconnier)
# Copyright (C) 2010 Sébastien Beau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from operator import itemgetter, attrgetter
class easy_reconcile_base(orm.AbstractModel):
"""Abstract Model for reconciliation methods"""
_name = 'easy.reconcile.base'
_inherit = 'easy.reconcile.options'
_columns = {
'account_id': fields.many2one(
'account.account', 'Account', required=True),
'partner_ids': fields.many2many(
'res.partner', string="Restrict on partners"),
# other columns are inherited from easy.reconcile.options
}
def automatic_reconcile(self, cr, uid, ids, context=None):
""" Reconciliation method called from the view.
:return: list of reconciled ids, list of partially reconciled items
"""
if isinstance(ids, (int, long)):
ids = [ids]
assert len(ids) == 1, "Has to be called on one id"
rec = self.browse(cr, uid, ids[0], context=context)
return self._action_rec(cr, uid, rec, context=context)
def _action_rec(self, cr, uid, rec, context=None):
""" Must be inherited to implement the reconciliation
:return: list of reconciled ids
"""
raise NotImplementedError
def _base_columns(self, rec):
""" Mandatory columns for move lines queries
An extra column aliased as ``key`` should be defined
in each query."""
aml_cols = (
'id',
'debit',
'credit',
'date',
'period_id',
'ref',
'name',
'partner_id',
'account_id',
'move_id')
return ["account_move_line.%s" % col for col in aml_cols]
def _select(self, rec, *args, **kwargs):
return "SELECT %s" % ', '.join(self._base_columns(rec))
def _from(self, rec, *args, **kwargs):
return "FROM account_move_line"
def _where(self, rec, *args, **kwargs):
where = ("WHERE account_move_line.account_id = %s "
"AND account_move_line.reconcile_id IS NULL ")
# it would be great to use dict for params
# but as we use _where_calc in _get_filter
# which returns a list, we have to
# accomodate with that
params = [rec.account_id.id]
if rec.partner_ids:
where += " AND account_move_line.partner_id IN %s"
params.append(tuple([l.id for l in rec.partner_ids]))
return where, params
def _get_filter(self, cr, uid, rec, context):
ml_obj = self.pool.get('account.move.line')
where = ''
params = []
if rec.filter:
dummy, where, params = ml_obj._where_calc(
cr, uid, eval(rec.filter), context=context).get_sql()
if where:
where = " AND %s" % where
return where, params
def _below_writeoff_limit(self, cr, uid, rec, lines,
writeoff_limit, context=None):
precision = self.pool.get('decimal.precision').precision_get(
cr, uid, 'Account')
keys = ('debit', 'credit')
sums = reduce(
lambda line, memo:
dict((key, value + memo[key])
for key, value
in line.iteritems()
if key in keys), lines)
debit, credit = sums['debit'], sums['credit']
writeoff_amount = round(debit - credit, precision)
return bool(writeoff_limit >= abs(writeoff_amount)), debit, credit
def _get_rec_date(self, cr, uid, rec, lines,
based_on='end_period_last_credit', context=None):
period_obj = self.pool.get('account.period')
def last_period(mlines):
period_ids = [ml['period_id'] for ml in mlines]
periods = period_obj.browse(
cr, uid, period_ids, context=context)
return max(periods, key=attrgetter('date_stop'))
def last_date(mlines):
return max(mlines, key=itemgetter('date'))
def credit(mlines):
return [l for l in mlines if l['credit'] > 0]
def debit(mlines):
return [l for l in mlines if l['debit'] > 0]
if based_on == 'end_period_last_credit':
return last_period(credit(lines)).date_stop
if based_on == 'end_period':
return last_period(lines).date_stop
elif based_on == 'newest':
return last_date(lines)['date']
elif based_on == 'newest_credit':
return last_date(credit(lines))['date']
elif based_on == 'newest_debit':
return last_date(debit(lines))['date']
# reconcilation date will be today
# when date is None
return None
def _reconcile_lines(self, cr, uid, rec, lines, allow_partial=False, context=None):
""" Try to reconcile given lines
:param list lines: list of dict of move lines, they must at least
contain values for : id, debit, credit
:param boolean allow_partial: if True, partial reconciliation will be
created, otherwise only Full
reconciliation will be created
:return: tuple of boolean values, first item is wether the items
have been reconciled or not,
the second is wether the reconciliation is full (True)
or partial (False)
"""
if context is None:
context = {}
ml_obj = self.pool.get('account.move.line')
writeoff = rec.write_off
line_ids = [l['id'] for l in lines]
below_writeoff, sum_debit, sum_credit = self._below_writeoff_limit(
cr, uid, rec, lines, writeoff, context=context)
date = self._get_rec_date(
cr, uid, rec, lines, rec.date_base_on, context=context)
rec_ctx = dict(context, date_p=date)
if below_writeoff:
if sum_credit < sum_debit:
writeoff_account_id = rec.account_profit_id.id
else:
writeoff_account_id = rec.account_lost_id.id
period_id = self.pool.get('account.period').find(
cr, uid, dt=date, context=context)[0]
ml_obj.reconcile(
cr, uid,
line_ids,
type='auto',
writeoff_acc_id=writeoff_account_id,
writeoff_period_id=period_id,
writeoff_journal_id=rec.journal_id.id,
context=rec_ctx)
return True, True
elif allow_partial:
ml_obj.reconcile_partial(
cr, uid,
line_ids,
type='manual',
context=rec_ctx)
return True, False
return False, False
| [
"[email protected]"
] | |
0d4ab487c9de86cce3e199c7f5a4c2c87e57c607 | 2612f336d667a087823234daf946f09b40d8ca3d | /python/lib/Lib/site-packages/django/contrib/gis/tests/geoapp/models.py | 89027eedfbc919466ac7c1335c42dfb57aea547a | [
"Apache-2.0"
] | permissive | tnorbye/intellij-community | df7f181861fc5c551c02c73df3b00b70ab2dd589 | f01cf262fc196bf4dbb99e20cd937dee3705a7b6 | refs/heads/master | 2021-04-06T06:57:57.974599 | 2018-03-13T17:37:00 | 2018-03-13T17:37:00 | 125,079,130 | 2 | 0 | Apache-2.0 | 2018-03-13T16:09:41 | 2018-03-13T16:09:41 | null | UTF-8 | Python | false | false | 1,546 | py | from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
# MySQL spatial indices can't handle NULL geometries.
null_flag = not mysql
class Country(models.Model):
name = models.CharField(max_length=30)
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
objects = models.GeoManager()
def __unicode__(self): return self.name
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __unicode__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
objects = models.GeoManager() # TODO: This should be implicitly inherited.
class State(models.Model):
name = models.CharField(max_length=30)
poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
objects = models.GeoManager()
def __unicode__(self): return self.name
class Track(models.Model):
name = models.CharField(max_length=30)
line = models.LineStringField()
objects = models.GeoManager()
def __unicode__(self): return self.name
if not spatialite:
class Feature(models.Model):
name = models.CharField(max_length=20)
geom = models.GeometryField()
objects = models.GeoManager()
def __unicode__(self): return self.name
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
| [
"[email protected]"
] | |
b6e187de710d37037dd7c0d830a50e7eaee1aa28 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/bin/format/xcoff/XCoffSectionHeaderFlags.pyi | 43a745532a3157885655ec9c25a175e6ac3df2ec | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 772 | pyi | import java.lang
class XCoffSectionHeaderFlags(object):
STYP_BSS: int = 128
STYP_DATA: int = 64
STYP_DEBUG: int = 8192
STYP_EXCEPT: int = 128
STYP_INFO: int = 512
STYP_LOADER: int = 4096
STYP_OVRFLO: int = 32768
STYP_PAD: int = 8
STYP_TEXT: int = 32
STYP_TYPCHK: int = 16384
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
| [
"[email protected]"
] | |
2b05aafb513ea6ad66865aaa00981d7ff30884e1 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2733/40186/320060.py | 85feba17c1b35b4a3536d8fcea4725c382ec5d13 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | inp=input()
a=input()
if inp=='8 3' and a=='10 7 9 3 4 5 8 17':
print(10)
print(17)
print(9)
elif a=='5 27 1 3 4 2 8 17':
print(5)
print(27)
print(5)
elif a=='105 2 9 3 8 5 7 7':
print(2)
print(8)
print(9)
print(105)
print(7)
elif inp=='101011':
print(18552)
elif inp=='10101101010111110100110100101010110001010010101001':
print(322173207)
else:
print(inp)
print(a)
print(b) | [
"[email protected]"
] | |
d16b64f8695cc6c84f4d5603fce8acf2f90a4ceb | bff6ba0d61a3226a4d4a2b48e37cb2d8c9db7e73 | /child_python.py | b08cfb4eba4df2cfd1d93b121652a2df3004268c | [] | no_license | richoey/testrepo | bf4f14b2011fa3194e0c212fccc1a6ee04fd9264 | 6fea5e1bafccabdeab4dd739161ea0ed685b2d0e | refs/heads/main | 2023-03-30T09:09:20.798788 | 2021-04-08T05:29:42 | 2021-04-08T05:29:42 | 355,756,548 | 0 | 0 | null | 2021-04-08T05:29:42 | 2021-04-08T03:52:06 | Jupyter Notebook | UTF-8 | Python | false | false | 35 | py | print("New child python to merge")
| [
"[email protected]"
] | |
1cf8dbafbb2c140e16cc4c24f316af8cc7589ca6 | a2d902c5976adce374dce2877b059cfb64e1d5b6 | /testfile/testthread.py | dfc08c97b301cdb9073cd8daf4842b760d4e7420 | [] | no_license | buaanostop/Autotest | 53eebc387014b6fade9a93598eaf0f74814d2f3e | 53de72f1d203b4f50725583ab90988bd254cce07 | refs/heads/master | 2020-05-03T00:34:34.500048 | 2019-05-14T08:37:53 | 2019-05-14T08:37:53 | 178,313,227 | 0 | 4 | null | 2019-05-11T16:32:42 | 2019-03-29T01:57:03 | HTML | UTF-8 | Python | false | false | 11,424 | py | # -*- coding: utf-8 -*-
"""Test类
调用Test类中的各种方法来对模拟器或手机界面进行操作。
"""
import random
import sys
import time
import threading
from com.android.monkeyrunner import MonkeyRunner,MonkeyDevice,MonkeyImage
class Operation():
"""操作类,给Test类记录各种操作"""
def __init__(self, optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring ):
self.optype = optype
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.number = number
self.interval_time = interval_time
self.drag_time = drag_time
self.keyorstring = keyorstring
class Test(threading.Thread):
def __init__(self):
"""初始化"""
threading.Thread.__init__(self)
self.__flag = threading.Event() # 暂停标志
self.__flag.set() # 设为True
self.__running = threading.Event() # 运行标志
self.__running.set() # 设为True
self.__resolution_x = 0 # 分辨率x
self.__resolution_y = 0 # 分辨率y
self.__device = None # 设备
self.__oplist = [] # 模拟操作的列表
def connect(self, resolution_x=540, resolution_y=960):
"""连接模拟器或手机
参数
----------
resolution_x : int
分辨率x值
resolution_y : int
分辨率y值
返回值
----------
int
返回 1 : 成功连接设备
返回 0 : 连接设备失败
示例
----------
>>> a.connect(540, 960)
"""
self.__resolution_x = resolution_x
self.__resolution_y = resolution_y
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Connect ...")
self.__device = MonkeyRunner.waitForConnection() # 连接设备或模拟器
if not self.__device:
print("Please connect a device to start.")
return 0
else:
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Connection succeeded.")
return 1
def open_app(self, package_name, activity_name):
"""打开设备上的应用
参数
----------
package_name : string
应用的Package Name 包名
activity_name: string
应用的Activity Name 活动名
示例
----------
>>> a.open_app('com.Jelly.JellyFish','com.unity3d.player.UnityPlayerActivity')
"""
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Oppen application ...")
self.__device.startActivity(component = package_name + "/" + activity_name)
MonkeyRunner.sleep(10)
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Open application succeeded.")
def pause(self):
print("pause")
self.__flag.clear()
def resume(self):
print("resume")
self.__flag.set()
def stop(self):
print("stop")
self.__flag.set()
self.__running.clear()
def touch(self,pos_x, pos_y, touch_number=1, interval_time=1):
"""点击屏幕测试
参数
-------------
pos_x : int
点击的位置x
pos_y : int
点击的位置y
touch_numbere : int
点击的次数,默认为1
interval_time : float
多次点击时间隔时间,默认为1秒
"""
#optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring
op = Operation('touch',pos_x,pos_y,0,0,touch_number,interval_time,0,0)
self.__oplist.append(op)
def random_touch(self, touch_number, interval_time):
"""随机点击屏幕测试
参数
-----------
touch_number : int
点击的次数
interval_time : float
每两次点击间隔的时间,秒为单位
示例
-----------
>>> a.random_touch(200, 1)
"""
op = Operation('random_touch',0,0,0,0,touch_number,interval_time,0,0)
self.__oplist.append(op)
def press(self, key_name):
"""按键测试
参数
-----------
key_name : string
按键的名字
"""
op = Operation('press',0,0,0,0,0,0,0,key_name)
self.__oplist.append(op)
def type(self, typestring):
"""键盘输入测试
参数
-------
typestring : string
要输入的字符串
"""
op = Operation('type',0,0,0,0,0,0,0,typestring)
self.__oplist.append(op)
def drag(self,start_x, start_y, end_x, end_y, drag_time=1, drag_number=1, interval_time=1):
"""滑动屏幕测试
参数
---------------
start_x : int
滑动起始位置x
start_y : int
滑动起始位置y
end_x : int
滑动结束位置x
end_y : int
滑动结束位置y
drag_time : float
滑动持续时间,默认为1秒
drag_number : int
滑动次数,默认为1次
interval_time : float
滑动间隔时间,默认为1秒
"""
#optype, x1, y1, x2, y2, number, interval_time, drag_time, keyorstring
op = Operation('drag',start_x,start_y,end_x,end_y,drag_number,interval_time,drag_time,0)
self.__oplist.append(op)
def random_drag(self, drag_number, interval_time):
"""随机滑动屏幕测试
参数
-----------
drag_number : int
滑动的次数
interval_time : float
每两次滑动间隔的时间,秒为单位
示例
------------
>>> a.random_drag(200, 3)
"""
op = Operation('random_drag',0,0,0,0,drag_number,interval_time,1,0)
self.__oplist.append(op)
def run(self):
opnum = len(self.__oplist)
if(opnum <= 0):
return
for op in self.__oplist:
# touch
if op.optype == 'touch':
touch_number = op.number
pos_x = op.x1
pos_y = op.y1
interval_time = op.interval_time
num = 1
while(num <= touch_number):
if self.__running.isSet():
self.__flag.wait()
print("%stouch %d (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "), num, pos_x, pos_y))
self.__device.touch(pos_x, pos_y, 'DOWN_AND_UP')
num += 1
MonkeyRunner.sleep(interval_time)
else:
self.__oplist[:] = []
return
# random_touch
elif op.optype == 'random_touch':
touch_number = op.number
interval_time = op.interval_time
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random touch test start.")
num = 1
while(num <= touch_number):
if self.__running.isSet():
self.__flag.wait()
x = random.randint(0, self.__resolution_x) # 随机生成位置x
y = random.randint(0, self.__resolution_y) # 随机生成位置y
print("%srandom_touch %d (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,x,y))
self.__device.touch(x, y, 'DOWN_AND_UP') # 点击(x,y)
MonkeyRunner.sleep(interval_time)
num += 1
else:
self.__oplist[:] = []
return
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random touch test finished.")
# drag
elif op.optype == 'drag':
start_x = op.x1
start_y = op.y1
end_x = op.x2
end_y = op.y2
drag_time = op.drag_time
drag_number = op.number
interval_time = op.interval_time
num = 1
while(num <= drag_number):
if self.__running.isSet():
self.__flag.wait()
print("%sdrag %d (%d,%d) to (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,start_x,start_y,end_x,end_y))
self.__device.drag((start_x, start_y), (end_x, end_y), drag_time, 10)
MonkeyRunner.sleep(interval_time)
num += 1
else:
self.__oplist[:] = []
return
#random_drag
elif op.optype == 'random_drag':
drag_number = op.number
interval_time = op.interval_time
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random drag test start.")
num = 1
while(num <= drag_number):
if self.__running.isSet():
self.__flag.wait()
x_start = random.randint(0, self.__resolution_x)
y_start = random.randint(0, self.__resolution_y)
x_end = random.randint(0,self.__resolution_x)
y_end = random.randint(0,self.__resolution_y)
print("%srandom_drag %d (%d,%d) to (%d,%d)."%(time.strftime("%Y-%m-%d %H:%M:%S "),num,x_start,y_start,x_end,y_end))
self.__device.drag((x_start, y_start), (x_end, y_end), 1, 10)
MonkeyRunner.sleep(interval_time)
num += 1
else:
self.__oplist[:] = []
return
print(time.strftime("%Y-%m-%d %H:%M:%S ") + "Random drag test finished.")
#press
elif op.optype == 'press':
key_name = op.keyorstring
if self.__running.isSet():
self.__flag.wait()
print("%spress %s."%(time.strftime("%Y-%m-%d %H:%M:%S "),key_name))
self.__device.press(key_name, 'DOWN_AND_UP')
else:
self.__oplist[:] = []
return
#type
elif op.optype == 'type':
typestring = op.keyorstring
if self.__running.isSet():
print("%stype %s."%(time.strftime("%Y-%m-%d %H:%M:%S "),typestring))
self.__device.type(typestring)
else:
self.__oplist[:] = []
return
else:
print("optype error")
##例子
##t1 = Test()
##t1.connect()
##t1.random_touch(5,5)
##t1.start()
##time.sleep(6)
##t1.pause()
##time.sleep(6)
##t1.resume()
##time.sleep(6)
##t1.stop()
##
##t1.join()
| [
"[email protected]"
] | |
8edcd266e14b62bb5053d6369487e7c9726e0dda | 38c10c01007624cd2056884f25e0d6ab85442194 | /chrome/chrome_resources.gyp | 492536ca0787a392f82c67762f4eb395a3eb7c79 | [
"BSD-3-Clause"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 25,319 | gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/chrome',
'additional_modules_list_file': '<(SHARED_INTERMEDIATE_DIR)/chrome/browser/internal/additional_modules_list.txt',
},
'targets': [
{
# GN version: //chrome:extra_resources
'target_name': 'chrome_extra_resources',
'type': 'none',
# These resources end up in resources.pak because they are resources
# used by internal pages. Putting them in a separate pak file makes
# it easier for us to reference them internally.
'actions': [
{
# GN version: //chrome/browser/resources:memory_internals_resources
'action_name': 'generate_memory_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/memory_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:net_internals_resources
'action_name': 'generate_net_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/net_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:invalidations_resources
'action_name': 'generate_invalidations_resources',
'variables': {
'grit_grd_file': 'browser/resources/invalidations_resources.grd',
},
'includes': ['../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:password_manager_internals_resources
'action_name': 'generate_password_manager_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/password_manager_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:signin_internals_resources
'action_name': 'generate_signin_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/signin_internals_resources.grd',
},
'includes': ['../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:translate_internals_resources
'action_name': 'generate_translate_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/translate_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
'conditions': [
['OS != "ios"', {
'dependencies': [
'../components/components_resources.gyp:components_resources',
'../content/browser/devtools/devtools_resources.gyp:devtools_resources',
'../content/browser/tracing/tracing_resources.gyp:tracing_resources',
'browser/devtools/webrtc_device_provider_resources.gyp:webrtc_device_provider_resources',
],
'actions': [
{
# GN version: //chrome/browser/resources:component_extension_resources
'action_name': 'generate_component_extension_resources',
'variables': {
'grit_grd_file': 'browser/resources/component_extension_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:options_resources
'action_name': 'generate_options_resources',
'variables': {
'grit_grd_file': 'browser/resources/options_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:settings_resources
'action_name': 'generate_settings_resources',
'variables': {
'grit_grd_file': 'browser/resources/settings/settings_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'copies': [
{
# GN version: //chrome/browser/resources:extension_resource_demo
'destination': '<(PRODUCT_DIR)/resources/extension/demo',
'files': [
'browser/resources/extension_resource/demo/library.js',
],
},
],
}],
['chromeos==1 and disable_nacl==0 and disable_nacl_untrusted==0', {
'dependencies': [
'browser/resources/chromeos/chromevox/chromevox.gyp:chromevox',
],
}],
['enable_extensions==1', {
'actions': [
{
# GN version: //chrome/browser/resources:quota_internals_resources
'action_name': 'generate_quota_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/quota_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/browser/resources:sync_file_system_internals_resources
'action_name': 'generate_sync_file_system_internals_resources',
'variables': {
'grit_grd_file': 'browser/resources/sync_file_system_internals_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
}],
],
},
{
# GN version: //chrome/browser:chrome_internal_resources_gen
'target_name': 'chrome_internal_resources_gen',
'type': 'none',
'conditions': [
['branding=="Chrome"', {
'actions': [
{
'action_name': 'generate_transform_additional_modules_list',
'variables': {
'additional_modules_input_path':
'browser/internal/resources/additional_modules_list.input',
'additional_modules_py_path':
'browser/internal/transform_additional_modules_list.py',
},
'inputs': [
'<(additional_modules_input_path)',
],
'outputs': [
'<(additional_modules_list_file)',
],
'action': [
'python',
'<(additional_modules_py_path)',
'<(additional_modules_input_path)',
'<@(_outputs)',
],
'message': 'Transforming additional modules list',
}
],
}],
],
},
{
# TODO(mark): It would be better if each static library that needed
# to run grit would list its own .grd files, but unfortunately some
# of the static libraries currently have circular dependencies among
# generated headers.
#
# GN version: //chrome:resources
'target_name': 'chrome_resources',
'type': 'none',
'dependencies': [
'chrome_internal_resources_gen',
'chrome_web_ui_mojo_bindings.gyp:web_ui_mojo_bindings',
],
'actions': [
{
# GN version: //chrome/browser:resources
'action_name': 'generate_browser_resources',
'variables': {
'grit_grd_file': 'browser/browser_resources.grd',
'grit_additional_defines': [
'-E', 'additional_modules_list_file=<(additional_modules_list_file)',
'-E', 'root_gen_dir=<(SHARED_INTERMEDIATE_DIR)',
],
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/common:resources
'action_name': 'generate_common_resources',
'variables': {
'grit_grd_file': 'common/common_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/renderer:resources
'action_name': 'generate_renderer_resources',
'variables': {
'grit_grd_file': 'renderer/resources/renderer_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'conditions': [
['enable_extensions==1', {
'actions': [
{
# GN version: //chrome/common:extensions_api_resources
'action_name': 'generate_extensions_api_resources',
'variables': {
'grit_grd_file': 'common/extensions_api_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
}
],
}],
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# TODO(mark): It would be better if each static library that needed
# to run grit would list its own .grd files, but unfortunately some
# of the static libraries currently have circular dependencies among
# generated headers.
#
# GN version: //chrome:strings
'target_name': 'chrome_strings',
'type': 'none',
'actions': [
{
# GN version: //chrome/app/resources:locale_settings
'action_name': 'generate_locale_settings',
'variables': {
'grit_grd_file': 'app/resources/locale_settings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:chromium_strings
'action_name': 'generate_chromium_strings',
'variables': {
'grit_grd_file': 'app/chromium_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:generated_resources
'action_name': 'generate_generated_resources',
'variables': {
'grit_grd_file': 'app/generated_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:google_chrome_strings
'action_name': 'generate_google_chrome_strings',
'variables': {
'grit_grd_file': 'app/google_chrome_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:settings_strings
'action_name': 'generate_settings_strings',
'variables': {
'grit_grd_file': 'app/settings_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:settings_chromium_strings
'action_name': 'generate_settings_chromium_strings',
'variables': {
'grit_grd_file': 'app/settings_chromium_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
{
# GN version: //chrome/app:settings_google_chrome_strings
'action_name': 'generate_settings_google_chrome_strings',
'variables': {
'grit_grd_file': 'app/settings_google_chrome_strings.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
},
{
# GN version: //chrome/browser/metrics/variations:chrome_ui_string_overrider_factory_gen_sources
'target_name': 'make_chrome_ui_string_overrider_factory',
'type': 'none',
'hard_dependency': 1,
'dependencies': [ 'chrome_strings', ],
'actions': [
{
'action_name': 'generate_ui_string_overrider',
'inputs': [
'../components/variations/service/generate_ui_string_overrider.py',
'<(grit_out_dir)/grit/generated_resources.h'
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.cc',
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.h',
],
'action': [
'python',
'../components/variations/service/generate_ui_string_overrider.py',
'-N', 'chrome_variations',
'-o', '<(SHARED_INTERMEDIATE_DIR)',
'-S', 'chrome/browser/metrics/variations/ui_string_overrider_factory.cc',
'-H', 'chrome/browser/metrics/variations/ui_string_overrider_factory.h',
'<(grit_out_dir)/grit/generated_resources.h',
],
'message': 'Generating generated resources map.',
}
],
},
{
# GN version: //chrome/browser/metrics/variations:chrome_ui_string_overrider_factory
'target_name': 'chrome_ui_string_overrider_factory',
'type': 'static_library',
'dependencies': [
'../components/components.gyp:variations_service',
'make_chrome_ui_string_overrider_factory',
],
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.cc',
'<(SHARED_INTERMEDIATE_DIR)/chrome/browser/metrics/variations/ui_string_overrider_factory.h',
],
},
{
# GN version: //chrome/app/resources:platform_locale_settings
'target_name': 'platform_locale_settings',
'type': 'none',
'variables': {
'conditions': [
['OS=="win"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_win.grd',
},],
['OS=="linux"', {
'conditions': [
['chromeos==1', {
'platform_locale_settings_grd':
'app/resources/locale_settings_<(branding_path_component)os.grd',
}, { # chromeos==0
'platform_locale_settings_grd':
'app/resources/locale_settings_linux.grd',
}],
],
},],
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "linux"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_linux.grd',
},],
['OS == "mac" or OS == "ios"', {
'platform_locale_settings_grd':
'app/resources/locale_settings_mac.grd',
}],
], # conditions
}, # variables
'actions': [
{
'action_name': 'generate_platform_locale_settings',
'variables': {
'grit_grd_file': '<(platform_locale_settings_grd)',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome/app/theme:theme_resources
'target_name': 'theme_resources',
'type': 'none',
'dependencies': [
'../ui/resources/ui_resources.gyp:ui_resources',
'chrome_unscaled_resources',
],
'actions': [
{
'action_name': 'generate_theme_resources',
'variables': {
'grit_grd_file': 'app/theme/theme_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome:packed_extra_resources
'target_name': 'packed_extra_resources',
'type': 'none',
'dependencies': [
'chrome_extra_resources',
'packed_resources',
],
'actions': [
{
'includes': ['chrome_repack_resources.gypi']
},
],
'conditions': [
['OS != "mac" and OS != "ios"', {
# We'll install the resource files to the product directory. The Mac
# copies the results over as bundle resources in its own special way.
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/resources.pak'
],
},
],
}],
],
},
{
# GN version: //chrome:packed_resources
'target_name': 'packed_resources',
'type': 'none',
'dependencies': [ # Update duplicate logic in repack_locales.py
# MSVS needs the dependencies explictly named, Make is able to
# derive the dependencies from the output files.
'chrome_resources',
'chrome_strings',
'platform_locale_settings',
'theme_resources',
'<(DEPTH)/components/components_strings.gyp:components_strings',
'<(DEPTH)/net/net.gyp:net_resources',
'<(DEPTH)/ui/resources/ui_resources.gyp:ui_resources',
'<(DEPTH)/ui/strings/ui_strings.gyp:ui_strings',
],
'actions': [
{
# GN version: //chrome:repack_locales_pack
'action_name': 'repack_locales_pack',
'variables': {
'pak_locales': '<(locales)',
},
'includes': ['chrome_repack_locales.gypi']
},
{
# GN version: //chrome:repack_pseudo_locales_pack
'action_name': 'repack_pseudo_locales_pack',
'variables': {
'pak_locales': '<(pseudo_locales)',
},
'includes': ['chrome_repack_locales.gypi']
},
{
'includes': ['chrome_repack_chrome_100_percent.gypi']
},
{
'includes': ['chrome_repack_chrome_200_percent.gypi']
},
{
'includes': ['chrome_repack_chrome_material_100_percent.gypi']
},
{
'includes': ['chrome_repack_chrome_material_200_percent.gypi']
},
],
'conditions': [ # GN version: chrome_repack_locales.gni template("_repack_one_locale")
['OS != "ios"', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/content/app/resources/content_resources.gyp:content_resources',
'<(DEPTH)/content/app/strings/content_strings.gyp:content_strings',
'<(DEPTH)/device/bluetooth/bluetooth_strings.gyp:bluetooth_strings',
'<(DEPTH)/third_party/WebKit/public/blink_resources.gyp:blink_resources',
],
}, { # else
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/ios/chrome/ios_chrome_resources.gyp:ios_strings_gen',
],
'actions': [
{
'includes': ['chrome_repack_chrome_300_percent.gypi']
},
],
}],
['use_ash==1', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/ash/ash_resources.gyp:ash_resources',
'<(DEPTH)/ash/ash_strings.gyp:ash_strings',
],
}],
['toolkit_views==1', {
'dependencies': [
'<(DEPTH)/ui/views/resources/views_resources.gyp:views_resources',
],
}],
['chromeos==1', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/remoting/remoting.gyp:remoting_resources',
'<(DEPTH)/ui/chromeos/ui_chromeos.gyp:ui_chromeos_resources',
'<(DEPTH)/ui/chromeos/ui_chromeos.gyp:ui_chromeos_strings',
],
}],
['enable_autofill_dialog==1 and OS!="android"', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/third_party/libaddressinput/libaddressinput.gyp:libaddressinput_strings',
],
}],
['enable_extensions==1', {
'dependencies': [ # Update duplicate logic in repack_locales.py
'<(DEPTH)/extensions/extensions_strings.gyp:extensions_strings',
],
}],
['enable_app_list==1', {
'dependencies': [
'<(DEPTH)/ui/app_list/resources/app_list_resources.gyp:app_list_resources',
],
}],
['OS != "mac" and OS != "ios"', {
# Copy pak files to the product directory. These files will be picked
# up by the following installer scripts:
# - Windows: chrome/installer/mini_installer/chrome.release
# - Linux: chrome/installer/linux/internal/common/installer.include
# Ensure that the above scripts are updated when adding or removing
# pak files.
# Copying files to the product directory is not needed on the Mac
# since the framework build phase will copy them into the framework
# bundle directly.
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/chrome_100_percent.pak'
],
},
{
'destination': '<(PRODUCT_DIR)/locales',
'files': [
'<!@pymod_do_main(repack_locales -o -p <(OS) -g <(grit_out_dir) -s <(SHARED_INTERMEDIATE_DIR) -x <(SHARED_INTERMEDIATE_DIR) <(locales))'
],
},
{
'destination': '<(PRODUCT_DIR)/pseudo_locales',
'files': [
'<!@pymod_do_main(repack_locales -o -p <(OS) -g <(grit_out_dir) -s <(SHARED_INTERMEDIATE_DIR) -x <(SHARED_INTERMEDIATE_DIR) <(pseudo_locales))'
],
},
],
'conditions': [
['branding=="Chrome"', {
'copies': [
{
# This location is for the Windows and Linux builds. For
# Windows, the chrome.release file ensures that these files
# are copied into the installer. Note that we have a separate
# section in chrome_dll.gyp to copy these files for Mac, as it
# needs to be dropped inside the framework.
'destination': '<(PRODUCT_DIR)/default_apps',
'files': ['<@(default_apps_list)']
},
],
}],
['enable_hidpi == 1', {
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/chrome_200_percent.pak',
],
},
],
}],
['enable_topchrome_md == 1', {
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/chrome_material_100_percent.pak',
],
},
],
}],
['enable_hidpi == 1 and enable_topchrome_md == 1', {
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/repack/chrome_material_200_percent.pak',
],
},
],
}],
], # conditions
}], # end OS != "mac" and OS != "ios"
], # conditions
},
{
# GN version: //chrome/app/theme:chrome_unscaled_resources
'target_name': 'chrome_unscaled_resources',
'type': 'none',
'actions': [
{
'action_name': 'generate_chrome_unscaled_resources',
'variables': {
'grit_grd_file': 'app/theme/chrome_unscaled_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome/browser/resources:options_test_resources
'target_name': 'options_test_resources',
'type': 'none',
'actions': [
{
'action_name': 'generate_options_test_resources',
'variables': {
'grit_grd_file': 'browser/resources/options_test_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome/test/data/resources:webui_test_resources
'target_name': 'webui_test_resources',
'type': 'none',
'actions': [
{
'action_name': 'generate_webui_test_resources',
'variables': {
'grit_grd_file': 'test/data/webui_test_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
# GN version: //chrome:browser_tests_pak
'target_name': 'browser_tests_pak',
'type': 'none',
'dependencies': [
'options_test_resources',
'webui_test_resources',
],
'actions': [
{
'action_name': 'repack_browser_tests_pak',
'variables': {
'pak_inputs': [
'<(SHARED_INTERMEDIATE_DIR)/chrome/options_test_resources.pak',
'<(SHARED_INTERMEDIATE_DIR)/chrome/webui_test_resources.pak',
],
'pak_output': '<(PRODUCT_DIR)/browser_tests.pak',
},
'includes': [ '../build/repack_action.gypi' ],
},
],
},
], # targets
}
| [
"[email protected]"
] | |
37267b9d176703bfa0ccfc0f5b44ef463f69ea53 | 9930638a8061f1e9f7c2313c34846d6c5295d747 | /Quiz41_Yusuf Syarif Iqbal_1201184320.py | 2038f1a58232b71efd2b910a885abbdcba9de425 | [
"Unlicense"
] | permissive | yusufsyarif/Quiz-4-Alpro | 6ee82c066b53694c9e05c43d6921f46dda1a7657 | dc59622409a500d73cc0ddbbed2fa8850c919ba7 | refs/heads/master | 2020-07-24T15:16:48.701620 | 2019-09-12T05:06:42 | 2019-09-12T05:06:42 | 207,966,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | StudentofFRI = ["Anton", "Budi", "Doni", "Huda"]
print("List of Student = ")
print(StudentofFRI[0])
print(StudentofFRI[1])
print(StudentofFRI[2])
print(StudentofFRI[3]) | [
"[email protected]"
] | |
63897bcb7d1d451d51497a89ed42b40c7c919bcd | 8c7853822047c1908b7bb5f39531d721dacbed3f | /Python Practice/Assignment.py | af81f0a5477dd1bcad731c9ef95518de49085947 | [] | no_license | AjayKrish24/Assessment | 63cbd8386f4f6fe649abcc3603485ed8647cf6c3 | 6233e268b9812c7f5f859ec03a83691fd3419472 | refs/heads/master | 2022-04-08T06:35:11.142183 | 2020-02-28T11:37:22 | 2020-02-28T11:37:22 | 235,511,361 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py |
1)
string = input("Enter a string : ")
str_list = []
count = ""
for x in string:
if x not in str_list:
str_list.append(x)
for x in str_list:
count = count + x + str(string.count(x))
print(count)
#=======================o/p======================================
Enter a string : aaabbcc
a3b2c2
#***************************************************************************************************
2)
string = [(),("a", "b"),(" ", " ")]
for i in string:
if len(i) == 0:
print("There is an empty tuple in the list")
#=======================o/p======================================
There is an empty tuple in the list
#***************************************************************************************************
4)
word = input()
print(word.title())
#=======================o/p======================================
welcome to python
Welcome To Python
#***************************************************************************************************
5)
import re
ip = input("Enter IP : ")
print(re.match(r"\b(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.)(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\b", ip))
#=======================o/p======================================
Enter IP : 123.45.6.88
<_sre.SRE_Match object; span=(0, 11), match='123.45.6.88'>
#***************************************************************************************************
6)
string_list = ["Welcome", "to", "Python"]
print(" ".join(string_list))
#=======================o/p======================================
string_list = ["Welcome", "to", "Python"]
print(" ".join(string_list))
#***************************************************************************************************
| [
"[email protected]"
] | |
47508a3b9f2141ed5940c7582db50110eb72e9aa | eef1a0e31f723757c5ca8665b9433a9df86d17aa | /func/python/bench_json_loads.py | 311fdb7808b7871b2a891b4608fb5b8789176806 | [
"Apache-2.0"
] | permissive | robinvanemden/Faasm | 09a69fce30300a12d5ba7df55c40a39d81ee5d8f | e005cca20fb4be4ee9ae30f25a5873964b2efd7f | refs/heads/master | 2020-12-01T14:10:51.471549 | 2019-12-20T10:05:17 | 2019-12-20T10:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | import json
from performance.benchmarks.bm_json_loads import DICT, TUPLE, DICT_GROUP, bench_json_loads
if __name__ == "__main__":
    # Serialize the three benchmark fixtures once up front; the loop then
    # feeds the same JSON payloads to bench_json_loads 100 times (the
    # benchmark body lives in performance.benchmarks.bm_json_loads).
    json_dict = json.dumps(DICT)
    json_tuple = json.dumps(TUPLE)
    json_dict_group = json.dumps(DICT_GROUP)
    objs = (json_dict, json_tuple, json_dict_group)
    for x in range(100):
        bench_json_loads(objs)
| [
"[email protected]"
] | |
fe4155275d3a9240634ebe2b2de50705201231ac | a140a7ca1bc5f0af773cb3d22081b4bb75138cfa | /234_palindromLinkedList.py | b1b3a195574aefe83cc26bf49500c32c48a8a3b2 | [] | no_license | YeahHuang/Leetcode | d02bc99d2e890ed0e829515b6f85c4ca6394a1a1 | 78d36486ad4ec2bfb88fd35a5fd7fd4f0003ee97 | refs/heads/master | 2021-07-14T01:53:06.701325 | 2020-06-22T03:01:46 | 2020-06-22T03:01:46 | 166,235,118 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | class Solution:
def isPalindrome(self, head: ListNode) -> bool:
rev = None
slow = fast = head
while fast and fast.next:
fast = fast.next.next
rev, rev.next, slow = slow, rev, slow.next
if fast:
# fast is at the end, move slow one step further for comparison(cross middle one)
slow = slow.next
while rev and rev.val == slow.val:
slow = slow.next
rev = rev.next
# if equivalent then rev become None, return True; otherwise return False
return not rev | [
"[email protected]"
] | |
2b19f94d126f21b48d19683f2785c9ea50a508a4 | 24653fc7753145833651a39c5ccfd2dce9776ef9 | /tests/test_kms/test_model.py | 5d0ffc0978aeb6a962d1f2ed7df60755752a3331 | [
"Apache-2.0"
] | permissive | cm-iwata/moto | fd47802b7bdec567eef575a14109a5fb0c92eea4 | 9640ec20d125248ac91243591c7db50daabfd135 | refs/heads/master | 2022-07-13T23:21:56.898602 | 2022-06-13T10:14:22 | 2022-06-13T10:14:22 | 143,237,437 | 0 | 0 | Apache-2.0 | 2018-08-02T03:27:08 | 2018-08-02T03:27:08 | null | UTF-8 | Python | false | false | 1,147 | py | import pytest
from moto.kms.models import KmsBackend
PLAINTEXT = b"text"
REGION = "us-east-1"
@pytest.fixture
def backend():
    """Fixture: a fresh KmsBackend bound to REGION for each test."""
    return KmsBackend(REGION)
@pytest.fixture
def key(backend):
    """Fixture: a symmetric ENCRYPT_DECRYPT key created on the backend fixture."""
    return backend.create_key(
        None, "ENCRYPT_DECRYPT", "SYMMETRIC_DEFAULT", "Test key", None, REGION
    )
def test_encrypt_key_id(backend, key):
    """Encrypting addressed by bare key id yields ciphertext plus the key ARN."""
    encrypted, used_arn = backend.encrypt(key.id, PLAINTEXT, {})
    assert encrypted is not None
    assert used_arn == key.arn
def test_encrypt_key_arn(backend, key):
    """Encrypting addressed by the full key ARN yields ciphertext plus that ARN."""
    outcome = backend.encrypt(key.arn, PLAINTEXT, {})
    encrypted, used_arn = outcome
    assert encrypted is not None
    assert used_arn == key.arn
def test_encrypt_alias_name(backend, key):
    """An alias name must resolve to the aliased key during encrypt."""
    backend.add_alias(key.id, "alias/test/test")
    encrypted, used_arn = backend.encrypt("alias/test/test", PLAINTEXT, {})
    assert encrypted is not None
    assert used_arn == key.arn
def test_encrypt_alias_arn(backend, key):
    """A fully-qualified alias ARN must resolve to the aliased key during encrypt."""
    backend.add_alias(key.id, "alias/test/test")
    encrypted, used_arn = backend.encrypt(
        f"arn:aws:kms:{REGION}:{key.account_id}:alias/test/test", PLAINTEXT, {}
    )
    assert encrypted is not None
    assert used_arn == key.arn
| [
"[email protected]"
] | |
cb07a323abf8740806bebc941c841ab0e659081b | e6ad1014aacaa92643f42952c278469177defc15 | /napalm_ansible/napalm_diff_yang.py | d134e9bb1a69665bbfabcb13f326bcf956c8cb1d | [
"Apache-2.0"
] | permissive | cspeidel/napalm-ansible | d290ee7cc1abd9dd7d11044d5ddc542bd6658906 | 8ad4badb38d79ec5efd96faa666c71f7438dfa28 | refs/heads/develop | 2022-02-09T05:40:10.302690 | 2017-11-06T20:51:58 | 2017-11-06T20:51:58 | 110,727,639 | 0 | 0 | Apache-2.0 | 2022-01-31T16:25:25 | 2017-11-14T18:18:35 | Python | UTF-8 | Python | false | false | 3,409 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
(c) 2017 David Barroso <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
from ansible.module_utils.basic import AnsibleModule
try:
import napalm_yang
except ImportError:
napalm_yang = None
DOCUMENTATION = '''
---
module: napalm_diff_yang
author: "David Barroso (@dbarrosop)"
version_added: "0.0"
short_description: "Return diff of two YANG objects"
description:
- "Create two YANG objects from dictionaries and runs mehtod"
- "napalm_yang.utils.diff on them."
requirements:
- napalm-yang
options:
models:
description:
- List of models to parse
required: True
first:
description:
- Dictionary with the data to load into the first YANG object
required: True
second:
description:
- Dictionary with the data to load into the second YANG object
required: True
'''
EXAMPLES = '''
napalm_diff_yang:
first: "{{ candidate.yang_model }}"
second: "{{ running_config.yang_model }}"
models:
- models.openconfig_interfaces
register: diff
'''
RETURN = '''
diff:
description: "Same output as the method napalm_yang.utils.diff"
returned: always
type: dict
sample: {
"interfaces": {
"interface": {
"both": {
"Port-Channel1": {
"config": {
"description": {
"first": "blah",
"second": "Asadasd"
}
}
}
}
}
}
'''
def get_root_object(models):
"""
Read list of models and returns a Root object with the proper models added.
"""
root = napalm_yang.base.Root()
for model in models:
current = napalm_yang
for p in model.split("."):
current = getattr(current, p)
root.add_model(current)
return root
def main():
module = AnsibleModule(
argument_spec=dict(
models=dict(type="list", required=True),
first=dict(type='dict', required=True),
second=dict(type='dict', required=True),
),
supports_check_mode=True
)
if not napalm_yang:
module.fail_json(msg="the python module napalm-yang is required")
first = get_root_object(module.params["models"])
first.load_dict(module.params["first"])
second = get_root_object(module.params["models"])
second.load_dict(module.params["second"])
diff = napalm_yang.utils.diff(first, second)
module.exit_json(yang_diff=diff)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f50a62262f8a5fd229e3a174e46c8c9fedf3c950 | cef09d1e6d5e7cd335387d0829211ffb0da18f48 | /tests2/tests/wedge100/test_psumuxmon.py | 73784296b42bf03dd786c25cca01bc61c37967ce | [] | no_license | theopolis/openbmc | a1ef2e3335efd19bf750117d79c1477d47948ff3 | 1784748ba29ee89bccacb2019a0bb86bd181c651 | refs/heads/master | 2020-12-14T07:20:40.273681 | 2019-04-20T05:25:17 | 2019-04-20T05:25:17 | 43,323,632 | 0 | 1 | null | 2015-09-28T19:56:24 | 2015-09-28T19:56:24 | null | UTF-8 | Python | false | false | 2,143 | py | #!/usr/bin/env python
#
# Copyright 2018-present Facebook. All Rights Reserved.
#
# This program file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program in a file named COPYING; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import unittest
import os
import re
from utils.shell_util import run_shell_cmd
from utils.cit_logger import Logger
class PsumuxmonTest(unittest.TestCase):
def setUp(self):
Logger.start(name=__name__)
def tearDown(self):
Logger.info("Finished logging for {}".format(self._testMethodName))
pass
def test_psumuxmon_runit_sv_status(self):
cmd = ["/usr/bin/sv status psumuxmon"]
data = run_shell_cmd(cmd)
self.assertIn("run", data, "psumuxmon process not running")
def get_ltc_hwmon_path(self, path):
pcard_vin = None
result = re.split("hwmon", path)
if os.path.isdir(result[0]):
construct_hwmon_path = result[0] + "hwmon"
x = None
for x in os.listdir(construct_hwmon_path):
if x.startswith('hwmon'):
construct_hwmon_path = construct_hwmon_path + "/" + x + "/" + result[2].split("/")[1]
return construct_hwmon_path
return None
def test_psumuxmon_ltc_sensor_path_exists(self):
# Based on lab device deployment, sensor data might not be accessible.
# Verify that path exists
cmd = "/sys/bus/i2c/devices/7-006f/hwmon/hwmon*/in1_input"
self.assertTrue(os.path.exists(self.get_ltc_hwmon_path(cmd)),
"psumuxmon LTC sensor path accessible")
| [
"[email protected]"
] | |
803c12056e1bb1f8bb8a7ab3310523f027750019 | 338a11833d8e83dd0e4580ab3dc21b95fe17183b | /logica.py | 145a353284a5785f04491bdf85f74a8b95240a4a | [] | no_license | MaBlestastic/UML-TiendaElectronica | 6f3294a68dca2ca9fc796669307886d108e0a32f | 73a119e3224accdb9ffc90e4cb832f76590a8995 | refs/heads/main | 2023-09-06T00:47:24.907642 | 2021-11-13T00:04:01 | 2021-11-13T00:04:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import beconnect
def gestionarProv (nombreprod):
    # Look up a product by name via the beconnect DB helper.
    # SECURITY NOTE(review): the SQL is built by string concatenation, so
    # `nombreprod` is injectable; switch to a parameterized query if
    # beconnect supports one. Also note this definition is overridden by
    # the later zero-argument `def gestionarProv ()` below, making this
    # version dead code.
    beconnect.Mostrar("SELECT nombreprod FROM producto WHERE nombreprod = "+ nombreprod )
    pass
# Placeholder operations for the store workflow; not implemented yet.
def controlarProd():
    # TODO: implement product control.
    pass
def comprarProd():
    # TODO: implement product purchase.
    pass
def controlarStockProd():
    # TODO: implement stock control.
    pass
def venderCliente():
    # TODO: implement customer sale.
    pass
def reservarProd():
    # TODO: implement product reservation.
    pass
def gestionarProv ():
    # NOTE(review): this re-definition silently replaces the earlier
    # one-argument gestionarProv defined above in this module.
    # Prompts for a product name and description, then inserts the row;
    # the INSERT itself is parameterized (%s placeholders, values passed
    # separately in `val`).
    Nombre = input ( "xd \t" )
    Descripcion= input ("xd \t")
    sql = "INSERT INTO producto (nombreprod,descripprod) VALUES (%s,%s)"
    val= [(Nombre,Descripcion)]
    beconnect.EjecutarSQL_VAL(sql, val)
gestionarProv () | [
"[email protected]"
] | |
eecaffdbe17ebf356d4729447b601c155f4a4f9d | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/metric/metrics.py | aeec4022e218424eb20183b6917aa2f39a17d588 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 28,411 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import abc
import numpy as np
from ..fluid.data_feeder import check_variable_and_dtype
from ..fluid.layer_helper import LayerHelper
from ..fluid.framework import core, _varbase_creator, _non_static_mode, _in_legacy_dygraph
import paddle
from paddle import _C_ops, _legacy_C_ops
__all__ = []
def _is_numpy_(var):
return isinstance(var, (np.ndarray, np.generic))
# six.add_metaclass keeps the ABCMeta metaclass spelling compatible with
# both Python 2 and 3 (this module still imports six).
@six.add_metaclass(abc.ABCMeta)
class Metric(object):
    r"""
    Base class for metric, encapsulates metric logic and APIs
    Usage:

        .. code-block:: text

            m = SomeMetric()
            for prediction, label in ...:
                m.update(prediction, label)
            m.accumulate()

    Advanced usage for :code:`compute`:

    Metric calculation can be accelerated by calculating metric states
    from model outputs and labels by built-in operators not by Python/NumPy
    in :code:`compute`, metric states will be fetched as NumPy array and
    call :code:`update` with states in NumPy format.
    Metric calculated as follows (operations in Model and Metric are
    indicated with curly brackets, while data nodes not):

        .. code-block:: text

                 inputs & labels              || ------------------
                       |                      ||
                    {model}                   ||
                       |                      ||
                outputs & labels              ||
                       |                      ||    tensor data
                {Metric.compute}              ||
                       |                      ||
              metric states(tensor)           ||
                       |                      ||
                {fetch as numpy}              || ------------------
                       |                      ||
              metric states(numpy)            ||    numpy data
                       |                      ||
                {Metric.update}               \/ ------------------

    Examples:

        For :code:`Accuracy` metric, which takes :code:`pred` and :code:`label`
        as inputs, we can calculate the correct prediction matrix between
        :code:`pred` and :code:`label` in :code:`compute`.
        For examples, prediction results contains 10 classes, while :code:`pred`
        shape is [N, 10], :code:`label` shape is [N, 1], N is mini-batch size,
        and we only need to calculate accurary of top-1 and top-5, we could
        calculate the correct prediction matrix of the top-5 scores of the
        prediction of each sample like follows, while the correct prediction
        matrix shape is [N, 5].

        .. code-block:: text

            def compute(pred, label):
                # sort prediction and slice the top-5 scores
                pred = paddle.argsort(pred, descending=True)[:, :5]
                # calculate whether the predictions are correct
                correct = pred == label
                return paddle.cast(correct, dtype='float32')

        With the :code:`compute`, we split some calculations to OPs (which
        may run on GPU devices, will be faster), and only fetch 1 tensor with
        shape as [N, 5] instead of 2 tensors with shapes as [N, 10] and [N, 1].
        :code:`update` can be defined as follows:

        .. code-block:: text

            def update(self, correct):
                accs = []
                for i, k in enumerate(self.topk):
                    num_corrects = correct[:, :k].sum()
                    num_samples = len(correct)
                    accs.append(float(num_corrects) / num_samples)
                    self.total[i] += num_corrects
                    self.count[i] += num_samples
                return accs
    """

    def __init__(self):
        pass

    @abc.abstractmethod
    def reset(self):
        """
        Reset states and result
        """
        raise NotImplementedError(
            "function 'reset' not implemented in {}.".format(
                self.__class__.__name__))

    @abc.abstractmethod
    def update(self, *args):
        """
        Update states for metric

        Inputs of :code:`update` is the outputs of :code:`Metric.compute`,
        if :code:`compute` is not defined, the inputs of :code:`update`
        will be flatten arguments of **output** of mode and **label** from data:
        :code:`update(output1, output2, ..., label1, label2,...)`

        see :code:`Metric.compute`
        """
        raise NotImplementedError(
            "function 'update' not implemented in {}.".format(
                self.__class__.__name__))

    @abc.abstractmethod
    def accumulate(self):
        """
        Accumulates statistics, computes and returns the metric value
        """
        raise NotImplementedError(
            "function 'accumulate' not implemented in {}.".format(
                self.__class__.__name__))

    @abc.abstractmethod
    def name(self):
        """
        Returns metric name
        """
        raise NotImplementedError(
            "function 'name' not implemented in {}.".format(
                self.__class__.__name__))

    def compute(self, *args):
        """
        This API is advanced usage to accelerate metric calculating, calulations
        from outputs of model to the states which should be updated by Metric can
        be defined here, where Paddle OPs is also supported. Outputs of this API
        will be the inputs of "Metric.update".

        If :code:`compute` is defined, it will be called with **outputs**
        of model and **labels** from data as arguments, all outputs and labels
        will be concatenated and flatten and each filed as a separate argument
        as follows:
        :code:`compute(output1, output2, ..., label1, label2,...)`

        If :code:`compute` is not defined, default behaviour is to pass
        input to output, so output format will be:
        :code:`return output1, output2, ..., label1, label2,...`

        see :code:`Metric.update`
        """
        return args
class Accuracy(Metric):
    """
    Top-k accuracy metric.

    Args:
        topk (list[int]|tuple[int]): the k values at which accuracy is
            reported. Default is (1,).
        name (str, optional): name of the metric instance. Default is `acc`.

    Example by standalone:

        .. code-block:: python

          import numpy as np
          import paddle

          x = paddle.to_tensor(np.array([
              [0.1, 0.2, 0.3, 0.4],
              [0.1, 0.4, 0.3, 0.2],
              [0.1, 0.2, 0.4, 0.3],
              [0.1, 0.2, 0.3, 0.4]]))
          y = paddle.to_tensor(np.array([[0], [1], [2], [3]]))

          m = paddle.metric.Accuracy()
          correct = m.compute(x, y)
          m.update(correct)
          res = m.accumulate()
          print(res) # 0.75
    """

    def __init__(self, topk=(1, ), name=None, *args, **kwargs):
        super(Accuracy, self).__init__(*args, **kwargs)
        self.topk = topk
        self.maxk = max(topk)
        self._init_name(name)
        self.reset()

    def compute(self, pred, label, *args):
        """
        Compute the correctness mask of the top-maxk predictions.

        Args:
            pred (Tensor): predictions, float32/float64,
                shape [batch_size, d0, ..., dN].
            label (Tensor): ground truth, int64, either
                [batch_size, d0, ..., 1] or one-hot
                [batch_size, d0, ..., num_classes].

        Return:
            Tensor: float32 correct mask, shape [batch_size, d0, ..., topk].
        """
        last_dim = len(pred.shape) - 1
        pred = paddle.argsort(pred, descending=True)
        pred = paddle.slice(pred,
                            axes=[last_dim],
                            starts=[0],
                            ends=[self.maxk])
        label_rank = len(label.shape)
        if label_rank == 1 or (label_rank == 2 and label.shape[-1] == 1):
            # In static mode, the real label data shape may differ from the
            # paddle.static.InputSpec shape used at build time; force the
            # expected column-vector layout.
            label = paddle.reshape(label, (-1, 1))
        elif label.shape[-1] != 1:
            # one-hot labels -> class indices
            label = paddle.argmax(label, axis=-1, keepdim=True)
        correct = pred == label
        return paddle.cast(correct, dtype='float32')

    def update(self, correct, *args):
        """
        Fold one step's correct mask into the running counts and return the
        accuracy of this step (scalar for a single k, list otherwise).
        """
        if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
            correct = correct.numpy()
        num_samples = np.prod(np.array(correct.shape[:-1]))
        batch_accs = []
        for idx, k in enumerate(self.topk):
            num_corrects = correct[..., :k].sum()
            batch_accs.append(float(num_corrects) / num_samples)
            self.total[idx] += num_corrects
            self.count[idx] += num_samples
        if len(self.topk) == 1:
            return batch_accs[0]
        return batch_accs

    def reset(self):
        """
        Resets all of the metric state.
        """
        num_k = len(self.topk)
        self.total = [0.] * num_k
        self.count = [0] * num_k

    def accumulate(self):
        """
        Computes and returns the accumulated metric
        (scalar for a single k, list otherwise).
        """
        results = [
            float(t) / c if c > 0 else 0.
            for t, c in zip(self.total, self.count)
        ]
        if len(self.topk) == 1:
            return results[0]
        return results

    def _init_name(self, name):
        # One name per k when maxk > 1, e.g. acc_top1/acc_top5.
        name = name or 'acc'
        if self.maxk == 1:
            self._name = [name]
        else:
            self._name = ['{}_top{}'.format(name, k) for k in self.topk]

    def name(self):
        """
        Return name of metric instance.
        """
        return self._name
class Precision(Metric):
    """
    Precision (positive predictive value): the fraction of predicted
    positives that are truly positive, accumulated over all updates. Refer to
    https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers

    Noted that this class manages the precision score only for binary
    classification task.

    Args:
        name (str, optional): name of the metric instance.
            Default is `precision`.

    Example by standalone:

        .. code-block:: python

          import numpy as np
          import paddle

          x = np.array([0.1, 0.5, 0.6, 0.7])
          y = np.array([0, 1, 1, 1])

          m = paddle.metric.Precision()
          m.update(x, y)
          res = m.accumulate()
          print(res) # 1.0
    """

    def __init__(self, name='precision', *args, **kwargs):
        super(Precision, self).__init__(*args, **kwargs)
        self.tp = 0  # true positives
        self.fp = 0  # false positives
        self._name = name

    def update(self, preds, labels):
        """
        Fold one mini-batch into the true/false positive counts.

        Args:
            preds (numpy.ndarray): two-class sigmoid scores, a vector with
                dtype float32/float64.
            labels (numpy.ndarray): ground truth, same shape as preds,
                dtype int32/int64.
        """
        if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
            preds = preds.numpy()
        elif not _is_numpy_(preds):
            raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")

        if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
            labels = labels.numpy()
        elif not _is_numpy_(labels):
            raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")

        # Round-half-up to a hard 0/1 prediction.
        preds = np.floor(preds + 0.5).astype("int32")
        for idx in range(labels.shape[0]):
            pred = preds[idx]
            if pred != 1:
                continue
            if pred == labels[idx]:
                self.tp += 1
            else:
                self.fp += 1

    def reset(self):
        """
        Resets all of the metric state.
        """
        self.tp = 0
        self.fp = 0

    def accumulate(self):
        """
        Calculate the final precision.

        Returns:
            A scalar float: 0.0 when no positive predictions were made.
        """
        predicted_positives = self.tp + self.fp
        if predicted_positives == 0:
            return .0
        return float(self.tp) / predicted_positives

    def name(self):
        """
        Returns metric name
        """
        return self._name
class Recall(Metric):
"""
Recall (also known as sensitivity) is the fraction of
relevant instances that have been retrieved over the
total amount of relevant instances
Refer to:
https://en.wikipedia.org/wiki/Precision_and_recall
Noted that this class manages the recall score only for
binary classification task.
Args:
name (str, optional): String name of the metric instance.
Default is `recall`.
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
x = np.array([0.1, 0.5, 0.6, 0.7])
y = np.array([1, 0, 1, 1])
m = paddle.metric.Recall()
m.update(x, y)
res = m.accumulate()
print(res) # 2.0 / 3.0
Example with Model API:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
class Data(paddle.io.Dataset):
def __init__(self):
super(Data, self).__init__()
self.n = 1024
self.x = np.random.randn(self.n, 10).astype('float32')
self.y = np.random.randint(2, size=(self.n, 1)).astype('float32')
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return self.n
model = paddle.Model(nn.Sequential(
nn.Linear(10, 1),
nn.Sigmoid()
))
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
model.prepare(
optim,
loss=nn.BCELoss(),
metrics=[paddle.metric.Precision(), paddle.metric.Recall()])
data = Data()
model.fit(data, batch_size=16)
"""
def __init__(self, name='recall', *args, **kwargs):
super(Recall, self).__init__(*args, **kwargs)
self.tp = 0 # true positive
self.fn = 0 # false negative
self._name = name
def update(self, preds, labels):
"""
Update the states based on the current mini-batch prediction results.
Args:
preds(numpy.array): prediction results of current mini-batch,
the output of two-class sigmoid function.
Shape: [batch_size, 1]. Dtype: 'float64' or 'float32'.
labels(numpy.array): ground truth (labels) of current mini-batch,
the shape should keep the same as preds.
Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
"""
if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
sample_num = labels.shape[0]
preds = np.rint(preds).astype("int32")
for i in range(sample_num):
pred = preds[i]
label = labels[i]
if label == 1:
if pred == label:
self.tp += 1
else:
self.fn += 1
def accumulate(self):
"""
Calculate the final recall.
Returns:
A scaler float: results of the calculated Recall.
"""
recall = self.tp + self.fn
return float(self.tp) / recall if recall != 0 else .0
def reset(self):
"""
Resets all of the metric state.
"""
self.tp = 0
self.fn = 0
def name(self):
"""
Returns metric name
"""
return self._name
class Auc(Metric):
"""
The auc metric is for binary classification.
Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve.
Please notice that the auc metric is implemented with python, which may be a little bit slow.
The `auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
values by the false positive rate, while the area under the PR-curve is the
computed using the height of the precision values by the recall.
Args:
curve (str): Specifies the mode of the curve to be computed,
'ROC' or 'PR' for the Precision-Recall-curve. Default is 'ROC'.
num_thresholds (int): The number of thresholds to use when
discretizing the roc curve. Default is 4095.
'ROC' or 'PR' for the Precision-Recall-curve. Default is 'ROC'.
name (str, optional): String name of the metric instance. Default
is `auc`.
"NOTE: only implement the ROC curve type via Python now."
Example by standalone:
.. code-block:: python
import numpy as np
import paddle
m = paddle.metric.Auc()
n = 8
class0_preds = np.random.random(size = (n, 1))
class1_preds = 1 - class0_preds
preds = np.concatenate((class0_preds, class1_preds), axis=1)
labels = np.random.randint(2, size = (n, 1))
m.update(preds=preds, labels=labels)
res = m.accumulate()
Example with Model API:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
class Data(paddle.io.Dataset):
def __init__(self):
super(Data, self).__init__()
self.n = 1024
self.x = np.random.randn(self.n, 10).astype('float32')
self.y = np.random.randint(2, size=(self.n, 1)).astype('int64')
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def __len__(self):
return self.n
model = paddle.Model(nn.Sequential(
nn.Linear(10, 2), nn.Softmax())
)
optim = paddle.optimizer.Adam(
learning_rate=0.001, parameters=model.parameters())
def loss(x, y):
return nn.functional.nll_loss(paddle.log(x), y)
model.prepare(
optim,
loss=loss,
metrics=paddle.metric.Auc())
data = Data()
model.fit(data, batch_size=16)
"""
def __init__(self,
curve='ROC',
num_thresholds=4095,
name='auc',
*args,
**kwargs):
super(Auc, self).__init__(*args, **kwargs)
self._curve = curve
self._num_thresholds = num_thresholds
_num_pred_buckets = num_thresholds + 1
self._stat_pos = np.zeros(_num_pred_buckets)
self._stat_neg = np.zeros(_num_pred_buckets)
self._name = name
def update(self, preds, labels):
"""
Update the auc curve with the given predictions and labels.
Args:
preds (numpy.array): An numpy array in the shape of
(batch_size, 2), preds[i][j] denotes the probability of
classifying the instance i into the class j.
labels (numpy.array): an numpy array in the shape of
(batch_size, 1), labels[i] is either o or 1,
representing the label of the instance i.
"""
if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
for i, lbl in enumerate(labels):
value = preds[i, 1]
bin_idx = int(value * self._num_thresholds)
assert bin_idx <= self._num_thresholds
if lbl:
self._stat_pos[bin_idx] += 1.0
else:
self._stat_neg[bin_idx] += 1.0
@staticmethod
def trapezoid_area(x1, x2, y1, y2):
return abs(x1 - x2) * (y1 + y2) / 2.0
def accumulate(self):
"""
Return the area (a float score) under auc curve
Return:
float: the area under auc curve
"""
tot_pos = 0.0
tot_neg = 0.0
auc = 0.0
idx = self._num_thresholds
while idx >= 0:
tot_pos_prev = tot_pos
tot_neg_prev = tot_neg
tot_pos += self._stat_pos[idx]
tot_neg += self._stat_neg[idx]
auc += self.trapezoid_area(tot_neg, tot_neg_prev, tot_pos,
tot_pos_prev)
idx -= 1
return auc / tot_pos / tot_neg if tot_pos > 0.0 and tot_neg > 0.0 else 0.0
def reset(self):
"""
Reset states and result
"""
_num_pred_buckets = self._num_thresholds + 1
self._stat_pos = np.zeros(_num_pred_buckets)
self._stat_neg = np.zeros(_num_pred_buckets)
def name(self):
"""
Returns metric name
"""
return self._name
def accuracy(input, label, k=1, correct=None, total=None, name=None):
"""
accuracy layer.
Refer to the https://en.wikipedia.org/wiki/Precision_and_recall
This function computes the accuracy using the input and label.
If the correct label occurs in top k predictions, then correct will increment by one.
Note: the dtype of accuracy is determined by input. the input and label dtype can be different.
Args:
input(Tensor): The input of accuracy layer, which is the predictions of network. A Tensor with type float32,float64.
The shape is ``[sample_number, class_dim]`` .
label(Tensor): The label of dataset. Tensor with type int64 or int32. The shape is ``[sample_number, 1]`` .
k(int, optional): The top k predictions for each class will be checked. Data type is int64 or int32.
correct(Tensor, optional): The correct predictions count. A Tensor with type int64 or int32.
total(Tensor, optional): The total entries count. A tensor with type int64 or int32.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, the correct rate. A Tensor with type float32.
Examples:
.. code-block:: python
import paddle
predictions = paddle.to_tensor([[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32')
label = paddle.to_tensor([[2], [0]], dtype="int64")
result = paddle.metric.accuracy(input=predictions, label=label, k=1)
# [0.5]
"""
if label.dtype == paddle.int32:
label = paddle.cast(label, paddle.int64)
if _non_static_mode():
if correct is None:
correct = _varbase_creator(dtype="int32")
if total is None:
total = _varbase_creator(dtype="int32")
topk_out, topk_indices = paddle.topk(input, k=k)
_acc, _, _ = _legacy_C_ops.accuracy(topk_out, topk_indices, label,
correct, total)
return _acc
helper = LayerHelper("accuracy", **locals())
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
'accuracy')
topk_out, topk_indices = paddle.topk(input, k=k)
acc_out = helper.create_variable_for_type_inference(dtype="float32")
if correct is None:
correct = helper.create_variable_for_type_inference(dtype="int32")
if total is None:
total = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(type="accuracy",
inputs={
"Out": [topk_out],
"Indices": [topk_indices],
"Label": [label]
},
outputs={
"Accuracy": [acc_out],
"Correct": [correct],
"Total": [total],
})
return acc_out
| [
"[email protected]"
] | |
9b6a313c4143391d0e759e966d2a74b8e14b3fb2 | 6cee35876c6a1afdc1a2f9293fbcf41719f3852d | /chap_2/exercise2.py | c89851bc22530e3167a8bbdee2b1449bc3979f7b | [] | no_license | SiddhantAshtekar/python-algorithem-for-begginers | a7c31cd2cd96d70e13a2d0119da94fe7f38c5056 | 07803850aa78c07ce608d18173afebd398543121 | refs/heads/master | 2020-05-07T10:28:20.310114 | 2019-04-09T17:33:19 | 2019-04-09T17:33:19 | 180,417,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | name=input("Enter your name ")
print(f"the revers of your name is {name[-1::-1]}")#revers of sting | [
"[email protected]"
] | |
62057c8eb956315f5f52fa00e9a3237b9e78aa7e | c1faf35b2fe1beda6c839031465195ea58b4c495 | /panelserverextension.py | eae784198ae449200859acbf4742f46ee152c279 | [] | no_license | makwingchi/philly-route-finder | ff9f6001a39a7d838ff143ee5445cc848f456205 | c807c76290772f4b31bd0cdaab7a1ab6e505d8e7 | refs/heads/master | 2020-05-22T10:19:23.003297 | 2019-07-14T15:25:46 | 2019-07-14T15:25:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from subprocess import Popen
def load_jupyter_server_extension(nbapp):
"""serve the app.ipynb directory with bokeh server"""
Popen(["panel", "serve", "app.ipynb", "--allow-websocket-origin=*"]) | [
"[email protected]"
] | |
0b122012c36b3cd5dad4e207579418712c3535ca | 642e8d6d8cd8d08a73bdcf82ae9689a09284025c | /celery/worker/__init__.py | 96b994779744ff87efee9a3dcbecee7745c8b868 | [
"BSD-3-Clause"
] | permissive | abecciu/celery | 941f29c033b54b766166f17aa8c5e4be05df08b9 | f0c399e34d56c7a2a14cb42bfb2b6455c68ef0c0 | refs/heads/master | 2021-01-14T12:57:11.230199 | 2009-09-10T13:44:51 | 2009-09-10T13:44:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,215 | py | """
The Multiprocessing Worker Server
Documentation for this module is in ``docs/reference/celery.worker.rst``.
"""
from carrot.connection import DjangoBrokerConnection, AMQPConnectionException
from celery.worker.controllers import Mediator, PeriodicWorkController
from celery.worker.job import TaskWrapper
from celery.exceptions import NotRegistered
from celery.messaging import get_consumer_set
from celery.conf import DAEMON_CONCURRENCY, DAEMON_LOG_FILE
from celery.conf import AMQP_CONNECTION_RETRY, AMQP_CONNECTION_MAX_RETRIES
from celery.log import setup_logger
from celery.pool import TaskPool
from celery.utils import retry_over_time
from celery.datastructures import SharedCounter
from Queue import Queue
import traceback
import logging
import socket
class AMQPListener(object):
"""Listen for messages received from the AMQP broker and
move them the the bucket queue for task processing.
:param bucket_queue: See :attr:`bucket_queue`.
:param hold_queue: See :attr:`hold_queue`.
.. attribute:: bucket_queue
The queue that holds tasks ready for processing immediately.
.. attribute:: hold_queue
The queue that holds paused tasks. Reasons for being paused include
a countdown/eta or that it's waiting for retry.
.. attribute:: logger
The logger used.
"""
def __init__(self, bucket_queue, hold_queue, logger,
initial_prefetch_count=2):
self.amqp_connection = None
self.task_consumer = None
self.bucket_queue = bucket_queue
self.hold_queue = hold_queue
self.logger = logger
self.prefetch_count = SharedCounter(initial_prefetch_count)
def start(self):
"""Start the consumer.
If the connection is lost, it tries to re-establish the connection
over time and restart consuming messages.
"""
while True:
self.reset_connection()
try:
self.consume_messages()
except (socket.error, AMQPConnectionException):
self.logger.error("AMQPListener: Connection to broker lost. "
+ "Trying to re-establish connection...")
def consume_messages(self):
"""Consume messages forever (or until an exception is raised)."""
task_consumer = self.task_consumer
self.logger.debug("AMQPListener: Starting message consumer...")
it = task_consumer.iterconsume(limit=None)
self.logger.debug("AMQPListener: Ready to accept tasks!")
while True:
self.task_consumer.qos(prefetch_count=int(self.prefetch_count))
it.next()
def stop(self):
"""Stop processing AMQP messages and close the connection
to the broker."""
self.close_connection()
def receive_message(self, message_data, message):
"""The callback called when a new message is received.
If the message has an ``eta`` we move it to the hold queue,
otherwise we move it the bucket queue for immediate processing.
"""
try:
task = TaskWrapper.from_message(message, message_data,
logger=self.logger)
except NotRegistered, exc:
self.logger.error("Unknown task ignored: %s" % (exc))
return
eta = message_data.get("eta")
if eta:
self.prefetch_count.increment()
self.logger.info("Got task from broker: %s[%s] eta:[%s]" % (
task.task_name, task.task_id, eta))
self.hold_queue.put((task, eta, self.prefetch_count.decrement))
else:
self.logger.info("Got task from broker: %s[%s]" % (
task.task_name, task.task_id))
self.bucket_queue.put(task)
def close_connection(self):
"""Close the AMQP connection."""
if self.task_consumer:
self.task_consumer.close()
self.task_consumer = None
if self.amqp_connection:
self.logger.debug(
"AMQPListener: Closing connection to the broker...")
self.amqp_connection.close()
self.amqp_connection = None
def reset_connection(self):
"""Reset the AMQP connection, and reinitialize the
:class:`carrot.messaging.ConsumerSet` instance.
Resets the task consumer in :attr:`task_consumer`.
"""
self.logger.debug(
"AMQPListener: Re-establishing connection to the broker...")
self.close_connection()
self.amqp_connection = self._open_connection()
self.task_consumer = get_consumer_set(connection=self.amqp_connection)
self.task_consumer.register_callback(self.receive_message)
def _open_connection(self):
"""Retries connecting to the AMQP broker over time.
See :func:`celery.utils.retry_over_time`.
"""
def _connection_error_handler(exc, interval):
"""Callback handler for connection errors."""
self.logger.error("AMQP Listener: Connection Error: %s. " % exc
+ "Trying again in %d seconds..." % interval)
def _establish_connection():
"""Establish a connection to the AMQP broker."""
conn = DjangoBrokerConnection()
connected = conn.connection # Connection is established lazily.
return conn
if not AMQP_CONNECTION_RETRY:
return _establish_connection()
conn = retry_over_time(_establish_connection, socket.error,
errback=_connection_error_handler,
max_retries=AMQP_CONNECTION_MAX_RETRIES)
self.logger.debug("AMQPListener: Connection Established.")
return conn
class WorkController(object):
"""Executes tasks waiting in the task queue.
:param concurrency: see :attr:`concurrency`.
:param logfile: see :attr:`logfile`.
:param loglevel: see :attr:`loglevel`.
.. attribute:: concurrency
The number of simultaneous processes doing work (default:
:const:`celery.conf.DAEMON_CONCURRENCY`)
.. attribute:: loglevel
The loglevel used (default: :const:`logging.INFO`)
.. attribute:: logfile
The logfile used, if no logfile is specified it uses ``stderr``
(default: :const:`celery.conf.DAEMON_LOG_FILE`).
.. attribute:: logger
The :class:`logging.Logger` instance used for logging.
.. attribute:: is_detached
Flag describing if the worker is running as a daemon or not.
.. attribute:: pool
The :class:`multiprocessing.Pool` instance used.
.. attribute:: bucket_queue
The :class:`Queue.Queue` that holds tasks ready for immediate
processing.
.. attribute:: hold_queue
The :class:`Queue.Queue` that holds paused tasks. Reasons for holding
back the task include waiting for ``eta`` to pass or the task is being
retried.
.. attribute:: periodic_work_controller
Instance of :class:`celery.worker.controllers.PeriodicWorkController`.
.. attribute:: mediator
Instance of :class:`celery.worker.controllers.Mediator`.
.. attribute:: amqp_listener
Instance of :class:`AMQPListener`.
"""
loglevel = logging.ERROR
concurrency = DAEMON_CONCURRENCY
logfile = DAEMON_LOG_FILE
_state = None
def __init__(self, concurrency=None, logfile=None, loglevel=None,
is_detached=False):
# Options
self.loglevel = loglevel or self.loglevel
self.concurrency = concurrency or self.concurrency
self.logfile = logfile or self.logfile
self.is_detached = is_detached
self.logger = setup_logger(loglevel, logfile)
# Queues
self.bucket_queue = Queue()
self.hold_queue = Queue()
self.logger.debug("Instantiating thread components...")
# Threads+Pool
self.periodic_work_controller = PeriodicWorkController(
self.bucket_queue,
self.hold_queue)
self.pool = TaskPool(self.concurrency, logger=self.logger)
self.amqp_listener = AMQPListener(self.bucket_queue, self.hold_queue,
logger=self.logger,
initial_prefetch_count=concurrency)
self.mediator = Mediator(self.bucket_queue, self.safe_process_task)
# The order is important here;
# the first in the list is the first to start,
# and they must be stopped in reverse order.
self.components = [self.pool,
self.mediator,
self.periodic_work_controller,
self.amqp_listener]
def start(self):
"""Starts the workers main loop."""
self._state = "RUN"
try:
for component in self.components:
self.logger.debug("Starting thread %s..." % \
component.__class__.__name__)
component.start()
finally:
self.stop()
def safe_process_task(self, task):
"""Same as :meth:`process_task`, but catches all exceptions
the task raises and log them as errors, to make sure the
worker doesn't die."""
try:
try:
self.process_task(task)
except Exception, exc:
self.logger.critical("Internal error %s: %s\n%s" % (
exc.__class__, exc, traceback.format_exc()))
except (SystemExit, KeyboardInterrupt):
self.stop()
def process_task(self, task):
"""Process task by sending it to the pool of workers."""
task.execute_using_pool(self.pool, self.loglevel, self.logfile)
def stop(self):
"""Gracefully shutdown the worker server."""
# shut down the periodic work controller thread
if self._state != "RUN":
return
[component.stop() for component in reversed(self.components)]
self._state = "STOP"
| [
"[email protected]"
] | |
5387260b0ece475f0630b5bce216b990dc590b25 | dbd848387ab3379627e14aaf5cfaa832449b3bda | /tests/test_core_socks_async_trio.py | 13d57e1d3d48e62ce6a282d6f4bf46d12f15ee89 | [] | no_license | Sweety1337/py-socks-updated | 9940b1256eee6db80a9b170574b90d7ccf617dd1 | ddda6575368022107143245787beed90e4a277fa | refs/heads/master | 2022-12-16T21:17:55.894217 | 2020-09-24T14:22:30 | 2020-09-24T14:22:30 | 298,301,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,899 | py | import socket
import ssl
import trio # noqa
import pytest # noqa
from yarl import URL # noqa
from python_socks import (
ProxyType,
ProxyError,
ProxyTimeoutError,
ProxyConnectionError
)
from python_socks._proxy_async import AsyncProxy # noqa
from python_socks.async_.trio import Proxy
from python_socks.async_ import ProxyChain
# noinspection PyUnresolvedReferences,PyProtectedMember
from python_socks._resolver_async_trio import Resolver
from tests.conftest import (
SOCKS5_IPV4_HOST, SOCKS5_IPV4_PORT, LOGIN, PASSWORD, SKIP_IPV6_TESTS,
SOCKS5_IPV4_URL, SOCKS5_IPV4_URL_WO_AUTH, SOCKS5_IPV6_URL, SOCKS4_URL,
HTTP_PROXY_URL
)
# TEST_URL = 'https://httpbin.org/ip'
TEST_URL = 'https://check-host.net/ip'
async def make_request(proxy: AsyncProxy,
url: str, resolve_host=False, timeout=None):
url = URL(url)
dest_host = url.host
if resolve_host:
resolver = Resolver()
_, dest_host = await resolver.resolve(url.host)
sock: socket.socket = await proxy.connect(
dest_host=dest_host,
dest_port=url.port,
timeout=timeout
)
ssl_context = None
if url.scheme == 'https':
ssl_context = ssl.create_default_context()
stream = trio.SocketStream(sock)
if ssl_context is not None:
stream = trio.SSLStream(
stream, ssl_context,
server_hostname=url.host
)
await stream.do_handshake()
request = (
'GET {rel_url} HTTP/1.1\r\n'
'Host: {host}\r\n'
'Connection: close\r\n\r\n'
)
request = request.format(rel_url=url.path_qs, host=url.host)
request = request.encode('ascii')
await stream.send_all(request)
response = await stream.receive_some(1024)
status_line = response.split(b'\r\n', 1)[0]
status_line = status_line.decode('utf-8', 'surrogateescape')
version, status_code, *reason = status_line.split()
return int(status_code)
@pytest.mark.parametrize('rdns', (True, False))
@pytest.mark.parametrize('resolve_host', (True, False))
@pytest.mark.trio
async def test_socks5_proxy_ipv4(rdns, resolve_host):
proxy = Proxy.from_url(SOCKS5_IPV4_URL, rdns=rdns)
status_code = await make_request(
proxy=proxy,
url=TEST_URL,
resolve_host=resolve_host
)
assert status_code == 200
@pytest.mark.parametrize('rdns', (None, True, False))
@pytest.mark.trio
async def test_socks5_proxy_ipv4_with_auth_none(rdns):
proxy = Proxy.from_url(SOCKS5_IPV4_URL_WO_AUTH, rdns=rdns)
status_code = await make_request(proxy=proxy, url=TEST_URL)
assert status_code == 200
@pytest.mark.trio
async def test_socks5_proxy_with_invalid_credentials():
proxy = Proxy.create(
proxy_type=ProxyType.SOCKS5,
host=SOCKS5_IPV4_HOST,
port=SOCKS5_IPV4_PORT,
username=LOGIN,
password=PASSWORD + 'aaa',
)
with pytest.raises(ProxyError):
await make_request(proxy=proxy, url=TEST_URL)
@pytest.mark.trio
async def test_socks5_proxy_with_connect_timeout():
proxy = Proxy.create(
proxy_type=ProxyType.SOCKS5,
host=SOCKS5_IPV4_HOST,
port=SOCKS5_IPV4_PORT,
username=LOGIN,
password=PASSWORD,
)
with pytest.raises(ProxyTimeoutError):
await make_request(proxy=proxy, url=TEST_URL, timeout=0.0001)
@pytest.mark.trio
async def test_socks5_proxy_with_invalid_proxy_port(unused_tcp_port):
proxy = Proxy.create(
proxy_type=ProxyType.SOCKS5,
host=SOCKS5_IPV4_HOST,
port=unused_tcp_port,
username=LOGIN,
password=PASSWORD,
)
with pytest.raises(ProxyConnectionError):
await make_request(proxy=proxy, url=TEST_URL)
@pytest.mark.skipif(SKIP_IPV6_TESTS, reason='TravisCI doesn`t support ipv6')
@pytest.mark.trio
async def test_socks5_proxy_ipv6():
proxy = Proxy.from_url(SOCKS5_IPV6_URL)
status_code = await make_request(proxy=proxy, url=TEST_URL)
assert status_code == 200
@pytest.mark.parametrize('rdns', (None, True, False))
@pytest.mark.parametrize('resolve_host', (True, False))
@pytest.mark.trio
async def test_socks4_proxy(rdns, resolve_host):
proxy = Proxy.from_url(SOCKS4_URL, rdns=rdns)
status_code = await make_request(
proxy=proxy,
url=TEST_URL,
resolve_host=resolve_host
)
assert status_code == 200
@pytest.mark.trio
async def test_http_proxy():
proxy = Proxy.from_url(HTTP_PROXY_URL)
status_code = await make_request(proxy=proxy, url=TEST_URL)
assert status_code == 200
@pytest.mark.trio
async def test_proxy_chain():
proxy = ProxyChain([
Proxy.from_url(SOCKS5_IPV4_URL),
Proxy.from_url(SOCKS4_URL),
Proxy.from_url(HTTP_PROXY_URL),
])
# noinspection PyTypeChecker
status_code = await make_request(proxy=proxy, url=TEST_URL)
assert status_code == 200
| [
"[email protected]"
] | |
997a2a9aa16da7c874e599ae181d4bd45503f1e8 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/EDataServer/SourceCredentialsProviderImpl.py | 281ee12030f4bf3eeecff51d446fa85a2b655621 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 17,806 | py | # encoding: utf-8
# module gi.repository.EDataServer
# from /usr/lib64/girepository-1.0/EDataServer-1.2.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gi.repository.GObject as __gi_repository_GObject
import gi.repository.Soup as __gi_repository_Soup
import gobject as __gobject
from .Extension import Extension
class SourceCredentialsProviderImpl(Extension):
"""
:Constructors:
::
SourceCredentialsProviderImpl(**properties)
"""
def bind_property(self, *args, **kwargs): # real signature unknown
pass
def bind_property_full(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def can_process(self, source): # real signature unknown; restored from __doc__
""" can_process(self, source:EDataServer.Source) -> bool """
return False
def can_prompt(self): # real signature unknown; restored from __doc__
""" can_prompt(self) -> bool """
return False
def can_store(self): # real signature unknown; restored from __doc__
""" can_store(self) -> bool """
return False
def chain(self, *args, **kwargs): # real signature unknown
pass
def compat_control(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def connect(self, *args, **kwargs): # real signature unknown
pass
def connect_after(self, *args, **kwargs): # real signature unknown
pass
def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect
"""
Connect a callback to the given signal with optional user data.
:param str detailed_signal:
A detailed signal to connect to.
:param callable handler:
Callback handler to connect to the signal.
:param *data:
Variable data which is passed through to the signal handler.
:param GObject.ConnectFlags connect_flags:
Flags used for connection options.
:returns:
A signal id which can be used with disconnect.
"""
pass
def connect_object(self, *args, **kwargs): # real signature unknown
pass
def connect_object_after(self, *args, **kwargs): # real signature unknown
pass
def delete_sync(self, source, cancellable=None): # real signature unknown; restored from __doc__
""" delete_sync(self, source:EDataServer.Source, cancellable:Gio.Cancellable=None) -> bool """
return False
def disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def disconnect_by_func(self, *args, **kwargs): # real signature unknown
pass
def do_can_process(self, *args, **kwargs): # real signature unknown
""" can_process(self, source:EDataServer.Source) -> bool """
pass
def do_can_prompt(self, *args, **kwargs): # real signature unknown
""" can_prompt(self) -> bool """
pass
def do_can_store(self, *args, **kwargs): # real signature unknown
""" can_store(self) -> bool """
pass
def do_delete_sync(self, *args, **kwargs): # real signature unknown
""" delete_sync(self, source:EDataServer.Source, cancellable:Gio.Cancellable=None) -> bool """
pass
def do_lookup_sync(self, *args, **kwargs): # real signature unknown
""" lookup_sync(self, source:EDataServer.Source, cancellable:Gio.Cancellable=None) -> bool, out_credentials:EDataServer.NamedParameters """
pass
def do_store_sync(self, *args, **kwargs): # real signature unknown
""" store_sync(self, source:EDataServer.Source, credentials:EDataServer.NamedParameters, permanently:bool, cancellable:Gio.Cancellable=None) -> bool """
pass
def emit(self, *args, **kwargs): # real signature unknown
pass
def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def find_property(self, property_name): # real signature unknown; restored from __doc__
""" find_property(self, property_name:str) -> GObject.ParamSpec """
pass
def force_floating(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def freeze_notify(self): # reliably restored by inspect
"""
Freezes the object's property-changed notification queue.
:returns:
A context manager which optionally can be used to
automatically thaw notifications.
This will freeze the object so that "notify" signals are blocked until
the thaw_notify() method is called.
.. code-block:: python
with obj.freeze_notify():
pass
"""
pass
def getv(self, names, values): # real signature unknown; restored from __doc__
""" getv(self, names:list, values:list) """
pass
def get_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_extensible(self): # real signature unknown; restored from __doc__
""" get_extensible(self) -> EDataServer.Extensible """
pass
def get_properties(self, *args, **kwargs): # real signature unknown
pass
def get_property(self, *args, **kwargs): # real signature unknown
pass
def get_provider(self): # real signature unknown; restored from __doc__
""" get_provider(self) """
pass
def get_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def handler_block(obj, handler_id): # reliably restored by inspect
"""
Blocks the signal handler from being invoked until
handler_unblock() is called.
:param GObject.Object obj:
Object instance to block handlers for.
:param int handler_id:
Id of signal to block.
:returns:
A context manager which optionally can be used to
automatically unblock the handler:
.. code-block:: python
with GObject.signal_handler_block(obj, id):
pass
"""
pass
def handler_block_by_func(self, *args, **kwargs): # real signature unknown
pass
def handler_disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def handler_is_connected(*args, **kwargs): # reliably restored by inspect
""" signal_handler_is_connected(instance:GObject.Object, handler_id:int) -> bool """
pass
def handler_unblock(*args, **kwargs): # reliably restored by inspect
""" signal_handler_unblock(instance:GObject.Object, handler_id:int) """
pass
def handler_unblock_by_func(self, *args, **kwargs): # real signature unknown
pass
def install_properties(self, pspecs): # real signature unknown; restored from __doc__
""" install_properties(self, pspecs:list) """
pass
def install_property(self, property_id, pspec): # real signature unknown; restored from __doc__
""" install_property(self, property_id:int, pspec:GObject.ParamSpec) """
pass
def interface_find_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_install_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_list_properties(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def is_floating(self): # real signature unknown; restored from __doc__
""" is_floating(self) -> bool """
return False
def list_properties(self): # real signature unknown; restored from __doc__
""" list_properties(self) -> list, n_properties:int """
return []
    def lookup_sync(self, source, cancellable=None): # real signature unknown; restored from __doc__
        """ lookup_sync(self, source:EDataServer.Source, cancellable:Gio.Cancellable=None) -> bool, out_credentials:EDataServer.NamedParameters """
        # Auto-generated introspection stub: the real implementation lives in
        # the native gi binding; the constant return is only a placeholder.
        return False
def newv(self, object_type, parameters): # real signature unknown; restored from __doc__
""" newv(object_type:GType, parameters:list) -> GObject.Object """
pass
def notify(self, property_name): # real signature unknown; restored from __doc__
""" notify(self, property_name:str) """
pass
def notify_by_pspec(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def override_property(self, property_id, name): # real signature unknown; restored from __doc__
""" override_property(self, property_id:int, name:str) """
pass
def ref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def ref_sink(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def run_dispose(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_properties(self, *args, **kwargs): # real signature unknown
pass
def set_property(self, *args, **kwargs): # real signature unknown
pass
def steal_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def steal_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def stop_emission(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def stop_emission_by_name(*args, **kwargs): # reliably restored by inspect
""" signal_stop_emission_by_name(instance:GObject.Object, detailed_signal:str) """
pass
    def store_sync(self, source, credentials, permanently, cancellable=None): # real signature unknown; restored from __doc__
        """ store_sync(self, source:EDataServer.Source, credentials:EDataServer.NamedParameters, permanently:bool, cancellable:Gio.Cancellable=None) -> bool """
        # Auto-generated introspection stub: the real implementation lives in
        # the native gi binding; the constant return is only a placeholder.
        return False
def thaw_notify(self): # real signature unknown; restored from __doc__
""" thaw_notify(self) """
pass
def unref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def watch_closure(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def weak_ref(self, *args, **kwargs): # real signature unknown
pass
def _force_floating(self, *args, **kwargs): # real signature unknown
""" force_floating(self) """
pass
def _ref(self, *args, **kwargs): # real signature unknown
""" ref(self) -> GObject.Object """
pass
def _ref_sink(self, *args, **kwargs): # real signature unknown
""" ref_sink(self) -> GObject.Object """
pass
def _unref(self, *args, **kwargs): # real signature unknown
""" unref(self) """
pass
def _unsupported_data_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def _unsupported_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __deepcopy__(self, *args, **kwargs): # real signature unknown
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, **properties): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
g_type_instance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
priv = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
qdata = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ref_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__gpointer__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__grefcount__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
props = None # (!) real value is '<gi._gi.GProps object at 0x7f626e8ec550>'
__class__ = None # (!) real value is "<class 'gi.types.GObjectMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': ObjectInfo(SourceCredentialsProviderImpl), '__module__': 'gi.repository.EDataServer', '__gtype__': <GType ESourceCredentialsProviderImpl (94877537146240)>, '__doc__': None, '__gsignals__': {}, 'can_process': gi.FunctionInfo(can_process), 'can_prompt': gi.FunctionInfo(can_prompt), 'can_store': gi.FunctionInfo(can_store), 'delete_sync': gi.FunctionInfo(delete_sync), 'get_provider': gi.FunctionInfo(get_provider), 'lookup_sync': gi.FunctionInfo(lookup_sync), 'store_sync': gi.FunctionInfo(store_sync), 'do_can_process': gi.VFuncInfo(can_process), 'do_can_prompt': gi.VFuncInfo(can_prompt), 'do_can_store': gi.VFuncInfo(can_store), 'do_delete_sync': gi.VFuncInfo(delete_sync), 'do_lookup_sync': gi.VFuncInfo(lookup_sync), 'do_store_sync': gi.VFuncInfo(store_sync), 'parent': <property object at 0x7f626e926e00>, 'priv': <property object at 0x7f626e926ef0>})"
__gdoc__ = 'Object ESourceCredentialsProviderImpl\n\nProperties from EExtension:\n extensible -> EExtensible: Extensible Object\n The object being extended\n\nSignals from GObject:\n notify (GParam)\n\n'
__gsignals__ = {}
__gtype__ = None # (!) real value is '<GType ESourceCredentialsProviderImpl (94877537146240)>'
__info__ = ObjectInfo(SourceCredentialsProviderImpl)
| [
"[email protected]"
] | |
e309f63fc32bf788b5f2230bd49a429597fac3cb | ecc65625665286428b1080ee425cc809742e0dcc | /python programs/12th/7.py | b27253215343a4b2a3172737b0ada16fdfd0549e | [] | no_license | RJ-VARMA/11th-cbse-programs | 72a204aa90b3a9ae8cfb7e120ed61fd77c9f326d | 3dad091537872e8aa9028c9e7eddd7e96337bbde | refs/heads/main | 2023-08-22T06:38:46.499429 | 2021-10-18T03:18:12 | 2021-10-18T03:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import csv
import csv

# Simple CSV-backed login gate: asks for a username/password pair and checks
# it against every row of upassword.csv (row[0] = username, row[1] = password).
login = False
answer = input("Do you have an account?(yes or no) ")
if answer == 'yes':
    username = input("Player One Username: ")
    password = input("Player One Password: ")
    with open('upassword.csv', 'r') as csvfile:
        # Scan ALL rows.  The original code executed ``break`` in the else
        # branch too, so it stopped at the FIRST non-matching row and only
        # the first stored account could ever log in.  It also printed every
        # stored row (leaking all passwords to the screen); that debug output
        # is removed here.
        for row in csv.reader(csvfile):
            if row and row[0] == username and row[1] == password:
                login = True
                break
    if login:
        print("You are now logged in!")
    else:
        print("Incorrect. Game Over.")
        exit()
else:
    print('Only Valid Usernames can play. Game Over.')
    exit()
| [
"[email protected]"
] | |
b7d5c92398dbcae7d70b09607ef8e5cd5221e0f7 | d2e3cd42cd150f09f4bdc82286248d692ac46195 | /networkx/algorithms/isomorphism/tests/vf2pp/test_Ti_computing.py | f548fca021c4f13b306a9e1263079ffe8fc30470 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bangtree/networkx | 7414f13c20ec600822b7de41cb8188f9651cf256 | b37d5931d1d162e98c7c5f10b2f6c7030cc187cf | refs/heads/master | 2022-12-05T19:21:53.915903 | 2022-12-02T02:44:29 | 2022-12-02T02:44:29 | 29,867,402 | 0 | 0 | null | 2015-01-26T15:35:12 | 2015-01-26T15:35:11 | null | UTF-8 | Python | false | false | 9,716 | py | import networkx as nx
from networkx.algorithms.isomorphism.vf2pp import (
_GraphParameters,
_initialize_parameters,
_StateParameters,
)
from networkx.algorithms.isomorphism.vf2pp_helpers.state import (
_restore_Tinout,
_restore_Tinout_Di,
_update_Tinout,
)
class TestGraphTinoutUpdating:
    """Tests for VF2++ frontier bookkeeping (T1/T2, T1_tilde/T2_tilde) on an
    undirected graph.

    ``G1`` is a small fixed graph plus the isolated node 0, and ``G2`` is an
    isomorphic copy produced by relabelling through ``mapped``.  The tests
    grow/shrink the partial node mapping one node at a time and assert the
    exact contents of the candidate-frontier sets (neighbors of mapped
    nodes) and the "not yet seen" tilde sets after every step.
    """
    edges = [
        (1, 3),
        (2, 3),
        (3, 4),
        (4, 9),
        (4, 5),
        (3, 9),
        (5, 8),
        (5, 7),
        (8, 7),
        (6, 7),
    ]
    # Relabelling used to build G2 from G1 (int node -> string node).
    mapped = {
        0: "x",
        1: "a",
        2: "b",
        3: "c",
        4: "d",
        5: "e",
        6: "f",
        7: "g",
        8: "h",
        9: "i",
    }
    G1 = nx.Graph()
    G1.add_edges_from(edges)
    G1.add_node(0)  # isolated node: exercises the no-neighbors case
    G2 = nx.relabel_nodes(G1, mapping=mapped)
    def test_updating(self):
        """Extend the mapping node by node and check the four sets."""
        G2_degree = dict(self.G2.degree)
        gparams, sparams = _initialize_parameters(self.G1, self.G2, G2_degree)
        m, m_rev, T1, _, T1_tilde, _, T2, _, T2_tilde, _ = sparams
        # Add node to the mapping
        m[4] = self.mapped[4]
        m_rev[self.mapped[4]] = 4
        _update_Tinout(4, self.mapped[4], gparams, sparams)
        assert T1 == {3, 5, 9}
        assert T2 == {"c", "i", "e"}
        assert T1_tilde == {0, 1, 2, 6, 7, 8}
        assert T2_tilde == {"x", "a", "b", "f", "g", "h"}
        # Add node to the mapping
        m[5] = self.mapped[5]
        m_rev.update({self.mapped[5]: 5})
        _update_Tinout(5, self.mapped[5], gparams, sparams)
        assert T1 == {3, 9, 8, 7}
        assert T2 == {"c", "i", "h", "g"}
        assert T1_tilde == {0, 1, 2, 6}
        assert T2_tilde == {"x", "a", "b", "f"}
        # Add node to the mapping
        m[6] = self.mapped[6]
        m_rev.update({self.mapped[6]: 6})
        _update_Tinout(6, self.mapped[6], gparams, sparams)
        assert T1 == {3, 9, 8, 7}
        assert T2 == {"c", "i", "h", "g"}
        assert T1_tilde == {0, 1, 2}
        assert T2_tilde == {"x", "a", "b"}
        # Add node to the mapping
        m[3] = self.mapped[3]
        m_rev.update({self.mapped[3]: 3})
        _update_Tinout(3, self.mapped[3], gparams, sparams)
        assert T1 == {1, 2, 9, 8, 7}
        assert T2 == {"a", "b", "i", "h", "g"}
        assert T1_tilde == {0}
        assert T2_tilde == {"x"}
        # Add node to the mapping
        m[0] = self.mapped[0]
        m_rev.update({self.mapped[0]: 0})
        _update_Tinout(0, self.mapped[0], gparams, sparams)
        assert T1 == {1, 2, 9, 8, 7}
        assert T2 == {"a", "b", "i", "h", "g"}
        assert T1_tilde == set()
        assert T2_tilde == set()
    def test_restoring(self):
        """Pop nodes from a partial mapping and check the sets are restored."""
        m = {0: "x", 3: "c", 4: "d", 5: "e", 6: "f"}
        m_rev = {"x": 0, "c": 3, "d": 4, "e": 5, "f": 6}
        T1 = {1, 2, 7, 9, 8}
        T2 = {"a", "b", "g", "i", "h"}
        T1_tilde = set()
        T2_tilde = set()
        gparams = _GraphParameters(self.G1, self.G2, {}, {}, {}, {}, {})
        sparams = _StateParameters(
            m, m_rev, T1, None, T1_tilde, None, T2, None, T2_tilde, None
        )
        # Remove a node from the mapping
        m.pop(0)
        m_rev.pop("x")
        _restore_Tinout(0, self.mapped[0], gparams, sparams)
        assert T1 == {1, 2, 7, 9, 8}
        assert T2 == {"a", "b", "g", "i", "h"}
        assert T1_tilde == {0}
        assert T2_tilde == {"x"}
        # Remove a node from the mapping
        m.pop(6)
        m_rev.pop("f")
        _restore_Tinout(6, self.mapped[6], gparams, sparams)
        assert T1 == {1, 2, 7, 9, 8}
        assert T2 == {"a", "b", "g", "i", "h"}
        assert T1_tilde == {0, 6}
        assert T2_tilde == {"x", "f"}
        # Remove a node from the mapping
        m.pop(3)
        m_rev.pop("c")
        _restore_Tinout(3, self.mapped[3], gparams, sparams)
        assert T1 == {7, 9, 8, 3}
        assert T2 == {"g", "i", "h", "c"}
        assert T1_tilde == {0, 6, 1, 2}
        assert T2_tilde == {"x", "f", "a", "b"}
        # Remove a node from the mapping
        m.pop(5)
        m_rev.pop("e")
        _restore_Tinout(5, self.mapped[5], gparams, sparams)
        assert T1 == {9, 3, 5}
        assert T2 == {"i", "c", "e"}
        assert T1_tilde == {0, 6, 1, 2, 7, 8}
        assert T2_tilde == {"x", "f", "a", "b", "g", "h"}
        # Remove a node from the mapping
        m.pop(4)
        m_rev.pop("d")
        _restore_Tinout(4, self.mapped[4], gparams, sparams)
        assert T1 == set()
        assert T2 == set()
        # With the mapping empty again, every node is back in the tilde sets.
        assert T1_tilde == set(self.G1.nodes())
        assert T2_tilde == set(self.G2.nodes())
class TestDiGraphTinoutUpdating:
    """Directed-graph counterpart of the Ti/Ti_tilde bookkeeping tests.

    Here the frontier is split by edge direction into out-neighbor sets
    (T1_out/T2_out) and in-neighbor sets (T1_in/T2_in); restoration uses the
    directed variant ``_restore_Tinout_Di``.
    """
    edges = [
        (1, 3),
        (3, 2),
        (3, 4),
        (4, 9),
        (4, 5),
        (3, 9),
        (5, 8),
        (5, 7),
        (8, 7),
        (7, 6),
    ]
    # Relabelling used to build G2 from G1 (int node -> string node).
    mapped = {
        0: "x",
        1: "a",
        2: "b",
        3: "c",
        4: "d",
        5: "e",
        6: "f",
        7: "g",
        8: "h",
        9: "i",
    }
    G1 = nx.DiGraph(edges)
    G1.add_node(0)  # isolated node: exercises the no-neighbors case
    G2 = nx.relabel_nodes(G1, mapping=mapped)
    def test_updating(self):
        """Extend the mapping node by node and check all six sets."""
        # Directed degree is a (in_degree, out_degree) pair per node.
        G2_degree = {
            n: (in_degree, out_degree)
            for (n, in_degree), (_, out_degree) in zip(
                self.G2.in_degree, self.G2.out_degree
            )
        }
        gparams, sparams = _initialize_parameters(self.G1, self.G2, G2_degree)
        m, m_rev, T1_out, T1_in, T1_tilde, _, T2_out, T2_in, T2_tilde, _ = sparams
        # Add node to the mapping
        m[4] = self.mapped[4]
        m_rev[self.mapped[4]] = 4
        _update_Tinout(4, self.mapped[4], gparams, sparams)
        assert T1_out == {5, 9}
        assert T1_in == {3}
        assert T2_out == {"i", "e"}
        assert T2_in == {"c"}
        assert T1_tilde == {0, 1, 2, 6, 7, 8}
        assert T2_tilde == {"x", "a", "b", "f", "g", "h"}
        # Add node to the mapping
        m[5] = self.mapped[5]
        m_rev[self.mapped[5]] = 5
        _update_Tinout(5, self.mapped[5], gparams, sparams)
        assert T1_out == {9, 8, 7}
        assert T1_in == {3}
        assert T2_out == {"i", "g", "h"}
        assert T2_in == {"c"}
        assert T1_tilde == {0, 1, 2, 6}
        assert T2_tilde == {"x", "a", "b", "f"}
        # Add node to the mapping
        m[6] = self.mapped[6]
        m_rev[self.mapped[6]] = 6
        _update_Tinout(6, self.mapped[6], gparams, sparams)
        assert T1_out == {9, 8, 7}
        assert T1_in == {3, 7}
        assert T2_out == {"i", "g", "h"}
        assert T2_in == {"c", "g"}
        assert T1_tilde == {0, 1, 2}
        assert T2_tilde == {"x", "a", "b"}
        # Add node to the mapping
        m[3] = self.mapped[3]
        m_rev[self.mapped[3]] = 3
        _update_Tinout(3, self.mapped[3], gparams, sparams)
        assert T1_out == {9, 8, 7, 2}
        assert T1_in == {7, 1}
        assert T2_out == {"i", "g", "h", "b"}
        assert T2_in == {"g", "a"}
        assert T1_tilde == {0}
        assert T2_tilde == {"x"}
        # Add node to the mapping
        m[0] = self.mapped[0]
        m_rev[self.mapped[0]] = 0
        _update_Tinout(0, self.mapped[0], gparams, sparams)
        assert T1_out == {9, 8, 7, 2}
        assert T1_in == {7, 1}
        assert T2_out == {"i", "g", "h", "b"}
        assert T2_in == {"g", "a"}
        assert T1_tilde == set()
        assert T2_tilde == set()
    def test_restoring(self):
        """Pop nodes from a partial mapping and check the sets are restored."""
        m = {0: "x", 3: "c", 4: "d", 5: "e", 6: "f"}
        m_rev = {"x": 0, "c": 3, "d": 4, "e": 5, "f": 6}
        T1_out = {2, 7, 9, 8}
        T1_in = {1, 7}
        T2_out = {"b", "g", "i", "h"}
        T2_in = {"a", "g"}
        T1_tilde = set()
        T2_tilde = set()
        gparams = _GraphParameters(self.G1, self.G2, {}, {}, {}, {}, {})
        sparams = _StateParameters(
            m, m_rev, T1_out, T1_in, T1_tilde, None, T2_out, T2_in, T2_tilde, None
        )
        # Remove a node from the mapping
        m.pop(0)
        m_rev.pop("x")
        _restore_Tinout_Di(0, self.mapped[0], gparams, sparams)
        assert T1_out == {2, 7, 9, 8}
        assert T1_in == {1, 7}
        assert T2_out == {"b", "g", "i", "h"}
        assert T2_in == {"a", "g"}
        assert T1_tilde == {0}
        assert T2_tilde == {"x"}
        # Remove a node from the mapping
        m.pop(6)
        m_rev.pop("f")
        _restore_Tinout_Di(6, self.mapped[6], gparams, sparams)
        assert T1_out == {2, 9, 8, 7}
        assert T1_in == {1}
        assert T2_out == {"b", "i", "h", "g"}
        assert T2_in == {"a"}
        assert T1_tilde == {0, 6}
        assert T2_tilde == {"x", "f"}
        # Remove a node from the mapping
        m.pop(3)
        m_rev.pop("c")
        _restore_Tinout_Di(3, self.mapped[3], gparams, sparams)
        assert T1_out == {9, 8, 7}
        assert T1_in == {3}
        assert T2_out == {"i", "h", "g"}
        assert T2_in == {"c"}
        assert T1_tilde == {0, 6, 1, 2}
        assert T2_tilde == {"x", "f", "a", "b"}
        # Remove a node from the mapping
        m.pop(5)
        m_rev.pop("e")
        _restore_Tinout_Di(5, self.mapped[5], gparams, sparams)
        assert T1_out == {9, 5}
        assert T1_in == {3}
        assert T2_out == {"i", "e"}
        assert T2_in == {"c"}
        assert T1_tilde == {0, 6, 1, 2, 8, 7}
        assert T2_tilde == {"x", "f", "a", "b", "h", "g"}
        # Remove a node from the mapping
        m.pop(4)
        m_rev.pop("d")
        _restore_Tinout_Di(4, self.mapped[4], gparams, sparams)
        assert T1_out == set()
        assert T1_in == set()
        assert T2_out == set()
        assert T2_in == set()
        # With the mapping empty again, every node is back in the tilde sets.
        assert T1_tilde == set(self.G1.nodes())
        assert T2_tilde == set(self.G2.nodes())
| [
"[email protected]"
] | |
9f6ac6ecefb20871f98905fe6225b28a48eaf51d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9szPm9Mg5D2vJyTvf_14.py | c4b1eb7103a2e128742d7e447be9653582eade63 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | """
Write a function that takes three arguments `(x, y, z)` and returns a list
containing `x` sublists (e.g. `[[], [], []]`), each containing `y` number of
item `z`.
* `x` Number of sublists contained within the main list.
* `y` Number of items contained within each sublist.
* `z` Item contained within each sublist.
### Examples
matrix(3, 2, 3) ➞ [[3, 3], [3, 3], [3, 3]]
matrix(2, 1, "edabit") ➞ [["edabit"], ["edabit"]]
matrix(3, 2, 0) ➞ [[0, 0], [0, 0], [0, 0]]
### Notes
* The first two arguments will always be integers.
* The third argument is either a string or an integer.
"""
def matrix(x, y, z):
    """Return a list of ``x`` independent sublists, each holding ``y`` copies of ``z``.

    The original ``[[z] * y] * x`` repeats the SAME inner list object ``x``
    times, so mutating one row would mutate every row.  Building each row in
    a comprehension yields independent sublists.
    """
    return [[z] * y for _ in range(x)]
| [
"[email protected]"
] | |
cc5695f1470140f25b2cb77800818102059fa4d6 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /kdhgEC2ECXAfoXWQP_1.py | 18cfc39baa91a8ce324e7628429be8a4c0702226 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | """
In this challenge, you have to obtain a sentence from the elements of a given
matrix. In the matrix, each word of the sentence follows a columnar order from
the top to the bottom, instead of the usual left-to-right order: it's time for
**transposition**!
Given a matrix `mtx`, implement a function that returns the complete sentence
as a string, with the words separated by a space between them.
### Examples
transpose_matrix([
["Enter"],
["the"],
["Matrix!"]
]) ➞ "Enter the Matrix!"
transpose_matrix([
["The", "are"],
["columns", "rows."]
]) ➞ "The columns are rows."
transpose_matrix([
["You", "the"],
["must", "table"],
["transpose", "order."]
]) ➞ "You must transpose the table order."
### Notes
* All given matrices are regular, as to say that each column has the same length.
* Punctuation is already given, you just have to add the spaces in the returned string.
"""
def transpose_matrix(mtx):
result = ""
for i in range(len(mtx[0])):
for j in mtx:
result += j[i]+" "
return result[:-1]
| [
"[email protected]"
] | |
0b4285bff2df5cd19b3e3e2f31c78b854999b8f5 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/65/usersdata/185/34920/submittedfiles/investimento.py | c02de528254c4d919d01652089a4c2aa1ade2813 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # -*- coding: utf-8 -*-
from __future__ import division
i0=float(input('digite o valor do investimesnto:'))
taxa=float(input('digite o valor da taxa:'))
i1=(i0+(i0*taxa))
i2=(i1+(i1*taxa))
i3=(i2+(i2*taxa))
i4=(i3+(i3*taxa))
i5=(i4+(i4*taxa))
i6=(i5+(i5*taxa))
i7=(i6+(i6*taxa))
i8=(i7+(i7*taxa))
i9=(i8+(i8*taxa))
i10=(i9+(i9*taxa))
print('%.2f' %i1)
print('%.2f' %i2)
print('%.2f' %i3)
print('%.2f' %i4)
print('%.2f' %i5)
print('%.2f' %i6)
print('%.2f' %i7)
print('%.2f' %i8)
print('%.2f' %i9)
print('%.2f' %i10) | [
"[email protected]"
] | |
2c69894e250cac1001e365aa618ab94646a07ca5 | 12e45ab1a4d996c121f7ebc6a5a60621aaba8354 | /codingbat/String-1/make_tags.py | c159a0edd57c378691498f747d20cfadc4c0c00e | [] | no_license | dastan717/WedDEV | 843983d27325df1684ffbdc53557942433828ae6 | 00406a43b28f8650bf3b5f5d61d2ab234662d09c | refs/heads/main | 2023-03-13T12:37:48.636671 | 2021-04-02T20:31:50 | 2021-04-02T20:31:50 | 337,782,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | def make_tags(tag, word):
return "<"+tag+">"+word+"</"+tag+">"
| [
"[email protected]"
] | |
7e29e532d2f1285cd50e39b2cb2212b658e5b9a8 | 149db911cd5b9f404e5d74fd6c8ed047482d2c22 | /backend/menu/migrations/0001_initial.py | 2c07fd16d8ed613c8286821c487d80336fef03b4 | [] | no_license | crowdbotics-apps/bigbitesgrill-22907 | 45814458930ad7aed64a1f4941aabd930f1f2587 | 6cd1b7b663de21c7587cdbce1612c4807e2cc5f6 | refs/heads/master | 2023-01-14T05:10:18.129338 | 2020-11-23T03:27:17 | 2020-11-23T03:27:17 | 315,189,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | # Generated by Django 2.2.17 on 2020-11-23 03:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('delivery_user_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.URLField()),
('icon', models.URLField()),
],
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('prefix', models.CharField(max_length=8)),
('flag', models.URLField()),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('image', models.URLField()),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='item_category', to='menu.Category')),
],
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.FloatField()),
('review_text', models.TextField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_item', to='menu.Item')),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='review_profile', to='delivery_user_profile.Profile')),
],
),
migrations.CreateModel(
name='ItemVariant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('price', models.FloatField()),
('image', models.URLField()),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_country', to='menu.Country')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_item', to='menu.Item')),
],
),
]
| [
"[email protected]"
] | |
adf6af0524df6ab886504be487d226bb8e2ea86d | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v4/googleads-py/tests/unit/gapic/googleads.v4/services/test_ad_parameter_service.py | 738eff76504f6163898c0336379da593536d1d5b | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,565 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.ads.googleads.v4.resources.types import ad_parameter
from google.ads.googleads.v4.services.services.ad_parameter_service import AdParameterServiceClient
from google.ads.googleads.v4.services.services.ad_parameter_service import transports
from google.ads.googleads.v4.services.types import ad_parameter_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
from google.rpc import status_pb2 as status # type: ignore
def client_cert_source_callback():
    """Return a dummy (certificate, key) byte pair for mTLS channel tests."""
    cert = b"cert bytes"
    key = b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return the client's default endpoint, swapping in a stand-in host.

    When the default endpoint points at localhost (default == mtls default),
    substitute "foo.googleapis.com" so the test can still produce a distinct
    mtls endpoint; otherwise pass the default endpoint through unchanged.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts ".mtls" into googleapis hosts,
    passes already-mtls and non-googleapis hosts through, and maps None to None."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    assert AdParameterServiceClient._get_default_mtls_endpoint(None) is None
    assert AdParameterServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert AdParameterServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert AdParameterServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert AdParameterServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert AdParameterServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_ad_parameter_service_client_from_service_account_info():
    """from_service_account_info builds a client whose transport carries the
    factory-returned credentials and the default Google Ads host."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = AdParameterServiceClient.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_parameter_service_client_from_service_account_file():
    """from_service_account_file (and its _json alias) both build clients whose
    transport carries the factory-returned credentials and the default host."""
    creds = credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = AdParameterServiceClient.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        # from_service_account_json is an alias for from_service_account_file.
        client = AdParameterServiceClient.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_parameter_service_client_get_transport_class():
    """get_transport_class returns the gRPC transport both by default and when
    requested explicitly by the "grpc" name."""
    transport = AdParameterServiceClient.get_transport_class()
    assert transport == transports.AdParameterServiceGrpcTransport
    transport = AdParameterServiceClient.get_transport_class("grpc")
    assert transport == transports.AdParameterServiceGrpcTransport
@mock.patch.object(AdParameterServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AdParameterServiceClient))
def test_ad_parameter_service_client_client_options():
    """Exercise every client_options / GOOGLE_API_USE_MTLS_ENDPOINT combination:
    explicit transport, transport-by-name, explicit api_endpoint, the "never"
    and "always" mTLS modes, and the two unsupported-env-value error paths."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.AdParameterServiceClient.get_transport_class') as gtc:
        transport = transports.AdParameterServiceGrpcTransport(
            credentials=credentials.AnonymousCredentials()
        )
        client = AdParameterServiceClient(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.AdParameterServiceClient.get_transport_class') as gtc:
        client = AdParameterServiceClient(transport="grpc")
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = AdParameterServiceClient(client_options=options)
        grpc_transport.assert_called_once_with(
            ssl_channel_credentials=None,
            credentials=None,
            host="squid.clam.whelk",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
    # is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceGrpcTransport.__init__') as grpc_transport:
            grpc_transport.return_value = None
            client = AdParameterServiceClient()
            grpc_transport.assert_called_once_with(
                ssl_channel_credentials=None,
                credentials=None,
                host=client.DEFAULT_ENDPOINT,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceGrpcTransport.__init__') as grpc_transport:
            grpc_transport.return_value = None
            client = AdParameterServiceClient()
            grpc_transport.assert_called_once_with(
                ssl_channel_credentials=None,
                credentials=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = AdParameterServiceClient()
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = AdParameterServiceClient()
@mock.patch.object(AdParameterServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AdParameterServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_ad_parameter_service_client_mtls_env_auto(use_client_cert_env):
    """With GOOGLE_API_USE_MTLS_ENDPOINT=auto, the client switches to the mTLS
    endpoint only when a client certificate is available (from client_options
    or ADC) AND GOOGLE_API_USE_CLIENT_CERTIFICATE is "true"."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceGrpcTransport.__init__') as grpc_transport:
            ssl_channel_creds = mock.Mock()
            with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
                grpc_transport.return_value = None
                client = AdParameterServiceClient(client_options=options)
                if use_client_cert_env == "false":
                    expected_ssl_channel_creds = None
                    expected_host = client.DEFAULT_ENDPOINT
                else:
                    expected_ssl_channel_creds = ssl_channel_creds
                    expected_host = client.DEFAULT_MTLS_ENDPOINT
                grpc_transport.assert_called_once_with(
                    ssl_channel_credentials=expected_ssl_channel_creds,
                    credentials=None,
                    host=expected_host,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceGrpcTransport.__init__') as grpc_transport:
            with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
                with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
                    with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
                        if use_client_cert_env == "false":
                            is_mtls_mock.return_value = False
                            ssl_credentials_mock.return_value = None
                            # NOTE(review): `client` here is the instance left over from the
                            # previous block; DEFAULT_ENDPOINT is a class attribute, so this
                            # works, but a fresh class reference would be clearer — confirm.
                            expected_host = client.DEFAULT_ENDPOINT
                            expected_ssl_channel_creds = None
                        else:
                            is_mtls_mock.return_value = True
                            ssl_credentials_mock.return_value = mock.Mock()
                            expected_host = client.DEFAULT_MTLS_ENDPOINT
                            expected_ssl_channel_creds = ssl_credentials_mock.return_value
                        grpc_transport.return_value = None
                        client = AdParameterServiceClient()
                        grpc_transport.assert_called_once_with(
                            ssl_channel_credentials=expected_ssl_channel_creds,
                            credentials=None,
                            host=expected_host,
                            client_info=transports.base.DEFAULT_CLIENT_INFO,
                        )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceGrpcTransport.__init__') as grpc_transport:
            with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
                with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
                    is_mtls_mock.return_value = False
                    grpc_transport.return_value = None
                    client = AdParameterServiceClient()
                    grpc_transport.assert_called_once_with(
                        ssl_channel_credentials=None,
                        credentials=None,
                        host=client.DEFAULT_ENDPOINT,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )
def test_ad_parameter_service_client_client_options_from_dict():
    """A plain dict of client options is accepted in place of a ClientOptions object."""
    with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        AdParameterServiceClient(client_options={'api_endpoint': 'squid.clam.whelk'})
        grpc_transport.assert_called_once_with(
            ssl_channel_credentials=None,
            credentials=None,
            host="squid.clam.whelk",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_get_ad_parameter(transport: str = 'grpc', request_type=ad_parameter_service.GetAdParameterRequest):
    """GetAdParameter happy path: the stub is invoked exactly once with the
    request, and the proto response is surfaced as ad_parameter.AdParameter."""
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_ad_parameter),
        '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = ad_parameter.AdParameter(
            resource_name='resource_name_value',
        )
        response = client.get_ad_parameter(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == ad_parameter_service.GetAdParameterRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, ad_parameter.AdParameter)
    assert response.resource_name == 'resource_name_value'
def test_get_ad_parameter_from_dict():
    """Dict-typed requests must take the same code path as proto requests."""
    test_get_ad_parameter(request_type=dict)
def test_get_ad_parameter_field_headers():
    """The request's resource_name must be echoed into the
    x-goog-request-params routing header sent with the RPC."""
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = ad_parameter_service.GetAdParameterRequest()
    request.resource_name = 'resource_name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_ad_parameter),
        '__call__') as call:
        call.return_value = ad_parameter.AdParameter()
        client.get_ad_parameter(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'resource_name=resource_name/value',
    ) in kw['metadata']
def test_get_ad_parameter_flattened():
    """Flattened keyword arguments must be packed into the request proto."""
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.get_ad_parameter),
        '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = ad_parameter.AdParameter()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_ad_parameter(
            resource_name='resource_name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource_name == 'resource_name_value'
def test_get_ad_parameter_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
    )
    request = ad_parameter_service.GetAdParameterRequest()
    with pytest.raises(ValueError):
        client.get_ad_parameter(request, resource_name='resource_name_value')
def test_mutate_ad_parameters(transport: str = 'grpc', request_type=ad_parameter_service.MutateAdParametersRequest):
    """MutateAdParameters happy path: one stub invocation, typed response."""
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.mutate_ad_parameters),
        '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = ad_parameter_service.MutateAdParametersResponse(
        )
        response = client.mutate_ad_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == ad_parameter_service.MutateAdParametersRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, ad_parameter_service.MutateAdParametersResponse)
def test_mutate_ad_parameters_from_dict():
    """Dict-typed requests must take the same code path as proto requests."""
    test_mutate_ad_parameters(request_type=dict)
def test_mutate_ad_parameters_field_headers():
    """The request's customer_id must be echoed into the
    x-goog-request-params routing header sent with the RPC."""
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = ad_parameter_service.MutateAdParametersRequest()
    request.customer_id = 'customer_id/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.mutate_ad_parameters),
        '__call__') as call:
        call.return_value = ad_parameter_service.MutateAdParametersResponse()
        client.mutate_ad_parameters(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'customer_id=customer_id/value',
    ) in kw['metadata']
def test_mutate_ad_parameters_flattened():
    """Flattened customer_id/operations kwargs must be packed into the request proto."""
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.mutate_ad_parameters),
        '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = ad_parameter_service.MutateAdParametersResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.mutate_ad_parameters(
            customer_id='customer_id_value',
            operations=[ad_parameter_service.AdParameterOperation(update_mask=field_mask.FieldMask(paths=['paths_value']))],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].customer_id == 'customer_id_value'
        assert args[0].operations == [ad_parameter_service.AdParameterOperation(update_mask=field_mask.FieldMask(paths=['paths_value']))]
def test_mutate_ad_parameters_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
    )
    operation = ad_parameter_service.AdParameterOperation(
        update_mask=field_mask.FieldMask(paths=['paths_value']))
    with pytest.raises(ValueError):
        client.mutate_ad_parameters(
            ad_parameter_service.MutateAdParametersRequest(),
            customer_id='customer_id_value',
            operations=[operation],
        )
def test_credentials_transport_error():
    """Supplying both credentials and a transport instance is ambiguous and must fail."""
    ready_transport = transports.AdParameterServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        AdParameterServiceClient(
            credentials=credentials.AnonymousCredentials(),
            transport=ready_transport,
        )
def test_transport_instance():
    """A pre-built transport instance is adopted by the client as-is."""
    custom_transport = transports.AdParameterServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    assert AdParameterServiceClient(transport=custom_transport).transport is custom_transport
def test_transport_get_channel():
    """A transport built from credentials exposes a usable gRPC channel."""
    custom_transport = transports.AdParameterServiceGrpcTransport(
        credentials=credentials.AnonymousCredentials(),
    )
    assert custom_transport.grpc_channel
def test_transport_grpc_default():
    """With no transport argument, the client defaults to the gRPC transport."""
    default_client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
    )
    assert isinstance(default_client.transport,
                      transports.AdParameterServiceGrpcTransport)
@pytest.mark.parametrize("transport_class", [
    transports.AdParameterServiceGrpcTransport,
])
def test_transport_adc(transport_class):
    """Transports fall back to application-default credentials when none are given."""
    with mock.patch.object(auth, 'default') as default_mock:
        default_mock.return_value = (credentials.AnonymousCredentials(), None)
        transport_class()
        default_mock.assert_called_once()
def test_ad_parameter_service_base_transport():
    """Every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport with its __init__ stubbed out.
    with mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceTransport.__init__') as Transport:
        Transport.return_value = None
        base = transports.AdParameterServiceTransport(
            credentials=credentials.AnonymousCredentials(),
        )
    # Each RPC should blindly raise until a concrete transport implements it.
    for rpc_name in ('get_ad_parameter', 'mutate_ad_parameters'):
        with pytest.raises(NotImplementedError):
            getattr(base, rpc_name)(request=object())
def test_ad_parameter_service_base_transport_with_adc():
    """ADC is consulted when neither credentials nor credentials_file is supplied."""
    prep_patch = mock.patch('google.ads.googleads.v4.services.services.ad_parameter_service.transports.AdParameterServiceTransport._prep_wrapped_messages')
    with mock.patch.object(auth, 'default') as adc, prep_patch as Transport:
        Transport.return_value = None
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.AdParameterServiceTransport()
        adc.assert_called_once()
def test_ad_parameter_service_auth_adc():
    """Client construction without credentials uses ADC with the AdWords scope."""
    with mock.patch.object(auth, 'default') as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        AdParameterServiceClient()
        adc.assert_called_once_with(
            scopes=('https://www.googleapis.com/auth/adwords',))
def test_ad_parameter_service_transport_auth_adc():
    """Transport construction without credentials also falls back to scoped ADC."""
    with mock.patch.object(auth, 'default') as adc:
        adc.return_value = (credentials.AnonymousCredentials(), None)
        transports.AdParameterServiceGrpcTransport(host="squid.clam.whelk")
        adc.assert_called_once_with(
            scopes=('https://www.googleapis.com/auth/adwords',))
def test_ad_parameter_service_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    opts = client_options.ClientOptions(api_endpoint='googleads.googleapis.com')
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == 'googleads.googleapis.com:443'
def test_ad_parameter_service_host_with_port():
    """An explicit port in the endpoint is preserved verbatim."""
    opts = client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000')
    client = AdParameterServiceClient(
        credentials=credentials.AnonymousCredentials(),
        client_options=opts,
    )
    assert client.transport._host == 'googleads.googleapis.com:8000'
def test_ad_parameter_service_grpc_transport_channel():
    """A caller-supplied channel is adopted verbatim and no TLS credentials are attached."""
    channel = grpc.insecure_channel('http://localhost/')
    # Check that channel is used if provided.
    transport = transports.AdParameterServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Identity check with `is None` (PEP 8 E711) instead of the original `== None`.
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.AdParameterServiceGrpcTransport])
def test_ad_parameter_service_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    """Deprecated api_mtls_endpoint + client_cert_source path: the cert callback's
    bytes feed grpc.ssl_channel_credentials, ADC supplies credentials, and the
    channel is created against the :443 mTLS endpoint with unlimited message sizes."""
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = credentials.AnonymousCredentials()
            # The api_mtls_endpoint/client_cert_source kwargs are deprecated.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/adwords',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.AdParameterServiceGrpcTransport,])
def test_ad_parameter_service_transport_channel_mtls_with_adc(
    transport_class
):
    """Deprecated api_mtls_endpoint path with client_cert_source=None: the SSL
    credentials come from ADC (SslCredentials), and the channel targets the
    mTLS endpoint on :443 with unlimited message sizes."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # api_mtls_endpoint is deprecated in favor of client_options.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/adwords',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_ad_group_criterion_path():
    """ad_group_criterion_path() builds the canonical resource name from its IDs."""
    want = "customers/{customer}/adGroupCriteria/{ad_group_criterion}".format(customer="squid", ad_group_criterion="clam", )
    got = AdParameterServiceClient.ad_group_criterion_path("squid", "clam")
    assert got == want
def test_parse_ad_group_criterion_path():
    """parse_ad_group_criterion_path() is the inverse of ad_group_criterion_path()."""
    parts = {
        "customer": "whelk",
        "ad_group_criterion": "octopus",
    }
    path = AdParameterServiceClient.ad_group_criterion_path(**parts)
    assert AdParameterServiceClient.parse_ad_group_criterion_path(path) == parts
def test_ad_parameter_path():
    """ad_parameter_path() builds the canonical resource name from its IDs."""
    want = "customers/{customer}/adParameters/{ad_parameter}".format(customer="oyster", ad_parameter="nudibranch", )
    got = AdParameterServiceClient.ad_parameter_path("oyster", "nudibranch")
    assert got == want
def test_parse_ad_parameter_path():
    """parse_ad_parameter_path() is the inverse of ad_parameter_path()."""
    parts = {
        "customer": "cuttlefish",
        "ad_parameter": "mussel",
    }
    path = AdParameterServiceClient.ad_parameter_path(**parts)
    assert AdParameterServiceClient.parse_ad_parameter_path(path) == parts
def test_common_billing_account_path():
    """common_billing_account_path() expands the billing account ID."""
    want = "billingAccounts/{billing_account}".format(billing_account="winkle", )
    got = AdParameterServiceClient.common_billing_account_path("winkle")
    assert got == want
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() inverts common_billing_account_path()."""
    parts = {
        "billing_account": "nautilus",
    }
    path = AdParameterServiceClient.common_billing_account_path(**parts)
    assert AdParameterServiceClient.parse_common_billing_account_path(path) == parts
def test_common_folder_path():
    """common_folder_path() expands the folder ID."""
    want = "folders/{folder}".format(folder="scallop", )
    got = AdParameterServiceClient.common_folder_path("scallop")
    assert got == want
def test_parse_common_folder_path():
    """parse_common_folder_path() inverts common_folder_path()."""
    parts = {
        "folder": "abalone",
    }
    path = AdParameterServiceClient.common_folder_path(**parts)
    assert AdParameterServiceClient.parse_common_folder_path(path) == parts
def test_common_organization_path():
    """common_organization_path() expands the organization ID."""
    want = "organizations/{organization}".format(organization="squid", )
    got = AdParameterServiceClient.common_organization_path("squid")
    assert got == want
def test_parse_common_organization_path():
    """parse_common_organization_path() inverts common_organization_path()."""
    parts = {
        "organization": "clam",
    }
    path = AdParameterServiceClient.common_organization_path(**parts)
    assert AdParameterServiceClient.parse_common_organization_path(path) == parts
def test_common_project_path():
    """common_project_path() expands the project ID."""
    want = "projects/{project}".format(project="whelk", )
    got = AdParameterServiceClient.common_project_path("whelk")
    assert got == want
def test_parse_common_project_path():
    """parse_common_project_path() inverts common_project_path()."""
    parts = {
        "project": "octopus",
    }
    path = AdParameterServiceClient.common_project_path(**parts)
    assert AdParameterServiceClient.parse_common_project_path(path) == parts
def test_common_location_path():
    """common_location_path() expands project and location IDs."""
    want = "projects/{project}/locations/{location}".format(project="oyster", location="nudibranch", )
    got = AdParameterServiceClient.common_location_path("oyster", "nudibranch")
    assert got == want
def test_parse_common_location_path():
    """parse_common_location_path() inverts common_location_path()."""
    parts = {
        "project": "cuttlefish",
        "location": "mussel",
    }
    path = AdParameterServiceClient.common_location_path(**parts)
    assert AdParameterServiceClient.parse_common_location_path(path) == parts
def test_client_withDEFAULT_CLIENT_INFO():
    """client_info must reach the transport's _prep_wrapped_messages hook."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Via the client constructor.
    with mock.patch.object(transports.AdParameterServiceTransport, '_prep_wrapped_messages') as prep:
        AdParameterServiceClient(
            credentials=credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # And via a directly instantiated transport.
    with mock.patch.object(transports.AdParameterServiceTransport, '_prep_wrapped_messages') as prep:
        AdParameterServiceClient.get_transport_class()(
            credentials=credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
b254df743e617dfd1390743f0e04bbe4d12cb542 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03227/s922156594.py | 3367f99a10180d83d75fbea989fb7e0b5a810cdd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | import sys
def input(): return sys.stdin.readline().rstrip()  # fast stdin reader; intentionally shadows the builtin (competitive-programming idiom)
def main():
    """Print the input unchanged when it has exactly 2 characters, else reversed."""
    s = input()
    print(s if len(s) == 2 else s[::-1])
if __name__=='__main__':
main() | [
"[email protected]"
] | |
ba11fe85c801d07e0e7c25b58d3aee09665d8952 | 77a7508c3a647711191b924959db80fb6d2bd146 | /src/gamesbyexample/worms.py | 2bea231d0dbdaeacc62cad083fcc56fafc920fb4 | [
"MIT"
] | permissive | surlydev/PythonStdioGames | ff7edb4c8c57a5eb6e2036e2b6ebc7e23ec994e0 | d54c2509c12a5b1858eda275fd07d0edd456f23f | refs/heads/master | 2021-05-22T21:01:15.529159 | 2020-03-26T07:34:10 | 2020-03-26T07:34:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,750 | py | """Worm animation, by Al Sweigart [email protected]
A screensaver of multicolor worms moving around.
NOTE: Do not resize the terminal window while this program is running.
Tags: large, artistic, simulation, bext"""
__version__ = 0
import random, shutil, sys, time
try:
import bext
except ImportError:
print('''This program requires the bext module, which you can
install by opening a Terminal window (on macOS & Linux) and running:
python3 -m pip install --user bext
or a Command Prompt window (on Windows) and running:
python -m pip install --user bext''')
sys.exit()
# Set up the constants:
PAUSE_LENGTH = 0.1
# Get the size of the terminal window:
WIDTH, HEIGHT = shutil.get_terminal_size()
# We can't print to the last column on Windows without it adding a
# newline automatically, so reduce the width by one:
WIDTH -= 1
WIDTH //= 2
NUMBER_OF_WORMS = 12 # (!) Try changing this value.
MIN_WORM_LENGTH = 6 # (!) Try changing this value.
MAX_WORM_LENGTH = 16 # (!) Try changing this value.
ALL_COLORS = bext.ALL_COLORS
NORTH = 'north'
SOUTH = 'south'
EAST = 'east'
WEST = 'west'
BLOCK = chr(9608) # Character 9608 is '█'
def main():
    """Run the worm screensaver: draw and move every worm forever (Ctrl-C quits)."""
    # Generate worm data structures:
    worms = []
    for i in range(NUMBER_OF_WORMS):
        worms.append(Worm())
    bext.clear()
    while True: # Main simulation loop.
        # Draw quit message.
        bext.fg('white')
        bext.goto(0, 0)
        print('Ctrl-C to quit.', end='')
        # Draw every worm first, then advance them all one step.
        for worm in worms:
            worm.display()
        for worm in worms:
            worm.moveRandom()
        sys.stdout.flush()
        time.sleep(PAUSE_LENGTH)
class Worm:
    """A multicolored worm: a list of (x, y) segments (head first) that
    wanders the terminal, drawn two block characters wide per cell."""
    def __init__(self):
        """Pick a random length, coloration scheme, and snaking start position."""
        self.length = random.randint(MIN_WORM_LENGTH, MAX_WORM_LENGTH)
        coloration = random.choice(['solid', 'stripe', 'random'])
        if coloration == 'solid':
            self.colors = [random.choice(ALL_COLORS)] * self.length
        elif coloration == 'stripe':
            color1 = random.choice(ALL_COLORS)
            color2 = random.choice(ALL_COLORS)
            self.colors = []
            for i in range(self.length):
                self.colors.append((color1, color2)[i % 2])
        elif coloration == 'random':
            self.colors = []
            for i in range(self.length):
                self.colors.append(random.choice(ALL_COLORS))
        x = random.randint(0, WIDTH - 1)
        y = random.randint(0, HEIGHT - 1)
        # Lay out the body by repeatedly stepping to a random neighbor.
        # NOTE(review): segments may initially overlap; presumably acceptable
        # for a screensaver — confirm.
        self.body = []
        for i in range(self.length):
            self.body.append((x, y))
            x, y = getRandomNeighbor(x, y)
    def moveNorth(self):
        """Move the head one cell north; return False if that cell is blocked."""
        headx, heady = self.body[0]
        if self.isBlocked(NORTH):
            return False
        self.body.insert(0, (headx, heady - 1))
        self._eraseLastBodySegment()
        return True
    def moveSouth(self):
        """Move the head one cell south; return False if that cell is blocked."""
        headx, heady = self.body[0]
        if self.isBlocked(SOUTH):
            return False
        self.body.insert(0, (headx, heady + 1))
        self._eraseLastBodySegment()
        return True
    def moveEast(self):
        """Move the head one cell east; return False if that cell is blocked."""
        headx, heady = self.body[0]
        if self.isBlocked(EAST):
            return False
        self.body.insert(0, (headx + 1, heady))
        self._eraseLastBodySegment()
        return True
    def moveWest(self):
        """Move the head one cell west; return False if that cell is blocked."""
        headx, heady = self.body[0]
        if self.isBlocked(WEST):
            return False
        self.body.insert(0, (headx - 1, heady))
        self._eraseLastBodySegment()
        return True
    def isBlocked(self, direction):
        """Return True if the screen edge or the worm's own body blocks *direction*."""
        headx, heady = self.body[0]
        if direction == NORTH:
            return heady == 0 or (headx, heady - 1) in self.body
        elif direction == SOUTH:
            return heady == HEIGHT - 1 or (headx, heady + 1) in self.body
        elif direction == EAST:
            return headx == WIDTH - 1 or (headx + 1, heady) in self.body
        elif direction == WEST:
            return headx == 0 or (headx - 1, heady) in self.body
    def moveRandom(self):
        """Try random directions until one succeeds; if boxed in, reverse head/tail first."""
        if self.isBlocked(NORTH) and self.isBlocked(SOUTH) and self.isBlocked(EAST) and self.isBlocked(WEST):
            self.body.reverse()  # Boxed in: make the tail the new head.
        if self.isBlocked(NORTH) and self.isBlocked(SOUTH) and self.isBlocked(EAST) and self.isBlocked(WEST):
            return False
        hasMoved = False
        while not hasMoved:
            direction = random.choice([NORTH, SOUTH, EAST, WEST])
            if direction == NORTH:
                hasMoved = self.moveNorth()
            elif direction == SOUTH:
                hasMoved = self.moveSouth()
            elif direction == EAST:
                hasMoved = self.moveEast()
            elif direction == WEST:
                hasMoved = self.moveWest()
    def _eraseLastBodySegment(self):
        """Blank out the tail cell on screen and drop it from the body list."""
        # Erase the last body segment:
        bext.goto(self.body[-1][0] * 2, self.body[-1][1])
        print(' ', end='')
        self.body.pop() # Delete the last (x, y) tuple in self.body.
    def display(self):
        """Draw every body segment in its color (two block chars per cell)."""
        for i, (x, y) in enumerate(self.body):
            bext.goto(x * 2, y)
            bext.fg(self.colors[i])
            print(BLOCK + BLOCK, end='')
def getRandomNeighbor(x, y):
    """Return a random in-bounds cell orthogonally adjacent to (x, y)."""
    # Rejection-sample directions until one stays on the screen; this keeps
    # the choice uniform over the valid neighbors.
    while True:
        heading = random.choice((NORTH, SOUTH, EAST, WEST))
        if heading == NORTH and y > 0:
            return (x, y - 1)
        if heading == SOUTH and y < HEIGHT - 1:
            return (x, y + 1)
        if heading == EAST and x < WIDTH - 1:
            return (x + 1, y)
        if heading == WEST and x > 0:
            return (x - 1, y)
# If this program was run (instead of imported), run the game:
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit() # When Ctrl-C is pressed, end the program.
| [
"[email protected]"
] | |
b6bbb6f5cdddba874b8e7c74471e4bb45a157100 | 30f4230650a73d3cb112ab17a46c9c9375734308 | /Covid_chat_bot.py | 72ef13f0267f5701e927d37a5585c60fb0487005 | [] | no_license | suyash-dubey/PROJECT-2-COVID-CHATBOT | f47108b44aebb23e1b33f59d8e17591bffd8f306 | 7fc147b56268697355e5b1c606c680860c89cd29 | refs/heads/main | 2023-06-10T02:00:04.503911 | 2021-07-03T07:54:08 | 2021-07-03T07:54:08 | 382,557,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py | import random
from newspaper import Article
import string
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#pip install newspaper3k
nltk.download('punkt',quiet=True)
# Fetch the knowledge base: download and parse the Wikipedia COVID-19 article.
article=Article('https://en.wikipedia.org/wiki/COVID-19')
article.download()  # network fetch of the page
article.parse()     # extract the main article text
article.nlp()       # newspaper3k NLP pass (requires the nltk 'punkt' model)
corpus=article.text
# Split the article into sentences. This list is the bot's retrieval corpus;
# bot_res() temporarily appends (and later removes) each user query to it.
test=corpus
sentence_list=nltk.sent_tokenize(test)  # list of sentences
#function to return a random greeting msg to user
def greet_res(text):
    """Return a random greeting when *text* contains a greeting word.

    Returns None (implicitly "no greeting detected") otherwise, which the
    caller uses to fall through to the retrieval-based response.
    """
    bot_greetings = ['hello', 'hi', 'hey']
    user_greetings = {'hello', 'hi', 'hey', 'hii', 'wassup', 'lo', 'hellooooooo'}
    words = text.lower().split()
    if any(word in user_greetings for word in words):
        return random.choice(bot_greetings)
    return None
#function to sort index_sort
def index_sort(list_var):
    """Return the indices of *list_var* ordered from largest to smallest value.

    Replaces the original O(n^2) pairwise exchange sort with the built-in
    sort (O(n log n)). ``sorted`` is stable, so equal values keep their
    original relative order, matching the strict-``>`` swap condition of
    the old implementation.

    Args:
        list_var: sequence of comparable values (e.g. similarity scores).

    Returns:
        list[int]: indices into *list_var*, highest value first.
    """
    return sorted(range(len(list_var)), key=lambda i: list_var[i], reverse=True)
#function for bot response
def bot_res(user_input):
    """Build the bot's reply: up to three corpus sentences most similar to *user_input*.

    The query is temporarily appended to the module-level ``sentence_list`` so
    it can be vectorised together with the corpus; it is removed again before
    returning, restoring the corpus for the next call.
    """
    user_input=user_input.lower()
    sentence_list.append(user_input)
    bot_res=''
    # Vectorise corpus + query into a term-count matrix (one row per sentence).
    cm=CountVectorizer().fit_transform(sentence_list)
    # Cosine similarity of the query (cm[-1], the row just appended) to every row.
    s_score=cosine_similarity(cm[-1],cm)
    s_score_list=s_score.flatten()  # 1-D array of similarity scores
    index=index_sort(s_score_list)
    index=index[1:]  # drop the top-ranked entry: the query itself (similarity 1.0)
    res_flag=0  # becomes 1 once at least one matching sentence was appended
    j=0
    for i in range(len(index)):
        if s_score_list[index[i]]>0.0:
            bot_res=bot_res+' '+sentence_list[index[i]]
            res_flag=1
            j=j+1
        # Cap the reply length: the loop stops after the third appended sentence.
        if j>2:
            break
    if res_flag==0:
        bot_res=bot_res+' I apologise that i have not understood ur meaning plz be specific'
    sentence_list.remove(user_input)
    return bot_res
# Interactive chat loop: read user lines from stdin until an exit word is typed.
print('Covid Helpline: I m here to help u with the information regarding corona virus. If u want to exit type nye or exit or quit')
exit_list=['bye','exit','byeee','quit']
while(True):
    user_input=input()
    if user_input.lower() in exit_list:
        print('Bot: Thanks for ur queries')
        break
    else:
        # Greeting words get a canned greeting; everything else goes to the
        # retrieval bot.
        # NOTE(review): greet_res is called twice per turn; both calls draw an
        # independent random.choice, so the printed greeting may differ from
        # the one that was tested against None.
        if greet_res(user_input)!=None:
            print('Bot:'+greet_res(user_input))
        else:
            print('Bot:'+bot_res(user_input))
| [
"[email protected]"
] | |
5f6ac666d9265fdaba9d9dffc042793d684732b2 | a3ee120ce3b32d20184df44eeb35b38a3423e322 | /image_utils.py | 8d4098ab115428b822978bf48c6d852ba87f4a0b | [
"MIT"
] | permissive | Ikhwansong/ComputerSoftware_OCR | 6857b3af121c5b7767893e72c7a10b27ef77ac87 | e1664c7fd6081e00e39d9b5fbc48e902bbb5fdfc | refs/heads/master | 2022-10-21T02:36:43.333591 | 2020-06-11T20:13:40 | 2020-06-11T20:13:40 | 271,306,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,139 | py | import os
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
import numpy as np
import tensorflow as tf
import math
import cv2
from box_utils import compute_iou
class ImageVisualizer_cv2(object):
    """Draw detection boxes and class names on images with OpenCV and save them.

    Each class index is assigned a distinct color taken from an evenly spaced
    RGB cube grid, shuffled once at construction time.
    """

    def __init__(self, idx_to_name, class_colors = None, save_dir = None):
        """
        Args:
            idx_to_name: list mapping class index -> class-name string.
            class_colors: unused; kept for signature compatibility.
            save_dir: output directory; defaults to './' and is created
                (with parents) when an explicit directory is given.
        """
        self.idx_to_name = idx_to_name
        self.color_matrix = self._colorizing()
        # BUGFIX: np.random.shuffle shuffles in place and returns None. The
        # original `self.color_matrix = np.random.shuffle(...)` therefore
        # replaced the palette with None and made save_image crash.
        np.random.shuffle(self.color_matrix)
        if save_dir is None:
            self.save_dir = './'
        else:
            self.save_dir = save_dir
            os.makedirs(self.save_dir, exist_ok=True)

    def _colorizing(self,):
        """Build an (n, 3) palette of RGB colors on an evenly spaced cube grid.

        Returns all grid corners except the first (pure black) and the last
        entry, matching the original [1:-1] slice.
        """
        factor = math.floor(math.pow(len(self.idx_to_name), 1/3))
        color_divider = 255/factor
        color_matrix = np.zeros(((factor+1)*(factor+1)*(factor+1), 3))
        index = 0
        for x in range(factor+1):
            for y in range(factor+1):
                for z in range(factor+1):
                    color_matrix[index, :] = np.array([x*color_divider, y*color_divider, z*color_divider])
                    index = index + 1
        return color_matrix[1:-1]

    def save_image(self, img_path, boxes, labels, name):
        """Draw *boxes* (x1, y1, x2, y2) with their class names on the image
        loaded from *img_path* and write the result to ``save_dir/name``."""
        img = cv2.imread(img_path)
        save_path = os.path.join(self.save_dir, name)
        for i, box in enumerate(boxes):
            idx = labels[i] - 1  # labels are 1-based; palette rows are 0-based
            cls_name = self.idx_to_name[idx]
            top_left = (box[0], box[1])
            bot_right = (box[2], box[3])
            cv2.rectangle(img, top_left, bot_right, self.color_matrix[idx], 1)
            cv2.putText(img, cls_name, top_left, 1, (255, 255, 255), 1)
        cv2.imwrite(save_path, img)
class ImageVisualizer(object):
    """Render detection boxes and class labels on an image with matplotlib
    and save the figure into ``save_dir``.

    Attributes:
        idx_to_name: list mapping class index -> class-name string
        class_colors: per-class colors for boxes and labels
        save_dir: directory the rendered images are written to
    """
    def __init__(self, idx_to_name, class_colors=None, save_dir=None):
        self.idx_to_name = idx_to_name
        # Fall back to green for every class when no (or a mismatched
        # number of) colors was supplied.
        colors_invalid = class_colors is None or len(class_colors) != len(self.idx_to_name)
        if colors_invalid:
            self.class_colors = [[0, 255, 0]] * len(self.idx_to_name)
        else:
            self.class_colors = class_colors
        self.save_dir = './' if save_dir is None else save_dir
        os.makedirs(self.save_dir, exist_ok=True)

    def save_image(self, img, boxes, labels, name):
        """Draw *boxes* with their class names on *img*, then save to
        ``save_dir/name``.

        Args:
            img: numpy array (width, height, 3)
            boxes: numpy array (num_boxes, 4) as (x1, y1, x2, y2)
            labels: numpy array (num_boxes,) of 1-based class ids
            name: file name of the image to be saved
        """
        plt.figure()
        fig, ax = plt.subplots(1)
        ax.imshow(img)
        save_path = os.path.join(self.save_dir, name)
        for box_index, box in enumerate(boxes):
            cls_idx = labels[box_index] - 1
            cls_name = self.idx_to_name[cls_idx]
            x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
            ax.add_patch(patches.Rectangle(
                (x1, y1),
                x2 - x1, y2 - y1,
                linewidth=2, edgecolor=(0., 1., 0.),
                facecolor="none"))
            plt.text(
                x1,
                y1,
                s=cls_name,
                fontsize='small',
                color="white",
                verticalalignment="top",
                bbox={"color": (0., 1., 0.), "pad": 0},
            )
        plt.axis("off")
        plt.savefig(save_path, bbox_inches="tight", pad_inches=0.0)
        plt.close('all')
def generate_patch(boxes, threshold):
    """Sample random normalized patches until one overlaps the gt boxes enough.

    A candidate patch is accepted as soon as its IOU with at least one gt box
    reaches *threshold*; otherwise a new candidate is drawn.

    Args:
        boxes: gt box tensor (num_boxes, 4)
        threshold: IOU threshold to decide whether to accept the patch

    Returns:
        patch: accepted patch coordinates, shape (4,)
        ious: IOUs between the patch and every gt box, shape (num_boxes,)
    """
    while True:
        # Same RNG call order as before: width, aspect scale, then corner.
        w = random.uniform(0.1, 1)
        aspect = random.uniform(0.5, 2)
        h = w * aspect
        x0 = random.uniform(0, 1 - w)
        y0 = random.uniform(0, 1 - h)
        candidate = np.array(
            [[x0, y0, x0 + w, y0 + h]],
            dtype=np.float32)
        candidate = np.clip(candidate, 0.0, 1.0)
        ious = compute_iou(tf.constant(candidate), boxes)
        if tf.math.reduce_any(ious >= threshold):
            return candidate[0], ious[0]
def random_patching(img, boxes, labels):
    """ Function to apply random patching
        Firstly, a patch is randomly picked
        Then only gt boxes of which IOU with the patch is above a threshold
        and has center point lies within the patch will be selected
    Args:
        img: the original PIL Image
        boxes: gt boxes tensor (num_boxes, 4)
        labels: gt labels tensor (num_boxes,)
    Returns:
        img: the cropped PIL Image
        boxes: selected gt boxes tensor (new_num_boxes, 4)
        labels: selected gt labels tensor (new_num_boxes,)
    """
    # The IOU threshold is itself sampled from {0.1, 0.3, 0.5, 0.7}.
    threshold = np.random.choice(np.linspace(0.1, 0.7, 4))
    patch, ious = generate_patch(boxes, threshold)
    box_centers = (boxes[:, :2] + boxes[:, 2:]) / 2
    # Keep a gt box only if it overlaps the patch enough (IOU > 0.3) AND its
    # center falls strictly inside the patch rectangle.
    keep_idx = (
        (ious > 0.3) &
        (box_centers[:, 0] > patch[0]) &
        (box_centers[:, 1] > patch[1]) &
        (box_centers[:, 0] < patch[2]) &
        (box_centers[:, 1] < patch[3])
    )
    # If no gt box survives the filter, leave the sample unchanged.
    if not tf.math.reduce_any(keep_idx):
        return img, boxes, labels
    img = img.crop(patch)
    boxes = boxes[keep_idx]
    patch_w = patch[2] - patch[0]
    patch_h = patch[3] - patch[1]
    # Re-express the surviving boxes in the cropped image's normalized frame.
    boxes = tf.stack([
        (boxes[:, 0] - patch[0]) / patch_w,
        (boxes[:, 1] - patch[1]) / patch_h,
        (boxes[:, 2] - patch[0]) / patch_w,
        (boxes[:, 3] - patch[1]) / patch_h], axis=1)
    boxes = tf.clip_by_value(boxes, 0.0, 1.0)
    labels = labels[keep_idx]
    return img, boxes, labels
def horizontal_flip(img, boxes, labels):
    """Mirror *img* left-right and flip the gt boxes to match.

    Args:
        img: the original PIL Image
        boxes: gt boxes tensor (num_boxes, 4), normalized (x1, y1, x2, y2)
        labels: gt labels tensor (num_boxes,)

    Returns:
        Mirrored PIL Image, flipped gt boxes tensor, unchanged labels.
    """
    mirrored = img.transpose(Image.FLIP_LEFT_RIGHT)
    # x-coordinates reflect around 1.0; the old right edge (x2) becomes the
    # new left edge (x1) and vice versa, while y-coordinates are untouched.
    flipped_boxes = tf.stack([
        1 - boxes[:, 2],
        boxes[:, 1],
        1 - boxes[:, 0],
        boxes[:, 3]], axis=1)
    return mirrored, flipped_boxes, labels
| [
"[email protected]"
] | |
0b879b61c2d2b1641c7c52ec97b5e9506509dfd9 | 300cd7b8c6a4f05c3d1c455d4a37d7a1dd12bd28 | /something.py | ead38d2c401f6ef69fdbdb8dc02ed7d85e47230d | [] | no_license | cravenormality/portfolioterm3 | 5a370b7bfd46f192b1719eee9e0dcbd169e8db6a | 0273ab8adeb963fbb3a382e9367666a55e5204c0 | refs/heads/master | 2020-04-29T01:33:37.442705 | 2019-03-15T17:02:12 | 2019-03-15T17:02:12 | 175,734,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,808 | py | class Book:
    def __init__(self,id,bookName,authorName,nextNode=None):
        """Linked-list node for one book: id, title, author and next-link."""
        self.id = id
        self.bookName = bookName
        self.authorName = authorName
        self.nextNode = nextNode
    def getId(self):
        """Return the book's id string."""
        return self.id
    def getBookName(self):
        """Return the book's title."""
        return self.bookName
    def getAuthorName(self):
        """Return the book's author."""
        return self.authorName
    def getNextNode(self):
        """Return the next node in the list (None at the tail)."""
        return self.nextNode
    def setNextNode(self,val):
        """Point this node's next-link at *val*."""
        self.nextNode = val
class LinkedList:
    """Singly linked list of Book nodes.

    Supports prepend, positional insert/remove (0-based positions) and an
    in-place bubble sort by author name that swaps node payloads rather
    than relinking nodes.
    """
    def __init__(self,head = None):
        self.head = head
        self.size = 0  # node count; only maintained by the mutator methods below
    def getSize(self):
        """Return the tracked number of nodes."""
        return self.size
    def AddBookToFront(self,newBook):
        """Prepend *newBook* in O(1)."""
        newBook.setNextNode(self.head)
        self.head = newBook
        self.size+=1
    def DisplayBook(self):
        """Print id, title and author of every node from head to tail."""
        curr = self.head
        while curr:
            print(curr.getId(),curr.getBookName(),curr.getAuthorName())
            curr = curr.getNextNode()
    def RemoveBookAtPosition(self,n):
        """Remove the node at 0-based position *n*.

        Returns True when a node was removed, False when *n* is past the end.
        """
        prev = None
        curr = self.head
        curPos = 0
        while curr:
            if curPos == n:
                if prev:
                    prev.setNextNode(curr.getNextNode())
                else:
                    # Removing the head: advance the head pointer instead.
                    self.head = curr.getNextNode()
                self.size = self.size - 1
                return True
            prev = curr
            curr = curr.getNextNode()
            curPos = curPos + 1
        return False
    def AddBookAtPosition(self,newBook,n):
        """Insert *newBook* at 0-based position *n* (0 = new head).

        Appends when *n* equals the current tail position; otherwise, for an
        *n* beyond the list, prints an error message and inserts nothing.
        NOTE(review): when the list is empty and n > 0, currentNode is None
        and the loop condition raises AttributeError.
        """
        curPos = 1
        if n == 0:
            newBook.setNextNode(self.head)
            self.head = newBook
            self.size+=1
            return
        else:
            currentNode = self.head
            while currentNode.getNextNode() is not None:
                if curPos == n:
                    newBook.setNextNode(currentNode.getNextNode())
                    currentNode.setNextNode(newBook)
                    self.size+=1
                    return
                currentNode = currentNode.getNextNode()
                curPos = curPos + 1
            if curPos == n:
                # Reached the tail exactly at position n: append.
                newBook.setNextNode(None)
                currentNode.setNextNode(newBook)
                self.size+=1
            else:
                print("cannot add",newBook.getId(),newBook.getBookName(),"at that position")
    def SortByAuthorName(self):
        """Sort ascending by authorName using size-1 bubble-sort passes.

        Adjacent nodes exchange their (id, bookName, authorName) payloads;
        the link structure itself is never modified.
        """
        for i in range(1,self.size):
            node1 = self.head
            node2 = node1.getNextNode()
            while node2 is not None:
                if node1.authorName > node2.authorName:
                    temp = node1.id
                    temp2 = node1.bookName
                    temp3 = node1.authorName
                    node1.id = node2.id
                    node1.bookName = node2.bookName
                    node1.authorName = node2.authorName
                    node2.id = temp
                    node2.bookName = temp2
                    node2.authorName = temp3
                node1 = node1.getNextNode()
                node2 = node2.getNextNode()
# Demo: build a list of six books, exercise front/positional insert and
# positional removal, then sort the remaining nodes by author name.
myLinkedList = LinkedList()
nodeA = Book("#1","cool","Isaac")
nodeB = Book("#2","amazing","Alfred")
nodeC = Book("#3","hello","John")
nodeD = Book("#4","why","Chase")
nodeE = Book("#5","good","Mary")
nodeF = Book("#6","hahaha","Radin")
myLinkedList.AddBookToFront(nodeA)
myLinkedList.AddBookToFront(nodeB)
myLinkedList.AddBookToFront(nodeC)
# Insert at position 1 three times; the most recently inserted ends up first.
myLinkedList.AddBookAtPosition(nodeD,1)
myLinkedList.AddBookAtPosition(nodeE,1)
myLinkedList.AddBookAtPosition(nodeF,1)
myLinkedList.RemoveBookAtPosition(2)
myLinkedList.RemoveBookAtPosition(2)
myLinkedList.DisplayBook()
myLinkedList.SortByAuthorName()
print(myLinkedList.getSize())
myLinkedList.DisplayBook()
| [
"[email protected]"
] | |
4dceb544e9e5cb1d823f903dc4ef905e43deca34 | 264ff719d21f2f57451f322e9296b2f55b473eb2 | /gvsoc/gvsoc/models/pulp/fll/fll_v1.py | 4bba0a37a2e965a05864f2ac8ec5e53627d4f934 | [
"Apache-2.0"
] | permissive | knmcguire/gap_sdk | 06c9537c16fa45dea6b7f5c6b162b53953262915 | 7b0a09a353ab6f0550793d40bd46e98051f4a3d7 | refs/heads/master | 2020-12-20T06:51:19.580497 | 2020-01-21T14:52:28 | 2020-01-21T14:52:28 | 235,992,961 | 0 | 0 | Apache-2.0 | 2020-01-24T11:45:59 | 2020-01-24T11:45:58 | null | UTF-8 | Python | false | false | 772 | py | #
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Germain Haugou, ETH ([email protected])
import vp_core as vp
class component(vp.component):
    """gvsoc component descriptor for the v1 FLL model.

    Only points the framework at the module that contains the actual
    implementation; all behavior lives in `pulp.fll.fll_v1_impl`.
    """
    implementation = 'pulp.fll.fll_v1_impl'
| [
"[email protected]"
] | |
ecf839c9ca68216641195e95ef5da5bca7e6347b | 2315b173b7a04c8c94b188394aff4656a3b82a9b | /lib/net/rpn.py | d8e3a899db703fd57da78a00b123dd86c96395f7 | [
"MIT"
] | permissive | StiphyJay/WS3D | a551b7c169990cf0699ddd48a43d59c1148f2aeb | db90ba12026fb1c4a12e9b791117d6af4ccb052f | refs/heads/master | 2022-11-27T23:52:00.387317 | 2020-08-15T04:58:58 | 2020-08-15T04:58:58 | 288,369,260 | 1 | 1 | MIT | 2020-08-18T06:03:39 | 2020-08-18T06:03:38 | null | UTF-8 | Python | false | false | 3,375 | py | import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pointnet2_lib.pointnet2.pytorch_utils as pt_utils
import lib.utils.loss_utils as loss_utils
from lib.config import cfg
import importlib
class RPN(nn.Module):
    """Region Proposal Network head: a point-cloud backbone followed by
    per-point classification and box-regression branches.

    All layer sizes and the loss choice are driven by the project-wide
    ``cfg.RPN`` configuration.
    """
    def __init__(self, use_xyz=True, mode='TRAIN',old_model=False):
        """Build backbone, cls/reg branches and the classification loss.

        Args:
            use_xyz: forwarded to the backbone's get_model().
            mode: 'TRAIN' enables training_mode; anything else disables it.
            old_model: use the legacy (larger) regression channel layout.
        """
        super().__init__()
        self.training_mode = (mode == 'TRAIN')

        # backbone network (module path taken from the config)
        MODEL = importlib.import_module(cfg.RPN.BACKBONE)
        self.backbone_net = MODEL.get_model(input_channels=int(cfg.RPN.USE_INTENSITY), use_xyz=use_xyz)

        # classification branch: stack of Conv1d layers ending in 1 channel
        cls_layers = []
        pre_channel = cfg.RPN.FP_MLPS[0][-1]
        for k in range(0, cfg.RPN.CLS_FC.__len__()):
            cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RPN.CLS_FC[k], bn=cfg.RPN.USE_BN))
            pre_channel = cfg.RPN.CLS_FC[k]
        cls_layers.append(pt_utils.Conv1d(pre_channel, 1, activation=None))
        if cfg.RPN.DP_RATIO >= 0:
            # Dropout goes after the first conv layer.
            cls_layers.insert(1, nn.Dropout(cfg.RPN.DP_RATIO))
        self.rpn_cls_layer = nn.Sequential(*cls_layers)

        # regression branch: output width depends on the localization binning
        per_loc_bin_num = int(cfg.RPN.LOC_SCOPE / cfg.RPN.LOC_BIN_SIZE) * 2
        reg_channel = per_loc_bin_num * 4
        if old_model:
            # legacy layout carried extra angle-bin (12 * 2) and size (3) channels
            reg_channel = per_loc_bin_num * 4 + 12 * 2 + 3
        reg_channel += 1
        reg_layers = []
        pre_channel = cfg.RPN.FP_MLPS[0][-1]
        for k in range(0, cfg.RPN.REG_FC.__len__()):
            reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RPN.REG_FC[k], bn=cfg.RPN.USE_BN))
            pre_channel = cfg.RPN.REG_FC[k]
        reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
        if cfg.RPN.DP_RATIO >= 0:
            reg_layers.insert(1, nn.Dropout(cfg.RPN.DP_RATIO))
        self.rpn_reg_layer = nn.Sequential(*reg_layers)

        # classification loss selected by config
        if cfg.RPN.LOSS_CLS == 'DiceLoss':
            self.rpn_cls_loss_func = loss_utils.DiceLoss(ignore_target=-1)
        elif cfg.RPN.LOSS_CLS == 'SigmoidFocalLoss':
            self.rpn_cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RPN.FOCAL_ALPHA[0],
                                                                               gamma=cfg.RPN.FOCAL_GAMMA)
        elif cfg.RPN.LOSS_CLS == 'BinaryCrossEntropy':
            self.rpn_cls_loss_func = F.binary_cross_entropy
        else:
            raise NotImplementedError

        self.init_weights()
    def init_weights(self):
        """Initialize the heads: focal-loss prior bias on the cls output
        (sigmoid(bias) == pi, i.e. a low initial foreground probability)
        and small-std normal init for the final regression weights."""
        if cfg.RPN.LOSS_CLS in ['SigmoidFocalLoss']:
            pi = 0.01
            nn.init.constant_(self.rpn_cls_layer[2].conv.bias, -np.log((1 - pi) / pi))

        nn.init.normal_(self.rpn_reg_layer[-1].conv.weight, mean=0, std=0.001)
    def forward(self, input_data):
        """Run the backbone and both heads.

        :param input_data: dict with key 'pts_input' (the point cloud)
        :return: dict with 'rpn_cls' (B, N, 1), 'rpn_reg' (B, N, C),
            'backbone_xyz' (B, N, 3) and 'backbone_features' (B, C, N)
        """
        pts_input = input_data['pts_input']
        backbone_xyz, backbone_features = self.backbone_net(pts_input)  # (B, N, 3), (B, C, N)

        rpn_cls = self.rpn_cls_layer(backbone_features).transpose(1, 2).contiguous()  # (B, N, 1)
        rpn_reg = self.rpn_reg_layer(backbone_features).transpose(1, 2).contiguous()  # (B, N, C)

        ret_dict = {'rpn_cls': rpn_cls, 'rpn_reg': rpn_reg,
                    'backbone_xyz': backbone_xyz, 'backbone_features': backbone_features}
        return ret_dict
| [
"[email protected]"
] | |
856794859aa4e34f0efcddebb80b7314f97f9f4c | 2c6e600de029d38478e3c4e4d4500d9a42b6dd98 | /End to End Multipli Disease Prediction/models/diabetes.py | 5b002b10b6b71b2d573735048b385db4a7fcbd6e | [] | no_license | githubybf/All-End-to-End-Projects | ae76434f49808a5076a5ec788f650c850b908cec | 40d0d5f1016c3c7b7e6457d697e6dc727f8c388c | refs/heads/main | 2023-03-07T09:07:19.289830 | 2021-02-07T08:06:08 | 2021-02-07T08:06:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
import joblib
# Train a logistic-regression diabetes classifier and persist it with joblib.
data=pd.read_csv("diabetes.csv")
print(data.head())
logreg=LogisticRegression()
X=data.iloc[:,:8]  # the first 8 columns are the feature set
print(X.shape[1])
y=data[["Outcome"]]  # binary target column
X=np.array(X)
y=np.array(y)
logreg.fit(X,y.reshape(-1,))  # reshape (n, 1) -> (n,) as fit expects
joblib.dump(logreg,"model1")  # serialize the trained model to file "model1"
| [
"[email protected]"
] | |
780073cc16c8f338f3195e45934b88dd0709ef5b | f777b5e4a98c40f4bfc5c5c9e326faa09beb2d53 | /projects/DensePose/densepose/modeling/cse/utils.py | 18480db5e485dec3bd0daf3cae69263a6abdde4f | [
"Apache-2.0"
] | permissive | alekseynp/detectron2 | 04ae9a47d950ea4c737715b5f2aa7637d3742264 | 2409af0bf0d4bdcc685feb6d2c7fd659828acac4 | refs/heads/master | 2022-05-30T09:13:26.438077 | 2022-04-11T20:59:40 | 2022-04-11T20:59:40 | 254,280,315 | 0 | 1 | Apache-2.0 | 2020-04-09T05:34:15 | 2020-04-09T05:34:14 | null | UTF-8 | Python | false | false | 3,538 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from torch.nn import functional as F
def squared_euclidean_distance_matrix(pts1: torch.Tensor, pts2: torch.Tensor) -> torch.Tensor:
    """Pairwise squared Euclidean distances between two point sets.

    Uses the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 so the whole
    matrix is produced by one matmul plus broadcasted row/column norms.

    Args:
        pts1: Tensor [M x D], M points of dimensionality D
        pts2: Tensor [N x D], N points of dimensionality D

    Return:
        Tensor [M, N] whose (m, n) entry is || pts1[m] - pts2[n] ||^2
    """
    sq_norms1 = (pts1 * pts1).sum(1, keepdim=True)      # [M, 1]
    sq_norms2 = (pts2 * pts2).sum(1, keepdim=True).t()  # [1, N]
    edm = torch.mm(-2 * pts1, pts2.t())                 # [M, N] cross term
    edm += sq_norms1 + sq_norms2
    return edm.contiguous()
def normalize_embeddings(embeddings: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
    """L2-normalize each row of *embeddings*.

    Args:
        embeddings (tensor [N, D]): N D-dimensional embedding vectors
        epsilon (float): floor applied to each row norm, so near-zero rows
            are divided by epsilon instead of blowing up

    Return:
        Tensor [N, D] with (approximately) unit-norm rows.
    """
    row_norms = embeddings.norm(p=None, dim=1, keepdim=True)
    return embeddings / row_norms.clamp(min=epsilon)
def get_closest_vertices_mask_from_ES(
    E: torch.Tensor,
    S: torch.Tensor,
    h: int,
    w: int,
    mesh_vertex_embeddings: torch.Tensor,
    device: torch.device,
):
    """
    Interpolate Embeddings and Segmentations to the size of a given bounding box,
    and compute closest vertices and the segmentation mask

    Args:
        E (tensor [1, D, H, W]): D-dimensional embedding vectors for every point of the
            default-sized box
        S (tensor [1, 2, H, W]): 2-dimensional segmentation mask for every point of the
            default-sized box
        h (int): height of the target bounding box
        w (int): width of the target bounding box
        mesh_vertex_embeddings (tensor [N, D]): vertex embeddings for a chosen mesh
            N is the number of vertices in the mesh, D is feature dimensionality
        device (torch.device): device to move the tensors to

    Return:
        Closest Vertices (tensor [h, w]), int, for every point of the resulting box
        Segmentation mask (tensor [h, w]), boolean, for every point of the resulting box
    """
    # Resize embeddings and segmentation to the target box size.
    # pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got `Tuple[int, int]`.
    embedding_resized = F.interpolate(E, size=(h, w), mode="bilinear")[0].to(device)
    # pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got `Tuple[int, int]`.
    coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].to(device)
    # Foreground pixels are those where the 2-channel argmax picks channel > 0.
    mask = coarse_segm_resized.argmax(0) > 0
    closest_vertices = torch.zeros(mask.shape, dtype=torch.long, device=device)
    # Embeddings of the foreground pixels only, one row per pixel: (num_fg, D).
    all_embeddings = embedding_resized[:, mask].t()
    size_chunk = 10_000  # Chunking to avoid possible OOM
    edm = []
    if len(all_embeddings) == 0:
        # No foreground: all-zero vertex map and an all-False mask.
        return closest_vertices, mask
    for chunk in range((len(all_embeddings) - 1) // size_chunk + 1):
        chunk_embeddings = all_embeddings[size_chunk * chunk : size_chunk * (chunk + 1)]
        # Nearest mesh vertex (by squared Euclidean distance) per pixel embedding.
        edm.append(
            torch.argmin(
                squared_euclidean_distance_matrix(chunk_embeddings, mesh_vertex_embeddings), dim=1
            )
        )
    closest_vertices[mask] = torch.cat(edm)
    return closest_vertices, mask
| [
"[email protected]"
] | |
de41515bdfe3faa82c3ce8ed5c220f24b123aac9 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/pythonMongoDB部分/day39/code/mongo1.py | 5b9aef2d4d0dc19f114aaca150810694bc086161 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | #coding:utf8
# Index and aggregation operations (pymongo examples)
from pymongo import MongoClient,IndexModel
conn = MongoClient('localhost',27017)
db = conn.stu
my_set = db.class4
# Create an index and return its name
#index = my_set.ensure_index('name')
#print(index)
# Compound index
#index = my_set.ensure_index([('name',1),('king',-1)])
#print(index)
# Unique and sparse indexes
cls = db.class0
# Unique index
#index = cls.ensure_index('name',unique=True)
# Sparse index
#index = my_set.ensure_index('king_name',sparse=True)
# Drop indexes
#my_set.drop_index('name_1')
#my_set.drop_indexes() # drop every index
# Create several indexes at once
#index1 = IndexModel([('name',1),('king',-1)])
#index2 = IndexModel([('king_name',1)])
#indexes = my_set.create_indexes([index1,index2])
# List the indexes of a collection
#for i in my_set.list_indexes():
#    print(i)
# Aggregation pipeline: group documents by 'king', count each group,
# and keep only the groups that occur more than once.
l = [{'$group':{'_id':'$king','count':{'$sum':1}}},{'$match':{'count':{'$gt':1}}}]
cursor = my_set.aggregate(l)
for i in cursor:
    print(i)
| [
"[email protected]"
] | |
806e3cd0e19b3608d616b002a8bb2b876ca9e51a | d564c1dcde3a139960e441a732f308dee7bac268 | /code/run5All_Content_PlainUD.py | 517e77577c22a0ae492044444a377776233b03a6 | [] | no_license | m-hahn/left-right-asymmetries | 9b5142dcf822194068feea2ccc0e8cc3b0573bbe | 45e5b40a145e2a9d51c12617dc76be5a49ddf43e | refs/heads/master | 2020-04-26T11:47:57.781431 | 2019-03-22T01:00:48 | 2019-03-22T01:00:48 | 173,528,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from ud_languages import languages
import subprocess
# Run the analysis script once per (language, model) pair, sequentially.
languages = sorted(languages, reverse=True)  # reverse-alphabetical processing order
for language in languages:
    for model in ["REAL_REAL", "REVERSE"]: #, "GROUND"] + (["RANDOM_BY_TYPE"] * 5):
        # Blocking call: each run finishes before the next starts; the target
        # script is executed under the local Python 2.7 interpreter.
        command = ["./python27", "testLeftRightEntUniHDCond3FilterMIWord5_Content_PlainUD_Bugfix.py", language, model]
        subprocess.call(command)
| [
"[email protected]"
] | |
9101f98269a25bbd0d6a6a65bc58c9e4c128d376 | 443927accc10828ea1e81c46c38f4f7b0815390c | /armstrong.py | d3a9905eed995beb6394b333b62043b5a5161e61 | [] | no_license | 1PrathameshMore/PPL-assignments | 690042bceba55731c01e99903dd8cb3fc7e50f97 | 7633a001b9bebc7ab2057fa9ab72a9c1fdbaeda3 | refs/heads/master | 2022-10-14T22:00:25.658406 | 2020-06-10T16:55:49 | 2020-06-10T16:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | def armstrong():
    """Read bounds d and e from stdin and print every Armstrong number in [d, e).

    An Armstrong (narcissistic) number equals the sum of its digits, each
    raised to the power of the digit count.
    """
    d = input()
    e = input()
    d = int(d)
    e = int(e)
    # NOTE(review): nothing is printed (and no error raised) when d >= e.
    if d < e:
        for i in range(d,e):  # the upper bound e itself is excluded
            z = noofplaces(i)  # number of decimal digits of i
            c = i
            sum = 0  # shadows the builtin; accumulates the digit-power total
            while c > 0 :
                r = c % 10
                f = pow(r,z)
                sum = sum + f
                c = c // 10
            if sum == i:
                print(sum)
    return 0
def noofplaces(x):
    """Return the number of decimal digits of *x* (0 when x <= 0,
    matching the original divide-by-ten loop)."""
    return len(str(x)) if x > 0 else 0
armstrong() | [
"[email protected]"
] | |
bfe97c1c044bc1c39b5f9594fdec9c287f465d1d | 407d52d5d9cfb8f04861a92bc97724bf2e07857d | /utils/views.py | 352d299563eb6e04d754988a7d942141e8516f1e | [] | no_license | buddy-israel/win_crawler | 43b6e60d5008d6138f8fea07f56633acced29ff4 | e2daf56e6c7be803146623a2c56f2e46cfc63909 | refs/heads/main | 2023-05-09T03:30:05.238468 | 2021-06-01T17:52:46 | 2021-06-01T17:52:46 | 372,916,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | import sqlite3
from sqlite3 import Error
database = "../spider.db"
def create_connection():
    """Open a connection to the module-level *database* file.

    Returns the sqlite3 connection, or None when connecting raised an
    Error (the error is printed, keeping the original best-effort behavior).
    """
    try:
        return sqlite3.connect(database)
    except Error as e:
        print(e)
        return None
def create_views():
    """Print the ten users with the highest PostCount, one per line as
    post-count, two tabs, then user name, highest first.

    Query errors are printed and result in no output rows.
    """
    conn = create_connection()
    if conn is None:
        # Connection failed (create_connection already printed the error);
        # the original would have crashed on `with None` below.
        return
    view_1 = """SELECT DISTINCT UserName, PostCount FROM User ORDER BY PostCount DESC LIMIT 10;"""
    # BUGFIX: `result` was only bound inside the try block, so any query
    # error made the loop below raise NameError and hid the real failure.
    result = []
    try:
        with conn:
            c = conn.cursor()
            c.execute(view_1)
            result = c.fetchall()
    except Error as e:
        print(e)
    for x, y in result:
        print(f'{y}\t\t{x}')
if __name__ == '__main__':
    # Script entry point: print the top-ten-users report.
    create_views()
"[email protected]"
] | |
d3ed2e74b0e9dba9944dd11ca896b5016acd263d | 154fd16fe7828cb6925ca8f90e049b754ce06413 | /lino_book/projects/lydia/tests/dumps/18.12.0/teams_team.py | e3d2d31af2866296e853ea6765cf5e65fe6a2a6c | [
"BSD-2-Clause"
] | permissive | lino-framework/book | 68de2f8d130266bd9d9de7576d30597b3cde1c91 | 4eab916832cd8f48ff1b9fc8c2789f0b437da0f8 | refs/heads/master | 2021-03-27T16:16:55.403940 | 2021-03-15T02:53:50 | 2021-03-15T02:53:50 | 58,830,342 | 3 | 9 | BSD-2-Clause | 2021-03-09T13:11:27 | 2016-05-14T21:02:17 | Python | UTF-8 | Python | false | false | 254 | py | # -*- coding: UTF-8 -*-
logger.info("Loading 2 objects to table teams_team...")
# fields: id, ref, name
loader.save(create_teams_team(1,u'E',['Eupen', '', '']))
loader.save(create_teams_team(2,u'S',['St. Vith', '', '']))
loader.flush_deferred_objects()
| [
"[email protected]"
] | |
4f93eb5a8ca11eb43e6903d4e1ccd3e91447737d | d2fe0085b52506733b72229cd1b851bfbdbfeb1f | /mean_var_std.py | b736ca0969bbde30a669f565be16ad47e4505ec5 | [] | no_license | maheshdbabar9340/Mean-Variance-Standard_Deviation_Calculator | 1b62a3203e3d20c7252f18ec5d7d05a8debb9388 | cffbc2974ead977b53fc0e6d65669b36fe7eae47 | refs/heads/main | 2023-05-12T15:48:00.725344 | 2021-06-03T10:51:40 | 2021-06-03T10:51:40 | 373,472,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | import numpy as np
def calculate(list):
    """Compute summary statistics of nine numbers viewed as a 3x3 matrix.

    Args:
        list: sequence of exactly nine numbers. (The parameter name shadows
            the builtin but is kept for API compatibility.)

    Returns:
        dict mapping each statistic name ('mean', 'variance',
        'standard deviation', 'max', 'min', 'sum') to a 3-element list:
        [per-column values, per-row values, value over the flattened matrix].

    Raises:
        ValueError: if *list* does not contain exactly nine numbers.
    """
    if len(list) != 9:
        raise ValueError("List must contain nine numbers.")

    matrix = np.array(list).reshape((3, 3))

    # axis=0 -> column statistics, axis=1 -> row statistics, no axis -> flat.
    stat_functions = [
        ("mean", np.mean),
        ("variance", np.var),
        ("standard deviation", np.std),
        ("max", np.max),
        ("min", np.min),
        ("sum", np.sum),
    ]
    # FIX: the original computed the flattened mean as np.mean(array.tolist()),
    # leaving an np.float64 in the result while every other entry was
    # .tolist()-converted to native Python types; all entries are now uniform.
    return {
        name: [
            fn(matrix, axis=0).tolist(),
            fn(matrix, axis=1).tolist(),
            fn(matrix).tolist(),
        ]
        for name, fn in stat_functions
    }
"[email protected]"
] | |
f8bc2d9b846c19133bad97101540f24fbf6596a6 | 72918242418d81e3ef2b9030553d3d8cc14f28ad | /code/RF-cv.py | d27454dd58f6eae12da993f41380f140aa46e724 | [] | no_license | qq-shu/TextCNN | e03e179454d52fcf00de4c4678d92e277b33cd4d | cdf61e9f72b1c0e7a71cb00b4f91d96b416a0ac3 | refs/heads/main | 2023-08-16T12:59:08.563400 | 2021-09-30T03:30:52 | 2021-09-30T03:30:52 | 411,896,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | from sklearn.ensemble import RandomForestClassifier as RF
# from sklearn import cross_validation
from sklearn import model_selection
from sklearn.metrics import confusion_matrix
import pandas as pd
# Load malware 3-gram features, join them to the labels by Id, and compare
# cross-validated accuracy of several classifiers on the same data.
subtrainLabel = pd.read_csv('subtrainLabels.csv')
subtrainfeature = pd.read_csv("3gramfeature.csv")
subtrain = pd.merge(subtrainLabel,subtrainfeature,on='Id')
labels = subtrain.Class
subtrain.drop(["Class","Id"], axis=1, inplace=True)
subtrain = subtrain.values  # plain ndarray of features
# X_train, X_test, y_train, y_test = model_selection.train_test_split(subtrain,labels,test_size=0.4)
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold,KFold
# Random forest, 10-fold CV.
srf = RF(n_estimators=200, n_jobs=-1)
kfolder = KFold(n_splits=10,random_state=1)
scores4=cross_val_score(srf, subtrain, labels,cv=kfolder)
print(scores4)
print(scores4.mean())
# Random forest again, stratified 4-fold CV.
# NOTE(review): the next line is duplicated -- the second assignment is a no-op.
sfolder = StratifiedKFold(n_splits=4,random_state=0)
sfolder = StratifiedKFold(n_splits=4,random_state=0)
scores3=cross_val_score(srf, subtrain, labels,cv=sfolder)
print(scores3)
print(scores3.mean())
# k-nearest neighbors, 10-fold CV.
clf = KNeighborsClassifier()
kfolder = KFold(n_splits=10,random_state=1)
scores=cross_val_score(clf, subtrain, labels,cv=kfolder)
print(scores)
print(scores.mean())
# RBF-kernel SVM, stratified 4-fold CV.
from sklearn.svm import SVC
clf2 = SVC(kernel='rbf', probability=True)
sfolder = StratifiedKFold(n_splits=4,random_state=0)
scores2=cross_val_score(clf2, subtrain, labels,cv=sfolder)
print(scores2)
print(scores2.mean())
# srf = RF(n_estimators=200, n_jobs=-1)
# srf.fit(X_train,y_train)
# print (srf.score(X_test,y_test))
# y_pred = srf.predict(X_test)
# print (confusion_matrix(y_test, y_pred))
"[email protected]"
] | |
99a64502bc4d3c80b07c903df53770314112a9ed | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/user/thread_profile/tests/test__ThreadProfile__magic.py | 62df5d60ace156b75fde3936db52d10717f48aed | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 1,575 | py | from datetime import datetime as DateTime
import vampytest
from ..flags import ThreadProfileFlag
from ..thread_profile import ThreadProfile
def test__ThreadProfile__repr():
    """
    Tests whether ``ThreadProfile.__repr__`` works as intended.
    """
    profile = ThreadProfile(
        flags = ThreadProfileFlag(2),
        joined_at = DateTime(2016, 5, 15),
    )
    vampytest.assert_instance(repr(profile), str)
def test__ThreadProfile__hash():
    """
    Tests whether ``ThreadProfile.__hash__`` works as intended.
    """
    profile = ThreadProfile(
        flags = ThreadProfileFlag(2),
        joined_at = DateTime(2016, 5, 15),
    )
    vampytest.assert_instance(hash(profile), int)
def test__ThreadProfile__eq():
    """
    Tests whether ``ThreadProfile.__eq__`` works as intended.
    """
    base_parameters = {
        'flags': ThreadProfileFlag(2),
        'joined_at': DateTime(2016, 5, 15),
    }
    thread_profile = ThreadProfile(**base_parameters)

    # Equal to itself, never equal to a foreign type.
    vampytest.assert_eq(thread_profile, thread_profile)
    vampytest.assert_ne(thread_profile, object())

    # Altering any single field must break equality.
    for altered_name, altered_value in (
        ('flags', ThreadProfileFlag(4)),
        ('joined_at', None),
    ):
        altered_profile = ThreadProfile(**{**base_parameters, altered_name: altered_value})
        vampytest.assert_ne(thread_profile, altered_profile)
| [
"[email protected]"
] | |
dca2f3644310a1e7c67b6ae89b9eb9ea3a0c23db | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/b4037f9e2f47429f9d3e6ac8ed0fa8bf.py | 1a70ecf442bfba0ffa267c895ed7411ce53dcf4a | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 515 | py | class Bob:
def hey(self, ask):
conversation = Identify(ask)
if conversation.question():
return "Sure."
elif conversation.yell():
return "Woah, chill out!"
elif conversation.anything():
return "Fine. Be that way!"
else:
return "Whatever."
class Identify:
    """Classifies a remark as a question, a yell, or silence for Bob."""

    def __init__(self, ask):
        # Normalize None to the empty string so all predicates are safe.
        self.ask = ask or ""

    def question(self):
        """True when the remark ends with a question mark."""
        return self.ask.endswith("?")

    def yell(self):
        """True when the remark is shouted (all cased characters upper-case).

        BUGFIX: the original compared ``ask == ask.upper()``, which also
        matched strings without any letters ("123", whitespace, "");
        ``str.isupper`` additionally requires at least one cased character.
        """
        return self.ask.isupper()

    def anything(self):
        """True when the remark is empty or whitespace only (silence).

        BUGFIX: the original compared a string to a list
        (``ask.replace(" ", "") == ask.split()``), which is always False,
        making the "Fine. Be that way!" branch in Bob.hey unreachable.
        """
        return not self.ask.strip()
| [
"[email protected]"
] | |
fc668b0f4beb102abcf466f2f54e0323dd94b77f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /k9usvZ8wfty4HwqX2_2.py | 6df3da8982061b94fd50d4d07581a39b1c4e148e | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | """
Create a function to check whether a given number is **Cuban Prime**. A cuban
prime is a prime number that is a solution to one of two different specific
equations involving third powers of x and y. For this challenge we are only
concerned with the cuban numbers from the **first equation**. We **ignore**
the cuban numbers from the **second equation**.
### Equation Form
p = (x^3 - y^3)/(x - y), x = y + 1, y > 0
... and the first few cuban primes from this equation are 7, 19, 37, 61, 127,
271.
### Examples
cuban_prime(7) ➞ "7 is cuban prime"
cuban_prime(9) ➞ "9 is not cuban prime"
cuban_prime(331) ➞ "331 is cuban prime"
cuban_prime(40) ➞ "40 is not cuban prime"
### Notes
* The inputs are positive integers only.
* Check the **Resources** for help.
"""
def is_prime(p):
    """Return True when *p* is prime (trial division up to sqrt(p)).

    Defined with ``def`` instead of assigning a lambda (PEP 8 E731).
    """
    return p > 1 and all(p % i for i in range(2, int(p ** 0.5) + 1))


def cuban_prime(n):
    """Classify *n* as a cuban prime of the first kind.

    n is cuban prime when n = 3*y**2 + 3*y + 1 for some y >= 0
    (i.e. (x**3 - y**3)/(x - y) with x = y + 1) and n is prime.
    Returns '<n> is cuban prime' or '<n> is not cuban prime'.
    """
    # The candidates grow quadratically, so stop once they pass n:
    # O(sqrt(n)) instead of the original O(n) scan over range(n).
    y = 0
    while True:
        candidate = 3 * y * y + 3 * y + 1
        if candidate > n:
            return str(n) + ' is not cuban prime'
        if candidate == n and is_prime(n):
            return str(n) + ' is cuban prime'
        y += 1
| [
"[email protected]"
] | |
8baf39710e255504a040b81bb6999e6e90b09408 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnn998.py | 6856125ce675d837a7794fd06121ad13f12b0ccf | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 120 | py | ii = [('UnitAI.py', 2), ('WadeJEB.py', 1), ('MereHHB3.py', 4), ('StorJCC.py', 2), ('SomeMMH.py', 2), ('MereHHB2.py', 1)] | [
"[email protected]"
] | |
3ca771e19dc6b23d14b4a8164764a44e5830a529 | 03195a6f98396fd27aedc3c06d81f1553fb1d16b | /pandas/core/_numba/executor.py | 0b59d0717a476b949054b145952a0c044d5e15b9 | [
"BSD-3-Clause"
] | permissive | huaxz1986/pandas | a08d80d27726fe141d449835b9a09265bca5b5e0 | ba2473834fedcf571d3f8245b4b24796873f2736 | refs/heads/master | 2023-06-11T02:20:14.544220 | 2022-01-12T04:40:06 | 2022-01-12T04:40:06 | 131,370,494 | 3 | 4 | BSD-3-Clause | 2018-04-28T03:51:05 | 2018-04-28T03:51:05 | null | UTF-8 | Python | false | false | 1,726 | py | from __future__ import annotations
from typing import (
TYPE_CHECKING,
Callable,
)
import numpy as np
from pandas._typing import Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import (
NUMBA_FUNC_CACHE,
get_jit_arguments,
)
def generate_shared_aggregator(
    func: Callable[..., Scalar],
    engine_kwargs: dict[str, bool] | None,
    cache_key_str: str,
):
    """
    Generate a Numba function that loops over the columns 2D object and applies
    a 1D numba kernel over each column.
    Parameters
    ----------
    func : function
        aggregation function to be applied to each column
    engine_kwargs : dict
        dictionary of arguments to be passed into numba.jit
    cache_key_str: str
        string to access the compiled function of the form
        <caller_type>_<aggregation_type> e.g. rolling_mean, groupby_mean
    Returns
    -------
    Numba function
    """
    nopython, nogil, parallel = get_jit_arguments(engine_kwargs, None)
    # Reuse an already-jitted looper for this (kernel, caller) pair; jit
    # compilation is expensive.
    # NOTE(review): the cache is only read here, never written — presumably
    # the caller is responsible for populating NUMBA_FUNC_CACHE; confirm.
    cache_key = (func, cache_key_str)
    if cache_key in NUMBA_FUNC_CACHE:
        return NUMBA_FUNC_CACHE[cache_key]
    # numba is an optional dependency: the TYPE_CHECKING branch exists only
    # for static analysis; at runtime the soft-dependency import runs.
    if TYPE_CHECKING:
        import numba
    else:
        numba = import_optional_dependency("numba")
    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def column_looper(
        values: np.ndarray,
        start: np.ndarray,
        end: np.ndarray,
        min_periods: int,
        *args,
    ):
        # Apply the 1D kernel column by column; numba.prange parallelizes
        # the column loop when parallel=True.
        result = np.empty((len(start), values.shape[1]), dtype=np.float64)
        for i in numba.prange(values.shape[1]):
            result[:, i] = func(values[:, i], start, end, min_periods, *args)
        return result
    return column_looper
| [
"[email protected]"
] | |
99f8c1a49641c470c778fea08467ebaf332d4693 | 8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a | /graph__networkx__d3__dot_graphviz/graphviz__examples/generate__as__bytes.py | cad13511e6c2200cf6958416c256790986119d81 | [
"CC-BY-4.0"
] | permissive | stepik/SimplePyScripts | 01092eb1b2c1c33756427abb2debbd0c0abf533f | 3259d88cb58b650549080d6f63b15910ae7e4779 | refs/heads/master | 2023-05-15T17:35:55.743164 | 2021-06-11T22:59:07 | 2021-06-11T22:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install graphviz
from graphviz import Digraph
# Demo: build a two-node directed graph and render it to several formats.
g = Digraph('G', format='svg')  # default render format is svg
g.edge('Hello', 'World')
# Get bytes
# pipe() returns the rendered graph as raw bytes in the configured format.
print(g.pipe())
print(g.pipe('png'))  # the format can also be overridden per call
# OR:
# g.format = 'png'
# print(g.pipe())
print(g.pipe('pdf'))
| [
"[email protected]"
] | |
d62a1e6ee7429d4badfe985780e74c2507865591 | 5eb02446f374a9c73f54a859668f46e80f4ef2fe | /ex35.py | f3e8b1edbe06119982737eb64ad94555c1bee7e2 | [] | no_license | Unique-Red/HardwaySeries | 1b93429da48b1b2c0c3b5a6de8e8f5c635cd959a | 3166a864c28e1ea49dce3f427f5f63429b4e2f53 | refs/heads/main | 2023-06-21T14:55:55.116124 | 2021-07-27T01:26:02 | 2021-07-27T01:26:02 | 370,170,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | from sys import exit
def gold_room():
    """Gold-room scene: take fewer than 50 gold pieces to win.

    A non-numeric answer or taking 50+ ends the game via ``dead``.
    """
    print("This room is full of gold. How much do you take?")
    choice = input("> ")

    # Bug fix: the old check ('"0" in choice or "1" in choice') rejected
    # perfectly valid numbers such as "23" and crashed with ValueError on
    # input like "1x".  Parse the number and handle failure explicitly.
    try:
        how_much = int(choice)
    except ValueError:
        dead("Man, learn to type a number.")

    if how_much < 50:
        print("Nice, you're not greedy, you win!")
        exit(0)
    else:
        dead("You greedy bastard!")
def bear_room():
    """Bear scene: taunt the bear once to clear the door, then open it.

    Taking the honey or taunting twice kills the player; any other input
    re-prompts.  Loops until the player dies or reaches the gold room.
    """
    print("There is a bear here.")
    print("The bear has a bunch of honey.")
    print("The fat bear is in front of another door.")
    print("How are you going to move the bear?")
    bear_moved = False  # becomes True after the first successful taunt
    while True:
        choice = input("> ")
        if choice == "take honey":
            dead("The bear looks at you then slaps your face off.")
        elif choice == "taunt bear" and not bear_moved:
            print("The bear has moved from the door.")
            print("You can go through it now.")
            bear_moved = True
        elif choice == "taunt bear" and bear_moved:
            # Second taunt after the bear already moved is fatal.
            dead("The bear gets pissed off and chews your leg off.")
        elif choice == "open door" and bear_moved:
            gold_room()
        else:
            print("I got no idea what that means.")
def cthulhu_room():
    """Cthulhu scene: 'flee' restarts the game, 'head' ends it,
    any other answer replays the scene.
    """
    for line in (
        "Here you see the great evil Cthulhu.",
        "He, it, whatever stares at you and you go insane.",
        "Do you flee for your life or eat your head?",
    ):
        print(line)

    choice = input("> ")
    if "flee" in choice:
        start()
        return
    if "head" in choice:
        dead("Well that was tasty!")
        return
    cthulhu_room()
def dead(why):
    """Report *why* the player died, then terminate the game."""
    message = f"{why} Good job!"
    print(message)
    exit()
def start():
    """Opening scene: pick the left (bear) or right (Cthulhu) door.

    Anything other than "left" or "right" ends the game.
    """
    print("You are in a dark room.")
    print("There is a door to your right and left.")
    print("Which one do you take?")

    rooms = {"left": bear_room, "right": cthulhu_room}
    choice = input("> ")
    if choice in rooms:
        rooms[choice]()
    else:
        dead("You stumble around the room until you starve.")
start() | [
"[email protected]"
] | |
ef456e67563e978d78cbc6a2c22cf101e2d80c1b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/adjur.py | b95db66e7aeb7eacf64d904654b1562db5591749 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 370 | py | ii = [('GodwWSL2.py', 4), ('FerrSDO3.py', 1), ('CarlTFR.py', 2), ('LyttELD.py', 1), ('TalfTAC.py', 2), ('KiddJAE.py', 1), ('BailJD1.py', 1), ('ClarGE.py', 1), ('LandWPA.py', 1), ('AinsWRR.py', 1), ('LandWPA2.py', 2), ('TalfTIT.py', 1), ('NewmJLP.py', 1), ('SoutRD.py', 1), ('HowiWRL2.py', 1), ('BailJD3.py', 1), ('HogaGMM.py', 1), ('AinsWRR2.py', 3), ('HogaGMM2.py', 1)] | [
"[email protected]"
] | |
f4f5aba0f8f2e294996ec623c74604d180bfc276 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/2D_20200722181027.py | ea6e6f492e2b93ebfeedfabbb4e5edb694f6f6ce | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | def array(n,m):
# where n is row size and m is column size
array = [[0 for x in range(n)] for x in range(m)]
print(array)
# Sample 3x5 grid used to demonstrate 2D indexing.
a = [[2, 4, 6, 8, 10], [3, 6, 9, 12, 15], [4, 8, 12, 16, 20]]
# where the first argument reps the row and the second argument reps the column
print(a[0][3])  # row 0, column 3 -> 8
from sys import maxint
def hourGlass(arr):
    """Print the maximum "hourglass" sum of a 2D array.

    An hourglass is the 7-cell pattern: the three cells of a row, the cell
    directly below the middle one, and the three cells two rows down.
    Prints each candidate sum ('total ...') and finally the maximum.
    Returns 0 for an empty array, otherwise None (the result is printed,
    matching the original interface used by the demo call below).
    """
    # Bug fix: the original initialised with `-maxint`, which relied on
    # `from sys import maxint` — a name that no longer exists in Python 3
    # and made this function raise NameError.  -inf is a safe initial max
    # and also handles all-negative grids correctly.
    maxCount = float("-inf")
    if arr != []:
        for i in range(len(arr) - 2):
            for j in range(len(arr[i]) - 2):
                totalCount = (
                    arr[i][j] + arr[i][j + 1] + arr[i][j + 2]
                    + arr[i + 1][j + 1]
                    + arr[i + 2][j] + arr[i + 2][j + 1] + arr[i + 2][j + 2]
                )
                print('total', totalCount)
                if totalCount > maxCount:
                    maxCount = totalCount
        print(maxCount)
    else:
        return 0
print(hourGlass([[-1,-1,0,-9,-2,-2],[-2,-1,-6,-8,-2,-5],[-1,-1,-1,-2,-3,-4],[-1,-9,2,-4,-4,-5],[-7,-3,-3,-2,-9,-9],[-1,-3,-1,-2,-4,-5]])) | [
"[email protected]"
] | |
776d0b19881407fcf3376a214c627dbe68eb9edd | b2e3878ffd64e00cdfc2f4a2227f5b48aa0b7860 | /Modules/llpgenerator.py | 850c6c82240284fc06cb48062dedd35b95580905 | [] | no_license | TheoMoretti/PreShower_ALP-W | c5e149f664aa440b192c729be7540cdea6807cd2 | a93240c7390f0895561a734196beee637b0d8c38 | refs/heads/main | 2023-07-05T00:16:46.324734 | 2021-08-18T14:24:00 | 2021-08-18T14:24:00 | 313,635,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,832 | py | import pythia8
from various import *
from llpmodel import LLPModel
class LLPGenerator():
    """Generates long-lived-particle (LLP) decay events with Pythia 8.

    ``model`` names the LLP (the code below handles "DarkPhoton",
    "DarkHiggs" and "ALP-W"); ``setup`` chooses where the decay table
    comes from: "theory" (channels taken from LLPModel) or "pythia"
    (Pythia's built-in Z'/scalar settings).
    """
    def __init__(self,model, setup="theory"):
        self.model = model
        self.setup = setup
    #specify Dark Photon model
    def specify_darkphoton(self,pythia,mass):
        """Configure ``pythia`` for dark-photon decays at the given mass.

        Returns ``(pythia, pid)`` where pid is the stand-in particle used:
        553 (Y(1s)) for mass <= ~1.5, otherwise 32 (Z').
        """
        pythia.readString("ProcessLevel:all = off");
        pythia.readString("PartonLevel:FSR = off");
        # Keep pi0 / K_S / K_L stable so they show up in the final state.
        pythia.readString("111:mayDecay = off");
        pythia.readString("310:mayDecay = off");
        pythia.readString("130:mayDecay = off");
        #define LLP model
        llpmodel = LLPModel(self.model, mass=mass, coupling=1)
        channels, branching = llpmodel.decays, llpmodel.branching
        #Decay into Hadrons
        if mass <= 1.50001:
            #use Y(1s)
            pythia.readString("553:m0="+str(mass));
            # First channel replaces the whole decay table ("oneChannel"),
            # the following ones are appended ("addChannel").
            command="oneChannel"
            for channel in channels:
                # ignore 'other' channel and decay into quarks
                if channels[channel][0] == "large": continue
                if channels[channel][1] is None: continue
                # bRatio
                bRatio = str(branching[channel])
                # products
                products = ""
                for pid in channels[channel][1]: products = products + str(pid) + " "
                # meMode
                meMode =channels[channel][2]
                # add to pythia
                pythia.readString("553:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
                command="addChannel"
            return pythia, 553
        else:
            #use Z'
            pythia.readString("32:m0="+str(mass));
            command="oneChannel"
            for channel in channels:
                # ignore decay into hadrons
                if channels[channel][0] == "small": continue
                # bRatio
                bRatio = str(branching[channel])
                # products
                products = ""
                for pid in channels[channel][1]: products = products + str(pid) + " "
                # meMode
                meMode =channels[channel][2]
                # add to pythia
                pythia.readString("32:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
                command="addChannel"
            return pythia, 32
    #specify Dark Higgs model
    def specify_darkhiggs(self,pythia,mass):
        """Configure ``pythia`` for dark-Higgs decays at the given mass.

        Returns ``(pythia, pid)``: 10551 (etab0(1P)) for mass <= ~2,
        otherwise 25 (Higgs).
        """
        pythia.readString("ProcessLevel:all = off");
        pythia.readString("PartonLevel:FSR = off");
        pythia.readString("111:mayDecay = off");
        pythia.readString("310:mayDecay = off");
        pythia.readString("130:mayDecay = off");
        #define LLP model
        llpmodel = LLPModel(self.model, mass=mass, coupling=1)
        channels, branching = llpmodel.decays, llpmodel.branching
        #Decay into Hadrons
        if mass <= 2.0001:
            #use etab0(1P)
            pythia.readString("10551:m0="+str(mass));
            command="oneChannel"
            for channel in channels:
                # ignore 'other' channel and decay into quarks
                if channels[channel][0] == "large": continue
                if channels[channel][1] is None: continue
                # bRatio
                bRatio = str(branching[channel])
                # products
                products = ""
                for pid in channels[channel][1]: products = products + str(pid) + " "
                # meMode
                meMode =channels[channel][2]
                # add to pythia
                pythia.readString("10551:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
                command="addChannel"
            return pythia, 10551
        else:
            #use Higgs
            pythia.readString("25:m0="+str(mass));
            command="oneChannel"
            for channel in channels:
                # ignore decay into hadrons
                if channels[channel][0] == "small": continue
                # bRatio
                bRatio = str(branching[channel])
                # products
                products = ""
                for pid in channels[channel][1]: products = products + str(pid) + " "
                # meMode
                meMode =channels[channel][2]
                # add to pythia
                pythia.readString("25:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
                command="addChannel"
            return pythia, 25
    #specify ALP-W model
    def specify_alpw(self,pythia,mass):
        """Configure ``pythia`` for ALP-W decays at the given mass.

        Returns ``(pythia, pid)``: 10551 for mass <= 1, otherwise 25.
        Note: unlike the methods above, pi0/K decays are left enabled here.
        """
        pythia.readString("ProcessLevel:all = off");
        pythia.readString("PartonLevel:FSR = off");
        #define LLP model
        llpmodel = LLPModel(self.model, mass=mass, coupling=1)
        channels, branching = llpmodel.decays, llpmodel.branching
        #Decay into Hadrons
        if mass <= 1:
            #use etab0(1P)
            pythia.readString("10551:m0="+str(mass));
            command="oneChannel"
            for channel in channels:
                # ignore 'other' channel and decay into quarks
                if channels[channel][0] == "large": continue
                if channels[channel][1] is None: continue
                # bRatio
                bRatio = str(branching[channel])
                # products
                products = ""
                for pid in channels[channel][1]: products = products + str(pid) + " "
                # meMode
                meMode =channels[channel][2]
                # add to pythia
                pythia.readString("10551:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
                command="addChannel"
            return pythia, 10551
        else:
            #use Higgs
            pythia.readString("25:m0="+str(mass));
            command="oneChannel"
            for channel in channels:
                # ignore decay into hadrons
                if channels[channel][0] == "small": continue
                # bRatio
                bRatio = str(branching[channel])
                # products
                products = ""
                for pid in channels[channel][1]: products = products + str(pid) + " "
                # meMode
                meMode =channels[channel][2]
                # add to pythia
                pythia.readString("25:"+command+" = 1 "+bRatio+" "+meMode + " "+products)
                command="addChannel"
            return pythia, 25
    #specify Dark Photon model
    def specify_darkphoton_pythia(self,pythia,mass):
        """Dark photon via Pythia's built-in Z' with dark-photon-like
        couplings.  Returns ``(pythia, 32)``."""
        pythia.readString("Zprime:universality=on");
        pythia.readString("32:m0="+str(mass));
        pythia.readString("Zprime:vd=-0.3333");
        pythia.readString("Zprime:vu=0.6666");
        pythia.readString("Zprime:ve=-1");
        pythia.readString("Zprime:vnue=0");
        pythia.readString("Zprime:ad=0");
        pythia.readString("Zprime:au=0");
        pythia.readString("Zprime:ae=0");
        pythia.readString("Zprime:anue=0");
        pythia.readString("ProcessLevel:all = off");
        pythia.readString("PartonLevel:FSR = off");
        pythia.readString("111:mayDecay = off");
        pythia.readString("310:mayDecay = off");
        pythia.readString("130:mayDecay = off");
        return pythia, 32
    #specify Dark Higgs model
    def specify_darkhiggs_pythia(self,pythia,mass):
        """Dark Higgs via Pythia's built-in scalar (pid 54) with purely
        vector couplings to fermions.  Returns ``(pythia, 54)``."""
        pythia.readString("54:m0="+str(mass));
        pythia.readString("Sdm:vf=1");
        pythia.readString("Sdm:af=0");
        pythia.readString("Sdm:vX=0");
        pythia.readString("Sdm:aX=0");
        pythia.readString("ProcessLevel:all = off");
        pythia.readString("PartonLevel:FSR = off");
        pythia.readString("111:mayDecay = off");
        pythia.readString("310:mayDecay = off");
        pythia.readString("130:mayDecay = off");
        return pythia, 54
    # function that simulates `nevent` dark photon decays for dark photon mass `mass`
    def simulate_events(self,mass, nevent=1000, print_first_event=False,print_partile_data = False,outputfile=None):
        """Decay ``nevent`` LLPs at rest; return a list of events, each a
        list of [pid, px, py, pz, E] for the final-state particles.

        NOTE(review): if (model, setup) matches none of the branches below,
        ``pid`` is never assigned and the event loop raises NameError.
        """
        #specify particle
        px,py,pz,en = 0,0,0,mass
        status,col,acol,scale,pol = 2,0,0,0,9.
        #initialize pythia
        pythia = pythia8.Pythia()
        if self.model=="DarkPhoton" and self.setup=="theory":
            pythia, pid =self.specify_darkphoton(pythia=pythia,mass=mass)
        if self.model=="DarkHiggs" and self.setup=="theory":
            pythia, pid =self.specify_darkhiggs(pythia=pythia,mass=mass)
        if self.model=="DarkPhoton" and self.setup=="pythia":
            pythia, pid =self.specify_darkphoton_pythia(pythia=pythia,mass=mass)
        if self.model=="DarkHiggs" and self.setup=="pythia":
            pythia, pid =self.specify_darkhiggs_pythia(pythia=pythia,mass=mass)
        if self.model=="ALP-W":
            pythia, pid =self.specify_alpw(pythia=pythia,mass=mass)
        if print_partile_data: print (pythia.particleData)
        pythia.init()
        # Begin event loop. Generate event. Skip if error. List first one.
        events = []
        for iEvent in range(0, nevent):
            pythia.event.reset()
            # Place a single LLP at rest (status 2 = decayed) and let
            # Pythia decay it.
            pythia.event.append(pid, status, col, acol, px, py, pz, en, mass, scale, pol)
            pythia.next()
            if print_first_event and iEvent==0: print(pythia.event)
            #Loop over particles in event. Find pions
            event = []
            for part in pythia.event:
                if part.status()>0:
                    event.append([part.id(),part.px(),part.py(),part.pz(),part.e()])
            events.append(event)
        if outputfile is not None:
            np.save(outputfile,events)
        return events
    # function that extracts branching fractions
    def extract_br(self,events):
        """Return {str(sorted final-state pids): fraction of events}."""
        nevent = float(len(events))
        branching_fraction={}
        for event in events:
            final_state=[particle[0] for particle in event]
            # Sort so that the same final state always maps to the same key.
            final_state=list(np.sort(final_state))
            if str(final_state) in branching_fraction.keys():
                branching_fraction[str(final_state)] += 1./nevent
            else:
                branching_fraction[str(final_state)] = 1./nevent
        return branching_fraction
    # function that scans over the mass and obtains the branching fraction
    def br_scan(self,massmin=0.105, massmax=1.95, nmass = 40, nevent=1000):
        """Simulate decays at ``nmass`` masses in [massmin, massmax] and
        return an array of [mass, branching-fraction dict] rows."""
        branching_fractions=[]
        for mass in np.linspace(massmin, massmax, nmass):
            events=self.simulate_events(mass=mass,nevent=nevent)
            bf=self.extract_br(events)
            branching_fractions.append([mass,bf])
        return np.array(branching_fractions)
    # scan over mass and calculate BR, saving the result to disk
    def scan_br(self, massmin=0.01, massmax=2.0, nmass=40, nevent=1000):
        # Simulate BR
        data=self.br_scan(massmin=massmin, massmax=massmax,nmass=nmass, nevent=nevent)
        np.save("files/results/brscan_"+self.model+".npy",data)
| [
"[email protected]"
] | |
f2961d2cf0e5afc4a1c5617b84a68c33f5035a08 | 5f91ef601aa3e58cb5c24f1a4cfefd264078f4ee | /Python Task Day4/Text Wrap.py | fbee398512838dcdefb4e3d1b75cbbab797692cd | [] | no_license | Jayasree-Repalla/Innomatics_Internship_APR_21 | 386a71d2a7da788aa6088087c5e42c97271c6afe | f5505045a09b8445c704c0b0135502731fc42a5f | refs/heads/main | 2023-05-09T13:59:20.194465 | 2021-05-29T12:21:47 | 2021-05-29T12:21:47 | 363,651,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import textwrap
def wrap(string, max_width):
    """Wrap *string* into lines of at most *max_width* characters and
    return them as a single string with each line newline-terminated."""
    wrapped_lines = textwrap.wrap(string, max_width)
    return "".join(line + "\n" for line in wrapped_lines)
"[email protected]"
] | |
c807082254ccacd00922058c4aff343aabe56cb0 | 46a784ddc9377216faa9ba205729ca6524f901b6 | /classoutput.py | 4941a65639929d210976591e886a48ed590b58f3 | [] | no_license | asiddiq1/MapQuestAPI | 9933253e9baaea54df0fb1f0d6f09035ca577a0c | c2d8813238cb3023169c2f1dae682dd3bb545696 | refs/heads/master | 2021-05-15T14:24:34.920005 | 2018-04-10T00:39:18 | 2018-04-10T00:39:18 | 107,197,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,649 | py | #Aisha Siddiq lab 12:00 - 1:50 pm (project 3)
class STEPS:
    """Wraps a MapQuest-style route response and extracts the turn-by-turn
    narratives for every leg."""
    def __init__(self, jsondata):
        self.jsonD = jsondata
    def return_data(self) -> list:
        '''Returns the json steps in a list
        '''
        return ["DIRECTIONS"] + [
            maneuver['narrative']
            for leg in self.jsonD['route']['legs']
            for maneuver in leg['maneuvers']
        ]
class TOTALDISTANCE:
    """Wraps a route response and reports the rounded total distance."""
    def __init__(self, jsondata):
        self.jsonD = jsondata
    def return_data(self) -> list:
        '''Returns the total distance in a list
        '''
        rounded_distance = round(self.jsonD['route']['distance'])
        return ['TOTAL DISTANCE: {} miles'.format(rounded_distance)]
class TOTALTIME:
    """Wraps a route response and reports the total time in whole minutes."""
    def __init__(self, jsondata):
        self.jsonD = jsondata
    def return_data(self) -> list:
        '''Returns the total time in a list
        '''
        # The route time is given in seconds; convert to minutes.
        minutes = round(self.jsonD['route']['time'] / 60)
        return ['TOTAL TIME: {} minutes'.format(minutes)]
class LATLONG:
    """Wraps a route response and formats each location's latitude and
    longitude with N/S and E/W hemisphere suffixes."""
    def __init__(self, jsondata):
        self.jsonD = jsondata
    def _format_axis(self, value, positive_suffix, negative_suffix):
        # Zero is reported bare (no suffix, no decimals), matching the
        # original formatting.
        if value < 0:
            return '{:.2f}{}'.format(-value, negative_suffix)
        if value > 0:
            return '{:.2f}{}'.format(value, positive_suffix)
        return '{}'.format(value)
    def return_data(self) -> list:
        '''Returns the formatted longitude and latitude in a list
        '''
        latlonglist = ['LATLONGS']
        for location in self.jsonD['route']['locations']:
            point = location['latLng']
            latitude = self._format_axis(point['lat'], 'N', 'S')
            longitude = self._format_axis(point['lng'], 'E', 'W')
            latlonglist.append(latitude + ' ' + longitude)
        return latlonglist
class ELEVATION:
    """Wraps a list of elevation responses and reports each profile point's
    height scaled by 3.2808 (the metres-to-feet factor) and rounded."""
    def __init__(self, jsonlist):
        self.jsonDlist = jsonlist
    def return_data(self) -> list:
        '''Returns the elevation in a list
        '''
        scale = 3.2808  # ~feet per metre
        elevations = ['ELEVATIONS']
        for response in self.jsonDlist:
            elevations.extend(
                round(point['height'] * scale)
                for point in response['elevationProfile']
            )
        return elevations
| [
"[email protected]"
] | |
31640ba88e52306b8f9a5469864d401ce4d992e4 | f101fe75892da8d7b5258d22bd31534d47f39ec1 | /feature.py | 039980b31ea2d443121913c748e60ed024f11554 | [] | no_license | xianjunxia/Acoustic-event-detection-with-feature-space-attention-based-convolution-recurrent-neural-network | 2ae9d4d0148f5082cc6739f753bf750e1940ecfb | d2a7b36700e798e0da02d3efebb27cd340235f36 | refs/heads/master | 2020-03-22T17:11:53.028900 | 2018-07-10T05:15:32 | 2018-07-10T05:15:32 | 140,379,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,150 | py | import wave
import numpy as np
import utils
#import librosa
from IPython import embed
import os
from sklearn import preprocessing
import scipy.io as sio
def load_audio(filename, mono=True, fs=44100):
    """Load a .wav file and return ``(audio_data, sample_rate)``.

    Samples are scaled to floats in [-1, 1); multi-channel audio is
    optionally down-mixed to mono and resampled to ``fs``.  Returns
    ``(None, None)`` for any non-wav file.

    Parameters
    ----------
    filename : str
        Path to the audio file; only the '.wav' extension is handled.
    mono : bool
        When True, average all channels into a single mono signal.
    fs : int
        Target sampling rate; used when it differs from the file's rate.
    """
    file_base, file_extension = os.path.splitext(filename)
    if file_extension == '.wav':
        _audio_file = wave.open(filename)
        # Audio info
        sample_rate = _audio_file.getframerate()
        sample_width = _audio_file.getsampwidth()
        number_of_channels = _audio_file.getnchannels()
        number_of_frames = _audio_file.getnframes()
        # Read raw bytes
        data = _audio_file.readframes(number_of_frames)
        _audio_file.close()
        # Convert bytes based on sample_width
        num_samples, remainder = divmod(len(data), sample_width * number_of_channels)
        if remainder > 0:
            raise ValueError('The length of data is not a multiple of sample size * number of channels.')
        if sample_width > 4:
            raise ValueError('Sample size cannot be bigger than 4 bytes.')
        if sample_width == 3:
            # 24 bit audio
            # Widen each 3-byte sample to 4 bytes, sign-extending with the
            # top bit of the most significant byte, then view as int32.
            # NOTE(review): np.fromstring is deprecated in modern NumPy;
            # np.frombuffer is the drop-in replacement — confirm version.
            a = np.empty((num_samples, number_of_channels, 4), dtype=np.uint8)
            raw_bytes = np.fromstring(data, dtype=np.uint8)
            a[:, :, :sample_width] = raw_bytes.reshape(-1, number_of_channels, sample_width)
            a[:, :, sample_width:] = (a[:, :, sample_width - 1:sample_width] >> 7) * 255
            audio_data = a.view('<i4').reshape(a.shape[:-1]).T
        else:
            # 8 bit samples are stored as unsigned ints; others as signed ints.
            dt_char = 'u' if sample_width == 1 else 'i'
            a = np.fromstring(data, dtype='<%s%d' % (dt_char, sample_width))
            audio_data = a.reshape(-1, number_of_channels).T
        if mono:
            # Down-mix audio
            audio_data = np.mean(audio_data, axis=0)
        # Convert int values into float
        audio_data = audio_data / float(2 ** (sample_width * 8 - 1) + 1)
        # Resample
        # NOTE(review): `import librosa` is commented out at the top of this
        # file, so this branch would raise NameError when rates differ.
        if fs != sample_rate:
            audio_data = librosa.core.resample(audio_data, sample_rate, fs)
            sample_rate = fs
        return audio_data, sample_rate
    return None, None
def load_desc_file(_desc_file):
    """Parse a tab-separated annotation file into a label dictionary.

    Each line is expected as: <audio path>\t<?>\t<onset>\t<offset>\t<class>.
    Returns a dict mapping the audio file's base name to a list of
    [onset_seconds, offset_seconds, class_index] entries, where the class
    index comes from the module-level ``__class_labels`` mapping.
    """
    _desc_dict = dict()
    # Bug fix: the original called open() without ever closing the handle;
    # the context manager guarantees it is released.  The unused debug
    # counter `cnt` was also removed.
    with open(_desc_file) as desc_file:
        for line in desc_file:
            words = line.strip().split('\t')
            name = words[0].split('/')[-1]
            if name not in _desc_dict:
                _desc_dict[name] = list()
            _desc_dict[name].append([float(words[2]), float(words[3]), __class_labels[words[-1]]])
    return _desc_dict
def extract_mbe(_y, _sr, _nfft, _nb_mel):
    """Return log mel-band energies of signal ``_y``.

    Parameters
    ----------
    _y : np.ndarray
        Mono audio signal.
    _sr : int
        Sampling rate of the signal.
    _nfft : int
        FFT window length; hop is 50% of the window.
    _nb_mel : int
        Number of mel bands.

    Returns
    -------
    np.ndarray of shape (_nb_mel, n_frames) with log mel-band energies.
    """
    # Cleanup: the original carried ~20 lines of dead plotting/debug code as
    # a bare triple-quoted string that was built and discarded on every
    # call; it has been removed.
    # Magnitude spectrogram (power=1) with 50% overlap between frames.
    spec, n_fft = librosa.core.spectrum._spectrogram(y=_y, n_fft=_nfft, hop_length=_nfft/2, power=1)
    # NOTE(review): `import librosa` is commented out at the top of this
    # file — this function raises NameError as the file stands; confirm.
    mel_basis = librosa.filters.mel(sr=_sr, n_fft=_nfft, n_mels=_nb_mel)
    return np.log(np.dot(mel_basis, spec))
# ###################################################################
# Main script starts here
# ###################################################################
is_mono = True  # True -> 'mon' (mono) features; False -> 'bin' (binaural)
# Mapping from DCASE 2017 street-scene event names to label indices.
__class_labels = {
    'brakes squeaking': 0,
    'car': 1,
    'children': 2,
    'large vehicle': 3,
    'people speaking': 4,
    'people walking': 5
}
# location of data.
#folds_list = [1, 2, 3, 4]
folds_list = [0]  # only fold 0 (the evaluation setup) is processed
evaluation_setup_folder = '/data/users/21799506/Data/DCASE2017_Data/Evaluation/evaluation_setup/'
audio_folder = '/data/users/21799506/Data/DCASE2017_Data/Evaluation/audio/'
# Output
feat_folder = '/data/users/21799506/Data/DCASE2017_Data/Evaluation/feat/'
utils.create_folder(feat_folder)
# User set parameters
nfft = 2048
win_len = nfft
# NOTE(review): true division makes this a float on Python 3; downstream
# frame math and librosa may expect an int hop length — confirm.
hop_len = win_len / 2
nb_mel_bands = 40
sr = 44100
# -----------------------------------------------------------------------
# Feature extraction and label generation
# -----------------------------------------------------------------------
# Load labels
train_file = os.path.join(evaluation_setup_folder, 'street_fold{}_train.txt'.format(0))
evaluate_file = os.path.join(evaluation_setup_folder, 'street_fold{}_evaluate.txt'.format(0))
print(train_file)
desc_dict = load_desc_file(train_file)
desc_dict.update(load_desc_file(evaluate_file)) # contains labels for all the audio in the dataset
# NOTE(review): the triple-quoted block below is disabled feature-extraction
# code kept as a bare string literal; it is evaluated and discarded at
# import time and never runs.
'''
# Extract features for all audio files, and save it along with labels
for audio_filename in os.listdir(audio_folder):
    audio_file = os.path.join(audio_folder, audio_filename)
    print('Extracting features and label for : {}'.format(audio_file))
    y, sr = load_audio(audio_file, mono=is_mono, fs=sr)
    mbe = None
    if is_mono:
        mbe = extract_mbe(y, sr, nfft, nb_mel_bands).T
    else:
        for ch in range(y.shape[0]):
            mbe_ch = extract_mbe(y[ch, :], sr, nfft, nb_mel_bands).T
            if mbe is None:
                mbe = mbe_ch
            else:
                mbe = np.concatenate((mbe, mbe_ch), 1)
    label = np.zeros((mbe.shape[0], len(__class_labels)))
    tmp_data = np.array(desc_dict[audio_filename])
    frame_start = np.floor(tmp_data[:, 0] * sr / hop_len).astype(int)
    frame_end = np.ceil(tmp_data[:, 1] * sr / hop_len).astype(int)
    se_class = tmp_data[:, 2].astype(int)
    for ind, val in enumerate(se_class):
        label[frame_start[ind]:frame_end[ind], val] = 1
    tmp_feat_file = os.path.join(feat_folder, '{}_{}.npz'.format(audio_filename, 'mon' if is_mono else 'bin'))
    np.savez(tmp_feat_file, mbe, label)
'''
# -----------------------------------------------------------------------
# Feature Normalization
# -----------------------------------------------------------------------
for fold in folds_list:
    # NOTE(review): '.format(0)' hard-codes fold 0 here, so every fold would
    # read the same setup files even though the loop variable is `fold` —
    # confirm whether '.format(fold)' was intended.
    train_file = os.path.join(evaluation_setup_folder, 'street_fold{}_train.txt'.format(0))
    evaluate_file = os.path.join(evaluation_setup_folder, 'street_fold{}_evaluate.txt'.format(0))
    train_dict = load_desc_file(train_file)
    test_dict = load_desc_file(evaluate_file)
    X_train, Y_train, X_test, Y_test = None, None, None, None
    # Stack the per-file features/labels into one training matrix.
    for key in train_dict.keys():
        tmp_feat_file = os.path.join(feat_folder, '{}_{}.npz'.format(key, 'mon' if is_mono else 'bin'))
        dmp = np.load(tmp_feat_file)
        tmp_mbe, tmp_label = dmp['arr_0'], dmp['arr_1']
        if X_train is None:
            X_train, Y_train = tmp_mbe, tmp_label
        else:
            X_train, Y_train = np.concatenate((X_train, tmp_mbe), 0), np.concatenate((Y_train, tmp_label), 0)
    # Same stacking for the evaluation split.
    for key in test_dict.keys():
        tmp_feat_file = os.path.join(feat_folder, '{}_{}.npz'.format(key, 'mon' if is_mono else 'bin'))
        dmp = np.load(tmp_feat_file)
        tmp_mbe, tmp_label = dmp['arr_0'], dmp['arr_1']
        if X_test is None:
            X_test, Y_test = tmp_mbe, tmp_label
        else:
            X_test, Y_test = np.concatenate((X_test, tmp_mbe), 0), np.concatenate((Y_test, tmp_label), 0)
    # Normalize the training data, and scale the testing data using the training data weights
    scaler = preprocessing.StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    normalized_feat_file = os.path.join(feat_folder, 'mbe_{}_fold{}_GAN_allthreeclass.npz'.format('mon' if is_mono else 'bin', fold))
    np.savez(normalized_feat_file, X_train, Y_train, X_test, Y_test)
    print(X_train.shape)
    print('normalized_feat_file : {}'.format(normalized_feat_file))
| [
"[email protected]"
] | |
44856c368483e969256dc97c44a426028c3bbf50 | 980841fc87bba9a00d849f372528b888453b89ba | /Python 3 Scripting for System Administrators/Accepting Simple Positional Arguments.py | 20e611ada99bb5acedb5953be31816bf9a56f018 | [] | no_license | Frijke1978/LinuxAcademy | c682eedb48ed637ffe28a55cdfbc7d33ba635779 | 5100f96b5ba56063042ced3b2737057016caaff3 | refs/heads/master | 2022-03-24T12:28:25.413483 | 2019-12-21T12:27:02 | 2019-12-21T12:27:02 | 229,418,319 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | Accepting Simple Positional Arguments
Most of the scripts and utilities that we work with accept positional arguments instead of prompting us for information after we’ve run the command. The simplest way for us to do this in Python is to use the sys module’s argv attribute. Let’s try this out by writing a small script that echoes our first argument back to us:
~/bin/param_echo
#!/usr/bin/env python3.6
import sys
print(f"First argument {sys.argv[0]}")
After we make this executable and give it a shot, we see that the first argument is the script itself:
$ chmod u+x ~/bin/param_echo
$ param_echo testing
First argument /home/user/bin/param_echo
That’s not quite what we wanted, but now we know that argv will contain the script and we’ll need to get the index of 1 for our first argument. Let’s adjust our script to echo all of the arguments except the script name and then echo the first positional argument by itself:
~/bin/param_echo
#!/usr/bin/env python3.6
import sys
print(f"Positional arguments: {sys.argv[1:]}")
print(f"First argument: {sys.argv[1]}")
Trying the same command again, we get a much different result:
$ param_echo testing
Positional arguments: ['testing']
First argument: testing
$ param_echo testing testing12 'another argument'
Positional arguments: ['testing', 'testing12', 'another argument']
First argument: testing
$ param_echo
Positional arguments: []
Traceback (most recent call last):
File "/home/user/bin/param_echo", line 6, in <module>
print(f"First argument: {sys.argv[1]}")
IndexError: list index out of range
This shows us a few things about working with argv:
Positional arguments are based on spaces unless we explicitly wrap the argument in quotes.
We can get a slice of the first index and after without worrying about it being empty.
We risk an IndexError if we assume that there will be an argument for a specific position and one isn’t given.
Using sys.argv is the simplest way to allow our scripts to accept positional arguments. In the next video, we’ll explore a standard library package that will allow us to provide a more robust command line experience with help text, named arguments, and flags. | [
"[email protected]"
] | |
d3905ca9265658e5bf4b7a91a378ed0ea340b520 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=1_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=82/sched.py | 304905f0cc9f12230fa3ed58eca351b59ad910a9 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | -X FMLP -Q 0 -L 2 105 400
-X FMLP -Q 0 -L 2 85 250
-X FMLP -Q 0 -L 2 70 250
-X FMLP -Q 1 -L 2 66 200
-X FMLP -Q 1 -L 2 64 250
-X FMLP -Q 1 -L 2 50 200
-X FMLP -Q 2 -L 1 41 150
-X FMLP -Q 2 -L 1 40 125
-X FMLP -Q 2 -L 1 34 100
-X FMLP -Q 3 -L 1 33 200
-X FMLP -Q 3 -L 1 20 250
-X FMLP -Q 3 -L 1 10 100
| [
"[email protected]"
] | |
0e647dd279872f9ca98db25c23550b1a1e7e5fb4 | df83f97ed2c6dd199005e96bc7c494cfb3b49f8c | /GeeksForGeeks/String Rotations.py | 42ed217509cdfcaf23e1e662e437f71bfb0dfa7b | [] | no_license | poojan14/Python-Practice | 45f0b68b0ad2f92bbf0b92286602d64f3b1ae992 | ed98acc788ba4a1b53bec3d0757108abb5274c0f | refs/heads/master | 2022-03-27T18:24:18.130598 | 2019-12-25T07:26:09 | 2019-12-25T07:26:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | '''
Given strings s1 and s2, you need to find if s2 is a rotated version of the string s1. The strings are lowercase.
'''
if __name__ == '__main__':
    # A string s2 is a rotation of s1 iff both have the same length and
    # s2 occurs inside s1 concatenated with itself.
    for _ in range(int(input())):
        first = input()
        second = input()
        if len(first) == len(second) and second in first + first:
            print(1)
        else:
            print(0)
| [
"[email protected]"
] | |
1a95afb8fe2a0cbbec27d84d31a8ca32f302e201 | d1847e96c14a7d06aeab2a557eb25b1c6d5170d7 | /Python Web App/myapp.py | 65c164ffd18f5fef19f59380536518c22555e13e | [] | no_license | ANA-POTJE/WEB_Applications | 5dc043b9b63ed5ddb1cc8a17dba4d5de6fb68712 | c9c0869b9f8787eb8e400a4f774f9ba387e3bf71 | refs/heads/master | 2022-11-09T07:53:30.720297 | 2020-06-18T14:27:53 | 2020-06-18T14:27:53 | 273,253,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,230 | py | import yfinance as yf
import streamlit as st
st.write("""
# Simple Stock Price App
Shown are the stock closing price and volume of Google!
""")
# https://towardsdatascience.com/how-to-get-stock-data-using-python-c0de1df17e75
#define the ticker symbol
tickerSymbol = 'GOOGL'
#get data on this ticker
tickerData = yf.Ticker(tickerSymbol)
#get the historical prices for this ticker
tickerDf = tickerData.history(period='1d', start='2010-5-31', end='2020-5-31')
# Open High Low Close Volume Dividends Stock Splits
st.line_chart(tickerDf.Close)
st.line_chart(tickerDf.Volume)
#Running the web app
#After saving the code into a file called myapp.py, fire up the command prompt
#(or Power Shell in Microsoft Windows) and run the following command:
#####
##### WORKED IN ANACONDA PROMPT!!! (conda activate env first!)
#####
# streamlit run myapp.py
#Next, we should see the following message:
#> streamlit run myapp.py
#You can now view your Streamlit app in your browser.
#Local URL: http://localhost:8501
#Network URL: http://10.0.0.11:8501
#In a short moment, an internet browser window should pop-up and directs you to the
#created web app by taking you to [http://localhost:8501.]http://localhost:8501 as shown below.
| [
"[email protected]"
] | |
a17893e3403ed935e013c8026c259ffe22a74959 | 64ef95039cec3c508b8e3ab911a3318121778119 | /day_3_ai_boot_camp_.py | 40deaa3367a0ea97035e9ce5b03d418a833f1188 | [] | no_license | ALEENA-KT/Practical-AI-Bootcamp | c98f752112e8febb7e7d324ded177f5d36dd0180 | 0a12a5124e4587decec21354f0f0dbbc40ea4fc9 | refs/heads/main | 2023-08-18T02:40:34.992591 | 2021-09-13T18:22:45 | 2021-09-13T18:22:45 | 404,694,854 | 0 | 0 | null | 2021-09-09T12:13:57 | 2021-09-09T11:25:19 | null | UTF-8 | Python | false | false | 3,442 | py | # -*- coding: utf-8 -*-
"""Day 3 AI BOOT CAMP .ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1e_Ee9jcv9rIfmnVXTXQAktNKBwu0GejP
"""
import tensorflow_datasets as tfds
print(tfds.list_builders())
dataloader = tfds.load("cifar10", as_supervised=True)
train, test = dataloader["train"], dataloader["test"]
import tensorflow as tf
directory_url = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/'
file_names = ['cowper.txt', 'derby.txt', 'butler.txt']
file_paths = [
tf.keras.utils.get_file(file_name, directory_url + file_name)
for file_name in file_names
]
dataset = tf.data.TextLineDataset(file_paths)
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor()
)
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor()
)
import tensorflow as tf
directory_url = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/'
file_names = ['cowper.txt', 'derby.txt', 'butler.txt']
file_paths = [
tf.keras.utils.get_file(file_name, directory_url + file_name)
for file_name in file_names
]
dataset = tf.data.TextLineDataset(file_paths)
for line in dataset.take(5):
print(line.numpy())
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor()
)
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor()
)
train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True)
train_features, train_labels = next(iter(train_dataloader))
print(f"Feature batch shape: {train_features.size()}")
print(f"Labels batch shape: {train_labels.size()}")
img = train_features[0].squeeze()
label = train_labels[0]
plt.imshow(img, cmap="gray")
plt.show()
print(f"Label: {label}")
import tensorflow_datasets as tfds
dataloader = tfds.load("cifar10", as_supervised=True)
train, test = dataloader["train"], dataloader["test"]
train = train.map(
lambda image, label: (tf.image.convert_image_dtype(image, tf.float32), label)
).cache().map(
lambda image, label: (tf.image.random_flip_left_right(image), label)
).map(
lambda image, label: (tf.image.random_contrast(image, lower=0.0, upper=1.0), label)
).shuffle(
100
).batch(
64
).repeat()
import tensorflow as tf
directory_url = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/'
file_names = ['cowper.txt', 'derby.txt', 'butler.txt']
file_paths = [
tf.keras.utils.get_file(file_name, directory_url + file_name)
for file_name in file_names
]
dataset = tf.data.TextLineDataset(file_paths)
import tensorflow_datasets as tfds
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import EarlyStopping
import tensorflow.keras.backend as K
import numpy as np
from lrfinder import LRFinder | [
"[email protected]"
] | |
9c726b92873e564d1807d53aeb25eb416f88fba3 | e6c65e2e354336a4bea5b6a4ccbccd3682915fe2 | /out-bin/py/google/fhir/seqex/bundle_to_seqex_test.runfiles/pypi__apache_beam_2_9_0/apache_beam/runners/worker/sideinputs_test.py | 57d59bfa69ad81880b5237c6baf3ea3f0406a320 | [
"Apache-2.0"
] | permissive | rasalt/fhir-datalab | c30ab773d84983dd04a37e9d0ddec8bf2824b8a4 | 3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de | refs/heads/master | 2021-10-09T05:51:04.593416 | 2018-12-21T18:11:03 | 2018-12-22T05:38:32 | 162,744,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__apache_beam_2_9_0/apache_beam/runners/worker/sideinputs_test.py | [
"[email protected]"
] | |
fcf1f6548924e0a7b8d556c5614e9aee7511b172 | ffea8d9c5a742170fb21c5c95e3f84ce9ab2f3bd | /algorithms_find_unique_chars.py | 43ee638f4f6311ce56d55f48d9b623bd298e71d9 | [] | no_license | catechnix/greentree | 3b8583bd4ccb1a506f3e24f03a6c1592f1664518 | ffcd7b1b26fa6552b4f58ac9645151afb591628b | refs/heads/master | 2023-04-08T23:41:15.502014 | 2021-04-03T03:48:07 | 2021-04-03T03:48:07 | 288,299,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | # Given a string, find the first non-repeating character in it and return its index.
# If it doesn't exist, return -1. # Note: all the input strings are already lowercase.
#Approach 1
def solution(s):
    """Return the index of the first non-repeating character in s, or -1."""
    # First pass: count how many times each character occurs.
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    # Second pass: the first character whose count is exactly 1 wins.
    for idx, ch in enumerate(s):
        if counts[ch] == 1:
            return idx
    return -1
print(solution('alphabet'))
print(solution('barbados'))
print(solution('crunchy'))
print('###')
#Approach 2
import collections
def solution(s):
    """Return the index of the first non-repeating character in s, or -1."""
    # Counter gives per-character frequencies in one pass.
    occurrences = collections.Counter(s)
    # First index whose character occurs exactly once; -1 if none exists.
    return next((i for i, ch in enumerate(s) if occurrences[ch] == 1), -1)
print(solution('alphabet'))
print(solution('barbados'))
print(solution('crunchy')) | [
"[email protected]"
] | |
537cc1b377a1a29fe985de13d1284703ca373594 | ebcc40516adba151e6a1c772223b0726899a26eb | /slicedimage/url/__init__.py | 903fa8c5d102018aed1a5b5cd312397b50a9e499 | [
"MIT"
] | permissive | spacetx/slicedimage | acf4a767f87b6ab78e657d85efad22ee241939f4 | eb8e1d3899628db66cffed1370f2a7e6dd729c4f | refs/heads/master | 2021-04-09T10:53:15.057821 | 2020-05-26T17:40:11 | 2020-05-26T17:40:11 | 125,316,414 | 7 | 4 | MIT | 2020-05-26T17:40:15 | 2018-03-15T05:24:24 | Python | UTF-8 | Python | false | false | 19 | py | from . import path
| [
"[email protected]"
] | |
e1ff873dc7162e68805ea496e18d054103fd202b | 6246f61c6bb4143fc88d74c72f6d2e7936433ee9 | /saper.py | d8ce9e5291c0319c76368b2ce8e78d6c68c45df6 | [] | no_license | aramann/mineswapper | 0663d1189d38ec0704d39e6b97f8690e80367b38 | 8fab851d7e948924e88c4101bc35e4745d7971b3 | refs/heads/master | 2021-06-13T20:36:39.147322 | 2017-03-06T14:28:31 | 2017-03-06T14:28:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,094 | py | import random
import tkinter as tk
def gen_bomb(field):
    """Place one bomb ('b') on a random free interior cell of *field* (mutates in place)."""
    row = random.randint(1, m - 1)
    col = random.randint(1, n - 1)
    # Re-roll until we land on a cell that is not already a bomb.
    while field[row][col] == 'b':
        row = random.randint(1, m - 1)
        col = random.randint(1, n - 1)
    field[row][col] = 'b'
    return field
def gen_field(field):
    """Fill numeric hints: each non-bomb interior cell gets its count of adjacent bombs."""
    # All eight neighbour offsets around a cell.
    offsets = ((-1, -1), (-1, 0), (-1, 1),
               (0, -1),           (0, 1),
               (1, -1),  (1, 0),  (1, 1))
    for row in range(1, m):
        for col in range(1, n):
            if field[row][col] == 'b':
                continue
            # The -1 sentinel border guarantees these lookups never go
            # out of bounds.
            field[row][col] = sum(
                1 for dr, dc in offsets if field[row + dr][col + dc] == 'b'
            )
    return field
def opencell(i, j):
    """Reveal cell (i, j); flood-fills outward from empty cells, reveals all bombs on a hit."""
    if field[i][j] == 'b':
        # Bomb hit: reveal every bomb on the board.
        # Bug fix: the original iterated rows over range(1, n) and columns
        # over range(1, m) — swapped for non-square boards (rows are m+1,
        # columns n+1).
        for k in range(1, m):
            for l in range(1, n):
                if field[k][l] == 'b':
                    buttons[k][l]["bg"] = 'red'
                    buttons[k][l]["text"] = 'bomb'
        # exit()
    if field[i][j] == -1:
        # Border sentinel cell: nothing to open.
        return
    if field[i][j] == 0 and (i, j) not in walken:
        walken.append((i, j))
        # Recurse into all eight neighbours.
        # Bug fix: the original called opencell(i-1, j-1) twice and never
        # opened (i-1, j+1), leaving the upper-right diagonal unexplored.
        opencell(i - 1, j - 1)
        opencell(i - 1, j)
        opencell(i - 1, j + 1)
        opencell(i, j - 1)
        opencell(i, j + 1)
        opencell(i + 1, j - 1)
        opencell(i + 1, j)
        opencell(i + 1, j + 1)
    if field[i][j] == 0:
        buttons[i][j]["text"] = 'no'
    else:
        buttons[i][j]["text"] = field[i][j]
    # Colour the hint number by its value.
    # Bug fix: the original compared the Button widget itself to an int
    # (buttons[i][j] == 1), which is never true, so no colour was ever set;
    # compare the field value instead.
    hint_colors = {1: 'azure', 2: 'green', 3: 'red', 4: 'purple',
                   5: 'brown', 6: 'yellow', 7: 'orange', 8: 'white'}
    if field[i][j] in hint_colors:
        buttons[i][j]["fg"] = hint_colors[field[i][j]]
    buttons[i][j]["bg"] = 'grey'
def setflag(i, j):
    """Cycle the flag mark on cell (i, j): '' -> 'b' -> '?' -> ''."""
    current = buttons[i][j]["text"]
    if current == 'b':
        buttons[i][j]["text"] = '?'
    elif current == '?':
        buttons[i][j]["text"] = ''
    else:
        buttons[i][j]["text"] = 'b'
def _opencell(i, j):
def opencell_(event):
opencell(i, j)
return opencell_
def _setflag(i, j):
def setflag_(event):
setflag(i, j)
return setflag_
# --- Game setup: difficulty, board, bombs, and the Tk button grid -----------
root = tk.Tk()
print('Select level of difficulty(1 - easy (9x9 10 mines), 2 - medium (16x16 40 mines), 3 - hard (30x16 99 mines), 4 - custom')
lvl = int(input())
# Board dimensions (n columns, m rows) and bomb count per difficulty preset.
if lvl == 1:
    n, m, bombs = 9, 9, 10
elif lvl == 2:
    n, m, bombs = 16, 16, 40
elif lvl == 3:
    n, m, bombs = 30, 16, 99
else:
    print('Enter size of the field (x, y) and number of bombs, spliting with space')
    n, m, bombs = map(int, input().split())
if n * m <= bombs:
    bombs = n * m - 1
# The field is padded with a -1 sentinel border on every side so that
# neighbour checks never index out of bounds.
field = [[0 for i in range(n + 1)] for j in range(m + 1)]
for i in range(n + 1):
    field[0][i] = -1
    field[-1][i] = -1
for i in range(m + 1):
    field[i][0] = -1
    field[i][-1] = -1
for i in range(bombs):
    field = gen_bomb(field)
field = gen_field(field)
# Debug dump of the generated board (reveals bomb positions on stdout).
for i in range(m + 1):
    print(*field[i])
# Mirror grid of Tk buttons, with the same -1 sentinel border as `field`.
buttons = [[0 for i in range(0, n + 1)] for j in range(0, m + 1)]
for i in range(n + 1):
    buttons[0][i] = -1
    buttons[-1][i] = -1
for i in range(m + 1):
    buttons[i][0] = -1
    buttons[i][-1] = -1
for i in range(1, m):
    for j in range(1, n):
        btn = tk.Button(root, text='', bg='grey')
        btn.bind("<Button-1>", _opencell(i, j))
        btn.bind("<Button-2>", _setflag(i, j))
        btn.grid(row=i, column=j)
        buttons[i][j] = btn
# Cells already flood-opened (shared with opencell's recursion).
walken = []
# btn = tk.Button(root, # parent window
#                 text="Click me", # button caption
#                 width=30,height=5, # width and height
#                 bg="white",fg="black")
# btn.bind("<Button-1>", opencell)
# btn.pack()
root.mainloop()
# root = tk.Tk()
# def Hello(event):
#     print("Yet another hello world")
#
# btn = tk.Button(root, # parent window
#                 text="Click me", # button caption
#                 width=30,height=5, # width and height
#                 bg="white",fg="black") # background and label colours
# btn.bind("<Button-1>", Hello) # left mouse click on the button invokes Hello
# btn.pack() # place the button on the main window
# root.mainloop()
"[email protected]"
] | |
51c188fc3582d89f30984fe761bd4de74c07d286 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_24247.py | f41dd9eb54effc2fae8b2b76ddc93da38babc1a1 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,840 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((591.127, 550.172, 433.724), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((558.151, 528.977, 490.027), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((514.189, 493.443, 549.935), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((541.078, 422.008, 433.053), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((416.095, 453.45, 712.259), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((560.441, 539.013, 466.666), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((560.484, 539.715, 465.403), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((552.11, 550.537, 440.768), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((528.693, 538.373, 431.649), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((511.774, 558.115, 441.881), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((491.711, 546.71, 425.922), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((490.69, 571.213, 412.926), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((585.097, 551.597, 459.839), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((395.497, 593.978, 372.046), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((354.731, 547.327, 564.438), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((354.731, 547.327, 564.438), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((381.51, 546.932, 552.901), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((408.798, 544.379, 541.772), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((436.698, 538.561, 532.225), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((464.967, 534.516, 524.764), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((492.885, 537.772, 518.397), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((513.156, 551.557, 503.757), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((263.588, 534.134, 438.154), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((762.458, 564.868, 573.574), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((516.971, 541.55, 550.287), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((516.971, 541.55, 550.287), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((524.701, 520.043, 532.301), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((518.156, 496.095, 517.283), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((522.974, 469.313, 527.494), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((597.329, 492.17, 431.012), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((450.148, 440.633, 626.042), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((567.05, 508.111, 472.181), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((567.18, 507.83, 472.003), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((539.941, 505.099, 467.782), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((516.22, 513.736, 455.831), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((495.42, 532.128, 457.926), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((478.453, 554.246, 457.003), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((483.011, 579.139, 445.049), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((500.654, 596.305, 432.034), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((545.229, 600.839, 505.286), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((453.3, 585.077, 359.443), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((569.881, 572.648, 532.303), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((564.77, 547.607, 527.181), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((551.494, 493.359, 514.045), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((536.082, 439.216, 501.45), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((608.487, 438.957, 465.359), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((461.56, 366.071, 509.789), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((575.251, 474.626, 465.989), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((595.039, 471.362, 485.911), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((591.024, 491.415, 505.418), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((580.611, 493.353, 531.521), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((564.708, 490.26, 555.028), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((553.024, 482.707, 580.407), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((570.894, 525.42, 514.688), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((533.826, 440.217, 648.202), (1, 0.6, 0.1), 18.4716)
# Register any collected surface sets with Chimera's open-models list.
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
19e02e30a90fc1c0781c84ee261b118d7bd1b1bb | 91652afbc75037f6c631dbe9c14c343514d07469 | /examples/static.py | 80e29ed6bb54b099e5dd92eeaef8911dc9804300 | [] | no_license | BitTheByte/Pybook | beb2186077cdecd821a25b015522beeb3e1d4426 | 8385a9006b4c8577412fa75d7c2196e0e0c539a5 | refs/heads/master | 2023-06-11T02:57:21.458411 | 2023-06-04T18:58:26 | 2023-06-04T18:58:26 | 148,077,126 | 4 | 2 | null | 2023-06-04T18:58:27 | 2018-09-10T00:15:29 | Python | UTF-8 | Python | false | false | 717 | py | """
please move this example to the root directory
"""
from lib.session import *
from lib.parser import *
from lib.engine import *
# NOTE(review): hard-coded credentials committed to source — move these to
# environment variables or a config file kept out of version control.
fbsession = login("[email protected]","Secret_Password123") # login with facebook
def hi(msg):
    """Print the incoming message and return a fixed bot reply."""
    reply = "HELLO FROM FUNCTION"
    print(msg)
    return reply
"""
def custom(message):
print message
return message + " WOW!"
"""
# Canned replies: incoming text -> reply string, or a callable invoked with
# the incoming message (its return value is sent back).
myreplies = {
"hi":"Hello from python!",
"failReply":"Sorry i don't understand :(",
"func_hello":hi
}
# Hook behaviour flags (1 = enabled, 0 = disabled).
options = {
"keysearch" :1, # find the closest key replies
"failReply" :0, # use a fail reply
#"replyHook" :custom, use a custom function to generate answers
}
# Start the polling loop that answers incoming messages using `myreplies`.
StaticMessageHook(fbsession,options,myreplies)
| [
"[email protected]"
] | |
47204ab5273867d202c0c4bdbd8c953a99b17499 | f9c223341e3c052705cc08291d2246399121f037 | /LSR/lsr.py | 3e5ea30d0781c904bd887def9f5932d263d6258a | [] | no_license | andreaeliasc/Lab3-Redes | 185155d91e7f0eec6c59956751c830a19e2e197e | 037f06a632d0e5972f150dc005c29cae232dcf48 | refs/heads/main | 2023-07-15T14:57:28.684337 | 2021-09-01T03:03:57 | 2021-09-01T03:03:57 | 401,242,615 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,107 | py | import asyncio
from asyncio.tasks import sleep
import slixmpp
from getpass import getpass
from aioconsole import ainput, aprint
import time
from utils import *
class LSRClient(slixmpp.ClientXMPP):
    def __init__(self, jid, password, topo_file,names_file):
        """XMPP client implementing Link State Routing over chat messages.

        topo_file maps node IDs to their neighbour IDs; names_file maps
        node IDs to JIDs.
        """
        slixmpp.ClientXMPP.__init__(self, jid, password)
        self.add_event_handler("session_start", self.start)
        self.add_event_handler("message", self.message)
        self.topo_file = topo_file
        self.names_file = names_file
        # Link-state database: one LSP dict per known node (own LSP included).
        self.network = []
        # Timestamp of the last echo request, used to measure link delay.
        self.echo_sent = None
        # This node's own link-state packet; 'neighbours' maps JID -> distance.
        self.LSP = {
            'type': lsp,
            'from': self.boundjid.bare,
            'sequence': 1,
            'neighbours':{}
        }
        self.id = get_ID(self.names_file, jid)
        self.neighbours_IDS = get_neighbors(self.topo_file, self.id)
        self.neighbours = []
        self.neighbours_JID()
    async def start(self, event):
        """Session-start handler: greet neighbours, measure links, then chat."""
        self.send_presence()
        await self.get_roster()
        print("Press enter to start:")
        start = await ainput()
        # Announce ourselves to every direct neighbour.
        for neighbour in self.neighbours:
            await self.send_hello_msg(neighbour)
        # Measure link delay to each neighbour via echo round-trips.
        for neighbour in self.neighbours:
            await self.send_echo_message(neighbour, echo_send)
        # Seed the topology database with our own LSP and start flooding it.
        self.network.append(self.LSP)
        self.loop.create_task(self.send_LSP())
        await sleep(2)
        print("Type the jid of the user you want to message (or wait until someone messages you!)")
        send = await ainput()
        if send != None:
            message = await ainput('Type your message: ')
            #Waiting some time so that the network converges
            print("Waiting for network to converge")
            await sleep(17)
            print("Network converged, sending message")
            self.send_chat_message(self.boundjid.bare,send,steps=1,visited_nodes=[self.boundjid.bare],message=message)
        print("press enter to exit")
        exit = await ainput()
        self.disconnect()
def neighbours_JID(self):
for id in self.neighbours_IDS:
neighbour_JID = get_JID(self.names_file, id)
self.neighbours.append(neighbour_JID)
async def message(self, msg):
body = json_to_object(msg['body'])
if body['type'] == hello:
print("Hello from: ", msg['from'])
elif body['type'] == echo_send:
print("Echoing back to: ", msg['from'])
await self.send_echo_message(body['from'],echo_response)
elif body['type'] == echo_response:
distance = time.time()-self.echo_sent
print("Distance to ", msg['from'], ' is ', distance)
self.LSP['neighbours'][body['from']] = distance
elif body['type'] == lsp:
new = await self.update_network(body)
await self.flood_LSP(body, new)
elif body['type'] == message_type:
if body['to'] != self.boundjid.bare:
print('Got a message that is not for me, sending it ')
self.send_chat_message(source = body['from'],to = body['to'], steps=body['steps'] +1, distance=body['distance'],visited_nodes= body['visited_nodes'].append(self.boundjid.bare),message=body['mesage'])
elif body['to'] == self.boundjid.bare:
print('Got a message!')
print(body['from'], " : ", body['mesage'])
print(body)
async def send_hello_msg(self,to, steps = 1):
you = self.boundjid.bare
to = to
json = {
'type': hello,
'from':you,
'to': to,
'steps': steps
}
to_send = object_to_json(json)
self.send_message(mto = to, mbody=to_send, mtype='chat')
async def send_echo_message(self, to, type ,steps = 1):
you = self.boundjid.bare
to = to
json = {
'type': type,
'from':you,
'to': to,
'steps': steps
}
to_send = object_to_json(json)
self.send_message(mto = to, mbody=to_send, mtype='chat')
self.echo_sent = time.time()
    async def send_LSP(self):
        """Periodically flood this node's own LSP to every direct neighbour."""
        while True:
            for neighbour in self.neighbours:
                lsp_to_send = object_to_json(self.LSP)
                self.send_message(mto =neighbour,mbody=lsp_to_send,mtype='chat')
            await sleep(2)
            # Bump the sequence number so receivers can discard stale copies.
            self.LSP['sequence'] += 1
def send_chat_message(self,source,to,steps=0, distance = 0, visited_nodes = [],message="Hola mundo"):
body ={
'type':message_type,
'from': source,
'to': to,
'steps': steps,
'distance': distance,
'visited_nodes':visited_nodes,
'mesage':message
}
path = self.calculate_path(self.boundjid.bare, to)
body['distance'] += self.LSP['neighbours'][path[1]['from']]
to_send = object_to_json(body)
self.send_message(mto=path[1]['from'],mbody = to_send,mtype='chat')
async def update_network(self, lsp):
for i in range(0,len(self.network)):
node = self.network[i]
if lsp['from'] == node['from']:
if lsp['sequence'] > node['sequence']:
node['sequence'] = lsp['sequence']
node['neighbours'] = lsp['neighbours']
return 1
if lsp['sequence'] <= node['sequence']:
return None
self.network.append(lsp)
return 1
def calculate_path(self, source, dest):
distance = 0
visited = []
current_node = self.find_node_in_network(source)
while current_node['from'] != dest:
node_distances = []
neighbours = current_node['neighbours']
for neighbour in neighbours.keys():
if neighbour == dest:
visited.append(current_node)
current_node = self.find_node_in_network(neighbour)
visited.append(current_node)
return visited
elif neighbour not in visited:
distance_to_neighbour = neighbours[neighbour]
node_distances.append(distance_to_neighbour)
min_distance = min(node_distances)
node_index = node_distances.index(min_distance)
all_nodes = list(current_node['neighbours'].keys())
next_node_id = all_nodes[node_index]
visited.append(current_node)
next_node = self.find_node_in_network(next_node_id)
current_node = next_node
distance += min_distance
return visited
def find_node_in_network(self, id):
for i in range(len(self.network)):
node = self.network[i]
if id in node['from']:
return node
return False
async def flood_LSP(self, lsp, new):
for neighbour in self.neighbours:
if new and neighbour != lsp['from']:
self.send_message(mto =neighbour,mbody=object_to_json(lsp),mtype='chat') | [
"[email protected]"
] | |
1a7945122da319698aab18dff3ea548ff1990001 | cd7557f4daedf3447673c67e13b1c67220905b0e | /Judgment Classifier.py | 718395714852f46853f26e330aace481d2f0abae | [] | no_license | Jason1286/Copyright_88_Classifier | 5774703773ac5816401ba2256777f74d0f9a0859 | 02ba028235c21aa79cae00727effb15a111b8568 | refs/heads/main | 2023-06-02T01:51:59.552419 | 2021-06-25T07:12:30 | 2021-06-25T07:12:30 | 380,103,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,381 | py | #!/usr/bin/env python
# coding: utf-8
# 使用套件
import os
import re
import pandas as pd
import numpy as np
from itertools import compress
# Manual annotation results (hand-labelled ground truth).
manual_label_df = pd.read_excel(r'C:\Users\ASUS VivoBook\Desktop\計算與法律分析\Final_Project\判決標註.xlsx', sheet_name = '工作表1') # read all sheets
manual_label_id = list(manual_label_df['檔案編號'])
# Map every annotated file id to its verdict filename: verdict_001.txt, ...
manual_filename = ['verdict_' + str('{:03}'.format(x)) + '.txt' for x in sorted(manual_label_id)]
# Result table: *_Auto columns are filled by the classifier, *_Manual columns
# come from the hand annotation, *_Diff columns mark disagreements.
dict2df = {'verdict':manual_filename,
           '判決書案號':list(manual_label_df['判決書案號']),
           '駁回_Auto':None,'駁回_Manual':manual_label_df['駁回'],
           '原告引用法條_Auto':None,'法官判決法條_Auto':None,
           '原告引用法條_Manual':manual_label_df['原告引用法條'],
           '法官判決法條_Manual':manual_label_df['法官判決法條'],
           '駁回_Diff':None,'原告引用法條_Diff':None,'法官判決法條_Diff':None
          }
label_df = pd.DataFrame.from_dict(dict2df)
label_df = label_df.set_index(['verdict'])
# Read a verdict file from disk.
def read_verdict(entry):
    """Read one verdict text file and return its non-empty lines.

    Parameters
    ----------
    entry : str
        Filename of the verdict, relative to the verdict directory.

    Returns
    -------
    list[str]
        Lines with newline characters stripped and blank lines removed.
    """
    # NOTE: changing the process-wide CWD is a side effect kept for
    # compatibility with the rest of this script.
    os.chdir(r'C:\Users\ASUS VivoBook\Desktop\計算與法律分析\Final_Project\All_Verdicts')
    # BUGFIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(entry, 'r', encoding='utf-8-sig') as f:
        txt = f.readlines()
    txt = [re.sub('\n', '', x) for x in txt]
    return [x for x in txt if x != '']
# Detect which paragraph(s) of Copyright Act Article 88 a sentence cites.
def case_detection(txt):
    """Classify an Article-88 citation found in *txt*.

    Returns
    -------
    int
        4 when both paragraph 2 and paragraph 3 are cited,
        2 for paragraph 2 only, 3 for paragraph 3 only,
        99 when no recognizable paragraph citation is found.
    """
    # Order matters: the combined "2 and 3" pattern must be tried before the
    # single-paragraph patterns, otherwise a combined citation would be
    # misclassified as paragraph 2.
    c23_regex = re.compile(r'著作權法(第\d+條)?(、)?第(88|八十八)條(第)?(1|一)?(項)?(、)?(第)?(2|二)(項)?(、)?(或)?(第)?(3|三)項')
    c2_regex = re.compile(r'著作權法(第\d+條)?(、)?第(88|八十八)條第(1|一)?(項)?(、)?(第)?(2|二)項')
    c3_regex = re.compile(r'著作權法(第\d+條)?(、)?第(88|八十八)條第(1|一)?(項)?(、)?(第)?(3|三)項')
    # The original also compiled an unused catch-all pattern (cX_regex);
    # that dead code is removed here.
    if c23_regex.search(txt):
        return 4
    if c2_regex.search(txt):
        return 2
    if c3_regex.search(txt):
        return 3
    return 99
def fill_dataframe(classify_, colname, filename):
    """Write the dominant Article-88 classification code into ``label_df``.

    Priority order is 4 (both paragraphs) > 3 > 2 > 99; an empty
    classification list is recorded as 99 (no citation found).
    """
    for code in (4, 3, 2, 99):
        if code in classify_:
            label_df.loc[filename, colname] = code
            return
    if classify_ == []:
        label_df.loc[filename, colname] = 99
# Classify one verdict against Copyright Act Article 88.
def Classify(filename):
    """Label one verdict file and record agreement with the manual annotation.

    Writes the automatic dismissal flag ('駁回_Auto'), the Article-88
    paragraph cited by the plaintiff ('原告引用法條_Auto') and the one the
    court ruled on ('法官判決法條_Auto') into the module-level ``label_df``,
    then sets the corresponding *_Diff columns to 0/1 depending on whether
    they match the manual labels.
    """
    current_verdict = read_verdict(filename)
    # Dismissal detection: inspect the first line matching '主文'.
    # Presumably a payment order there means the claim was granted rather
    # than dismissed — TODO confirm against more verdicts.
    main_rex = re.compile('^主文')
    main_txt = [current_verdict[i] for i, x in enumerate(current_verdict) if main_rex.search(x) != None]
    rex1 = re.compile(r'(應?(連帶)?給付)(周年利率|週年利率|年息|年利率)?(百分之五|百分之5|5%|5%)?')
    if bool(rex1.search(main_txt[0])) == True:
        label_df.loc[filename,'駁回_Auto'] = 0
    else:
        label_df.loc[filename,'駁回_Auto'] = 1
    # Extract every line that cites Copyright Act Article 88.
    rex88 = re.compile(r'著作權法(第\d+條)?(、)?(第\d+項)?(、)?第(88|八十八)(、\d+-\d)?(、\d+){0,2}?條(第)?(1|一|2|二|3|三)?(項)?(及)?((、)?第(2|二)項)?((、)?第(3|三)項)?((、)?(2|二)項)?((、)?(3|三)項)?')
    filter1 = [current_verdict[i] for i, x in enumerate(current_verdict) if rex88.search(x) != None]
    filter1
    # (the bare `filter1` expression above is a no-op kept from the original)
    # Lines citing the article in connection with a party; drop lines that
    # merely restate issues, defenses or statutory boilerplate.
    copyright88 = [filter1[i] for i, x in enumerate(filter1) if re.search(r'(原告|被告|被上訴人|上訴人|被害人|公司)', x) != None]
    copyright88 = [copyright88[i] for i, x in enumerate(copyright88) if not bool(re.search(r'(二造|爭點|抗辯|\?|\?|定有明文)', x)) == True]
    # Lines where a party *requests* relief → articles cited by the plaintiff.
    plaintiff = [copyright88[i] for i, x in enumerate(copyright88) if bool(re.search('請求(原告|被告|被害人|上訴人|被上訴人)?(等連帶負損害賠償責任)?', x)) == True]
    # Lines with phrases granting the claim → articles the court ruled on.
    court = [copyright88[i] for i, x in enumerate(copyright88) if bool(re.search('(為有理由|即有理由|洵屬正當|即非不合|核屬正當|應予准許|核屬合法適當|核屬有據|於法有據|即無不合)(,)?(應予准許)?', x)) == True]
    court_ = [x for x in court if x in plaintiff]
    plaintiff_ = [x for x in plaintiff if x not in court_]
    plaintiff_classify = list(set([case_detection(x) for x in plaintiff_]))
    court_classify = list(set([case_detection(x) for x in court_]))
    # Fill the automatic classification columns.
    fill_dataframe(plaintiff_classify, '原告引用法條_Auto', filename)
    fill_dataframe(court_classify, '法官判決法條_Auto', filename)
    # Mark 1 where the automatic label disagrees with the manual one, else 0.
    if label_df.loc[filename, '駁回_Auto'] != label_df.loc[filename, '駁回_Manual']:
        label_df.loc[filename, '駁回_Diff'] = 1
    else:
        label_df.loc[filename, '駁回_Diff'] = 0
    if label_df.loc[filename, '原告引用法條_Auto'] != label_df.loc[filename, '原告引用法條_Manual']:
        label_df.loc[filename, '原告引用法條_Diff'] = 1
    else:
        label_df.loc[filename, '原告引用法條_Diff'] = 0
    if label_df.loc[filename, '法官判決法條_Auto'] != label_df.loc[filename, '法官判決法條_Manual']:
        label_df.loc[filename, '法官判決法條_Diff'] = 1
    else:
        label_df.loc[filename, '法官判決法條_Diff'] = 0
def Copyright_88_Classifier(filename_lst):
    """Classify every verdict file, then summarize agreement with manual labels.

    Mutates the module-level ``label_df`` via ``Classify`` and returns
    ``(label_df, summary_df, cases_dct)``: ``summary_df`` counts the four
    agreement cases, ``cases_dct`` holds the matching sub-frames.
    """
    # Classify each selected verdict and fill the result table.
    for filename in filename_lst:
        Classify(filename)
    # Result analysis: split rows by which automatic label disagreed.
    dismiss_wrong = label_df.loc[label_df['駁回_Diff'] == 1,:]
    both_wrong = label_df.loc[label_df.loc[:,['原告引用法條_Diff','法官判決法條_Diff']].sum(axis = 1) == 2,:]
    tmp = label_df.loc[label_df['原告引用法條_Diff'] == 1,:]
    # Rows where only the plaintiff-cited-article label is wrong.
    plaintiff_wrong = tmp.loc[[ind for ind in list(tmp.index) if ind not in list(both_wrong.index)],:]
    tmp = label_df.loc[label_df['法官判決法條_Diff'] == 1,:]
    # Rows where only the court-ruling-article label is wrong.
    court_wrong = tmp.loc[[ind for ind in list(tmp.index) if ind not in list(both_wrong.index)],:]
    both_right = label_df.loc[label_df.loc[:,['原告引用法條_Diff','法官判決法條_Diff']].sum(axis = 1) == 0,:]
    cases_dct = {'both_wrong':both_wrong,
                 'plaintiff_wrong':plaintiff_wrong,
                 'court_wrong': court_wrong,
                 'both_right': both_right,
                 'dismiss_wrong': dismiss_wrong}
    summary_dict = {'Case':['僅原告引用法條分錯', '僅法官判決法條分錯','皆分錯','皆分對','總和'],
                    'amount':None,'proportion':None}
    summary_df = pd.DataFrame.from_dict(summary_dict)
    summary_df = summary_df.set_index(['Case'])
    # Counts and proportions for each of the four cases, plus a totals row.
    summary_df.iloc[0,0:2] = [len(plaintiff_wrong), len(plaintiff_wrong)/len(label_df)]
    summary_df.iloc[1,0:2] = [len(court_wrong), len(court_wrong)/len(label_df)]
    summary_df.iloc[2,0:2] = [len(both_wrong), len(both_wrong)/len(label_df)]
    summary_df.iloc[3,0:2] = [len(both_right), len(both_right)/len(label_df)]
    summary_df.iloc[4,0:2] = summary_df.iloc[0:4,].sum(axis = 0)
    summary_df
    # (the bare `summary_df` expression above is a no-op kept from the original)
    return label_df, summary_df, cases_dct
# Run the classifier over every manually-annotated verdict and collect results.
label_df, summary_df, cases_dct = Copyright_88_Classifier(manual_filename)
| [
"[email protected]"
] | |
06f952c695c3533ca0dd029f3e93895af5b02c59 | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartB/py删除链表的倒数第n个节点的位置的值2.py | ab9093a8ca2755b9b1f62111641d210996e07d4a | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py |
# Remove the n-th node counted from the end of a linked list.
class ListNode(object):
    """Singly-linked list node: a payload value plus a pointer to the next node."""
    def __init__(self, x):
        # x is the stored value; `next` starts as None (tail of the list).
        self.val = x
        self.next = None
class Solution(object):
    """Deletes the n-th node counted from the tail of a singly linked list."""

    def remove(self, head, n):
        """Return the list head after removing the n-th node from the end.

        Classic two-pointer gap technique: *lead* is advanced n steps ahead
        of *trail*, so when *lead* reaches the last node, *trail* sits just
        before the node that must be unlinked.  A sentinel node in front of
        the head makes removing the head itself uniform.
        """
        sentinel = ListNode(-1)
        sentinel.next = head
        trail = lead = sentinel
        steps = 0
        while steps < n:
            lead = lead.next
            steps += 1
        while lead is not None and lead.next is not None:
            lead = lead.next
            trail = trail.next
        trail.next = trail.next.next
        return sentinel.next
if __name__ == "__main__":
    # Build the list 1 -> 2 -> 3 -> 4 -> 5 -> 6.
    s = Solution()
    n1 = ListNode(1)
    n2 = ListNode(2)
    n3 = ListNode(3)
    n4 = ListNode(4)
    n5 = ListNode(5)
    n6 = ListNode(6)
    n1.next = n2
    n2.next = n3
    n3.next = n4
    n4.next = n5
    n5.next = n6
    n6.next = None
    # Remove the 2nd node from the end (the 5) and print the remaining values.
    k = 2
    res = s.remove(n1, k)
    while res:
        print(res.val, end="->")
        res = res.next
| [
"[email protected]"
] | |
47e8f9432798989895c7cbfef00d209e0fdc4bb3 | 45c870a3edf37781efd6059a3d879aedf9da7f7f | /custom_resize_drag_toolbar_pyqt5/example.py | cd9c9f2dad5a08e344715d5aaa95e6dcedafa101 | [] | no_license | saladeen/custom_resize_drag_toolbar_pyqt5 | e6dc8598df6b7d58bf3114bfa348db38c2b1512b | f38aa8b263b08fd0f94ea2e1428e873cdadce80e | refs/heads/main | 2023-08-11T04:44:53.349929 | 2021-10-01T19:10:20 | 2021-10-01T19:10:20 | 412,588,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout
from PyQt5.QtCore import Qt
import resizable_qwidget
import toolbar
import sys
class ExampleWindow(resizable_qwidget.TestWindow):
    """Demo window combining the resizable base window with the custom toolbar."""
    def __init__(self):
        super().__init__()
        layout = QHBoxLayout()
        # Custom title/toolbar widget pinned to the top edge of the window.
        layout.addWidget(toolbar.CustomToolbar(self, "Example"))
        layout.setAlignment(Qt.AlignTop)
        self.setLayout(layout)
        # Initial position and size on screen.
        self.move(300, 300)
        self.resize(300, 300)
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the demo window,
    # and run the event loop until the window is closed.
    app = QApplication(sys.argv)
    mw = ExampleWindow()
    mw.show()
    sys.exit(app.exec_())
"[email protected]"
] | |
b0ebd397cc8459a46dd0ef18c330ccdc2c8d2efb | bef4b43dc0a93697dfb7befdf4434994d109d242 | /extract_features.py | 0bb7bcc29969f2399ab42483e98a35287f5e4aac | [] | no_license | karanjsingh/Object-detector | 69d9e5154b9f73028760d6d76da1a0f55038cfea | 9114e95f79e2dd77a3cbbbee92e4432e5c237362 | refs/heads/master | 2020-06-25T22:31:14.941147 | 2020-01-14T23:36:22 | 2020-01-14T23:36:22 | 199,440,746 | 1 | 0 | null | 2019-07-29T11:43:34 | 2019-07-29T11:34:47 | null | UTF-8 | Python | false | false | 3,513 | py | #import necessary packages
from __future__ import print_function
from sklearn.feature_extraction.image import extract_patches_2d
from pyimagesearch.object_detection import helpers
from pyimagesearch.utils import dataset
from pyimagesearch.utils import conf
from pyimagesearch.descriptors import hog
from imutils import paths
from scipy import io
import numpy as np
import argparse
import random
import cv2
import progressbar
# construct an argument parser
ap = argparse.ArgumentParser()
ap.add_argument("-c","--conf",required=True,help="path to configuration file")
args = vars(ap.parse_args())
#load configuration file
conf= conf.Conf(args["conf"])
hog = hog.HOG(orientations=conf["orientations"], pixelsPerCell = tuple(conf["pixels_per_cell"]),
cellsPerBlock=tuple(conf["cells_per_block"]) , normalise = conf["normalize"])
data=[]
labels=[]
#grab the ground truth of in=mages and select a percentage of them for training
trnPaths=list(paths.list_images(conf["image_dataset"]))
trnPaths= random.sample(trnPaths, int(len(trnPaths)*conf["percent_gt_images"]))
print("[info] describing training ROI.........")
# set up the progress bar
widgets = ["Extracting: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(trnPaths), widgets=widgets).start()
#loop over training paths
for (i,trnPath) in enumerate(trnPaths):
#load image cvt it into gray scl , extractthe image ID from the path
image = cv2.imread(trnPath)
image = cv2.cvtColor(image , cv2.COLOR_BGR2GRAY)
imageID = trnPath[trnPath.rfind("_")+1:].replace(".jpg","")
#load the annotation file and extract the bb
p="{}/annotation_{}.mat".format(conf["image_annotations"], imageID)
bb=io.loadmat(p)["box_coord"][0]
roi = helpers.crop_ct101_bb(image,bb,padding=conf["offset"],dstSize=tuple(conf["window_dim"]))
# define the list of ROIs that will be described, based on whether or not the
# horizontal flip of the image should be used
rois = (roi, cv2.flip(roi, 1)) if conf["use_flip"] else (roi,)
#loop over the ROIs
for roi in rois:
#extractfeatures from the ROI and update the list of features and labels
features = hog.describe(roi)
data.append(features)
labels.append(1)
#update the process bar
pbar.update(i)
## grab the disttraction(-ve) image path and reset the process bar
pbar.finish()
dstPaths= list(paths.list_images(conf["image_distractions"]))
pbar = progressbar.ProgressBar(maxval=conf["num_distraction_images"], widgets=widgets).start()
print("[INFO] describing distraction ROIs...")
#Loop over desired number of distraction images
for i in np.arange(0,conf["num_distraction_images"]):
# randomly select a distraction image, load it, convert it to grayscale, and
# then extract random patches from the image
image = cv2.imread(random.choice(dstPaths))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
patches = extract_patches_2d(image, tuple(conf["window_dim"]),
max_patches=conf["num_distractions_per_image"])
# loop over the patches
for patch in patches:
# extract features from the patch, then update the data and label list
features = hog.describe(patch)
data.append(features)
labels.append(-1)
# update the progress bar
pbar.update(i)
#dump the dataset to file
pbar.finish()
print("[INFO] dumping features and labels to file...")
dataset.dump_dataset(data, labels, conf["features_path"], "features")
| [
"[email protected]"
] | |
a161266ee413fb7f3bb8b94466c9d03314de7ee9 | 633b695a03e789f6aa644c7bec7280367a9252a8 | /lmfit_gallery/documentation/fitting_withreport.py | 412f4c07159b2a6fb06c2af10b0d239b29d68e3f | [] | no_license | tnakaicode/PlotGallery | 3d831d3245a4a51e87f48bd2053b5ef82cf66b87 | 5c01e5d6e2425dbd17593cb5ecc973982f491732 | refs/heads/master | 2023-08-16T22:54:38.416509 | 2023-08-03T04:23:21 | 2023-08-03T04:23:21 | 238,610,688 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,206 | py | """
doc_fitting_withreport.py
=========================
"""
# <examples/doc_fitting_withreport.py>
from numpy import exp, linspace, pi, random, sign, sin
from lmfit import Parameters, fit_report, minimize
# Ground-truth parameter values used to synthesize the noisy sample data.
p_true = Parameters()
p_true.add('amp', value=14.0)
p_true.add('period', value=5.46)
p_true.add('shift', value=0.123)
p_true.add('decay', value=0.032)
def residual(pars, x, data=None):
    """Evaluate a decaying sine-wave model; with *data*, return model - data.

    ``pars`` must expose 'amp', 'period', 'shift' and 'decay' through its
    ``valuesdict()``.  The phase is folded back into (-pi/2, pi/2] before
    the model is evaluated.
    """
    vals = pars.valuesdict()
    shift = vals['shift']
    # Fold the phase toward zero so the fit stays on a single branch.
    if abs(shift) > pi/2:
        shift -= sign(shift)*pi
    model = vals['amp'] * sin(shift + x/vals['period']) * exp(-x*x*vals['decay']*vals['decay'])
    return model if data is None else model - data
# Synthesize the data set: evaluate the true model on x and add Gaussian noise.
random.seed(0)
x = linspace(0.0, 250., 1001)
noise = random.normal(scale=0.7215, size=x.size)
data = residual(p_true, x) + noise
# Initial guesses for the fit (deliberately offset from the true values).
fit_params = Parameters()
fit_params.add('amp', value=13.0)
fit_params.add('period', value=2)
fit_params.add('shift', value=0.0)
fit_params.add('decay', value=0.02)
# Least-squares fit of the residual function, then print the fit report.
out = minimize(residual, fit_params, args=(x,), kws={'data': data})
print(fit_report(out))
# <end examples/doc_fitting_withreport.py>
| [
"[email protected]"
] | |
7c851f6cf3c45e4effa984c2a42fc8551f5c800e | a40950330ea44c2721f35aeeab8f3a0a11846b68 | /INTERACTIONS_V1/INTERACTION2/AppSBC/UI/UI.py | d3fdd88cbfb7142e29190f9222894fe2a9977d87 | [] | no_license | huang443765159/kai | 7726bcad4e204629edb453aeabcc97242af7132b | 0d66ae4da5a6973e24e1e512fd0df32335e710c5 | refs/heads/master | 2023-03-06T23:13:59.600011 | 2023-03-04T06:14:12 | 2023-03-04T06:14:12 | 233,500,005 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 35,377 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SBC(object):
def setupUi(self, SBC):
SBC.setObjectName("SBC")
SBC.resize(395, 602)
self.SBC_2 = QtWidgets.QWidget(SBC)
self.SBC_2.setObjectName("SBC_2")
self.tab_device = QtWidgets.QTabWidget(self.SBC_2)
self.tab_device.setGeometry(QtCore.QRect(10, 20, 371, 91))
self.tab_device.setTabPosition(QtWidgets.QTabWidget.West)
self.tab_device.setTabShape(QtWidgets.QTabWidget.Triangular)
self.tab_device.setElideMode(QtCore.Qt.ElideLeft)
self.tab_device.setObjectName("tab_device")
self.device = QtWidgets.QWidget()
self.device.setObjectName("device")
self.label_pump_station = QtWidgets.QLabel(self.device)
self.label_pump_station.setGeometry(QtCore.QRect(0, 20, 91, 14))
self.label_pump_station.setMinimumSize(QtCore.QSize(0, 14))
self.label_pump_station.setMaximumSize(QtCore.QSize(16777215, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_pump_station.setFont(font)
self.label_pump_station.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_pump_station.setObjectName("label_pump_station")
self.ip_local = QtWidgets.QLabel(self.device)
self.ip_local.setGeometry(QtCore.QRect(180, 20, 150, 14))
self.ip_local.setMinimumSize(QtCore.QSize(75, 14))
self.ip_local.setMaximumSize(QtCore.QSize(150, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.ip_local.setFont(font)
self.ip_local.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.ip_local.setObjectName("ip_local")
self.ip_nuc = QtWidgets.QLabel(self.device)
self.ip_nuc.setGeometry(QtCore.QRect(180, 50, 160, 14))
self.ip_nuc.setMinimumSize(QtCore.QSize(160, 14))
self.ip_nuc.setMaximumSize(QtCore.QSize(170, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.ip_nuc.setFont(font)
self.ip_nuc.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.ip_nuc.setObjectName("ip_nuc")
self.led_pump_station = QtWidgets.QToolButton(self.device)
self.led_pump_station.setGeometry(QtCore.QRect(100, 20, 50, 14))
self.led_pump_station.setMinimumSize(QtCore.QSize(50, 0))
self.led_pump_station.setMaximumSize(QtCore.QSize(50, 14))
font = QtGui.QFont()
font.setPointSize(8)
self.led_pump_station.setFont(font)
self.led_pump_station.setToolTip("")
self.led_pump_station.setToolTipDuration(-1)
self.led_pump_station.setObjectName("led_pump_station")
self.label_guides = QtWidgets.QLabel(self.device)
self.label_guides.setGeometry(QtCore.QRect(0, 50, 91, 14))
self.label_guides.setMinimumSize(QtCore.QSize(0, 14))
self.label_guides.setMaximumSize(QtCore.QSize(16777215, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_guides.setFont(font)
self.label_guides.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_guides.setObjectName("label_guides")
self.led_guides = QtWidgets.QToolButton(self.device)
self.led_guides.setGeometry(QtCore.QRect(100, 50, 50, 14))
self.led_guides.setMinimumSize(QtCore.QSize(50, 0))
self.led_guides.setMaximumSize(QtCore.QSize(50, 14))
font = QtGui.QFont()
font.setPointSize(8)
self.led_guides.setFont(font)
self.led_guides.setToolTip("")
self.led_guides.setToolTipDuration(-1)
self.led_guides.setObjectName("led_guides")
self.tab_device.addTab(self.device, "")
self.tab_device_2 = QtWidgets.QTabWidget(self.SBC_2)
self.tab_device_2.setGeometry(QtCore.QRect(10, 120, 371, 111))
self.tab_device_2.setTabPosition(QtWidgets.QTabWidget.West)
self.tab_device_2.setTabShape(QtWidgets.QTabWidget.Triangular)
self.tab_device_2.setElideMode(QtCore.Qt.ElideLeft)
self.tab_device_2.setObjectName("tab_device_2")
self.device_2 = QtWidgets.QWidget()
self.device_2.setObjectName("device_2")
self.gridLayoutWidget_4 = QtWidgets.QWidget(self.device_2)
self.gridLayoutWidget_4.setGeometry(QtCore.QRect(-10, 20, 361, 40))
self.gridLayoutWidget_4.setObjectName("gridLayoutWidget_4")
self.gridLayout_4 = QtWidgets.QGridLayout(self.gridLayoutWidget_4)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.ui_stage_show = QtWidgets.QLineEdit(self.gridLayoutWidget_4)
self.ui_stage_show.setMaximumSize(QtCore.QSize(250, 14))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_stage_show.setFont(font)
self.ui_stage_show.setObjectName("ui_stage_show")
self.gridLayout_4.addWidget(self.ui_stage_show, 0, 1, 1, 1)
self.label_stage_show = QtWidgets.QLabel(self.gridLayoutWidget_4)
self.label_stage_show.setMinimumSize(QtCore.QSize(0, 14))
self.label_stage_show.setMaximumSize(QtCore.QSize(70, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_stage_show.setFont(font)
self.label_stage_show.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_stage_show.setObjectName("label_stage_show")
self.gridLayout_4.addWidget(self.label_stage_show, 0, 0, 1, 1)
self.label_stage_show_btn = QtWidgets.QLabel(self.gridLayoutWidget_4)
self.label_stage_show_btn.setMinimumSize(QtCore.QSize(0, 14))
self.label_stage_show_btn.setMaximumSize(QtCore.QSize(70, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_stage_show_btn.setFont(font)
self.label_stage_show_btn.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_stage_show_btn.setObjectName("label_stage_show_btn")
self.gridLayout_4.addWidget(self.label_stage_show_btn, 1, 0, 1, 1)
self.btn_welcome = QtWidgets.QPushButton(self.device_2)
self.btn_welcome.setGeometry(QtCore.QRect(10, 60, 80, 20))
self.btn_welcome.setMaximumSize(QtCore.QSize(80, 25))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_welcome.setFont(font)
self.btn_welcome.setObjectName("btn_welcome")
self.btn_forward = QtWidgets.QPushButton(self.device_2)
self.btn_forward.setGeometry(QtCore.QRect(120, 60, 80, 20))
self.btn_forward.setMaximumSize(QtCore.QSize(80, 25))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_forward.setFont(font)
self.btn_forward.setObjectName("btn_forward")
self.btn_stop_forward = QtWidgets.QPushButton(self.device_2)
self.btn_stop_forward.setGeometry(QtCore.QRect(230, 60, 80, 20))
self.btn_stop_forward.setMaximumSize(QtCore.QSize(80, 25))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_stop_forward.setFont(font)
self.btn_stop_forward.setObjectName("btn_stop_forward")
self.btn_back_driving = QtWidgets.QPushButton(self.device_2)
self.btn_back_driving.setGeometry(QtCore.QRect(10, 80, 80, 20))
self.btn_back_driving.setMaximumSize(QtCore.QSize(80, 25))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_back_driving.setFont(font)
self.btn_back_driving.setObjectName("btn_back_driving")
self.btn_washing = QtWidgets.QPushButton(self.device_2)
self.btn_washing.setGeometry(QtCore.QRect(120, 80, 80, 20))
self.btn_washing.setMaximumSize(QtCore.QSize(80, 25))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_washing.setFont(font)
self.btn_washing.setObjectName("btn_washing")
self.btn_washing_end = QtWidgets.QPushButton(self.device_2)
self.btn_washing_end.setGeometry(QtCore.QRect(230, 80, 80, 20))
self.btn_washing_end.setMaximumSize(QtCore.QSize(80, 25))
font = QtGui.QFont()
font.setPointSize(9)
self.btn_washing_end.setFont(font)
self.btn_washing_end.setObjectName("btn_washing_end")
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.device_2)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 341, 17))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.ui_guides_data1 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.ui_guides_data1.setMaximumSize(QtCore.QSize(16777215, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_guides_data1.setFont(font)
self.ui_guides_data1.setObjectName("ui_guides_data1")
self.gridLayout_2.addWidget(self.ui_guides_data1, 0, 1, 1, 1)
self.label_guides_2 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_guides_2.setMinimumSize(QtCore.QSize(0, 14))
self.label_guides_2.setMaximumSize(QtCore.QSize(16777215, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_guides_2.setFont(font)
self.label_guides_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_guides_2.setObjectName("label_guides_2")
self.gridLayout_2.addWidget(self.label_guides_2, 0, 0, 1, 1)
self.ui_guides_data2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.ui_guides_data2.setMaximumSize(QtCore.QSize(16777215, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_guides_data2.setFont(font)
self.ui_guides_data2.setObjectName("ui_guides_data2")
self.gridLayout_2.addWidget(self.ui_guides_data2, 0, 2, 1, 1)
self.tab_device_2.addTab(self.device_2, "")
self.tab_pumps_station = QtWidgets.QTabWidget(self.SBC_2)
self.tab_pumps_station.setGeometry(QtCore.QRect(10, 370, 371, 221))
self.tab_pumps_station.setTabPosition(QtWidgets.QTabWidget.West)
self.tab_pumps_station.setTabShape(QtWidgets.QTabWidget.Triangular)
self.tab_pumps_station.setElideMode(QtCore.Qt.ElideLeft)
self.tab_pumps_station.setObjectName("tab_pumps_station")
self.device_3 = QtWidgets.QWidget()
self.device_3.setObjectName("device_3")
self.gridLayoutWidget = QtWidgets.QWidget(self.device_3)
self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 321, 17))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.ui_drain_data1 = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.ui_drain_data1.setMaximumSize(QtCore.QSize(16777215, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_drain_data1.setFont(font)
self.ui_drain_data1.setObjectName("ui_drain_data1")
self.gridLayout.addWidget(self.ui_drain_data1, 0, 1, 1, 1)
self.DRAIN = QtWidgets.QLabel(self.gridLayoutWidget)
self.DRAIN.setMinimumSize(QtCore.QSize(0, 14))
self.DRAIN.setMaximumSize(QtCore.QSize(16777215, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.DRAIN.setFont(font)
self.DRAIN.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.DRAIN.setObjectName("DRAIN")
self.gridLayout.addWidget(self.DRAIN, 0, 0, 1, 1)
self.ui_drain_data2 = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.ui_drain_data2.setMaximumSize(QtCore.QSize(16777215, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_drain_data2.setFont(font)
self.ui_drain_data2.setObjectName("ui_drain_data2")
self.gridLayout.addWidget(self.ui_drain_data2, 0, 2, 1, 1)
self.gridLayoutWidget_3 = QtWidgets.QWidget(self.device_3)
self.gridLayoutWidget_3.setGeometry(QtCore.QRect(10, 40, 321, 173))
self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3")
self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_3)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.ui_wheel_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.ui_wheel_data.setMaximumSize(QtCore.QSize(35, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_wheel_data.setFont(font)
self.ui_wheel_data.setObjectName("ui_wheel_data")
self.gridLayout_3.addWidget(self.ui_wheel_data, 4, 1, 1, 1)
self.DRAIN_6 = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.DRAIN_6.setMinimumSize(QtCore.QSize(0, 14))
self.DRAIN_6.setMaximumSize(QtCore.QSize(25, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.DRAIN_6.setFont(font)
self.DRAIN_6.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.DRAIN_6.setObjectName("DRAIN_6")
self.gridLayout_3.addWidget(self.DRAIN_6, 2, 2, 1, 1)
self.DRAIN_10 = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.DRAIN_10.setMinimumSize(QtCore.QSize(0, 14))
self.DRAIN_10.setMaximumSize(QtCore.QSize(25, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.DRAIN_10.setFont(font)
self.DRAIN_10.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.DRAIN_10.setObjectName("DRAIN_10")
self.gridLayout_3.addWidget(self.DRAIN_10, 4, 2, 1, 1)
self.ui_acid_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.ui_acid_data.setMaximumSize(QtCore.QSize(35, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_acid_data.setFont(font)
self.ui_acid_data.setObjectName("ui_acid_data")
self.gridLayout_3.addWidget(self.ui_acid_data, 3, 1, 1, 1)
self.ui_alkali_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.ui_alkali_data.setMaximumSize(QtCore.QSize(35, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_alkali_data.setFont(font)
self.ui_alkali_data.setObjectName("ui_alkali_data")
self.gridLayout_3.addWidget(self.ui_alkali_data, 2, 1, 1, 1)
self.DRAIN_4 = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.DRAIN_4.setMinimumSize(QtCore.QSize(0, 14))
self.DRAIN_4.setMaximumSize(QtCore.QSize(25, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.DRAIN_4.setFont(font)
self.DRAIN_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.DRAIN_4.setObjectName("DRAIN_4")
self.gridLayout_3.addWidget(self.DRAIN_4, 1, 2, 1, 1)
self.DRAIN_8 = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.DRAIN_8.setMinimumSize(QtCore.QSize(0, 14))
self.DRAIN_8.setMaximumSize(QtCore.QSize(25, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.DRAIN_8.setFont(font)
self.DRAIN_8.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.DRAIN_8.setObjectName("DRAIN_8")
self.gridLayout_3.addWidget(self.DRAIN_8, 3, 2, 1, 1)
self.label_chem = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.label_chem.setMinimumSize(QtCore.QSize(0, 14))
self.label_chem.setMaximumSize(QtCore.QSize(40, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_chem.setFont(font)
self.label_chem.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_chem.setObjectName("label_chem")
self.gridLayout_3.addWidget(self.label_chem, 0, 0, 1, 1)
self.ui_wax_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.ui_wax_data.setMaximumSize(QtCore.QSize(35, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_wax_data.setFont(font)
self.ui_wax_data.setObjectName("ui_wax_data")
self.gridLayout_3.addWidget(self.ui_wax_data, 5, 1, 1, 1)
self.label_wheel_data = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.label_wheel_data.setMinimumSize(QtCore.QSize(0, 14))
self.label_wheel_data.setMaximumSize(QtCore.QSize(40, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_wheel_data.setFont(font)
self.label_wheel_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_wheel_data.setObjectName("label_wheel_data")
self.gridLayout_3.addWidget(self.label_wheel_data, 4, 0, 1, 1)
self.label_wax_data = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.label_wax_data.setMinimumSize(QtCore.QSize(0, 14))
self.label_wax_data.setMaximumSize(QtCore.QSize(40, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_wax_data.setFont(font)
self.label_wax_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_wax_data.setObjectName("label_wax_data")
self.gridLayout_3.addWidget(self.label_wax_data, 5, 0, 1, 1)
self.label_acid_data = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.label_acid_data.setMinimumSize(QtCore.QSize(0, 14))
self.label_acid_data.setMaximumSize(QtCore.QSize(40, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_acid_data.setFont(font)
self.label_acid_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_acid_data.setObjectName("label_acid_data")
self.gridLayout_3.addWidget(self.label_acid_data, 3, 0, 1, 1)
self.label_water_data = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.label_water_data.setMinimumSize(QtCore.QSize(0, 14))
self.label_water_data.setMaximumSize(QtCore.QSize(40, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_water_data.setFont(font)
self.label_water_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_water_data.setObjectName("label_water_data")
self.gridLayout_3.addWidget(self.label_water_data, 1, 0, 1, 1)
self.label_alkali_data = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.label_alkali_data.setMinimumSize(QtCore.QSize(0, 14))
self.label_alkali_data.setMaximumSize(QtCore.QSize(40, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_alkali_data.setFont(font)
self.label_alkali_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_alkali_data.setObjectName("label_alkali_data")
self.gridLayout_3.addWidget(self.label_alkali_data, 2, 0, 1, 1)
self.ui_water_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3)
self.ui_water_data.setMaximumSize(QtCore.QSize(35, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_water_data.setFont(font)
self.ui_water_data.setObjectName("ui_water_data")
self.gridLayout_3.addWidget(self.ui_water_data, 1, 1, 1, 1)
self.DRAIN_12 = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.DRAIN_12.setMinimumSize(QtCore.QSize(0, 14))
self.DRAIN_12.setMaximumSize(QtCore.QSize(25, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.DRAIN_12.setFont(font)
self.DRAIN_12.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.DRAIN_12.setObjectName("DRAIN_12")
self.gridLayout_3.addWidget(self.DRAIN_12, 5, 2, 1, 1)
self.led_water = QtWidgets.QToolButton(self.gridLayoutWidget_3)
self.led_water.setMaximumSize(QtCore.QSize(150, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.led_water.setFont(font)
self.led_water.setObjectName("led_water")
self.gridLayout_3.addWidget(self.led_water, 1, 3, 1, 1)
self.led_alkali = QtWidgets.QToolButton(self.gridLayoutWidget_3)
self.led_alkali.setMaximumSize(QtCore.QSize(150, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.led_alkali.setFont(font)
self.led_alkali.setObjectName("led_alkali")
self.gridLayout_3.addWidget(self.led_alkali, 2, 3, 1, 1)
self.led_acid = QtWidgets.QToolButton(self.gridLayoutWidget_3)
self.led_acid.setMaximumSize(QtCore.QSize(150, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.led_acid.setFont(font)
self.led_acid.setObjectName("led_acid")
self.gridLayout_3.addWidget(self.led_acid, 3, 3, 1, 1)
self.led_wheel = QtWidgets.QToolButton(self.gridLayoutWidget_3)
self.led_wheel.setMaximumSize(QtCore.QSize(150, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.led_wheel.setFont(font)
self.led_wheel.setObjectName("led_wheel")
self.gridLayout_3.addWidget(self.led_wheel, 4, 3, 1, 1)
self.led_wax = QtWidgets.QToolButton(self.gridLayoutWidget_3)
self.led_wax.setMaximumSize(QtCore.QSize(150, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.led_wax.setFont(font)
self.led_wax.setObjectName("led_wax")
self.gridLayout_3.addWidget(self.led_wax, 5, 3, 1, 1)
self.tab_pumps_station.addTab(self.device_3, "")
self.tab_device_3 = QtWidgets.QTabWidget(self.SBC_2)
self.tab_device_3.setGeometry(QtCore.QRect(10, 230, 371, 141))
self.tab_device_3.setTabPosition(QtWidgets.QTabWidget.West)
self.tab_device_3.setTabShape(QtWidgets.QTabWidget.Triangular)
self.tab_device_3.setElideMode(QtCore.Qt.ElideLeft)
self.tab_device_3.setObjectName("tab_device_3")
self.pumpswitch = QtWidgets.QWidget()
self.pumpswitch.setObjectName("pumpswitch")
self.btn_all_stop = QtWidgets.QCheckBox(self.pumpswitch)
self.btn_all_stop.setGeometry(QtCore.QRect(0, 60, 91, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_all_stop.setFont(font)
self.btn_all_stop.setObjectName("btn_all_stop")
self.btn_high_water = QtWidgets.QCheckBox(self.pumpswitch)
self.btn_high_water.setGeometry(QtCore.QRect(70, 60, 91, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_high_water.setFont(font)
self.btn_high_water.setObjectName("btn_high_water")
self.btn_wheel = QtWidgets.QCheckBox(self.pumpswitch)
self.btn_wheel.setGeometry(QtCore.QRect(170, 60, 71, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_wheel.setFont(font)
self.btn_wheel.setObjectName("btn_wheel")
self.btn_alkali = QtWidgets.QCheckBox(self.pumpswitch)
self.btn_alkali.setGeometry(QtCore.QRect(240, 60, 71, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_alkali.setFont(font)
self.btn_alkali.setObjectName("btn_alkali")
self.btn_acid = QtWidgets.QCheckBox(self.pumpswitch)
self.btn_acid.setGeometry(QtCore.QRect(0, 80, 71, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_acid.setFont(font)
self.btn_acid.setObjectName("btn_acid")
self.btn_water_wax = QtWidgets.QCheckBox(self.pumpswitch)
self.btn_water_wax.setGeometry(QtCore.QRect(70, 80, 91, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_water_wax.setFont(font)
self.btn_water_wax.setObjectName("btn_water_wax")
self.btn_drain = QtWidgets.QCheckBox(self.pumpswitch)
self.btn_drain.setGeometry(QtCore.QRect(170, 80, 91, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_drain.setFont(font)
self.btn_drain.setObjectName("btn_drain")
self.btn_water_inflow = QtWidgets.QCheckBox(self.pumpswitch)
self.btn_water_inflow.setGeometry(QtCore.QRect(240, 80, 101, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.btn_water_inflow.setFont(font)
self.btn_water_inflow.setObjectName("btn_water_inflow")
self.label_pump_1 = QtWidgets.QLabel(self.pumpswitch)
self.label_pump_1.setGeometry(QtCore.QRect(0, 10, 51, 14))
self.label_pump_1.setMinimumSize(QtCore.QSize(0, 14))
self.label_pump_1.setMaximumSize(QtCore.QSize(16777215, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_pump_1.setFont(font)
self.label_pump_1.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_pump_1.setObjectName("label_pump_1")
self.ui_log_pump = QtWidgets.QLineEdit(self.pumpswitch)
self.ui_log_pump.setGeometry(QtCore.QRect(40, 10, 251, 15))
self.ui_log_pump.setMaximumSize(QtCore.QSize(16777215, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_log_pump.setFont(font)
self.ui_log_pump.setText("")
self.ui_log_pump.setObjectName("ui_log_pump")
self.led_high_water = QtWidgets.QToolButton(self.pumpswitch)
self.led_high_water.setGeometry(QtCore.QRect(40, 30, 50, 14))
self.led_high_water.setMinimumSize(QtCore.QSize(50, 0))
self.led_high_water.setMaximumSize(QtCore.QSize(55, 14))
font = QtGui.QFont()
font.setPointSize(8)
self.led_high_water.setFont(font)
self.led_high_water.setToolTip("")
self.led_high_water.setToolTipDuration(-1)
self.led_high_water.setObjectName("led_high_water")
self.led_ch_alkali = QtWidgets.QToolButton(self.pumpswitch)
self.led_ch_alkali.setGeometry(QtCore.QRect(90, 30, 50, 14))
self.led_ch_alkali.setMinimumSize(QtCore.QSize(50, 0))
self.led_ch_alkali.setMaximumSize(QtCore.QSize(55, 14))
font = QtGui.QFont()
font.setPointSize(8)
self.led_ch_alkali.setFont(font)
self.led_ch_alkali.setToolTip("")
self.led_ch_alkali.setToolTipDuration(-1)
self.led_ch_alkali.setObjectName("led_ch_alkali")
self.led_ch_acid = QtWidgets.QToolButton(self.pumpswitch)
self.led_ch_acid.setGeometry(QtCore.QRect(140, 30, 50, 14))
self.led_ch_acid.setMinimumSize(QtCore.QSize(50, 0))
self.led_ch_acid.setMaximumSize(QtCore.QSize(55, 14))
font = QtGui.QFont()
font.setPointSize(8)
self.led_ch_acid.setFont(font)
self.led_ch_acid.setToolTip("")
self.led_ch_acid.setToolTipDuration(-1)
self.led_ch_acid.setObjectName("led_ch_acid")
self.led_ch1_wheel = QtWidgets.QToolButton(self.pumpswitch)
self.led_ch1_wheel.setGeometry(QtCore.QRect(190, 30, 50, 14))
self.led_ch1_wheel.setMinimumSize(QtCore.QSize(50, 0))
self.led_ch1_wheel.setMaximumSize(QtCore.QSize(55, 14))
font = QtGui.QFont()
font.setPointSize(8)
self.led_ch1_wheel.setFont(font)
self.led_ch1_wheel.setToolTip("")
self.led_ch1_wheel.setToolTipDuration(-1)
self.led_ch1_wheel.setObjectName("led_ch1_wheel")
self.led_ch1_wax = QtWidgets.QToolButton(self.pumpswitch)
self.led_ch1_wax.setGeometry(QtCore.QRect(240, 30, 50, 14))
self.led_ch1_wax.setMinimumSize(QtCore.QSize(50, 0))
self.led_ch1_wax.setMaximumSize(QtCore.QSize(55, 14))
font = QtGui.QFont()
font.setPointSize(8)
self.led_ch1_wax.setFont(font)
self.led_ch1_wax.setToolTip("")
self.led_ch1_wax.setToolTipDuration(-1)
self.led_ch1_wax.setObjectName("led_ch1_wax")
self.label_pump_2 = QtWidgets.QLabel(self.pumpswitch)
self.label_pump_2.setGeometry(QtCore.QRect(10, 110, 51, 14))
self.label_pump_2.setMinimumSize(QtCore.QSize(0, 14))
self.label_pump_2.setMaximumSize(QtCore.QSize(16777215, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_pump_2.setFont(font)
self.label_pump_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_pump_2.setObjectName("label_pump_2")
self.ui_log_pump_countdown = QtWidgets.QLineEdit(self.pumpswitch)
self.ui_log_pump_countdown.setGeometry(QtCore.QRect(50, 110, 121, 15))
self.ui_log_pump_countdown.setMaximumSize(QtCore.QSize(16777215, 15))
font = QtGui.QFont()
font.setPointSize(9)
self.ui_log_pump_countdown.setFont(font)
self.ui_log_pump_countdown.setText("")
self.ui_log_pump_countdown.setObjectName("ui_log_pump_countdown")
self.label_pump_3 = QtWidgets.QLabel(self.pumpswitch)
self.label_pump_3.setGeometry(QtCore.QRect(190, 110, 71, 14))
self.label_pump_3.setMinimumSize(QtCore.QSize(0, 14))
self.label_pump_3.setMaximumSize(QtCore.QSize(16777215, 14))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_pump_3.setFont(font)
self.label_pump_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_pump_3.setObjectName("label_pump_3")
self.pump_countdown_box = QtWidgets.QSpinBox(self.pumpswitch)
self.pump_countdown_box.setGeometry(QtCore.QRect(260, 110, 48, 16))
font = QtGui.QFont()
font.setPointSize(10)
self.pump_countdown_box.setFont(font)
self.pump_countdown_box.setObjectName("pump_countdown_box")
self.tab_device_3.addTab(self.pumpswitch, "")
SBC.setCentralWidget(self.SBC_2)
self.retranslateUi(SBC)
self.tab_device.setCurrentIndex(0)
self.tab_device_2.setCurrentIndex(0)
self.tab_pumps_station.setCurrentIndex(0)
self.tab_device_3.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(SBC)
def retranslateUi(self, SBC):
    """Install the translated display text on every widget.

    Auto-generated by pyuic5; all literals pass through Qt's translation
    machinery with context "SBC".  Do not edit by hand — regenerate from
    the .ui file instead.
    """
    _translate = QtCore.QCoreApplication.translate
    SBC.setWindowTitle(_translate("SBC", "SBC"))
    # Pump-station / guides status tab.
    self.label_pump_station.setText(_translate("SBC", "PUMP STATION"))
    self.ip_local.setText(_translate("SBC", "LocalIP : 0.0.0.0"))
    self.ip_nuc.setText(_translate("SBC", "NucIP : 0.0.0.0"))
    self.led_pump_station.setText(_translate("SBC", "OFF"))
    self.label_guides.setText(_translate("SBC", "GUIDES"))
    self.led_guides.setText(_translate("SBC", "OFF"))
    self.tab_device.setTabText(self.tab_device.indexOf(self.device), _translate("SBC", "DEVICE"))
    # Stage-show buttons (Chinese captions shown on the operator UI).
    self.label_stage_show.setText(_translate("SBC", "STAGE SHOW"))
    self.label_stage_show_btn.setText(_translate("SBC", "SHOW BTN"))
    self.btn_welcome.setText(_translate("SBC", "欢迎光临"))
    self.btn_forward.setText(_translate("SBC", "向前行驶"))
    self.btn_stop_forward.setText(_translate("SBC", "停止行驶"))
    self.btn_back_driving.setText(_translate("SBC", "向后行驶"))
    self.btn_washing.setText(_translate("SBC", "正在清洗"))
    self.btn_washing_end.setText(_translate("SBC", "清洗结束"))
    self.label_guides_2.setText(_translate("SBC", "GUIDES"))
    self.tab_device_2.setTabText(self.tab_device_2.indexOf(self.device_2), _translate("SBC", "GUIDES"))
    # Liquid-level readouts ("mm" unit labels and per-chemical rows).
    self.DRAIN.setText(_translate("SBC", "DRAIN"))
    self.DRAIN_6.setText(_translate("SBC", "mm"))
    self.DRAIN_10.setText(_translate("SBC", "mm"))
    self.DRAIN_4.setText(_translate("SBC", "mm"))
    self.DRAIN_8.setText(_translate("SBC", "mm"))
    self.label_chem.setText(_translate("SBC", "LIQUID"))
    self.label_wheel_data.setText(_translate("SBC", "WHEEL"))
    self.label_wax_data.setText(_translate("SBC", "WAX"))
    self.label_acid_data.setText(_translate("SBC", "ACID"))
    self.label_water_data.setText(_translate("SBC", "WATER"))
    self.label_alkali_data.setText(_translate("SBC", "ALKALI"))
    self.DRAIN_12.setText(_translate("SBC", "mm"))
    self.led_water.setText(_translate("SBC", "full"))
    self.led_alkali.setText(_translate("SBC", "full"))
    self.led_acid.setText(_translate("SBC", "full"))
    self.led_wheel.setText(_translate("SBC", "full"))
    self.led_wax.setText(_translate("SBC", "full"))
    self.tab_pumps_station.setTabText(self.tab_pumps_station.indexOf(self.device_3), _translate("SBC", "PUMPS STATION"))
    # Pump-switch checkboxes and indicator buttons.
    self.btn_all_stop.setText(_translate("SBC", "ALL STOP"))
    self.btn_high_water.setText(_translate("SBC", "HIGH WATER"))
    self.btn_wheel.setText(_translate("SBC", "WHEEL"))
    self.btn_alkali.setText(_translate("SBC", "ALKALI "))
    self.btn_acid.setText(_translate("SBC", "ACID"))
    self.btn_water_wax.setText(_translate("SBC", "WATER WAX"))
    self.btn_drain.setText(_translate("SBC", "DRAIN"))
    self.btn_water_inflow.setText(_translate("SBC", "WATER INFLOW"))
    self.label_pump_1.setText(_translate("SBC", "PUMP"))
    self.led_high_water.setText(_translate("SBC", "P"))
    self.led_ch_alkali.setText(_translate("SBC", "C1"))
    self.led_ch_acid.setText(_translate("SBC", "C2"))
    self.led_ch1_wheel.setText(_translate("SBC", "WE"))
    self.led_ch1_wax.setText(_translate("SBC", "WX"))
    self.label_pump_2.setText(_translate("SBC", "PUMP"))
    self.label_pump_3.setText(_translate("SBC", "剩余延迟时间"))
    self.tab_device_3.setTabText(self.tab_device_3.indexOf(self.pumpswitch), _translate("SBC", "PUMPSWITCH"))
| [
"[email protected]"
] | |
99f3dea40c103f391f5bbedf2c955812f133372f | 51253a1765ed005a8804b7bf1b6372429f94a020 | /calculate.py | 293d04cdf75a8d63db1a5b87dc0823716b7c1751 | [] | no_license | xly135846/MEGC2021 | b766d3ae295c238c305ae3f7fa0d8056f1ae9ba0 | 83bf61440aca980fb11e789dc3dfd47be78db81d | refs/heads/main | 2023-06-26T23:15:14.433211 | 2021-07-12T14:10:36 | 2021-07-12T14:10:36 | 384,604,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | import numpy as np
from scipy import signal
from utils.utils import *
def cal_TP(left_count_1, label):
    """For each ground-truth interval, count predicted intervals with IOU >= 0.5.

    left_count_1: predicted [start, end] intervals.
    label: ground-truth [start, end] intervals.
    Returns a list, one count per ground-truth interval.
    """
    counts = []
    for gt_interval in label:
        matched = sum(
            1 for pred_interval in left_count_1
            if cal_IOU(pred_interval, gt_interval) >= 0.5
        )
        counts.append(matched)
    return counts
def spotting_evaluation(pred, express_inter, K, P):
    """Propose expression intervals from a frame-level score curve and score them.

    pred: per-frame prediction scores.
    express_inter: ground-truth [start, end] intervals.
    K: half-width of each proposed interval (peaks closer than 2*K are merged
       by find_peaks' distance constraint).
    P: fraction of the (max - mean) range added to the mean to form the
       peak-height threshold.
    Returns (TP, FP, FN, proposed_intervals).
    """
    scores = np.array(pred)
    mean_score = np.mean(scores)
    threshold = mean_score + P * (np.max(scores) - mean_score)
    peaks, _props = signal.find_peaks(scores, height=threshold, distance=K * 2)
    pred_inter = [[peak - K, peak + K] for peak in peaks]
    hits = np.array(cal_TP(pred_inter, express_inter))
    # A ground-truth interval is a true positive if at least one proposal hit it.
    TP = len(np.where(hits != 0)[0])
    # Discount proposals that double-hit the same ground-truth interval.
    n = len(pred_inter) - (sum(hits) - TP)
    m = len(express_inter)
    FP = n - TP
    FN = m - TP
    return TP, FP, FN, pred_inter
def spotting_evaluation_V2(pred_inter, express_inter):
    """Score already-proposed intervals against ground truth.

    pred_inter: proposed [start, end] intervals.
    express_inter: ground-truth [start, end] intervals.
    Returns (TP, FP, FN) counts.
    """
    hits = np.array(cal_TP(pred_inter, express_inter))
    # Each ground-truth interval counts as one TP if any proposal matched it.
    TP = len(np.where(hits != 0)[0])
    # Proposals that hit an already-matched interval are folded out of n.
    n = len(pred_inter) - (sum(hits) - TP)
    FP = n - TP
    FN = len(express_inter) - TP
    return TP, FP, FN
def cal_f1_score(TP, FP, FN):
    """Compute recall, precision and F1-score from spotting counts.

    Bug fix: the original swapped the two formulas (recall was computed as
    TP/(TP+FP) and precision as TP/(TP+FN)); the standard definitions are
    precision = TP/(TP+FP) and recall = TP/(TP+FN).  The F1 value itself is
    unaffected since it is symmetric in the two.

    Also guards the zero denominators (no proposals / no ground truth /
    TP == 0), returning 0.0 instead of raising ZeroDivisionError.

    Returns (recall, precision, f1_score).
    """
    precision = TP / (TP + FP) if (TP + FP) > 0 else 0.0
    recall = TP / (TP + FN) if (TP + FN) > 0 else 0.0
    if recall + precision == 0:
        # No true positives at all: F1 is conventionally 0.
        return recall, precision, 0.0
    f1_score = 2 * recall * precision / (recall + precision)
    return recall, precision, f1_score
def merge(alist, blist, pred_value, K):
    """Fuse adjacent intervals whose labels both equal pred_value.

    alist: per-interval labels (assumed to be 0/1 ints so str(1-pred_value)
           is the opposite label -- TODO confirm against callers);
    blist: one [start, end] frame interval per label in alist;
    K: half-window size; two neighbouring intervals merge when the gap
       between them is at most 2*K frames.
    Returns the merged (alist, blist) pair; the inputs are not mutated
    (new lists are built on every merge).
    """
    # Join the labels into a digit string so runs of pred_value can be measured.
    alist_str = ""
    for i in alist:
        alist_str +=str(i)
    # Splitting on the opposite label isolates each run of pred_value; the
    # longest run minus one bounds how many pairwise merge passes are needed.
    split_str = str(1-pred_value)
    num = max([len(i) for i in alist_str.split(split_str)])-1
    for i in range(num):
        # NOTE: the loop variable is deliberately reset -- the outer for only
        # repeats the left-to-right merge pass `num` times.
        i=0
        while i<(len(alist)-1):
            if (alist[i]==pred_value and alist[i+1]==pred_value) and abs(blist[i][1]-blist[i+1][0])<=K*2:
                # Collapse intervals i and i+1 into one spanning interval.
                clist = alist[:i]+[pred_value]+alist[i+2:]
                dlist = blist[:i]+[[blist[i][0],blist[i+1][1]]]+blist[i+2:]
                alist, blist = clist, dlist
            i+=1
    return alist,blist
"[email protected]"
] | |
b4fd30ba3e9eec7528a1c2334b4650b1dacbdb00 | 6bd93b73213dd97e6e9529db6d7eecdb0504697d | /GoDigital_Python_Codes/command_line3.py | c24de0ae40d597420fe8c15ab992d2fd0ae6e462 | [] | no_license | patiltushar9820/Python_Code | 02e9558e63068823008645892e944894c1a31e62 | b8f1abc448ba738cab6763000f57ba3e9fc2a376 | refs/heads/main | 2023-08-16T13:35:05.532766 | 2021-09-24T10:57:49 | 2021-09-24T10:57:49 | 407,141,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | #
def f(c):
    """Identity function: hand back the exact object that was passed in."""
    result = c
    return result
#>>> c=[1,2,3]
#>>> e=f(c)
#>>> e is c
#output - True | [
"[email protected]"
] | |
d699aa415671a09c0d3cb6f790fbd8d199a1e504 | 7b6377050fba4d30f00e9fb5d56dfacb22d388e1 | /numericalFunctions/ptwXY/Python/Test/UnitTesting/convolution/convolution.py | 23e1f84ea78f302c6955c15e21ec6115a7eb5cc4 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | LLNL/fudge | 0a4fe8e3a68b66d58e42d1f4d209ea3f713c6370 | 6ba80855ae47cb32c37f635d065b228fadb03412 | refs/heads/master | 2023-08-16T21:05:31.111098 | 2023-08-01T22:09:32 | 2023-08-01T22:09:32 | 203,678,373 | 21 | 4 | NOASSERTION | 2023-06-28T20:51:02 | 2019-08-21T23:22:20 | Python | UTF-8 | Python | false | false | 3,194 | py | # <<BEGIN-copyright>>
# Copyright 2022, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
import os
from numericalFunctions import pointwiseXY_C
# Echo this file's name when the '-e' flag appears in CHECKOPTIONS (used by
# the test harness to trace which checks are running).
if( 'CHECKOPTIONS' in os.environ ) :
    options = os.environ['CHECKOPTIONS'].split( )
    if( '-e' in options ) : print( __file__ )
# Run the compiled C convolution test, capturing its verbose output in 'v'.
CPATH = '../../../../Test/UnitTesting/convolution'
os.system( 'cd %s; ./convolution -v > v' % CPATH )
# Read the C program's output back in; 'line' is a module-level counter that
# tracks the current position for error reporting while parsing below.
f = open( os.path.join( CPATH, 'v' ) )
ls = f.readlines( )
f.close( )
line = 1
def getIntegerValue(name, ls):
    """Consume one header line of the form '# <name> = <int>'.

    Raises if the first line of ls does not carry the expected tag.
    Returns (remaining lines, parsed integer) and advances the module-level
    line counter used for error reporting.
    """
    global line
    tag = "# %s = " % name
    if not ls[0].startswith(tag):
        raise Exception('%s: line at %s does not contain %s info: "%s"' % (__file__, line, name, ls[0][:-1]))
    value = int(ls[0].split('=')[1])
    line += 1
    return ls[1:], value
def getDoubleValue(name, ls):
    """Consume one header line of the form '# <name> = <float>'.

    Raises if the first line of ls does not carry the expected tag.
    Returns (remaining lines, parsed float) and advances the module-level
    line counter used for error reporting.
    """
    global line
    tag = "# %s = " % name
    if not ls[0].startswith(tag):
        raise Exception('%s: line at %s does not contain %s info: "%s"' % (__file__, line, name, ls[0][:-1]))
    value = float(ls[0].split('=')[1])
    line += 1
    return ls[1:], value
def compareValues( label, i, v1, v2 ) :
sv1, sv2 = '%.12g' % v1, '%.12g' % v2
sv1, sv2 = '%.8g' % float( sv1 ), '%.8g' % float( sv2 )
if( sv1 != sv2 ) : print( '<%s> <%s>' % ( sv1, sv2 ) )
if( sv1 != sv2 ) : raise Exception( '%s: values %s %s diff by %g at %d for label = %s' % ( __file__, v1, v2, v2 - v1, i, label ) )
def getData(ls, accuracy):
    """Parse one curve block: blank lines, a '# length = N' header, then N
    '<x> <y>' rows, returning (remaining lines, pointwiseXY_C curve).

    Advances the module-level line counter past everything consumed.
    """
    global line
    blanks = 0
    while blanks < len(ls) and ls[blanks].strip() == '':
        blanks += 1
    line += blanks
    ls = ls[blanks:]
    ls, length = getIntegerValue('length', ls)
    # Only the first two columns of each row are the (x, y) pair.
    xys = [[float(token) for token in row.split()[:2]] for row in ls[:length]]
    curve = pointwiseXY_C.pointwiseXY_C(xys, initialSize=len(xys), overflowSize=10, accuracy=accuracy)
    line += length
    return ls[length:], curve
def getDatas( ls ) :
    """Parse one labelled convolution test case out of ls, run the Python
    convolute() and compare it point-by-point against the C answer.

    Expected layout: optional blanks, optional '# Area = ' line, a
    '# label = ' line, '# mode' and '# accuracy' headers, then three curve
    blocks (self, other, and the C convolution result).
    Returns the unconsumed tail of ls.
    """
    global line
    # Skip leading blank lines, keeping the line counter in sync.
    i = 0
    for l in ls :
        if( l.strip( ) != '' ) : break
        i = i + 1
    line += i
    ls = ls[i:]
    if( len( ls ) == 0 ) : return( ls )
    # An optional area line may precede the label; it is ignored here.
    if( ls[0][:9] == '# Area = ' ) : ls = ls[1:]
    if( len( ls ) == 0 ) : return( ls )
    label, ls = ls[0], ls[1:]
    if( label[:10] != '# label = ' ) : raise Exception( '%s: invalid label = "%s"' % ( __file__, label[:-1] ) )
    line += 1
    label = label.split( '=' )[1].strip( )
    ls, mode = getIntegerValue( 'mode', ls )
    ls, accuracy = getDoubleValue( 'accuracy', ls )
    # Three curves follow: the two operands and the C code's convolution.
    ls, self = getData( ls, accuracy )
    ls, other = getData( ls, accuracy )
    ls, cConvolution = getData( ls, accuracy )
    convolution = self.convolute( other, mode )
    if( len( convolution ) != len( cConvolution ) ) : raise Exception( '%s: len( convolution ) = %d != len( cConvolution ) = %d for label "%s"' %
            ( __file__, len( convolution ), len( cConvolution ), label ) )
    # Compare each (x, y) pair to 8 significant digits.
    for i , dXY in enumerate( convolution ) :
        gXY = cConvolution[i]
        compareValues( label, i, dXY[0], gXY[0] )
        compareValues( label, i, dXY[1], gXY[1] )
    return( ls )
| [
"[email protected]"
] |