repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
JonW27/labob | viewer.py | 1 | 1519 | #! /usr/bin/python
import cgi
import cgitb
import urllib, cStringIO
import hmac
import hashlib
cgitb.enable()
from bs4 import BeautifulSoup
HTML_HEADER = 'Content-type: text/html\n'
HEAD = '''
<!DOCTYPE html>
<html lang="en">
<head>
<title>Papers</title>
<meta charset="UTF-8">
</head>
<body>
'''
END = '''
</body>
</html>
'''
def screenshotlayer(access_key, secret_keyword, url, args):
# encode URL
query = urllib.urlencode(dict(url=url, **args))
# generate md5 secret key
secret_key = hashlib.md5('{}{}'.format(url, secret_keyword)).hexdigest()
return "https://api.screenshotlayer.com/api/capture?access_key=%s&secret_key=%s&%s" % (access_key, query)
params = {
'fullpage': '1',
'width': '',
'viewport': '',
'format': '',
'css_url': '',
'delay': '',
'ttl': '',
'force': '',
'placeholder': '',
'user_agent': '',
'accept_lang': '',
'export': ''
};
access_key = "b2b1a6a29159797f73e852ab0e012372"
secret_keyword = "hob"
url = ''
def main():
print HTML_HEADER
print HEAD
d = urllib.urlopen('http://marge.stuy.edu/~jonathan.wong/labob/contribs')
links = []
soup = BeautifulSoup(d)
for i in soup.find_all('a', href=True):
links.append(i['href'])
for i in range(len(links)):
url = "http://marge.stuy.edu/~jonathan.wong/labob/contribs" + links[i]
print screenshotlayer(access_key, secret_keyword, url, params)
print links[i]
print END
main() | gpl-3.0 | -4,429,456,980,170,782,000 | 20.408451 | 109 | 0.593812 | false | 3.044088 | false | false | false |
largetalk/tenbagger | capital/reactor/cc/migrations/0003_auto_20180228_1145.py | 1 | 1210 | # Generated by Django 2.0.2 on 2018-02-28 03:45
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cc', '0002_auto_20180224_0348'),
]
operations = [
migrations.AddField(
model_name='creditcard',
name='lines',
field=models.PositiveIntegerField(default=0, help_text='额度'),
),
migrations.AlterField(
model_name='cashout',
name='amount',
field=models.DecimalField(decimal_places=2, help_text='金额', max_digits=11),
),
migrations.AlterField(
model_name='cashout',
name='fee',
field=models.DecimalField(decimal_places=2, help_text='手续费', max_digits=9),
),
migrations.AlterField(
model_name='cashout',
name='pos_rate',
field=models.FloatField(default=0.6, help_text='刷卡费率', verbose_name='rate'),
),
migrations.AlterField(
model_name='cashout',
name='swipe_day',
field=models.DateField(default=datetime.date.today, help_text='刷卡日'),
),
]
| mit | 3,035,395,281,549,715,000 | 29.307692 | 88 | 0.563452 | false | 3.87541 | false | false | false |
derrickorama/image_optim | image_optim/core.py | 1 | 5109 | # -*- coding: utf-8 -*-
import math
import os
import re
import subprocess
import sys
import traceback
class ImageOptim():
def __init__(self, config_path=None):
if config_path is not None:
print('load config')
# self.config_path = '"'
def get_bytes(self, number):
value = float(number[:-1])
if number.endswith('K'):
value = value * 1024
elif number.endswith('M'):
value = value * 1024 * 1024
return math.ceil(value)
def get_percent(self, number):
if number.endswith('%'):
number = number[:-1]
number = float(number)
return round(number, 2)
def split_output(self, line):
# Parse ratio
ratio_match = re.search(r'^[^\s]+\s*', line)
ratio = ratio_match.group(0).strip()
# Parse size
size_match = re.search(r'^[^\s]+\s*', line[len(ratio_match.group(0)):])
size = size_match.group(0).strip()
# Consider the rest of the line as the file name
# - this captures file names that contains spaces
filename = line[(len(size_match.group(0)) + len(ratio_match.group(0))):]
return ratio, size, filename
def interpret(self, stdout):
# Split output into lines/columns & images vs totals
images = []
output = [line.strip() for line in re.split(r'\n', stdout.decode('utf-8').strip())]
total_output = output.pop(len(output) - 1)
# Gather results for each image
for line in output:
# Zero out image results if there are no savings
if line.find('------') > -1:
ratio = '0%'
size = '0B'
filename = line[6:].strip()
else:
# Parse image results
ratio, size, filename = self.split_output(line)
# Add to list of images
images.append({
'ratioSavings': self.get_percent(ratio),
'sizeSavings': self.get_bytes(size),
'path': filename
})
# Zero out totals when there are no savings
if total_output.find('------') > -1:
total_ratio = '0%'
total_size = '0B'
else:
# Parse totals
# - Note: starting at index 6 so "Total: " doesn't go through
total_ratio, total_size, total_filename = self.split_output(total_output[6:].strip())
totals = {
# Save ratio savings in totals
'ratioSavings': round(float(total_ratio[:-1]), 4),
# Set size savings equal to the # of bytes (based on suffix)
'sizeSavings': self.get_bytes(total_size)
}
return {
'images': images,
'totals': totals
}
def run_command(self, command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode, ' '.join(command), 'Captured stdout/stderr:\n%s\n%s' % (stdout.decode('utf-8'), stderr.decode('utf-8')))
return stdout, stderr
def feature_detection(self):
utils = ['pngcrush', 'jpegoptim', 'gifsicle', 'jpegtran', 'pngout', 'advpng', 'optipng', 'pngquant', 'jhead', 'svgo']
disabled_utils = []
# Try getting the help docs for each utility
for util in utils:
try:
stdout, stderr = self.run_command([util, '-h'])
except FileNotFoundError:
# If a FileNotFoundError error is thrown, the utility is not available
disabled_utils.append('--no-%s' % util)
except subprocess.CalledProcessError:
pass # who cares
return disabled_utils
def optimize(self, path, exclude=None, callback=None):
command = ['image_optim', path]
# Recursively optimize images if a directory is given
if os.path.isdir(path):
command.append('--recursive')
# Exclude paths as defined by "exclude" glob
if exclude is not None:
command.append('--exclude')
command.append(exclude)
# Determine which optimization utilities are available
command += self.feature_detection()
# Run image_optim
try:
stdout, stderr = self.run_command(command)
except subprocess.CalledProcessError as e:
raise e
# If nothing comes through the stdout/stderr, nothing was optimized
if stdout == b'' and stderr == b'':
raise NoImagesOptimizedError(path)
# Convert result to JSON
results = self.interpret(stdout)
if callback is not None:
return callback(results)
else:
return results
class NoImagesOptimizedError(Exception):
def __init__(self, path):
self.path = path
def __str__(self):
return 'No images were optimized at the given path: %s' % os.path.abspath(self.path)
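
# Usage sketch (assumption, not part of the original module; the path and glob
# are placeholders): drive ImageOptim over a directory and report total savings.
def _image_optim_example():
    optim = ImageOptim()
    results = optim.optimize('/path/to/images', exclude='*.tmp')
    print('saved %d bytes (%.2f%%)' % (results['totals']['sizeSavings'],
                                       results['totals']['ratioSavings']))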
| mit | -6,236,471,516,162,944,000 | 30.93125 | 169 | 0.558818 | false | 4.215347 | false | false | false |
RNAcentral/rnacentral-webcode | rnacentral/portal/models/sequence_regions.py | 1 | 1612 | """
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from django.contrib.postgres.fields import ArrayField
from portal.models import EnsemblAssembly, RnaPrecomputed
class SequenceRegion(models.Model):
id = models.AutoField(primary_key=True)
urs_taxid = models.ForeignKey(
RnaPrecomputed,
related_name='regions',
db_column='urs_taxid',
to_field='id',
on_delete=models.CASCADE
)
region_name = models.TextField()
chromosome = models.TextField()
strand = models.IntegerField()
region_start = models.IntegerField()
region_stop = models.IntegerField()
assembly = models.ForeignKey(
EnsemblAssembly,
related_name='regions',
db_column='assembly_id',
to_field='assembly_id',
on_delete=models.CASCADE
)
was_mapped = models.BooleanField()
identity = models.IntegerField()
providing_databases = ArrayField(models.TextField())
exon_count = models.IntegerField()
class Meta:
db_table = 'rnc_sequence_regions'
| apache-2.0 | 1,916,004,264,865,602,600 | 32.583333 | 72 | 0.715881 | false | 4.050251 | false | false | false |
tobecontinued/onedrive-e | onedrivee/common/utils.py | 1 | 1602 | import os
import pkgutil
from pwd import getpwnam, getpwuid
def pretty_print_bytes(size, precision=2):
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
index = 0
while size > 1024:
index += 1 # increment the index of the suffix
size /= 1024.0 # apply the division
return "%.*f %s" % (precision, size, suffixes[index])
def get_current_os_user():
"""
Find the real user who runs the current process. Return a tuple of uid, username, homedir.
:rtype: (int, str, str, int)
"""
user_name = os.getenv('SUDO_USER')
if not user_name:
user_name = os.getenv('USER')
if user_name:
pw = getpwnam(user_name)
user_uid = pw.pw_uid
else:
# If cannot find the user, use ruid instead.
user_uid = os.getresuid()[0]
pw = getpwuid(user_uid)
user_name = pw.pw_name
user_gid = pw.pw_gid
user_home = pw.pw_dir
return user_uid, user_name, user_home, user_gid
OS_USER_ID, OS_USER_NAME, OS_USER_HOME, OS_USER_GID = get_current_os_user()
OS_HOSTNAME = os.uname()[1]
def get_content(file_name, pkg_name='onedrivee', is_text=True):
"""
Read a resource file in data/.
:param str file_name:
:param str pkg_name:
:param True | False is_text: True to indicate the text is UTF-8 encoded.
:return str | bytes: Content of the file.
"""
content = pkgutil.get_data(pkg_name, 'store/' + file_name)
if is_text:
content = content.decode('utf-8')
return content
def mkdir(path):
os.makedirs(path, mode=0o700)
os.chown(path, OS_USER_ID, OS_USER_GID)
| gpl-3.0 | -919,203,654,911,786,500 | 28.666667 | 94 | 0.611111 | false | 3.11068 | false | false | false |
preshing/junction | samples/MapMemoryBench/RenderGraphs.py | 1 | 7125 | #!/usr/bin/env python
import os
import cairo
import math
import glob
#---------------------------------------------------
# Cairo drawing helpers
#---------------------------------------------------
def createScaledFont(family, size, slant=cairo.FONT_SLANT_NORMAL, weight=cairo.FONT_WEIGHT_NORMAL):
""" Simple helper function to create a cairo ScaledFont. """
face = cairo.ToyFontFace(family, slant, weight)
DEFAULT_FONT_OPTIONS = cairo.FontOptions()
DEFAULT_FONT_OPTIONS.set_antialias(cairo.ANTIALIAS_SUBPIXEL)
return cairo.ScaledFont(face, cairo.Matrix(xx=size, yy=size), cairo.Matrix(), DEFAULT_FONT_OPTIONS)
def fillAlignedText(cr, x, y, scaledFont, text, alignment = 0):
""" Draw some aligned text at the specified co-ordinates.
alignment = 0: left-justify
alignment = 0.5: center
alignment = 1: right-justify """
ascent, descent = scaledFont.extents()[:2]
x_bearing, y_bearing, width, height = scaledFont.text_extents(text)[:4]
with Saved(cr):
cr.set_scaled_font(scaledFont)
cr.move_to(math.floor(x + 0.5 - width * alignment), math.floor(y + 0.5))
cr.text_path(text)
cr.fill()
class Saved():
""" Preserve cairo state inside the scope of a with statement. """
def __init__(self, cr):
self.cr = cr
def __enter__(self):
self.cr.save()
return self.cr
def __exit__(self, type, value, traceback):
self.cr.restore()
#---------------------------------------------------
# AxisAttribs
#---------------------------------------------------
class AxisAttribs:
""" Describes one axis on the graph. Can be linear or logarithmic. """
def __init__(self, size, min, max, step, logarithmic = False, labeler = lambda x: str(int(x + 0.5))):
self.size = float(size)
self.logarithmic = logarithmic
self.labeler = labeler
self.toAxis = lambda x: math.log(x) if logarithmic else float(x)
self.fromAxis = lambda x: math.exp(x) if logarithmic else float(x)
self.min = self.toAxis(min)
self.max = self.toAxis(max)
self.step = self.toAxis(step)
def mapAxisValue(self, x):
""" Maps x to a point along the axis.
x should already have been filtered through self.toAxis(), especially if logarithmic. """
return (x - self.min) / (self.max - self.min) * self.size
def iterLabels(self):
""" Helper to iterate through all the tick marks along the axis. """
lo = int(math.floor(self.min / self.step + 1 - 1e-9))
hi = int(math.floor(self.max / self.step + 1e-9))
for i in xrange(lo, hi + 1):
value = i * self.step
if self.min == 0 and i == 0:
continue
yield self.mapAxisValue(value), self.labeler(self.fromAxis(value))
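
# Quick sketch (illustrative, not part of the original script): a logarithmic
# axis over six decades; iterLabels yields (pixel_position, label) pairs.
def _axis_attribs_example():
    log_axis = AxisAttribs(600, 1, 1000000, 10, logarithmic=True)
    return [label for _pos, label in log_axis.iterLabels()]  # ['10', ..., '1000000']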
#---------------------------------------------------
# Graph
#---------------------------------------------------
class Curve:
def __init__(self, name, points, color):
self.name = name
self.points = points
self.color = color
class Graph:
""" Renders a graph. """
def __init__(self, xAttribs, yAttribs):
self.xAttribs = xAttribs
self.yAttribs = yAttribs
self.curves = []
def addCurve(self, curve):
self.curves.append(curve)
def renderTo(self, fileName):
xAttribs = self.xAttribs
yAttribs = self.yAttribs
# Create the image surface and cairo context
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 140 + int(xAttribs.size + 0.5), 65 + int(yAttribs.size + 0.5))
cr = cairo.Context(surface)
cr.set_source_rgb(1, 1, 1)
cr.paint()
cr.set_miter_limit(1.414)
cr.translate(58, 11 + yAttribs.size)
# Draw axes
labelFont = createScaledFont('Arial', 11)
with Saved(cr):
cr.set_line_width(1)
cr.set_source_rgb(.4, .4, .4)
# Horizontal axis
cr.move_to(0, -0.5)
cr.rel_line_to(xAttribs.size + 1, 0)
for pos, label in xAttribs.iterLabels(): # Tick marks
x = math.floor(pos + 0.5) + 0.5
cr.move_to(x, -1)
cr.rel_line_to(0, 4)
cr.stroke()
for pos, label in xAttribs.iterLabels(): # Labels
x = math.floor(pos + 0.5)
with Saved(cr):
cr.translate(x - 1, 5)
cr.rotate(-math.pi / 4)
fillAlignedText(cr, 0, 6, labelFont, label, 1)
# Vertical axis
cr.move_to(0.5, 0)
cr.rel_line_to(0, -yAttribs.size - 0.5)
for pos, label in yAttribs.iterLabels(): # Tick marks
if label == '0':
continue
y = -math.floor(pos + 0.5) - 0.5
cr.move_to(1, y)
cr.rel_line_to(-4, 0)
cr.stroke()
for pos, label in yAttribs.iterLabels(): # Labels
if label == '0':
continue
fillAlignedText(cr, -4, -pos + 4, labelFont, label, 1)
# Draw curves
for curve in self.curves:
points = curve.points
width = 2.5
color = curve.color
with Saved(cr):
cr.set_line_width(width)
cr.set_source_rgba(*color)
with Saved(cr):
cr.rectangle(0, 5, xAttribs.size, -yAttribs.size - 15)
cr.clip()
cr.move_to(xAttribs.mapAxisValue(points[0][0]), -yAttribs.mapAxisValue(points[0][1]))
for x, y, yHi in points[1:]:
cr.line_to(xAttribs.mapAxisValue(x) + 0.5, -yAttribs.mapAxisValue(y) - 0.5)
cr.stroke()
# Label
labelFont = createScaledFont('Arial', 11)
label = curve.name
x, y, yHi = points[-1]
fillAlignedText(cr, xAttribs.mapAxisValue(x) + 3, -yAttribs.mapAxisValue(y) + 4, labelFont, label, 0)
# Draw axis names
cr.set_source_rgb(0, 0, 0)
axisFont = createScaledFont('Helvetica', 14, weight=cairo.FONT_WEIGHT_BOLD)
with Saved(cr):
cr.translate(-47, -yAttribs.size / 2.0)
cr.rotate(-math.pi / 2)
fillAlignedText(cr, 0, 0, axisFont, "Bytes In Use", 0.5)
fillAlignedText(cr, xAttribs.size / 2.0, 50, axisFont, "Population", 0.5)
# Save PNG file
surface.write_to_png(fileName)
#---------------------------------------------------
# main
#---------------------------------------------------
graph = Graph(AxisAttribs(600, 0, 1000000, 200000), AxisAttribs(320, 0, 50000000, 10000000))
COLORS = [
(1, 0, 0),
(1, 0.5, 0),
(0.5, 0.5, 0),
(0, 1, 0),
(0, 0.5, 1),
(0, 0, 1),
(1, 0, 1)
]
for i, fn in enumerate(glob.glob('build*/results.txt')):
points = eval(open(fn, 'r').read())
graph.addCurve(Curve(os.path.split(fn)[0], points, COLORS[i % len(COLORS)]))
graph.renderTo('out.png')
| bsd-2-clause | -3,046,255,733,022,424,000 | 35.917098 | 120 | 0.518035 | false | 3.485812 | false | false | false |
uchicago-voth/cgmap | test/molecular_map_test/single_protein_explicit_mapping/test_single_protein_explicit_mapping.py | 1 | 3175 | #!/usr/bin/env python2
import sys
sys.path.append('../../../src/')
import cgmap as cg
import mdtraj as md
import md_check as check
############################### config #####################################
input_traj = "protein.trr"
input_top = "protein.pdb"
output_traj = "protein.trr"
output_top = "protein.pdb"
reference_traj = "protein.trr"
reference_top = "protein.pdb"
output_dir ='./output/'
input_dir ='./input/'
reference_dir ='./reference/'
############################### run ########################################
### pull in trajectories
trj = md.load(input_dir + input_traj,top=input_dir + input_top)
### define mapping based on knowledge of topology
### in this instance, map every residue into a single site
for a in trj.top.atoms: a.mass = a.element.mass
for a in trj.top.atoms: a.charge = 0
# first residue is SER148 (zero index'd)
name_lists = []
label_lists = []
molecule_types = []
resREF = 148
istart = 0
iend = 0
iname = "SER"
molnum = 0
maxSize = len(list(trj.top.atoms))
stopFlag = False
tempMol = []
tempCGL = []
name_lists_key = []
for i, a in enumerate(trj.top.atoms) :
resNAME = str(a.residue)[0:3]
resNUM = int(str(a.residue)[3:6])
aINDEX = a.index
if resNAME not in name_lists_key :
name_lists_key.append(resNAME)
if (resNUM != resREF) :
#first append name_lists and label
iend = aINDEX - 1
tempMol.append("index %d to %d" % (istart, iend))
tempCGL.append(iname)
#then update things for next residue
iname = resNAME
istart = aINDEX
if resNUM < resREF :
#stopFlag = True
molecule_types.append(int(molnum))
name_lists.append(tempMol)
label_lists.append(tempCGL)
tempMol = []
tempCGL = []
molnum += 1
resREF = resNUM
# special case if last item
if (i == (maxSize-1)) :
iend = aINDEX
tempMol.append("index %d to %d" % (istart, iend))
tempCGL.append(iname)
molecule_types.append(int(molnum))
name_lists.append(tempMol)
label_lists.append(tempCGL)
#actual map command
print name_lists
print label_lists
print molecule_types
print "Lengths of all three lists should be equivalent: %d = %d = %d" % (len(name_lists), len(label_lists), len(molecule_types))
cg_trj = cg.map_unfiltered_molecules( trj = trj,
selection_list = name_lists,
bead_label_list = label_lists,
molecule_types = molecule_types,
mapping_function = "com")
cg_trj.save(output_dir + output_traj)
cg_trj[0].save(output_dir + output_top)
############################### check results ###############################
# reloading results from disk.
cg_traj = cg_trj.load(output_dir + output_traj,top=output_dir + output_top)
ref_cg_traj = cg_trj.load(reference_dir + reference_traj,
top=reference_dir + reference_top)
result=check.md_content_equality(cg_traj,ref_cg_traj)
sys.exit(check.check_result_to_exitval(result))
| apache-2.0 | 3,298,537,481,466,733,600 | 27.348214 | 128 | 0.566299 | false | 3.328092 | false | false | false |
hal0x2328/neo-python | neo/Core/State/ValidatorState.py | 1 | 2648 | from .StateBase import StateBase
from neo.Core.IO.BinaryReader import BinaryReader
from neo.Core.IO.BinaryWriter import BinaryWriter
from neo.IO.MemoryStream import StreamManager
from neo.Core.Cryptography.ECCurve import EllipticCurve, ECDSA
from neo.Core.Size import Size as s
from neo.Core.Size import GetVarSize
from neo.Core.Fixed8 import Fixed8
class ValidatorState(StateBase):
def __init__(self, pub_key=None):
"""
Create an instance.
Args:
pub_key (EllipticCurve.ECPoint):
Raises:
Exception: if `pub_key` is not a valid ECPoint.
"""
if pub_key is not None and type(pub_key) is not EllipticCurve.ECPoint:
raise Exception("Pubkey must be ECPoint Instance")
self.PublicKey = pub_key
self.Registered = False
self.Votes = Fixed8.Zero()
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
return super(ValidatorState, self).Size() + self.PublicKey.Size() + s.uint8 + self.Votes.Size()
def Deserialize(self, reader: BinaryReader):
"""
Deserialize full object.
Args:
reader (neo.Core.IO.BinaryReader):
"""
super(ValidatorState, self).Deserialize(reader)
self.PublicKey = ECDSA.Deserialize_Secp256r1(reader)
self.Registered = reader.ReadBool()
self.Votes = reader.ReadFixed8()
@staticmethod
def DeserializeFromDB(buffer):
"""
Deserialize full object.
Args:
buffer (bytes, bytearray, BytesIO): (Optional) data to create the stream from.
Returns:
ValidatorState:
"""
m = StreamManager.GetStream(buffer)
reader = BinaryReader(m)
v = ValidatorState()
v.Deserialize(reader)
StreamManager.ReleaseStream(m)
return v
def Serialize(self, writer: BinaryWriter):
"""
Serialize full object.
Args:
writer (neo.IO.BinaryWriter):
"""
super(ValidatorState, self).Serialize(writer)
self.PublicKey.Serialize(writer)
writer.WriteBool(self.Registered)
writer.WriteFixed8(self.Votes)
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
return {
'pubkey': self.PublicKey.ToString()
}
def Clone(self):
vs = ValidatorState(self.PublicKey)
vs.Registered = self.Registered
vs.Votes = self.Votes
return vs
| mit | -6,787,392,062,032,740,000 | 26.298969 | 103 | 0.603852 | false | 4.118196 | false | false | false |
HuberTRoy/MusicPlayer | MusicPlayer/apis/qqApi.py | 1 | 8394 | # coding = utf-8
import json
import logging
import urllib.parse
from apiRequestsBase import HttpRequest, ignored
logger = logging.getLogger(__name__)
class QQApi(HttpRequest):
default_timeout = 3.05
def __init__(self):
super(QQApi, self).__init__()
self.headers['Host'] = 'c.y.qq.com'
self.headers['Referer'] = 'https://y.qq.com/portal/playlist.html'
self.playlistHeaders = self.headers.copy()
self.playlistHeaders['Host'] = 'shc.y.qq.com'
self.tokenHeaders = self.headers.copy()
self.tokenHeaders['Host'] = 'base.music.qq.com'
self.tokenHeaders.pop('Referer')
self.token = self._get_qqtoken()
self.key = self.token.get('key')
self.sip = self.token.get('sip')[0]
        # Any value works here; it was originally derived from a time-varying parameter in the cookies.
self.guid = 3768717388
if not self.sip:
logger.info("获取QQToken失败。当前key: {0}, 当前sip: {1}".format(
self.key, self.sip))
print('QQ 播放地址获取失败,请勿播放QQ音乐。')
def httpRequest(self, *args, **kwargs):
html = super(QQApi, self).httpRequest(*args, **kwargs)
logger.info("进行QQ Url请求, args: {0}, kwargs: {1}".format(args, kwargs))
with ignored():
return html.text
logger.info("url: {0} 请求失败. Header: {1}".format(
args[0], kwargs.get('headers')))
return ''
def _get_qqtoken(self):
"""
        After the update the token no longer needs to be fetched; sip is now the fixed URL:
http://dl.stream.qqmusic.qq.com/
"""
# token_url = 'http://base.music.qq.com/fcgi-bin/fcg_musicexpress.fcg?' + \
# 'json=3&guid=3768717388&g_tk=938407465&loginUin=0&hostUin=0&' + \
# 'format=jsonp&inCharset=GB2312&outCharset=GB2312¬ice=0&' + \
# 'platform=yqq&jsonpCallback=jsonCallback&needNewCode=0'
# data = self.httpRequest(token_url, method='GET',
# headers=self.tokenHeaders)
# with ignored():
# data = data[len("jsonCallback("):-2]
# return json.loads(data)
return {'key': '1', 'sip': ['http://dl.stream.qqmusic.qq.com/']}
def _getImgUrl(self, mid):
imgUrl = 'https://y.gtimg.cn/music/photo_new/'
return imgUrl + 'T002R300x300M000' + mid + '.jpg'
def _getSongUrl(self, mid):
vkey = self._getSongUrlVkey(mid)
if not vkey:
vkey = '000'
return '{0}C400{1}.m4a?vkey={2}&guid={3}&uin=0&fromtag=66'.format(self.sip, mid, vkey, self.guid)
def _getSongUrlVkey(self, mid):
        # Fetch the vkey needed to build a QQ Music song URL.
        # Returns the vkey.
vkey_url = 'https://c.y.qq.com/base/fcgi-bin/fcg_music_express_mobile3.fcg'
params = {
'g_tk': '5381',
'jsonpCallback': 'MusicJsonCallback8571665793949388',
'loginUin': '0',
'hostUin': '0',
'format': 'json',
'inCharset': 'utf8',
'outCharset': 'utf-8',
'notice': '0',
'platform': 'yqq',
'needNewCode': '0',
'cid': '205361747',
'callback': 'MusicJsonCallback8571665793949388',
'uin': '0',
'songmid': mid,
'filename': 'C400' + mid + '.m4a',
'guid': '{}'.format(self.guid)
}
response = self.httpRequest(vkey_url, method="GET", headers=self.headers, params=params)
with ignored():
data = json.loads(response[response.find("{"):-1])
return data['data']['items'][0]['vkey']
return False
def _fromSongUrlGetSongMid(self, songUrl):
        # Extract the mid from a complete song URL.
# 'http://dl.stream.qqmusic.qq.com/
# C400 0000ASDASD.m4a
# ?vkey=' + vkey + '&guid=7133372870&uin=0&fromtag=66'
songUrl = songUrl.split("?")[0]
return songUrl[songUrl.find('C400')+4:-4]
def getSongUrl(self, songUrl):
        # songUrl formats:
        # 1. A bare song mid, e.g. 000xkbLI2QEKE9.
        # 2. A complete URL from a previous authorization, now expired and needing a refresh.
mid = songUrl
if 'http' in songUrl:
mid = self._fromSongUrlGetSongMid(songUrl)
return self._getSongUrl(mid)
def playList(self, ein=29):
"""
        ein selects which page of playlists is returned.
29, 59, 89....
"""
url = 'https://c.y.qq.com/splcloud/fcgi-bin/' +\
'fcg_get_diss_by_tag.fcg?rnd=0.5136307078685405&g_tk=5381&' +\
'jsonpCallback=getPlaylist&loginUin=0&hostUin=0&format=jsonp&inCharset=utf8' +\
'&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0&categoryId=10000000&' +\
'sortId=5&sin=30&ein={0}'.format(ein)
response = self.httpRequest(url, method='GET', headers=self.headers)
with ignored():
data = json.loads(response[len('getPlaylist('):-1])
return data['data']['list']
return False
def getPlaylist(self, ids):
url = 'https://shc.y.qq.com/qzone/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg?type=1&json=1&utf8=1&onlysong=0' +\
'&disstid={0}&format=jsonp&g_tk=5381&jsonpCallback=playlistinfoCallback&loginUin=0&hostUin=0&'.format(ids) +\
'format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0'
response = self.httpRequest(
url, method='GET', headers=self.playlistHeaders)
with ignored():
data = json.loads(response[len('playlistinfoCallback('):-len(')')])
data = data['cdlist'][0]
newDatas = {}
newDatas['trackCount'] = data['total_song_num']
newDatas['name'] = data['dissname']
newDatas['creator'] = {'nickname': data['nick']}
newDatas['description'] = data['desc']
songs = data['songlist']
# imgUrl = 'https://y.gtimg.cn/music/photo_new/'
for i in songs:
i['name'] = i['songname']
i['artists'] = [
{'name': ';'.join([x['name'] for x in i['singer']])}]
i['duration'] = int(i['interval']) * 1000
# i['album'] = {'blurPicUrl': imgUrl + 'T002R300x300M000' + i['albummid'] + '.jpg'}
i['album'] = {'blurPicUrl': self._getImgUrl(i['albummid'])}
# i['mp3Url'] = '{0}C400{1}.m4a?vkey={2}&guid={3}'.format(self.sip, i['songmid'], self.key, self.guid)
i['mp3Url'] = self._getSongUrl(i['songmid'])
i['lyric'] = 'qq'
newDatas['tracks'] = songs
return newDatas
return False
def search(self, key):
url = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp?ct=24&qqmusic_ver=1298&' +\
'new_json=1&remoteplace=txt.yqq.center&searchid=43541888870417375&t=0&aggr=1' +\
'&cr=1&catZhida=1&lossless=0&flag_qc=0&p=1&n=50&' +\
'w={0}'.format(urllib.parse.quote(key)) +\
'&g_tk=5381&jsonpCallback=searchCallbacksong6064&loginUin=0&hostUin=0&' +\
'format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0'
response = self.httpRequest(url, method='GET')
with ignored():
data = json.loads(
response[len('searchCallbacksong6064('):-1])
data = data['data']['song']
newDatas = {}
newDatas['songCount'] = data['curnum'] - 1
songs = []
for i in data['list']:
songs.append({'name': i['name'],
'ar': [{'name': ';'.join([x['name'] for x in i['singer']])}],
'al': {'picUrl': self._getImgUrl(i['album']['mid'])},
'dt': i['interval'] * 1000,
'id': i['id'],
                          # Not actually an mp3; named this way to keep the interface uniform.
'mp3Url': i['mid'],
'lyric': 'qq'
})
newDatas['songs'] = songs
return newDatas
return False
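
# Usage sketch (assumption, not part of the original module; the mid and search
# term are placeholders, and real network access is required): resolve a song
# mid to a playable URL and run a keyword search.
def _qq_api_example():
    api = QQApi()
    play_url = api.getSongUrl('000xkbLI2QEKE9')
    results = api.search('hello')
    return play_url, results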
qqApi = QQApi()
if __name__ == '__main__':
help(qqApi)
| mit | 7,373,061,869,558,684,000 | 33.566524 | 121 | 0.527688 | false | 2.988497 | false | false | false |
wonghoifung/learning-python | spider/extract_all_links_by_tag.py | 1 | 1254 | #-*-coding:utf8-*-
import sys
import urllib
import urllib2
import urlparse
import re
from lxml import etree
def tag_link(tag, startpage):
return 'http://www.douban.com/tag/' + tag + '/movie?start=' + str(startpage)
def trim_link(url):
mre=re.match('^https?://movie.douban.com/subject/([^/]*)',url,re.IGNORECASE)
if not mre:
print 'url:' + url + ' is not valid...'
return ''
url = mre.group(0)
return url
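
# Example (illustrative, not part of the original script): trim_link keeps only
# the canonical subject URL.
def _trim_link_example():
    return trim_link('https://movie.douban.com/subject/1292052/reviews')
    # -> 'https://movie.douban.com/subject/1292052'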
class my_urlopener(urllib.FancyURLopener):
version = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'
def process():
startpage = 0
links = []
badcount = 0
while True:
url = tag_link('日本', startpage)
startpage += 15
opener = my_urlopener()
page = opener.open(url)
text = page.read()
page.close()
selector = etree.HTML(text)
movielist = selector.xpath('//*[@id="content"]/div/div[@class="article"]/div[@class="mod movie-list"]/dl')
if len(movielist) == 0:
break
for movie in movielist:
movielink = movie.xpath('dd/a/@href')[0]
link = trim_link(movielink)
if len(link) > 0:
links.append(link)
print link
else:
badcount += 1
print len(links)
print badcount
def main():
process()
if __name__ == "__main__":
main()
| mit | 8,063,614,123,801,121,000 | 22.148148 | 122 | 0.6504 | false | 2.648305 | false | false | false |
Afanc/parazite | src/trade_off.py | 1 | 2872 | # -*- coding: utf-8 -*-
from parazite1 import *
from random import uniform, sample
import matplotlib.pyplot as plt
from CONSTANTES import *
from CHANGING_CONST import *
def trade_off(para_i = None, effect_arg = None):
if effect_arg != None:
new_vir = 0
new_recov = 0
new_transmission = 0
effect = effect_arg
new_vir = (effect**2)/100
new_transmission = 1/(1+exp(-(effect/1.1-5)))
new_recov = 0.1 + 1/effect
if new_recov > 1:
new_recov = 1
return [new_vir,new_transmission,new_recov]
if isinstance(para_i, Parazite):
new_vir = 0
new_recov = 0
new_transmission = 0
effect = (para_i.getVir()*100)**0.5
effect += uniform(-2,2)
compteur = 0
while effect > 10 or effect <0 and compteur < 3:
effect = (para_i.getVir()*100)**0.5
effect += uniform(-2,2)
compteur += 1
if effect > 10 or effect <0:
effect = (para_i.getVir()*100)**0.5
new_vir = (effect**2)/100
new_transmission = 1/(1+exp(-(effect/1.1-5)))
new_recov = 0.1 + 1/effect
if new_recov > 1:
new_recov = 1
para_i.setVir(new_vir)
para_i.setTransmRate(new_transmission)
para_i.setRecovProb(new_recov)
else :
new_vir = 0
new_recov = 0
new_transmission = 0
effect = uniform(0,10)
new_vir = (effect**2)/100
new_transmission = 1/(1+exp(-(effect/1.1-5)))
new_recov = 0.1 + 1/effect
if new_recov > 1:
new_recov = 1
return [new_vir,new_transmission,new_recov]
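
# Worked example (illustrative, not part of the original module): with
# effect_arg=5.0 the branch above gives virulence 5**2/100 = 0.25 and
# recovery 0.1 + 1/5 = 0.3; transmission follows the logistic curve.
def _trade_off_example():
    new_vir, new_transmission, new_recov = trade_off(effect_arg=5.0)
    return new_vir, new_transmission, new_recov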
"""
print trade_off(effect_arg = 2.6)
y = []
R0 = 0
for i in arange(0.1,10.0,0.1):
x.append(i)
effect = i
new_vir = (effect**2)/100
new_transmission = 1/(1+exp(-(effect/1.1-5)))
new_recov = 0.1 + 1/effect
if new_recov > 1:
new_recov = 1
R0num = (200 * (1+new_transmission)*0.4)
R0den = ((1+new_vir)*DYING_PROB + (1+new_recov)*BASE_CHANCE_OF_HEALING)
R0 = R0num/R0den
y1.append(new_vir)
y2.append(new_transmission)
y3.append(new_recov)
y4.append(R0)
plt.plot(x,y1, label = 'Virulence')
plt.plot(x,y2, label = 'Transmission')
plt.plot(x,y3, label = 'Recovery')
#plt.plot(x,y4, label = 'secondary infections',)
plt.xlabel('Parasite load')
#plt.ylabel('Secondary infections')
plt.legend(loc='best')
plt.show()
#print trade_off(effect_arg = 7.0) # corresponds to a virulence of 0.49
'''
new_vir = []
for effect in range(0,10,1):
new_vir.append(1/(1+exp(-(effect/1.1-5))))
plt.scatter(range(0,10,1 ),new_vir)
plt.show()
new_recov = 1- (effect**2)/150
test = Parazite(0.7, 0.1, 0, 'ID23')
trade_off(test)
print test.getVir()
print test.getTransmRate()
print test.getRecovProb()
'''
"""
| gpl-2.0 | 816,983,121,264,258,700 | 25.33945 | 75 | 0.563915 | false | 2.744742 | false | false | false |
broomyocymru/ditto | ditto/core/confluence_client.py | 1 | 3394 | import json
import urllib
import requests
class ConfluenceClient:
def __init__(self, url, username, password):
self.base_url = url
self.username = username
self.password = password
self.http_headers = {'Accept': 'application/json', 'Content-type': 'application/json'}
def page(self, page_id):
query = {'expand': 'version'}
url = self.base_url + '/rest/api/content/' + page_id + '?' + urllib.urlencode(query)
response = requests.get(url, auth=(self.username, self.password), headers=self.http_headers)
return self.error_check(page_id, response)
def get_page_id(self, space, title):
try:
url = self.base_url + '/rest/api/content/?title=' + title + '&spaceKey=' + space
response = requests.get(url, auth=(self.username, self.password), headers=self.http_headers)
obj = self.error_check(title, response)
return obj["results"][0]["id"]
except requests.exceptions.RequestException:
return None
def new_child_page(self, parent_page_id, space, title, content):
data = json.dumps({
'type': 'page',
'title': title,
'ancestors': [{"id": parent_page_id}],
'space': {"key": space},
'body': {
'storage': {
'value': content,
'representation': 'storage'
}
}
})
page_id = self.get_page_id(space, title)
if page_id is not None:
page = self.page(page_id)
data = json.dumps({
'id': page_id,
'type': 'page',
'title': title,
'version': {'number': page['version']['number'] + 1},
'space': {'key': space},
'body': {
'storage': {
'value': content,
'representation': 'storage'
}
}
})
url = self.base_url + '/rest/api/content/' + page_id
response = requests.put(url, auth=(self.username, self.password), headers=self.http_headers, data=data)
return self.error_check(page_id, response)
else:
url = self.base_url + '/rest/api/content'
response = requests.post(url, auth=(self.username, self.password), headers=self.http_headers, data=data)
return self.error_check(page_id, response)
def save_content(self, page_id, version, title, content):
data = json.dumps({
'type': 'page',
'title': title,
'version': {
'number': version
},
'body': {
'storage': {
'value': content,
'representation': 'storage'
}
}
})
url = self.base_url + '/rest/api/content/' + page_id
response = requests.put(url, auth=(self.username, self.password), headers=self.http_headers, data=data)
return self.error_check(page_id, response)
@staticmethod
def error_check(prefix, response):
response.raise_for_status()
obj = response.json()
if 'errorMessages' in obj:
raise ValueError(prefix + ': ' + ','.join(obj['errorMessages']))
return obj
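
# Usage sketch (assumption, not part of the original module; the base URL,
# credentials, space key and titles are placeholders).
def _confluence_example():
    client = ConfluenceClient('https://confluence.example.com', 'user', 'secret')
    parent_id = client.get_page_id('SPACE', 'Parent Page')
    return client.new_child_page(parent_id, 'SPACE', 'Child Page', '<p>hello</p>')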
| mit | 1,831,143,497,517,647,400 | 34.354167 | 116 | 0.508544 | false | 4.184957 | false | false | false |
hvdieren/asap_operators | forth/clusterClassif.py | 1 | 4351 | Skip to content
Personal Open source Business Explore
Sign upSign inPricingBlogSupport
This repository
Search
Watch 2 Star 0 Fork 1 project-asap/telecom-analytics
Code Issues 0 Pull requests 0 Projects 0 Pulse Graphs
Branch: current Find file Copy pathtelecom-analytics/src/python/sociometer/stereo_type_classification.py
1f5e90a on Dec 9, 2016
@papagian papagian Fix stereotype classification
1 contributor
RawBlameHistory
108 lines (87 sloc) 3.8 KB
#
# Copyright 2015-2016 WIND,FORTH
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Stereo Type Classification Module.
Given a set of user profiles for specific region and weeks of year and a set
of labeled calling behaviors, it returns the percentage of each label
on each spatial region.
E.g.:
Region 1, resident, 75%
Region 1, commuter, 20%
Usage:
$SPARK_HOME/bin/spark-submit sociometer/stereo_type_classification.py <profiles> <centroids>
Args:
profiles: The user profiles location. The expected location is expected to match
the following pattern:
/profiles/<region>-<start_week>-<end_week> where start_week and
end_week have the following format: <ISO_year>_<ISO_week>
centroids: The calling behavior dataset location. The expected location is expected to match
the following pattern:
/centroids/<region>-<start_week>-<end_week> where start_week and
end_week have the following format: <ISO_year>_<ISO_week>
Example:
$SPARK_HOME/bin/spark-submit sociometer/stereo_type_classification.py /profiles/aree_roma-2015_53-2016_3 \
/centroids/aree_roma-2015_53-2016_3
Results are stored into a local file: sociometer-<region>-<start_week>-<end_week>.
"""
from pyspark import SparkContext
from pyspark.mllib.clustering import KMeansModel
import re
import sys
def user_type(profile, model, centroids):
if len([x for x in profile if x != 0]) == 1 and sum(profile) < 0.5:
return 'passing by'
else:
idx = model.predict(profile)
cluster = model.clusterCenters[idx]
return centroids[cluster]
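
# Example (illustrative, not part of the original module): a profile with a
# single non-zero entry summing to less than 0.5 short-circuits to 'passing by'
# without consulting the k-means model.
def _user_type_example():
    return user_type([0.3, 0.0, 0.0], None, {})  # -> 'passing by'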
if __name__ == '__main__':
sc = SparkContext()
    # user annotation
    d1 = sys.argv[1]
    d2 = sys.argv[2]
pattern = r'/profiles/(?P<region>\w+)-(?P<start_week>\w+)-(?P<end_week>\w+)'
m = re.search(pattern, d1)
region, start_week, end_week = m.groups()
pattern = r'/centroids/(?P<region>\w+)-(?P<start_week>\w+)-(?P<end_week>\w+)'
m = re.search(pattern, d2)
assert((region, start_week, end_week) == m.groups())
r = sc.pickleFile(d2)
centroids = {tuple(v.tolist()): k for k, v in r.collect()}
model = KMeansModel(centroids.keys())
r = sc.pickleFile(d1)
    # format: (region, user_id, profile)
    r_auto = r.map(lambda (region, user_id, profile):
                   (region, user_type(profile, model, centroids), user_id, profile)) \
        .map(lambda (region, utype, user_id, profile): ((region, utype), 1)) \
        .reduceByKey(lambda x, y: x + y)
    # yields ((region, user_type), count) pairs per spatial region
    # final result
lst = r_auto.collect()
sociometer = [(region,
user_type,
count * 1.0 / sum([count1 for ((region1, _), count1) in lst if region1 == region])
) for ((region, user_type), count) in lst]
with open("sociometer-%s-%s-%s" %
(region, start_week, end_week), 'w') as outfile:
print >>outfile, "region, profile, percentage"
for region, user_type, count in sorted(sociometer, key=lambda x: x[0][0]):
print>>outfile, region, user_type.replace("\n", ""), count
| apache-2.0 | -4,051,820,865,351,946,000 | 37.166667 | 110 | 0.674787 | false | 3.466932 | false | false | false |
vossman/ctfeval | pyami/mem.py | 1 | 2238 | #!/usr/bin/env python
import os
def meminfo2dict():
if not os.path.exists('/proc/meminfo'):
return None
f = open('/proc/meminfo', 'r')
lines = f.readlines()
f.close()
info = {}
for line in lines:
line = line[:-1]
parts = line.split(':')
key = parts[0]
value = parts[1].strip()
value = value.split()
value = int(value[0])
info[key] = value
return info
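
# Example (illustrative, not part of the original module): the parsed dict maps
# each /proc/meminfo field to its value in kB, e.g. info['MemTotal'].
def _meminfo_example():
    info = meminfo2dict()
    if info is not None:
        return info['MemTotal'] - info['MemFree']  # kB currently in use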
def stats(meminfo=meminfo2dict()):
if meminfo is None:
return
total = meminfo['MemTotal']
free = meminfo['MemFree']
used = total - free
buffers = meminfo['Buffers']
cached = meminfo['Cached']
used2 = used - buffers - cached
free2 = free + buffers + cached
swaptotal = meminfo['SwapTotal']
swapfree = meminfo['SwapFree']
swapused = swaptotal - swapfree
print '%10d%10d%10d%10d%10d' % (total, used, free, buffers, cached)
print '%20d%10d' % (used2, free2)
print '%10d%10d%10d' % (swaptotal, swapused, swapfree)
meminfo
def used():
meminfo = meminfo2dict()
used = meminfo['MemTotal'] - meminfo['MemFree']
return used
def active():
return 0
meminfo = meminfo2dict()
used = meminfo['MemTotal'] - meminfo['MemFree'] - meminfo['Cached']
return used
def free():
meminfo = meminfo2dict()
free = meminfo['MemFree'] + meminfo['Cached']
return free
def total():
meminfo = meminfo2dict()
total = meminfo['MemTotal']
return total
def swapused():
meminfo = meminfo2dict()
used = meminfo['SwapTotal'] - meminfo['SwapFree']
return used
def swapfree():
meminfo = meminfo2dict()
free = meminfo['SwapFree']
return free
def swaptotal():
meminfo = meminfo2dict()
total = meminfo['SwapTotal']
return total
multdict = {
'b': 1,
'kb': 1024,
'mb': 1024*1024,
'gb': 1024*1024*1024,
}
def procStatus(pid=None):
if pid is None:
pid = os.getpid()
f = open('/proc/%d/status' % (pid,))
statuslines = f.readlines()
f.close()
vm = {}
for statusline in statuslines:
fields = statusline.split()
if fields[0][:2] == 'Vm':
name = fields[0][:-1]
value = int(fields[1])
mult = multdict[fields[2].lower()]
vm[name] = mult*value
return vm
def mySize():
status = procStatus()
return status['VmRSS']
def test():
mypid = os.getpid()
print 'mypid', mypid
print mySize()
if __name__ == '__main__':
#print used()
test()
| apache-2.0 | -7,636,281,603,136,761,000 | 18.982143 | 68 | 0.650581 | false | 2.673835 | false | false | false |
tgbugs/hypush | hyputils/subscribe.py | 1 | 9535 | #!/usr/bin/env python3.6
import os
import asyncio
import ssl
import uuid
import json
from os import environ
from socket import socketpair
from threading import Thread
import certifi
import websockets
class Handler:
def __init__(self, filter_handlers):
self.filter_handlers = filter_handlers # list of filterHandlers that should be run on every message
def process(self, message):
if message['type'] == 'annotation-notification':
for fh in self.filter_handlers:
fh(message)
else:
print('NOT ANNOTATION')
print(message)
class preFilter:
""" Create a filter that will run on the hypothes.is server
Make group empty to default to allow all groups the authed user
is a member of in the hypothes.is system.
"""
def __init__(self, groups=[], users=[], uris=[], tags=[],
create=True, update=True, delete=True,
match_policy='include_any'):
self.create = create
self.update = update
self.delete = delete
#include_all include_any
self.match_policy = match_policy
self.groups = groups
self.users = users
self.uris = uris # NOTE: uri filters must be exact :(
self.tags = tags
self.clause_map = [
('/group', self.groups), # __world__
('/user', self.users),
('/uri', self.uris),
('/tags', self.tags),
]
def _make_clauses(self):
clauses = []
for field, value in self.clause_map:
if value:
clauses.append(
{'field':field,
'case_sensitive':True,
'operator':'one_of',
'options':{},
'value':value,
}
)
return clauses
def export(self):
output = {
'filter':{
'actions':{
'create':self.create,
'update':self.update,
'delete':self.delete,
},
'match_policy':self.match_policy,
'clauses':self._make_clauses(),
},
}
return output
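
# Example (illustrative, not part of the original module): a filter restricted
# to one group serializes to the clause structure the hypothes.is socket expects.
def _prefilter_example():
    flt = preFilter(groups=['__world__']).export()
    # flt['filter']['clauses'][0] == {'field': '/group', 'case_sensitive': True,
    #                                 'operator': 'one_of', 'options': {},
    #                                 'value': ['__world__']}
    return flt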
def _ssl_context(verify=True):
ssl_context = ssl.create_default_context(cafile=certifi.where())
if not verify:
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
return ssl_context
async def setup_connection(websocket):
message = {'messageType': 'client_id',
'value': str(uuid.uuid4()),}
print('SETUP MESSAGE', message)
await websocket.send(json.dumps(message))
async def setup_filters(websocket, filters):
print('SETUP FILTERS\n', json.dumps(filters, indent=2))
await websocket.send(json.dumps(filters))
async def process_messages(websocket, handler):
while True:
response = await websocket.recv()
try:
msg = json.loads(response)
except ValueError:
pass
if msg:
handler.process(msg)
else:
pass
class ExitLoop(Exception):
pass
async def listen_for_exit(reader):
# the managing process will send a message on exit
msg = await reader.readline()
raise ExitLoop(msg.decode())
async def process_or_exit(websock, handler, exit_reader):
process_task = asyncio.ensure_future(process_messages(websock, handler))
exit_task = asyncio.ensure_future(listen_for_exit(exit_reader))
done, pending = await asyncio.wait([process_task, exit_task],
return_when=asyncio.FIRST_EXCEPTION)
future = done.pop()
for task in pending:
task.cancel()
raise future.exception()
def setup_websocket(api_token, filters, filter_handlers,
websocket_endpoint='wss://hypothes.is/ws',
extra_headers=None):
if extra_headers is None:
extra_headers = {}
rsock, wsock = socketpair()
def exit_loop():
try:
# stop the current await
wsock.send(b'Parent processes sent exit\n')
# close the socket and make sure we don't start again
# or more simply, to avoid leaking resources
wsock.close()
except OSError:
pass # socket was already closed
async def ws_loop(loop):
#websocket_endpoint = 'wss://hypothes.is/ws'
#filter_handlers = getFilterHandlers()
handler = Handler(filter_handlers)
ssl_context = _ssl_context(verify=True)
headers = {'Authorization': 'Bearer ' + api_token}
extra_headers.update(headers)
exit_reader, _writer = await asyncio.open_connection(sock=rsock, loop=loop)
while True: # for insurance could also test on closed wsock
print('WE SHOULD GET HERE')
try:
async with websockets.connect(websocket_endpoint,
ssl=ssl_context,
extra_headers=extra_headers) as ws:
await setup_connection(ws)
print(f'websocket connected to {websocket_endpoint}')
await setup_filters(ws, filters)
print('subscribed')
await process_or_exit(ws, handler, exit_reader)
except ExitLoop as e: # for whatever reason the await proceess or exit doesn't work here :/
print(e)
break
except KeyboardInterrupt as e:
break
except (websockets.exceptions.ConnectionClosed, ConnectionResetError) as e:
pass
_writer.close() # prevents ResourceWarning
return ws_loop, exit_loop
class AnnotationStream:
def __init__(self, annos, prefilter, *handler_classes, memoizer=None):
from .hypothesis import api_token
self.api_token = api_token
self.annos = annos
self.filters = prefilter
self.filter_handlers = [handler(self.annos, memoizer=memoizer) for handler in handler_classes]
@staticmethod
def loop_target(loop, ws_loop):
asyncio.set_event_loop(loop)
loop.run_until_complete(ws_loop(loop))
def __call__(self):
loop = asyncio.get_event_loop()
ws_loop, exit_loop = setup_websocket(self.api_token, self.filters, self.filter_handlers)
stream_thread = Thread(target=self.loop_target, args=(loop, ws_loop))
return stream_thread, exit_loop
def main():
from handlers import printHandler, websocketServerHandler
loop = asyncio.get_event_loop()
subscribed = {}
def send_message(d):
for send in subscribed.values():
send(json.dumps(d).encode())
wssh = websocketServerHandler(send_message)
async def incoming_handler(websocket, path):
try:
await websocket.recv() # do nothing except allow us to detect unsubscribe
except websockets.exceptions.ConnectionClosed as e:
pass # working as expected
async def outgoing_handler(websocket, path, reader):
while True:
message = await reader.readline()
await websocket.send(message.decode())
async def conn_handler(websocket, path, reader):
i_task = asyncio.ensure_future(incoming_handler(websocket, path))
o_task = asyncio.ensure_future(outgoing_handler(websocket, path, reader))
done, pending = await asyncio.wait([i_task, o_task], return_when=asyncio.FIRST_COMPLETED)
for task in pending:
task.cancel()
async def subscribe(websocket, path):
name = await websocket.recv() # this is not needed...
print(f"< {name}")
greeting = json.dumps(f"Hello {name}! You are now subscribed to cat facts!{{}}"
f"{list(subscribed)} are also subscribed to cat facts!")
greeting = greeting.format('\n')
rsock, wsock = socketpair()
reader, writer = await asyncio.open_connection(sock=rsock, loop=loop)
for send_something in subscribed.values():
msg = json.dumps(f'{name} also subscribed to cat facts!').encode()
send_something(msg)
def send(bytes_, s=wsock.send):
s(bytes_)
s(b'\n')
subscribed[name] = send # _very_ FIXME NOTE this is how we know where to route all our messages
await websocket.send(greeting)
print(f"> {greeting}")
# we now wait here for something else to happen, in this case
# either there is a subscription or an unsubscription
await conn_handler(websocket, path, reader) # when this completes the connection is closed
subscribed.pop(name)
for send_something in subscribed.values():
msg = json.dumps(f'{name} unsubscribed from cat facts!').encode()
send_something(msg)
start_server = websockets.serve(subscribe, 'localhost', 5050)
loop.run_until_complete(start_server) # TODO need this wrapped so that loop can be passed in
api_token = environ.get('HYP_API_TOKEN', 'TOKEN')
groups = environ.get('HYP_GROUPS', '__world__').split(' ')
filters = preFilter(groups=groups).export()
filter_handlers = [printHandler(), wssh]
print(groups)
ws_loop, exit_loop = setup_websocket(api_token, filters, filter_handlers)
loop.run_until_complete(ws_loop(loop))
if __name__ == '__main__':
main()
| mit | -8,656,371,062,877,171,000 | 32.45614 | 108 | 0.589512 | false | 4.220894 | false | false | false |
mortbauer/verein | wsgi/login.py | 1 | 1799 | import bcrypt
from functools import wraps
from flask import Blueprint, current_app,request, session, Response,g
from utils import send_response
mod = Blueprint('login', __name__, url_prefix='/login')
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def check_valid_auth():
if not session.get('user_id'):
return authenticate()
else:
g.user = current_app.db.users.find_one({'user_id':session['user_id']})
def requires_auth(f):
    @wraps(f)
    def decorated(*args, **kwargs):
        # check_valid_auth returns a 401 response when the session is missing;
        # propagate it instead of silently calling the wrapped view.
        auth_response = check_valid_auth()
        if auth_response is not None:
            return auth_response
        return f(*args, **kwargs)
    return decorated
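
# Usage sketch (assumption, not part of the original module; the route name is
# illustrative): any view wrapped with requires_auth either gets g.user
# populated or returns the 401 challenge.
@mod.route('/whoami')
@requires_auth
def whoami():
    return send_response({'username': g.user['username']})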
@mod.route("/", methods=["GET", "POST"])
def login():
if session.get('user_id'):
return send_response({'message':'you are already logged in'})
auth = request.authorization
if auth:
username = auth.username
password = auth.password
user = current_app.db.users.find_one({'username':username})
if user:
if current_app.bcrypt.check_password_hash(user['password'], password):
session['user_id'] = user['user_id']
return send_response({'message':'login successful'})
else:
return send_response({'message':'wrong passphrase'},status=400)
else:
if not username:
return send_response({'message':'provide a username'},status=400)
else:
return send_response({'message':'unknown user "{0}"'.format(username)},status=400)
else:
return send_response({'message':'username and password required'},status=400)
| gpl-2.0 | 3,872,796,502,596,405,000 | 33.596154 | 98 | 0.620345 | false | 4.183721 | false | false | false |
mcflugen/wmt-rest | wmt/flask/tests/test_sims.py | 1 | 2948 | import json
from uuid import uuid4
from wmt.flask import create_app
from nose.tools import (assert_equal, assert_is_instance, assert_dict_equal,
assert_list_equal, assert_less_equal)
from .tools import (assert_401_unauthorized, assert_404_not_found,
assert_403_forbidden, assert_200_success,
assert_204_empty, loads_if_assert_200,
assert_422_unprocessable_entity,
json_post, json_delete, login_or_fail,
AssertIsResourceResponse, AssertIsCollectionResponse)
from . import (app, FAKE_SIM, FAKE_SIM_NAME, FAKE_SIM_MODEL, FAKE_USER,
FAKE_USER_NAME, FAKE_USER1_NAME, FAKE_USER1_PASS)
class AssertIsSimResource(AssertIsResourceResponse):
__type__ = 'sim'
__fields__ = set(['href', 'id', 'name', 'owner', 'status', 'message',
'user', 'model'])
class AssertIsSimCollection(AssertIsCollectionResponse):
__validator__ = AssertIsSimResource()
assert_is_sim_resource = AssertIsSimResource()
assert_is_sim_collection = AssertIsSimCollection()
def test_show():
with app.test_client() as c:
resp = c.get('/sims/')
assert_200_success(resp)
assert_is_sim_collection(resp)
def test_get_existing():
with app.test_client() as c:
resp = c.get('/sims/1')
assert_200_success(resp)
assert_is_sim_resource(resp, name='foobar')
def test_get_non_existing():
with app.test_client() as c:
assert_404_not_found(c.get('/sims/0'))
def test_new_and_delete():
sim_name = str(uuid4())
with app.test_client() as c:
login_or_fail(c, **FAKE_USER)
resp = json_post(c, '/sims/', data=dict(name=sim_name, model=1))
assert_200_success(resp)
assert_is_sim_resource(resp, name=sim_name)
new_sim = json.loads(resp.data)
with app.test_client() as c:
login_or_fail(c, **FAKE_USER)
assert_204_empty(json_delete(c, new_sim['href']))
def test_new_not_logged_in():
with app.test_client() as c:
assert_401_unauthorized(
json_post(c, '/sims/', data=dict(name='a-new-sim')))
def test_new_existing():
with app.test_client() as c:
login_or_fail(c, **FAKE_USER)
assert_200_success(
json_post(c, '/sims/', data=dict(name=FAKE_SIM_NAME,
model=FAKE_SIM_MODEL)))
def test_delete_non_existing():
with app.test_client() as c:
login_or_fail(c, **FAKE_USER)
assert_404_not_found(json_delete(c, '/sims/999999'))
def test_delete_not_logged_in():
with app.test_client() as c:
assert_401_unauthorized(json_delete(c, '/sims/1'))
def test_delete_wrong_user():
with app.test_client() as c:
login_or_fail(c, username=FAKE_USER1_NAME, password=FAKE_USER1_PASS)
assert_403_forbidden(json_delete(c, '/sims/1'))
| mit | 3,351,741,665,556,969,500 | 30.031579 | 76 | 0.604478 | false | 3.301232 | true | false | false |
ardi69/pyload-0.4.10 | pyload/plugin/hoster/FilepupNet.py | 1 | 1270 | # -*- coding: utf-8 -*-
#
# Test links:
# http://www.filepup.net/files/k5w4ZVoF1410184283.html
# http://www.filepup.net/files/R4GBq9XH1410186553.html
import re
from pyload.plugin.internal.SimpleHoster import SimpleHoster
class FilepupNet(SimpleHoster):
__name = "FilepupNet"
__type = "hoster"
__version = "0.03"
__pattern = r'http://(?:www\.)?filepup\.net/files/\w+'
__config = [("use_premium", "bool", "Use premium account if available", True)]
__description = """Filepup.net hoster plugin"""
__license = "GPLv3"
__authors = [("zapp-brannigan", "[email protected]"),
("Walter Purcaro", "[email protected]")]
NAME_PATTERN = r'>(?P<N>.+?)</h1>'
SIZE_PATTERN = r'class="fa fa-archive"></i> \((?P<S>[\d.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r'>This file has been deleted'
LINK_FREE_PATTERN = r'(http://www\.filepup\.net/get/.+?)\''
def setup(self):
self.multiDL = False
self.chunkLimit = 1
def handle_free(self, pyfile):
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m is None:
self.error(_("Download link not found"))
dl_link = m.group(1)
self.download(dl_link, post={'task': "download"})
| gpl-3.0 | 4,836,978,921,758,395,000 | 27.222222 | 83 | 0.577953 | false | 2.96729 | false | false | false |
ratoaq2/deluge | deluge/tests/test_web_api.py | 1 | 7231 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 bendikro <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
from __future__ import unicode_literals
from io import BytesIO
from twisted.internet import defer, reactor
from twisted.python.failure import Failure
from twisted.web.client import Agent, FileBodyProducer
from twisted.web.http_headers import Headers
from twisted.web.static import File
import deluge.component as component
from deluge.ui.client import client
from . import common
from .common_web import WebServerTestBase
common.disable_new_release_check()
class WebAPITestCase(WebServerTestBase):
def test_connect_invalid_host(self):
d = self.deluge_web.web_api.connect('id')
d.addCallback(self.fail)
d.addErrback(self.assertIsInstance, Failure)
return d
def test_connect(self):
d = self.deluge_web.web_api.connect(self.host_id)
def on_connect(result):
self.assertEqual(type(result), tuple)
self.assertTrue(len(result) > 0)
self.addCleanup(client.disconnect)
return result
d.addCallback(on_connect)
d.addErrback(self.fail)
return d
def test_disconnect(self):
d = self.deluge_web.web_api.connect(self.host_id)
@defer.inlineCallbacks
def on_connect(result):
self.assertTrue(self.deluge_web.web_api.connected())
yield self.deluge_web.web_api.disconnect()
self.assertFalse(self.deluge_web.web_api.connected())
d.addCallback(on_connect)
d.addErrback(self.fail)
return d
def test_get_config(self):
config = self.deluge_web.web_api.get_config()
self.assertEqual(self.webserver_listen_port, config['port'])
def test_set_config(self):
config = self.deluge_web.web_api.get_config()
config['pwd_salt'] = 'new_salt'
config['pwd_sha1'] = 'new_sha'
config['sessions'] = {
'233f23632af0a74748bc5dd1d8717564748877baa16420e6898e17e8aa365e6e': {
'login': 'skrot',
'expires': 1460030877.0,
'level': 10
}
}
self.deluge_web.web_api.set_config(config)
web_config = component.get('DelugeWeb').config.config
self.assertNotEquals(config['pwd_salt'], web_config['pwd_salt'])
self.assertNotEquals(config['pwd_sha1'], web_config['pwd_sha1'])
self.assertNotEquals(config['sessions'], web_config['sessions'])
@defer.inlineCallbacks
def get_host_status(self):
host = list(self.deluge_web.web_api._get_host(self.host_id))
host[3] = 'Online'
host[4] = '2.0.0.dev562'
status = yield self.deluge_web.web_api.get_host_status(self.host_id)
self.assertEqual(status, tuple(status))
def test_get_host(self):
self.assertFalse(self.deluge_web.web_api._get_host('invalid_id'))
conn = list(self.deluge_web.web_api.hostlist.get_hosts_info()[0])
self.assertEqual(self.deluge_web.web_api._get_host(conn[0]), conn[0:4])
def test_add_host(self):
conn = ['abcdef', '10.0.0.1', 0, 'user123', 'pass123']
self.assertFalse(self.deluge_web.web_api._get_host(conn[0]))
# Add valid host
result, host_id = self.deluge_web.web_api.add_host(conn[1], conn[2], conn[3], conn[4])
self.assertEqual(result, True)
conn[0] = host_id
self.assertEqual(self.deluge_web.web_api._get_host(conn[0]), conn[0:4])
# Add already existing host
ret = self.deluge_web.web_api.add_host(conn[1], conn[2], conn[3], conn[4])
self.assertEqual(ret, (False, 'Host details already in hostlist'))
# Add invalid port
conn[2] = 'bad port'
ret = self.deluge_web.web_api.add_host(conn[1], conn[2], conn[3], conn[4])
self.assertEqual(ret, (False, 'Invalid port. Must be an integer'))
def test_remove_host(self):
conn = ['connection_id', '', 0, '', '']
self.deluge_web.web_api.hostlist.config['hosts'].append(conn)
self.assertEqual(self.deluge_web.web_api._get_host(conn[0]), conn[0:4])
# Remove valid host
self.assertTrue(self.deluge_web.web_api.remove_host(conn[0]))
self.assertFalse(self.deluge_web.web_api._get_host(conn[0]))
# Remove non-existing host
self.assertFalse(self.deluge_web.web_api.remove_host(conn[0]))
def test_get_torrent_info(self):
filename = common.get_test_data_file('test.torrent')
ret = self.deluge_web.web_api.get_torrent_info(filename)
self.assertEqual(ret['name'], 'azcvsupdater_2.6.2.jar')
self.assertEqual(ret['info_hash'], 'ab570cdd5a17ea1b61e970bb72047de141bce173')
self.assertTrue('files_tree' in ret)
def test_get_magnet_info(self):
ret = self.deluge_web.web_api.get_magnet_info('magnet:?xt=urn:btih:SU5225URMTUEQLDXQWRB2EQWN6KLTYKN')
self.assertEqual(ret['name'], '953bad769164e8482c7785a21d12166f94b9e14d')
self.assertEqual(ret['info_hash'], '953bad769164e8482c7785a21d12166f94b9e14d')
self.assertTrue('files_tree' in ret)
@defer.inlineCallbacks
def test_get_torrent_files(self):
yield self.deluge_web.web_api.connect(self.host_id)
filename = common.get_test_data_file('test.torrent')
torrents = [{'path': filename, 'options': {'download_location': '/home/deluge/'}}]
yield self.deluge_web.web_api.add_torrents(torrents)
ret = yield self.deluge_web.web_api.get_torrent_files('ab570cdd5a17ea1b61e970bb72047de141bce173')
self.assertEqual(ret['type'], 'dir')
self.assertEqual(
ret['contents'], {
'azcvsupdater_2.6.2.jar': {
'priority': 4, 'index': 0, 'offset': 0, 'progress': 0.0, 'path':
'azcvsupdater_2.6.2.jar', 'type': 'file', 'size': 307949}})
@defer.inlineCallbacks
def test_download_torrent_from_url(self):
filename = 'ubuntu-9.04-desktop-i386.iso.torrent'
self.deluge_web.top_level.putChild(filename, File(common.get_test_data_file(filename)))
url = 'http://localhost:%d/%s' % (self.webserver_listen_port, filename)
res = yield self.deluge_web.web_api.download_torrent_from_url(url)
self.assertTrue(res.endswith(filename))
@defer.inlineCallbacks
def test_invalid_json(self):
"""
If json_api._send_response does not return server.NOT_DONE_YET
this error is thrown when json is invalid:
exceptions.RuntimeError: Request.write called on a request after Request.finish was called.
"""
agent = Agent(reactor)
bad_body = b'{ method": "auth.login" }'
d = yield agent.request(
b'POST',
b'http://127.0.0.1:%s/json' % self.webserver_listen_port,
Headers({b'User-Agent': [b'Twisted Web Client Example'],
b'Content-Type': [b'application/json']}),
FileBodyProducer(BytesIO(bad_body)))
yield d
bd808/striker | striker/labsauth/views.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Wikimedia Foundation and contributors.
# All Rights Reserved.
#
# This file is part of Striker.
#
# Striker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Striker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Striker. If not, see <http://www.gnu.org/licenses/>.
import logging
from django import shortcuts
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import views as auth_views
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from ratelimitbackend import views as ratelimit_views
import mwoauth
from striker.labsauth import forms
from striker.labsauth import utils
NEXT_PAGE = 'striker.oauth.next_page'
REQUEST_TOKEN_KEY = 'striker.oauth.request_token'
ACCESS_TOKEN_KEY = 'striker.oauth.access_token'
logger = logging.getLogger(__name__)
def login(req):
resp = ratelimit_views.login(
request=req,
template_name='labsauth/login.html',
authentication_form=forms.LabsAuthenticationForm)
if 'remember_me' in req.POST:
req.session.set_expiry(settings.REMEMBER_ME_TTL)
req.session.save()
return resp
def logout(req):
auth_views.logout(req)
return shortcuts.redirect(urlresolvers.reverse('index'))
def oauth_initiate(req):
"""Initiate an OAuth login."""
next_page = req.GET.get('next', None)
if next_page is not None:
req.session[NEXT_PAGE] = next_page
consumer_token = mwoauth.ConsumerToken(
settings.OAUTH_CONSUMER_KEY, settings.OAUTH_CONSUMER_SECRET)
try:
redirect, request_token = mwoauth.initiate(
settings.OAUTH_MWURL,
consumer_token,
req.build_absolute_uri(
urlresolvers.reverse('labsauth:oauth_callback')))
except Exception:
# FIXME: get upstream to use a narrower exception class
logger.exception('mwoauth.initiate failed')
messages.error(req, _("OAuth handshake failed."))
return shortcuts.redirect(next_page or '/')
else:
# Convert to unicode for session storage
req.session[REQUEST_TOKEN_KEY] = utils.tuple_to_unicode(request_token)
return shortcuts.redirect(redirect)
def oauth_callback(req):
"""OAuth handshake callback."""
serialized_token = req.session.get(REQUEST_TOKEN_KEY, None)
if serialized_token is None:
messages.error(req, _("Session invalid."))
return shortcuts.redirect(
urlresolvers.reverse('labsauth:oauth_initiate'))
# Convert from unicode stored in session to bytes expected by mwoauth
serialized_token = utils.tuple_to_bytes(serialized_token)
consumer_token = mwoauth.ConsumerToken(
settings.OAUTH_CONSUMER_KEY, settings.OAUTH_CONSUMER_SECRET)
request_token = mwoauth.RequestToken(*serialized_token)
access_token = mwoauth.complete(
settings.OAUTH_MWURL,
consumer_token,
request_token,
req.META['QUERY_STRING'])
# Convert to unicode for session storage
req.session[ACCESS_TOKEN_KEY] = utils.tuple_to_unicode(access_token)
req.user.set_accesstoken(access_token)
sul_user = mwoauth.identify(
settings.OAUTH_MWURL, consumer_token, access_token)
req.user.sulname = sul_user['username']
req.user.sulemail = sul_user['email']
req.user.realname = sul_user['realname']
req.user.save()
messages.info(req, _("Authenticated as OAuth user {user}".format(
user=sul_user['username'])))
return shortcuts.redirect(req.session.get(NEXT_PAGE, '/'))
ninchat/thumq | test.py
#!/usr/bin/env python3
import argparse
import base64
import os
import signal
import socket
import subprocess
import sys
import tempfile
import time
import webbrowser
from contextlib import closing
from struct import pack, unpack
socket_path = "./test.socket"
imagedir = "exif-orientation-examples"
protoc = os.environ.get("PROTOC", "protoc")
compiledir = tempfile.mkdtemp()
try:
subprocess.check_call([protoc, "--python_out", compiledir, "thumq.proto"])
sys.path.insert(0, compiledir)
sys.dont_write_bytecode = True
import thumq_pb2
sys.path.pop(0)
finally:
for dirpath, dirnames, filenames in os.walk(compiledir, topdown=False):
for filename in filenames:
os.remove(os.path.join(dirpath, filename))
for dirname in dirnames:
os.rmdir(os.path.join(dirpath, dirname))
os.rmdir(compiledir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--browser", action="store_true", help="open result images in web browser tabs")
parser.add_argument("--top-square", action="store_true", help="enable cropping")
parser.add_argument("scale", type=int, help="maximum width/height of result image")
args = parser.parse_args()
request = thumq_pb2.Request()
request.scale = args.scale
crop = "no-crop"
if args.top_square:
request.crop = thumq_pb2.Request.TOP_SQUARE
crop = "top-square"
request_data = request.SerializeToString()
service = subprocess.Popen(["./thumq", socket_path])
try:
for _ in range(10):
if os.path.exists(socket_path):
break
time.sleep(0.2)
files = []
for kind in ["Landscape", "Portrait"]:
for num in range(1, 8 + 1):
filename = "{}_{}.jpg".format(kind, num)
filepath = os.path.join(imagedir, filename)
files.append((filepath, "image/jpeg", True))
files.append(("test.pdf", "application/pdf", False))
for filepath, expect_type, expect_thumbnail in files:
print(filepath)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_CLOEXEC)
with closing(sock):
sock.connect(socket_path)
send(sock, request_data)
with open(filepath, "rb") as f:
send(sock, f.read())
response = thumq_pb2.Response.FromString(receive(sock))
output_data = receive(sock)
if expect_thumbnail:
assert response.source_type == expect_type, response
assert response.nail_width in range(1, args.scale + 1), response
assert response.nail_height in range(1, args.scale + 1), response
assert output_data
if args.browser:
output_b64 = base64.standard_b64encode(output_data).decode()
webbrowser.open_new_tab("data:image/jpeg;base64," + output_b64)
else:
with open(filepath.replace(imagedir + "/", "test-output/" + crop + "/"), "wb") as f:
f.write(output_data)
else:
assert response.source_type == expect_type, response
assert not response.nail_width, response
assert not response.nail_height, response
assert not output_data
finally:
os.kill(service.pid, signal.SIGINT)
service.wait()
def send(sock, data):
sock.send(pack("<I", len(data)))
sock.send(data)
def receive(sock):
size, = unpack("<I", sock.recv(4))
return sock.recv(size)
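# Framing note: send()/receive() implement a minimal length-prefixed protocol:
# every message is a 4-byte little-endian unsigned length followed by that many
# payload bytes. For example, sending b"hello" puts these bytes on the wire:
#
#   05 00 00 00 68 65 6c 6c 6f
#
# (Caveat: a single recv() may legally return fewer bytes than requested; that
# is fine for the small messages on this local unix socket, but a
# general-purpose client would loop until `size` bytes have been read.)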
if __name__ == "__main__":
main()
google-research/language | language/capwap/synthetic/filter_round_trip.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filter for round trip consistency in QA generations.
This uses a RC model with a no answer option.
This code follows the round-trip consistency check from the paper:
Chris Alberti, Daniel Andor, Emily Pitler, Jacob Devlin, and Michael Collins.
2019. Synthetic QA Corpora Generation with Roundtrip Consistency. In ACL.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import app
from absl import flags
from language.capwap.utils import experiment_utils
from language.capwap.utils import reward_utils
from language.capwap.utils import text_utils
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
DATA_DIR = os.getenv("CAPWAP_DATA", "data")
flags.DEFINE_string("input_file", None, "Input TFRecord file.")
flags.DEFINE_string("output_file", None, "Where to write to.")
flags.DEFINE_integer("max_answer_length", 10,
"Maximum answer length for prediction.")
flags.DEFINE_integer("seq_length", 128, "Padded input length.")
flags.DEFINE_float("no_answer_bias", 0, "Bias for CLS prediction.")
flags.DEFINE_string("rc_model", os.path.join(DATA_DIR, "rc_model"),
"TF Hub handle for BERT QA model.")
flags.DEFINE_string("vocab_path", os.path.join(DATA_DIR, "uncased_vocab.txt"),
"Path to BERT directory.")
FLAGS = flags.FLAGS
def clean(text):
"""Postprocessing."""
text = text.strip()
text = " ".join(text.split())
return text
def input_fn(params, input_file):
"""tf.data.Dataset."""
def _parse_example(serialized_example):
"""Parse a serialized example proto."""
features = tf.io.parse_single_example(
serialized_example,
features={
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([params["seq_length"]], tf.int64),
"input_mask": tf.FixedLenFeature([params["seq_length"]], tf.int64),
"segment_ids": tf.FixedLenFeature([params["seq_length"]], tf.int64),
"start_positions": tf.FixedLenFeature([], tf.int64),
"end_positions": tf.FixedLenFeature([], tf.int64),
"answer_types": tf.FixedLenFeature([], tf.int64),
})
# Remove special [Q] token inserted before start of question.
for k in ["input_ids", "input_mask", "segment_ids"]:
v = features[k]
features[k] = tf.concat([[v[0]], v[2:]], axis=0)
return features
dataset = tf.data.TFRecordDataset(input_file, buffer_size=16 * 1024 * 1024)
dataset = dataset.map(
_parse_example, num_parallel_calls=params["num_input_threads"])
dataset = dataset.batch(params["batch_size"], drop_remainder=True)
dataset = dataset.prefetch(params["prefetch_batches"])
return dataset
def model_fn(features, labels, mode, params):
"""A model function satisfying the tf.estimator API."""
del labels
assert mode == tf.estimator.ModeKeys.PREDICT, "Mode should be PREDICT."
rc_model = hub.Module(params["rc_model"])
outputs = rc_model(
inputs=dict(
input_ids=tf.cast(features["input_ids"], tf.int32),
input_mask=tf.cast(features["input_mask"], tf.int32),
segment_ids=tf.cast(features["segment_ids"], tf.int32)),
signature="extractive_qa",
as_dict=True)
start, end, _ = reward_utils.max_scoring_span(
start_scores=outputs["start_logits"],
end_scores=outputs["end_logits"],
max_length=params["max_answer_length"],
no_answer_bias=params["no_answer_bias"])
is_consistent = tf.logical_and(
tf.logical_and(tf.greater(start, 0), tf.greater(end, 0)),
tf.logical_and(
tf.equal(start, tf.cast(features["start_positions"] - 1, tf.int32)),
tf.equal(end, tf.cast(features["end_positions"] - 1, tf.int32))))
return tf.estimator.tpu.TPUEstimatorSpec(
mode=mode,
predictions=dict(
unique_ids=features["unique_ids"],
input_ids=features["input_ids"],
start=start,
end=end,
is_consistent=is_consistent))
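# Round-trip consistency in brief: a generated (question, answer) pair is kept
# only when the RC model, given the same question and context, predicts a
# non-null span (start > 0 and end > 0) that exactly matches the original
# annotated span. The `- 1` offsets above compensate for the special [Q] token
# that input_fn strips from the front of each sequence.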
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info("***** Generating captions *****")
# Load vocab
vocab = text_utils.Vocab.load(FLAGS.vocab_path)
# Update params.
params = dict(
seq_length=FLAGS.seq_length,
model_dir=os.path.dirname(FLAGS.output_file),
max_answer_length=FLAGS.max_answer_length,
batch_size=FLAGS.batch_size,
rc_model=FLAGS.rc_model,
eval_batch_size=FLAGS.eval_batch_size,
no_answer_bias=FLAGS.no_answer_bias,
num_input_threads=FLAGS.num_input_threads,
predict_batch_size=FLAGS.predict_batch_size,
prefetch_batches=FLAGS.prefetch_batches,
use_tpu=FLAGS.use_tpu,
)
# Get estimator.
estimator = experiment_utils.get_estimator(model_fn, params)
# Write predictions.
tf.logging.info("Writing predictions to disk...")
tf.io.gfile.makedirs(os.path.dirname(FLAGS.output_file))
with tf.io.gfile.GFile(FLAGS.output_file, "w") as f:
iterator = estimator.predict(
input_fn=functools.partial(input_fn, input_file=FLAGS.input_file),
yield_single_examples=True)
total = 0
for i, ex in enumerate(iterator, 1):
if ex["is_consistent"]:
tokens = [vocab.i2t(idx) for idx in ex["input_ids"]]
breakpoint = tokens.index(vocab.PAD)
question = clean(" ".join(vocab.clean(tokens[1:breakpoint])))
context = clean(" ".join(vocab.clean(tokens[breakpoint:])))
answer = clean(" ".join(tokens[ex["start"]:ex["end"] + 1]))
output = [str(ex["unique_ids"]), question, answer, context]
output = "\t".join(output)
f.write(output + "\n")
total += 1
if total % 10000 == 0:
tf.logging.info("Wrote %d predictions", total)
if i % 10000 == 0:
tf.logging.info("Processed %d examples", i)
tf.logging.info("Done.")
if __name__ == "__main__":
tf.disable_v2_behavior()
app.run(main)
bjthinks/orbital-explorer | radial_analyzer.py
# This file is part of the Electron Orbital Explorer. The Electron
# Orbital Explorer is distributed under the Simplified BSD License
# (also called the "BSD 2-Clause License"), in hopes that these
# rendering techniques might be used by other programmers in
# applications such as scientific visualization, video gaming, and so
# on. If you find value in this software and use its technologies for
# another purpose, I would love to hear back from you at bjthinks (at)
# gmail (dot) com. If you improve this software and agree to release
# your modifications under the below license, I encourage you to fork
# the development tree on github and push your modifications. The
# Electron Orbital Explorer's development URL is:
# https://github.com/bjthinks/orbital-explorer
# (This paragraph is not part of the software license and may be
# removed.)
#
# Copyright (c) 2013, Brian W. Johnson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# + Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# + Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numbers
from math import exp, sqrt
from license import license
class Polynomial:
"""Polynomials, immutable, with floating point coefficients"""
# Paul's suggestion: make a list constructor.
def __init__(self, c = 0, n = 0):
'''Polynomial(c, n) creates the polynomial c*x^n.
Polynomial([c0, c1, ..., cn]) creates the polynomial c0 + c1 x + ...'''
# self.__coeffs[n] is the coefficient of x^n. Invariant:
# if len(self.__coeffs) > 0 then self.__coeffs[-1] != 0
if isinstance(c, list):
self.__coeffs = list(c)
else:
self.__coeffs = [0] * n + [c]
self.__standardize()
def __standardize(self):
while self.degree >= 0 and self.__coeffs[-1] == 0:
self.__coeffs.pop()
@property
def degree(self):
return len(self.__coeffs) - 1
@property
def constantTerm(self):
if self.degree == -1:
return 0
else:
return self.__coeffs[0]
@property
def leadingCoefficient(self):
if self.degree == -1:
return 0
else:
return self.__coeffs[-1]
def __eq__(self, other):
return self.__coeffs == other.__coeffs
def __ne__(self, other):
return not (self == other)
def __call__(self, x):
total = 0
for c in reversed(self.__coeffs):
total *= x
total += c
return total
def __add__(self, other):
if isinstance(other, numbers.Number):
return self + Polynomial(other)
if self.degree < other.degree:
sm = self.__coeffs
lg = other.__coeffs
else:
sm = other.__coeffs
lg = self.__coeffs
s = list(lg)
for i in range(len(sm)):
s[i] += sm[i]
return Polynomial(s)
def __radd__(self, other):
return self + other
def __pos__(self):
return self
def __neg__(self):
return Polynomial([-x for x in self.__coeffs])
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
return (-self) + other
def __mul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return self * Polynomial(other)
p = [0] * (self.degree + other.degree + 1)
for i in range(len(self.__coeffs)):
for j in range(len(other.__coeffs)):
p[i + j] += self.__coeffs[i] * other.__coeffs[j]
return Polynomial(p)
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
return self * (1 / other)
def __pow__(self, e):
if e < 0:
raise ArithmeticError('Polynomial to a negative power')
if e == 0:
return Polynomial(1)
if e == 1:
return self
if e % 2 == 0:
return (self * self) ** (e >> 1)
return self * (self ** (e - 1))
def derivative(self):
return Polynomial([i * self.__coeffs[i]
for i in range(1, self.degree + 1)])
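# Quick illustration (doctest style, not executed on import) of how the class
# above behaves; the coefficient list is ordered constant term first:
#
#   >>> p = Polynomial([1, -3, 2])      # 1 - 3x + 2x^2
#   >>> p(2)                            # Horner evaluation: 1 - 6 + 8
#   3
#   >>> p.derivative()(2)               # d/dx = -3 + 4x, evaluated at 2
#   5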
def factorial(n):
if n < 0:
raise ArithmeticError('Factorial of a negative number')
f = 1
for i in range(2, n + 1):
f *= i
return f
def choose(n, k):
return factorial(n) // (factorial(k) * factorial(n - k))
def laguerre(n, a):
x = Polynomial(1, 1)
f = 0
for i in range(n + 1):
f += ((-1) ** i) * choose(n + a, n - i) * (x ** i) / factorial(i)
return f
def bisect(f, lower, upper):
if not (lower < upper):
raise Exception('bisect: lower not less than upper')
f_lower = f(lower)
if f_lower == 0:
return lower
f_upper = f(upper)
if f_upper == 0:
return upper
if (f_lower < 0 and f_upper < 0) or (f_lower > 0 and f_upper > 0):
raise Exception('bisect: no sign change present')
while True:
mid = (lower + upper) / 2
if not (lower < mid < upper):
return mid
f_mid = f(mid)
if f_mid == 0:
return mid
if f_mid < 0:
if f_lower < 0:
lower = mid
f_lower = f_mid
else:
upper = mid
f_upper = f_mid
else:
if f_lower > 0:
lower = mid
f_lower = f_mid
else:
upper = mid
f_upper = f_mid
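# Example: bisect() only needs a sign change on [lower, upper], so it works for
# any callable, not just Polynomial instances:
#
#   >>> bisect(lambda x: x * x - 2.0, 0.0, 2.0)   # converges on sqrt(2)
#   1.4142135623730951                            # (double precision; the
#                                                 # last ulp may vary)
#
# The loop terminates when the midpoint can no longer be represented strictly
# between the bounds, i.e. it bisects down to floating-point resolution.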
def roots(f):
if f.degree < 1:
if f.constantTerm != 0:
return []
raise Exception('roots called on the zero polynomial')
if f.degree == 1:
return [-f.constantTerm / f.leadingCoefficient]
df = f.derivative()
df_roots = roots(df)
leading_coeff_f = f.leadingCoefficient
degree_f = f.degree
# First, handle the case where df has no roots
if len(df_roots) == 0:
assert degree_f % 2 == 1
f0 = f(0)
if f0 == 0:
return [0]
if leading_coeff_f > 0 and f0 < 0:
upper = 1
while f(upper) <= 0:
upper += 1
return [bisect(f, 0, upper)]
if leading_coeff_f > 0 and f0 > 0:
lower = -1
while f(lower) >= 0:
lower -= 1
return [bisect(f, lower, 0)]
if leading_coeff_f < 0 and f0 > 0:
upper = 1
while f(upper) >= 0:
upper += 1
return [bisect(f, 0, upper)]
if leading_coeff_f < 0 and f0 < 0:
lower = -1
while f(lower) <= 0:
lower -= 1
return [bisect(f, lower, 0)]
raise Exception('Impossible monotonic polynomial')
r = []
# Check for a root to the left of the first root of df
first_df_root = df_roots[0]
f_at_first_df_root = f(first_df_root)
negative_behavior_f = leading_coeff_f * ((-1) ** degree_f)
if negative_behavior_f > 0 and f_at_first_df_root < 0:
lower_bound_on_first_root = first_df_root - 1
while f(lower_bound_on_first_root) <= 0:
lower_bound_on_first_root -= 1
r.append(bisect(f, lower_bound_on_first_root, first_df_root))
if negative_behavior_f < 0 and f_at_first_df_root > 0:
lower_bound_on_first_root = first_df_root - 1
while f(lower_bound_on_first_root) >= 0:
lower_bound_on_first_root -= 1
r.append(bisect(f, lower_bound_on_first_root, first_df_root))
# Look at each pair of roots of df
for i in range(len(df_roots) - 1):
dr1 = df_roots[i]
dr2 = df_roots[i + 1]
fdr1 = f(dr1)
fdr2 = f(dr2)
if fdr1 > 0 and fdr2 < 0 or fdr1 < 0 and fdr2 > 0:
r.append(bisect(f, dr1, dr2))
if fdr1 == 0:
r.append(dr1)
# Last one -- just check if it's a root of f
if f(df_roots[-1]) == 0:
r.append(df_roots[-1])
# Check for a root to the right of the last root of df
last_df_root = df_roots[-1]
f_at_last_df_root = f(last_df_root)
positive_behavior_f = leading_coeff_f
if positive_behavior_f > 0 and f_at_last_df_root < 0:
upper_bound_on_last_root = last_df_root + 1
while f(upper_bound_on_last_root) <= 0:
upper_bound_on_last_root += 1
r.append(bisect(f, last_df_root, upper_bound_on_last_root))
if positive_behavior_f < 0 and f_at_last_df_root > 0:
upper_bound_on_last_root = last_df_root + 1
while f(upper_bound_on_last_root) >= 0:
upper_bound_on_last_root += 1
r.append(bisect(f, last_df_root, upper_bound_on_last_root))
return r
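# Example: roots() recurses on the derivative to bracket each real root, so for
# x^2 - 2 it brackets on either side of the single critical point at x = 0:
#
#   >>> roots(Polynomial([-2, 0, 1]))   # x^2 - 2
#   [-1.4142135623730951, 1.4142135623730951]    # (shown to double precision)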
def list_to_cpp(nums):
if nums == []:
return ' {}'
return ' {\n ' + ',\n '.join([str(n) for n in nums]) + \
'\n }'
max_n = 16
def make_table3(name, func):
'''Make a C++ table of arrays for each n and L'''
sn = str(max_n)
print('const double ' + name + '[' + sn + '][' + sn + '][' + sn + '] = {')
for n in range(1, max_n + 1):
print(' // n ==', n)
print(' {')
for L in range(0, n):
print(' // L ==', L)
s = list_to_cpp(func(n, L))
if L != n - 1:
s += (',')
print(s)
if n != max_n:
print(' },')
else:
print(' }')
print('};')
def make_table2(name, func):
'''Make a C++ table of values for each n and L'''
sn = str(max_n)
print('const double ' + name + '[' + sn + '][' + sn + '] = {')
for n in range(1, max_n + 1):
print(' // n ==', n)
print(' {')
for L in range(0, n):
print(' // L ==', L)
s = ' ' + str(func(n, L))
if L != n - 1:
s += (',')
print(s)
if n != max_n:
print(' },')
else:
print(' }')
print('};')
'''
The radial factor of the wave function is of the form:
(x ^ L) * exp(-x / 2) * Laguerre(x)
To find radial nodes, we set this to zero, and look for nonzero
solutions. These occur iff the Laguerre polynomial factor is zero.
'''
def radial_nodes(n, L):
return roots(laguerre(n - L - 1, 2 * L + 1))
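# Sanity check: for the hydrogenic 2s state (n=2, L=0) the associated Laguerre
# factor is laguerre(1, 1) = 2 - x, giving one radial node at x = 2 in the
# scaled radial coordinate used throughout this file:
#
#   >>> radial_nodes(2, 0)
#   [2.0]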
'''
To find radial maxima, we set the derivative of the radial factor to
zero, like so:
(L * Laguerre(x) + x * (-1 / 2) * Laguerre(x) + x * Laguerre'(x))
* (x ^ (L-1)) * exp(-x / 2) = 0
Note that this is correct only for positive L, and we must handle the
case L=0 separately.
Simplifying, and ignoring the solution x=0, we get:
(L - x / 2) * Laguerre(x) + x * Laguerre'(x) = 0
For the special case L=0, we instead have:
(-1 / 2) * Laguerre(x) + Laguerre'(x) = 0,
which differs only in not having zero as a root. (Note that an extra
root at x=0 would confuse the C++ use of the table, where zero is
treated as an 'end of data' marker.)
'''
def radial_maxima(n, L):
x = Polynomial(1,1)
la = laguerre(n - L - 1, 2 * L + 1)
dla = la.derivative()
if L != 0:
f = (L - x / 2) * la + x * dla
else:
f = (-1 / 2) * la + dla
return roots(f)
def radial_extent(n, L):
maxes = radial_maxima(n, L)
maxes.append(0)
la = laguerre(n - L - 1, 2 * L + 1)
def f(r):
return abs((r ** L) * exp(-r / 2) * la(r))
big_f = max([f(r) for r in maxes])
upper_x = max(maxes) + 1
while f(upper_x) > big_f / 1e5:
upper_x += 1
return upper_x
def radial_extent2(n, L):
maxes = radial_maxima(n, L)
maxes.append(0)
la = laguerre(n - L - 1, 2 * L + 1)
def f(r):
return ((r ** L) * exp(-r / 2) * la(r)) ** 2
big_f = max([f(r) for r in maxes])
upper_x = max(maxes) + 1
while f(upper_x) > big_f / 1e5:
upper_x += 1
return upper_x
dx = 0.01
def radial_integral(n, L):
outer = radial_extent(n, L)
la = laguerre(n - L - 1, 2 * L + 1)
c = sqrt(factorial(n - L - 1) / (2 * n * factorial(n + L)))
def f(r):
return abs(c * (r ** L) * exp(-r / 2) * la(r))
tot = 0
for s in range(0, int(outer / dx - 0.5)):
x = s * dx
tot += dx * (f(x) + f(x + dx)) / 2
return tot
def radial_integral2(n, L):
outer = radial_extent2(n, L)
la = laguerre(n - L - 1, 2 * L + 1)
c = sqrt(factorial(n - L - 1) / (2 * n * factorial(n + L)))
def f(r):
return (c * (r ** L) * exp(-r / 2) * la(r)) ** 2
tot = 0
for s in range(0, int(outer / dx - 0.5)):
x = s * dx
tot += dx * (f(x) + f(x + dx)) / 2
return tot
if __name__ == '__main__':
for s in license('c'):
print(s)
print('')
print('#include "radial_data.hh"')
print('')
make_table3('radial_nodes', radial_nodes)
print('')
make_table3('radial_maxima', radial_maxima)
print('')
make_table2('radial_extent', radial_extent)
print('')
make_table2('radial_extent2', radial_extent2)
print('')
make_table2('radial_integral', radial_integral)
print('')
make_table2('radial_integral2', radial_integral2)
redsolution/django-menu-proxy | example/models.py
from django.db import models
from django.core.urlresolvers import reverse
class Page(models.Model):
class Meta:
ordering = ['id']
slug = models.CharField(max_length=100)
title = models.CharField(max_length=100)
text = models.TextField()
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
def get_absolute_url(self):
return reverse('page', kwargs={'slug': self.slug})
def __unicode__(self):
return self.slug
def get_ancestors(self):
ancestors = []
parent = self.parent
while parent is not None:
ancestors.append(parent)
parent = parent.parent
return self.__class__.objects.filter(id__in=[ancestor.id for ancestor in ancestors])
# Or use:
#mptt.register(Page)
class Catalog(models.Model):
class Meta:
ordering = ['id']
title = models.CharField(max_length=100)
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
visible = models.BooleanField()
def get_absolute_url(self):
return reverse('catalog', kwargs={'object_id': self.pk})
def __unicode__(self):
return unicode(self.pk)
def get_ancestors(self):
ancestors = []
parent = self.parent
while parent is not None:
ancestors.append(parent)
parent = parent.parent
return self.__class__.objects.filter(id__in=[ancestor.id for ancestor in ancestors])
# Or use:
#mptt.register(Catalog)
class News(models.Model):
class Meta:
ordering = ['id']
text = models.TextField()
def title(self):
return self.text
def get_absolute_url(self):
return reverse('news', kwargs={'object_id': self.pk})
def __unicode__(self):
return unicode(self.pk)
VMatrixTeam/open-matrix | src/webservice/handlers/question/detail.py
# coding=utf-8
from handlers.base import BaseController
from tornado.web import gen
from model.question.question import Question
from model.question.answer import Answer
from model.question.vote import Vote
from model.user import User
from model.question.tag import Tag
from model.question.comment import Comment
class QuestionDetailHandler(BaseController):
@gen.coroutine
def get(self, qid):
question = yield Question.get_question_by_qid(qid)
if question == None:
self.redirect("/404")
raise gen.Return()
question.author = yield User.get_user_by_id(question.author)
question.votes = yield Vote.get_votes_by_qid(question.qid)
question.answers = yield Answer.get_answers_count_by_qid(question.qid)
question.tags = yield Tag.get_tags_by_qid(question.qid)
question.comments = yield Comment.get_comments_by_qid(question.qid)
answers = yield Answer.get_answers_by_qid(question.qid)
for answer in answers:
answer.author = yield User.get_user_by_id(answer.author)
answer.comments = yield Comment.get_comments_by_aid(answer.aid)
answer.votes = yield Vote.get_votes_by_aid(answer.aid)
data = {
"current_user": self.current_user,
'question': question,
'answers' : answers
}
self.render('question/question-detail.jade', **data)
def post(self):
pass
MLAB-project/PyMeteostation | pymeteostation/MeteostationLib.py
from pymlab import config
import time, json, urllib, urllib2, sys, os, ast, ConfigParser, base64
class Meteostation:
def __init__(self,configFileName):
self.settings = self.__getSettings(configFileName)
try:
cfg = config.Config(i2c={"port":1}, bus=self.settings["I2C_configuration"])
cfg.initialize()
self.NameTypeDict = self.__getTypes(self.settings["I2C_configuration"])
self.Devices = {}
for device in self.__getNames(self.settings["I2C_configuration"]):
self.Devices[device] = cfg.get_device(device)
except Exception, e:
sys.exit("Initialization of I2c failed: "+str(e))
time.sleep(0.5)
def getData(self,requestList="all"): # returns requested sensor data
outputList = {}
outputList["time"] = int(time.time())
if requestList == "all":
for device in self.Devices.keys():
outputList[device] = self.__getSensorData(device,self.NameTypeDict[device])
else:
for request in requestList:
outputList[request] = self.__getSensorData(request,self.NameTypeDict[device])
return outputList
def __getSensorData(self,sensorName,sensorType): # must return list
try:
if sensorType == "sht25":
self.Devices[sensorName].route()
return [self.Devices[sensorName].get_hum(),self.Devices[sensorName].get_temp()]
elif sensorType == "altimet01": # returns atmospheric pressure readings corrected to sea level altitude.
self.Devices[sensorName].route()
data = self.Devices[sensorName].get_tp()
return [data[0],data[1]/((1-((0.0065*self.settings["altitude"])/288.15))**5.255781292873008*100)]
except Exception, e:
print sensorName + " sensor error:",str(e)
return ["error",str(e)]
def log(self,dataDict,logFileName=""): # logging function
if logFileName == "":
logFileName = time.strftime("%Y-%m-%d:%H-", time.localtime()) + "meteoData.log"
FULLlogFileName = self.settings["logpath"] + time.strftime("%Y/", time.localtime()) + time.strftime("%m/", time.localtime()) + time.strftime("%d/", time.localtime()) + logFileName
if not os.path.exists(FULLlogFileName):
self.__generateLogFile(logFileName,self.settings["logpath"] + time.strftime("%Y/", time.localtime()) + time.strftime("%m/", time.localtime()) + time.strftime("%d/", time.localtime()))
try:
with open(FULLlogFileName,"r") as f:
savedData = json.load(f)
with open(FULLlogFileName,"w") as f:
savedData.append(dataDict)
f.write(json.dumps(savedData))
except Exception, e:
print "Logging failed:", str(e)
def __generateLogFile(self,logFileName,logPath): # generator of a log file
defaultLog = []
try:
if not logPath == "" and not os.path.exists(logPath):
os.makedirs(logPath)
with open(logPath+logFileName,"w") as f:
f.write(json.dumps(defaultLog))
except Exception, e:
print "Cannot generate log file:",str(e)
def sendData(self,username,password,sendDict): # sends data to openweathermap.com
sendData = self.translateToPOST(sendDict)
url = "http://openweathermap.org/data/post"
request = urllib2.Request(url,data=urllib.urlencode(sendData),headers={"Authorization":"Basic "+base64.encodestring(username+":"+password)[:-1]})
try:
result = urllib2.urlopen(request)
except urllib2.URLError as e:
if hasattr(e, "code"):
return (False, {"message":e.reason,"cod":e.code,"id":"0"})
else:
return (False, {"message":e.reason,"cod":"Failed to reach server","id":"0"})
except Exception as e:
return (False, {"message":str(e),"cod":"Network error","id":"0"})
else:
try:
result = result.read()
return (True, json.loads(result))
except Exception as e:
return (False, {"message":result,"cod":str(e),"id":"0"})
def translateToPOST(self,sendDict): # translates sensor values to POST request format
payload = {}
for itemKey in sendDict.keys():
if not itemKey == "time" and not sendDict[itemKey][0] == "error":
for transList in self.settings["Translation_Into_POST"]:
if itemKey == transList[1]:
payload[transList[0]] = str(round(sendDict[itemKey][transList[2]],2))
if self.settings["stationname"]:
payload["name"] = str(self.settings["stationname"])
if self.settings["latitude"] and self.settings["longitude"]:
payload["lat"] = str(self.settings["latitude"])
payload["long"] = str(self.settings["longitude"])
if self.settings["altitude"]:
payload["alt"] = str(self.settings["altitude"])
return payload
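    # Illustrative mapping for translateToPOST above (the names below are
    # hypothetical config values, not part of this file): with
    # Translation_Into_POST entries such as
    #   [['humidity', 'sht25_outdoor', 0], ['temp', 'sht25_outdoor', 1]]
    # a reading {'time': 1487250000, 'sht25_outdoor': [45.2, 21.3]} becomes
    #   {'humidity': '45.2', 'temp': '21.3'}
    # plus name/lat/long/alt when those station settings are configured.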
def __getNames(self,busConfig): # recursively searches for all "name" dictionary keys and returns their values: ["name1", "name2", ...]
names = []
for item in busConfig:
for key in item.keys():
if key == "name":
names.append(item[key])
if type(item[key]) == list:
names += self.__getNames(item[key])
return names
def __getTypes(self,busConfig): # recursively searches for all "name" and "type" dictionary keys and return their values: {name:type, ...}
names = {}
for item in busConfig:
for key in item.keys():
if key == "name":
names[item[key]] = item["type"]
if type(item[key]) == list:
names = dict(names.items() + self.__getTypes(item[key]).items())
return names
def __getSettings(self,fileName): # returns settings dictionary made of config file
parser = ConfigParser.SafeConfigParser()
try:
parser.read(fileName)
except Exception, e:
sys.exit("Unable to load configuration file. Error: "+str(e))
options = {}
for sectionName in ["Meteostation","I2C_Device","Translation_Into_POST"]:
if not parser.has_section(sectionName):
sys.exit("Unable to find \'%s\' section" % (sectionName))
else:
options[sectionName] = parser.options(sectionName)
        requiredOptions = ["username", "password", "uploadinterval", "logpath"]
        missingOptions = list(requiredOptions)  # copy, so requiredOptions itself is not mutated below
        missingOptionsString = ""
        for requiredOptionID in range(len(requiredOptions)):
            for option in options["Meteostation"]:
                if option == requiredOptions[requiredOptionID]:
                    missingOptions[requiredOptionID] = ""
                    break
        for missingOption in missingOptions:
            if missingOption != "":
                missingOptionsString += "\'" + missingOption + "\', "
        if len(missingOptionsString) != 0:
            sys.exit("Unable to find %s option(s)." % (missingOptionsString[:len(missingOptionsString) - 2]))
possibleOptions = ["username","password","uploadinterval","logpath","stationname","latitude","longitude","altitude"]
settings = {}
try:
for option in possibleOptions:
if parser.has_option("Meteostation",option):
try:
settings[option] = float(parser.get("Meteostation",option))
except ValueError:
settings[option] = parser.get("Meteostation",option)
else:
settings[option] = ""
if not settings["altitude"]:
settings["altitude"] = 0
settings["I2C_configuration"] = [self.__getI2CConfig(parser,"I2C_Device")]
settings["Translation_Into_POST"] = []
for option in options["Translation_Into_POST"]:
if parser.get("Translation_Into_POST",option) == "":
translationListPart = ['',0]
else:
try:
translationListPart = self.__getOptionList(parser.get("Translation_Into_POST",option))
if len(translationListPart) != 2:
print "Strange value set to option \'%s\'. Using default value." % (option)
translationListPart = ['',0]
except:
print "Strange value set to option \'%s\'. Using default value." % (option)
translationListPart = ['',0]
settings["Translation_Into_POST"].append([option,translationListPart[0],int(translationListPart[1])])
except Exception, e:
sys.exit("Bad format of configuration file. Error: "+str(e))
return settings
def __getI2CConfig(self,parser,section): # recursively generates I2C configuration from configuration file
result = {}
for option in parser.options(section):
if option == "children":
children = self.__getOptionList(parser.get(section,option))
result[option] = []
for child in children:
result[option].append(self.__getI2CConfig(parser,child))
elif option == "address":
result[option] = int(parser.get(section,option),base=16)
elif option == "channel":
result[option] = int(parser.get(section,option))
else:
result[option] = parser.get(section,option)
return result
def __getOptionList(self,string):
lastPosition = 0
optionList = []
for letterPos in range(len(string)):
if string[letterPos] == ";":
optionList.append(string[lastPosition:letterPos])
lastPosition = letterPos+1
if lastPosition < len(string):
optionList.append(string[lastPosition:len(string)])
        return optionList
dimagi/loveseat | tests/test_aggregated_result.py
from __future__ import absolute_import
import unittest
from datetime import timedelta
from loveseat.aggregated_result import AggregatedResult
from loveseat.result import Result
class TestAggregatedResult(unittest.TestCase):
def setUp(self):
self.resultOne = Result(database='a', elapsed=timedelta(0, 0, 2))
self.resultTwo = Result(database='a', elapsed=timedelta(0, 0, 4))
self.resultThree = Result(database='b', elapsed=timedelta(0, 0, 5))
def test_aggregated_result(self):
ag = AggregatedResult('example')
ag.add_results([self.resultOne, self.resultTwo, self.resultThree])
self.assertEqual(ag.results['a']['avg'], 3)
self.assertEqual(ag.results['a']['max'], 4)
self.assertEqual(ag.results['a']['min'], 2)
self.assertEqual(ag.results['a']['count'], 2)
self.assertEqual(ag.results['b']['avg'], 5)
self.assertEqual(ag.results['b']['max'], 5)
self.assertEqual(ag.results['b']['min'], 5)
self.assertEqual(ag.results['b']['count'], 1)
CHBMB/LazyLibrarian | lazylibrarian/__init__.py
# This file is part of Lazylibrarian.
#
# Lazylibrarian is free software':'you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lazylibrarian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lazylibrarian. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import calendar
import json
import locale
import os
import subprocess
import sys
import threading
import time
import webbrowser
import cherrypy
from lazylibrarian import logger, postprocess, searchnzb, searchtorrents, searchrss, \
librarysync, versioncheck, database, searchmag, magazinescan, bookwork, dbupgrade
from lazylibrarian.cache import fetchURL
from lazylibrarian.common import restartJobs
from lazylibrarian.formatter import getList, bookSeries, plural, unaccented
from lib.apscheduler.scheduler import Scheduler
FULL_PATH = None
PROG_DIR = None
ARGS = None
SIGNAL = None
DAEMON = False
PIDFILE = ''
SYS_ENCODING = ''
SCHED = Scheduler()
INIT_LOCK = threading.Lock()
__INITIALIZED__ = False
started = False
GIT_USER = None
GIT_REPO = None
GIT_BRANCH = None
INSTALL_TYPE = None
CURRENT_VERSION = None
LATEST_VERSION = None
COMMITS_BEHIND = None
COMMIT_LIST = None
DATADIR = None
DBFILE = None
UPDATE_MSG = None
CONFIGFILE = ''
CFG = ''
CURRENT_TAB = '1'
LOGDIR = None
CACHEDIR = None
LOGLIST = []
# Info 1, Debug 2, >2 don't toggle console/file
LOGLEVEL = 2
LOGLIMIT = 500
LOGFULL = False # include debug on screen if true
LOGFILES = 10 # 10 log files
LOGSIZE = 204800 # each up to 200K
CFGLOGLEVEL = None
MATCH_RATIO = 80
DLOAD_RATIO = 90
DISPLAYLENGTH = 5
HTTP_HOST = None
HTTP_PORT = 5299
HTTP_USER = None
HTTP_PASS = None
HTTP_PROXY = None
HTTP_ROOT = None
HTTP_LOOK = None
HTTPS_ENABLED = 0
HTTPS_CERT = None
HTTPS_KEY = None
LAUNCH_BROWSER = 0
API_ENABLED = 0
API_KEY = None
PROXY_HOST = None
PROXY_TYPE = None
SAB_HOST = None
SAB_PORT = 0
SAB_SUBDIR = None
SAB_USER = None
SAB_PASS = None
SAB_API = None
SAB_CAT = None
NZBGET_HOST = None
NZBGET_PORT = 0
NZBGET_USER = None
NZBGET_PASS = None
NZBGET_CATEGORY = None
NZBGET_PRIORITY = 0
DESTINATION_COPY = 0
DESTINATION_DIR = None
ALTERNATE_DIR = None
DOWNLOAD_DIR = None
IMP_PREFLANG = None
IMP_MONTHLANG = None
IMP_ONLYISBN = 0
IMP_SINGLEBOOK = 1
IMP_AUTOADD = None
IMP_AUTOSEARCH = 1
IMP_CONVERT = None
IMP_CALIBREDB = None
GIT_PROGRAM = None
BOOK_API = None
GR_API = None
GB_API = None
NZBMATRIX = 0
NZBMATRIX_USER = None
NZBMATRIX_API = None
NEWZBIN = 0
NEWZBIN_UID = None
NEWZBIN_PASS = None
EBOOK_TYPE = None
MAG_TYPE = None
REJECT_WORDS = None
REJECT_MAXSIZE = 0
REJECT_MAGSIZE = 0
MAG_AGE = 31
TOR_DOWNLOADER_BLACKHOLE = 0
TOR_CONVERT_MAGNET = 0
TOR_DOWNLOADER_UTORRENT = 0
TOR_DOWNLOADER_RTORRENT = 0
TOR_DOWNLOADER_QBITTORRENT = 0
TOR_DOWNLOADER_TRANSMISSION = 0
TOR_DOWNLOADER_SYNOLOGY = 0
TOR_DOWNLOADER_DELUGE = 0
NUMBEROFSEEDERS = 10
KEEP_SEEDING = 0
TORRENT_DIR = None
PREFER_MAGNET = 0
RTORRENT_HOST = None
RTORRENT_USER = None
RTORRENT_PASS = None
RTORRENT_LABEL = None
RTORRENT_DIR = None
UTORRENT_HOST = None
UTORRENT_PORT = 0
UTORRENT_USER = None
UTORRENT_PASS = None
UTORRENT_LABEL = None
SYNOLOGY_HOST = None
SYNOLOGY_PORT = 0
SYNOLOGY_USER = None
SYNOLOGY_PASS = None
SYNOLOGY_DIR = None
USE_SYNOLOGY = 0
QBITTORRENT_HOST = None
QBITTORRENT_PORT = 0
QBITTORRENT_USER = None
QBITTORRENT_PASS = None
QBITTORRENT_LABEL = None
TRANSMISSION_HOST = None
TRANSMISSION_PORT = 0
TRANSMISSION_USER = None
TRANSMISSION_PASS = None
DELUGE_PORT = 0
DELUGE_HOST = None
DELUGE_USER = None
DELUGE_PASS = None
DELUGE_LABEL = None
KAT = 0
KAT_HOST = None
TPB = 0
TPB_HOST = None
ZOO = 0
ZOO_HOST = None
EXTRA = 0
EXTRA_HOST = None
TDL = 0
TDL_HOST = None
GEN = 0
GEN_HOST = None
LIME = 0
LIME_HOST = None
NZB_DOWNLOADER_SABNZBD = 0
NZB_DOWNLOADER_NZBGET = 0
NZB_DOWNLOADER_SYNOLOGY = 0
NZB_DOWNLOADER_BLACKHOLE = 0
NZB_BLACKHOLEDIR = None
USENET_RETENTION = 0
VERSIONCHECK_INTERVAL = 24  # Every 24 hours (interval is in hours)
SEARCH_INTERVAL = 720 # Every 12 hours
SCAN_INTERVAL = 10 # Every 10 minutes
SEARCHRSS_INTERVAL = 20 # Every 20 minutes
FULL_SCAN = 0 # full scan would remove books from db
ADD_AUTHOR = 1 # auto add authors not found in db from goodreads
# value to mark missing books (deleted/removed) in db, can be 'Open', 'Ignored', 'Wanted','Skipped'
NOTFOUND_STATUS = 'Skipped'
# value to mark new books when importing a new author, can be 'Open', 'Ignored', 'Wanted','Skipped'
NEWAUTHOR_STATUS = 'Skipped'
# value to mark new books when rescanning existing author, can be 'Open', 'Ignored', 'Wanted','Skipped'
NEWBOOK_STATUS = 'Skipped'
EBOOK_DEST_FOLDER = None
EBOOK_DEST_FILE = None
MAG_DEST_FOLDER = None
MAG_DEST_FILE = None
MAG_RELATIVE = 1
USE_TWITTER = 0
TWITTER_NOTIFY_ONSNATCH = 0
TWITTER_NOTIFY_ONDOWNLOAD = 0
TWITTER_USERNAME = None
TWITTER_PASSWORD = None
TWITTER_PREFIX = 'LazyLibrarian'
USE_BOXCAR = 0
BOXCAR_TOKEN = None
BOXCAR_NOTIFY_ONSNATCH = 0
BOXCAR_NOTIFY_ONDOWNLOAD = 0
USE_PUSHBULLET = 0
PUSHBULLET_TOKEN = None
PUSHBULLET_DEVICEID = None
PUSHBULLET_NOTIFY_ONSNATCH = 0
PUSHBULLET_NOTIFY_ONDOWNLOAD = 0
USE_PUSHOVER = 0
PUSHOVER_APITOKEN = None
PUSHOVER_KEYS = None
PUSHOVER_DEVICE = None
PUSHOVER_ONSNATCH = 0
PUSHOVER_ONDOWNLOAD = 0
PUSHOVER_PRIORITY = 0
USE_ANDROIDPN = 0
ANDROIDPN_NOTIFY_ONSNATCH = 0
ANDROIDPN_NOTIFY_ONDOWNLOAD = 0
ANDROIDPN_URL = None
ANDROIDPN_BROADCAST = 0
ANDROIDPN_USERNAME = None
USE_NMA = 0
NMA_APIKEY = None
NMA_PRIORITY = 0
NMA_ONSNATCH = None
NMA_ONDOWNLOAD = None
USE_SLACK = 0
SLACK_TOKEN = None
SLACK_NOTIFY_ONSNATCH = 0
SLACK_NOTIFY_ONDOWNLOAD = 0
USE_EMAIL = 0
EMAIL_NOTIFY_ONSNATCH = 0
EMAIL_NOTIFY_ONDOWNLOAD = 0
EMAIL_FROM = None
EMAIL_TO = None
EMAIL_SSL = 0
EMAIL_SMTP_SERVER = None
EMAIL_SMTP_PORT = None
EMAIL_TLS = 0
EMAIL_SMTP_USER = None
EMAIL_SMTP_PASSWORD = None
NEWZNAB_PROV = []
TORZNAB_PROV = []
RSS_PROV = []
# Month names table to hold long/short month names for multiple languages
# which we can match against magazine issues
# Defined as global and initialised early, because locale changes are not thread safe
# This means changes to languages require a restart
MONTH0 = ['en_GB.UTF-8', 'en_GB.UTF-8'] # This holds the language code
MONTH1 = [u'january', u'jan'] # multiple names for first month
MONTH2 = [u'february', u'feb'] # etc...
MONTH3 = [u'march', u'mar']
MONTH4 = [u'april', u'apr']
MONTH5 = [u'may', u'may']
MONTH6 = [u'june', u'jun']
MONTH7 = [u'july', u'jul']
MONTH8 = [u'august', u'aug']
MONTH9 = [u'september', u'sep']
MONTH10 = [u'october', u'oct']
MONTH11 = [u'november', u'nov']
MONTH12 = [u'december', u'dec']
MONTHNAMES = [MONTH0, MONTH1, MONTH2, MONTH3, MONTH4, MONTH5, MONTH6,
MONTH7, MONTH8, MONTH9, MONTH10, MONTH11, MONTH12]
CACHE_HIT = 0
CACHE_MISS = 0
LAST_GOODREADS = 0
LAST_LIBRARYTHING = 0
CACHE_AGE = 30
TASK_AGE = 0
BOOKSTRAP_THEME = ''
BOOKSTRAP_THEMELIST = []
def check_section(sec):
""" Check if INI section exists, if not create it """
if CFG.has_section(sec):
return True
else:
CFG.add_section(sec)
return False
def check_setting_bool(config, cfg_name, item_name, def_val, log=True):
""" Check if option exists and coerce to boolean, if not create it """
try:
my_val = config.getboolean(cfg_name, item_name)
except Exception:
my_val = def_val
check_section(cfg_name)
config.set(cfg_name, item_name, my_val)
if log:
logger.debug(cfg_name + ":" + item_name + " -> " + str(my_val))
return my_val
def check_setting_int(config, cfg_name, item_name, def_val, log=True):
""" Check if option exists and coerce to int, if not create it """
try:
my_val = config.getint(cfg_name, item_name)
except Exception:
my_val = def_val
check_section(cfg_name)
config.set(cfg_name, item_name, my_val)
if log:
logger.debug(cfg_name + ":" + item_name + " -> " + str(my_val))
return my_val
def check_setting_str(config, cfg_name, item_name, def_val, log=True):
""" Check if option exists and coerce to string, if not create it """
try:
my_val = config.get(cfg_name, item_name)
# Old config file format had strings in quotes. ConfigParser doesn't.
if my_val.startswith('"'):
my_val = my_val[1:]
if my_val.endswith('"'):
my_val = my_val[:-1]
except Exception:
my_val = def_val
check_section(cfg_name)
config.set(cfg_name, item_name, my_val)
if log:
logger.debug(cfg_name + ":" + item_name + " -> " + my_val)
return my_val.decode(SYS_ENCODING)
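# Illustrative use of the helpers above (the option name is hypothetical):
#
#   timeout = check_setting_int(CFG, 'General', 'search_timeout', 60)
#
# reads [General] search_timeout from the ini file, falls back to 60 when the
# option is absent, and writes the value back into the in-memory parser so a
# later config save can persist it.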
def initialize():
with INIT_LOCK:
global __INITIALIZED__, LOGDIR, LOGLIMIT, LOGFILES, LOGSIZE, CFG, CFGLOGLEVEL, LOGLEVEL, \
LOGFULL, CACHEDIR, DATADIR, LAST_LIBRARYTHING, LAST_GOODREADS, \
IMP_MONTHLANG, BOOKSTRAP_THEMELIST, CURRENT_TAB, UPDATE_MSG
if __INITIALIZED__:
return False
check_section('General')
LOGDIR = check_setting_str(CFG, 'General', 'logdir', '')
LOGLIMIT = check_setting_int(CFG, 'General', 'loglimit', 500)
LOGFILES = check_setting_int(CFG, 'General', 'logfiles', 10)
LOGSIZE = check_setting_int(CFG, 'General', 'logsize', 204800)
if not LOGDIR:
LOGDIR = os.path.join(DATADIR, 'Logs')
# Create logdir
if not os.path.exists(LOGDIR):
try:
os.makedirs(LOGDIR)
except OSError as e:
if LOGLEVEL:
print '%s : Unable to create folder for logs: %s. Only logging to console.' % (LOGDIR, str(e))
# Start the logger, silence console logging if we need to
CFGLOGLEVEL = check_setting_int(CFG, 'General', 'loglevel', 9)
if LOGLEVEL == 1: # default if no debug or quiet on cmdline
if CFGLOGLEVEL == 9: # default value if none in config
LOGLEVEL = 2 # If not set in Config or cmdline, then lets set to DEBUG
else:
LOGLEVEL = CFGLOGLEVEL # Config setting picked up
logger.lazylibrarian_log.initLogger(loglevel=LOGLEVEL)
logger.info("Log level set to [%s]- Log Directory is [%s] - Config level is [%s]" % (
LOGLEVEL, LOGDIR, CFGLOGLEVEL))
if LOGLEVEL > 2:
LOGFULL = True
logger.info("Screen Log set to DEBUG")
else:
LOGFULL = False
logger.info("Screen Log set to INFO/WARN/ERROR")
config_read()
logger.info('SYS_ENCODING is %s' % SYS_ENCODING)
# Put the cache dir in the data dir for now
CACHEDIR = os.path.join(DATADIR, 'cache')
if not os.path.exists(CACHEDIR):
try:
os.makedirs(CACHEDIR)
except OSError:
logger.error('Could not create cachedir. Check permissions of: ' + DATADIR)
# keep track of last api calls so we don't call more than once per second
# to respect api terms, but don't wait un-necessarily either
time_now = int(time.time())
LAST_LIBRARYTHING = time_now
LAST_GOODREADS = time_now
# Initialize the database
try:
curr_ver = db_needs_upgrade()
if curr_ver:
threading.Thread(target=dbupgrade.dbupgrade, name="DB_UPGRADE", args=[curr_ver]).start()
else:
myDB = database.DBConnection()
result = myDB.match('PRAGMA user_version')
if result:
version = result[0]
else:
version = 0
logger.info("Database is version %s" % version)
except Exception as e:
logger.error("Can't connect to the database: %s" % str(e))
build_monthtable()
BOOKSTRAP_THEMELIST = build_bookstrap_themes()
__INITIALIZED__ = True
return True
def config_read(reloaded=False):
global FULL_PATH, PROG_DIR, DAEMON, DISPLAYLENGTH, \
HTTP_HOST, HTTP_PORT, HTTP_USER, HTTP_PASS, HTTP_PROXY, HTTP_ROOT, HTTP_LOOK, API_KEY, API_ENABLED, \
LAUNCH_BROWSER, LOGDIR, CACHE_AGE, MATCH_RATIO, DLOAD_RATIO, PROXY_HOST, PROXY_TYPE, GIT_PROGRAM, \
IMP_ONLYISBN, IMP_SINGLEBOOK, IMP_PREFLANG, IMP_MONTHLANG, IMP_AUTOADD, IMP_CONVERT, IMP_CALIBREDB, \
IMP_AUTOSEARCH, MONTHNAMES, MONTH0, MONTH1, MONTH2, MONTH3, MONTH4, MONTH5, MONTH6, MONTH7, \
MONTH8, MONTH9, MONTH10, MONTH11, MONTH12, CONFIGFILE, CFG, LOGLIMIT, TASK_AGE, \
SAB_HOST, SAB_PORT, SAB_SUBDIR, SAB_API, SAB_USER, SAB_PASS, SAB_CAT, \
DESTINATION_DIR, DESTINATION_COPY, DOWNLOAD_DIR, USENET_RETENTION, NZB_BLACKHOLEDIR, \
ALTERNATE_DIR, GR_API, GB_API, BOOK_API, \
NZBGET_HOST, NZBGET_USER, NZBGET_PASS, NZBGET_CATEGORY, NZBGET_PRIORITY, \
NZBGET_PORT, NZB_DOWNLOADER_NZBGET, NZBMATRIX, NZBMATRIX_USER, NZBMATRIX_API, \
NEWZBIN, NEWZBIN_UID, NEWZBIN_PASS, EBOOK_TYPE, MAG_TYPE, \
KAT, KAT_HOST, TPB, TPB_HOST, ZOO, ZOO_HOST, TDL, TDL_HOST, GEN, GEN_HOST, EXTRA, EXTRA_HOST, \
LIME, LIME_HOST, NEWZNAB_PROV, TORZNAB_PROV, RSS_PROV, REJECT_WORDS, REJECT_MAXSIZE, REJECT_MAGSIZE, \
VERSIONCHECK_INTERVAL, SEARCH_INTERVAL, SCAN_INTERVAL, SEARCHRSS_INTERVAL, MAG_AGE, \
EBOOK_DEST_FOLDER, EBOOK_DEST_FILE, MAG_RELATIVE, MAG_DEST_FOLDER, MAG_DEST_FILE, \
USE_TWITTER, TWITTER_NOTIFY_ONSNATCH, TWITTER_NOTIFY_ONDOWNLOAD, \
TWITTER_USERNAME, TWITTER_PASSWORD, TWITTER_PREFIX, TOR_CONVERT_MAGNET, \
USE_BOXCAR, BOXCAR_NOTIFY_ONSNATCH, BOXCAR_NOTIFY_ONDOWNLOAD, BOXCAR_TOKEN, \
TORRENT_DIR, TOR_DOWNLOADER_BLACKHOLE, TOR_DOWNLOADER_UTORRENT, TOR_DOWNLOADER_RTORRENT, \
TOR_DOWNLOADER_QBITTORRENT, NZB_DOWNLOADER_SABNZBD, NZB_DOWNLOADER_SYNOLOGY, NZB_DOWNLOADER_BLACKHOLE, \
SYNOLOGY_DIR, USE_SYNOLOGY, USE_PUSHBULLET, PUSHBULLET_NOTIFY_ONSNATCH, PUSHBULLET_NOTIFY_ONDOWNLOAD, \
PUSHBULLET_TOKEN, PUSHBULLET_DEVICEID, RTORRENT_HOST, RTORRENT_USER, RTORRENT_PASS, RTORRENT_DIR, \
RTORRENT_LABEL, UTORRENT_HOST, UTORRENT_PORT, UTORRENT_USER, UTORRENT_PASS, UTORRENT_LABEL, \
QBITTORRENT_HOST, QBITTORRENT_PORT, QBITTORRENT_USER, QBITTORRENT_PASS, QBITTORRENT_LABEL, \
SYNOLOGY_PORT, SYNOLOGY_HOST, SYNOLOGY_USER, SYNOLOGY_PASS, USE_PUSHOVER, PUSHOVER_ONSNATCH, \
PUSHOVER_KEYS, PUSHOVER_APITOKEN, PUSHOVER_PRIORITY, PUSHOVER_ONDOWNLOAD, PUSHOVER_DEVICE, \
USE_ANDROIDPN, ANDROIDPN_NOTIFY_ONSNATCH, ANDROIDPN_NOTIFY_ONDOWNLOAD, \
ANDROIDPN_URL, ANDROIDPN_USERNAME, ANDROIDPN_BROADCAST, \
USE_SLACK, SLACK_NOTIFY_ONSNATCH, SLACK_NOTIFY_ONDOWNLOAD, SLACK_TOKEN, \
USE_EMAIL, EMAIL_NOTIFY_ONSNATCH, EMAIL_NOTIFY_ONDOWNLOAD, EMAIL_FROM, EMAIL_TO, \
EMAIL_SSL, EMAIL_SMTP_SERVER, EMAIL_SMTP_PORT, EMAIL_TLS, EMAIL_SMTP_USER, EMAIL_SMTP_PASSWORD, \
TOR_DOWNLOADER_TRANSMISSION, TRANSMISSION_HOST, TRANSMISSION_PORT, TRANSMISSION_PASS, TRANSMISSION_USER, \
TOR_DOWNLOADER_SYNOLOGY, TOR_DOWNLOADER_DELUGE, DELUGE_HOST, DELUGE_USER, DELUGE_PASS, DELUGE_PORT, \
DELUGE_LABEL, FULL_SCAN, ADD_AUTHOR, NOTFOUND_STATUS, NEWBOOK_STATUS, NEWAUTHOR_STATUS, \
USE_NMA, NMA_APIKEY, NMA_PRIORITY, NMA_ONSNATCH, NMA_ONDOWNLOAD, \
GIT_USER, GIT_REPO, GIT_BRANCH, INSTALL_TYPE, CURRENT_VERSION, COMMIT_LIST, PREFER_MAGNET, \
LATEST_VERSION, COMMITS_BEHIND, NUMBEROFSEEDERS, KEEP_SEEDING, SCHED, CACHE_HIT, CACHE_MISS, \
BOOKSTRAP_THEME, LOGFILES, LOGSIZE, HTTPS_ENABLED, HTTPS_CERT, HTTPS_KEY
NEWZNAB_PROV = []
TORZNAB_PROV = []
RSS_PROV = []
# we read the log details earlier for starting the logger process,
# but read them again here so they get listed in the debug log
LOGDIR = check_setting_str(CFG, 'General', 'logdir', '')
LOGLIMIT = check_setting_int(CFG, 'General', 'loglimit', 500)
LOGFILES = check_setting_int(CFG, 'General', 'logfiles', 10)
LOGSIZE = check_setting_int(CFG, 'General', 'logsize', 204800)
HTTP_PORT = check_setting_int(CFG, 'General', 'http_port', 5299)
if HTTP_PORT < 21 or HTTP_PORT > 65535:
HTTP_PORT = 5299
MATCH_RATIO = check_setting_int(CFG, 'General', 'match_ratio', 80)
DLOAD_RATIO = check_setting_int(CFG, 'General', 'dload_ratio', 90)
DISPLAYLENGTH = check_setting_int(CFG, 'General', 'displaylength', 10)
HTTP_HOST = check_setting_str(CFG, 'General', 'http_host', '0.0.0.0')
HTTP_USER = check_setting_str(CFG, 'General', 'http_user', '')
HTTP_PASS = check_setting_str(CFG, 'General', 'http_pass', '')
HTTP_PROXY = check_setting_bool(CFG, 'General', 'http_proxy', 0)
HTTP_ROOT = check_setting_str(CFG, 'General', 'http_root', '')
HTTP_LOOK = check_setting_str(CFG, 'General', 'http_look', 'default')
HTTPS_ENABLED = check_setting_bool(CFG, 'General', 'https_enabled', 0)
HTTPS_CERT = check_setting_str(CFG, 'General', 'https_cert', '')
HTTPS_KEY = check_setting_str(CFG, 'General', 'https_key', '')
BOOKSTRAP_THEME = check_setting_str(CFG, 'General', 'bookstrap_theme', 'slate')
LAUNCH_BROWSER = check_setting_bool(CFG, 'General', 'launch_browser', 1)
API_ENABLED = check_setting_bool(CFG, 'General', 'api_enabled', 0)
API_KEY = check_setting_str(CFG, 'General', 'api_key', '')
PROXY_HOST = check_setting_str(CFG, 'General', 'proxy_host', '')
PROXY_TYPE = check_setting_str(CFG, 'General', 'proxy_type', '')
IMP_PREFLANG = check_setting_str(CFG, 'General', 'imp_preflang', 'en, eng, en-US, en-GB')
IMP_MONTHLANG = check_setting_str(CFG, 'General', 'imp_monthlang', '')
IMP_AUTOADD = check_setting_str(CFG, 'General', 'imp_autoadd', '')
IMP_AUTOSEARCH = check_setting_bool(CFG, 'General', 'imp_autosearch', 0)
IMP_CALIBREDB = check_setting_str(CFG, 'General', 'imp_calibredb', '')
IMP_ONLYISBN = check_setting_bool(CFG, 'General', 'imp_onlyisbn', 0)
IMP_SINGLEBOOK = check_setting_bool(CFG, 'General', 'imp_singlebook', 0)
IMP_CONVERT = check_setting_str(CFG, 'General', 'imp_convert', '')
GIT_PROGRAM = check_setting_str(CFG, 'General', 'git_program', '')
CACHE_AGE = check_setting_int(CFG, 'General', 'cache_age', 30)
TASK_AGE = check_setting_int(CFG, 'General', 'task_age', 0)
GIT_USER = check_setting_str(CFG, 'Git', 'git_user', 'dobytang')
GIT_REPO = check_setting_str(CFG, 'Git', 'git_repo', 'lazylibrarian')
GIT_BRANCH = check_setting_str(CFG, 'Git', 'git_branch', 'master')
INSTALL_TYPE = check_setting_str(CFG, 'Git', 'install_type', '')
CURRENT_VERSION = check_setting_str(CFG, 'Git', 'current_version', '')
LATEST_VERSION = check_setting_str(CFG, 'Git', 'latest_version', '')
COMMITS_BEHIND = check_setting_str(CFG, 'Git', 'commits_behind', '')
SAB_HOST = check_setting_str(CFG, 'SABnzbd', 'sab_host', '')
SAB_PORT = check_setting_int(CFG, 'SABnzbd', 'sab_port', 0)
SAB_SUBDIR = check_setting_str(CFG, 'SABnzbd', 'sab_subdir', '')
SAB_USER = check_setting_str(CFG, 'SABnzbd', 'sab_user', '')
SAB_PASS = check_setting_str(CFG, 'SABnzbd', 'sab_pass', '')
SAB_API = check_setting_str(CFG, 'SABnzbd', 'sab_api', '')
SAB_CAT = check_setting_str(CFG, 'SABnzbd', 'sab_cat', '')
# legacy name conversion, separate out nzbget host/port
if not CFG.has_option('NZBGet', 'nzbget_port'):
port = 0
host = check_setting_str(CFG, 'NZBGet', 'nzbget_host', '')
if host.startswith('http'):
hostpart = 2
else:
hostpart = 1
words = host.split(':')
if len(words) > hostpart:
host = ':'.join(words[:hostpart])
port = ':'.join(words[hostpart:])
CFG.set('NZBGet', 'nzbget_port', port)
CFG.set('NZBGet', 'nzbget_host', host)
NZBGET_HOST = check_setting_str(CFG, 'NZBGet', 'nzbget_host', '')
NZBGET_PORT = check_setting_int(CFG, 'NZBGet', 'nzbget_port', '0')
NZBGET_USER = check_setting_str(CFG, 'NZBGet', 'nzbget_user', '')
NZBGET_PASS = check_setting_str(CFG, 'NZBGet', 'nzbget_pass', '')
NZBGET_CATEGORY = check_setting_str(CFG, 'NZBGet', 'nzbget_cat', '')
NZBGET_PRIORITY = check_setting_int(CFG, 'NZBGet', 'nzbget_priority', '0')
DESTINATION_COPY = check_setting_bool(CFG, 'General', 'destination_copy', 0)
DESTINATION_DIR = check_setting_str(CFG, 'General', 'destination_dir', '')
ALTERNATE_DIR = check_setting_str(CFG, 'General', 'alternate_dir', '')
DOWNLOAD_DIR = check_setting_str(CFG, 'General', 'download_dir', '')
NZB_DOWNLOADER_SABNZBD = check_setting_bool(CFG, 'USENET', 'nzb_downloader_sabnzbd', 0)
NZB_DOWNLOADER_NZBGET = check_setting_bool(CFG, 'USENET', 'nzb_downloader_nzbget', 0)
NZB_DOWNLOADER_SYNOLOGY = check_setting_bool(CFG, 'USENET', 'nzb_downloader_synology', 0)
NZB_DOWNLOADER_BLACKHOLE = check_setting_bool(CFG, 'USENET', 'nzb_downloader_blackhole', 0)
NZB_BLACKHOLEDIR = check_setting_str(CFG, 'USENET', 'nzb_blackholedir', '')
USENET_RETENTION = check_setting_int(CFG, 'USENET', 'usenet_retention', 0)
NZBMATRIX = check_setting_bool(CFG, 'NZBMatrix', 'nzbmatrix', 0)
NZBMATRIX_USER = check_setting_str(CFG, 'NZBMatrix', 'nzbmatrix_user', '')
NZBMATRIX_API = check_setting_str(CFG, 'NZBMatrix', 'nzbmatrix_api', '')
count = 0
while CFG.has_section('Newznab%i' % count):
newz_name = 'Newznab%i' % count
# legacy name conversions
if CFG.has_option(newz_name, 'newznab%i' % count):
CFG.set(newz_name, 'ENABLED', CFG.getboolean(newz_name, 'newznab%i' % count))
CFG.remove_option(newz_name, 'newznab%i' % count)
if CFG.has_option(newz_name, 'newznab_host%i' % count):
CFG.set(newz_name, 'HOST', CFG.get(newz_name, 'newznab_host%i' % count))
CFG.remove_option(newz_name, 'newznab_host%i' % count)
if CFG.has_option(newz_name, 'newznab_api%i' % count):
CFG.set(newz_name, 'API', CFG.get(newz_name, 'newznab_api%i' % count))
CFG.remove_option(newz_name, 'newznab_api%i' % count)
if CFG.has_option(newz_name, 'nzedb'):
CFG.remove_option(newz_name, 'nzedb')
NEWZNAB_PROV.append({"NAME": newz_name,
"ENABLED": check_setting_bool(CFG, newz_name, 'enabled', 0),
"HOST": check_setting_str(CFG, newz_name, 'host', ''),
"API": check_setting_str(CFG, newz_name, 'api', ''),
"GENERALSEARCH": check_setting_str(CFG, newz_name, 'generalsearch', 'search'),
"BOOKSEARCH": check_setting_str(CFG, newz_name, 'booksearch', 'book'),
"MAGSEARCH": check_setting_str(CFG, newz_name, 'magsearch', ''),
"BOOKCAT": check_setting_str(CFG, newz_name, 'bookcat', '7000,7020'),
"MAGCAT": check_setting_str(CFG, newz_name, 'magcat', '7010'),
"EXTENDED": check_setting_str(CFG, newz_name, 'extended', '1'),
"UPDATED": check_setting_str(CFG, newz_name, 'updated', ''),
"MANUAL": check_setting_bool(CFG, newz_name, 'manual', 0)
})
count += 1
# if the last slot is full, add an empty one on the end
add_newz_slot()
count = 0
while CFG.has_section('Torznab%i' % count):
torz_name = 'Torznab%i' % count
# legacy name conversions
if CFG.has_option(torz_name, 'torznab%i' % count):
CFG.set(torz_name, 'ENABLED', CFG.getboolean(torz_name, 'torznab%i' % count))
CFG.remove_option(torz_name, 'torznab%i' % count)
if CFG.has_option(torz_name, 'torznab_host%i' % count):
CFG.set(torz_name, 'HOST', CFG.get(torz_name, 'torznab_host%i' % count))
CFG.remove_option(torz_name, 'torznab_host%i' % count)
if CFG.has_option(torz_name, 'torznab_api%i' % count):
CFG.set(torz_name, 'API', CFG.get(torz_name, 'torznab_api%i' % count))
CFG.remove_option(torz_name, 'torznab_api%i' % count)
if CFG.has_option(torz_name, 'nzedb'):
CFG.remove_option(torz_name, 'nzedb')
TORZNAB_PROV.append({"NAME": torz_name,
"ENABLED": check_setting_bool(CFG, torz_name, 'enabled', 0),
"HOST": check_setting_str(CFG, torz_name, 'host', ''),
"API": check_setting_str(CFG, torz_name, 'api', ''),
"GENERALSEARCH": check_setting_str(CFG, torz_name, 'generalsearch', 'search'),
"BOOKSEARCH": check_setting_str(CFG, torz_name, 'booksearch', 'book'),
"MAGSEARCH": check_setting_str(CFG, torz_name, 'magsearch', ''),
"BOOKCAT": check_setting_str(CFG, torz_name, 'bookcat', '8000,8010'),
"MAGCAT": check_setting_str(CFG, torz_name, 'magcat', '8030'),
"EXTENDED": check_setting_str(CFG, torz_name, 'extended', '1'),
"UPDATED": check_setting_str(CFG, torz_name, 'updated', ''),
"MANUAL": check_setting_bool(CFG, torz_name, 'manual', 0)
})
count += 1
# if the last slot is full, add an empty one on the end
add_torz_slot()
count = 0
while CFG.has_section('RSS_%i' % count):
rss_name = 'RSS_%i' % count
# legacy name conversions
if CFG.has_option(rss_name, 'rss%i' % count):
CFG.set(rss_name, 'ENABLED', CFG.getboolean(rss_name, 'rss%i' % count))
CFG.remove_option(rss_name, 'rss%i' % count)
if CFG.has_option(rss_name, 'rss_host%i' % count):
CFG.set(rss_name, 'HOST', CFG.get(rss_name, 'rss_host%i' % count))
CFG.remove_option(rss_name, 'rss_host%i' % count)
if CFG.has_option(rss_name, 'rss_user%i' % count):
# CFG.set(rss_name, 'USER', CFG.get(rss_name, 'rss_user%i' % count))
CFG.remove_option(rss_name, 'rss_user%i' % count)
if CFG.has_option(rss_name, 'rss_pass%i' % count):
# CFG.set(rss_name, 'PASS', CFG.get(rss_name, 'rss_pass%i' % count))
CFG.remove_option(rss_name, 'rss_pass%i' % count)
if CFG.has_option(rss_name, 'PASS'):
CFG.remove_option(rss_name, 'PASS')
if CFG.has_option(rss_name, 'USER'):
CFG.remove_option(rss_name, 'USER')
RSS_PROV.append({"NAME": rss_name,
"ENABLED": check_setting_bool(CFG, rss_name, 'ENABLED', 0),
"HOST": check_setting_str(CFG, rss_name, 'HOST', '')
})
count += 1
# if the last slot is full, add an empty one on the end
add_rss_slot()
TOR_DOWNLOADER_BLACKHOLE = check_setting_bool(CFG, 'TORRENT', 'tor_downloader_blackhole', 0)
TOR_CONVERT_MAGNET = check_setting_bool(CFG, 'TORRENT', 'tor_convert_magnet', 0)
TOR_DOWNLOADER_UTORRENT = check_setting_bool(CFG, 'TORRENT', 'tor_downloader_utorrent', 0)
TOR_DOWNLOADER_RTORRENT = check_setting_bool(CFG, 'TORRENT', 'tor_downloader_rtorrent', 0)
TOR_DOWNLOADER_QBITTORRENT = check_setting_bool(CFG, 'TORRENT', 'tor_downloader_qbittorrent', 0)
TOR_DOWNLOADER_TRANSMISSION = check_setting_bool(CFG, 'TORRENT', 'tor_downloader_transmission', 0)
TOR_DOWNLOADER_SYNOLOGY = check_setting_bool(CFG, 'TORRENT', 'tor_downloader_synology', 0)
TOR_DOWNLOADER_DELUGE = check_setting_bool(CFG, 'TORRENT', 'tor_downloader_deluge', 0)
NUMBEROFSEEDERS = check_setting_int(CFG, 'TORRENT', 'numberofseeders', 10)
KEEP_SEEDING = check_setting_bool(CFG, 'TORRENT', 'keep_seeding', 1)
PREFER_MAGNET = check_setting_bool(CFG, 'TORRENT', 'prefer_magnet', 1)
TORRENT_DIR = check_setting_str(CFG, 'TORRENT', 'torrent_dir', '')
RTORRENT_HOST = check_setting_str(CFG, 'RTORRENT', 'rtorrent_host', '')
RTORRENT_USER = check_setting_str(CFG, 'RTORRENT', 'rtorrent_user', '')
RTORRENT_PASS = check_setting_str(CFG, 'RTORRENT', 'rtorrent_pass', '')
RTORRENT_LABEL = check_setting_str(CFG, 'RTORRENT', 'rtorrent_label', '')
RTORRENT_DIR = check_setting_str(CFG, 'RTORRENT', 'rtorrent_dir', '')
# legacy name conversion, separate out utorrent host/port
if not CFG.has_option('UTORRENT', 'utorrent_port'):
port = 0
host = check_setting_str(CFG, 'UTORRENT', 'utorrent_host', '')
if host.startswith('http'):
hostpart = 2
else:
hostpart = 1
words = host.split(':')
if len(words) > hostpart:
host = ':'.join(words[:hostpart])
port = ':'.join(words[hostpart:])
CFG.set('UTORRENT', 'utorrent_port', port)
CFG.set('UTORRENT', 'utorrent_host', host)
UTORRENT_HOST = check_setting_str(CFG, 'UTORRENT', 'utorrent_host', '')
UTORRENT_PORT = check_setting_int(CFG, 'UTORRENT', 'utorrent_port', 0)
UTORRENT_USER = check_setting_str(CFG, 'UTORRENT', 'utorrent_user', '')
UTORRENT_PASS = check_setting_str(CFG, 'UTORRENT', 'utorrent_pass', '')
UTORRENT_LABEL = check_setting_str(CFG, 'UTORRENT', 'utorrent_label', '')
# legacy name conversion, separate out qbittorrent host/port
if not CFG.has_option('QBITTORRENT', 'qbittorrent_port'):
port = 0
host = check_setting_str(CFG, 'QBITTORRENT', 'qbittorrent_host', '')
if host.startswith('http'):
hostpart = 2
else:
hostpart = 1
words = host.split(':')
if len(words) > hostpart:
host = ':'.join(words[:hostpart])
port = ':'.join(words[hostpart:])
CFG.set('QBITTORRENT', 'qbittorrent_port', port)
CFG.set('QBITTORRENT', 'qbittorrent_host', host)
QBITTORRENT_HOST = check_setting_str(CFG, 'QBITTORRENT', 'qbittorrent_host', '')
QBITTORRENT_PORT = check_setting_int(CFG, 'QBITTORRENT', 'qbittorrent_port', 0)
QBITTORRENT_USER = check_setting_str(CFG, 'QBITTORRENT', 'qbittorrent_user', '')
QBITTORRENT_PASS = check_setting_str(CFG, 'QBITTORRENT', 'qbittorrent_pass', '')
QBITTORRENT_LABEL = check_setting_str(CFG, 'QBITTORRENT', 'qbittorrent_label', '')
# legacy name conversion, separate out transmission host/port
if not CFG.has_option('TRANSMISSION', 'transmission_port'):
port = 0
host = check_setting_str(CFG, 'TRANSMISSION', 'transmission_host', '')
if host.startswith('http'):
hostpart = 2
else:
hostpart = 1
words = host.split(':')
if len(words) > hostpart:
host = ':'.join(words[:hostpart])
port = ':'.join(words[hostpart:])
CFG.set('TRANSMISSION', 'transmission_port', port)
CFG.set('TRANSMISSION', 'transmission_host', host)
TRANSMISSION_HOST = check_setting_str(CFG, 'TRANSMISSION', 'transmission_host', '')
TRANSMISSION_PORT = check_setting_int(CFG, 'TRANSMISSION', 'transmission_port', 0)
TRANSMISSION_USER = check_setting_str(CFG, 'TRANSMISSION', 'transmission_user', '')
TRANSMISSION_PASS = check_setting_str(CFG, 'TRANSMISSION', 'transmission_pass', '')
DELUGE_HOST = check_setting_str(CFG, 'DELUGE', 'deluge_host', '')
DELUGE_PORT = check_setting_int(CFG, 'DELUGE', 'deluge_port', 0)
DELUGE_USER = check_setting_str(CFG, 'DELUGE', 'deluge_user', '')
DELUGE_PASS = check_setting_str(CFG, 'DELUGE', 'deluge_pass', '')
DELUGE_LABEL = check_setting_str(CFG, 'DELUGE', 'deluge_label', '')
SYNOLOGY_HOST = check_setting_str(CFG, 'SYNOLOGY', 'synology_host', '')
SYNOLOGY_PORT = check_setting_int(CFG, 'SYNOLOGY', 'synology_port', 0)
SYNOLOGY_USER = check_setting_str(CFG, 'SYNOLOGY', 'synology_user', '')
SYNOLOGY_PASS = check_setting_str(CFG, 'SYNOLOGY', 'synology_pass', '')
SYNOLOGY_DIR = check_setting_str(CFG, 'SYNOLOGY', 'synology_dir', 'Multimedia/Download')
USE_SYNOLOGY = check_setting_bool(CFG, 'SYNOLOGY', 'use_synology', 0)
KAT = check_setting_bool(CFG, 'KAT', 'kat', 0)
KAT_HOST = check_setting_str(CFG, 'KAT', 'kat_host', 'kickass.cd')
TPB = check_setting_bool(CFG, 'TPB', 'tpb', 0)
TPB_HOST = check_setting_str(CFG, 'TPB', 'tpb_host', 'https://piratebays.co')
ZOO = check_setting_bool(CFG, 'ZOO', 'zoo', 0)
ZOO_HOST = check_setting_str(CFG, 'ZOO', 'zoo_host', 'https://zooqle.com')
EXTRA = check_setting_bool(CFG, 'EXTRA', 'extra', 0)
EXTRA_HOST = check_setting_str(CFG, 'EXTRA', 'extra_host', 'extratorrent.cc')
TDL = check_setting_bool(CFG, 'TDL', 'tdl', 0)
TDL_HOST = check_setting_str(CFG, 'TDL', 'tdl_host', 'torrentdownloads.me')
GEN = check_setting_bool(CFG, 'GEN', 'gen', 0)
GEN_HOST = check_setting_str(CFG, 'GEN', 'gen_host', 'libgen.io')
LIME = check_setting_bool(CFG, 'LIME', 'lime', 0)
LIME_HOST = check_setting_str(CFG, 'LIME', 'lime_host', 'https://www.limetorrents.cc')
NEWZBIN = check_setting_bool(CFG, 'Newzbin', 'newzbin', 0)
NEWZBIN_UID = check_setting_str(CFG, 'Newzbin', 'newzbin_uid', '')
NEWZBIN_PASS = check_setting_str(CFG, 'Newzbin', 'newzbin_pass', '')
EBOOK_TYPE = check_setting_str(CFG, 'General', 'ebook_type', 'epub, mobi, pdf')
EBOOK_TYPE = EBOOK_TYPE.lower() # to make extension matching easier
MAG_TYPE = check_setting_str(CFG, 'General', 'mag_type', 'pdf')
MAG_TYPE = MAG_TYPE.lower() # to make extension matching easier
REJECT_WORDS = check_setting_str(CFG, 'General', 'reject_words', 'audiobook, mp3')
REJECT_WORDS = REJECT_WORDS.lower()
REJECT_MAXSIZE = check_setting_int(CFG, 'General', 'reject_maxsize', 0)
REJECT_MAGSIZE = check_setting_int(CFG, 'General', 'reject_magsize', 0)
MAG_AGE = check_setting_int(CFG, 'General', 'mag_age', 31)
SEARCH_INTERVAL = check_setting_int(CFG, 'SearchScan', 'search_interval', '360')
SCAN_INTERVAL = check_setting_int(CFG, 'SearchScan', 'scan_interval', '10')
SEARCHRSS_INTERVAL = check_setting_int(CFG, 'SearchScan', 'searchrss_interval', '20')
VERSIONCHECK_INTERVAL = check_setting_int(CFG, 'SearchScan', 'versioncheck_interval', '24')
FULL_SCAN = check_setting_bool(CFG, 'LibraryScan', 'full_scan', 0)
ADD_AUTHOR = check_setting_bool(CFG, 'LibraryScan', 'add_author', 1)
NOTFOUND_STATUS = check_setting_str(CFG, 'LibraryScan', 'notfound_status', 'Skipped')
NEWBOOK_STATUS = check_setting_str(CFG, 'LibraryScan', 'newbook_status', 'Skipped')
NEWAUTHOR_STATUS = check_setting_str(CFG, 'LibraryScan', 'newauthor_status', 'Skipped')
EBOOK_DEST_FOLDER = check_setting_str(CFG, 'PostProcess', 'ebook_dest_folder', '$Author/$Title')
EBOOK_DEST_FILE = check_setting_str(CFG, 'PostProcess', 'ebook_dest_file', '$Title - $Author')
MAG_DEST_FOLDER = check_setting_str(CFG, 'PostProcess', 'mag_dest_folder', '_Magazines/$Title/$IssueDate')
MAG_DEST_FILE = check_setting_str(CFG, 'PostProcess', 'mag_dest_file', '$IssueDate - $Title')
MAG_RELATIVE = check_setting_bool(CFG, 'PostProcess', 'mag_relative', 1)
USE_TWITTER = check_setting_bool(CFG, 'Twitter', 'use_twitter', 0)
TWITTER_NOTIFY_ONSNATCH = check_setting_bool(CFG, 'Twitter', 'twitter_notify_onsnatch', 0)
TWITTER_NOTIFY_ONDOWNLOAD = check_setting_bool(CFG, 'Twitter', 'twitter_notify_ondownload', 0)
TWITTER_USERNAME = check_setting_str(CFG, 'Twitter', 'twitter_username', '')
TWITTER_PASSWORD = check_setting_str(CFG, 'Twitter', 'twitter_password', '')
TWITTER_PREFIX = check_setting_str(CFG, 'Twitter', 'twitter_prefix', 'LazyLibrarian')
USE_BOXCAR = check_setting_bool(CFG, 'Boxcar', 'use_boxcar', 0)
BOXCAR_NOTIFY_ONSNATCH = check_setting_bool(CFG, 'Boxcar', 'boxcar_notify_onsnatch', 0)
BOXCAR_NOTIFY_ONDOWNLOAD = check_setting_bool(CFG, 'Boxcar', 'boxcar_notify_ondownload', 0)
BOXCAR_TOKEN = check_setting_str(CFG, 'Boxcar', 'boxcar_token', '')
USE_PUSHBULLET = check_setting_bool(CFG, 'Pushbullet', 'use_pushbullet', 0)
PUSHBULLET_NOTIFY_ONSNATCH = check_setting_bool(CFG, 'Pushbullet', 'pushbullet_notify_onsnatch', 0)
PUSHBULLET_NOTIFY_ONDOWNLOAD = check_setting_bool(CFG, 'Pushbullet', 'pushbullet_notify_ondownload', 0)
PUSHBULLET_TOKEN = check_setting_str(CFG, 'Pushbullet', 'pushbullet_token', '')
PUSHBULLET_DEVICEID = check_setting_str(CFG, 'Pushbullet', 'pushbullet_deviceid', '')
USE_PUSHOVER = check_setting_bool(CFG, 'Pushover', 'use_pushover', 0)
PUSHOVER_ONSNATCH = check_setting_bool(CFG, 'Pushover', 'pushover_onsnatch', 0)
PUSHOVER_ONDOWNLOAD = check_setting_bool(CFG, 'Pushover', 'pushover_ondownload', 0)
PUSHOVER_KEYS = check_setting_str(CFG, 'Pushover', 'pushover_keys', '')
PUSHOVER_APITOKEN = check_setting_str(CFG, 'Pushover', 'pushover_apitoken', '')
PUSHOVER_PRIORITY = check_setting_int(CFG, 'Pushover', 'pushover_priority', 0)
PUSHOVER_DEVICE = check_setting_str(CFG, 'Pushover', 'pushover_device', '')
USE_ANDROIDPN = check_setting_bool(CFG, 'AndroidPN', 'use_androidpn', 0)
ANDROIDPN_NOTIFY_ONSNATCH = check_setting_bool(CFG, 'AndroidPN', 'androidpn_notify_onsnatch', 0)
ANDROIDPN_NOTIFY_ONDOWNLOAD = check_setting_bool(CFG, 'AndroidPN', 'androidpn_notify_ondownload', 0)
ANDROIDPN_URL = check_setting_str(CFG, 'AndroidPN', 'androidpn_url', '')
ANDROIDPN_USERNAME = check_setting_str(CFG, 'AndroidPN', 'androidpn_username', '')
ANDROIDPN_BROADCAST = check_setting_bool(CFG, 'AndroidPN', 'androidpn_broadcast', 0)
USE_NMA = check_setting_bool(CFG, 'NMA', 'use_nma', 0)
NMA_APIKEY = check_setting_str(CFG, 'NMA', 'nma_apikey', '')
NMA_PRIORITY = check_setting_int(CFG, 'NMA', 'nma_priority', 0)
NMA_ONSNATCH = check_setting_bool(CFG, 'NMA', 'nma_onsnatch', 0)
NMA_ONDOWNLOAD = check_setting_bool(CFG, 'NMA', 'nma_ondownload', 0)
USE_SLACK = check_setting_bool(CFG, 'Slack', 'use_slack', 0)
SLACK_NOTIFY_ONSNATCH = check_setting_bool(CFG, 'Slack', 'slack_notify_onsnatch', 0)
SLACK_NOTIFY_ONDOWNLOAD = check_setting_bool(CFG, 'Slack', 'slack_notify_ondownload', 0)
SLACK_TOKEN = check_setting_str(CFG, 'Slack', 'slack_token', '')
USE_EMAIL = check_setting_bool(CFG, 'Email', 'use_email', 0)
EMAIL_NOTIFY_ONSNATCH = check_setting_bool(CFG, 'Email', 'email_notify_onsnatch', 0)
EMAIL_NOTIFY_ONDOWNLOAD = check_setting_bool(CFG, 'Email', 'email_notify_ondownload', 0)
EMAIL_FROM = check_setting_str(CFG, 'Email', 'email_from', '')
EMAIL_TO = check_setting_str(CFG, 'Email', 'email_to', '')
EMAIL_SSL = check_setting_bool(CFG, 'Email', 'email_ssl', 0)
EMAIL_SMTP_SERVER = check_setting_str(CFG, 'Email', 'email_smtp_server', '')
EMAIL_SMTP_PORT = check_setting_int(CFG, 'Email', 'email_smtp_port', 25)
EMAIL_TLS = check_setting_bool(CFG, 'Email', 'email_tls', 0)
EMAIL_SMTP_USER = check_setting_str(CFG, 'Email', 'email_smtp_user', '')
EMAIL_SMTP_PASSWORD = check_setting_str(CFG, 'Email', 'email_smtp_password', '')
BOOK_API = check_setting_str(CFG, 'API', 'book_api', 'GoodReads')
GR_API = check_setting_str(CFG, 'API', 'gr_api', 'ckvsiSDsuqh7omh74ZZ6Q')
GB_API = check_setting_str(CFG, 'API', 'gb_api', '')
if reloaded:
logger.info('Config file reloaded')
else:
logger.info('Config file loaded')
# noinspection PyUnresolvedReferences,PyTypeChecker
def config_write():
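    """Persist the current runtime settings back to the config file,
    writing a .new file first and keeping the old config as a .bak backup."""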
check_section('General')
CFG.set('General', 'http_port', HTTP_PORT)
CFG.set('General', 'http_host', HTTP_HOST)
CFG.set('General', 'http_user', HTTP_USER)
CFG.set('General', 'http_pass', HTTP_PASS)
CFG.set('General', 'http_proxy', HTTP_PROXY)
CFG.set('General', 'http_root', HTTP_ROOT)
CFG.set('General', 'http_look', HTTP_LOOK)
CFG.set('General', 'https_enabled', HTTPS_ENABLED)
CFG.set('General', 'https_cert', HTTPS_CERT)
CFG.set('General', 'https_key', HTTPS_KEY)
CFG.set('General', 'bookstrap_theme', BOOKSTRAP_THEME)
CFG.set('General', 'launch_browser', LAUNCH_BROWSER)
CFG.set('General', 'api_enabled', API_ENABLED)
CFG.set('General', 'api_key', API_KEY)
CFG.set('General', 'proxy_host', PROXY_HOST)
CFG.set('General', 'proxy_type', PROXY_TYPE)
CFG.set('General', 'logdir', LOGDIR.encode(SYS_ENCODING))
CFG.set('General', 'loglimit', LOGLIMIT)
CFG.set('General', 'loglevel', LOGLEVEL)
CFG.set('General', 'logsize', LOGSIZE)
CFG.set('General', 'logfiles', LOGFILES)
CFG.set('General', 'match_ratio', MATCH_RATIO)
CFG.set('General', 'dload_ratio', DLOAD_RATIO)
CFG.set('General', 'imp_onlyisbn', IMP_ONLYISBN)
CFG.set('General', 'imp_singlebook', IMP_SINGLEBOOK)
CFG.set('General', 'imp_preflang', IMP_PREFLANG)
CFG.set('General', 'imp_monthlang', IMP_MONTHLANG)
CFG.set('General', 'imp_autoadd', IMP_AUTOADD)
CFG.set('General', 'imp_autosearch', IMP_AUTOSEARCH)
CFG.set('General', 'imp_calibredb', IMP_CALIBREDB)
CFG.set('General', 'imp_convert', IMP_CONVERT.strip())
CFG.set('General', 'git_program', GIT_PROGRAM.strip())
CFG.set('General', 'ebook_type', EBOOK_TYPE.lower())
CFG.set('General', 'mag_type', MAG_TYPE.lower())
CFG.set('General', 'reject_words', REJECT_WORDS.encode(SYS_ENCODING).lower())
CFG.set('General', 'reject_maxsize', REJECT_MAXSIZE)
CFG.set('General', 'reject_magsize', REJECT_MAGSIZE)
CFG.set('General', 'mag_age', MAG_AGE)
CFG.set('General', 'destination_dir', DESTINATION_DIR.encode(SYS_ENCODING))
CFG.set('General', 'alternate_dir', ALTERNATE_DIR.encode(SYS_ENCODING))
CFG.set('General', 'download_dir', DOWNLOAD_DIR.encode(SYS_ENCODING))
CFG.set('General', 'cache_age', CACHE_AGE)
CFG.set('General', 'task_age', TASK_AGE)
CFG.set('General', 'destination_copy', DESTINATION_COPY)
#
CFG.set('General', 'displaylength', DISPLAYLENGTH)
#
check_section('Git')
CFG.set('Git', 'git_user', GIT_USER)
CFG.set('Git', 'git_repo', GIT_REPO)
CFG.set('Git', 'git_branch', GIT_BRANCH)
CFG.set('Git', 'install_type', INSTALL_TYPE)
CFG.set('Git', 'current_version', CURRENT_VERSION)
CFG.set('Git', 'latest_version', LATEST_VERSION)
CFG.set('Git', 'commits_behind', COMMITS_BEHIND)
#
check_section('USENET')
CFG.set('USENET', 'nzb_downloader_sabnzbd', NZB_DOWNLOADER_SABNZBD)
CFG.set('USENET', 'nzb_downloader_nzbget', NZB_DOWNLOADER_NZBGET)
CFG.set('USENET', 'nzb_downloader_synology', NZB_DOWNLOADER_SYNOLOGY)
CFG.set('USENET', 'nzb_downloader_blackhole', NZB_DOWNLOADER_BLACKHOLE)
CFG.set('USENET', 'nzb_blackholedir', NZB_BLACKHOLEDIR)
CFG.set('USENET', 'usenet_retention', USENET_RETENTION)
#
check_section('SABnzbd')
CFG.set('SABnzbd', 'sab_host', SAB_HOST)
CFG.set('SABnzbd', 'sab_port', SAB_PORT)
CFG.set('SABnzbd', 'sab_subdir', SAB_SUBDIR)
CFG.set('SABnzbd', 'sab_user', SAB_USER)
CFG.set('SABnzbd', 'sab_pass', SAB_PASS)
CFG.set('SABnzbd', 'sab_api', SAB_API)
CFG.set('SABnzbd', 'sab_cat', SAB_CAT)
#
check_section('NZBGet')
CFG.set('NZBGet', 'nzbget_host', NZBGET_HOST)
CFG.set('NZBGet', 'nzbget_port', NZBGET_PORT)
CFG.set('NZBGet', 'nzbget_user', NZBGET_USER)
CFG.set('NZBGet', 'nzbget_pass', NZBGET_PASS)
CFG.set('NZBGet', 'nzbget_cat', NZBGET_CATEGORY)
CFG.set('NZBGet', 'nzbget_priority', NZBGET_PRIORITY)
#
check_section('API')
CFG.set('API', 'book_api', BOOK_API)
CFG.set('API', 'gr_api', GR_API)
CFG.set('API', 'gb_api', GB_API)
#
check_section('NZBMatrix')
CFG.set('NZBMatrix', 'nzbmatrix', NZBMATRIX)
CFG.set('NZBMatrix', 'nzbmatrix_user', NZBMATRIX_USER)
CFG.set('NZBMatrix', 'nzbmatrix_api', NZBMATRIX_API)
#
for provider in NEWZNAB_PROV:
check_section(provider['NAME'])
CFG.set(provider['NAME'], 'ENABLED', provider['ENABLED'])
oldprovider = check_setting_str(CFG, provider['NAME'], 'HOST', '', log=False)
CFG.set(provider['NAME'], 'HOST', provider['HOST'])
CFG.set(provider['NAME'], 'API', provider['API'])
CFG.set(provider['NAME'], 'GENERALSEARCH', provider['GENERALSEARCH'])
CFG.set(provider['NAME'], 'BOOKSEARCH', provider['BOOKSEARCH'])
CFG.set(provider['NAME'], 'MAGSEARCH', provider['MAGSEARCH'])
CFG.set(provider['NAME'], 'BOOKCAT', provider['BOOKCAT'])
CFG.set(provider['NAME'], 'MAGCAT', provider['MAGCAT'])
CFG.set(provider['NAME'], 'EXTENDED', provider['EXTENDED'])
if provider['HOST'] == oldprovider:
CFG.set(provider['NAME'], 'UPDATED', provider['UPDATED'])
CFG.set(provider['NAME'], 'MANUAL', provider['MANUAL'])
else:
logger.debug('Reset %s as provider changed' % provider['NAME'])
CFG.set(provider['NAME'], 'UPDATED', '')
CFG.set(provider['NAME'], 'MANUAL', False)
add_newz_slot()
#
for provider in TORZNAB_PROV:
check_section(provider['NAME'])
CFG.set(provider['NAME'], 'ENABLED', provider['ENABLED'])
oldprovider = check_setting_str(CFG, provider['NAME'], 'HOST', '', log=False)
CFG.set(provider['NAME'], 'HOST', provider['HOST'])
CFG.set(provider['NAME'], 'API', provider['API'])
CFG.set(provider['NAME'], 'GENERALSEARCH', provider['GENERALSEARCH'])
CFG.set(provider['NAME'], 'BOOKSEARCH', provider['BOOKSEARCH'])
CFG.set(provider['NAME'], 'MAGSEARCH', provider['MAGSEARCH'])
CFG.set(provider['NAME'], 'BOOKCAT', provider['BOOKCAT'])
CFG.set(provider['NAME'], 'MAGCAT', provider['MAGCAT'])
CFG.set(provider['NAME'], 'EXTENDED', provider['EXTENDED'])
if provider['HOST'] == oldprovider:
CFG.set(provider['NAME'], 'UPDATED', provider['UPDATED'])
CFG.set(provider['NAME'], 'MANUAL', provider['MANUAL'])
else:
logger.debug('Reset %s as provider changed' % provider['NAME'])
CFG.set(provider['NAME'], 'UPDATED', '')
CFG.set(provider['NAME'], 'MANUAL', False)
add_torz_slot()
#
for provider in RSS_PROV:
check_section(provider['NAME'])
CFG.set(provider['NAME'], 'ENABLED', provider['ENABLED'])
CFG.set(provider['NAME'], 'HOST', provider['HOST'])
# CFG.set(provider['NAME'], 'USER', provider['USER'])
# CFG.set(provider['NAME'], 'PASS', provider['PASS'])
add_rss_slot()
#
check_section('Newzbin')
CFG.set('Newzbin', 'newzbin', NEWZBIN)
CFG.set('Newzbin', 'newzbin_uid', NEWZBIN_UID)
CFG.set('Newzbin', 'newzbin_pass', NEWZBIN_PASS)
#
check_section('TORRENT')
CFG.set('TORRENT', 'tor_downloader_blackhole', TOR_DOWNLOADER_BLACKHOLE)
CFG.set('TORRENT', 'tor_convert_magnet', TOR_CONVERT_MAGNET)
CFG.set('TORRENT', 'tor_downloader_utorrent', TOR_DOWNLOADER_UTORRENT)
CFG.set('TORRENT', 'tor_downloader_rtorrent', TOR_DOWNLOADER_RTORRENT)
CFG.set('TORRENT', 'tor_downloader_qbittorrent', TOR_DOWNLOADER_QBITTORRENT)
CFG.set('TORRENT', 'tor_downloader_transmission', TOR_DOWNLOADER_TRANSMISSION)
CFG.set('TORRENT', 'tor_downloader_synology', TOR_DOWNLOADER_SYNOLOGY)
CFG.set('TORRENT', 'tor_downloader_deluge', TOR_DOWNLOADER_DELUGE)
CFG.set('TORRENT', 'numberofseeders', NUMBEROFSEEDERS)
CFG.set('TORRENT', 'torrent_dir', TORRENT_DIR)
CFG.set('TORRENT', 'keep_seeding', KEEP_SEEDING)
CFG.set('TORRENT', 'prefer_magnet', PREFER_MAGNET)
#
check_section('RTORRENT')
CFG.set('RTORRENT', 'rtorrent_host', RTORRENT_HOST)
CFG.set('RTORRENT', 'rtorrent_user', RTORRENT_USER)
CFG.set('RTORRENT', 'rtorrent_pass', RTORRENT_PASS)
CFG.set('RTORRENT', 'rtorrent_label', RTORRENT_LABEL)
CFG.set('RTORRENT', 'rtorrent_dir', RTORRENT_DIR)
#
check_section('UTORRENT')
CFG.set('UTORRENT', 'utorrent_host', UTORRENT_HOST)
CFG.set('UTORRENT', 'utorrent_port', UTORRENT_PORT)
CFG.set('UTORRENT', 'utorrent_user', UTORRENT_USER)
CFG.set('UTORRENT', 'utorrent_pass', UTORRENT_PASS)
CFG.set('UTORRENT', 'utorrent_label', UTORRENT_LABEL)
#
check_section('SYNOLOGY')
CFG.set('SYNOLOGY', 'synology_host', SYNOLOGY_HOST)
CFG.set('SYNOLOGY', 'synology_port', SYNOLOGY_PORT)
CFG.set('SYNOLOGY', 'synology_user', SYNOLOGY_USER)
CFG.set('SYNOLOGY', 'synology_pass', SYNOLOGY_PASS)
CFG.set('SYNOLOGY', 'synology_dir', SYNOLOGY_DIR)
CFG.set('SYNOLOGY', 'use_synology', USE_SYNOLOGY)
#
check_section('QBITTORRENT')
CFG.set('QBITTORRENT', 'qbittorrent_host', QBITTORRENT_HOST)
CFG.set('QBITTORRENT', 'qbittorrent_port', QBITTORRENT_PORT)
CFG.set('QBITTORRENT', 'qbittorrent_user', QBITTORRENT_USER)
CFG.set('QBITTORRENT', 'qbittorrent_pass', QBITTORRENT_PASS)
CFG.set('QBITTORRENT', 'qbittorrent_label', QBITTORRENT_LABEL)
#
check_section('TRANSMISSION')
CFG.set('TRANSMISSION', 'transmission_host', TRANSMISSION_HOST)
CFG.set('TRANSMISSION', 'transmission_port', TRANSMISSION_PORT)
CFG.set('TRANSMISSION', 'transmission_user', TRANSMISSION_USER)
CFG.set('TRANSMISSION', 'transmission_pass', TRANSMISSION_PASS)
#
check_section('DELUGE')
CFG.set('DELUGE', 'deluge_host', DELUGE_HOST)
CFG.set('DELUGE', 'deluge_port', DELUGE_PORT)
CFG.set('DELUGE', 'deluge_user', DELUGE_USER)
CFG.set('DELUGE', 'deluge_pass', DELUGE_PASS)
CFG.set('DELUGE', 'deluge_label', DELUGE_LABEL)
#
check_section('KAT')
CFG.set('KAT', 'kat', KAT)
CFG.set('KAT', 'kat_host', KAT_HOST)
#
check_section('TPB')
CFG.set('TPB', 'tpb', TPB)
CFG.set('TPB', 'tpb_host', TPB_HOST)
#
check_section('ZOO')
CFG.set('ZOO', 'zoo', ZOO)
CFG.set('ZOO', 'zoo_host', ZOO_HOST)
#
check_section('EXTRA')
CFG.set('EXTRA', 'extra', EXTRA)
CFG.set('EXTRA', 'extra_host', EXTRA_HOST)
#
check_section('LIME')
CFG.set('LIME', 'lime', LIME)
CFG.set('LIME', 'lime_host', LIME_HOST)
#
check_section('GEN')
CFG.set('GEN', 'gen', GEN)
CFG.set('GEN', 'gen_host', GEN_HOST)
#
check_section('TDL')
CFG.set('TDL', 'tdl', TDL)
CFG.set('TDL', 'tdl_host', TDL_HOST)
#
check_section('SearchScan')
CFG.set('SearchScan', 'search_interval', SEARCH_INTERVAL)
CFG.set('SearchScan', 'scan_interval', SCAN_INTERVAL)
CFG.set('SearchScan', 'searchrss_interval', SEARCHRSS_INTERVAL)
CFG.set('SearchScan', 'versioncheck_interval', VERSIONCHECK_INTERVAL)
#
check_section('LibraryScan')
CFG.set('LibraryScan', 'full_scan', FULL_SCAN)
CFG.set('LibraryScan', 'add_author', ADD_AUTHOR)
CFG.set('LibraryScan', 'notfound_status', NOTFOUND_STATUS)
CFG.set('LibraryScan', 'newbook_status', NEWBOOK_STATUS)
CFG.set('LibraryScan', 'newauthor_status', NEWAUTHOR_STATUS)
#
check_section('PostProcess')
CFG.set('PostProcess', 'ebook_dest_folder', EBOOK_DEST_FOLDER.encode(SYS_ENCODING))
CFG.set('PostProcess', 'ebook_dest_file', EBOOK_DEST_FILE.encode(SYS_ENCODING))
CFG.set('PostProcess', 'mag_dest_folder', MAG_DEST_FOLDER.encode(SYS_ENCODING))
CFG.set('PostProcess', 'mag_dest_file', MAG_DEST_FILE.encode(SYS_ENCODING))
CFG.set('PostProcess', 'mag_relative', MAG_RELATIVE)
#
check_section('Twitter')
CFG.set('Twitter', 'use_twitter', USE_TWITTER)
CFG.set('Twitter', 'twitter_notify_onsnatch', TWITTER_NOTIFY_ONSNATCH)
CFG.set('Twitter', 'twitter_notify_ondownload', TWITTER_NOTIFY_ONDOWNLOAD)
CFG.set('Twitter', 'twitter_username', TWITTER_USERNAME)
CFG.set('Twitter', 'twitter_password', TWITTER_PASSWORD)
CFG.set('Twitter', 'twitter_prefix', TWITTER_PREFIX)
#
check_section('Boxcar')
CFG.set('Boxcar', 'use_boxcar', USE_BOXCAR)
CFG.set('Boxcar', 'boxcar_notify_onsnatch', BOXCAR_NOTIFY_ONSNATCH)
CFG.set('Boxcar', 'boxcar_notify_ondownload', BOXCAR_NOTIFY_ONDOWNLOAD)
CFG.set('Boxcar', 'boxcar_token', BOXCAR_TOKEN)
#
check_section('Pushbullet')
CFG.set('Pushbullet', 'use_pushbullet', USE_PUSHBULLET)
CFG.set('Pushbullet', 'pushbullet_notify_onsnatch', PUSHBULLET_NOTIFY_ONSNATCH)
CFG.set('Pushbullet', 'pushbullet_notify_ondownload', PUSHBULLET_NOTIFY_ONDOWNLOAD)
CFG.set('Pushbullet', 'pushbullet_token', PUSHBULLET_TOKEN)
CFG.set('Pushbullet', 'pushbullet_deviceid', PUSHBULLET_DEVICEID)
#
check_section('Pushover')
CFG.set('Pushover', 'use_pushover', USE_PUSHOVER)
CFG.set('Pushover', 'pushover_onsnatch', PUSHOVER_ONSNATCH)
CFG.set('Pushover', 'pushover_ondownload', PUSHOVER_ONDOWNLOAD)
CFG.set('Pushover', 'pushover_priority', PUSHOVER_PRIORITY)
CFG.set('Pushover', 'pushover_keys', PUSHOVER_KEYS)
CFG.set('Pushover', 'pushover_apitoken', PUSHOVER_APITOKEN)
CFG.set('Pushover', 'pushover_device', PUSHOVER_DEVICE)
#
check_section('AndroidPN')
CFG.set('AndroidPN', 'use_androidpn', USE_ANDROIDPN)
CFG.set('AndroidPN', 'androidpn_notify_onsnatch', ANDROIDPN_NOTIFY_ONSNATCH)
CFG.set('AndroidPN', 'androidpn_notify_ondownload', ANDROIDPN_NOTIFY_ONDOWNLOAD)
CFG.set('AndroidPN', 'androidpn_url', ANDROIDPN_URL)
CFG.set('AndroidPN', 'androidpn_username', ANDROIDPN_USERNAME)
CFG.set('AndroidPN', 'androidpn_broadcast', ANDROIDPN_BROADCAST)
#
check_section('NMA')
CFG.set('NMA', 'use_nma', USE_NMA)
CFG.set('NMA', 'nma_apikey', NMA_APIKEY)
CFG.set('NMA', 'nma_priority', NMA_PRIORITY)
CFG.set('NMA', 'nma_onsnatch', NMA_ONSNATCH)
CFG.set('NMA', 'nma_ondownload', NMA_ONDOWNLOAD)
#
check_section('Slack')
CFG.set('Slack', 'use_slack', USE_SLACK)
CFG.set('Slack', 'slack_notify_onsnatch', SLACK_NOTIFY_ONSNATCH)
CFG.set('Slack', 'slack_notify_ondownload', SLACK_NOTIFY_ONDOWNLOAD)
CFG.set('Slack', 'slack_token', SLACK_TOKEN)
#
check_section('Email')
CFG.set('Email', 'use_email', USE_EMAIL)
CFG.set('Email', 'email_notify_onsnatch', EMAIL_NOTIFY_ONSNATCH)
CFG.set('Email', 'email_notify_ondownload', EMAIL_NOTIFY_ONDOWNLOAD)
CFG.set('Email', 'email_from', EMAIL_FROM)
CFG.set('Email', 'email_to', EMAIL_TO)
CFG.set('Email', 'email_ssl', EMAIL_SSL)
CFG.set('Email', 'email_smtp_server', EMAIL_SMTP_SERVER)
CFG.set('Email', 'email_smtp_port', EMAIL_SMTP_PORT)
CFG.set('Email', 'email_tls', EMAIL_TLS)
CFG.set('Email', 'email_smtp_user', EMAIL_SMTP_USER)
CFG.set('Email', 'email_smtp_password', EMAIL_SMTP_PASSWORD)
with open(CONFIGFILE + '.new', 'wb') as configfile:
CFG.write(configfile)
try:
os.remove(CONFIGFILE + '.bak')
except OSError as e:
        if e.errno != 2:  # doesn't exist is ok
logger.debug('{} {}{} {}'.format('Error deleting backup file:', CONFIGFILE, '.bak', e.strerror))
try:
os.rename(CONFIGFILE, CONFIGFILE + '.bak')
except OSError as e:
        if e.errno != 2:  # doesn't exist is ok as wouldn't exist until first save
logger.debug('{} {} {}'.format('Unable to backup config file:', CONFIGFILE, e.strerror))
try:
os.rename(CONFIGFILE + '.new', CONFIGFILE)
except OSError as e:
logger.debug('{} {} {}'.format('Unable to create new config file:', CONFIGFILE, e.strerror))
def add_newz_slot():
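    """Append an empty Newznab provider slot if the list is empty
    or the last slot already has a host configured."""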
count = len(NEWZNAB_PROV)
if count == 0 or len(CFG.get('Newznab%i' % int(count - 1), 'HOST')):
newz_name = 'Newznab%i' % count
check_section(newz_name)
CFG.set(newz_name, 'ENABLED', False)
CFG.set(newz_name, 'HOST', '')
CFG.set(newz_name, 'API', '')
CFG.set(newz_name, 'GENERALSEARCH', 'search')
CFG.set(newz_name, 'BOOKSEARCH', 'book')
CFG.set(newz_name, 'MAGSEARCH', '')
CFG.set(newz_name, 'BOOKCAT', '7000,7020')
CFG.set(newz_name, 'MAGCAT', '7010')
CFG.set(newz_name, 'EXTENDED', '1')
CFG.set(newz_name, 'UPDATED', '')
CFG.set(newz_name, 'MANUAL', False)
NEWZNAB_PROV.append({"NAME": newz_name,
"ENABLED": 0,
"HOST": '',
"API": '',
"GENERALSEARCH": 'search',
"BOOKSEARCH": 'book',
"MAGSEARCH": '',
"BOOKCAT": '7000,7020',
"MAGCAT": '7010',
"EXTENDED": '1',
"UPDATED": '',
"MANUAL": 0
})
def add_torz_slot():
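    """Append an empty Torznab provider slot if the list is empty
    or the last slot already has a host configured."""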
count = len(TORZNAB_PROV)
if count == 0 or len(CFG.get('Torznab%i' % int(count - 1), 'HOST')):
torz_name = 'Torznab%i' % count
check_section(torz_name)
CFG.set(torz_name, 'ENABLED', False)
CFG.set(torz_name, 'HOST', '')
CFG.set(torz_name, 'API', '')
CFG.set(torz_name, 'GENERALSEARCH', 'search')
CFG.set(torz_name, 'BOOKSEARCH', 'book')
CFG.set(torz_name, 'MAGSEARCH', '')
        CFG.set(torz_name, 'BOOKCAT', '8000,8010')
        CFG.set(torz_name, 'MAGCAT', '8030')
CFG.set(torz_name, 'EXTENDED', '1')
CFG.set(torz_name, 'UPDATED', '')
CFG.set(torz_name, 'MANUAL', False)
TORZNAB_PROV.append({"NAME": torz_name,
"ENABLED": 0,
"HOST": '',
"API": '',
"GENERALSEARCH": 'search',
"BOOKSEARCH": 'book',
"MAGSEARCH": '',
"BOOKCAT": '8000,8010',
"MAGCAT": '8030',
"EXTENDED": '1',
"UPDATED": '',
"MANUAL": 0
})
def USE_NZB():
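    """Return True if any Newznab or Torznab provider is enabled."""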
for provider in NEWZNAB_PROV:
if bool(provider['ENABLED']):
return True
for provider in TORZNAB_PROV:
if bool(provider['ENABLED']):
return True
return False
def DIRECTORY(dirname):
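    """Return a usable, writable directory for the given purpose,
    falling back to the current working directory if necessary."""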
usedir = ''
if dirname == "Destination":
usedir = DESTINATION_DIR
elif dirname == "Download":
usedir = DOWNLOAD_DIR
# elif dirname == "Alternate":
# usedir = ALTERNATE_DIR
else:
return usedir
if not usedir or not os.path.isdir(usedir) or not os.access(usedir, os.W_OK | os.X_OK):
usedir = os.getcwd()
logger.warn("%s dir not usable, using %s" % (dirname, usedir))
# return directory as unicode so we get unicode results from listdir
if isinstance(usedir, str):
usedir = usedir.decode(SYS_ENCODING)
return usedir
def add_rss_slot():
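    """Append an empty RSS provider slot if the list is empty
    or the last slot already has a host configured."""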
count = len(RSS_PROV)
if count == 0 or len(CFG.get('RSS_%i' % int(count - 1), 'HOST')):
rss_name = 'RSS_%i' % count
check_section(rss_name)
CFG.set(rss_name, 'ENABLED', False)
CFG.set(rss_name, 'HOST', '')
# CFG.set(rss_name, 'USER', '')
# CFG.set(rss_name, 'PASS', '')
RSS_PROV.append({"NAME": rss_name,
"ENABLED": 0,
"HOST": ''
})
def USE_RSS():
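    """Return True if any RSS provider is enabled."""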
for provider in RSS_PROV:
if bool(provider['ENABLED']):
return True
return False
def USE_TOR():
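    """Return True if any torrent site provider is enabled."""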
if bool(KAT):
return True
if bool(TPB):
return True
if bool(ZOO):
return True
if bool(EXTRA):
return True
if bool(LIME):
return True
if bool(TDL):
return True
if bool(GEN):
return True
return False
def build_bookstrap_themes():
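    """Return the list of bookstrap theme names from the bootswatch API,
    or an empty list if the bookstrap interface is not installed."""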
themelist = []
if not os.path.isdir(os.path.join(PROG_DIR, 'data/interfaces/bookstrap/')):
return themelist # return empty if bookstrap interface not installed
URL = 'http://bootswatch.com/api/3.json'
result, success = fetchURL(URL, None, False) # use default headers, no retry
if not success:
logger.debug("Error getting bookstrap themes : %s" % result)
return themelist
try:
results = json.loads(result)
for theme in results['themes']:
themelist.append(theme['name'].lower())
except Exception as e:
# error reading results
logger.debug('JSON Error reading bookstrap themes, %s' % str(e))
logger.debug("Bookstrap found %i themes" % len(themelist))
return themelist
def build_monthtable():
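    """Extend the MONTHNAMES table with month names and abbreviations
    for the current locale plus any extra languages from the config."""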
if len(getList(IMP_MONTHLANG)) == 0: # any extra languages wanted?
return
try:
current_locale = locale.setlocale(locale.LC_ALL, '') # read current state.
        # getdefaultlocale() doesn't seem to work as expected on windows, returns 'None'
except locale.Error as e:
logger.debug("Error getting current locale : %s" % str(e))
return
lang = str(current_locale)
if not lang.startswith('en_'): # en_ is preloaded
MONTHNAMES[0].append(lang)
for f in range(1, 13):
MONTHNAMES[f].append(unaccented(calendar.month_name[f]).lower())
MONTHNAMES[0].append(lang)
for f in range(1, 13):
MONTHNAMES[f].append(unaccented(calendar.month_abbr[f]).lower().strip('.'))
logger.info("Added month names for locale [%s], %s, %s ..." % (
lang, MONTHNAMES[1][len(MONTHNAMES[1]) - 2], MONTHNAMES[1][len(MONTHNAMES[1]) - 1]))
for lang in getList(IMP_MONTHLANG):
try:
if len(lang) > 1:
locale.setlocale(locale.LC_ALL, lang)
MONTHNAMES[0].append(lang)
for f in range(1, 13):
MONTHNAMES[f].append(unaccented(calendar.month_name[f]).lower())
MONTHNAMES[0].append(lang)
for f in range(1, 13):
MONTHNAMES[f].append(unaccented(calendar.month_abbr[f]).lower().strip('.'))
locale.setlocale(locale.LC_ALL, current_locale) # restore entry state
logger.info("Added month names for locale [%s], %s, %s ..." % (
lang, MONTHNAMES[1][len(MONTHNAMES[1]) - 2], MONTHNAMES[1][len(MONTHNAMES[1]) - 1]))
except Exception as e:
locale.setlocale(locale.LC_ALL, current_locale) # restore entry state
logger.warn("Unable to load requested locale [%s] %s" % (lang, str(e)))
try:
if '_' in lang:
wanted_lang = lang.split('_')[0]
else:
wanted_lang = lang
params = ['locale', '-a']
all_locales = subprocess.check_output(params).split()
locale_list = []
for a_locale in all_locales:
if a_locale.startswith(wanted_lang):
locale_list.append(a_locale)
if locale_list:
logger.warn("Found these alternatives: " + str(locale_list))
else:
logger.warn("Unable to find an alternative")
except Exception as e:
logger.warn("Unable to get a list of alternatives, %s" % str(e))
logger.info("Set locale back to entry state %s" % current_locale)
def daemonize():
"""
Fork off as a daemon
"""
# Make a non-session-leader child process
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
sys.exit(0)
except OSError as e:
raise RuntimeError("1st fork failed: %s [%d]" %
(e.strerror, e.errno))
os.setsid() # @UndefinedVariable - only available in UNIX
# Make sure I can read my own files and shut out others
prev = os.umask(0)
os.umask(prev and int('077', 8))
# Make the child a session-leader by detaching from the terminal
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
sys.exit(0)
except OSError as e:
raise RuntimeError("2st fork failed: %s [%d]" %
(e.strerror, e.errno))
dev_null = file('/dev/null', 'r')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
if PIDFILE:
pid = str(os.getpid())
logger.debug(u"Writing PID " + pid + " to " + str(PIDFILE))
file(PIDFILE, 'w').write("%s\n" % pid)
def launch_browser(host, port, root):
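    """Open the web interface in the default browser."""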
if host == '0.0.0.0':
host = 'localhost'
try:
webbrowser.open('http://%s:%i%s' % (host, port, root))
except Exception as e:
logger.error('Could not launch browser: %s' % str(e))
def db_needs_upgrade():
"""
Check if database needs upgrading
Return zero if up-to-date
Return current version if needs upgrade
"""
myDB = database.DBConnection()
result = myDB.match('PRAGMA user_version')
# Had a report of "index out of range", can't replicate it.
# Maybe on some versions of sqlite an unset user_version
# or unsupported pragma gives an empty result?
if result:
db_version = result[0]
else:
db_version = 0
# database version history:
# 0 original version or new empty database
# 1 changes up to June 2016
# 2 removed " MB" from nzbsize field in wanted table
# 3 removed SeriesOrder column from books table as redundant
# 4 added duplicates column to stats table
# 5 issue numbers padded to 4 digits with leading zeros
# 6 added Manual field to books table for user editing
# 7 added Source and DownloadID to wanted table for download monitoring
# 8 move image cache from data/images/cache into datadir
# 9 add regex to magazine table
# 10 check for missing columns in pastissues table
db_current_version = 10
if db_version < db_current_version:
return db_current_version
return 0
def start():
global __INITIALIZED__, started
if __INITIALIZED__:
# Crons and scheduled jobs started here
SCHED.start()
restartJobs(start='Start')
started = True
def shutdown(restart=False, update=False):
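    """Stop the webserver and scheduler, optionally applying an update
    and/or restarting the process."""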
cherrypy.engine.exit()
SCHED.shutdown(wait=False)
# config_write() don't automatically rewrite config on exit
if not restart and not update:
logger.info('LazyLibrarian is shutting down...')
if update:
logger.info('LazyLibrarian is updating...')
try:
versioncheck.update()
except Exception as e:
logger.warn('LazyLibrarian failed to update: %s. Restarting.' % str(e))
if PIDFILE:
logger.info('Removing pidfile %s' % PIDFILE)
os.remove(PIDFILE)
if restart:
logger.info('LazyLibrarian is restarting ...')
popen_list = [sys.executable, FULL_PATH]
popen_list += ARGS
if '--update' in popen_list:
popen_list.remove('--update')
if '--nolaunch' not in popen_list:
popen_list += ['--nolaunch']
logger.info('Restarting LazyLibrarian with ' + str(popen_list))
subprocess.Popen(popen_list, cwd=os.getcwd())
os._exit(0)
| gpl-3.0 | 6,376,135,073,458,459,000 | 41.51198 | 114 | 0.625994 | false | 3.082804 | true | false | false |
tom-mi/logrotor | test/util/test_syslog_util.py | 1 | 2429 | from datetime import datetime
from freezegun import freeze_time
import pytest
from logrotor.util.syslog import message_to_bytes, bytes_to_message, Message, Facility, Level
@pytest.mark.parametrize('message,data', [
(Message(Level.CRIT, Facility.KERN, datetime(2018, 7, 28, 1, 2, 3), 'localhost', 'tag', None, 'Test message'),
b'<2>Jul 28 01:02:03 localhost tag: Test message'),
(Message(Level.ERR, Facility.CRON, datetime(2018, 1, 1, 15, 55, 55), 'localhost', 'tag', None, 'Test message'),
b'<75>Jan 1 15:55:55 localhost tag: Test message'),
(Message(Level.ERR, Facility.CRON, datetime(2018, 1, 1, 15, 55, 55), 'localhost', 'tag', None, 'Test message'),
b'<75>Jan 1 15:55:55 localhost tag: Test message'),
])
@freeze_time('2018-01-01')
def test_message_conversion(message, data):
assert message_to_bytes(message) == data
converted_message = bytes_to_message(data)
assert converted_message.level == message.level
assert converted_message.facility == message.facility
assert converted_message.timestamp == message.timestamp
assert converted_message.hostname == message.hostname
assert converted_message.tag == message.tag
assert converted_message.message == message.message
@pytest.mark.parametrize('tag,pid,data,two_way', [
('tag', None, b'<75>Jan 1 15:55:55 localhost tag: Test', True),
('tag', None, b'<75>Jan 1 15:55:55 localhost tag Test', False),
('tag', 42, b'<75>Jan 1 15:55:55 localhost tag[42]: Test', True),
('tag', 42, b'<75>Jan 1 15:55:55 localhost tag[42] Test', False),
])
@freeze_time('2018-01-01')
def test_parse_tags(tag, pid, data, two_way):
message = Message(Level.ERR, Facility.CRON, datetime(2018, 1, 1, 15, 55, 55), 'localhost', tag, pid, 'Test')
if two_way:
assert message_to_bytes(message) == data
converted_message = bytes_to_message(data)
assert converted_message.level == message.level
assert converted_message.facility == message.facility
assert converted_message.timestamp == message.timestamp
assert converted_message.hostname == message.hostname
assert converted_message.tag == message.tag
assert converted_message.pid == message.pid
assert converted_message.message == message.message
def test_syslog_bytes_to_message(benchmark):
data = b'<75>Jan 1 15:55:55 localhost tag[42]: Test message'
benchmark(bytes_to_message, data)
| mit | -5,929,938,222,142,129,000 | 42.375 | 115 | 0.685467 | false | 3.421127 | true | false | false |
Poorchop/hexchat-scripts | old/twitch-title.py | 1 | 3987 | #!/usr/bin/env python3
import hexchat
import requests
import sys
import threading
__module_name__ = "Twitch Title"
__module_author__ = "Poorchop"
__module_version__ = "1.0"
__module_description__ = "Display stream status and description for TwitchTV streams"
t = None
twitch_chans = {}
def set_topic(channel, display_name, status, game, title):
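    """Print the stream status to the channel and mirror it in the topic bar."""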
global twitch_chans
channel = "#" + channel
msg = "\00318{0}\00399 - {1} | Now playing: \00318{2}\00399 | {3}".format(display_name, status, game, title)
stripped_msg = hexchat.strip(msg, -1, 3)
if twitch_chans[channel] != stripped_msg:
twitch_chans[channel] = stripped_msg
# try to print stream status in current channel - doesn't seem to work without Do At plugin
current_chan = hexchat.get_info("channel")
hexchat.find_context(channel=current_chan).prnt(msg)
# get the proper context for the topic event
context = hexchat.find_context(channel=channel)
if sys.platform == "win32":
# HexChat on Windows has poor support for colors in topic bar
context.command("RECV :{0}[email protected] TOPIC {0} :{1}".format(channel, stripped_msg))
else:
context.command("RECV :{0}[email protected] TOPIC {0} :{1}".format(channel, msg))
def get_stream_info(channel):
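    """Query the Twitch API for the channel's live status and update the topic."""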
url = "https://api.twitch.tv/kraken/streams?"
params = {"channel": channel}
r = requests.get(url, params=params)
data = r.json()
display_name = channel
game = ""
title = "\035Stream is offline\017"
if not data["streams"]:
status = "\00320\002OFFLINE\002\00399"
else:
status = "\00319\002LIVE\002\00399"
display_name = data["streams"][0]["channel"]["display_name"]
game = data["streams"][0]["channel"]["game"]
title = data["streams"][0]["channel"]["status"]
set_topic(channel, display_name, status, game, title)
def update_status():
global twitch_chans
if twitch_chans:
for chan in twitch_chans:
channel = chan[1:]
get_stream_info(channel)
def get_twitch_chans():
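    """Start tracking any open Twitch channels not already being watched."""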
global twitch_chans
for chan in hexchat.get_list("channels"):
if chan.type == 2 and chan.context.get_info("server") == "tmi.twitch.tv" and chan.channel not in twitch_chans:
twitch_chans[chan.channel] = ""
def channel_check():
"""
Check to see if there are any open Twitch channels; if so, then start/continue the threaded process
"""
for chan in hexchat.get_list("channels"):
if chan.type == 2 and chan.context.get_info("server") == "tmi.twitch.tv":
return True
return False
def get_current_status():
"""
Update the stream status every 10 minutes
"""
global t
if channel_check():
get_twitch_chans()
update_status()
t = threading.Timer(600, get_current_status)
t.daemon = True
t.start()
else:
if t:
t.cancel()
t.join()
t = None
def is_twitch():
server = hexchat.get_info("server")
if server and "twitch.tv" in server:
return True
else:
return False
def join_cb(word, word_eol, userdata):
"""
Restart the threaded process if necessary, then immediately get the stream status
"""
global t
global twitch_chans
if is_twitch():
if not t:
get_current_status()
channel = hexchat.get_info("channel")
        # TODO: make safer and don't modify the same object that is modified by get_stream_info
twitch_chans[channel] = ""
channel = channel[1:]
get_stream_info(channel)
def unload_cb(userdata):
"""
Prevent HexChat from crashing while a thread is active
"""
global t
if t:
t.cancel()
t.join()
hexchat.hook_unload(unload_cb)
hexchat.hook_print("Open Context", join_cb)
get_current_status()
print(__module_name__ + " version " + __module_version__ + " loaded")
| mit | -6,793,062,347,922,079,000 | 28.533333 | 118 | 0.614748 | false | 3.497368 | false | false | false |
kusamau/cedaMarkup | ceda_markup/opensearch/os_request.py | 1 | 11997 | '''
BSD Licence
Copyright (c) 2012, Science & Technology Facilities Council (STFC)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Science & Technology Facilities Council (STFC)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Created on 5 May 2012
@author: Maurizio Nagni
'''
from osquery import URL_REL_DEFAULT, URL_INDEX_OFFSET_DEFAULT, \
URL_PAGE_OFFSET_DEFAULT
from ceda_markup.markup import createMarkup, createSimpleMarkup
from ceda_markup import get_mimetype
from ceda_markup.opensearch import create_template_query
MAX_OS_SHORT_NAME_LEN = 16
MAX_OS_LONG_NAME_LEN = 48
MAX_OS_TAGS_LEN = 256
MAX_OS_DESCRIPTION_LEN = 1024
MAX_OS_DEVELOPER_LEN = 64
MAX_OS_ATTRIBUTION_LEN = 256
SYNDACATION_OPEN = 'open'
SYNDACATION_LIMITED = 'limited'
SYNDACATION_PRIVATE = 'private'
SYNDACATION_CLOSED = 'closed'
OS_SYNDACATION_RIGHT = [SYNDACATION_OPEN, SYNDACATION_LIMITED, SYNDACATION_PRIVATE, SYNDACATION_CLOSED]
OS_SYNDACATION_RIGHT_DEFAULT = SYNDACATION_OPEN
OS_ADULT_CONTENT_DEFAULT = False
OS_INPUT_ENCODING_DEFAULT = 'UTF-8'
OS_OUTPUT_ENCODING_DEFAULT = 'UTF-8'
OS_NAMESPACE = 'http://a9.com/-/spec/opensearch/1.1/'
OS_PREFIX = 'os'
OS_ROOT_TAG = 'OpenSearchDescription'
def create_url(query, response_type, ospath, root = None, \
tagName = OS_ROOT_TAG, ns = OS_NAMESPACE):
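    """Build an OpenSearch Url element whose template attribute combines
    the given base path, the response type and the query's parameters."""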
markup = createMarkup('Url', OS_PREFIX, ns, root)
markup.set("type", get_mimetype(response_type))
template_query = create_template_query(root, query)
query_template = ("%s%s?%s") % (ospath, response_type, template_query[:-1])
markup.set("template", query_template)
if query.rel is not None and query.rel != URL_REL_DEFAULT:
markup.set("rel", query.rel)
if query.indexOffset is not None \
and query.indexOffset != URL_INDEX_OFFSET_DEFAULT:
markup.set("indexOffset", str(query.indexOffset))
if query.pageOffset is not None \
and query.pageOffset != URL_PAGE_OFFSET_DEFAULT:
markup.set("pageOffset", str(query.pageOffset))
return markup
def create_short_name(short_name, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(short_name, root, 'ShortName', ns, OS_PREFIX)
def create_description(description, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(description, root, 'Description', ns, OS_PREFIX)
def create_tags(tags, root = None, tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(tags, root, 'Tags', ns, OS_PREFIX)
def create_contact(contact, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(contact, root, 'Contact', ns, OS_PREFIX)
def create_long_name(long_name, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(long_name, root, 'LongName', ns, OS_PREFIX)
def create_developer(developer, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(developer, root, 'Developer', ns, OS_PREFIX)
def create_attribution(attribution, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(attribution, root, 'Attribution', ns, OS_PREFIX)
def create_syndacation_right(syndacation_right, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(syndacation_right, root, 'SyndacationRight', \
ns, OS_PREFIX)
def create_adult_content(adult_content, root = None, \
tagName = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(adult_content, root, 'AdultContent', \
ns, OS_PREFIX)
def create_language(language, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(language, root, 'Language', ns, OS_PREFIX)
def create_input_encoding(input_encoding, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(input_encoding, root, 'InputEncoding', \
ns, OS_PREFIX)
def create_output_encoding(output_encoding, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
return createSimpleMarkup(output_encoding, root, 'OutputEncoding', \
ns, OS_PREFIX)
def create_image(url, height = None, width = None, root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
markup = createSimpleMarkup(url, root, 'Image', ns, OS_PREFIX)
if height is not None and isinstance(height, (int, long)):
markup.set("height", height)
if width is not None and isinstance(width, (int, long)):
markup.set("width", width)
return markup
def create_osdescription(os_responses, os_description, query, ospath, \
root = None, \
tag_name = OS_ROOT_TAG, ns = OS_NAMESPACE):
"""
    @param os_responses: a list of OSResponse instances
@param os_description: an OpenSearchDescription instance
@param query: an OSQuery instance
"""
markup = createMarkup(OS_ROOT_TAG, OS_PREFIX, ns, root)
markup.append(create_short_name(os_description.os_short_name, \
root = markup))
markup.append(create_description(os_description.os_description, \
root = markup))
if hasattr(os_description, 'os_tags'):
markup.append(create_tags(os_description.os_tags, root = markup))
if hasattr(os_description, 'os_contact'):
markup.append(create_contact(os_description.os_contact, \
root = markup))
if hasattr(os_description, 'os_long_name'):
markup.append(create_long_name(os_description.os_long_name, \
root = markup))
if hasattr(os_description, 'os_developer'):
markup.append(create_developer(os_description.os_developer, \
root = markup))
if hasattr(os_description, 'os_attribution'):
markup.append(create_attribution(os_description.os_attribution, \
root = markup))
if hasattr(os_description, 'os_image') \
and isinstance(os_description.os_image, list):
for img in os_description.os_image:
markup.append(create_image(img.url, img.height, img.width, \
root = markup))
if hasattr(os_description, 'os_syndacation_right') \
and os_description.os_syndacation_right != OS_SYNDACATION_RIGHT_DEFAULT:
markup.append(create_syndacation_right(os_description.os_syndacation_right, root = markup))
if hasattr(os_description, 'os_adult_content'):
markup.append(create_adult_content(os_description.os_adult_content, \
root = markup))
if os_description.os_language \
and isinstance(os_description.os_language, list):
for item in os_description.os_language:
markup.append(create_language(item, root = markup))
if os_description.os_input_encoding \
and isinstance(os_description.os_input_encoding, list):
for item in os_description.os_input_encoding:
markup.append(create_input_encoding(item, root = markup))
if os_description.os_output_encoding \
and isinstance(os_description.os_output_encoding, list):
for item in os_description.os_output_encoding:
markup.append(create_output_encoding(item, root = markup))
for item in os_responses:
url = create_url(query, item.extension, ospath, root = markup)
markup.append(url)
return markup
class OpenSearchDescription(object):
'''
classdocs
'''
def __init__(self, os_short_name, os_description, \
os_contact = None, os_tags = None, os_long_name = None, \
os_image = [], os_developer = None, os_attribution = None, \
os_syndacation_right = None, os_adult_content = None, \
os_language = ['*'], \
os_input_encoding = [OS_INPUT_ENCODING_DEFAULT], \
os_output_encoding = [OS_OUTPUT_ENCODING_DEFAULT]):
"""
@param os_image: a list of osImage instances
"""
self.os_syndacation_right = None
# should be set to True but because of
# http://code.google.com/p/gdata-python-client/issues/detail?id=611
# we cannot (for now)
self.os_adult_content = '1'
if os_description is not None:
self.os_description = os_description[:MAX_OS_DESCRIPTION_LEN]
if os_short_name is not None:
self.os_short_name = os_short_name[:MAX_OS_SHORT_NAME_LEN]
#Should check that is an email format
if os_contact is not None:
self.os_contact = os_contact
if os_tags is not None:
self.os_tags = os_tags[:MAX_OS_TAGS_LEN]
if os_long_name is not None:
self.os_long_name = os_long_name[:MAX_OS_LONG_NAME_LEN]
if os_developer is not None:
self.os_developer = os_developer[:MAX_OS_DEVELOPER_LEN]
if os_attribution is not None:
self.os_attribution = os_attribution[:MAX_OS_ATTRIBUTION_LEN]
if os_syndacation_right and os_syndacation_right in OS_SYNDACATION_RIGHT:
self.os_syndacation_right = os_syndacation_right
if os_adult_content is not None and os_adult_content in ['false', 'FALSE', '0', 'no', 'NO']:
# should be set to False but because of
# http://code.google.com/p/gdata-python-client/issues/detail?id=611
# we cannot (for now)
self.os_adult_content = '0'
self.os_image = os_image
self.os_language = os_language
self.os_input_encoding = os_input_encoding
self.os_output_encoding = os_output_encoding | bsd-3-clause | -6,705,487,842,680,196,000 | 42.788321 | 107 | 0.616404 | false | 3.886297 | false | false | false |
google/brain-tokyo-workshop | AttentionAgent/protobuf/roll_out_service_pb2.py | 1 | 10105 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobuf/roll_out_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobuf/roll_out_service.proto',
package='evolution_algorithms',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1fprotobuf/roll_out_service.proto\x12\x14\x65volution_algorithms\"#\n\rCMAParameters\x12\x12\n\nparameters\x18\x01 \x03(\x01\"\x89\x01\n\x0eRollOutRequest\x12\x16\n\x0eroll_out_index\x18\x01 \x01(\x05\x12\x10\n\x08\x65nv_seed\x18\x02 \x01(\x05\x12\x10\n\x08\x65valuate\x18\x03 \x01(\x08\x12;\n\x0e\x63ma_parameters\x18\x04 \x01(\x0b\x32#.evolution_algorithms.CMAParameters\":\n\x0fRollOutResponse\x12\x16\n\x0eroll_out_index\x18\x01 \x01(\x05\x12\x0f\n\x07\x66itness\x18\x02 \x01(\x01\"\x12\n\x10ParamSyncRequest\"\'\n\x11ParamSyncResponse\x12\x12\n\nparameters\x18\x01 \x03(\x01\x32q\n\x0eRollOutService\x12_\n\x0eperformRollOut\x12$.evolution_algorithms.RollOutRequest\x1a%.evolution_algorithms.RollOutResponse\"\x00\x32z\n\x14ParameterSyncService\x12\x62\n\rsyncParameter\x12&.evolution_algorithms.ParamSyncRequest\x1a\'.evolution_algorithms.ParamSyncResponse\"\x00\x62\x06proto3')
)
_CMAPARAMETERS = _descriptor.Descriptor(
name='CMAParameters',
full_name='evolution_algorithms.CMAParameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='evolution_algorithms.CMAParameters.parameters', index=0,
number=1, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=57,
serialized_end=92,
)
_ROLLOUTREQUEST = _descriptor.Descriptor(
name='RollOutRequest',
full_name='evolution_algorithms.RollOutRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='roll_out_index', full_name='evolution_algorithms.RollOutRequest.roll_out_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='env_seed', full_name='evolution_algorithms.RollOutRequest.env_seed', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluate', full_name='evolution_algorithms.RollOutRequest.evaluate', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='cma_parameters', full_name='evolution_algorithms.RollOutRequest.cma_parameters', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=232,
)
_ROLLOUTRESPONSE = _descriptor.Descriptor(
name='RollOutResponse',
full_name='evolution_algorithms.RollOutResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='roll_out_index', full_name='evolution_algorithms.RollOutResponse.roll_out_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fitness', full_name='evolution_algorithms.RollOutResponse.fitness', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=234,
serialized_end=292,
)
_PARAMSYNCREQUEST = _descriptor.Descriptor(
name='ParamSyncRequest',
full_name='evolution_algorithms.ParamSyncRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=294,
serialized_end=312,
)
_PARAMSYNCRESPONSE = _descriptor.Descriptor(
name='ParamSyncResponse',
full_name='evolution_algorithms.ParamSyncResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='evolution_algorithms.ParamSyncResponse.parameters', index=0,
number=1, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=314,
serialized_end=353,
)
_ROLLOUTREQUEST.fields_by_name['cma_parameters'].message_type = _CMAPARAMETERS
DESCRIPTOR.message_types_by_name['CMAParameters'] = _CMAPARAMETERS
DESCRIPTOR.message_types_by_name['RollOutRequest'] = _ROLLOUTREQUEST
DESCRIPTOR.message_types_by_name['RollOutResponse'] = _ROLLOUTRESPONSE
DESCRIPTOR.message_types_by_name['ParamSyncRequest'] = _PARAMSYNCREQUEST
DESCRIPTOR.message_types_by_name['ParamSyncResponse'] = _PARAMSYNCRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CMAParameters = _reflection.GeneratedProtocolMessageType('CMAParameters', (_message.Message,), dict(
DESCRIPTOR = _CMAPARAMETERS,
__module__ = 'protobuf.roll_out_service_pb2'
# @@protoc_insertion_point(class_scope:evolution_algorithms.CMAParameters)
))
_sym_db.RegisterMessage(CMAParameters)
RollOutRequest = _reflection.GeneratedProtocolMessageType('RollOutRequest', (_message.Message,), dict(
DESCRIPTOR = _ROLLOUTREQUEST,
__module__ = 'protobuf.roll_out_service_pb2'
# @@protoc_insertion_point(class_scope:evolution_algorithms.RollOutRequest)
))
_sym_db.RegisterMessage(RollOutRequest)
RollOutResponse = _reflection.GeneratedProtocolMessageType('RollOutResponse', (_message.Message,), dict(
DESCRIPTOR = _ROLLOUTRESPONSE,
__module__ = 'protobuf.roll_out_service_pb2'
# @@protoc_insertion_point(class_scope:evolution_algorithms.RollOutResponse)
))
_sym_db.RegisterMessage(RollOutResponse)
ParamSyncRequest = _reflection.GeneratedProtocolMessageType('ParamSyncRequest', (_message.Message,), dict(
DESCRIPTOR = _PARAMSYNCREQUEST,
__module__ = 'protobuf.roll_out_service_pb2'
# @@protoc_insertion_point(class_scope:evolution_algorithms.ParamSyncRequest)
))
_sym_db.RegisterMessage(ParamSyncRequest)
ParamSyncResponse = _reflection.GeneratedProtocolMessageType('ParamSyncResponse', (_message.Message,), dict(
DESCRIPTOR = _PARAMSYNCRESPONSE,
__module__ = 'protobuf.roll_out_service_pb2'
# @@protoc_insertion_point(class_scope:evolution_algorithms.ParamSyncResponse)
))
_sym_db.RegisterMessage(ParamSyncResponse)
_ROLLOUTSERVICE = _descriptor.ServiceDescriptor(
name='RollOutService',
full_name='evolution_algorithms.RollOutService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=355,
serialized_end=468,
methods=[
_descriptor.MethodDescriptor(
name='performRollOut',
full_name='evolution_algorithms.RollOutService.performRollOut',
index=0,
containing_service=None,
input_type=_ROLLOUTREQUEST,
output_type=_ROLLOUTRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ROLLOUTSERVICE)
DESCRIPTOR.services_by_name['RollOutService'] = _ROLLOUTSERVICE
_PARAMETERSYNCSERVICE = _descriptor.ServiceDescriptor(
name='ParameterSyncService',
full_name='evolution_algorithms.ParameterSyncService',
file=DESCRIPTOR,
index=1,
serialized_options=None,
serialized_start=470,
serialized_end=592,
methods=[
_descriptor.MethodDescriptor(
name='syncParameter',
full_name='evolution_algorithms.ParameterSyncService.syncParameter',
index=0,
containing_service=None,
input_type=_PARAMSYNCREQUEST,
output_type=_PARAMSYNCRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_PARAMETERSYNCSERVICE)
DESCRIPTOR.services_by_name['ParameterSyncService'] = _PARAMETERSYNCSERVICE
# @@protoc_insertion_point(module_scope)
| apache-2.0 | -6,697,575,156,520,963,000 | 33.138514 | 908 | 0.734092 | false | 3.326201 | false | true | false |
tonimichel/djpl-users | setup.py | 1 | 1092 | #! /usr/bin/env python
import os
from setuptools import setup, find_packages
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name='djpl-users',
version='0.1',
description='User functionality beyond admin users',
long_description=read('README.rst'),
license='The MIT License',
keywords='django, django-productline, users, signup, confirmation email',
author='Toni Michel',
author_email='[email protected]',
url="https://github.com/tonimichel/djpl-users",
packages=find_packages(),
package_dir={'users': 'users'},
include_package_data=True,
scripts=[],
zip_safe=False,
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent'
],
install_requires=[
'django-productline',
'django-q==0.9.2'
]
)
| mit | -3,572,329,187,022,619,600 | 26.3 | 77 | 0.621795 | false | 3.858657 | false | false | false |
joedeller/pymine | circle.py | 1 | 3410 | #! /usr/bin/python
# Joe Deller 2014
# Drawing circle shapes in Minecraft
# Level : Intermediate
# Uses : Libraries, variables, operators, loops, logic
# This program uses more functions from the maths library
# The mathematics of circles is something that you probably won't
# be taught until secondary school, but don't worry.
# All we really need to understand is where, how big , what color
# This program uses two different types of numbers
# Floating point (numbers with a decimal place ) and integers (whole numbers)
# First the libraries we will need
import mcpi.minecraft as minecraft
import mcpi.block as block
import math
# Make a connection to minecraft
mc = minecraft.Minecraft.create()
# Find out where we are in the world and store this for later
playerPos = mc.player.getPos()
pX = playerPos.x
pY = playerPos.y
pZ = playerPos.z
# Clean up the world and any previous circles nearby
# We will clear and area 20 both the left, right, front and back of where we are standing
# and 64 blocks up into the air
mc.setBlocks(pX - 20, pY, pZ - 20, pX + 20, pY + 64, pZ + 20, block.AIR.id)
# Setup a grass floor, the same size as the area we cleared, but not so high
mc.setBlocks(pX - 20, pY - 1, pZ - 20, pX + 20, pY - 1, pZ + 20, block.GRASS.id)
# Many computer languages, including python, use a measurement called radians to measure angles
# rather than degrees you might be used to.
# If you haven't heard of Pi, (the number rather than the computer)
# then Pi is the number you get if you measure all the way around a circle,
# called the circumference, by the width of the circle
# it is approximately 3.14, or, to be slightly more accurate, 3.1415926
# in fact the numbers after the decimal point go on for ever
# A circle can be divided up into 360 degrees, or 2 * Pi in radians
# A semi circle is 180 degrees, or Pi in radians
# Pi is such an important number that many computer
# languages include it as part of their keywords or
# as part of a library
# For now we won't worry too much about the Mathematics, just the color
# and shape
# How wide in blocks do we want our circle?
# Including the decimal place tells Python that we want a float variable to
# store the width, rather than whole numbers
diameter = 10.0
# Normally we would use a For loop, but in Python "range" only
# works with whole numbers and we need numbers with decimal places
# One way (and there are others) is to use a while loop
# You might wonder why we don't start from zero, try changing i to be 0.0
# and see :-)
i = 0.01
while (i < math.pi * 2):
# When we draw our blocks, we need to convert the floating point numbers back to integers
# Our circle won't be super smooth as minecraft blocks are quite large
# For now don't worry about the sin and cos functions, they work out where to place our blocks
# to make a circle shape
dx = int(diameter * math.cos(i))
dy = int(diameter * math.sin(i))
# We need to add our player X co-ordinate to the circle X coordinate so it is
# drawn close to where we are standing
# We also will draw our circle back a bit from where we are standing, 4
# blocks should be enough
mc.setBlock(dx + pX, pY + dy + diameter, pZ + 10, block.WOOL.id, 1)
# try changing the number we add to different values, for example 0.5
# the more you add the faster the loop finishes, so the less blocks get
# drawn
i = i + 0.4
| mit | -7,876,583,890,132,956,000 | 40.585366 | 98 | 0.7261 | false | 3.537344 | false | false | false |
niwtr/map-walker | src/server/log.py | 1 | 2782 | #! /usr/bin/python
# -*- coding: utf-8 -*-
'''
Log module.
Maintains the mechanism to write log.
Design: Heranort, L.Laddie
'''
import os, time, shutil
'''
Log mode.
If the mode turns to be 'testing', we must write down all the environment.
And if the mode is 'distributed', we should write at least all the information
we need.
'''
#mode=[]
class Log_file():
def __init__(self):
path = os.getcwd()
pparent_path = os.path.dirname(os.path.dirname(path)) #get the root dir
self.file_path = os.path.join(pparent_path, 'data')
self.path_simple_log = os.path.join(self.file_path, 'log.txt') #get the log path
self.path_test_log = os.path.join(self.file_path, 'log_test.txt')
self.open_log()
'''
No less and no more.
'''
def make_simple_log(self, env):
pass
'''
Used Only during development of the program.
'''
def make_full_log(self, env):
pass
'''
Analyze the log file to check where the bug is.
'''
def analyzer(self):
pass
'''
Open the log file
'''
def open_log(self):
line_num = 0
if(os.path.isfile(self.path_simple_log)):
temp = open(self.path_simple_log, 'r')
lines = temp.readlines()
temp.close()
line_num = len(lines)
self.log_simple = open(self.path_simple_log, 'a') #open the log txt with a additional mode
self.log_test = open(self.path_test_log, 'a')
if(line_num >= 1000): #check the log size
self.roll_log()
'''
Preserve the old log
'''
def roll_log(self):
for i in range(1000):
file_name = os.path.join(self.file_path, 'log_pre_%d.log' % i)
if(os.path.isfile(file_name)):
continue
self.log_simple.close()
shutil.move(self.path_simple_log, file_name)
self.open_log()
self.info('log roll to %s', file_name)
return
'''
Write log to the actual disk.
'''
def write_log(self, mode, fmt, *msg):
str = '%s - [%s] %s\n' % (time.ctime()[4:], mode, fmt % msg)
self.log_simple.write(str)
try:
self.log_simple.flush()
except:
pass
'''
Three different types of log
'''
def debug(self, fmt, *msg):
self.write_log('DEBUG', fmt, *msg)
def info(self, fmt, *msg):
self.write_log('INFO', fmt, *msg)
def warn(self, fmt, *msg):
self.write_log('WARN', fmt, *msg)
log_file = Log_file()
if(__name__ == '__main__'):
log_file.debug('test')
log_file.debug('%d*%s', 272, 'test')
log_file.info('%d*%s', 1954, 'test')
for i in range(1500):
log_file.warn('%d*%s', i, 'test') | mit | -1,747,165,687,453,168,400 | 25.009346 | 99 | 0.542416 | false | 3.296209 | true | false | false |
acabey/acabey.github.io | projects/demos/engineering.purdue.edu/scriptingwithobjects/swocode/chap13/PassArgsToCallback.py | 1 | 2006 | #!/usr/bin/env python
### PassArgsToCallbacks.py
from Tkinter import *
mw = Tk()
b1 = Button( mw, text = "b1" ) #(A)
b2 = Button( mw, text = "b2" ) #(B)
b3 = Button( mw, text = "b3" ) #(C)
b4 = Button( mw, text = "b4" ) #(D)
b1.grid(row = 0, column = 0) #(E)
b2.grid(row = 0, column = 1) #(F)
b3.grid(row = 1, column = 0) #(G)
b4.grid(row = 1, column = 1) #(H)
# For the buttons b1 through b2, the callbacks need no arguments:
def says_b1(): print "b1 says hello to me\n" #(I)
def says_b2( e ): print "b2 says hello to me\n" #(J)
b1.configure( command = says_b1 ) #(K)
b2.bind( '<Button-1>', says_b2 ) #(L)
# The callbacks for b3 and b4 need the following two as arguments:
arg1 = "hello" #(M)
arg2 = "to me" #(N)
# Register the callbacks:
b3.configure( command = lambda: saysomething_1( b3, arg1, arg2 ) ) #(O)
b4.bind( '<Button-1>', lambda e: saysomething_2( e, arg1, arg2 ) ) #(P)
def saysomething_1( wdg, p, q ): #(Q)
butt_lbl = wdg.cget( 'text' ) #(R)
print "%s says %s %s\n" % (butt_lbl, p, q) #(S)
def saysomething_2( evt, p, q ): #(T)
butt_lbl = evt.widget.cget( 'text' ) #(U)
print "%s says %s %s\n" % (butt_lbl, p, q) #(V)
mainloop() #(W)
| gpl-3.0 | -2,207,782,935,895,623,200 | 47.926829 | 73 | 0.339482 | false | 3.660584 | false | false | false |
Bouke/django-two-factor-auth | tests/test_views_qrcode.py | 1 | 3279 | from unittest import mock
import qrcode.image.svg
from django.test import RequestFactory, TestCase
from django.urls import reverse
from two_factor.utils import get_otpauth_url
from two_factor.views.core import QRGeneratorView
from .utils import UserMixin
class CustomQRView(QRGeneratorView):
def get_issuer(self):
return "Custom Test Issuer"
class QRTest(UserMixin, TestCase):
test_secret = 'This is a test secret for an OTP Token'
test_img = 'This is a test string that represents a QRCode'
def setUp(self):
super().setUp()
self.user = self.create_user(username='ⓑỚ𝓾⒦ȩ')
self.login_user()
def test_without_secret(self):
response = self.client.get(reverse('two_factor:qr'))
self.assertEqual(response.status_code, 404)
@mock.patch('qrcode.make')
def test_with_secret(self, mockqrcode):
# Setup the mock data
def side_effect(resp):
resp.write(self.test_img)
mockimg = mock.Mock()
mockimg.save.side_effect = side_effect
mockqrcode.return_value = mockimg
# Setup the session
session = self.client.session
session['django_two_factor-qr_secret_key'] = self.test_secret
session.save()
# Get default image factory
default_factory = qrcode.image.svg.SvgPathImage
# Get the QR code
response = self.client.get(reverse('two_factor:qr'))
# Check things went as expected
mockqrcode.assert_called_with(
get_otpauth_url(accountname=self.user.get_username(),
secret=self.test_secret, issuer="testserver"),
image_factory=default_factory)
mockimg.save.assert_called_with(mock.ANY)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf-8'), self.test_img)
self.assertEqual(response['Content-Type'], 'image/svg+xml; charset=utf-8')
@mock.patch('qrcode.make')
def test_custom_issuer(self, mockqrcode):
# Setup the mock data
def side_effect(resp):
resp.write(self.test_img)
mockimg = mock.Mock()
mockimg.save.side_effect = side_effect
mockqrcode.return_value = mockimg
# Setup the session
session = self.client.session
session['django_two_factor-qr_secret_key'] = self.test_secret
session.save()
# Get default image factory
default_factory = qrcode.image.svg.SvgPathImage
# Get the QR code
factory = RequestFactory()
request = factory.get(reverse('two_factor:qr'))
request.user = self.user
request.session = session
response = CustomQRView.as_view()(request)
# Check things went as expected
mockqrcode.assert_called_with(
get_otpauth_url(accountname=self.user.get_username(),
secret=self.test_secret, issuer="Custom Test Issuer"),
image_factory=default_factory)
mockimg.save.assert_called_with(mock.ANY)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf-8'), self.test_img)
self.assertEqual(response['Content-Type'], 'image/svg+xml; charset=utf-8')
| mit | 2,489,858,012,400,676,400 | 34.150538 | 82 | 0.643316 | false | 3.766129 | true | false | false |
X0Leon/XQuant | xquant/engine/backtest.py | 1 | 6837 | # -*- coding: utf-8 -*-
"""
回测的主要接口
@author: Leon Zhang
@version: 0.4
"""
import datetime
import time
import pandas as pd
try:
import queue
except ImportError:
import Queue as queue
from ..utils.logger import setup_logger
from .event import SignalEvent
logger = setup_logger()
class Backtest(object):
"""
封装回测设置和模块的接口
"""
def __init__(self, csv_dir, symbol_list, initial_capital,
heartbeat, start_date, end_date, data_handler,
execution_handler, portfolio, strategy,
commission_type='zero', slippage_type='zero',
**params):
"""
初始化回测
csv_dir: CSV数据文件夹目录
symbol_list: 股票代码str的list,如'600008'
initial_capital: 初始资金,如10000.0
heartbeat: k bar周期,以秒计,如分钟线为60,模拟交易使用
start_date: 策略回测起始时间
end_date: 策略回测结束时间
end_date: 策略回测结束时间
data_handler: (Class) 处理市场数据的类
execution_handler: (Class) 处理order/fill的类
portfolio: (Class) 虚拟账户,追踪组合头寸等信息的类
strategy: (Class) 根据市场数据生成信号的策略类
commission_type: 交易费率模型
slippage_type: 滑点模型
params: 策略参数的字典
"""
self.csv_dir = csv_dir
self.symbol_list = symbol_list
self.initial_capital = initial_capital
self.heartbeat = heartbeat
self.start_date = start_date
self.end_date = end_date
self.data_handler_cls = data_handler
self.execution_handler_cls = execution_handler
self.portfolio_cls = portfolio
self.strategy_cls = strategy
self.commission_type = commission_type
self.slippage_type = slippage_type
self.events = queue.Queue()
self.params = params
self.signals = 0
self.orders = 0
self.fills = 0
self._generate_trading_instances()
def _generate_trading_instances(self):
"""
实例化类,得到data_handler(bars),strategy,portfolio(port),execution_handler(broker)对象
"""
self.data_handler = self.data_handler_cls(self.events, self.csv_dir, self.symbol_list,
self.start_date, self.end_date)
self.strategy = self.strategy_cls(self.data_handler, self.events, **self.params)
self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.start_date,
self.initial_capital)
self.execution_handler = self.execution_handler_cls(self.data_handler, self.events,
slippage_type=self.slippage_type,
commission_type=self.commission_type)
def _run_backtest(self):
"""
执行回测
"""
while True:
# 更新k bar
bars = self.data_handler
if bars.continue_backtest:
bars.update_bars()
else:
break
# 处理events
while True:
try:
event = self.events.get(False)
except queue.Empty:
break
else:
if event is not None:
if event.type == 'BAR': # or event.type == 'TICK'
logger.debug(' '.join([event.bar[0], event.bar[1].strftime("%Y-%m-%d %H:%M:%S"),
str(event.bar[5])]))
self.strategy.calculate_signals(event)
self.portfolio.update_timeindex()
elif event.type == 'SIGNAL':
logger.info(' '.join(['Create Signal:', event.datetime.strftime("%Y-%m-%d %H:%M:%S"),
event.symbol, event.signal_type]))
self.signals += 1
self.portfolio.update_signal(event)
elif event.type == 'ORDER':
self.orders += 1
self.execution_handler.execute_order(event)
elif event.type == 'FILL':
self.fills += 1
self.portfolio.update_fill(event)
# time.sleep(self.heartbeat)
def _force_clear(self):
"""
回测结束,确保强制平仓
"""
for s in self.symbol_list:
self.portfolio.update_signal(SignalEvent(s, self.portfolio.current_datetime, 'EXIT'))
event = self.events.get()
if event is not None:
assert event.type == 'ORDER'
self.execution_handler.execute_order(event)
event = self.events.get()
assert event.type == 'FILL'
self.portfolio.update_fill(event)
self.portfolio.update_timeindex()
logger.info(' '.join(['Force Clear:', self.portfolio.current_datetime.strftime("%Y-%m-%d %H:%M:%S"),
s, 'EXIT']))
def _output_performance(self):
"""
输出策略的回测结果
"""
pass
def trade_record(self):
"""
交易记录
"""
trades = pd.DataFrame(self.portfolio.all_trades, columns=['datetime', 'exchange', 'symbol', 'direction',
'fill_price', 'quantity', 'commission'])
return trades.set_index('datetime')
def simulate_trading(self):
"""
模拟回测并输出结果,返回资金曲线和头寸的DataFrame
"""
start = time.time()
logger.info('Start backtest...')
self._run_backtest()
logger.info('Summary: Signals (%s), Orders (%s), Fills (%s)' % (self.signals, self.orders, self.fills))
self._force_clear()
end = time.time()
timing = round(end-start, 2)
logger.info('Backtest took %s seconds!' % timing)
self._output_performance()
positions = pd.DataFrame(self.portfolio.all_positions).drop_duplicates(subset='datetime', keep='last'
).set_index('datetime')
holdings = pd.DataFrame(self.portfolio.all_holdings).drop_duplicates(subset='datetime', keep='last'
).set_index('datetime')
return positions, holdings
| mit | 7,205,165,296,745,565,000 | 35.494318 | 116 | 0.50397 | false | 3.7278 | true | false | false |
kevinharvey/django-tourney | tourney/players/models.py | 1 | 3456 | from django.db import models
class Player(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField()
def __str__(self):
return '{} ({})'.format(self.name, self.email)
class Pool(models.Model):
tournament = models.ForeignKey('matches.Tournament')
players = models.ManyToManyField('players.Player')
def __str__(self):
return '{} - Pool {}'.format(self.tournament.name, self.id)
def _generate_matches(self):
"""
Create a match for each set of 2 players in the pool, and rounds to hold
them
"""
from matches.models import Match, Round
rounds = {}
players = [player for player in self.players.all()]
if len(players) % 2 != 0: players.append(None)
iterator = list(range(len(players)))
for x in iterator:
if x == 0: continue
round = Round(pool=self, number=x)
round.save()
rounds[round] = []
for x in iterator:
if not players[x]: continue
others_iterator = iterator.copy()
others_iterator.remove(x)
for y in others_iterator:
if not players[y]: continue
match_exists = Match.objects.filter(player_1_init=players[x], player_2_init=players[y]).exists()
inverse_match_exists = Match.objects.filter(player_1_init=players[y], player_2_init=players[x]).exists()
if match_exists or inverse_match_exists:
continue
for scheduled_round, players_in_round in rounds.items():
if (players[x] not in players_in_round) and (players[y] not in players_in_round):
break
match = Match(
player_1_init=players[x],
player_2_init=players[y],
round=scheduled_round,
round_index=0
)
match.save()
rounds[scheduled_round] += [players[x], players[y]]
def get_player_standings(self):
"""
Return a list of dictionaries describing the standings (player name and
win/loss record)
"""
records = []
rounds = self.round_set.all()
for round_object in rounds:
for match in round_object.match_set.all():
if not any(d['name'] == match.player_1.name for d in records):
records.append({'name': match.player_1.name, 'wins': 0, 'losses': 0})
if not any(d['name'] == match.player_2.name for d in records):
records.append({'name': match.player_2.name, 'wins': 0, 'losses': 0})
player_1_record = next((record for record in records if record['name'] == match.player_1.name), None)
player_2_record = next((record for record in records if record['name'] == match.player_2.name), None)
if match.winner() == match.player_1:
player_1_record['wins'] += 1
player_2_record['losses'] += 1
if match.winner() == match.player_2:
player_2_record['wins'] += 1
player_1_record['losses'] += 1
records_by_losses = sorted(records, key=lambda k: k['losses'])
records_by_wins = sorted(records, key=lambda k: k['wins'], reverse=True)
return records_by_wins
| gpl-3.0 | 8,356,018,102,655,234,000 | 35.378947 | 120 | 0.540799 | false | 4.114286 | false | false | false |
davidak/PyZufall | demo.py | 1 | 1801 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
from pyzufall.version import __version__
from pyzufall.generator import adjektiv, band, bandart, baum, beilage, beruf_m, beruf_w, color, datum, essen, farbe, firma, geburtsdatum, gegenstand, interesse, koerperteil, nachname, objekt, objekt_m, objekt_w, ort, person, person_m, person_objekt_m, person_objekt_w, person_w, pflanze, sprichwort, stadt, stadt_bl, tier, trinken, verbd, verbi, verbi2, verbn, verbt, verbt2, vorname, vorname_m, vorname_w, wort, zahl
from pyzufall.satz import satz
from pyzufall.person import Person
titel = "Demoscript für PyZufall " + __version__
print("\n" + titel + "\n" + '~' * len(titel) + "\n")
print("Person: " + person())
print("Berufsbezeichnung M: " + beruf_m())
print("Berufsbezeichnung W: " + beruf_w())
print("Essen: " + essen())
print("Beilage: " + beilage())
print("Trinken: " + trinken())
print("Stadt: " + stadt())
print("Ort: " + ort())
print("Band: " + band())
print("Bandart: " + bandart())
print("Wort: " + wort())
print("Zahl: " + zahl())
print("Farbe: " + farbe())
print("Datum: " + datum())
print("Sprichwort: " + sprichwort())
anzahl = 10
print("\n" + str(anzahl) + " zufällige Sätze:\n")
for i in range(1, anzahl+1):
print(str(i) + ". " + satz())
print("\n") # Leerzeile
print("Zufällige Personen generieren:\n")
p1 = Person()
print(p1)
p2 = Person()
print(p2)
print("{} und {} sitzen auf einer Bank im Park.\n".format(p1.vorname, p2.vorname))
del p1, p2
s = "Heute Abend gibt es {} mit {} und dazu ein Glas {}.".format(essen(), beilage(), trinken())
print(s)
s = "Meine {} heißt '{}' und besteht aus {}, {} und mir.".format(bandart(), band(), vorname(), vorname())
print(s)
| gpl-3.0 | -8,526,882,945,034,902,000 | 34.215686 | 417 | 0.655902 | false | 2.40107 | false | false | false |
bushvin/ansible-plugins | vars_plugins/user_dir_vars.py | 1 | 3579 | # (c) 2015, William Leemans <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
from ansible import utils
from ansible import errors
import ansible.constants as C
class VarsModule(object):
_base_host_vars = "~/.ansible/host_vars"
_base_group_vars = "~/.ansible/group_vars"
_host_allowed_facts = [ 'ansible_ssh_user', 'ansible_ssh_pass', 'ansible_sudo', 'ansible_sudo_pass', 'ansible_ssh_private_key_file', 'ansible_become', 'ansible_become_user', 'ansible_become_pass' ]
_group_allowed_facts = [ 'ansible_ssh_user', 'ansible_ssh_pass', 'ansible_sudo', 'ansible_sudo_pass', 'ansible_ssh_private_key_file', 'ansible_become', 'ansible_become_user', 'ansible_become_pass' ]
def __init__(self, inventory):
self.inventory = inventory
self.inventory_basedir = inventory.basedir()
self._base_host_vars = os.path.expanduser(self._base_host_vars)
self._base_group_vars = os.path.expanduser(self._base_group_vars)
def run(self, host, vault_password=None):
""" For backwards compatibility, when only vars per host were retrieved
This method should return both host specific vars as well as vars
calculated from groups it is a member of """
result = {}
result.update(self.get_host_vars(host, vault_password))
for g in host.groups:
result.update(self.get_group_vars(g,vault_password))
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
result = utils.merge_hash(result, data)
else:
result.update(data)
return result
def get_host_vars(self, host, vault_password=None):
result = {}
filename = os.path.join(self._base_host_vars, "%s.yml" % host.name)
if os.path.isfile( filename ):
res = utils.parse_yaml_from_file(filename, vault_password=vault_password)
if type(res) != dict:
raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename)
data = dict()
for el in res:
if len(self._host_allowed_facts) == 0 or el in self._host_allowed_facts:
data.update( { el: res[el] } )
result.update(data)
return result
def get_group_vars(self, group, vault_password=None):
result = {}
filename = os.path.join(self._base_group_vars, "%s.yml" % group.name)
if os.path.isfile( filename ):
res = utils.parse_yaml_from_file(filename, vault_password=vault_password)
if type(res) != dict:
raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename)
data = dict()
for el in res:
if len(self._group_allowed_facts) == 0 or el in self._group_allowed_facts:
data.update( { el: res[el] } )
result.update(data)
return result
| gpl-3.0 | -8,529,523,066,040,105,000 | 41.105882 | 202 | 0.62727 | false | 3.840129 | false | false | false |
cgarrard/osgeopy-code | Chapter13/listing13_2.py | 1 | 1369 | # Plot countries as multipolygons.
import matplotlib.pyplot as plt
from osgeo import ogr
def plot_polygon(poly, symbol='k-', **kwargs):
"""Plots a polygon using the given symbol."""
for i in range(poly.GetGeometryCount()):
subgeom = poly.GetGeometryRef(i)
x, y = zip(*subgeom.GetPoints())
plt.plot(x, y, symbol, **kwargs)
def plot_layer(filename, symbol, layer_index=0, **kwargs):
"""Plots an OGR polygon layer using the given symbol."""
ds = ogr.Open(filename)
# Loop through all of the features in the layer.
for row in ds.GetLayer(layer_index):
geom = row.geometry()
geom_type = geom.GetGeometryType()
# If the geometry is a single polygon.
if geom_type == ogr.wkbPolygon:
plot_polygon(geom, symbol, **kwargs)
# Else if the geometry is a multipolygon, send each
# part to plot_polygon individually.
elif geom_type == ogr.wkbMultiPolygon:
for i in range(geom.GetGeometryCount()):
subgeom = geom.GetGeometryRef(i)
plot_polygon(subgeom, symbol, **kwargs)
# Plot countries.
plot_layer(r'D:\osgeopy-data\global\ne_110m_admin_0_countries.shp', 'k-')
plt.axis('equal')
# Get rid of the tick marks on the side of the plot.
plt.gca().get_xaxis().set_ticks([])
plt.gca().get_yaxis().set_ticks([])
plt.show()
| mit | 390,147,310,366,246,600 | 33.225 | 73 | 0.639153 | false | 3.537468 | false | false | false |
pankeshang/PPrintJsonEncoder | pp_json_encoder.py | 1 | 10703 | # -*- coding: utf-8 -*-
__author__ = '[email protected]'
"""
PPrintJsonEncoder
This is a wraper of the existing JSONEncoder from python's default ``json`` module.
What has been newly added in is just the ``depth`` attribute
"""
import json
import re
try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from _json import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
#ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
INFINITY = float('inf')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u{0:04x}'.format(n)
#return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
#return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class PPJSONEncoder(json.JSONEncoder):
def __init__(self, depth=None, **kwargs):
self.depth = depth
super(PPJSONEncoder, self).__init__(**kwargs)
def pp_iterencode(self, o):
""" No we do not care about ont_shot hahaha bite me!
"""
_one_shot = None
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
_iterencode = _pp_make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot, self.depth
)
return _iterencode(o, 0)
def iterencode(self, o, _one_shot=False):
if self.depth:
return self.pp_iterencode(o)
return super(PPJSONEncoder, self).iterencode(o, _one_shot)
def _pp_make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
depth,
## HACK: hand-optimized bytecode; turn globals into locals
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
if _current_indent_level > depth:
newline_indent = None
separator = _item_separator
else:
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
if _current_indent_level > depth:
newline_indent = None
item_separator = _item_separator
else:
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = sorted(dct.items(), key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
| mit | 2,360,319,245,648,625,000 | 31.433333 | 83 | 0.502476 | false | 4.235457 | false | false | false |
jimporter/bfg9000 | bfg9000/builtins/find.py | 1 | 5986 | import re
from enum import Enum
from functools import reduce
from . import builtin
from ..glob import NameGlob, PathGlob
from ..iterutils import iterate, listify
from ..backends.make import writer as make
from ..backends.ninja import writer as ninja
from ..backends.make.syntax import Writer, Syntax
from ..build_inputs import build_input
from ..path import Path, Root, walk, uniquetrees
from ..platforms import known_platforms
build_input('find_dirs')(lambda build_inputs, env: set())
depfile_name = '.bfg_find_deps'
@builtin.default()
class FindResult(Enum):
include = 0
not_now = 1
exclude = 2
exclude_recursive = 3
def __bool__(self):
return self == self.include
def __and__(self, rhs):
return type(self)(max(self.value, rhs.value))
def __or__(self, rhs):
return type(self)(min(self.value, rhs.value))
class FileFilter:
def __init__(self, include, type=None, extra=None, exclude=None,
filter_fn=None):
self.include = [PathGlob(i, type) for i in iterate(include)]
if not self.include:
raise ValueError('at least one pattern required')
self.extra = [NameGlob(i, type) for i in iterate(extra)]
self.exclude = [NameGlob(i, type) for i in iterate(exclude)]
self.filter_fn = filter_fn
def bases(self):
return uniquetrees([i.base for i in self.include])
def _match_globs(self, path):
if any(i.match(path) for i in self.exclude):
return FindResult.exclude_recursive
skip_base = len(self.include) == 1
result = reduce(lambda a, b: a | b,
(i.match(path, skip_base) for i in self.include))
if result:
return FindResult.include
if any(i.match(path) for i in self.extra):
return FindResult.not_now
if result == PathGlob.Result.never:
return FindResult.exclude_recursive
return FindResult.exclude
def match(self, path):
result = self._match_globs(path)
if self.filter_fn:
return result & self.filter_fn(path)
return result
def write_depfile(env, path, output, seen_dirs, makeify=False):
with open(path.string(env.base_dirs), 'w') as f:
# Since this file is in the build dir, we can use relative dirs for
# deps also in the build dir.
roots = env.base_dirs.copy()
roots[Root.builddir] = None
out = Writer(f, None)
out.write(output.string(roots), Syntax.target)
out.write_literal(':')
for i in seen_dirs:
out.write_literal(' ')
out.write(i.string(roots), Syntax.dependency)
out.write_literal('\n')
if makeify:
for i in seen_dirs:
out.write(i.string(roots), Syntax.target)
out.write_literal(':\n')
def _path_type(path):
return 'd' if path.directory else 'f'
@builtin.function()
def filter_by_platform(context, path):
env = context.env
my_plat = {env.target_platform.genus, env.target_platform.family}
sub = '|'.join(re.escape(i) for i in known_platforms if i not in my_plat)
ex = r'(^|/|_)(' + sub + r')(\.[^\.]+$|$|/)'
return (FindResult.not_now if re.search(ex, path.suffix)
else FindResult.include)
def _find_files(env, filter, seen_dirs=None):
paths = filter.bases()
for p in paths:
yield p, filter.match(p)
for p in paths:
for base, dirs, files in walk(p, env.base_dirs):
if seen_dirs is not None:
seen_dirs.append(base)
to_remove = []
for i, p in enumerate(dirs):
m = filter.match(p)
if m == FindResult.exclude_recursive:
to_remove.append(i)
yield p, m
for p in files:
yield p, filter.match(p)
for i in reversed(to_remove):
del dirs[i]
def find(env, pattern, type=None, extra=None, exclude=None):
pattern = [Path.ensure(i, Root.srcdir) for i in iterate(pattern)]
file_filter = FileFilter(pattern, type, extra, exclude)
results = []
for path, matched in _find_files(env, file_filter):
if matched == FindResult.include:
results.append(path)
return results
@builtin.function()
def find_files(context, pattern, *, type=None, extra=None, exclude=None,
filter=None, file_type=None, dir_type=None, dist=True,
cache=True):
types = {'f': file_type or context['auto_file'],
'd': dir_type or context['directory']}
extra_types = {'f': context['generic_file'], 'd': context['directory']}
pattern = [context['relpath'](i) for i in iterate(pattern)]
exclude = context.build['project']['find_exclude'] + listify(exclude)
file_filter = FileFilter(pattern, type, extra, exclude, filter)
found, seen_dirs = [], []
for path, matched in _find_files(context.env, file_filter, seen_dirs):
if matched == FindResult.include:
found.append(types[_path_type(path)](path, dist=dist))
elif matched == FindResult.not_now and dist:
extra_types[_path_type(path)](path, dist=dist)
if cache:
context.build['find_dirs'].update(seen_dirs)
context.build['regenerate'].depfile = depfile_name
return found
@builtin.function()
def find_paths(context, *args, **kwargs):
return [i.path for i in context['find_files'](*args, **kwargs)]
@make.post_rule
def make_find_dirs(build_inputs, buildfile, env):
if build_inputs['find_dirs']:
write_depfile(env, Path(depfile_name), make.filepath,
build_inputs['find_dirs'], makeify=True)
buildfile.include(depfile_name)
@ninja.post_rule
def ninja_find_dirs(build_inputs, buildfile, env):
if build_inputs['find_dirs']:
write_depfile(env, Path(depfile_name), ninja.filepath,
build_inputs['find_dirs'])
| bsd-3-clause | -1,055,999,983,932,976,400 | 31.532609 | 77 | 0.607417 | false | 3.647776 | false | false | false |
jorgebaier/iic1103-s4-2016 | clase0922/ejemplo.py | 1 | 1231 | import bigramas_ord
# bigramas_ord.cargar_archivo() : carga archivo de datos
# bigramas_ord.palabra_comun_seguida(palabra):
# retorna una palabra que ocurre frecuentemente despues
# de palabra
def leer_prohibidas():
# retorna un string que contiene las letras prohibidas
print('qué letras quieres omitir? ')
s = ''
c = ''
while c != '0':
s = s + c
c = input()
return s
def legal(prohibidas,palabra):
# retorna True si la palabra no contiene
# ningun caracter en prohibidas
# retorna False en caso contrario
for c in prohibidas:
if c in palabra:
return False
return True
bigramas_ord.cargar_archivo()
cuantas_palabras = int(input('cuantas palabras quieres? '))
palabra_inicial = input('palabra inicial? ')
prohibidas = leer_prohibidas()
print("Letras prohibidas:",prohibidas)
contador = 0
palabra = palabra_inicial
while contador < cuantas_palabras:
print(palabra, end=' ')
palabra_original = palabra
palabra = bigramas_ord.palabra_comun_seguida(palabra_original)
while not legal(prohibidas,palabra):
palabra = bigramas_ord.palabra_comun_seguida(palabra_original)
contador += 1 # contador = contador + 1
| unlicense | -6,015,322,268,898,109,000 | 25.170213 | 74 | 0.681301 | false | 2.605932 | false | false | false |
melmothx/jsonbot | jsb/plugs/core/userstate.py | 1 | 1812 | # jsb/plugs/userstate.py
#
#
""" userstate is stored in jsondata/state/users/<username>. """
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.lib.persiststate import UserState
from jsb.lib.errors import NoSuchUser
## set command
def handle_set(bot, ievent):
""" let the user manage its own state. """
try: (item, value) = ievent.args
except ValueError: ievent.missing("<item> <value>") ; return
ievent.user.state.data[item.lower()] = value
ievent.user.state.save()
ievent.reply("%s set to %s" % (item.lower(), value))
cmnds.add('set', handle_set, ['OPER', 'USER', 'GUEST'])
examples.add('set', 'set userstate', 'set place heerhugowaard')
## get command
def handle_get(bot, ievent):
""" get state of a user. """
target = ievent.rest
if target: target = target.lower()
userstate = ievent.user.state
result = []
for i, j in userstate.data.iteritems():
if target == i or not target: result.append("%s=%s" % (i, j))
if result: ievent.reply("state: ", result)
else: ievent.reply('no userstate of %s known' % ievent.userhost)
cmnds.add('get', handle_get, ['OPER', 'USER', 'GUEST'])
examples.add('get', 'get your userstate', 'get')
## unset command
def handle_unset(bot, ievent):
""" remove value from user state of the user giving the command. """
try:
item = ievent.args[0].lower()
except (IndexError, TypeError):
ievent.missing('<item>')
return
try: del ievent.user.state.data[item]
except KeyError:
ievent.reply('no such item')
return
ievent.user.state.save()
ievent.reply('item %s deleted' % item)
cmnds.add('unset', handle_unset, ['USER', 'GUEST'])
examples.add('unset', 'delete variable from your state', 'unset TZ')
| mit | 4,534,513,255,737,570,000 | 29.2 | 72 | 0.646799 | false | 3.201413 | false | false | false |
jbernardis/repraptoolbox | src/Printer/heaters.py | 1 | 7394 | import wx
BUTTONDIM = (48, 48)
class HeaterInfo:
def __init__(self, name, tool, info):
self.name = name
self.tool = tool
self.mintemp = info[0]
self.maxtemp = info[1]
self.lowpreset = info[2]
self.highpreset = info[3]
self.setcmd = info[4]
self.setwaitcmd = info[5]
class Heaters(wx.Window):
def __init__(self, parent, reprap, prtName):
self.parent = parent
self.images = parent.images
self.settings = self.parent.settings
self.reprap = reprap
self.prtName = prtName
wx.Window.__init__(self, parent, wx.ID_ANY, size=(-1, -1), style=wx.SIMPLE_BORDER)
szHeaters = wx.BoxSizer(wx.VERTICAL)
self.bedInfo = HeaterInfo("Bed", None, self.settings.bedinfo)
self.hBed = Heater(self, self.bedInfo, self.reprap)
szHeaters.AddSpacer(5)
szHeaters.Add(self.hBed)
self.hHEs = []
self.hHEInfo = []
for i in range(self.settings.nextruders):
if self.settings.nextruders == 1:
tool = None
title = "HE"
else:
tool = i
title = "HE%d" % tool
hi = HeaterInfo(title, tool, self.settings.heinfo)
h = Heater(self, hi, self.reprap)
szHeaters.AddSpacer(5)
szHeaters.Add(h)
self.hHEs.append(h)
self.hHEInfo.append(hi)
szHeaters.AddSpacer(5)
self.SetSizer(szHeaters)
self.Layout()
self.Fit()
def registerGCodeTemps(self, hes, bed):
for i in range(self.settings.nextruders):
self.hHEs[i].enableExtract(hes[i])
self.hBed.enableExtract(bed)
def tempHandler(self, actualOrTarget, hName, tool, value):
if hName == "Bed":
self.hBed.setTemperature(actualOrTarget, value)
elif hName == "HE":
if tool is None:
ix = 0
else:
ix = tool
self.hHEs[ix].setTemperature(actualOrTarget, value)
def getBedInfo(self):
return self.bedInfo
def getHEInfo(self, tx):
if tx >= self.settings.nextruders:
return None
else:
return self.hHEInfo[tx]
class Heater(wx.Window):
def __init__(self, parent, hi, reprap):
self.parent = parent
self.images = parent.images
self.settings = self.parent.settings
self.reprap = reprap
self.htrInfo = hi
self.GCodeTemp = None
self.setting = None
self.actual = None
self.lowpreset = hi.lowpreset
self.highpreset = hi.highpreset
self.mintemp = hi.mintemp
self.maxtemp = hi.maxtemp
self.heaterOn = False
wx.Window.__init__(self, parent, wx.ID_ANY, size=(-1, -1), style=wx.NO_BORDER)
szHeater = wx.BoxSizer(wx.HORIZONTAL)
self.font12bold = wx.Font(12, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
self.font20bold = wx.Font(20, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)
t = wx.StaticText(self, wx.ID_ANY, "%s:" % hi.name, size=(50, -1), style=wx.ALIGN_RIGHT)
t.SetFont(self.font12bold)
szHeater.AddSpacer(10)
szHeater.Add(t, 0, wx.ALIGN_CENTER_VERTICAL, 1)
szHeater.AddSpacer(10)
self.sbIndicator = wx.StaticBitmap(self, wx.ID_ANY, self.images.pngLedoff)
szHeater.Add(self.sbIndicator, 0, wx.ALIGN_CENTER_VERTICAL, 1)
self.bPower = wx.BitmapButton(self, wx.ID_ANY, self.images.pngHeatoff, size=BUTTONDIM, style = wx.NO_BORDER)
self.bPower.SetToolTip("Turn heater on/off")
self.Bind(wx.EVT_BUTTON, self.onBPower, self.bPower)
szHeater.Add(self.bPower)
self.tcActual = wx.TextCtrl(self, wx.ID_ANY, "", size=(70, -1), style=wx.TE_READONLY | wx.TE_RIGHT)
self.tcActual.SetFont(self.font12bold)
szHeater.Add(self.tcActual, 0, wx.ALIGN_CENTER_VERTICAL, 1)
t = wx.StaticText(self, wx.ID_ANY, " / ")
t.SetFont(self.font20bold)
szHeater.Add(t, 0, wx.ALIGN_CENTER_VERTICAL, 1)
self.tcSetting = wx.TextCtrl(self, wx.ID_ANY, "", size=(50, -1), style=wx.TE_READONLY | wx.TE_RIGHT)
self.tcSetting.SetFont(self.font12bold)
szHeater.Add(self.tcSetting, 0, wx.ALIGN_CENTER_VERTICAL, 1)
self.slThermostat = wx.Slider(self, wx.ID_ANY, value=self.lowpreset, size=(180, -1),
minValue=self.mintemp, maxValue=self.maxtemp,
style=wx.SL_HORIZONTAL | wx.SL_VALUE_LABEL)
self.slThermostat.SetToolTip("Choose temperature setting for heater")
szHeater.Add(self.slThermostat, 0, wx.ALIGN_CENTER_VERTICAL, 1)
self.Bind(wx.EVT_SCROLL, self.doThermostat, self.slThermostat)
szHeater.AddSpacer(10)
self.bLowPreset = wx.Button(self, wx.ID_ANY, "%d" % self.lowpreset, size=(40, 22))
self.bLowPreset.SetToolTip("Set heater to low preset value")
self.Bind(wx.EVT_BUTTON, self.doLowPreset, self.bLowPreset)
self.bHighPreset = wx.Button(self, wx.ID_ANY, "%d" % self.highpreset, size=(40, 22))
self.bHighPreset.SetToolTip("Set heater to high preset value")
self.Bind(wx.EVT_BUTTON, self.doHighPreset, self.bHighPreset)
sz = wx.BoxSizer(wx.VERTICAL)
sz.AddSpacer(3)
sz.Add(self.bHighPreset)
sz.Add(self.bLowPreset)
szHeater.Add(sz)
szHeater.AddSpacer(10)
self.bExtract = wx.BitmapButton(self, wx.ID_ANY, self.images.pngFileopen, size=BUTTONDIM)
self.bExtract.SetToolTip("Extract temperature setting from G Code")
self.Bind(wx.EVT_BUTTON, self.onBExtract, self.bExtract)
szHeater.Add(self.bExtract)
self.bExtract.Enable(False)
szHeater.AddSpacer(10)
self.SetSizer(szHeater)
self.Layout()
self.Fit()
def onBPower(self, evt):
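        """Toggle heater power: if the heater is already on at the slider's
        setting, send S0 to switch it off; otherwise apply the slider
        temperature via the heater's set command."""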
if self.heaterOn and self.setting == self.slThermostat.GetValue():
self.heaterOn = False
self.updateSetting(0)
cmd = self.htrInfo.setcmd + " S0"
self.bPower.SetBitmap(self.images.pngHeatoff)
else:
self.heaterOn = True
self.updateSetting(self.slThermostat.GetValue())
cmd = self.htrInfo.setcmd + " S%d" % self.setting
self.bPower.SetBitmap(self.images.pngHeaton)
if self.htrInfo.tool is not None:
cmd += " T%d" % self.htrInfo.tool
self.reprap.sendNow(cmd)
def onBExtract(self, evt):
if self.GCodeTemp is not None:
self.slThermostat.SetValue(self.GCodeTemp)
def enableExtract(self, temp, flag=True):
self.bExtract.Enable(flag)
self.GCodeTemp = temp
def updateSetting(self, newSetting):
self.setting = newSetting
if self.setting is None:
self.tcSetting.SetValue("")
else:
self.tcSetting.SetValue("%d" % self.setting)
if self.setting is None:
self.sbIndicator.SetBitmap(self.images.pngLedoff)
elif self.actual is None:
self.sbIndicator.SetBitmap(self.images.pngLedon)
elif self.setting > self.actual:
self.sbIndicator.SetBitmap(self.images.pngLedon)
else:
self.sbIndicator.SetBitmap(self.images.pngLedoff)
def updateActual(self, newActual):
self.actual = newActual
        if self.actual is None:
self.tcActual.SetValue("")
else:
self.tcActual.SetValue("%.1f" % self.actual)
if self.setting is None:
self.sbIndicator.SetBitmap(self.images.pngLedoff)
elif self.actual is None:
self.sbIndicator.SetBitmap(self.images.pngLedon)
elif self.setting > self.actual:
self.sbIndicator.SetBitmap(self.images.pngLedon)
else:
self.sbIndicator.SetBitmap(self.images.pngLedoff)
def doLowPreset(self, evt):
self.slThermostat.SetValue(self.lowpreset)
def doHighPreset(self, evt):
self.slThermostat.SetValue(self.highpreset)
def doThermostat(self, evt):
pass
def setTemperature(self, actualOrTarget, value):
if actualOrTarget == "target":
self.updateSetting(value)
if value == 0:
self.heaterOn = False
self.bPower.SetBitmap(self.images.pngHeatoff)
else:
self.heaterOn = True
self.bPower.SetBitmap(self.images.pngHeaton)
elif actualOrTarget == "actual":
self.updateActual(value)
| gpl-3.0 | -8,565,963,561,155,118,000 | 29.553719 | 110 | 0.70449 | false | 2.654937 | false | false | false |
nanophotonics/nplab | nplab/experiment/scanning_experiment/continuous_linear_scanner.py | 1 | 11973 | from __future__ import print_function
from builtins import str
__author__ = 'alansanders'
from nplab.experiment.scanning_experiment import ScanningExperiment, TimedScan
from threading import Thread
import time
from nplab.utils.gui import *
from nplab.ui.ui_tools import UiTools
from nplab import inherit_docstring
from functools import partial
import numpy as np
class ContinuousLinearScan(ScanningExperiment, TimedScan):
@inherit_docstring(TimedScan)
@inherit_docstring(ScanningExperiment)
def __init__(self):
super(ContinuousLinearScan, self).__init__()
self.step = None
self.direction = 1
# Repeat capabilities
self._num_measurements = 0 # the number of measurements made and incremented to num_repeats
self.num_repeats = 1 # user sets this in subclass
self.hold = False # setting this to true prevents movement commands
self._last_step = 0. # this is useful when incrementing a displacement array
# Feedback attributes
self.engage_feedback = False
self.feedback_on = 'Force'
self.set_point = 0
self.feedback_gain = 1
self.feedback_min = -1
self.feedback_max = 1
@inherit_docstring(ScanningExperiment.run)
def run(self, new=True):
if isinstance(self.acquisition_thread, Thread) and self.acquisition_thread.is_alive():
print('scan already running')
return
self.init_scan()
self.acquisition_thread = Thread(target=self.scan, args=(new,))
self.acquisition_thread.start()
def set_parameter(self, value):
"""Vary the independent parameter."""
raise NotImplementedError
@inherit_docstring(ScanningExperiment.scan_function)
def scan_function(self, index):
raise NotImplementedError
def update_parameter(self, value):
"""Vary the independent parameter."""
raise NotImplementedError
@inherit_docstring(ScanningExperiment.run)
def scan(self, new=True):
self.abort_requested = False
self.open_scan()
self.status = 'acquiring data'
self.acquiring.set()
scan_start_time = time.time()
index = 0 if new else 1
while not self.abort_requested:
if self.hold or self._num_measurements < self.num_repeats:
self._last_step = 0. # used to prevent the incrementing of the displacement
else:
self.set_parameter(self.direction*self.step)
self._num_measurements = 0 # reset the number of measurements made after move
self._last_step = self.direction*self.step
self._num_measurements += 1
self.scan_function(index)
index += 1
if self.engage_feedback:
feedback_input = self.calculate_feedback_input()
direction, step = self.feedback_loop(feedback_input, self.set_point)
self.update_from_feedback(direction, step)
try:
self.update_parameter(self.direction*self.step)
except NotImplementedError:
pass
self.print_scan_time(time.time() - scan_start_time)
self.acquiring.clear()
# finish the scan
self.analyse_scan()
self.close_scan()
self.status = 'scan complete'
def calculate_feedback_input(self):
"""
Return the input to the feedback loop.
:return value: the value of the variable to feed back on
"""
raise NotImplementedError
def feedback_loop(self, feedback_input, set_point):
"""
Returns the direction and step size that should be used in the next loop iteration.
:param feedback_input: the current value of the target variable
:param set_point: the target value that should held
:returns direction, step_size:
:rtype : object
"""
e = feedback_input - set_point
output = -self.feedback_gain*e # if e>0 i.e. input > set_point for d=1 then d goes to -1
output = np.clip(output, self.feedback_min, self.feedback_max)
step_size = abs(output)
direction = np.sign(output)
return direction, step_size
def update_from_feedback(self, direction, step):
"""This function is created simply to be subclass GUI updates."""
self.direction = direction
self.step = step
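    def update(self, force=False):
        """Refresh any attached display; a no-op in this base class. This
        hook is assumed by the Qt subclass, which overrides it and inherits
        the docstring via @inherit_docstring."""
        pass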
@inherit_docstring(ContinuousLinearScan)
class ContinuousLinearScanQt(ContinuousLinearScan, QtCore.QObject):
direction_updated = QtCore.Signal(int)
step_updated = QtCore.Signal(float)
@inherit_docstring(ContinuousLinearScan.__init__)
def __init__(self):
ContinuousLinearScan.__init__(self)
QtCore.QObject.__init__(self)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
@inherit_docstring(ContinuousLinearScan.run)
def run(self, rate=0.1):
super(ContinuousLinearScanQt, self).run()
self.acquiring.wait()
self.timer.start(1000.*rate)
def get_qt_ui(self):
return ContinuousLinearScanUI(self)
@staticmethod
def get_qt_ui_cls():
return ContinuousLinearScanUI
@inherit_docstring(ContinuousLinearScan.update)
def update(self, force=False):
if not self.acquisition_thread.is_alive():
self.timer.stop()
@inherit_docstring(ContinuousLinearScan.update_from_feedback)
def update_from_feedback(self, direction, step):
super(ContinuousLinearScanQt, self).update_from_feedback(direction, step)
self.direction_updated.emit(self.direction)
self.step_updated.emit(self.step)
class ContinuousLinearScanUI(QtWidgets.QWidget, UiTools):
def __init__(self, cont_linear_scan):
assert isinstance(cont_linear_scan, ContinuousLinearScanQt), 'An instance of ContinuousLinearScanQt must be supplied'
super(ContinuousLinearScanUI, self).__init__()
self.linear_scan = cont_linear_scan
uic.loadUi(os.path.join(os.path.dirname(__file__), 'continuous_linear_scanner.ui'), self)
self.rate = 1./30.
self.setWindowTitle(self.linear_scan.__class__.__name__)
self.step.setValidator(QtGui.QDoubleValidator())
self.step.textChanged.connect(self.check_state)
self.step.textChanged.connect(self.on_text_change)
self.start_button.clicked.connect(self.on_click)
self.abort_button.clicked.connect(self.linear_scan.abort)
self.change_direction_button.clicked.connect(self.on_click)
self.step_up.clicked.connect(self.on_click)
self.step_down.clicked.connect(self.on_click)
self.step.setText(str(self.linear_scan.step))
self.direction.setText(str(self.linear_scan.direction))
self.num_repeats.setValidator(QtGui.QDoubleValidator())
self.num_repeats.textChanged.connect(self.check_state)
self.num_repeats.textChanged.connect(self.on_text_change)
self.hold.stateChanged.connect(self.on_state_change)
self.set_point.setValidator(QtGui.QDoubleValidator())
self.set_point.textChanged.connect(self.check_state)
self.set_point.textChanged.connect(self.on_text_change)
self.engage_feedback.stateChanged.connect(self.on_state_change)
self.linear_scan.direction_updated.connect(partial(self.update_param, 'direction'))
self.linear_scan.step_updated.connect(partial(self.update_param, 'step'))
def on_click(self):
sender = self.sender()
if sender == self.start_button:
self.linear_scan.run(self.rate)
elif sender == self.change_direction_button:
self.linear_scan.direction *= -1
self.direction.setText(str(self.linear_scan.direction))
elif sender == self.step_up:
self.step.blockSignals(True)
self.linear_scan.step *= 2
self.step.setText(str(self.linear_scan.step))
self.step.blockSignals(False)
elif sender == self.step_down:
self.step.blockSignals(True)
self.linear_scan.step /= 2
self.step.setText(str(self.linear_scan.step))
self.step.blockSignals(False)
def on_text_change(self, value):
sender = self.sender()
if sender.validator() is not None:
state = sender.validator().validate(value, 0)[0]
if state != QtGui.QValidator.Acceptable:
return
if sender == self.step:
self.linear_scan.step = float(value)
elif sender == self.num_repeats:
self.linear_scan.num_repeats = int(value)
elif sender == self.set_point:
self.linear_scan.set_point = float(value)
def on_state_change(self, state):
sender = self.sender()
if sender == self.hold:
if state == QtCore.Qt.Checked:
self.linear_scan.hold = True
elif state == QtCore.Qt.Unchecked:
self.linear_scan.hold = False
elif sender == self.engage_feedback:
if state == QtCore.Qt.Checked:
self.linear_scan.engage_feedback = True
elif state == QtCore.Qt.Unchecked:
self.linear_scan.engage_feedback = False
def update_param(self, param, value):
if param == 'direction':
self.direction.setText(str(value))
elif param == 'step':
self.step.setText(str(value))
if __name__ == '__main__':
import matplotlib
matplotlib.use('Qt4Agg')
from nplab.ui.mpl_gui import FigureCanvasWithDeferredDraw as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
class DummyLinearScan(ContinuousLinearScanQt):
def __init__(self):
super(DummyLinearScan, self).__init__()
self.step = 1.
self.direction = 1.
self.fig = Figure()
self.p = None
self.x = None
self.y = None
def open_scan(self):
self.fig.clear()
self.p = 0
self.d = []
self.x = []
self.y = []
self.ax = self.fig.add_subplot(111)
def set_parameter(self, value):
self.p += value
#def update_parameter(self, value):
# self.p += value
def scan_function(self, index):
time.sleep(0.01)
self.d.append(index)
self.x.append(self.p)
self.y.append(np.sin(2*np.pi*0.01*self.p))
self.check_for_data_request(self.d, self.x, self.y)
def update(self, force=False):
super(DummyLinearScan, self).update(force)
if self.y == [] or self.fig.canvas is None:
return
if force:
data = (self.d, self.x, self.y)
else:
data = self.request_data()
if data is not False:
d, x, y = data
if not np.any(np.isfinite(y)):
return
if not self.ax.lines:
self.ax.plot(d, y)
else:
l, = self.ax.lines
l.set_data(d, y)
self.ax.relim()
self.ax.autoscale_view()
self.fig.canvas.draw()
def get_qt_ui(self):
return DummyLinearScanUI(self)
def calculate_feedback_input(self):
return self.y[-1]
class DummyLinearScanUI(ContinuousLinearScanUI):
def __init__(self, linear_scan):
super(DummyLinearScanUI, self).__init__(linear_scan)
self.canvas = FigureCanvas(self.linear_scan.fig)
self.canvas.setMaximumSize(300,300)
self.layout.addWidget(self.canvas)
self.resize(self.sizeHint())
ls = DummyLinearScan()
app = get_qt_app()
gui = ls.get_qt_ui()
gui.rate = 1./30.
gui.show()
sys.exit(app.exec_())
| gpl-3.0 | -8,910,935,309,353,352,000 | 36.889241 | 125 | 0.612211 | false | 3.959325 | false | false | false |
ernw/knxmap | knxmap/bus/monitor.py | 1 | 6078 | import logging
import codecs
from knxmap.bus.tunnel import KnxTunnelConnection
from knxmap.data.constants import *
from knxmap.messages import parse_message, KnxConnectRequest, KnxConnectResponse, \
KnxTunnellingRequest, KnxTunnellingAck, KnxConnectionStateResponse, \
KnxDisconnectRequest, KnxDisconnectResponse
LOGGER = logging.getLogger(__name__)
class KnxBusMonitor(KnxTunnelConnection):
"""Implementation of bus_monitor_mode and group_monitor_mode."""
def __init__(self, future, loop=None, group_monitor=True):
super(KnxBusMonitor, self).__init__(future, loop=loop)
self.group_monitor = group_monitor
def connection_made(self, transport):
self.transport = transport
self.peername = self.transport.get_extra_info('peername')
self.sockname = self.transport.get_extra_info('sockname')
if self.group_monitor:
# Create a TUNNEL_LINKLAYER layer request (default)
connect_request = KnxConnectRequest(sockname=self.sockname)
else:
# Create a TUNNEL_BUSMONITOR layer request
connect_request = KnxConnectRequest(sockname=self.sockname,
layer_type='TUNNEL_BUSMONITOR')
LOGGER.trace_outgoing(connect_request)
self.transport.sendto(connect_request.get_message())
# Send CONNECTIONSTATE_REQUEST to keep the connection alive
self.loop.call_later(50, self.knx_keep_alive)
def datagram_received(self, data, addr):
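        """Parse an incoming KNX/IP frame and dispatch it by message type,
        acknowledging tunnelling requests and answering disconnect
        requests before closing down."""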
knx_message = parse_message(data)
if not knx_message:
LOGGER.error('Invalid KNX message: {}'.format(data))
self.knx_tunnel_disconnect()
self.transport.close()
self.future.set_result(None)
return
knx_message.set_peer(addr)
LOGGER.trace_incoming(knx_message)
if isinstance(knx_message, KnxConnectResponse):
if not knx_message.ERROR:
if not self.tunnel_established:
self.tunnel_established = True
self.communication_channel = knx_message.communication_channel
else:
if not self.group_monitor and knx_message.ERROR_CODE == 0x23:
LOGGER.error('Device does not support BUSMONITOR, try --group-monitor instead')
else:
LOGGER.error('Connection setup error: {}'.format(knx_message.ERROR))
self.transport.close()
self.future.set_result(None)
elif isinstance(knx_message, KnxTunnellingRequest):
self.print_message(knx_message)
if CEMI_PRIMITIVES[knx_message.cemi.message_code] == 'L_Data.con' or \
CEMI_PRIMITIVES[knx_message.cemi.message_code] == 'L_Data.ind' or \
CEMI_PRIMITIVES[knx_message.cemi.message_code] == 'L_Busmon.ind':
tunnelling_ack = KnxTunnellingAck(
communication_channel=knx_message.communication_channel,
sequence_count=knx_message.sequence_counter)
LOGGER.trace_outgoing(tunnelling_ack)
self.transport.sendto(tunnelling_ack.get_message())
elif isinstance(knx_message, KnxTunnellingAck):
self.print_message(knx_message)
elif isinstance(knx_message, KnxConnectionStateResponse):
# After receiving a CONNECTIONSTATE_RESPONSE schedule the next one
self.loop.call_later(50, self.knx_keep_alive)
elif isinstance(knx_message, KnxDisconnectRequest):
connect_response = KnxDisconnectResponse(communication_channel=self.communication_channel)
self.transport.sendto(connect_response.get_message())
self.transport.close()
self.future.set_result(None)
elif isinstance(knx_message, KnxDisconnectResponse):
self.transport.close()
self.future.set_result(None)
def print_message(self, message):
"""A generic message printing function. It defines
a format for the monitoring modes."""
assert isinstance(message, KnxTunnellingRequest)
        cemi = tpci = apci = {}
if message.cemi:
cemi = message.cemi
if cemi.tpci:
tpci = cemi.tpci
if cemi.apci:
apci = cemi.apci
        dst_addr = None
        if cemi.knx_destination and cemi.extended_control_field and \
                cemi.extended_control_field.get('address_type'):
            dst_addr = message.parse_knx_group_address(cemi.knx_destination)
        elif cemi.knx_destination:
            dst_addr = message.parse_knx_address(cemi.knx_destination)
if self.group_monitor:
            fmt = ('[ chan_id: {chan_id}, seq_no: {seq_no}, message_code: {msg_code}, '
                   'source_addr: {src_addr}, dest_addr: {dst_addr}, tpci_type: {tpci_type}, '
                   'tpci_seq: {tpci_seq}, apci_type: {apci_type}, apci_data: {apci_data} ]').format(
                chan_id=message.communication_channel,
                seq_no=message.sequence_counter,
                msg_code=CEMI_PRIMITIVES.get(cemi.message_code),
                src_addr=message.parse_knx_address(cemi.knx_source),
                dst_addr=dst_addr,
                tpci_type=_CEMI_TPCI_TYPES.get(tpci.tpci_type),
                tpci_seq=tpci.sequence,
                apci_type=_CEMI_APCI_TYPES.get(apci.apci_type),
                apci_data=apci.apci_data)
        else:
            fmt = ('[ chan_id: {chan_id}, seq_no: {seq_no}, message_code: {msg_code}, '
                   'timestamp: {timestamp}, raw_frame: {raw_frame} ]').format(
                chan_id=message.communication_channel,
                seq_no=message.sequence_counter,
                msg_code=CEMI_PRIMITIVES.get(cemi.message_code),
                timestamp=codecs.encode(cemi.additional_information.get('timestamp'), 'hex'),
                raw_frame=codecs.encode(cemi.raw_frame, 'hex'))
        LOGGER.info(fmt)
| gpl-3.0 | 4,277,655,511,188,420,000 | 50.508475 | 103 | 0.608753 | false | 3.79875 | false | false | false |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/test/ert_tests/ecl/test_ecl_init_file.py | 1 | 1467 | # Copyright (C) 2015 Statoil ASA, Norway.
#
# The file 'test_ecl_init_file.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert.test import ExtendedTestCase
from ert.ecl import Ecl3DKW , EclKW, EclTypeEnum, EclInitFile , EclFile, FortIO, EclFileFlagEnum , EclGrid
class InitFileTest(ExtendedTestCase):
def setUp(self):
self.grid_file = self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.EGRID")
self.init_file = self.createTestPath("Statoil/ECLIPSE/Gurbat/ECLIPSE.INIT")
def test_load(self):
g = EclGrid( self.grid_file )
f = EclInitFile( g , self.init_file )
head = f["INTEHEAD"][0]
self.assertTrue( isinstance( head , EclKW ))
porv = f["PORV"][0]
self.assertTrue( isinstance( porv , Ecl3DKW ))
poro = f["PORO"][0]
self.assertTrue( isinstance( poro , Ecl3DKW ))
| gpl-3.0 | -7,307,745,755,696,596,000 | 34.780488 | 106 | 0.661895 | false | 3.175325 | true | false | false |
NaohiroTamura/python-ironicclient | ironicclient/tests/unit/test_exc.py | 1 | 2592 | # Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six.moves import http_client
from ironicclient.common.apiclient import exceptions
from ironicclient import exc
from ironicclient.tests.unit import utils as test_utils
@mock.patch.object(exceptions, 'from_response')
class ExcTest(test_utils.BaseTestCase):
def setUp(self):
super(ExcTest, self).setUp()
self.message = 'SpongeBob SquarePants'
self.traceback = 'Foo Traceback'
self.method = 'call_spongebob'
self.url = 'http://foo.bar'
self.expected_json = {'error': {'message': self.message,
'details': self.traceback}}
def test_from_response(self, mock_apiclient):
fake_response = mock.Mock(status_code=http_client.BAD_REQUEST)
exc.from_response(fake_response, message=self.message,
traceback=self.traceback, method=self.method,
url=self.url)
self.assertEqual(http_client.BAD_REQUEST, fake_response.status_code)
self.assertEqual(self.expected_json, fake_response.json())
mock_apiclient.assert_called_once_with(
fake_response, method=self.method, url=self.url)
def test_from_response_status(self, mock_apiclient):
fake_response = mock.Mock(status=http_client.BAD_REQUEST)
fake_response.getheader.return_value = 'fake-header'
delattr(fake_response, 'status_code')
exc.from_response(fake_response, message=self.message,
traceback=self.traceback, method=self.method,
url=self.url)
expected_header = {'Content-Type': 'fake-header'}
self.assertEqual(expected_header, fake_response.headers)
self.assertEqual(http_client.BAD_REQUEST, fake_response.status_code)
self.assertEqual(self.expected_json, fake_response.json())
mock_apiclient.assert_called_once_with(
fake_response, method=self.method, url=self.url)
| apache-2.0 | -8,685,826,326,429,128,000 | 42.932203 | 78 | 0.668981 | false | 3.969372 | true | false | false |
sungpil/bigshow | com/sundaytoz/bigshow/chart.py | 1 | 4332 | import ast
import time
from com.sundaytoz.bigshow import models
from com.sundaytoz.bigshow.resources import Resource
from com.sundaytoz.cache import Cache
from com.sundaytoz.logger import Logger
class Chart:
TTL_LAST_JOB = 3600
TTL_LAST_RESULT = 2592000
__data_adapters = {}
@staticmethod
def query(chart_id, resource, query_type, query, query_params):
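        """Start an asynchronous query on the chart's resource adapter and
        return the generated job id (None if no adapter is found)."""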
Logger.debug('chart_id={0}, resource={1}, query_type={2}, query={3}, query_params={4}'
.format(chart_id, resource, query_type, query, query_params))
adapter = Resource.get(resource_id=resource)
if not adapter:
return None
else:
job_id = Chart.get_job_id(chart_id)
adapter.query(job_id=job_id, query_type=query_type, query=query, query_params=query_params)
return job_id
@staticmethod
def query_sync(chart_id, resource, query_type, query, query_params):
job_id = Chart.query(chart_id=chart_id, resource=resource,
query_type=query_type, query=query, query_params=query_params)
if not job_id:
return None, {'message': 'fail to initialize job'}
adapter = Resource.get(resource_id=resource)
if not adapter:
return None, {'message': 'fail to initialize resources'}
        retry_count = 100
        while retry_count > 0:
            status, results, error = adapter.get_result(job_id)
            if 'DONE' == status:
                return results, error
            retry_count -= 1
            time.sleep(10)
        # Retries exhausted without the job reaching DONE.
        return None, {'message': 'timed out waiting for job result'}
@staticmethod
def get_result(chart_id, from_cache=True):
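        """Return (status, results, error) for a chart, serving cached job
        results when available and (re)starting the query otherwise."""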
Logger().debug("get_result: chart_id={chart_id}, from_cache={from_cache}"
.format(chart_id=chart_id, from_cache=from_cache))
last_job_key = Chart.get_job_key(chart_id=chart_id)
if from_cache is True:
last_job = Cache().get(last_job_key)
else:
last_job = None
if not last_job:
chart = models.Chart.get(chart_id, ['resource,query_type,query,query_params'])
new_job = {'id': Chart.get_job_id(chart_id), 'resource': chart['resource']}
adapter = Resource.get(resource_id=chart['resource'])
adapter.query(job_id=new_job['id'], query_type=chart['query_type'],
query=chart['query'], query_params=chart['query_params'])
Cache().set(last_job_key, new_job, Chart.TTL_LAST_JOB)
return 'RUNNING', None, None
else:
last_job = ast.literal_eval(last_job)
last_job_id = last_job['id']
last_job_result = Cache().get(last_job_id)
if last_job_result:
last_job_result = ast.literal_eval(last_job_result)
return 'DONE', last_job_result['result'], last_job_result['error']
else:
adapter = Resource.get(resource_id=last_job['resource'])
if not adapter.exists(job_id=last_job_id):
chart = models.Chart.get(chart_id, ['resource,query_type,query,query_params'])
adapter.query_async(job_id=last_job['id'], query_type=chart['query_type'],
query=chart['query'], query_params=chart['query_params'])
Cache().set(last_job_key, last_job, Chart.TTL_LAST_JOB)
return 'RUNNING', None, None
else:
status, results, error = adapter.get_result(last_job_id)
if 'DONE' == status:
Cache().set(last_job_id, {'result': results, 'error': error}, Chart.TTL_LAST_RESULT)
return status, results, error
@staticmethod
def del_cache(chart_id):
Cache().delete(Chart.get_job_key(chart_id=chart_id))
@staticmethod
def get_cached_result(last_job_key):
last_job_id = Cache().get(last_job_key)
if last_job_id:
return last_job_id, Cache().get(last_job_id)
else:
return None, None
@staticmethod
def get_job_id(chart_id):
return "chart-{chart_id}-{time}".format(chart_id=chart_id, time=int(time.time()))
@staticmethod
def get_job_key(chart_id):
return "last_job:{chart_id}".format(chart_id=chart_id)
| mit | -5,101,964,833,741,713,000 | 41.058252 | 108 | 0.572253 | false | 3.712082 | false | false | false |
ktbyers/netmiko | netmiko/extreme/extreme_exos.py | 1 | 2550 | """Extreme support."""
import time
import re
from netmiko.no_config import NoConfig
from netmiko.cisco_base_connection import CiscoSSHConnection
class ExtremeExosBase(NoConfig, CiscoSSHConnection):
"""Extreme Exos support.
Designed for EXOS >= 15.0
"""
def session_preparation(self):
self._test_channel_read()
self.set_base_prompt()
self.disable_paging(command="disable clipaging")
self.send_command_timing("disable cli prompting")
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
def set_base_prompt(self, *args, **kwargs):
"""
Extreme attaches an id to the prompt. The id increases with every command.
        It needs to be stripped off to match the prompt, e.g.
testhost.1 #
testhost.2 #
testhost.3 #
If new config is loaded and not saved yet, a '* ' prefix appears before the
prompt, eg.
* testhost.4 #
* testhost.5 #
"""
cur_base_prompt = super().set_base_prompt(*args, **kwargs)
# Strip off any leading * or whitespace chars; strip off trailing period and digits
match = re.search(r"[\*\s]*(.*)\.\d+", cur_base_prompt)
if match:
self.base_prompt = match.group(1)
return self.base_prompt
else:
return self.base_prompt
def send_command(self, *args, **kwargs):
"""Extreme needs special handler here due to the prompt changes."""
# Change send_command behavior to use self.base_prompt
kwargs.setdefault("auto_find_prompt", False)
# refresh self.base_prompt
self.set_base_prompt()
return super().send_command(*args, **kwargs)
def check_config_mode(self, check_string="#"):
"""Checks whether in configuration mode. Returns a boolean."""
return super().check_config_mode(check_string=check_string)
def save_config(
self, cmd="save configuration primary", confirm=False, confirm_response=""
):
"""Saves configuration."""
return super().save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
)
class ExtremeExosSSH(ExtremeExosBase):
pass
class ExtremeExosTelnet(ExtremeExosBase):
def __init__(self, *args, **kwargs):
default_enter = kwargs.get("default_enter")
kwargs["default_enter"] = "\r\n" if default_enter is None else default_enter
super().__init__(*args, **kwargs)
| mit | 5,777,244,633,024,680,000 | 31.692308 | 91 | 0.618824 | false | 3.965785 | true | false | false |
ilblackdragon/django-blogs | blog/migrations/0003_remove_markup_type.py | 1 | 5554 |
from south.db import db
from django.db import models
from blog.models import *
class Migration:
def forwards(self, orm):
# Deleting field 'Post.markup'
db.delete_column('blog_post', 'markup')
# Changing field 'Post.slug'
# (to signature: django.db.models.fields.SlugField(db_index=True, max_length=50, blank=True))
db.alter_column('blog_post', 'slug', orm['blog.post:slug'])
def backwards(self, orm):
# Adding field 'Post.markup'
db.add_column('blog_post', 'markup', orm['blog.post:markup'])
# Changing field 'Post.slug'
# (to signature: django.db.models.fields.SlugField(max_length=50, db_index=True))
db.alter_column('blog_post', 'slug', orm['blog.post:slug'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blog.category': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'blog.post': {
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'added_posts'", 'to': "orm['auth.User']"}),
'body': ('django.db.models.fields.TextField', [], {}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_list'", 'null': 'True', 'to': "orm['blog.Category']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publish': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'tags': ('tagging.fields.TagField', [], {}),
'tease': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blog']
| mit | -6,531,511,406,805,938,000 | 61.113636 | 154 | 0.53547 | false | 3.742588 | false | false | false |
bond-anton/ScientificProjects | BDProjects/EntityManagers/LogManager.py | 1 | 3830 | from __future__ import division, print_function
from sqlalchemy import func
from BDProjects.Entities import LogCategory, Log
from BDProjects.Entities import Project
from BDProjects.Entities import Session
from BDProjects.Entities import User
from .EntityManager import EntityManager
default_log_categories = {'Information': 'Informational messages',
'Warning': 'Warning messages',
'Error': 'Error messages'}
class LogManager(EntityManager):
def __init__(self, session_manager, echo=True):
self.echo = echo
super(LogManager, self).__init__(session_manager)
def create_log_category(self, category, description=None):
log_category, category_exists = self._check_category_name(category, description)
if log_category and not category_exists:
if self.session_manager.session_data is not None:
log_category.session_id = self.session_manager.session_data.id
self.session.add(log_category)
self.session.commit()
if log_category.category not in default_log_categories:
record = 'Log category %s successfully created' % log_category.category
self.log_record(record=record, category='Information')
return log_category
else:
self.session.rollback()
if log_category.category not in default_log_categories:
record = 'Log category %s is already registered' % log_category.category
self.log_record(record=record, category='Warning')
return self.session.query(LogCategory).filter(LogCategory.category == log_category.category).one()
def log_record(self, record, category=None):
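        """Persist a log entry tagged with the current session and project
        and, when echo is enabled, mirror it to stdout."""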
log_category, category_exists = self._check_category_name(category)
category_id, project_id, session_id = None, None, None
if not category_exists:
record = 'Create log category first'
self.log_record(record=record, category='Warning')
else:
category_id = log_category.id
if self.session_manager.project is not None:
if not isinstance(self.session_manager.project, Project):
raise ValueError('provide a Project instance or None')
project_id = self.session_manager.project.id
if self.session_manager.session_data is not None:
if not isinstance(self.session_manager.session_data, Session):
raise ValueError('provide a valid Session or None')
session_id = self.session_manager.session_data.id
log = Log(record=record, category_id=category_id, project_id=project_id, session_id=session_id)
self.session.add(log)
self.session.commit()
if self.echo:
login_length = self._get_max_login_length()
user_login = self.session_manager.user.login
user_login = '@' + user_login + ' ' * (login_length - len(user_login))
print('[%s] %s: %s' % (log_category.category.upper()[:4], user_login, record))
def _get_max_login_length(self):
return self.session.query(func.max(func.length(User.login))).one()[0]
def _check_category_name(self, category, description=None):
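        """Build a LogCategory from a category name and report whether a
        category with that name is already registered; returns a
        (log_category, category_exists) pair."""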
category_exists = False
if isinstance(category, str):
log_category = LogCategory(category=category, description=description)
existing_category = self.session.query(LogCategory).filter(
LogCategory.category == log_category.category).all()
if existing_category:
log_category = existing_category[0]
category_exists = True
else:
log_category = None
return log_category, category_exists
| apache-2.0 | 7,475,034,101,805,334,000 | 46.875 | 110 | 0.629243 | false | 4.352273 | false | false | false |
memespring/open-notices | open_notices/apps/notices/tests.py | 1 | 6463 | from django.test import TestCase
from django.test import Client
from rest_framework.test import APIClient
from notices import models
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from django.core.exceptions import ValidationError
from datetime import datetime
class NoticeModelTestCase(TestCase):
def setUp(self):
UserModel = get_user_model()
self.user = UserModel(email='[email protected]')
self.user.set_password('notasecret')
self.user.save()
def test_invalid_date_range(self):
with self.assertRaises(ValidationError):
notice = models.Notice()
notice.title = 'test title'
notice.details = 'test details'
notice.location = 'SRID=3857;POINT (-284821.3533571999869309 6865433.3731604004278779)'
notice.starts_at = datetime(2016, 1, 1)
notice.ends_at = datetime(2012, 1, 1)
notice.timezone = "Europe/London"
notice.user = self.user
notice.save()
class NoticeAPIGeojsonTestCase(TestCase):
def get_valid_data(self):
return {'title': 'test title', 'location': {"type":"Point","coordinates":[-0.09430885313565737,51.43326585306407]}, 'tags': [],"starts_at":"2016-01-01T11:00:00","ends_at":"2016-01-02T12:00:00", "timezone": "Europe/London"}
def setUp(self):
self.client = APIClient()
UserModel = get_user_model()
self.user = UserModel(email='[email protected]')
self.user.set_password('notasecret')
self.user.save()
def test_list(self):
response = self.client.get('/notices.geojson')
self.assertEqual(response.status_code, 200)
def test_create_method_not_allowed(self):
data = self.get_valid_data()
token = Token.objects.get_or_create(user=self.user)[0]
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post('/notices/new.geojson', data, format='json')
self.assertEqual(response.status_code, 405)
class NoticeAPITestCase(TestCase):
def setUp(self):
self.client = APIClient()
UserModel = get_user_model()
self.user = UserModel(email='[email protected]')
self.user.set_password('notasecret')
self.user.save()
Token.objects.create(user=self.user)
def get_valid_data(self):
return {'title': 'test title', 'location': {"type":"Point","coordinates":[-0.09430885313565737,51.43326585306407]}, 'tags': [],"starts_at":"2016-01-01T11:00:00","ends_at":"2016-01-02T12:00:00", "timezone": "Europe/London"}
def test_create_get_not_found(self):
response = self.client.get('/notices/new.json')
self.assertEqual(response.status_code, 405)
def test_create_unauthorised(self):
response = self.client.post('/notices/new.json')
self.assertEqual(response.status_code, 401)
def test_create_authorised_empty(self):
token = Token.objects.get_or_create(user=self.user)[0]
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post('/notices/new.json')
self.assertEqual(response.status_code, 400)
def test_create_authorised_valid(self):
data = self.get_valid_data()
token = Token.objects.get_or_create(user=self.user)[0]
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post('/notices/new.json', data, format='json')
self.assertEqual(response.status_code, 201)
def test_create_non_json_denied(self):
data = self.get_valid_data()
token = Token.objects.get_or_create(user=self.user)[0]
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post('/notices/new.geojson', data, format='json')
self.assertEqual(response.status_code, 405)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
response = self.client.post('/notices/new.csv', data, format='json')
self.assertEqual(response.status_code, 405)
class NoticeTestCase(TestCase):
def setUp(self):
# Every test needs a client.
self.client = Client()
#create a user for use later
UserModel = get_user_model()
self.user = UserModel(email='[email protected]')
self.user.set_password('notasecret')
self.user.save()
def test_list(self):
response = self.client.get('/notices/')
self.assertEqual(response.status_code, 200)
def test_view_notice(self):
notice = models.Notice()
notice.title = 'test title'
notice.details = 'test details'
notice.location = 'SRID=3857;POINT (-284821.3533571999869309 6865433.3731604004278779)'
notice.starts_at = datetime(2016, 1, 1)
notice.ends_at = datetime(2016, 1, 1)
notice.timezone = "Europe/London"
notice.user = self.user
notice.save()
response = self.client.get('/notices/%s/' % notice.pk)
self.assertContains(response, 'test title', 2, 200)
self.assertEqual(response.status_code, 200)
def test_create_unauthorised(self):
response = self.client.post('/notices/new', follow=True)
self.assertRedirects(response, '/signin/?next=/notices/new')
def test_create_empty(self):
self.client.login(email='[email protected]', password='notasecret')
response = self.client.post('/notices/new')
self.assertContains(response, "This field is required", 1, 200)
def test_create_valid(self):
self.client.login(email='[email protected]', password='notasecret')
#information
data = {'title': 'Test notice', 'details': 'It is a test'}
response = self.client.post('/notices/new', data, follow=True)
self.assertRedirects(response, '/notices/new/location')
#location
data = {'location': 'SRID=3857;POINT (-284821.3533571999869309 6865433.3731604004278779)'}
response = self.client.post('/notices/new/location', data, follow=True)
self.assertRedirects(response, '/notices/new/datetime')
#datetime
data = {'starts_at': '2016-01-01', 'ends_at': '2016-01-02', 'timezone': 'Europe/London'}
response = self.client.post('/notices/new/datetime', data)
self.assertEqual(response.status_code, 302)
| agpl-3.0 | 1,189,363,066,844,634,600 | 39.647799 | 230 | 0.652793 | false | 3.556962 | true | false | false |
Garcia1008/tournament | changectx/changectx.py | 1 | 2307 | import discord
from discord.ext import commands
from .utils import checks
import time
from random import randint
class ChangeCTX:
def __init__(self, bot):
self.bot = bot
self.context = None
self.impersonate = None
@checks.is_owner()
@commands.command(name="setcontext", pass_context=True)
async def set_context(self, ctx, channel_id: str):
channel = self.bot.get_channel(channel_id)
if channel is None:
return await self.bot.say("Channel not found")
if channel.type != discord.ChannelType.text:
return await self.bot.say("Try again with a text channel")
self.context = channel
await self.bot.say("Context set to channel {0.name}".format(channel))
@checks.is_owner()
@commands.command(name="setauthor", pass_context=True)
async def set_impersonate(self, ctx, user_id: str=None):
self.impersonate = user_id
await self.bot.say("Impersonate ID set")
@checks.is_owner()
@commands.command(name="runincontext", pass_context=True)
async def run_in_context(self, ctx, *, com: str):
if self.context is None and self.impersonate is None:
return await \
self.bot.say("Hint: `{0.prefix}setcontext`"
"and/or `{0.prefix}setauthor`".format(ctx))
chan = ctx.message.channel if self.context is None \
else self.context
try:
server = chan.server
prefix = self.bot.settings.get_prefixes(server)[0]
except AttributeError:
return await self.bot.say("Are you sure I can see that channel?")
author_id = ctx.message.author.id if self.impersonate is None \
else self.impersonate
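        # Forge a minimal message payload and dispatch it so the bot
        # processes the command as if received from the chosen channel/author.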
data = \
{'timestamp': time.strftime("%Y-%m-%dT%H:%M:%S%z", time.gmtime()),
'content': prefix + com,
'channel': chan,
'channel_id': chan.id,
'author': {'id': author_id},
'nonce': randint(-2**32, (2**32) - 1),
'id': randint(10**(17), (10**18) - 1),
'reactions': []
}
message = discord.Message(**data)
self.bot.dispatch('message', message)
def setup(bot):
n = ChangeCTX(bot)
bot.add_cog(n)
| mit | 6,471,981,614,218,644,000 | 30.60274 | 78 | 0.579541 | false | 3.74513 | false | false | false |
DynamoDS/Coulomb | WorkspaceTools/uses_list_at_level.py | 1 | 2340 | # This tool walks over a dataset and reports how many workspaes have at least one use of
# list at level
import gzip
import json
import base64
import sys
import traceback
import time
from os import listdir
from os.path import isfile, join
from collections import defaultdict
import os
VERBOSE = True
def log(s):
if VERBOSE:
print time.strftime("%Y-%m-%d %H:%M:%S"), s
if len(sys.argv) != 3:
print "Usage: python export_workspaces.py path_to_data out_path"
print "Walk over sessions files to export whether it uses"
print "list at level"
exit(1)
path = sys.argv[1]
outPath = sys.argv[2]
linesCount = 0
dataLinesCount = 0
err = 0
dtWsCount = {}
def updateResultFile():
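    """Rewrite the CSV report with per-date counts of workspaces
    without/with list-at-level usage."""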
outF = open(outPath, 'w')
outF.write("Date, No L@L, L@L\n")
for k in sorted(dtWsCount):
v = dtWsCount[k]
outF.write(k + ", " + str(v[False]) + ", " + str(v[True]) + "\n")
outF.flush()
log("Start")
files = [ f for f in listdir(path) if isfile(join(path,f)) ]
for filePath in files:
    f = gzip.open(join(path, filePath))
for ln in f:
linesCount += 1
if linesCount % 1000 == 0:
updateResultFile()
log (str(linesCount))
# log (str(linesCount) + "\t" + str(dataLinesCount) + "\t" + str(err) + "\tNew sessions:\t" + str(len(newSessionIDSet)) + "\tUpdated sessions:\t" + str(len(sessionIDSet)))
try:
if not ln.startswith("{"):
continue
dataLinesCount += 1
data = json.loads(ln)
session = data["SessionID"]
serverDate = data["b75e7a7f_ServerDate"]
tag = data["Tag"]
if (tag.startswith("Workspace")):
data = base64.b64decode(data["Data"])
usesListAtLevel = False
usesListAtLevel = data.find('useLevels="True"') > -1
if not dtWsCount.has_key(serverDate):
dtWsCount[serverDate] = {}
dtWsCount[serverDate][False] = 0
dtWsCount[serverDate][True] = 0
dtWsCount[serverDate][usesListAtLevel] += 1
# print (dtWsCount)
# print (session + ",\t" + serverDate + ",\t")
        except Exception:
err += 1
print err
log("Printing results")
| mit | 695,384,233,009,811,600 | 25.292135 | 174 | 0.560684 | false | 3.451327 | false | false | false |
alladdin/plugin.video.primaplay | default.py | 1 | 10359 | # -*- coding: utf-8 -*-
import os
import sys
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import traceback
import time
from xbmcplugin import addDirectoryItem
from libPrimaPlay import PrimaPlay
import urllib
from urlparse import parse_qs
_addon_ = xbmcaddon.Addon('plugin.video.primaplay')
_scriptname_ = _addon_.getAddonInfo('name')
_version_ = _addon_.getAddonInfo('version')
###############################################################################
def log(msg, level=xbmc.LOGDEBUG):
if type(msg).__name__ == 'unicode':
msg = msg.encode('utf-8')
xbmc.log("[%s] %s" % (_scriptname_, msg.__str__()), level)
def logDbg(msg):
log(msg, level=xbmc.LOGDEBUG)
def logErr(msg):
log(msg, level=xbmc.LOGERROR)
def _exception_log(exc_type, exc_value, exc_traceback):
logErr(traceback.format_exception(exc_type, exc_value, exc_traceback))
xbmcgui.Dialog().notification(_scriptname_, _toString(exc_value), xbmcgui.NOTIFICATION_ERROR)
def _toString(text):
if type(text).__name__ == 'unicode':
output = text.encode('utf-8')
else:
output = str(text)
return output
try:
_icon_ = xbmc.translatePath(os.path.join(_addon_.getAddonInfo('path'), 'icon.png'))
_handle_ = int(sys.argv[1])
_baseurl_ = sys.argv[0]
_hd_enabled = False;
if (_addon_.getSetting('hd_enabled') == 'true'): _hd_enabled = True
_play_parser = PrimaPlay.Parser(hd_enabled=_hd_enabled)
_play_account = None
if (_addon_.getSetting('account_enabled') == 'true'):
_play_account = PrimaPlay.Account( _addon_.getSetting('account_email'), _addon_.getSetting('account_password'), _play_parser )
xbmcplugin.setContent(_handle_, 'tvshows')
def main_menu(pageurl, list_only = False):
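        """Build one Kodi listing page: an optional player entry, the
        search/account items, filter controls and the page's video lists."""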
page = _play_parser.get_page(pageurl+'?strana=1')
if not list_only:
if page.player:
add_player(page.player)
else:
add_search_menu()
add_account_menu()
add_filters(page, pageurl)
for video_list in page.video_lists:
if video_list.title: add_title(video_list)
add_item_list(video_list.item_list)
if video_list.next_link: add_next_link(video_list.next_link)
def shows_menu(pageurl, list_only = False):
page = _play_parser.get_shows(pageurl)
for video_list in page.video_lists:
if video_list.title: add_show(video_list)
add_item_list(video_list.item_list)
if video_list.next_link: add_next_link(video_list.next_link)
def show_navigation(pageurl, list_only = False):
page = _play_parser.get_show_navigation(pageurl)
for video_list in page.video_lists:
if video_list.title: add_title(video_list)
def next_menu(nexturl):
next_list = _play_parser.get_next_list(nexturl)
add_item_list(next_list.list)
if next_list.next_link: add_next_link(next_list.next_link)
def search():
keyboard = xbmc.Keyboard('',u'Hledej')
keyboard.doModal()
if (not keyboard.isConfirmed()): return
search_query = keyboard.getText()
if len(search_query) <= 1: return
main_menu(_play_parser.get_search_url(search_query))
def account():
if not _play_account.login():
li = list_item('[B]Chyba přihlášení![/B] Zkontrolujte e-mail a heslo.')
xbmcplugin.addDirectoryItem(handle=_handle_, url='#', listitem=li, isFolder=True)
return
main_menu(_play_account.video_list_url, True)
def remove_filter(removefilterurl):
link = _play_parser.get_redirect_from_remove_link(removefilterurl)
main_menu(link)
def manage_filter(pageurl, filterid):
if filterid is None:
main_menu(pageurl)
return
page = _play_parser.get_page(pageurl)
dlg = xbmcgui.Dialog()
filter_list = page.filter_lists[filterid]
add_id = dlg.select(filter_list.title, map(lambda x: x.title, filter_list.item_list))
if add_id < 0:
main_menu(pageurl)
return
main_menu(filter_list.item_list[add_id].link)
def add_filters(page, pageurl):
if page.current_filters:
li = list_item(u'[B]Odstranit nastavené filtry: [/B]' + ", ".join(map(lambda x: x.title, page.current_filters.item_list)))
url = get_menu_link( action = 'FILTER-REMOVE', linkurl = page.current_filters.link )
xbmcplugin.addDirectoryItem(handle=_handle_, url=url, listitem=li, isFolder=True)
for filterid, filter_list in enumerate(page.filter_lists):
li = list_item(u'[B]Nastav filtr: [/B]' + filter_list.title)
url = get_menu_link( action = 'FILTER-MANAGE', linkurl = pageurl, filterid = filterid )
xbmcplugin.addDirectoryItem(handle=_handle_, url=url, listitem=li, isFolder=True)
def add_search_menu():
li = list_item(u'[B]Hledej[/B]')
url = get_menu_link( action = 'SEARCH' )
xbmcplugin.addDirectoryItem(handle=_handle_, url=url, listitem=li, isFolder=True)
def add_account_menu():
if _play_account is None: return
li = list_item(u'[B]Můj PLAY[/B]')
url = get_menu_link( action = 'ACCOUNT' )
xbmcplugin.addDirectoryItem(handle=_handle_, url=url, listitem=li, isFolder=True)
def add_show(video_list):
url = '#'
thumbnail = None
if video_list.link:
url = get_menu_link( action = 'SHOW-NAV', linkurl = video_list.link )
if video_list.thumbnail:
thumbnail = video_list.thumbnail
li = list_item(video_list.title, thumbnail)
xbmcplugin.addDirectoryItem(handle=_handle_, url=url, listitem=li, isFolder=True)
def add_title(video_list):
li = list_item('[B]'+video_list.title+'[/B]')
url = '#'
if video_list.link:
url = get_menu_link( action = 'PAGE', linkurl = video_list.link )
xbmcplugin.addDirectoryItem(handle=_handle_, url=url, listitem=li, isFolder=True)
def add_item_list(item_list):
for item in item_list:
li = list_item(item.title, item.image_url, item.description, item.broadcast_date, item.year)
url = item.link
if item.isFolder: url = get_menu_link( action = 'PAGE', linkurl = item.link )
xbmcplugin.addDirectoryItem(handle=_handle_, url=url, listitem=li, isFolder=item.isFolder)
def add_next_link(next_link):
li = list_item(u'Další stránka')
url = get_menu_link( action = 'PAGE-NEXT', linkurl = next_link )
xbmcplugin.addDirectoryItem(handle=_handle_, url=url, listitem=li, isFolder=True)
def add_player(player):
li = list_item(u"[B]Přehraj:[/B] "+player.title, player.image_url, player.description, player.broadcast_date, player.year)
xbmcplugin.addDirectoryItem(handle=_handle_, url=player.video_link, listitem=li, isFolder=False)
def play_video(link):
product_id = _play_parser.get_productID(link)
video = _play_parser.get_video(product_id)
if video.link is None:
raise Exception('Video není dostupné')
return
video_item = xbmcgui.ListItem(video.title)
video_item.setInfo('video', {'Title': video.title})
video_item.setThumbnailImage(video.image_url)
player = xbmc.Player()
player.play(video.link, video_item)
def list_item(label, thumbnail = None, description = None, broadcast_date = None, year = None):
li = xbmcgui.ListItem(label)
liVideo = {
'title': label,
'plot': description,
'year': year,
'aired': broadcast_date
}
if thumbnail:
li.setThumbnailImage(thumbnail)
li.setArt({'poster': thumbnail, 'fanart': thumbnail})
li.setInfo("video", liVideo)
return li
def get_menu_link(**kwargs):
return _baseurl_ + "?" + urllib.urlencode(kwargs)
def get_params():
if len(sys.argv[2])<2: return []
encoded_query = sys.argv[2].lstrip('?')
decoded_params = parse_qs(encoded_query)
param = {}
for key in decoded_params:
if len(decoded_params[key]) <= 0: continue
param[key] = decoded_params[key][0]
return param
def assign_params(params):
for param in params:
try:
globals()[param] = params[param]
except:
pass
action = None
linkurl = None
filterid = None
params = get_params()
assign_params(params)
logDbg("PrimaPlay Parameters!!!")
logDbg("action: "+str(action))
logDbg("linkurl: "+str(linkurl))
logDbg("filterid: "+str(filterid))
try:
if action == "FILTER-REMOVE":
remove_filter(linkurl)
xbmcplugin.endOfDirectory(_handle_, updateListing=True)
if action == "FILTER-MANAGE":
manage_filter(linkurl, int(filterid))
xbmcplugin.endOfDirectory(_handle_, updateListing=True)
elif action == "PAGE-NEXT":
next_menu(linkurl)
xbmcplugin.endOfDirectory(_handle_, updateListing=True)
elif action == "SEARCH":
search()
xbmcplugin.endOfDirectory(_handle_)
elif action == "ACCOUNT":
account()
xbmcplugin.endOfDirectory(_handle_)
elif action == "SHOW-NAV":
show_navigation(linkurl)
xbmcplugin.endOfDirectory(_handle_)
elif action == "PAGE":
main_menu(linkurl, list_only=True)
xbmcplugin.endOfDirectory(_handle_)
elif action == "PLAY":
play_video(linkurl)
else:
ts = int(time.time())
shows_menu("https://prima.iprima.cz/iprima-api/ListWithFilter/Series/Content?ts="+ str(ts) +"&filter=all&featured_queue_name=iprima:hp-featured-series")
xbmcplugin.endOfDirectory(_handle_)
except Exception as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
_exception_log(exc_type, exc_value, exc_traceback)
except Exception as ex:
exc_type, exc_value, exc_traceback = sys.exc_info()
_exception_log(exc_type, exc_value, exc_traceback)
| gpl-2.0 | 6,113,510,587,463,496,000 | 37.322222 | 164 | 0.603557 | false | 3.533811 | false | false | false |
StingraySoftware/dave | src/main/python/utils/dataset_cache.py | 1 | 1790 | import hashlib
import logging
import utils.exception_helper as ExHelper
from random import randint
from config import CONFIG
import pylru
cached_datasets = pylru.lrucache(CONFIG.PYTHON_CACHE_SIZE)
# DATASET CACHE METHODS
def add(key, dataset):
try:
cached_datasets[key] = dataset
except:
logging.error(ExHelper.getException('dataset_cache.add'))
def contains(key):
try:
return key in cached_datasets
except:
logging.error(ExHelper.getException('dataset_cache.contains'))
return False
def get(key):
try:
if contains(key):
return cached_datasets[key]
except:
logging.error(ExHelper.getException('dataset_cache.get'))
return None
def remove(key):
try:
if contains(key):
del cached_datasets[key]
return True
except:
logging.error(ExHelper.getException('dataset_cache.remove'))
return False
def remove_with_prefix(key_prefix):
try:
remove_keys = []
for key in cached_datasets.keys():
if key.startswith(key_prefix):
remove_keys.append(key)
for key in remove_keys:
remove(key)
except:
logging.error(ExHelper.getException('dataset_cache.remove_with_prefix'))
def get_key(value, strict=False):
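    """Derive an alphanumeric MD5-based cache key; unless strict is True, a
    random suffix is mixed in so repeated values get distinct keys."""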
try:
m = hashlib.md5()
if strict:
m.update(str(value).encode('utf-8'))
else:
m.update(str(value + str(randint(0,99999))).encode('utf-8'))
ugly_key = str(m.digest())
return "".join(e for e in ugly_key if e.isalnum())
except:
logging.error(ExHelper.getException('dataset_cache.remove_with_prefix'))
return ""
def count():
return len(cached_datasets)
def clear():
cached_datasets.clear()
| apache-2.0 | -4,037,457,176,267,402,000 | 23.189189 | 80 | 0.623464 | false | 3.792373 | false | false | false |
py-mina-deploy/py-mina | py_mina/decorators/task.py | 1 | 1045 | """
Task decorator (wrapper for `fabric3` @task decorator)
"""
from __future__ import with_statement
import timeit
import fabric.api
from py_mina.echo import echo_task, print_task_stats
def task(wrapped_function):
"""
Task function decorator
"""
wrapped_function_name = wrapped_function.__name__
def task_wrapper(*args):
"""
Runs task and prints stats at the end
"""
echo_task('Running "%s" task\n' % wrapped_function_name)
start_time = timeit.default_timer()
with fabric.api.settings(colorize_errors=True):
try:
wrapped_function(*args)
except Exception as e:
print_task_stats(wrapped_function_name, start_time, e)
raise e # escalate exception
else:
print_task_stats(wrapped_function_name, start_time)
# Copy __name__ and __doc__ from decorated function to wrapper function
task_wrapper.__name__ = wrapped_function_name or 'task'
if wrapped_function.__doc__: task_wrapper.__doc__ = wrapped_function.__doc__
# Decorate with `fabric3` task decorator
return fabric.api.task(task_wrapper)
| mit | -8,349,296,923,187,485,000 | 22.75 | 77 | 0.702392 | false | 3.29653 | false | false | false |
GiulioGx/RNNs | sources/ObjectiveFunction.py | 1 | 3362 | import theano.tensor as TT
from theano.tensor.elemwise import TensorType
import theano as T
from infos.Info import Info
from infos.InfoElement import PrintableInfoElement
from infos.InfoGroup import InfoGroup
from infos.InfoList import InfoList
from infos.InfoProducer import SimpleInfoProducer
from infos.SymbolicInfo import SymbolicInfo
from lossFunctions.LossFunction import LossFunction
from model.Variables import Variables
__author__ = 'giulio'
class ObjectiveFunction(SimpleInfoProducer): # XXX is this class needed?
def __init__(self, loss_fnc: LossFunction, net, params: Variables, u, t, mask):
self.__net = net
self.__loss_fnc = loss_fnc
self.__u = u
self.__t = t
self.__params = params
# XXX
# XXX REMOVE (?)
self.failsafe_grad, _ = self.__net.symbols.failsafe_grad(u=u, t=t, mask=mask, params=self.__params,
obj_fnc=self)
self.__grad, self.__objective_value = self.__net.symbols.gradient(u=u, t=t, mask=mask, params=self.__params,
obj_fnc=self)
grad_norm = self.__grad.value.norm()
# separate
gradient_info = self.__grad.temporal_norms_infos
# DEBUG DIFF
# debug_diff = (self.grad.value - self.failsafe_grad).norm()
debug_diff = TT.alloc(-1)
self.__infos = ObjectiveFunction.Info(gradient_info, self.__objective_value, grad_norm, debug_diff,
net.symbols.mask)
@property
def current_loss(self):
return self.__objective_value
def value(self, y, t, mask):
return self.__loss_fnc.value(y=y, t=t, mask=mask)
@property
def loss_mask(self):
return self.__loss_fnc.mask
@property
def infos(self):
return self.__infos
@property
def grad(self):
return self.__grad
class Info(SymbolicInfo):
def __init__(self, gradient_info, objective_value, grad_norm, debug_diff, mask):
# n_selected_temporal_losses = TT.switch(mask.sum(axis=1) > 0, 1, 0).sum(axis=1).sum()
n_selected_temporal_losses = LossFunction.num_examples_insting_temp_loss(mask)
self.__symbols = [objective_value, grad_norm, debug_diff,
n_selected_temporal_losses] + gradient_info.symbols
self.__symbolic_gradient_info = gradient_info
def fill_symbols(self, symbols_replacements: list) -> Info:
loss_value_info = PrintableInfoElement('value', ':07.3f', symbols_replacements[0].item())
loss_grad_info = PrintableInfoElement('grad', ':07.3f', symbols_replacements[1].item())
norm_diff_info = PrintableInfoElement('@@', '', symbols_replacements[2].item())
n_loss_info = PrintableInfoElement('##n', '', symbols_replacements[3])
gradient_info = self.__symbolic_gradient_info.fill_symbols(symbols_replacements[4:])
loss_info = InfoGroup('loss', InfoList(loss_value_info, loss_grad_info))
obj_info = InfoGroup('obj', InfoList(loss_info, gradient_info))
info = InfoList(obj_info, norm_diff_info)
return info
@property
def symbols(self):
return self.__symbols
| lgpl-3.0 | -1,983,306,252,256,076,300 | 36.355556 | 116 | 0.603212 | false | 3.855505 | false | false | false |
hasgeek/funnel | funnel/forms/organization.py | 1 | 3873 | from __future__ import annotations
from flask import Markup, url_for
from baseframe import _, __
from coaster.auth import current_auth
import baseframe.forms as forms
from ..models import Organization, Profile, Team
__all__ = ['OrganizationForm', 'TeamForm']
@Organization.forms('main')
class OrganizationForm(forms.Form):
title = forms.StringField(
__("Organization name"),
description=__(
"Your organization’s given name, without legal suffixes such as Pvt Ltd"
),
validators=[
forms.validators.DataRequired(),
forms.validators.Length(max=Organization.__title_length__),
],
filters=[forms.filters.strip()],
)
name = forms.AnnotatedTextField(
__("Username"),
description=__(
"A short name for your organization’s profile page."
" Single word containing letters, numbers and dashes only."
" Pick something permanent: changing it will break existing links from"
" around the web"
),
validators=[
forms.validators.DataRequired(),
forms.validators.Length(max=Profile.__name_length__),
],
filters=[forms.filters.strip()],
prefix="https://hasgeek.com/",
widget_attrs={'autocorrect': 'none', 'autocapitalize': 'none'},
)
def validate_name(self, field):
reason = Profile.validate_name_candidate(field.data)
if not reason:
return # name is available
if reason == 'invalid':
raise forms.ValidationError(
_(
"Names can only have letters, numbers and dashes (except at the"
" ends)"
)
)
if reason == 'reserved':
raise forms.ValidationError(_("This name is reserved"))
if self.edit_obj and field.data.lower() == self.edit_obj.name.lower():
# Name is not reserved or invalid under current rules. It's also not changed
# from existing name, or has only changed case. This is a validation pass.
return
if reason == 'user':
if (
current_auth.user.username
and field.data.lower() == current_auth.user.username.lower()
):
raise forms.ValidationError(
Markup(
_(
"This is <em>your</em> current username."
' You must change it first from <a href="{account}">your'
" account</a> before you can assign it to an organization"
).format(account=url_for('account'))
)
)
raise forms.ValidationError(_("This name has been taken by another user"))
if reason == 'org':
raise forms.ValidationError(
_("This name has been taken by another organization")
)
# We're not supposed to get an unknown reason. Flag error to developers.
raise ValueError(f"Unknown profile name validation failure reason: {reason}")
@Team.forms('main')
class TeamForm(forms.Form):
title = forms.StringField(
__("Team name"),
validators=[
forms.validators.DataRequired(),
forms.validators.Length(max=Team.__title_length__),
],
filters=[forms.filters.strip()],
)
users = forms.UserSelectMultiField(
__("Users"),
validators=[forms.validators.DataRequired()],
description=__("Lookup a user by their username or email address"),
)
is_public = forms.BooleanField(
__("Make this team public"),
description=__(
"Team members will be listed on the organization’s profile page"
),
default=True,
)
| agpl-3.0 | -2,139,949,074,538,231,800 | 35.828571 | 88 | 0.5609 | false | 4.85804 | false | false | false |
binary-array-ld/bald | ncldDump/ncldDump.py | 1 | 18095 | from __future__ import print_function
from six import string_types, PY2
import argparse
import jinja2
import json
import netCDF4
import numpy
import os
import re
import sys
import pprint
import traceback
def parseArgs(args):
'''
Parse the command line arguments into a dictionary object.
args [in] A list of command line arguments.
returns A dictionary of the parse results.
'''
parser = argparse.ArgumentParser(description = 'Generate web-linked CDL (without data) from a netCDF-LD file as HTML.',
epilog = 'If no output file is specified, the output will be written to the file ncldDump.html in the current folder.')
parser.add_argument('-a', metavar = '<alias file>', default = None,
dest = 'aliasFile', help = 'A JSON file containing alias definitions.')
parser.add_argument('-o', metavar = '<output file>', default = 'ncldDump.html',
dest = 'outputFile', help = 'The file to write the output to.')
parser.add_argument('inputFile', metavar = '<input file>', help = 'A netCDF-LD file.')
parsedArgs = parser.parse_args(args)
assert os.access(parsedArgs.inputFile, os.R_OK), 'Unable to read file ' + parsedArgs.inputFile
if parsedArgs.aliasFile is not None:
assert os.access(parsedArgs.aliasFile, os.R_OK), 'Unable to read file ' + parsedArgs.aliasFile
argDict = vars(parsedArgs)
return argDict
def parseDtype(dtype):
'''
Return a string representing the data type in the dtype argument.
dtype [in] A dtype object.
returns A string.
'''
# Get the basic two character type string for the type. Remove any
# byte-order information or other extraneous characters.
#
theType = dtype.str.strip('><=#')
# Map the type. If the type is not found, return '?'.
#
result = '?'
if 'i1' == theType:
result = 'byte'
elif 'u1' == theType:
result = 'ubyte'
elif 'i2' == theType:
result = 'short'
elif 'u2' == theType:
result = 'ushort'
elif 'i4' == theType:
result = 'int'
elif 'u4' == theType:
result = 'uint'
elif 'f4' == theType:
result = 'float'
elif 'i8' == theType:
result = 'int64'
elif 'u8' == theType:
result = 'uint64'
elif 'f8' == theType:
result = 'double'
elif 'S1' == theType:
result = 'char'
elif 'S' == theType:
result = 'string'
elif 'U' == theType:
result = 'string'
# Return the result.
#
return result
def parseType(obj):
'''
Return a string representing the data type of the obj argument.
dtype [in] A dtype object.
returns A string.
'''
# Map the type. If the type is not found, return '?'.
#
result = '?'
if True == isinstance(obj, string_types):
result = ''
elif True == isinstance(obj, numpy.int8):
result = 'b'
elif True == isinstance(obj, numpy.uint8):
result = 'ub'
elif True == isinstance(obj, numpy.int16):
result = 's'
elif True == isinstance(obj, numpy.uint16):
result = 'us'
elif True == isinstance(obj, numpy.int32):
result = ''
elif True == isinstance(obj, numpy.uint32):
result = 'u'
elif True == isinstance(obj, numpy.int64):
result = 'll'
elif True == isinstance(obj, numpy.uint64):
result = 'ull'
elif True == isinstance(obj, numpy.float32):
result = 'f'
elif True == isinstance(obj, numpy.float64):
result = ''
elif True == isinstance(obj, numpy.ndarray):
result = parseType(obj[0])
# Return the result.
#
return result
def convertToStringHook(item, ignoreDicts = False):
'''
This function is passed to the json load function as an object hook. It
converts any string_types strings into ASCII strings.
item [in] An item passed in for processing.
ignoreDicts [in] If this is set to True ignore any dict objects passed in.
returns Items with any string_types strings converted to ASCII.
'''
# If this is a string_types string, convert it. If this is a list, convert any
# contained string_types strings. If this is a dict and it hasn't been converted
# already, convert any contained string_types strings. Otherwise, leave the item
# alone.
#
if isinstance(item, string_types):
if PY2:
result = item.encode('utf-8')
else:
result = item
elif isinstance(item, list):
result = [ convertToStringHook(element, True) for element in item ]
elif isinstance(item, dict) and not ignoreDicts:
result = { convertToStringHook(key, True) : convertToStringHook(value, True) for key, value in item.items() }
else:
result = item
# Return the possibly converted item.
#
return result
def loadAliasDict(aliasFilePath):
'''
Load an alias dictionary from a JSON file. This is a temporary workaround
until it is decided how to store the information in a netCDF-LD file. The
alias dictionary is a mapping of URIs to context prefixes and words. The
words will be found in variable and attribute names, and in words found in
specified attribute values. If the file path is None, create a stubbed-out
dictionary and return it.
aliasFilePath [in] The path to the JSON file containing the alias
definitions.
returns The loaded dictionary.
'''
# If the file path is None, create a stubbed-out dictionary.
#
if aliasFilePath is None:
aliasDict = { 'contexts' : {}, 'names' : {}, 'values' : {} }
else:
# Open the file to parse.
#
aliasFile = open(aliasFilePath)
# Parse the contained JSON.
#
aliasDict = json.load(aliasFile, object_hook = convertToStringHook)
# Return the dictionary.
#
return aliasDict
def makeURL(word, pattern):
'''
Create a URL from the word and pattern.
word [in] The word to build the URL around.
pattern [in] The URL pattern to reference.
returns A URL.
'''
# Insert the word into any replaceable part in the pattern.
#
theURL = pattern.format(word)
# Return the URL
#
return theURL
def resolveName(name, aliasDict):
'''
Determine if the name has a context part (the form is <context>__<name>).
If it does and the context alias exists, use it to build a URL.
If not, attempt to resolve the name into a URL using the names
part of the alias dictionary.
name [in] A name to attempt to resolve into a URL string.
aliasDict [in] A dictionary of URI patterns keyed by the elements they
replace.
returns A URL, or None if there was no resolution.
'''
# Start with the result equal to None.
#
result = None
# Split the name on '__'.
#
nameParts = name.split('__')
# Breakout context.
#
for _x in [0]:
# If there is a context part, attempt to use it.
#
if 2 == len(nameParts):
# Get the name and context parts.
#
contextPart = nameParts[0]
namePart = nameParts[1]
# If the context exists in the alias dictionary, create a URL
# string using the pattern for the context and the name part.
#
if contextPart in aliasDict['contexts']:
pattern = aliasDict['contexts'][contextPart]
result = makeURL(namePart, pattern)
break
# If the name exists in the alias dictionary, create a URL string
# using the pattern for the name.
#
if name in aliasDict['names']:
pattern = aliasDict['names'][name]
result = makeURL(name, pattern)
break
# Return the resolved URL if one was found.
#
return result
def resolveValue(name, value, aliasDict):
'''
Determine if the value associated with the name has an entry in the alias
dictionary. If it does, build a URL and return it.
name [in] A name associated with the value to attempt to resolve into
a URL.
value [in] A value to attempt to resolve into a URL.
aliasDict [in] A dictionary of URI patterns keyed by the elements they
replace.
returns A URL, or None if there was no resolution.
'''
# Start with the result equal to None.
#
result = None
# Breakout context
#
done = False
while False == done:
done = True
# If the value is not a string, get a string representation.
#
if False == isinstance(value, str) and False == isinstance(value, string_types):
value = str(value)
# If the value starts with 'http', interpret the entire string as a
# resolved URL.
#
if value[0:4] == 'http':
result = value
break
# Attempt to split the value on '__' to see if there is a context
# part to the value.
#
valueParts = value.split('__')
# If there is a context part, resolve the value as a name.
#
if 2 == len(valueParts):
result = resolveName(value, aliasDict)
break
# If the name exists in the alias dictionary, and if the value exists
# in the sub-dictionary for the name, create a URL using the pattern
# for the value. A wildcard (*) for a value key in the dictionary
# matches any value.
#
if name in aliasDict['values']:
subDict = aliasDict['values'][name]
pattern = None
if value in subDict:
pattern = subDict[value]
elif '*' in subDict:
pattern = subDict['*']
if pattern is not None:
result = makeURL(value, pattern)
break
# Return the resolved name if one was found.
#
return result
def parseAttributes(ncObj, aliasDict):
'''
Build a list of dictionaries for each netCDF attribute on the object.
ncObj [in] A netCDF object with attributes.
aliasDict [in] A dictionary of URI patterns keyed by the elements they
replace.
returns A list of dictionaries for each attribute.
'''
# Create the attribute list.
#
attrList = []
# Fill the list with dictionaries describing each attribute.
#
for attrName in ncObj.ncattrs():
# Get the value and type for the attribute.
#
attrValue = ncObj.getncattr(attrName)
attrType = parseType(attrValue)
# If the value is an array, make it a list.
#
if True == isinstance(attrValue, numpy.ndarray):
attrValue = list(attrValue)
# Get the URL (if any) for the attribute.
#
nameURL = resolveName(attrName, aliasDict)
# Get the URL (if any) for the value.
#
valueURL = resolveValue(attrName, attrValue, aliasDict)
# If the value is a string, wrap it in '"' characters.
#
if True == isinstance(attrValue, str) or True == isinstance(attrValue, string_types):
attrValue = '"' + str(attrValue) + '"'
valueEntry = { 'element' : attrValue }
if valueURL is not None:
valueEntry['url'] = valueURL
# Build the attribute entry. If there is a name URL add it.
#
attrEntry = {'name' : attrName, 'value' : valueEntry, 'type' : attrType}
if nameURL is not None:
attrEntry['url'] = nameURL
# Add the entry to the list.
#
attrList.append(attrEntry)
# Return the list.
#
return attrList
def parseGroup(ncObj, aliasDict):
'''
Build dimension, variable, and attribute lists for the group object.
ncObj [in] The netCDF4 group object to parse.
aliasDict [in] A dictionary of URI patterns keyed by the elements they
replace.
returns A nested set of dictionaries and lists describing the object
contents.
'''
# Create the top-level dictionary.
#
dataDict = {}
# If there are any dimensions, add and populate a dimensions entry.
#
dimList = []
try:
for dimName, dimObj in ncObj.dimensions.items():
dimEntry = {'name' : dimName }
if True == dimObj.isunlimited():
dimEntry['value'] = 'UNLIMITED'
dimEntry['comment'] = str(dimObj.size) + ' currently'
else:
dimEntry['value'] = str(dimObj.size)
dimList.append(dimEntry)
except:
pass
if 0 < len(dimList):
dataDict['dimensions'] = dimList
# If there are any variables, add and populate a variables entry.
#
varList = []
try:
for varName, varObj in ncObj.variables.items():
varType = parseDtype(varObj.dtype)
varEntry = {'name' : varName, 'type' : varType}
dimList = []
for dimName in varObj.dimensions:
dimSize = ncObj.dimensions[dimName].size
dimList.append(dimName)
if 0 < len(dimList):
varEntry['dimensions'] = dimList
# If the variable name is in the alias dictionary names section,
# get a URL for it and add it to the entry for the variable.
#
if varName in aliasDict['names']:
pattern = aliasDict['names'][varName]
theURL = makeURL(varName, pattern)
varEntry['url'] = theURL
# If there are any attributes add and populate an attributes
# entry.
#
attrList = parseAttributes(varObj, aliasDict)
if 0 < len(attrList):
varEntry['attributes'] = attrList
varList.append(varEntry)
except:
#type_, value_, traceback_ = sys.exc_info()
#tb = traceback.format_tb(traceback_)
pass
if 0 < len(varList):
dataDict['variables'] = varList
# If there are any group-level attributes, add and populate an attributes
# entry.
#
attrList = parseAttributes(ncObj, aliasDict)
if 0 < len(attrList):
dataDict['attributes'] = attrList
# Return the dictionary.
#
return dataDict
def parseDataset(ncObj, aliasDict):
'''
Build a set of group dictionaries for the netCDF4 Dataset object.
ncObj [in] The netCDF4 Dataset object to parse.
aliasDict [in] A dictionary of URI patterns keyed by the elements they
replace.
returns A nested set of dictionaries and lists describing the object
contents.
'''
# Parse the contents of the root group of the netCDF file. Add a groupName
# element and store it in a groups list.
#
groupList = []
groupEntry = parseGroup(ncObj, aliasDict)
groupEntry['groupName'] = 'global'
groupList.append(groupEntry)
# If there are any other groups, add them as well.
#
for groupName, groupObj in ncObj.groups.items():
groupEntry = parseGroup(groupObj, aliasDict)
groupEntry['groupName'] = groupName
groupList.append(groupEntry)
# Add the group list to a top-level dictionary.
#
dataDict = { 'groups' : groupList }
# Return the dictionary.
#
return dataDict
def ncldDump(inputFile, aliasFile, outputFile):
'''
Generate an HTML page from a netCDF-LD file. The page will contain CDL
describing the structure and contents of the file (without data values),
similar to the output of ncdump. Any elements that have associated linked
data will be presented as hot links that will open a new browser tab that
shows the linked contents.
inputFile [in] The netCDF-LD file to parse and display.
aliasFile [in] A JSON file with alias definitions. If the value is None,
no aliases are defined.
outputFile [in] The output file to write to.
'''
# Load the alias dictionary.
#
aliasDict = loadAliasDict(aliasFile)
# Get a netCDF4 dataset object from the input file and open it.
#
ncObj = netCDF4.Dataset(inputFile, 'r')
# Parse the contents into a dictionary.
#
ncDict = parseDataset(ncObj, aliasDict)
# Add a filePath entry.
#
ncDict['filePath'] = os.path.split(inputFile)[-1]
# Create a jinja environment and template object.
#
envObj = jinja2.Environment(loader = jinja2.FileSystemLoader('./'))
templateObj = envObj.get_template('ncldDump_template.html')
# Render the template with the contents of the dictionary.
#
result = templateObj.render(**ncDict)
# Open the output file and write the rendered template into it.
#
outObj = open(outputFile, 'w')
outObj.write(result)
outObj.close()
if __name__ == '__main__':
try:
argDict = parseArgs(sys.argv[1:])
ncldDump(**argDict)
except Exception as exc:
print(exc)
if 'pdb' not in sys.modules:
sys.exit(1)
| bsd-3-clause | 2,375,748,781,564,818,000 | 28.909091 | 156 | 0.574247 | false | 4.354994 | false | false | false |
quimaguirre/diana | diana/toolbox/selection_utilities.py | 1 | 3039 | from random import shuffle, randint
from itertools import combinations
def main():
return
def get_subsamples_at_ratio(values, n_fold=1000, ratio=0.1):
n = int(round(len(values) * float(ratio)))
#for i in range(n_fold):
# yield random_combination(values, n, n_fold=1)
return get_subsamples(values, n_fold, n)
def get_subsamples(scores, n_fold=10000, n_sample=1000):
for i in range(n_fold):
#if with_replacement:
# size = len(scores)-1
# selected = empty(n_sample)
# for i in range(n_sample):
# selected[i] = scores[randint(0,size)]
shuffle(scores)
selected = scores[:n_sample]
yield selected
return
def random_combination(nodes, n, r):
"Random selection r times from itertools.combinations(nodes, n)"
shuffle(nodes)
values = []
for i, combination in enumerate(combinations(nodes, n)):
if randint(0, n) == 0:
values.append(combination)
if len(values) >= r:
break
if len(values) < r:
raise ValueError("Not enough combinations!")
return values
def k_fold_cross_validation(X, K, randomize = False, replicable = None):
"""
By John Reid (code.activestate.com)
Generates K (training, validation) pairs from the items in X.
Each pair is a partition of X, where validation is an iterable
of length len(X)/K. So each training iterable is of length (K-1)*len(X)/K.
If randomise is true, a copy of X is shuffled before partitioning,
otherwise its order is preserved in training and validation.
If replicable is not None, this number is used to create the same random splits at each call
"""
#if randomize: from random import shuffle; X=list(X); shuffle(X)
if randomize:
from random import seed
X=list(X)
if replicable is not None:
seed(replicable)
shuffle(X)
for k in range(K):
training = [x for i, x in enumerate(X) if i % K != k]
validation = [x for i, x in enumerate(X) if i % K == k]
yield k+1, training, validation
return
def generate_samples_from_list_without_replacement(elements, sample_size, n_folds = None, replicable = None):
"""
Iteratively returns (yields) n_folds sublists of elements with a size of sample_size
n_folds: If None calculated to cover as much elements as possible
replicable: If not None uses this replicable as the seed for random
"""
from random import seed
if replicable is not None:
seed(replicable)
shuffle(elements)
if n_folds is None:
from math import ceil
#n_folds = len(elements) / sample_size
n_folds = int(ceil(float(len(elements)) / sample_size))
for i in range(n_folds):
if (i+1)*sample_size < len(elements):
yield elements[i*sample_size:(i+1)*sample_size]
else:
yield elements[i*sample_size:]
return
if __name__ == "__main__":
main()
| mit | 7,498,710,034,029,366,000 | 31.677419 | 109 | 0.622244 | false | 3.701583 | false | false | false |
hackersql/sq1map | thirdparty/beautifulsoup/beautifulsoup.py | 1 | 77817 | #!/usr/bin/env python
#coding=utf-8
"""
Beautiful Soup 是一个可以从HTML或XML文件中提取数据的Python库.
它能够通过你喜欢的转换器实现惯用的文档导航,查找,修改文档的方式.Beautiful Soup会帮你节省数小时甚至数天的工作时间.
"""
from __future__ import generators
__author__ = "Leonard Richardson ([email protected])"
__version__ = "3.2.1"
__copyright__ = "Copyright (c) 2004-2012 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
# These hacks make Beautiful Soup able to parse XML with namespaces.
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""构建一个RE正则表达式以匹配给定的CSS类"""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
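# Example (illustrative): _match_css_class('nav') matches 'nav' and
# 'top nav' but not 'navbar' -- the class must appear as a whole
# whitespace-delimited word in the attribute value.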
# First, the classes that represent markup elements.
class PageElement(object):
"""
包含页面某些部分的导航信息(标签或文本)
"""
def _invert(h):
        # Invert a dict: swap keys and values.
i = {}
for k,v in h.items():
i[v] = k
return i
"""
XML实体中不允许出现"&","<",">"等特殊字符,否则XML语法检查时将出错,如果编写的XML文件必须包含这些字符,
则必须分别写成"&","<",">"再写入文件中。
例如,如果在XML文档中使用类似"<" 的字符, 那么解析器将会出现错误,因为解析器会认为这是一个新元素的开始。所以不应该像下面那样书写代码:
<age> age < 30 </age>
为了避免出现这种情况,必须将字符"<" 转换成实体,像下面这样:
<age> age < 30 </age>
在读取的时候,解析器会自动将其转换回"&","<",">"等特殊字符,正常来说,只有"<" 字符和"&"字符对于XML来说是严格禁止使用的。
需要注意的是:
a. 转义序列各字符间不能有空格;
b. 转义序列必须以";"结束;
c. 单独的&不被认为是转义开始;
d. 区分大小写。
以下是XML中需要的转义字符:[/size]
&(逻辑与) &
<(小于) <
>(大于) >
"(双引号) "
'(单引号) ' [/size]
"""
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def setup(self, parent=None, previous=None):
"""
设置此元素和其他元素之间的初始关系
"""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def replaceWith(self, replaceWith):
oldParent = self.parent
myIndex = self.parent.index(self)
if hasattr(replaceWith, "parent")\
and replaceWith.parent is self.parent:
# We're replacing this element with one of its siblings.
index = replaceWith.parent.index(replaceWith)
if index and index < myIndex:
# Furthermore, it comes before this element. That
# means that when we extract it, the index of this
# element will change.
myIndex = myIndex - 1
self.extract()
oldParent.insert(myIndex, replaceWith)
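    # Usage sketch (illustrative, assuming `soup` is an already-parsed
    # document): soup.find('b').replaceWith('plain text') swaps the <b>
    # element for a NavigableString, in place.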
def replaceWithChildren(self):
myParent = self.parent
myIndex = self.parent.index(self)
self.extract()
reversedChildren = list(self.contents)
reversedChildren.reverse()
for child in reversedChildren:
myParent.insert(myIndex, child)
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
del self.parent.contents[self.parent.index(self)]
except ValueError:
pass
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
return self
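    # Usage sketch (illustrative): tag = soup.find('i'); tag.extract()
    # detaches the <i> subtree from the document and returns it, so it
    # can be re-inserted elsewhere.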
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
def insert(self, position, newChild):
if isinstance(newChild, basestring) \
and not isinstance(newChild, NavigableString):
newChild = NavigableString(newChild)
position = min(position, len(self.contents))
if hasattr(newChild, 'parent') and newChild.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if newChild.parent is self:
index = self.index(newChild)
if index > position:
# Furthermore we're moving it further down the
# list of this object's children. That means that
# when we extract this element, our target index
# will jump down one.
position = position - 1
newChild.extract()
newChild.parent = self
previousChild = None
if position == 0:
newChild.previousSibling = None
newChild.previous = self
else:
previousChild = self.contents[position-1]
newChild.previousSibling = previousChild
newChild.previousSibling.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
if newChild.previous:
newChild.previous.next = newChild
newChildsLastElement = newChild._lastRecursiveChild()
if position >= len(self.contents):
newChild.nextSibling = None
parent = self
parentsNextSibling = None
while not parentsNextSibling:
parentsNextSibling = parent.nextSibling
parent = parent.parent
if not parent: # This is the last element in the document.
break
if parentsNextSibling:
newChildsLastElement.next = parentsNextSibling
else:
newChildsLastElement.next = None
else:
nextChild = self.contents[position]
newChild.nextSibling = nextChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
newChildsLastElement.next = nextChild
if newChildsLastElement.next:
newChildsLastElement.next.previous = newChildsLastElement
self.contents.insert(position, newChild)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
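    # Usage sketch (illustrative, for an HTML soup): given
    # new_p = Tag(soup, 'p'), soup.body.append(new_p) adds the new <p>
    # element as the last child of <body>.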
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs)
fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
# (Possibly) special case some findAll*(...) searches
elif text is None and not limit and not attrs and not kwargs:
# findAll*(True)
if name is True:
return [element for element in generator()
if isinstance(element, Tag)]
# findAll*('tag-name')
elif isinstance(name, basestring):
return [element for element in generator()
if isinstance(element, Tag) and
element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
# Build a SoupStrainer
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
i = self
while i is not None:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i is not None:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i is not None:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i is not None:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i is not None:
i = i.parent
yield i
# Utility methods
def substituteEncoding(self, str, encoding=None):
encoding = encoding or "utf-8"
return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s
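    # Example (illustrative): toEncoding(u'caf\xe9', 'utf-8') returns the
    # UTF-8 byte string 'caf\xc3\xa9'; with encoding=None the result is
    # coerced to Unicode instead.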
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
class NavigableString(unicode, PageElement):
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __getnewargs__(self):
return (NavigableString.__str__(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
def __unicode__(self):
return str(self).decode(DEFAULT_OUTPUT_ENCODING)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
# Substitute outgoing XML entities.
data = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, self)
if encoding:
return data.encode(encoding)
else:
return data
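    # Example (illustrative): str(NavigableString(u'AT&T <3')) yields
    # 'AT&amp;T &lt;3' -- bare ampersands and angle brackets are escaped
    # to XML entities on output, while existing entities are left alone.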
class CData(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
output = self
if "%SOUP-ENCODING%" in output:
output = self.substituteEncoding(output, encoding)
return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
try:
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
except ValueError: # e.g. ValueError: unichr() arg not in range(0x10000)
pass
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs is None:
attrs = []
elif isinstance(attrs, dict):
attrs = attrs.items()
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def getString(self):
if (len(self.contents) == 1
and isinstance(self.contents[0], NavigableString)):
return self.contents[0]
def setString(self, string):
"""Replace the contents of the tag with a string"""
self.clear()
self.append(string)
string = property(getString, setString)
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
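    # Example (illustrative): for the markup '<p>one<b>two</b></p>', the
    # <p> tag's .text is u'onetwo', and getText(separator=u' ') returns
    # u'one two'.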
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def clear(self):
"""Extract all children."""
for child in self.contents[:]:
child.extract()
def index(self, element):
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
    # has_key is equivalent to the 'in' operator.
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""
设置tag[key]设置tag的'key'属性的值
"""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in xrange(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if other is self:
return True
        if (not hasattr(other, 'name') or not hasattr(other, 'attrs') or
                not hasattr(other, 'contents') or self.name != other.name or
                self.attrs != other.attrs or len(self) != len(other)):
            return False
for i in xrange(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isinstance(val, basestring):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
                            # Escape embedded single quotes to the
                            # XML 'apos' entity.
                            val = val.replace("'", "&apos;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
if len(self.contents) == 0:
return
current = self.contents[0]
while current is not None:
next = current.next
if isinstance(current, Tag):
del current.contents[:]
current.parent = None
current.previous = None
current.previousSibling = None
current.next = None
current.nextSibling = None
current = next
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
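    # Usage sketch (illustrative): soup.findAll('a', {'class': 'external'},
    # limit=2) returns at most two <a> Tags whose class attribute is
    # exactly 'external'; soup('a') is shorthand for soup.findAll('a').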
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
    # Private methods
def _getAttrMap(self):
"""
如果尚未初始化,则初始化此标记属性的映射表示。
"""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
# Just use the iterator from the contents
return iter(self.contents)
def recursiveChildGenerator(self):
if not len(self.contents):
raise StopIteration
stopNode = self._lastRecursiveChild().next
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = name
if isinstance(attrs, basestring):
kwargs['class'] = _match_css_class(attrs)
attrs = None
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
self.attrs = attrs
self.text = text
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def searchTag(self, markupName=None, markupAttrs={}):
found = None
markup = None
if isinstance(markupName, Tag):
markup = markupName
markupAttrs = markup
callFunctionWithTagData = callable(self.name) \
and not isinstance(markupName, Tag)
if (not self.name) \
or callFunctionWithTagData \
or (markup and self._matches(markup, self.name)) \
or (not markup and self._matches(markupName, self.name)):
if callFunctionWithTagData:
match = self.name(markupName, markupAttrs)
else:
match = True
markupAttrMap = None
for attr, matchAgainst in self.attrs.items():
if not markupAttrMap:
if hasattr(markupAttrs, 'get'):
markupAttrMap = markupAttrs
else:
markupAttrMap = {}
for k,v in markupAttrs:
markupAttrMap[k] = v
attrValue = markupAttrMap.get(attr)
if not self._matches(attrValue, matchAgainst):
match = False
break
if match:
if markup:
found = markup
else:
found = markupName
return found
def search(self, markup):
#print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, "__iter__") \
and not isinstance(markup, Tag):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text:
found = self.searchTag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if self._matches(markup, self.text):
found = markup
else:
raise Exception, "I don't know how to match against a %s" \
% markup.__class__
return found
def _matches(self, markup, matchAgainst):
#print "Matching %s against %s" % (markup, matchAgainst)
result = False
if matchAgainst is True:
result = markup is not None
elif callable(matchAgainst):
result = matchAgainst(markup)
else:
#Custom match methods take the tag as an argument, but all
#other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
if markup and not isinstance(markup, basestring):
markup = unicode(markup)
#Now we know that chunk is either a string, or None.
if hasattr(matchAgainst, 'match'):
# It's a regexp object.
result = markup and matchAgainst.search(markup)
elif hasattr(matchAgainst, '__iter__'): # list-like
result = markup in matchAgainst
elif hasattr(matchAgainst, 'items'):
result = markup.has_key(matchAgainst)
elif matchAgainst and isinstance(markup, basestring):
if isinstance(markup, unicode):
matchAgainst = unicode(matchAgainst)
else:
matchAgainst = str(matchAgainst)
if not result:
result = matchAgainst == markup
return result
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__([])
self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
"""
将dict{},list[]或tuple()转换为单个dict。
将SELF_CLOSING_TAGS,NESTABLE_TAGS和NESTING_RESET_TAGS转换为dict(k, v)的形式
arg1:default参数设置dict(k, v)中v的默认值
arg2:变长参数,接收多个参数
"""
    # Start with an empty dict.
    built = {}
    for portion in args:
        if hasattr(portion, 'items'): # an 'items' attribute means a dict.
            # Merge the dict into the result.
            for k,v in portion.items():
                built[k] = v
        elif hasattr(portion, '__iter__'): # '__iter__' means a list or tuple.
            for k in portion:
                built[k] = default
        else:
            # Neither a dict nor a list/tuple: treat it as a single
            # scalar key (e.g. a tag name).
            built[portion] = default
return built
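# Example (illustrative):
#     buildTagMap(None, {'a': 1}, ['b', 'c'], 'd')
# returns {'a': 1, 'b': None, 'c': None, 'd': None}.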
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None, isHTML=False):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
# Set the rules for how we'll deal with the entities we
# encounter
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if convertEntities == self.HTML_ENTITIES:
self.convertXMLEntities = False
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = True
elif convertEntities == self.XHTML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = False
elif convertEntities == self.XML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
else:
self.convertXMLEntities = False
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed(isHTML=isHTML)
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
        if not 0 <= n <= 127: # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
def _feed(self, inDocumentEncoding=None, isHTML=False):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
if markup:
if self.markupMassage:
if not hasattr(self.markupMassage, "__iter__"):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
# TODO: We get rid of markupMassage so that the
# soup object can be deepcopied later on. Some
# Python installations can't copy regexes. If anyone
# was relying on the existence of markupMassage, this
# might cause problems.
del(self.markupMassage)
self.reset()
SGMLParser.feed(self, markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.startswith('start_') or methodName.startswith('end_') \
or methodName.startswith('do_'):
return SGMLParser.__getattr__(self, methodName)
elif not methodName.startswith('__'):
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = u''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.PRESERVE_WHITESPACE_TAGS)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in xrange(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in xrange(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
<p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in xrange(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurrence.
popTo = name
break
if (nestingResetTriggers is not None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers is None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML and/or XML entity references to the corresponding Unicode
characters."""
data = None
if self.convertHTMLEntities:
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data and self.convertXMLEntities:
data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
if not data and self.convertHTMLEntities and \
not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
# TODO: We've got a problem here. We're told this is
# an entity reference, but it's not an XML entity
# reference or an HTML entity reference. Nonetheless,
# the logical thing to do is to pass it through as an
# unrecognized entity reference.
#
# Except: when the input is "&carol;" this function
# will be called with input "carol". When the input is
# "AT&T", this function will be called with input
# "T". We have no way of knowing whether a semicolon
# was present originally, so we don't know whether
# this is an unknown entity or just a misplaced
# ampersand.
#
# The more common case is a misplaced ampersand, so I
# escape the ampersand and omit the trailing semicolon.
data = "&%s" % ref
if not data:
# This case is different from the one above, because we
# haven't already gone through a supposedly comprehensive
# mapping of entities to Unicode characters. We might not
# have gone through any mapping at all. So the chances are
# very high that this is a real entity, and not a
# misplaced ampersand.
data = "&%s;" % ref
self.handle_data(data)
def handle_decl(self, data):
"Handle DOCTYPEs and the like as Declaration objects."
self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
"""
A BeautifulSoup object represents the entire contents of a document.
Most of the time you can treat it as a Tag object: it supports most of
the methods described in "Navigating the tree" and "Searching the tree".
This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (i.e. "script") may contain tags which are
not really part of the document and which should be parsed as text,
not tags. If you want to parse the text as tags, you can always fetch
it and parse it explicitly.
* Tag nesting rules:
Most tags can't be nested at all. For instance, the occurrence of a <p>
tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
Some tags can be nested arbitrarily. For instance, the occurrence of a
<blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the interposition
of other tags. For instance, a <tr> tag should implicitly close the
previous <tr> tag within the same <table>, but not close a <tr> tag in
another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source of
problems with the BeautifulSoup class. If BeautifulSoup is not treating
as nestable a tag your page author treats as nestable, try
ICantBelieveItsBeautifulSoup, MinimalSoup, or BeautifulStoneSoup before
writing your own subclass.
"""
def __init__(self, *args, **kwargs):
if not kwargs.has_key('smartQuotesTo'):
kwargs['smartQuotesTo'] = self.HTML_ENTITIES
kwargs['isHTML'] = True
BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
('br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base', 'col'))
PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
QUOTE_TAGS = {'script' : None, 'textarea' : None}
# According to the HTML standard, each of these inline tags can contain
# another tag of the same type. Furthermore, it's common practice to
# actually use these tags this way.
NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center')
# According to the HTML standard, these block tags can contain another
# tag of the same type. Furthermore, it's common practice to actually
# use these tags this way.
NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
# Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
# Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
'thead' : ['table'],
'tbody' : ['table'],
'tfoot' : ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
# If one of these tags is encountered, all tags up to the next tag of
# this type are popped. (Tags that reset nesting.)
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def start_meta(self, attrs):
"""
Beautiful Soup can detect a charset included in a META tag, try to
convert the document to that charset, and re-parse the document from
the beginning.
"""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in xrange(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting the document to
# Unicode, or an HTML encoding was sniffed during a previous pass
# through the document, or an encoding was specified explicitly
# and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
pass
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True
class StopParsing(Exception):
pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
"""
The BeautifulSoup class is oriented towards skipping over common HTML
errors like unclosed tags. However, sometimes it makes errors of its
own. For instance, consider this fragment:
<b>Foo<b>Bar</b></b>
This is perfectly valid (if bizarre) HTML. However, the
BeautifulSoup class will implicitly close the first b tag when it
encounters the second 'b'. It will think the author wrote
"<b>Foo<b>Bar", and didn't close the first 'b' tag, because
there's no real-world reason to bold something that's already
bold. When it encounters '</b></b>' it will close two more 'b'
tags, for a grand total of three tags closed instead of two. This
can throw off the rest of your document structure. The same is
true of a number of other tags, listed below.
It's much more common for someone to forget to close a 'b' tag
than to actually use nested 'b' tags, and the BeautifulSoup class
handles the common case. This class handles the not-so-common
case: where you can't believe someone wrote what they did, but
it's valid HTML and BeautifulSoup screwed up by assuming it
wouldn't be."""
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
'big')
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
"""The MinimalSoup class is for parsing HTML that contains
pathologically bad markup. It makes no assumptions about tag
nesting, but it does know which tags are self-closing, that
<script> tags contain Javascript and should not be parsed, that
META tags may contain encoding information, and so on.
This also makes it better for subclassing than BeautifulStoneSoup
or BeautifulSoup."""
RESET_NESTING_TAGS = buildTagMap('noscript')
NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
"""This class will push a tag with only a single string child into
the tag's parent as an attribute. The attribute's name is the tag
name, and the value is the string child. An example should give
the flavor of the change:
<foo><bar>baz</bar></foo>
=>
<foo bar="baz"><bar>baz</bar></foo>
You can then access fooTag['bar'] instead of fooTag.barTag.string.
This is, of course, useful for scraping structures that tend to
use subelements instead of attributes, such as SOAP messages. Note
that it modifies its input, so don't print the modified version
out.
I'm not sure how many people really want to use this class; let me
know if you do. Mainly I like the name."""
def popTag(self):
if len(self.tagStack) > 1:
tag = self.tagStack[-1]
parent = self.tagStack[-2]
parent._getAttrMap()
if (isinstance(tag, Tag) and len(tag.contents) == 1 and
isinstance(tag.contents[0], NavigableString) and
not parent.attrMap.has_key(tag.name)):
parent[tag.name] = tag.contents[0]
BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
pass
class RobustHTMLParser(BeautifulSoup):
pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml', isHTML=False):
self.declaredHTMLEncoding = None
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup, isHTML)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if markup == '' or isinstance(markup, unicode):
self.originalEncoding = None
self.unicode = unicode(markup)
return
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if isinstance(sub, tuple):
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if self.smartQuotesTo and proposed.lower() in("windows-1252",
"iso-8859-1",
"iso-8859-2"):
markup = re.compile("([\x80-\x9f])").sub \
(lambda(x): self._subMSChar(x.group(1)),
markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, xrange(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
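# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, hedged example of the UnicodeDammit helper defined above,
# assuming this module is importable as `BeautifulSoup` under Python 2.
#
# from BeautifulSoup import UnicodeDammit
# dammit = UnicodeDammit('Sacr\xc3\xa9 bleu!')   # UTF-8 bytes in
# print dammit.unicode                           # u'Sacr\xe9 bleu!'
# print dammit.originalEncoding                  # 'utf-8'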
| gpl-3.0 | -5,230,023,785,679,473,000 | 37.157708 | 186 | 0.555863 | false | 3.795561 | false | false | false |
eblossom/gnuradio | grc/python/Block.py | 1 | 12322 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import itertools
import collections
from .. base.Constants import BLOCK_FLAG_NEED_QT_GUI, BLOCK_FLAG_NEED_WX_GUI
from .. base.odict import odict
from .. base.Block import Block as _Block
from .. gui.Block import Block as _GUIBlock
from . FlowGraph import _variable_matcher
from . import epy_block_io
class Block(_Block, _GUIBlock):
def __init__(self, flow_graph, n):
"""
Make a new block from nested data.
Args:
flow_graph: the parent element
n: the nested odict
Returns:
block a new block
"""
#grab the data
self._doc = (n.find('doc') or '').strip('\n').replace('\\\n', '')
self._imports = map(lambda i: i.strip(), n.findall('import'))
self._make = n.find('make')
self._var_make = n.find('var_make')
self._checks = n.findall('check')
self._callbacks = n.findall('callback')
self._bus_structure_source = n.find('bus_structure_source') or ''
self._bus_structure_sink = n.find('bus_structure_sink') or ''
self.port_counters = [itertools.count(), itertools.count()]
#build the block
_Block.__init__(
self,
flow_graph=flow_graph,
n=n,
)
_GUIBlock.__init__(self)
self._epy_source_hash = -1 # for epy blocks
self._epy_reload_error = None
def get_bus_structure(self, direction):
if direction == 'source':
bus_structure = self._bus_structure_source;
else:
bus_structure = self._bus_structure_sink;
bus_structure = self.resolve_dependencies(bus_structure);
if not bus_structure: return ''
try:
clean_bus_structure = self.get_parent().evaluate(bus_structure)
return clean_bus_structure
except: return ''
def validate(self):
"""
Validate this block.
Call the base class validate.
Evaluate the checks: each check must evaluate to True.
"""
_Block.validate(self)
#evaluate the checks
for check in self._checks:
check_res = self.resolve_dependencies(check)
try:
if not self.get_parent().evaluate(check_res):
self.add_error_message('Check "%s" failed.'%check)
except: self.add_error_message('Check "%s" did not evaluate.'%check)
# for variables check the value (only if var_value is used)
if _variable_matcher.match(self.get_key()) and self._var_value != '$value':
value = self._var_value
try:
value = self.get_var_value()
self.get_parent().evaluate(value)
except Exception as err:
self.add_error_message('Value "%s" cannot be evaluated:\n%s' % (value, err))
# check if this is a GUI block and matches the selected generate option
current_generate_option = self.get_parent().get_option('generate_options')
def check_generate_mode(label, flag, valid_options):
block_requires_mode = (
flag in self.get_flags() or
self.get_name().upper().startswith(label)
)
if block_requires_mode and current_generate_option not in valid_options:
self.add_error_message("Can't generate this block in mode " +
repr(current_generate_option))
check_generate_mode('WX GUI', BLOCK_FLAG_NEED_WX_GUI, ('wx_gui',))
check_generate_mode('QT GUI', BLOCK_FLAG_NEED_QT_GUI, ('qt_gui', 'hb_qt_gui'))
if self._epy_reload_error:
self.get_param('_source_code').add_error_message(str(self._epy_reload_error))
def rewrite(self):
"""
Add and remove ports to adjust for the nports.
"""
_Block.rewrite(self)
# Check and run any custom rewrite function for this block
getattr(self, 'rewrite_' + self._key, lambda: None)()
# adjust nports, disconnect hidden ports
for ports in (self.get_sources(), self.get_sinks()):
for i, master_port in enumerate(ports):
nports = master_port.get_nports() or 1
num_ports = 1 + len(master_port.get_clones())
if master_port.get_hide():
for connection in master_port.get_connections():
self.get_parent().remove_element(connection)
if not nports and num_ports == 1: # not a master port and no left-over clones
continue
# remove excess cloned ports
for port in master_port.get_clones()[nports-1:]:
# remove excess connections
for connection in port.get_connections():
self.get_parent().remove_element(connection)
master_port.remove_clone(port)
ports.remove(port)
# add more cloned ports
for j in range(num_ports, nports):
port = master_port.add_clone()
ports.insert(ports.index(master_port) + j, port)
self.back_ofthe_bus(ports)
# renumber non-message/-msg ports
domain_specific_port_index = collections.defaultdict(int)
for port in filter(lambda p: p.get_key().isdigit(), ports):
domain = port.get_domain()
port._key = str(domain_specific_port_index[domain])
domain_specific_port_index[domain] += 1
def port_controller_modify(self, direction):
"""
Change the port controller.
Args:
direction: +1 or -1
Returns:
true for change
"""
changed = False
#concat the nports string from the private nports settings of all ports
nports_str = ' '.join([port._nports for port in self.get_ports()])
#modify all params whose keys appear in the nports string
for param in self.get_params():
if param.is_enum() or param.get_key() not in nports_str: continue
#try to increment the port controller by direction
try:
value = param.get_evaluated()
value = value + direction
if 0 < value:
param.set_value(value)
changed = True
except: pass
return changed
def get_doc(self):
platform = self.get_parent().get_parent()
extracted_docs = platform.block_docstrings.get(self._key, '')
return (self._doc + '\n\n' + extracted_docs).strip()
def get_category(self):
return _Block.get_category(self)
def get_imports(self, raw=False):
"""
Resolve all import statements.
Split each import statement at newlines.
Combine all import statments into a list.
Filter empty imports.
Returns:
a list of import statements
"""
if raw:
return self._imports
return filter(lambda i: i, sum(map(lambda i: self.resolve_dependencies(i).split('\n'), self._imports), []))
def get_make(self, raw=False):
if raw:
return self._make
return self.resolve_dependencies(self._make)
def get_var_make(self):
return self.resolve_dependencies(self._var_make)
def get_var_value(self):
return self.resolve_dependencies(self._var_value)
def get_callbacks(self):
"""
Get a list of function callbacks for this block.
Returns:
a list of strings
"""
def make_callback(callback):
callback = self.resolve_dependencies(callback)
if 'self.' in callback: return callback
return 'self.%s.%s'%(self.get_id(), callback)
return map(make_callback, self._callbacks)
def is_virtual_sink(self):
return self.get_key() == 'virtual_sink'
def is_virtual_source(self):
return self.get_key() == 'virtual_source'
###########################################################################
# Custom rewrite functions
###########################################################################
def rewrite_epy_block(self):
flowgraph = self.get_parent()
platform = flowgraph.get_parent()
param_blk = self.get_param('_io_cache')
param_src = self.get_param('_source_code')
src = param_src.get_value()
src_hash = hash(src)
if src_hash == self._epy_source_hash:
return
try:
blk_io = epy_block_io.extract(src)
except Exception as e:
self._epy_reload_error = ValueError(str(e))
try: # load last working block io
blk_io = epy_block_io.BlockIO(*eval(param_blk.get_value()))
except:
return
else:
self._epy_reload_error = None # clear previous errors
param_blk.set_value(repr(tuple(blk_io)))
# print "Rewriting embedded python block {!r}".format(self.get_id())
self._epy_source_hash = src_hash
self._name = blk_io.name or blk_io.cls
self._doc = blk_io.doc
self._imports[0] = 'from {} import {}'.format(self.get_id(), blk_io.cls)
self._make = '{}({})'.format(blk_io.cls, ', '.join(
'{0}=${0}'.format(key) for key, _ in blk_io.params))
params = dict()
for param in list(self._params):
if hasattr(param, '__epy_param__'):
params[param.get_key()] = param
self._params.remove(param)
for key, value in blk_io.params:
if key in params:
param = params[key]
if not param.value_is_default():
param.set_value(value)
else:
name = key.replace('_', ' ').title()
n = odict(dict(name=name, key=key, type='raw', value=value))
param = platform.Param(block=self, n=n)
setattr(param, '__epy_param__', True)
self._params.append(param)
def update_ports(label, ports, port_specs, direction):
ports_to_remove = list(ports)
iter_ports = iter(ports)
ports_new = list()
port_current = next(iter_ports, None)
for key, port_type in port_specs:
reuse_port = (
port_current is not None and
port_current.get_type() == port_type and
(key.isdigit() or port_current.get_key() == key)
)
if reuse_port:
ports_to_remove.remove(port_current)
port, port_current = port_current, next(iter_ports, None)
else:
n = odict(dict(name=label + str(key), type=port_type, key=key))
port = platform.Port(block=self, n=n, dir=direction)
ports_new.append(port)
# replace old port list with new one
del ports[:]
ports.extend(ports_new)
# remove excess port connections
for port in ports_to_remove:
for connection in port.get_connections():
flowgraph.remove_element(connection)
update_ports('in', self.get_sinks(), blk_io.sinks, 'sink')
update_ports('out', self.get_sources(), blk_io.sources, 'source')
_Block.rewrite(self)
| gpl-3.0 | -8,642,736,465,189,697,000 | 37.386293 | 115 | 0.557296 | false | 4.176949 | false | false | false |
lemonad/my-django-skeleton | djangoproject/settings.py | 1 | 3240 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import os
import posix
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
# logging.basicConfig(
# level = logging.DEBUG,
# format = '%(asctime)s %(levelname)s %(message)s',
# filename = '/tmp/djangoproject.log',
# filemode = 'w'
# )
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '/')
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'djangoproject.db'
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
TIME_ZONE = 'Europe/Stockholm'
# LANGUAGE_CODE = 'sv-SE'
LANGUAGE_CODE = 'en-US'
ugettext = lambda s: s
LANGUAGES = (
('en', ugettext('English')),
('sv', ugettext('Swedish')),
)
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin_media/'
# Don't share this with anybody.
SECRET_KEY = 'ChangeThisKeyToSomethingCompletelyDifferent'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'djangoflash.middleware.FlashMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'djangoproject.urls'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'djangoproject.exampleapp',
)
TEMPLATE_CONTEXT_PROCESSORS = TEMPLATE_CONTEXT_PROCESSORS + (
'djangoflash.context_processors.flash',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), "exampleapp/fixtures"),
os.path.join(os.path.dirname(__file__), "templates"),
)
SITE_ID = 1
DEFAULT_FROM_EMAIL = "[email protected]"
SERVER_EMAIL = "[email protected]"
EMAIL_SUBJECT_PREFIX = "Django: "
# The below LOGIN_URL and LOGOUT_URL doesn't seem to be used except
# when unit testing views.
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/logout/'
| bsd-2-clause | 7,324,122,328,674,378,000 | 28.724771 | 101 | 0.695988 | false | 3.432203 | false | false | false |
askcom/pybigip | pybigip/gtm.py | 1 | 5269 | '''
GTM Interfaces.
Example (Disable datacenter for a distributed application):
>>> con = pybigip.Connection('gtm.example.company', 'admin', 'foobarbaz')
>>> myapp = pybigip.gtm.Application(con, '/Common/myapp')
>>> myapp.datacenters['/Common/SFO'].enabled = False
'''
import itertools
from pybigip import core
class Applications(core.ObjectList):
'''
Access Distributed Applications.
'''
def __init__(self, con):
'''
'''
super(Applications, self).__init__(con)
self._lcon = self._con.GlobalLB.Application
def load(self, names):
'''
Override parent load method to preload Application datacenter status
data.
'''
ret = list()
app_dcs = self._lcon.get_data_centers(names)
app_desc = self._lcon.get_description(names)
for app, dcs, desc in itertools.izip(names, app_dcs, app_desc):
app_obj = Application(self._con, app)
app_obj._dcs = dict(((dc, Datacenter(app_obj, dc)) for dc in dcs))
app_obj._description = desc
ret.append(app_obj)
return ret
class Application(object):
'''
A Distributed Application.
'''
_description = None
_wips = None
def __init__(self, con, name, dcs=None):
'''
'''
self._con = con
self._lcon = self._con.GlobalLB.Application
self.name = name
self._dcs = dcs
def get_ctx(self, name, type):
'''
Get application object context status.
@param name: Object name
@param type: Object type
@return: dict containing object context status information.
'''
ctx = {
'application_name': self.name,
'object_name': name,
'object_type': type
}
return self._lcon.get_application_context_status([ctx])[0]
def enable_ctx(self, name, type):
'''
Enable an application object context.
@param name: Object name
@param type: Object type
'''
ctx = {
'application_name': self.name,
'object_name': name,
'object_type': type
}
self._lcon.enable_application_context_object([ctx])
def disable_ctx(self, name, type):
'''
Disable an application object context.
@param name: Object name
@param type: Object type
'''
ctx = {
'application_name': self.name,
'object_name': name,
'object_type': type
}
self._lcon.disable_application_context_object([ctx])
@property
def description(self):
'''
Lazy load application description value.
@return: application description from the bigip.
'''
if not self._description:
self._description = self._lcon.get_description([self.name])[0]
return self._description
@property
def datacenters(self):
'''
Lazy load application datacenter list.
@return: List of L{Datacenter} objects for this application.
'''
if not self._dcs:
dcs = self._lcon.get_data_centers([self.name])[0]
self._dcs = dict(((dc, Datacenter(self, dc)) for dc in dcs))
return self._dcs
def status(self):
'''
'''
return self._lcon.get_object_status([self.name])[0]
@property
def wips(self):
'''
'''
if not self._wips:
self._wips = self._lcon.get_wide_ips([self.name])[0]
return self._wips
class Datacenter(object):
'''
Application datacenter context object.
'''
_status = None
def __init__(self, app, name):
'''
@param app: Containing application
@param name: Datacenter name
'''
self._app = app
self.name = name
def enable(self):
'''
Enable this datacenter by enabling the corresponding application
context object in the Application.
'''
self._app.enable_ctx(self.name,
'APPLICATION_OBJECT_TYPE_DATACENTER')
def disable(self):
'''
Disable this datacenter by disabling the corresponding application
context object in the Application.
'''
self._app.disable_ctx(self.name,
'APPLICATION_OBJECT_TYPE_DATACENTER')
def toggle(self):
'''
Toggle enabled status
'''
self.enabled = not self.enabled
def status(self):
'''
Get status information for this datacenter.
'''
return self._app.get_ctx(self.name,
'APPLICATION_OBJECT_TYPE_DATACENTER')
@property
def enabled(self):
'''
@return: bool representation of datacenter enabled status.
'''
return self.status()['enabled_status'] == 'ENABLED_STATUS_ENABLED'
@enabled.setter
def enabled(self, value):
'''
Write property to allow setting the enable status for this datacenter.
@param value: new enabled state; truthy enables, falsy disables.
'''
value = bool(value)
if value:
self.enable()
else:
self.disable()
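# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example tying the classes above together; it assumes a reachable
# BigIP, valid credentials, and that `pybigip.Connection` is available as in
# the module docstring. The host and object names are hypothetical.
#
# con = pybigip.Connection('gtm.example.company', 'admin', 'foobarbaz')
# myapp = Application(con, '/Common/myapp')
# sfo = myapp.datacenters['/Common/SFO']
# sfo.toggle()          # flip the enabled state via the property setter
# print(sfo.status())   # raw application-context status dict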
| apache-2.0 | 6,623,435,594,905,670,000 | 24.955665 | 78 | 0.547163 | false | 4.318852 | false | false | false |
mjoblin/neotiles | neotiles/matrixes/__init__.py | 1 | 7239 | try:
from neopixel import Adafruit_NeoPixel, ws
DEFAULT_STRIP_TYPE = ws.WS2811_STRIP_GRB
except ImportError:
DEFAULT_STRIP_TYPE = None
try:
from rgbmatrix import RGBMatrix, RGBMatrixOptions
except ImportError:
pass
from neotiles import MatrixSize
from neotiles.exceptions import NeoTilesError
__all__ = ['NTMatrix', 'NTNeoPixelMatrix', 'NTRGBMatrix']
class NTMatrix(object):
"""
Base class for the Neotiles Matrix interface.
"""
def __init__(self):
self._size = None
self._brightness = None
def setPixelColor(self, x, y, color):
raise NotImplementedError
def show(self):
raise NotImplementedError
@property
def brightness(self):
return self._brightness
@brightness.setter
def brightness(self, val):
self._brightness = val
@property
def size(self):
return self._size
class NTNeoPixelMatrix(NTMatrix):
"""
Represents a NeoPixel matrix.
You must specify a ``size`` matching your neopixel matrix (e.g. ``(8, 8)``)
as well as the ``led_pin`` you're using to talk to it (e.g. ``18``). The
other parameters can usually be left at their defaults. For more
information on the other parameters look at the ``Adafruit_NeoPixel``
class in the
`neopixel <https://github.com/jgarff/rpi_ws281x/tree/master/python>`_
module.
If your RGB values appear to be mixed up (e.g. red is showing as green)
then try using a different ``strip_type``. You can see a list of valid
strip type constants here (look for ``_STRIP_`` in the constant name):
https://docs.rs/ws281x/0.1.0/ws281x/ffi/index.html. Specify a strip type
like this: ``strip_type=ws.WS2811_STRIP_GRB``. For this to work you'll
need to ``import ws`` (which comes with the ``neopixel`` module) into your
code.
:param size: (:class:`MatrixSize`) Size of the neopixel matrix.
:param led_pin: (int) The pin you're using to talk to your neopixel matrix.
:param led_freq_hz: (int) LED frequency.
:param led_dma: (int) LED DMA.
:param led_brightness: (int) Brightness of the matrix display (0-255).
:param led_invert: (bool) Whether to invert the LEDs.
:param strip_type: (int) Neopixel strip type.
:raises: :class:`exceptions.NeoTilesError` if ``matrix_size`` or
``led_pin`` are not specified.
"""
def __init__(
self, size=None, led_pin=None,
led_freq_hz=800000, led_dma=5, led_brightness=64, led_invert=False,
strip_type=DEFAULT_STRIP_TYPE):
super(NTNeoPixelMatrix, self).__init__()
if size is None or led_pin is None:
raise NeoTilesError('size and led_pin must be specified')
self._size = MatrixSize(*size)
self._led_pin = led_pin
self._led_freq_hz = led_freq_hz
self._led_dma = led_dma
self._brightness = led_brightness
self._led_invert = led_invert
self._strip_type = strip_type
self._led_count = self.size.cols * self.size.rows
self.hardware_matrix = Adafruit_NeoPixel(
self._led_count, self._led_pin, freq_hz=self._led_freq_hz,
dma=self._led_dma, invert=self._led_invert,
brightness=self.brightness, strip_type=self._strip_type
)
self.hardware_matrix.begin()
def __repr__(self):
strip_name = self._strip_type
# Convert strip name from strip type integer to associated attribute
# name from ws module (if we can find it).
for strip_check in [attr for attr in dir(ws) if '_STRIP_' in attr]:
if getattr(ws, strip_check) == self._strip_type:
strip_name = 'ws.{}'.format(strip_check)
return (
'{}(size={}, led_pin={}, led_freq_hz={}, led_dma={}, '
'led_brightness={}, led_invert={}, strip_type={})'
).format(
self.__class__.__name__, self.size, self._led_pin,
self._led_freq_hz, self._led_dma, self.brightness,
self._led_invert, strip_name
)
def setPixelColor(self, x, y, color):
pixel_num = (y * self.size.cols) + x
self.hardware_matrix.setPixelColor(pixel_num, color.hardware_int)
def show(self):
self.hardware_matrix.show()
@property
def brightness(self):
return self._brightness
@brightness.setter
def brightness(self, val):
error_msg = 'Brightness must be between 0 and 255'
try:
if val >= 0 and val <= 255:
self._brightness = val
self.hardware_matrix.setBrightness(self._brightness)
else:
raise ValueError(error_msg)
except TypeError:
raise ValueError(error_msg)
class NTRGBMatrix(NTMatrix):
"""
Represents an RGB Matrix.
If no options are passed in then the matrix will be initialized with
default options. These options can be overridden either with ``options``
(which should be an ``RGBMatrixOptions`` object as provided by the
``rgbmatrix`` module); or individual options can be passed into the
constructor.
For example, the following are equivalent: ::
from rgbmatrix import RGBMatrixOptions
options = RGBMatrixOptions()
options.chain_length = 2
options.gpio_slowdown = 3
NTRGBMatrix(options=options)
and: ::
NTRGBMatrix(chain_length=2, gpio_slowdown=3)
:param options: (RGBMatrixOptions) Matrix options.
:param kwargs: (*) Individual matrix options.
"""
def __init__(self, options=None, **kwargs):
super(NTRGBMatrix, self).__init__()
if options is None:
options = RGBMatrixOptions()
for kwarg in kwargs:
setattr(options, kwarg, kwargs[kwarg])
self._size = MatrixSize(
options.rows * options.chain_length, options.rows)
self.options = options
self.hardware_matrix = RGBMatrix(options=options)
self.frame_canvas = self.hardware_matrix.CreateFrameCanvas()
def __repr__(self):
options = [
attr for attr in dir(self.options) if
not callable(getattr(self.options, attr)) and
not attr.startswith('_')
]
options_string = ', '.join([
'{}={}'.format(option, getattr(self.options, option))
for option in sorted(options)
])
return '{}({})'.format(self.__class__.__name__, options_string)
def setPixelColor(self, x, y, color):
cd = color.components_denormalized
self.frame_canvas.SetPixel(x, y, cd[0], cd[1], cd[2])
def show(self):
self.frame_canvas = self.hardware_matrix.SwapOnVSync(self.frame_canvas)
@property
def brightness(self):
return self.hardware_matrix.brightness
@brightness.setter
def brightness(self, val):
error_msg = 'Brightness must be between 0 and 100'
try:
if val >= 0 and val <= 100:
self._brightness = val
self.hardware_matrix.brightness = val
else:
raise ValueError(error_msg)
except TypeError:
raise ValueError(error_msg)
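# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of NTNeoPixelMatrix, assuming an 8x8 NeoPixel matrix
# wired to GPIO pin 18 and a color object exposing the `hardware_int`
# attribute expected by setPixelColor (`some_color` is hypothetical).
#
# matrix = NTNeoPixelMatrix(size=(8, 8), led_pin=18)
# matrix.brightness = 32                  # 0-255 for NeoPixel matrixes
# matrix.setPixelColor(0, 0, some_color)
# matrix.show()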
| mit | -5,985,888,903,134,632,000 | 31.173333 | 79 | 0.611549 | false | 3.850532 | false | false | false |
Valentijn1995/Kn0ckKn0ck | Proxies/MultiProxy.py | 1 | 1343 | from Proxy import Proxy
class MultiProxy(Proxy):
"""
Proxy which is composed of multiple proxies. A different proxy from the list is used each time you call the connect method.
This class makes use of the Composite design pattern: you can use the MultiProxy class as if it were a single proxy,
while actually using multiple proxies.
"""
def __init__(self, proxy_list):
Proxy.__init__(self, None)
self._current_proxy = None
self._proxy_list = proxy_list
self._proxy_counter = 0
self._last_proxy_index = len(proxy_list)
def _get_next_proxy(self):
if self._proxy_counter >= self._last_proxy_index:
self._proxy_counter = 0
next_proxy = self._proxy_list[self._proxy_counter]
self._proxy_counter += 1
return next_proxy
def _receive(self):
return self._current_proxy.receive()
def copy(self):
return MultiProxy(self._proxy_list)
def _connect(self, destination):
self._current_proxy = self._get_next_proxy()
self._current_proxy.connect(destination)
def is_connected(self):
return self._current_proxy is not None
def _send(self, payload):
self._current_proxy.send(payload)
def _close(self):
self._current_proxy.close()
self._current_proxy = None
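# --- Illustrative usage sketch (not part of the original file) ---
# MultiProxy rotates round-robin: each connect() picks the next proxy in
# the list. `ProxyA`/`ProxyB` are hypothetical Proxy subclasses and `dest`
# is whatever destination object your proxies accept.
#
# pool = MultiProxy([ProxyA(), ProxyB()])
# pool.connect(dest)   # uses ProxyA
# pool.send(payload)
# pool.close()
# pool.connect(dest)   # rotates to ProxyB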
| mit | -4,782,610,776,293,411,000 | 30.97619 | 118 | 0.62621 | false | 4.045181 | false | false | false |
hirofumi0810/tensorflow_end2end_speech_recognition | examples/erato/evaluation/eval_julius.py | 1 | 4380 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import codecs
from glob import glob
import numpy as np
import pandas as pd
sys.path.append('../../../')
from experiments.erato.data.load_dataset_ctc import Dataset
def main():
results_paths = [path for path in glob(
'/home/lab5/inaguma/asru2017/erato_results_0710/test/*.log')]
# Julius results
for path in results_paths:
with codecs.open(path, 'r', 'euc_jp') as f:
start_flag = False
file_name = ''
output, output_pos = '', ''
result_dict = {}
for line in f:
line = line.strip()
if line == '----------------------- System Information end -----------------------':
start_flag = True
if start_flag:
if 'input MFCC file' in line:
file_name = line.split(': ')[-1]
file_name = '_'.join(file_name.split('/')[-2:])
file_name = re.sub('.wav', '', file_name)
if 'sentence1' in line:
output = line.split(': ')[-1]
output = re.sub('<s>', '', output)
output = re.sub('</s>', '', output)
output = re.sub('<sp>', '', output)
output = re.sub(r'[\sー]+', '', output)
if 'wseq1' in line:
output_pos = line.split(': ')[-1]
output_pos = re.sub('<s>', '', output_pos)
output_pos = re.sub('</s>', '', output_pos)
output_pos = re.sub('<sp>', '', output_pos)
output_pos = re.sub('感動詞', 'F', output_pos)  # 感動詞 = interjection (filler)
output_pos = re.sub('言いよどみ', 'D', output_pos)  # 言いよどみ = disfluency
result_dict[file_name] = [output, output_pos[1:]]
output, output_pos = '', ''
dataset = Dataset(data_type='test',
label_type='kana',
ss_type='insert_left',
batch_size=1,
max_epoch=1,
shuffle=False,
progressbar=True)
tp_f, fp_f, fn_f = 0., 0., 0.
tp_d, fp_d, fn_d = 0., 0., 0.
for data, is_new_epoch in dataset:
# Create feed dictionary for next mini batch
inputs, labels_true, inputs_seq_len, input_names = data
if input_names[0][0] not in result_dict.keys():
continue
output, output_pos = result_dict[input_names[0][0]]
detected_f_num = output_pos.count('F')
detected_d_num = output_pos.count('D')
if detected_f_num != 0 or detected_d_num != 0:
print(output_pos)
print(output)
str_true = labels_true[0][0][0]
print(str_true)
print('-----')
true_f_num = np.sum(labels_true[0][0][0].count('F'))
true_d_num = np.sum(labels_true[0][0][0].count('D'))
# Filler
if detected_f_num <= true_f_num:
tp_f += detected_f_num
fn_f += true_f_num - detected_f_num
else:
tp_f += true_f_num
fp_f += detected_f_num - true_f_num
# Disfluency
if detected_d_num <= true_d_num:
tp_d += detected_d_num
fn_d += true_d_num - detected_d_num
else:
tp_d += true_d_num
fp_d += detected_d_num - true_d_num
if is_new_epoch:
break
r_f = tp_f / (tp_f + fn_f) if (tp_f + fn_f) != 0 else 0
p_f = tp_f / (tp_f + fp_f) if (tp_f + fp_f) != 0 else 0
f_f = 2 * r_f * p_f / (r_f + p_f) if (r_f + p_f) != 0 else 0
r_d = tp_d / (tp_d + fn_d) if (tp_d + fn_d) != 0 else 0
p_d = tp_d / (tp_d + fp_d) if (tp_d + fp_d) != 0 else 0
f_d = 2 * r_d * p_d / (r_d + p_d) if (r_d + p_d) != 0 else 0
acc_f = [p_f, r_f, f_f]
acc_d = [p_d, r_d, f_d]
df_acc = pd.DataFrame({'Filler': acc_f, 'Disfluency': acc_d},
columns=['Filler', 'Disfluency'],
index=['Precision', 'Recall', 'F-measure'])
print(df_acc)
if __name__ == '__main__':
main()
| mit | 6,545,205,638,564,850,000 | 32.813953 | 100 | 0.446355 | false | 3.36834 | false | false | false |
DamienIrving/ocean-analysis | data_processing/calc_sfci.py | 1 | 3165 | """Calculate the (binned) total internal surface forcing."""
import sys
script_dir = sys.path[0]
import os
import pdb
import argparse
import numpy as np
import iris
import cmdline_provenance as cmdprov
repo_dir = '/'.join(script_dir.split('/')[:-1])
module_dir = repo_dir + '/modules'
sys.path.append(module_dir)
try:
import general_io as gio
import convenient_universal as uconv
except ImportError:
raise ImportError('Script and modules in wrong directories')
def main(inargs):
"""Run the program."""
sfc_tbin_cube = iris.load_cube(inargs.sfc_file, 'total surface forcing binned by temperature')
wfo_tbin_cube = iris.load_cube(inargs.wfo_file, 'Water Flux into Sea Water binned by temperature')
cp = 3992.10322329649 #J kg-1 degC-1
lower_tos_bounds = sfc_tbin_cube.coord('sea_surface_temperature').bounds[:, 0]
coord_names_tbin = [coord.name() for coord in sfc_tbin_cube.dim_coords]
theta = uconv.broadcast_array(lower_tos_bounds,
coord_names_tbin.index('sea_surface_temperature'),
sfc_tbin_cube.shape)
sfci_tbin_cube = sfc_tbin_cube.copy()
sfci_tbin_cube.data = sfc_tbin_cube.data - (cp * theta * wfo_tbin_cube.data) # SFCI = SFC - Cp*THETA*SVF
sfci_tbin_cube.var_name = 'sfci_tbin'
sfci_tbin_cube.long_name = 'total internal surface forcing binned by temperature'
metadata = {inargs.sfc_file: sfc_tbin_cube.attributes['history'],
inargs.wfo_file: wfo_tbin_cube.attributes['history']}
log = cmdprov.new_log(infile_history=metadata, git_repo=repo_dir)
sfci_tbin_cube.attributes['history'] = log
sfc_tsbin_cube = iris.load_cube(inargs.sfc_file, 'total surface forcing binned by temperature and salinity')
wfo_tsbin_cube = iris.load_cube(inargs.wfo_file, 'Water Flux into Sea Water binned by temperature and salinity')
coord_names_tsbin = [coord.name() for coord in sfc_tsbin_cube.dim_coords]
theta = uconv.broadcast_array(lower_tos_bounds,
coord_names_tsbin.index('sea_surface_temperature'),
sfc_tsbin_cube.shape)
sfci_tsbin_cube = sfc_tsbin_cube.copy()
sfci_tsbin_cube.data = sfc_tsbin_cube.data - (cp * theta * wfo_tsbin_cube.data) # SFCI = SFC - Cp*THETA*SVF
sfci_tsbin_cube.var_name = 'sfci_tsbin'
sfci_tsbin_cube.long_name = 'total internal surface forcing binned by temperature and salinity'
sfci_tsbin_cube.attributes['history'] = log
cube_list = iris.cube.CubeList([sfci_tbin_cube, sfci_tsbin_cube])
iris.save(cube_list, inargs.sfci_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("sfc_file", type=str, help="Total surface forcing file")
parser.add_argument("wfo_file", type=str, help="Surface freshwater flux file")
parser.add_argument("sfci_file", type=str, help="Output file")
args = parser.parse_args()
main(args)
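# --- Rough numeric check of the identity above (not part of the script) ---
# SFCI = SFC - cp * theta * wfo subtracts the heat carried by the surface
# freshwater flux. Assuming SFC = 50 W m-2, theta = 10 degC and
# wfo = 1e-3 kg m-2 s-1 with cp = 3992.1 J kg-1 degC-1:
# SFCI = 50 - 3992.1 * 10 * 1e-3 = 10.08 W m-2 (approximately).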
| mit | -7,283,266,626,185,533,000 | 46.238806 | 116 | 0.660664 | false | 3.232891 | false | false | false |
gleb812/pch2csd | tests/test_csdgen.py | 1 | 3891 | from unittest import TestCase, skip
from pch2csd.csdgen import Udo, Csd, ZakSpace
from pch2csd.parse import parse_pch2
from pch2csd.patch import transform_in2in_cables
from pch2csd.resources import ProjectData
from tests.util import get_test_resource, cmp_str_lines
class TestPolymorphism(TestCase):
def setUp(self):
self.data = ProjectData()
self.poly_mix2 = parse_pch2(self.data, get_test_resource('test_poly_mix2.pch2'))
self.udo_mix2_k = """opcode Mix21A_v0, 0, iiiiiiiii
; TODO: lin/log scale, chain input
iLev1, iSw1, iLev2, iSw2, iScale, izIn1, izIn2, izInChain, izOut xin
k1 zkr izIn1
k2 zkr izIn2
k3 zkr izInChain
kout = k1 + k2*iLev1*iSW1 + k3*iLev2*iSW2
zkw kout, izOut
endop
"""
self.udo_mix2_a = """opcode Mix21A_v1, 0, iiiiiiiii
; TODO: lin/log scale, chain input
iLev1, iSw1, iLev2, iSw2, iScale, izIn1, izIn2, izInChain, izOut xin
a1 zar izIn1
a2 zar izIn2
a3 zar izInChain
aout = a1 + a2*iLev1*iSW1 + a3*iLev2*iSW2
zaw aout, izOut
endop
"""
@skip
def test_mix2__choose_right_templates(self):
p = self.poly_mix2
udo_s = [Udo(p, m) for m in p.modules][:2]
self.assertSequenceEqual([s.get_name() for s in udo_s],
['Mix21A_v0', 'Mix21A_v1'])
self.assertTrue(cmp_str_lines(udo_s[0].get_src(), self.udo_mix2_k))
self.assertTrue(cmp_str_lines(udo_s[1].get_src(), self.udo_mix2_a))
class TestParameterMapping(TestCase):
def setUp(self):
self.data = ProjectData()
self.poly_mix2 = parse_pch2(self.data, get_test_resource('test_poly_mix2.pch2'))
@skip
def test_poly_mix2(self):
p = self.poly_mix2
udo_s = [Udo(p, m) for m in p.modules]
params = [udo.get_params() for udo in udo_s]
self.assertSequenceEqual(params, [[-99.9, 0, -6.2, 1, 2],
[0.781, 1, 0.781, 1, 0],
[0., 0],
[2, 1, 1]])
class TestRateConversion(TestCase):
def setUp(self):
self.data = ProjectData()
self.r2b_b2r_fn = get_test_resource('test_convert_r2b_b2r.pch2')
def test_r2b_b2r(self):
p = parse_pch2(self.data, self.r2b_b2r_fn)
zak = ZakSpace()
udos = zak.connect_patch(p)
in2, envh, out2, a2k, k2a = sorted(udos, key=lambda x: x.mod.id)
# sends a
self.assertSequenceEqual(in2.outlets, [7, 0])
# a -> k
self.assertSequenceEqual(a2k.inlets, [7])
self.assertSequenceEqual(a2k.outlets, [7])
# receives k
self.assertSequenceEqual(envh.inlets, [1, 7, 1])
# sends k
self.assertSequenceEqual(envh.outlets, [0, 8])
# k -> a
self.assertSequenceEqual(k2a.inlets, [8])
self.assertSequenceEqual(k2a.outlets, [8])
# receives a
self.assertSequenceEqual(out2.inlets, [8, 1])
csd = Csd(p, zak, udos)
print(csd.get_code())
class TestUdoGen(TestCase):
def setUp(self):
self.data = ProjectData()
self.poly_mix2_fn = get_test_resource('test_poly_mix2.pch2')
self.modes_LfoC = get_test_resource('test_modes_LfoC.pch2')
self.LevAmp = get_test_resource('test_LevAmp.pch2')
def test_udo_statement_gen__not_raises(self):
p = parse_pch2(self.data, self.poly_mix2_fn)
p.cables = [transform_in2in_cables(p, c) for c in p.cables]
zak = ZakSpace()
udos = zak.connect_patch(p)
csd = Csd(p, zak, udos)
csd.get_code()
def test_patch_LevAmp(self):
p = parse_pch2(self.data, self.LevAmp)
zak = ZakSpace()
udos = [udo for udo in zak.connect_patch(p)
if udo.mod.type == 81]
amp_params = [u.get_params()[0] for u in udos]
self.assertEqual(len(amp_params), 2)
self.assertEqual(amp_params[0], 83)
| mit | -2,614,806,133,715,733,500 | 33.433628 | 88 | 0.598047 | false | 2.805335 | true | false | false |
Bazzzzzinga/Election-Portal | Election_Portal/models.py | 1 | 1767 | from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
class Election(models.Model):
election_name=models.CharField(max_length=50)
nom_start_time=models.DateTimeField('Nominations start time')
nom_end_time=models.DateTimeField('Nominations end time')
vote_start_time=models.DateTimeField('Voting start time')
vote_end_time=models.DateTimeField('Voting end time')
desc=models.TextField()
def __str__(self):
return self.election_name
def nomval(self):
if self.nom_start_time>timezone.now():
return "1"
elif self.nom_end_time>=timezone.now():
return "2"
else:
return "3"
def winner(self):
x=self.candidate_set.all().order_by('-vote_count')
if x:
return x[0]
else:
return None
class Branch(models.Model):
name=models.CharField(max_length=50)
def __str__(self):
return self.name
class Candidate(models.Model):
myid=models.AutoField(primary_key=True)
election=models.ForeignKey(Election,on_delete=models.CASCADE)
name=models.CharField(max_length=50)
branch=models.CharField(max_length=50)
work_experience=models.TextField()
user=models.CharField(max_length=30)
vote_count=models.IntegerField(default=0)
profile_pic=models.ImageField(upload_to='media/',blank=True)
def __str__(self):
return self.name
class Comment(models.Model):
candidate=models.ForeignKey(Candidate,on_delete=models.CASCADE)
user=models.CharField(max_length=30)
comment_content=models.CharField(max_length=3000)
comment_time=models.DateTimeField('Comment Time')
def __str__(self):
return self.comment_content
def isCandidate(self):
return candidate.user==self.user
class Voter(models.Model):
election=models.ForeignKey(Election,on_delete=models.CASCADE)
user=models.CharField(max_length=30) | mit | -7,296,916,089,330,264,000 | 31.740741 | 64 | 0.760611 | false | 3.062392 | false | false | false |
mpapazog/meraki-python | deploydevices.py | 1 | 16568 | # This is a script to claim a number of devices into Dashboard, create a network for them and bind
# the network to a pre-existing template. Optionally you can also claim a license key. Switch networks
# must be eligible for auto-bind (Auto-bind is not valid unless the switch template has at least
# one profile and has at most one profile per switch model.)
#
# You need to have Python 3 and the Requests module installed. You
# can download the module here: https://github.com/kennethreitz/requests
# or install it using pip.
#
# To run the script, enter:
# python deploydevices.py -k <key> -o <org> -s <sn> -n <netw> -c <cfg_tmpl> [-t <tags>] [-a <addr>] [-m ignore_error]
#
# To make script chaining easier, all lines containing informational messages to the user
# start with the character @
#
# This file was last modified on 2017-07-05
import sys, getopt, requests, json
def printusertext(p_message):
#prints a line of text that is meant for the user to read
#do not process these lines when chaining scripts
print('@ %s' % p_message)
def printhelp():
#prints help text
printusertext('This is a script to claim MR, MS and MX devices into Dashboard, create a new network for them')
printusertext(' and bind the network to a pre-existing template. The script can also claim license capacity.')
printusertext('')
printusertext('To run the script, enter:')
printusertext('python deploydevices.py -k <key> -o <org> -s <sn> -n <netw> -c <cfg_tmpl> [-t <tags>] [-a <addr>] [-m ignore_error]')
printusertext('')
printusertext('<key>: Your Meraki Dashboard API key')
printusertext('<org>: Name of the Meraki Dashboard Organization to modify')
printusertext('<sn>: Serial number of the devices to claim. Use double quotes and spaces to enter')
printusertext(' multiple serial numbers. Example: -s "AAAA-BBBB-CCCC DDDD-EEEE-FFFF"')
printusertext(' You can also enter a license key as a serial number to claim along with devices')
printusertext('<netw>: Name the new network will have')
printusertext('<cfg_template>: Name of the config template the new network will bound to')
printusertext('-t <tags>: Optional parameter. If defined, network will be tagged with the given tags')
printusertext('-a <addr>: Optional parameter. If defined, devices will be moved to given street address')
printusertext('-m ignore_error: Optional parameter. If defined, the script will not stop if network exists')
printusertext('')
printusertext('Example:')
printusertext('python deploydevices.py -k 1234 -o MyCustomer -s XXXX-YYYY-ZZZZ -n "SF Branch" -c MyCfgTemplate')
printusertext('')
printusertext('Use double quotes ("") in Windows to pass arguments containing spaces. Names are case-sensitive.')
def getorgid(p_apikey, p_orgname):
#looks up org id for a specific org name
#on failure returns 'null'
r = requests.get('https://dashboard.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
for record in rjson:
if record['name'] == p_orgname:
return record['id']
return('null')
def getshardurl(p_apikey, p_orgid):
#Looks up shard URL for a specific org. Use this URL instead of 'dashboard.meraki.com'
# when making API calls with API accounts that can access multiple orgs.
#On failure returns 'null'
r = requests.get('https://dashboard.meraki.com/api/v0/organizations/%s/snmp' % p_orgid, headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
return(rjson['hostname'])
def getnwid(p_apikey, p_shardurl, p_orgid, p_nwname):
#looks up network id for a network name
#on failure returns 'null'
r = requests.get('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
for record in rjson:
if record['name'] == p_nwname:
return record['id']
return('null')
def createnw(p_apikey, p_shardurl, p_dstorg, p_nwdata):
#creates network if one does not already exist with the same name
#check if network exists
getnwresult = getnwid(p_apikey, p_shardurl, p_dstorg, p_nwdata['name'])
if getnwresult != 'null':
printusertext('WARNING: Skipping network "%s" (Already exists)' % p_nwdata['name'])
return('null')
if p_nwdata['type'] == 'combined':
#find actual device types
nwtype = 'wireless switch appliance'
else:
nwtype = p_nwdata['type']
if nwtype != 'systems manager':
r = requests.post('https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_dstorg), data=json.dumps({'timeZone': p_nwdata['timeZone'], 'tags': p_nwdata['tags'], 'name': p_nwdata['name'], 'organizationId': p_dstorg, 'type': nwtype}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
else:
printusertext('WARNING: Skipping network "%s" (Cannot create SM networks)' % p_nwdata['name'])
return('null')
return('ok')
def gettemplateid(p_apikey, p_shardurl, p_orgid, p_tname):
#looks up config template id for a config template name
#on failure returns 'null'
r = requests.get('https://%s/api/v0/organizations/%s/configTemplates' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
rjson = r.json()
for record in rjson:
if record['name'] == p_tname:
return record['id']
return('null')
def bindnw(p_apikey, p_shardurl, p_nwid, p_templateid, p_autobind):
#binds a network to a template
if p_autobind:
autobindvalue = 'true'
else:
autobindvalue = 'false'
r = requests.post('https://%s/api/v0/networks/%s/bind' % (p_shardurl, p_nwid), data=json.dumps({'configTemplateId': p_templateid, 'autoBind': autobindvalue}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return 'null'
return('ok')
def claimdeviceorg(p_apikey, p_shardurl, p_orgid, p_devserial):
#claims a device into an org without adding to a network
r = requests.post('https://%s/api/v0/organizations/%s/claim' % (p_shardurl, p_orgid), data=json.dumps({'serial': p_devserial}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
return(0)
def claimlicenseorg(p_apikey, p_shardurl, p_orgid, p_licensekey):
#claims a license key into an org
r = requests.post('https://%s/api/v0/organizations/%s/claim' % (p_shardurl, p_orgid), data=json.dumps({'licenseKey': p_licensekey, 'licenseMode': 'addDevices'}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
return(0)
def claimdevice(p_apikey, p_shardurl, p_nwid, p_devserial):
#claims a device into a network
r = requests.post('https://%s/api/v0/networks/%s/devices/claim' % (p_shardurl, p_nwid), data=json.dumps({'serial': p_devserial}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
return(0)
def getdeviceinfo(p_apikey, p_shardurl, p_nwid, p_serial):
#returns info for a single device
#on failure returns lone device record, with serial number 'null'
r = requests.get('https://%s/api/v0/networks/%s/devices/%s' % (p_shardurl, p_nwid, p_serial), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
returnvalue = []
if r.status_code != requests.codes.ok:
returnvalue = {'serial':'null', 'model':'null'}
return(returnvalue)
rjson = r.json()
return(rjson)
def setdevicedata(p_apikey, p_shardurl, p_nwid, p_devserial, p_field, p_value, p_movemarker):
#modifies value of device record. Returns the new value
#on failure returns one device record, with all values 'null'
#p_movemarker is boolean: True/False
movevalue = "false"
if p_movemarker:
movevalue = "true"
r = requests.put('https://%s/api/v0/networks/%s/devices/%s' % (p_shardurl, p_nwid, p_devserial), data=json.dumps({p_field: p_value, 'moveMapMarker': movevalue}), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
if r.status_code != requests.codes.ok:
return ('null')
return('ok')
def getorgdeviceinfo (p_apikey, p_shardurl, p_orgid, p_devserial):
#gets basic device info from org inventory. device does not need to be part of a network
r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (p_shardurl, p_orgid), headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
returnvalue = {}
if r.status_code != requests.codes.ok:
returnvalue = {'serial':'null', 'model':'null'}
return(returnvalue)
rjson = r.json()
foundserial = False
for record in rjson:
if record['serial'] == p_devserial:
foundserial = True
returnvalue = {'mac': record['mac'], 'serial': record['serial'], 'networkId': record['networkId'], 'model': record['model'], 'claimedAt': record['claimedAt'], 'publicIp': record['publicIp']}
if not foundserial:
returnvalue = {'serial':'null', 'model':'null'}
return(returnvalue)
def main(argv):
#set default values for command line arguments
arg_apikey = 'null'
arg_orgname = 'null'
arg_serial = 'null'
arg_nwname = 'null'
arg_template = 'null'
arg_modexisting = 'null'
arg_address = 'null'
arg_nwtags = 'null'
#get command line arguments
try:
opts, args = getopt.getopt(argv, 'hk:o:s:n:c:m:a:t:')
except getopt.GetoptError:
printhelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printhelp()
sys.exit()
elif opt == '-k':
arg_apikey = arg
elif opt == '-o':
arg_orgname = arg
elif opt == '-s':
arg_serial = arg
elif opt == '-n':
arg_nwname = arg
elif opt == '-c':
arg_template = arg
elif opt == '-m':
arg_modexisting = arg
elif opt == '-a':
arg_address = arg
elif opt == '-t':
arg_nwtags = arg
#check if all parameters are required parameters have been given
if arg_apikey == 'null' or arg_orgname == 'null' or arg_serial == 'null' or arg_nwname == 'null' or arg_template == 'null':
printhelp()
sys.exit(2)
#set optional flag to ignore error if network already exists
stoponerror = True
if arg_modexisting == 'ignore_error':
stoponerror = False
#get organization id corresponding to org name provided by user
orgid = getorgid(arg_apikey, arg_orgname)
if orgid == 'null':
printusertext('ERROR: Fetching organization failed')
sys.exit(2)
#get shard URL where Org is stored
shardurl = getshardurl(arg_apikey, orgid)
if shardurl == 'null':
printusertext('ERROR: Fetching Meraki cloud shard URL failed')
sys.exit(2)
#make sure that a network does not already exist with the same name
nwid = getnwid(arg_apikey, shardurl, orgid, arg_nwname)
if nwid != 'null' and stoponerror:
printusertext('ERROR: Network with that name already exists')
sys.exit(2)
#get template ID for template name argument
templateid = gettemplateid(arg_apikey, shardurl, orgid, arg_template)
if templateid == 'null':
printusertext('ERROR: Unable to find template: ' + arg_template)
sys.exit(2)
#get serial numbers from parameter -s
devicelist = {}
devicelist['serial'] = arg_serial.split(" ")
devicelist['model'] = []
for i in range (0, len(devicelist['serial']) ):
claimdeviceorg(arg_apikey, shardurl, orgid, devicelist['serial'][i])
#check if device has been claimed successfully
deviceinfo = getorgdeviceinfo (arg_apikey, shardurl, orgid, devicelist['serial'][i])
if deviceinfo['serial'] == 'null':
printusertext('INFO: Serial number %s is a license or unsupported device' % devicelist['serial'][i])
claimlicenseorg(arg_apikey, shardurl, orgid, devicelist['serial'][i])
devicelist['model'].append(deviceinfo['model'])
#compile list of different product types in order to create correct type of network
devicetypes = {'mx': False, 'ms': False, 'mr': False}
for record in devicelist['model']:
if record [:2] == 'MX' or record [:1] == 'Z':
devicetypes['mx'] = True
elif record [:2] == 'MS':
devicetypes['ms'] = True
elif record [:2] == 'MR':
devicetypes['mr'] = True
#build network type string for network creation
nwtypestring = ''
if devicetypes['mr']:
nwtypestring += 'wireless'
if len(nwtypestring) > 0:
nwtypestring += ' '
if devicetypes['ms']:
nwtypestring += 'switch'
if len(nwtypestring) > 0:
nwtypestring += ' '
if devicetypes['mx']:
nwtypestring += 'appliance'
#compile parameters to create network
nwtags = ''
if arg_nwtags != 'null':
nwtags = arg_nwtags
### NOTE THAT TIMEZONE IS HARDCODED IN THIS SCRIPT. EDIT THE LINE BELOW TO MODIFY ###
nwparams = {'name': arg_nwname, 'timeZone': 'Europe/Helsinki', 'tags': nwtags, 'organizationId': orgid, 'type': nwtypestring}
#create network and get its ID
if nwid == 'null':
createstatus = createnw (arg_apikey, shardurl, orgid, nwparams)
if createstatus == 'null':
printusertext('ERROR: Unable to create network')
sys.exit(2)
nwid = getnwid(arg_apikey, shardurl, orgid, arg_nwname)
if nwid == 'null':
printusertext('ERROR: Unable to get ID for new network')
sys.exit(2)
#clean up serials list to filter out licenses, MVs, etc
validserials = []
for i in range (0, len(devicelist['serial']) ):
if devicelist['model'][i][:2] == 'MR' or devicelist['model'][i][:2] == 'MS' or devicelist['model'][i][:2] == 'MX' or devicelist['model'][i][:1] == 'Z':
validserials.append(devicelist['serial'][i])
for devserial in validserials:
#claim device into newly created network
claimdevice(arg_apikey, shardurl, nwid, devserial)
#check if device has been claimed successfully
deviceinfo = getdeviceinfo(arg_apikey, shardurl, nwid, devserial)
if deviceinfo['serial'] == 'null':
printusertext('ERROR: Claiming or moving device unsuccessful')
sys.exit(2)
#set device hostname
hostname = deviceinfo['model'] + '_' + devserial
setdevicedata(arg_apikey, shardurl, nwid, devserial, 'name', hostname, False)
#if street address is given as a parameter, set device location
if arg_address != 'null':
setdevicedata(arg_apikey, shardurl, nwid, devserial, 'address', arg_address, True)
#bind network to template. If switches in template, attempt to autobind them
bindstatus = bindnw(arg_apikey, shardurl, nwid, templateid, devicetypes['ms'])
if bindstatus == 'null' and stoponerror:
printusertext('ERROR: Unable to bind network to template')
sys.exit(2)
printusertext('End of script.')
if __name__ == '__main__':
main(sys.argv[1:]) | mit | -7,332,367,188,420,823,000 | 41.038961 | 328 | 0.612808 | false | 3.630945 | false | false | false |
ppnchb/python-automata-simulator | automata/reader.py | 1 | 1056 | __author__ = 'Hyunsoo'
import re
def getStates(data):
states = []
for line in data[1:]:
state = line[1]
assert state not in states
states.append(state)
return states
def getVocabulary(data):
vocabulary = []
line = data[0][2:]
for symbol in line:
assert len(symbol) <= 1 and symbol not in vocabulary
vocabulary.append(symbol)
return vocabulary
def getInitialState(data):
return data[1][1]
def getFinalState(data):
finalStates = []
for line in data[1:]:
if len(line[0])>0:
finalStates.append(line[1])
return finalStates
def getTable(data):
table = [line[1:] for line in data]
return table
def getPartialData(data, index):
height, width = len(data), len(data[0])
result = [row[:] for row in data]
for row in range(1, height):
for column in range(2, width):
tableData = re.split(';\s+', result[row][column])
assert len(tableData)>index
result[row][column]=tableData[index]
return result | gpl-2.0 | 8,983,291,796,607,273,000 | 26.815789 | 61 | 0.606061 | false | 3.653979 | false | false | false |
PnX-SI/GeoNature | backend/geonature/utils/env.py | 1 | 1539 | """ Helpers to manipulate the execution environment """
import os
import subprocess
from pathlib import Path
import pkg_resources
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_mail import Mail
from flask_migrate import Migrate
# Must be at top of this file. I don't know why (?)
MAIL = Mail()
from flask import current_app
# Define GEONATURE_VERSION before import config_shema module
# because GEONATURE_VERSION is imported in this module
ROOT_DIR = Path(__file__).absolute().parent.parent.parent.parent
try:
GEONATURE_VERSION = pkg_resources.get_distribution("geonature").version
except pkg_resources.DistributionNotFound:
with open(str((ROOT_DIR / "VERSION"))) as v:
GEONATURE_VERSION = v.read()
BACKEND_DIR = ROOT_DIR / "backend"
DEFAULT_CONFIG_FILE = ROOT_DIR / "config/geonature_config.toml"
os.environ['FLASK_SQLALCHEMY_DB'] = 'geonature.utils.env.DB'
os.environ['FLASK_MARSHMALLOW'] = 'geonature.utils.env.MA'
DB = SQLAlchemy()
MA = Marshmallow()
migrate = Migrate()
GN_MODULE_FILES = (
"manifest.toml",
"__init__.py",
"backend/__init__.py",
"backend/blueprint.py",
)
GN_EXTERNAL_MODULE = ROOT_DIR / "external_modules"
GN_MODULE_FE_FILE = "frontend/app/gnModule.module"
def import_requirements(req_file):
from geonature.utils.errors import GeoNatureError
cmd_return = subprocess.call(["pip", "install", "-r", req_file])
if cmd_return != 0:
raise GeoNatureError("Error while installing module backend dependencies")
| gpl-3.0 | -4,124,683,430,628,901,000 | 25.534483 | 82 | 0.725146 | false | 3.389868 | false | false | false |
Ultimaker/Uranium | plugins/LocalFileOutputDevice/LocalFileOutputDevice.py | 1 | 10399 | # Copyright (c) 2021 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import os
import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtWidgets import QFileDialog, QMessageBox
from UM.Application import Application
from UM.FileHandler.WriteFileJob import WriteFileJob
from UM.Logger import Logger
from UM.Mesh.MeshWriter import MeshWriter
from UM.Message import Message
from UM.OutputDevice import OutputDeviceError
from UM.OutputDevice.OutputDevice import OutputDevice
from UM.OutputDevice.ProjectOutputDevice import ProjectOutputDevice
from UM.i18n import i18nCatalog
catalog = i18nCatalog("uranium")
class LocalFileOutputDevice(ProjectOutputDevice):
"""Implements an OutputDevice that supports saving to arbitrary local files."""
def __init__(self, add_to_output_devices: bool = True, parent = None):
super().__init__(device_id = "local_file", add_to_output_devices = add_to_output_devices, parent = parent)
self.setName(catalog.i18nc("@item:inmenu", "Local File"))
self.setShortDescription(catalog.i18nc("@action:button Preceded by 'Ready to'.", "Save to Disk"))
self.setDescription(catalog.i18nc("@info:tooltip", "Save to Disk"))
self.setIconName("save")
self.shortcut = "Ctrl+S"
self.menu_entry_text = catalog.i18nc("@item:inmenu About saving files to the hard drive", "To Disk")
self._writing = False
def requestWrite(self, nodes, file_name = None, limit_mimetypes = None, file_handler = None, **kwargs):
"""Request the specified nodes to be written to a file.
:param nodes: A collection of scene nodes that should be written to the
file.
:param file_name: A suggestion for the file name to write
to. Can be freely ignored if providing a file name makes no sense.
:param limit_mimetypes: Should we limit the available MIME types to the
MIME types available to the currently active machine?
:param kwargs: Keyword arguments.
"""
if self._writing:
raise OutputDeviceError.DeviceBusyError()
# Set up and display file dialog
dialog = QFileDialog()
dialog.setWindowTitle(catalog.i18nc("@title:window", "Save to Disk"))
dialog.setFileMode(QFileDialog.AnyFile)
dialog.setAcceptMode(QFileDialog.AcceptSave)
# Ensure platform never ask for overwrite confirmation since we do this ourselves
dialog.setOption(QFileDialog.DontConfirmOverwrite)
if sys.platform == "linux" and "KDE_FULL_SESSION" in os.environ:
dialog.setOption(QFileDialog.DontUseNativeDialog)
filters = []
mime_types = []
selected_filter = None
if "preferred_mimetypes" in kwargs and kwargs["preferred_mimetypes"] is not None:
preferred_mimetypes = kwargs["preferred_mimetypes"]
else:
preferred_mimetypes = Application.getInstance().getPreferences().getValue("local_file/last_used_type")
preferred_mimetype_list = preferred_mimetypes.split(";")
if not file_handler:
file_handler = Application.getInstance().getMeshFileHandler()
file_types = file_handler.getSupportedFileTypesWrite()
file_types.sort(key = lambda k: k["description"])
if limit_mimetypes:
file_types = list(filter(lambda i: i["mime_type"] in limit_mimetypes, file_types))
file_types = [ft for ft in file_types if not ft["hide_in_file_dialog"]]
if len(file_types) == 0:
Logger.log("e", "There are no file types available to write with!")
raise OutputDeviceError.WriteRequestFailedError(catalog.i18nc("@info:warning", "There are no file types available to write with!"))
# Find the first available preferred mime type
preferred_mimetype = None
for mime_type in preferred_mimetype_list:
if any(ft["mime_type"] == mime_type for ft in file_types):
preferred_mimetype = mime_type
break
extension_added = False
for item in file_types:
type_filter = "{0} (*.{1})".format(item["description"], item["extension"])
filters.append(type_filter)
mime_types.append(item["mime_type"])
if preferred_mimetype == item["mime_type"]:
selected_filter = type_filter
if file_name and not extension_added:
extension_added = True
file_name += "." + item["extension"]
# CURA-6411: This code needs to be before dialog.selectFile and the filters, because otherwise in macOS (for some reason) the setDirectory call doesn't work.
stored_directory = Application.getInstance().getPreferences().getValue("local_file/dialog_save_path")
if stored_directory and stored_directory != "":
dialog.setDirectory(stored_directory)
# Add the file name before adding the extension to the dialog
if file_name is not None:
dialog.selectFile(file_name)
dialog.setNameFilters(filters)
if selected_filter is not None:
dialog.selectNameFilter(selected_filter)
if not dialog.exec_():
raise OutputDeviceError.UserCanceledError()
save_path = dialog.directory().absolutePath()
Application.getInstance().getPreferences().setValue("local_file/dialog_save_path", save_path)
selected_type = file_types[filters.index(dialog.selectedNameFilter())]
Application.getInstance().getPreferences().setValue("local_file/last_used_type", selected_type["mime_type"])
# Get file name from file dialog
file_name = dialog.selectedFiles()[0]
Logger.log("d", "Writing to [%s]..." % file_name)
if os.path.exists(file_name):
result = QMessageBox.question(None, catalog.i18nc("@title:window", "File Already Exists"), catalog.i18nc("@label Don't translate the XML tag <filename>!", "The file <filename>{0}</filename> already exists. Are you sure you want to overwrite it?").format(file_name))
if result == QMessageBox.No:
raise OutputDeviceError.UserCanceledError()
self.writeStarted.emit(self)
# Actually writing file
if file_handler:
file_writer = file_handler.getWriter(selected_type["id"])
else:
file_writer = Application.getInstance().getMeshFileHandler().getWriter(selected_type["id"])
try:
mode = selected_type["mode"]
if mode == MeshWriter.OutputMode.TextMode:
Logger.log("d", "Writing to Local File %s in text mode", file_name)
stream = open(file_name, "wt", encoding = "utf-8")
elif mode == MeshWriter.OutputMode.BinaryMode:
Logger.log("d", "Writing to Local File %s in binary mode", file_name)
stream = open(file_name, "wb")
else:
Logger.log("e", "Unrecognised OutputMode.")
return None
job = WriteFileJob(file_writer, stream, nodes, mode)
job.setFileName(file_name)
job.setAddToRecentFiles(True) # The file will be added into the "recent files" list upon success
job.progress.connect(self._onJobProgress)
job.finished.connect(self._onWriteJobFinished)
message = Message(catalog.i18nc("@info:progress Don't translate the XML tags <filename>!", "Saving to <filename>{0}</filename>").format(file_name),
0, False, -1 , catalog.i18nc("@info:title", "Saving"))
message.show()
job.setMessage(message)
self._writing = True
job.start()
except PermissionError as e:
Logger.log("e", "Permission denied when trying to write to %s: %s", file_name, str(e))
raise OutputDeviceError.PermissionDeniedError(catalog.i18nc("@info:status Don't translate the XML tags <filename>!", "Permission denied when trying to save <filename>{0}</filename>").format(file_name)) from e
except OSError as e:
Logger.log("e", "Operating system would not let us write to %s: %s", file_name, str(e))
raise OutputDeviceError.WriteRequestFailedError(catalog.i18nc("@info:status Don't translate the XML tags <filename> or <message>!", "Could not save to <filename>{0}</filename>: <message>{1}</message>").format(file_name, str(e))) from e
def _onJobProgress(self, job, progress):
self.writeProgress.emit(self, progress)
def _onWriteJobFinished(self, job):
self._writing = False
self.writeFinished.emit(self)
if job.getResult():
self.writeSuccess.emit(self)
message = Message(catalog.i18nc("@info:status Don't translate the XML tags <filename>!", "Saved to <filename>{0}</filename>").format(job.getFileName()), title = catalog.i18nc("@info:title", "File Saved"))
message.addAction("open_folder", catalog.i18nc("@action:button", "Open Folder"), "open-folder", catalog.i18nc("@info:tooltip", "Open the folder containing the file"))
message._folder = os.path.dirname(job.getFileName())
message.actionTriggered.connect(self._onMessageActionTriggered)
message.show()
else:
message = Message(catalog.i18nc("@info:status Don't translate the XML tags <filename> or <message>!", "Could not save to <filename>{0}</filename>: <message>{1}</message>").format(job.getFileName(), str(job.getError())), lifetime = 0, title = catalog.i18nc("@info:title", "Warning"))
message.show()
self.writeError.emit(self)
try:
job.getStream().close()
except (OSError, PermissionError): #When you don't have the rights to do the final flush or the disk is full.
message = Message(catalog.i18nc("@info:status", "Something went wrong saving to <filename>{0}</filename>: <message>{1}</message>").format(job.getFileName(), str(job.getError())), title = catalog.i18nc("@info:title", "Error"))
message.show()
self.writeError.emit(self)
def _onMessageActionTriggered(self, message, action):
if action == "open_folder" and hasattr(message, "_folder"):
QDesktopServices.openUrl(QUrl.fromLocalFile(message._folder))
| lgpl-3.0 | 5,876,116,264,392,756,000 | 48.755981 | 294 | 0.650832 | false | 4.116785 | false | false | false |
halbbob/dff | modules/viewer/cat.py | 1 | 5050 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2011 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Solal Jacob <[email protected]>
# Jeremy MOUNIER <[email protected]>
__dff_module_cat_version__ = "1.0.0"
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from api.vfs import *
from api.module.module import *
from api.module.script import *
from api.types.libtypes import Argument, typeId
class TextEdit(QTextEdit):
def __init__(self, cat):
QTextEdit.__init__(self)
self.cat = cat
self.scroll = self.cat.scroll
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setReadOnly(1)
self.setWordWrapMode(QTextOption.NoWrap)
def wheelEvent(self, event):
v = self.scroll.value()
if event.delta() > 0:
trig = v - 5
if trig >= self.scroll.min:
self.cat.read(trig)
self.scroll.setValue(trig)
else:
trig = v + 5
if trig < self.scroll.max:
self.cat.read(trig)
self.scroll.setValue(trig)
class Scroll(QScrollBar):
def __init__(self, parent):
QScrollBar.__init__(self, parent)
self.cat = parent
self.init()
self.initCallBacks()
self.setValues()
def init(self):
self.min = 0
self.single = 1
self.page = 32
self.max = self.cat.lines - 1
def initCallBacks(self):
self.connect(self, SIGNAL("sliderMoved(int)"), self.moved)
self.connect(self, SIGNAL("actionTriggered(int)"), self.triggered)
def setValues(self):
self.setMinimum(self.min)
self.setMaximum(self.max)
self.setSingleStep(self.single)
self.setPageStep(self.page)
self.setRange(self.min, self.max)
def triggered(self, action):
if action == QAbstractSlider.SliderSingleStepAdd:
trig = self.value() + 1
if trig <= self.max:
self.cat.read(trig)
elif action == QAbstractSlider.SliderSingleStepSub:
trig = self.value() - 1
if trig >= self.min:
self.cat.read(trig)
elif action == QAbstractSlider.SliderPageStepSub:
trig = self.value() - 5
if trig >= self.min:
self.cat.read(trig)
elif action == QAbstractSlider.SliderPageStepAdd:
trig = self.value() + 5
if trig <= self.max:
self.cat.read(trig)
def moved(self, value):
if value == self.max:
value -= 5
self.cat.read(value)
class CAT(QWidget, Script):
def __init__(self):
Script.__init__(self, "cat")
self.vfs = vfs.vfs()
self.type = "cat"
self.icon = None
def start(self, args):
self.args = args
try:
self.node = args["file"].value()
except:
pass
def g_display(self):
QWidget.__init__(self)
self.vfile = self.node.open()
self.offsets = self.linecount()
self.initShape()
self.read(0)
def initShape(self):
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.scroll = Scroll(self)
self.text = TextEdit(self)
self.hbox.addWidget(self.text)
self.hbox.addWidget(self.scroll)
self.setLayout(self.hbox)
def read(self, line):
padd = 0
if line > padd:
padd = 1
self.vfile.seek(self.offsets[line]+padd)
self.text.clear()
self.text.textCursor().insertText(QString.fromUtf8(self.vfile.read(1024*10)))
self.text.moveCursor(QTextCursor.Start)
def linecount(self):
offsets = [0]
offsets.extend(self.vfile.indexes('\n'))
self.lines = len(offsets)
return offsets
def updateWidget(self):
pass
def c_display(self):
file = self.node.open()
fsize = self.node.size()
size = 0
self.buff = ""
while size < fsize:
try:
tmp = file.read(4096)
except vfsError, e:
print self.buff
break
if len(tmp) == 0:
print tmp
break
size += len(tmp)
self.buff += tmp
print tmp
file.close()
if len(self.buff):
return self.buff
class cat(Module):
"""Show text file content
ex:cat /myfile.txt"""
def __init__(self):
Module.__init__(self, "text", CAT)
self.conf.addArgument({"name": "file",
"description": "Text file to display",
"input": Argument.Required|Argument.Single|typeId.Node})
self.conf.addConstant({"name": "mime-type",
"type": typeId.String,
"description": "managed mime type",
"values": ["HTML", "ASCII", "XML", "text"]})
self.tags = "Viewers"
self.flags = ["console", "gui"]
self.icon = ":text"
| gpl-2.0 | -1,013,139,386,503,417,700 | 25.719577 | 83 | 0.611089 | false | 3.364424 | false | false | false |
gitcoinco/web | app/grants/management/commands/find_deadbeat_grants.py | 1 | 1292 | '''
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.core.management.base import BaseCommand
from marketing.mails import notify_deadbeat_grants
class Command(BaseCommand):
help = 'finds quests whose reward is out of redemptions'
def handle(self, *args, **options):
from grants.models import Grant
from django.utils import timezone
before = timezone.now() - timezone.timedelta(hours=6)
grants = Grant.objects.filter(contract_address='0x0', contract_version__lt=2, active=True, created_on__lt=before)
if grants.count():
notify_deadbeat_grants(grants)
| agpl-3.0 | -7,652,814,746,059,988,000 | 35.914286 | 121 | 0.720588 | false | 4.208469 | false | false | false |
laszlokiraly/OffenesParlament | offenesparlament/op_scraper/scraper/parlament/resources/extractors/prelaw.py | 2 | 3197 | import datetime
from django.utils.html import remove_tags
from scrapy import Selector
from parlament.resources.extractors import SingleExtractor
from parlament.resources.extractors import MultiExtractor
from parlament.resources.extractors.law import LAW
from parlament.resources.util import _clean
# import the logging library
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class PRELAW:
class DESCRIPTION(SingleExtractor):
XPATH = "//div[contains(concat(' ', normalize-space(@class), ' '), ' c_2 ')]/h3/following-sibling::p/text()"
@classmethod
def xt(cls, response):
try:
description = response.xpath(cls.XPATH)[0].extract()[0]
except:
import ipdb
ipdb.set_trace()
return remove_tags(description, 'p')
class STEPS(MultiExtractor):
XPATH = "//table[contains(@class,'tabelleHistorie')]"
@classmethod
def xt(cls, response):
steps = []
raw_table = response.xpath(cls.XPATH)[0]
raw_steps = Selector(text=raw_table.extract()).xpath('//tr')[1:] # ignore header
for index, step in enumerate(raw_steps, start=1):
step_selector = Selector(text=step.extract())
title = LAW.PHASES.STEPS.TITLE.xt(step_selector)
date_str = LAW.PHASES.STEPS.DATE.xt(step_selector)
date = datetime.datetime.strptime(
date_str, "%d.%m.%Y").date()
protocol_url = LAW.PHASES.STEPS.PROTOCOL.xt(step_selector)
steps.append({
'date': date,
'title': title['text'],
'sortkey': str(index).zfill(3),
'protocol_url': protocol_url
})
return steps
class OPINIONS(MultiExtractor):
XPATH = "//div[contains(@class,'filterListe')]//table[contains(@class,'filter')]//tr"
@classmethod
def xt(cls, response):
ops = []
raw_ops = response.xpath(cls.XPATH).extract()
for raw_op in raw_ops[1:]:
op_sel = Selector(text=raw_op)
date = op_sel.xpath('//td[1]').xpath("normalize-space()").extract()[0]
url = op_sel.xpath('//td[2]/a/@href').extract()[0]
parl_id = u"({})".format(
op_sel.xpath('//td[3]/a').xpath('normalize-space()').extract()[0])
title = op_sel.xpath('//td[2]').xpath('normalize-space()').extract()[0]
if title:
title = _clean(title).replace("*", ", ")
else:
title = None
email = None
try:
date = datetime.datetime.strptime(
_clean(date), "%d.%m.%Y").date()
except:
date = None
ops.append({
'date': date,
'url': url,
'email': email,
'title': title,
'parl_id': parl_id
})
return ops
| bsd-2-clause | 2,424,237,312,767,877,000 | 33.376344 | 116 | 0.506099 | false | 4.141192 | false | false | false |
MTgeophysics/mtpy | legacy/beta_tests_before_merge/plot_strike.py | 1 | 1281 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 15:35:39 2013
@author: Alison Kirkby
plots strike
fails with error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\u64125\AppData\Local\Continuum\Miniconda2\envs\mtpy27\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 714, in runfile
execfile(filename, namespace)
File "C:\Users\u64125\AppData\Local\Continuum\Miniconda2\envs\mtpy27\lib\site-packages\spyderlib\widgets\externalshell\sitecustomize.py", line 74, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "C:/Git/mtpy/examples/tests/plot_strike.py", line 21, in <module>
plotstrike = PlotStrike(fn_list=elst)
File "mtpy\imaging\plotstrike.py", line 240, in __init__
self.plot()
File "mtpy\imaging\plotstrike.py", line 307, in plot
zinv = mt.get_Zinvariants()
AttributeError: 'MTplot' object has no attribute 'get_Zinvariants'
"""
import os
os.chdir(r'C:\Git\mtpy')
from mtpy.imaging.plotstrike import PlotStrike
import os.path as op
import matplotlib.pyplot as plt
# path to edis
epath = r'C:\Git\mtpy\examples\data\edi_files'
elst=[op.join(epath,edi) for edi in os.listdir(epath) if edi.endswith('.edi')][::4]
plotstrike = PlotStrike(fn_list=elst) | gpl-3.0 | 8,677,624,270,188,862,000 | 32.736842 | 160 | 0.733802 | false | 2.878652 | false | false | false |
UltrosBot/Ultros | plugins/urls/handlers/handler.py | 1 | 4865 | # coding=utf-8
from kitchen.text.converters import to_unicode
from plugins.urls.matching import REGEX_TYPE
__author__ = 'Gareth Coles'
class URLHandler(object):
"""
URL handler. Subclass this!
You'll want to override both the `call` method and the *criteria* dict.
The former is called if the criteria matches the URL.
In the criteria dict, you're expected to provide values to test equality
for. However, there are a few things to be aware of.
* Leave a key out if you don't care about matching it - None will be
matched against.
* You may provide a compiled regular expression to test against as well.
* Finally, you may provide a callable (function or class), which will be
run for the comparison instead, and should return either True or False.
>>> criteria = {
... # No protocol here, since we don't care about it
... "auth": lambda x: x is not None
... "domain": re.compile(u"[a-zA-Z]+"),
... "port": lambda x: x > 8080,
... "path": lambda x: len(x) > 10,
... "permission": "urls.trigger.example" # If you need one
... }
...
>>>
Additionally, if the above matching is somehow not good enough for you, you
may override the `match` function.
"""
# Remember to set this, so that there are no conflicting handlers - only
# one handler per name!
name = ""
plugin = None
urls_plugin = None
criteria = {
"protocol": None,
"auth": None,
"domain": None,
"port": None,
"path": None,
# Check the user and source for a permission - This is not a URL field
"permission": None,
}
def __init__(self, plugin):
"""
Initializer. The plugin here is your plugin, not the URLs plugin.
You're expected to initialize this object yourself, so feel free to
override this.
"""
self.plugin = plugin
def call(self, url, context):
"""
Called if the URL matches. Override this or there's basically no point
in having a handler.
*context* here is a dict containing "protocol", "source", and "target"
keys, which you can use to respond to whoever sent the message which
contained the URL.
Return True if this should cascade to any other handlers, or False
if it should end here.
If an exception is raised, it will be caught and we'll move on to the
next handler.
:param url: The URL object that was matched
:param context: Dictionary with the current context, contains
the MessageReceived event under "event" in normal
circumstances
:type url: plugins.urls.url.URL
:type context: dict
:return: constants.STOP_HANDLING or constants.CASCADE
:rtype: int
"""
raise NotImplementedError()
def match(self, url, context):
"""
Decide whether to handle this URL.
This should return True if this handler should handle the URL, or
False if not.
Do not do any actual handling here. You should only override this if
the built-in handling doesn't cover your needs for some reason.
:param url: The URL object to match
:param context: Dictionary with the current context
:return: True if this handler should handle the URL, False otherwise
"""
for key in self.criteria.iterkeys():
value = self.criteria.get(key)
if key == "permission":
event = context["event"]
result = self.plugin.commands.perm_handler.check(
value, event.source, event.target, event.caller
)
if not result:
return False
continue
if callable(value): # Function, lambda, etc
if value(getattr(url, key)):
continue
else:
return False
elif isinstance(value, REGEX_TYPE): # Compiled regex
# Casting due to port, None, etc
if value.match(to_unicode(getattr(url, key))):
continue
else:
return False
elif value == getattr(url, key): # Standard equality test
continue
else:
return False
return True
def teardown(self):
"""
Called when the URLs plugin unloads - Do any saving or cleanup you
need to do here
"""
pass
def reload(self):
"""
Called when the URLs plugin has its configuration reloaded - You are
free to leave this as it is if it isn't relevant to your plugin
"""
pass
| artistic-2.0 | 6,549,978,788,208,118,000 | 29.791139 | 79 | 0.578623 | false | 4.677885 | false | false | false |
rupertotorres1/UBCPreReqTool | main.py | 1 | 1241 | try:
input = raw_input
except NameError:
pass
import sys
from pickle import load
# Load dictionary with courses and their pre-requisites and co-requisites
dict = load(open("dictCoursesPreCoReqs.p", "rb"))
print ("Welcome! This tool helps you find out which courses require the given course as a pre-req or co-req. I hope it is useful.")
def interaction():
print ("")
course_input = input("Which course would you like to look up? (Enter x to exit): ").upper()
if (course_input != "X"):
print("")
pre_co_req_for = []
# If the course that the user provided is not in the loaded dictionary, ask again
if (not(course_input in dict)):
print ("That is not a valid course")
interaction()
# Else, search the courses for which the provided course is a pre-requisite or co-requisite
# and add them to a list.
else:
for course, pre_co_reqs in dict.items():
if (course_input in pre_co_reqs):
pre_co_req_for.append(course)
sys.stdout.write(course_input + " is a pre-req or co-req for:")
print("")
pre_co_req_for.sort()
for p in pre_co_req_for:
sys.stdout.write("| " + str(p) + " |")
print("")
print("")
interaction()
interaction()
| mit | -385,972,854,799,941,900 | 26.204545 | 131 | 0.641418 | false | 3.291777 | false | false | false |
synapse-wireless/snap-to-cloud-examples | exosite/exosite_connector.py | 1 | 2078 | import json
from pyonep import onep # Exosite Python Library
from tornado import httpclient
# TODO: Replace these with values from your own Exosite account and resource
# We want to map SN171 SNAP addresses to Exosite CIKs
# Addresses should not have any separators (no "." Or ":", etc.). The hexadecimal digits a-f must be entered in lower case.
EXOSITE_CIKS = {"XXXXXX": 'unique Exosite CIK here',
"YYYYYY": 'another unique Exosite CIK here'} # yapf: disable
class ExositeConnector(object):
def __init__(self):
self.exosite = onep.OnepV1()
def publish(self, thing_id, state):
"""Publish a message to Exosite API.
:param str thing_id: The 6-character SNAP MAC Address
:param dict state: A dictionary containing the new state values for a thing
"""
# Use the Exosite Python Library to format the message
jsonreq = {"auth": {"cik": EXOSITE_CIKS[thing_id.lower()]},
"calls": self.exosite._composeCalls([('writegroup',
[[[{"alias": "batt"}, int(state['batt'])],
[{"alias": "state"}, int(state['button_state'])],
[{"alias": "count"}, state['button_count']]]])])} # yapf: disable
# Create a Tornado HTTPRequest
request = httpclient.HTTPRequest(url=self.exosite.onephttp.host + self.exosite.url,
method='POST',
headers=self.exosite.headers,
body=json.dumps(jsonreq))
http_client = httpclient.AsyncHTTPClient()
http_client.fetch(request, self._handle_request)
@staticmethod
def _handle_request(response):
"""Prints the response of a HTTPRequest.
:param response: HTTPRequest
:return:
"""
if response.error:
print "Error:", response.error
else:
print response.body
| apache-2.0 | -1,340,095,892,736,022,300 | 42.291667 | 125 | 0.552936 | false | 4.27572 | false | false | false |
mathause/regionmask | regionmask/defined_regions/_ar6_pre_revisions.py | 1 | 4711 | import geopandas as gp
from shapely import geometry
from ..core._geopandas import _enumerate_duplicates, from_geopandas
from ._ressources import read_remote_shapefile
REPR = """
pre-revision version of 'AR6 reference regions - Iturbide et al., 2020'
These are the regions as originally submitted by Iturbide et al., 2020. During
the revisions regions were added and existing regions were adapted. The originally
submitted regions are provided here for completeness. Use the revised regions
i.e. ``regionmask.defined_regions.ar6``.
Attributes
----------
all : Regions
All regions (land + ocean), regions split along the date line
are combined (see below).
land : Regions
Land regions only, regions split along the date line
are combined (see below).
ocean : Regions
Ocean regions only, regions split along the date line
are combined (see below).
separate_pacific : Regions
Original definitions of the regions, no combination of the Pacific
regions.
Combined Regions
----------------
SPO and SPO*; EPO and EPO*; NPO and NPO*
Note
----
The region numbers for ``all``, ``land``, and ``ocean`` are consistent. The
region numbers for ``separate_pacific`` and all others are not.
"""
def _combine_to_multipolygon(df, column, *names):
all_poly = [df[df[column] == name].geometry.values[0] for name in names]
combined_poly = geometry.MultiPolygon(all_poly)
df.loc[df[column] == names[0], "geometry"] = gp.GeoSeries(combined_poly).values
for name in names[1:]:
df = df.loc[df[column] != name]
return df
land = [
"GIC",
"NEC",
"CNA",
"ENA",
"NWN",
"WNA",
"NCA",
"SCA",
"CAR",
"NWS",
"SAM",
"SSA",
"SWS",
"SES",
"NSA",
"NES",
"NEU",
"CEU",
"EEU",
"MED",
"WAF",
"SAH",
"NEAF",
"CEAF",
"SWAF",
"SEAF",
"CAF",
"RAR",
"RFE",
"ESB",
"WSB",
"WCA",
"TIB",
"EAS",
"ARP",
"SAS",
"SEA",
"NAU",
"CAU",
"SAU",
"NZ",
"EAN",
"WAN",
]
ocean = [
"ARO",
"SPO",
"EPO",
"NPO",
"SAO",
"EAO",
"NAO",
"EIO",
"SIO",
"ARS",
"BOB",
"SOO",
]
class ar6_pre_revisions_cls:
"""docstring for ar6"""
def __init__(self):
self.__df = None
self.__df_combined = None
self._all = None
self._land = None
self._ocean = None
self._separate_pacific = None
self._name = "pre-revision version of 'AR6 reference regions'"
self._source = "Iturbide et al., 2020 (Earth Syst. Sci. Data)"
@property
def _df(self):
if self.__df is None:
self.__df = read_remote_shapefile("IPCC-WGI-reference-regions-v1.zip")
return self.__df
@property
def _df_combined(self):
if self.__df_combined is None:
_df_combined = self._df.copy()
_df_combined = _combine_to_multipolygon(_df_combined, "V3", "SPO", "SPO*")
_df_combined = _combine_to_multipolygon(_df_combined, "V3", "EPO", "EPO*")
_df_combined = _combine_to_multipolygon(_df_combined, "V3", "NPO", "NPO*")
# make sure the index goes from 0 to n - 1
_df_combined = _df_combined.reset_index().drop("index", axis=1)
self.__df_combined = _df_combined
return self.__df_combined
@property
def all(self):
if self._all is None:
self._all = from_geopandas(
self._df_combined,
names="V2",
abbrevs="V3",
name=self._name,
source=self._source,
)
return self._all
@property
def land(self):
if self._land is None:
r = self.all[land]
r.name = self._name + " (land only)"
self._land = r
return self._land
@property
def ocean(self):
if self._ocean is None:
r = self.all[ocean]
r.name = self._name + " (ocean only)"
self._ocean = r
return self._ocean
@property
def separate_pacific(self):
if self._separate_pacific is None:
# need to fix the duplicates
df = self._df.copy()
df["V2"] = _enumerate_duplicates(df["V2"])
self._separate_pacific = from_geopandas(
df,
names="V2",
abbrevs="V3",
name=self._name + "(separate Pacific regions)",
source=self._source,
)
return self._separate_pacific
def __repr__(self): # pragma: no cover
return REPR
_ar6_pre_revisions = ar6_pre_revisions_cls()
| mit | 2,731,936,561,561,596,000 | 21.327014 | 86 | 0.544258 | false | 3.421206 | false | false | false |
emmanuel-santos/GEM | trucoGemSite/truco/views.py | 1 | 4850 | from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth import logout
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.http import HttpResponseRedirect, HttpResponse
from truco.forms import *
from truco.models import Partida, Jugador, Carta
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import login
from django.db.models import F, Count
def vista_principal(request):
if request.user.is_authenticated():
return hall(request)
else:
return login(request,template_name='login.html')
@login_required
def salir_partida(request, ident):
try:
partida = Partida.objects.get(id = ident)
except Partida.DoesNotExist:
return HttpResponseRedirect('/')
jugador = partida.jugador(request.user)
equipo = jugador.equipo
partida.equipos.exclude(id=equipo.id).update(puntos_partida=30)
partida.terminada = True
partida.save()
return HttpResponseRedirect('/')
def new_user(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = User.objects.create_user(form.cleaned_data['username'], '[email protected]', form.cleaned_data['password1'])
user.save()
return HttpResponseRedirect("volver")
else:
form = UserCreationForm()
return render(request, 'form.html', {'form':form})
def volver(request):
if request.method == 'POST':
return HttpResponseRedirect("/")
else:
return render(request, 'volver.html')
@login_required
def sala_input(request, ident, input):
try:
partida = Partida.objects.get(id = ident)
except Partida.DoesNotExist:
return HttpResponseRedirect('/')
jugador = partida.jugador(request.user)
partida.procesar_entrada(jugador,input)
return HttpResponseRedirect('/sala/'+ str(ident))
@login_required
def partida_refresh(request, ident):
return sala(request, ident, True)
@login_required
def sala(request, ident, refresh=False):
try:
partida = Partida.objects.get(id = ident)
except Partida.DoesNotExist:
return HttpResponseRedirect('/')
jugador = partida.jugador(request.user)
# Si hay slots vacios, el usuario los ocupa.
if jugador == None:
if not partida.esta_llena():
partida.sumar_jugador(request.user)
jugador = partida.jugador(request.user)
# Elaborado de la respuesta
resp = partida.mostrar_partida(jugador)
# Form para mentir
if request.method == 'POST':
form = MentirForm(request.POST)
if form.is_valid():
jugador.puntos_cantados = form.cleaned_data['Puntos']
jugador.save()
partida.ultima_ronda.accion(jugador, 'mentir')
return HttpResponseRedirect("/sala/" + str(ident))
else:
form = MentirForm()
resp.update({'form':form})
# Elegir template
template = resp["template"] if refresh else "sala.html"
return render(request, template, resp)
@login_required
def nueva_sala(request):
if request.method == 'POST':
form = NuevaSalaForm(request.POST)
if form.is_valid():
sala = Partida()
sala.nombre=form.cleaned_data['Nombre']
sala.puntos_max=form.cleaned_data['Puntos']
sala.cantidad_jugadores=form.cleaned_data['num_jug']
sala.save()
return HttpResponseRedirect("sala/" + str(sala.id))
else:
form = NuevaSalaForm()
return render(request, 'form.html', {'form':form})
@login_required
def hall(request):
resp = {
'hay_salas' : Partida.objects.all().count() != 0,
'lista_salas' : Partida.objects.filter(terminada=False),
}
return render(request, 'hall.html', resp)
@login_required
def hall_select(request, categoria):
categorias = {
'0' : Partida.objects.filter(terminada=False),
'1' : Partida.objects.annotate(Count('jugadores')).filter(jugadores__count__lt=2),
'2' : Partida.objects.all(),
}
return render(request, 'hall_content.html', {'lista_salas':categorias[categoria]})
@login_required
def user(request,id):
partidas = Partida.objects.annotate(Count('jugadores')).annotate(Count('rondas')).filter(jugadores__user__id=id).exclude(rondas__count=0)
stats = {
'totales' : partidas.count(),
'ganadas' : Jugador.objects.filter(equipo__puntos_partida__gte=F('partida__puntos_max'),user__id=id).count(),
'jugando' : partidas.filter(terminada=False).count(),
'partidas' : partidas,
'pageuser' : User.objects.get(id=id),
}
stats['perdidas'] = stats['totales'] - (stats['ganadas'] + stats['jugando'])
return render(request, 'usuario.html', stats)
| mit | -8,096,884,938,365,961,000 | 32.680556 | 141 | 0.66 | false | 3.303815 | false | false | false |
leetmaa/KMCLib | python/unittest/KMCLibTest/Utilities/CheckUtilitiesTest.py | 1 | 13535 | """ Module for testing the common checking utilities. """
# Copyright (c) 2012-2014 Mikael Leetmaa
#
# This file is part of the KMCLib project distributed under the terms of the
# GNU General Public License version 3, see <http://www.gnu.org/licenses/>.
#
import unittest
import numpy
from KMCLib.Exceptions.Error import Error
# Import from the module we test.
from KMCLib.Utilities.CheckUtilities import checkCoordinateList
from KMCLib.Utilities.CheckUtilities import checkIndexWithinBounds
from KMCLib.Utilities.CheckUtilities import checkSequence
from KMCLib.Utilities.CheckUtilities import checkSequenceOf
from KMCLib.Utilities.CheckUtilities import checkSequenceOfPositiveIntegers
from KMCLib.Utilities.CheckUtilities import checkTypes
from KMCLib.Utilities.CheckUtilities import checkCellVectors
from KMCLib.Utilities.CheckUtilities import checkPositiveInteger
from KMCLib.Utilities.CheckUtilities import checkPositiveFloat
from KMCLib.Utilities.CheckUtilities import checkAndNormaliseBucketEntry
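# A minimal usage sketch of the utilities under test (the return values
# indicated here are assumptions taken from the assertions below, not
# official API documentation): each check function validates its input and
# returns it back - possibly normalised - or raises Error on bad input, e.g.
#
#   coordinates = checkCoordinateList([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
#   index       = checkIndexWithinBounds(1, [10, 20, 30])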
# Implement the test.
class CheckUtilitiesTest(unittest.TestCase):
""" Class for testing the checking utility functions. """
def testCheckCoordinateList(self):
""" Test the coordinate list checking function. """
        # Set up some valid coordinates.
valid_coordinates = [[1.0,2.0,3.4],[3.0,3.0,3.5]]
# Make sure they pass the check.
checked_coords = checkCoordinateList(valid_coordinates)
self.assertAlmostEqual( numpy.linalg.norm(valid_coordinates-checked_coords), 0.0, 10)
# Again, with numpy.
valid_coordinates = numpy.array(valid_coordinates)
checked_coords = checkCoordinateList(valid_coordinates)
self.assertAlmostEqual( numpy.linalg.norm(valid_coordinates-checked_coords), 0.0, 10)
# Test some things that fails.
# Wrong type.
invalid_coordinates = [[1.0,1.0,1.0],[1.0,1.0,1]]
self.assertRaises(Error, lambda: checkCoordinateList(invalid_coordinates))
# Wrong type.
invalid_coordinates = "[[1.0,1.0,1.0],[1.0,1.0,1]]"
self.assertRaises(Error, lambda: checkCoordinateList(invalid_coordinates))
# Wrong size.
invalid_coordinates = [[1.0,2.0,3.4],[3.0,3.0],[3.0,3.0,3.5]]
self.assertRaises(Error, lambda: checkCoordinateList(invalid_coordinates))
invalid_coordinates = []
self.assertRaises(Error, lambda: checkCoordinateList(invalid_coordinates))
invalid_coordinates = [[1.0,2.0,3.4],[3.0,3.0,1.2],[3.0,3.0,3.5,32.3]]
self.assertRaises(Error, lambda: checkCoordinateList(invalid_coordinates))
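    def testCheckCoordinateListShape(self):
        """ Hedged sketch: the checked coordinates are assumed to come back
            with shape (N,3), consistent with the norm arithmetic used in
            testCheckCoordinateList above; this is not stated API behaviour. """
        checked_coords = checkCoordinateList([[1.0,2.0,3.4],[3.0,3.0,3.5]])
        # numpy.shape works for both nested lists and numpy arrays.
        self.assertEqual(numpy.shape(checked_coords), (2,3))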
def testCheckCellVectors(self):
""" Test the cell vector checking function. """
# The simplest possible vectors.
trial_vectors = [[1.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,1.0]]
numpy_vectors = numpy.array(trial_vectors)
# This must pass.
checked_vectors = checkCellVectors(trial_vectors)
self.assertAlmostEqual(numpy.linalg.norm(checked_vectors - numpy_vectors), 0.0, 10)
# This should also pass.
checked_vectors = checkCellVectors(numpy_vectors)
self.assertAlmostEqual(numpy.linalg.norm(checked_vectors - numpy_vectors), 0.0, 10)
# This should fail because of wrong format / shape.
trial_vectors = [[1.0,0.0,0.0],[0.0,1.0,0.0,0.0],[0.0,1.0]]
self.assertRaises(Error, lambda: checkCellVectors(trial_vectors))
# As well as this.
trial_vectors = [1.0,0.0,0.0,0.0,1.0]
self.assertRaises(Error, lambda: checkCellVectors(trial_vectors))
# This should also fail, because of wrong shape.
trial_vectors = numpy.array([[1.0,0.0,0.0],[0.0,1.0,0.0,0.0],[0.0,1.0]])
self.assertRaises(Error, lambda: checkCellVectors(trial_vectors))
# This should fail because of wrong type.
trial_vectors = "ABC"
self.assertRaises(Error, lambda: checkCellVectors(trial_vectors))
trial_vectors = numpy.array([[1,0,0],[0,1,0],[0,0,1]])
self.assertRaises(Error, lambda: checkCellVectors(trial_vectors))
# These should fail because of linear dependencies.
trial_vectors = [[1.0,0.0,0.0],[0.0,1.0,2.0],[0.5,0.5,1.000001]]
self.assertRaises(Error, lambda: checkCellVectors(trial_vectors))
trial_vectors = [[1.0,0.0,0.0],[0.5,0.5,1.0],[0.0,1.0,2.0]]
self.assertRaises(Error, lambda: checkCellVectors(trial_vectors))
def testCheckIndexWithinBounds(self):
""" Test the index within bounds checking function. """
        # Should pass.
index = 0
list = [0,12,'a',21.2]
checked_index = checkIndexWithinBounds(index, list)
self.assertEqual(checked_index, index)
# This should also pass.
index = 4
list = "ABCDE"
checked_index = checkIndexWithinBounds(index, list)
self.assertEqual(checked_index, index)
# And this.
index = 1
list = numpy.array([[12.0,1.3],[1.,4.3]])
checked_index = checkIndexWithinBounds(index, list)
self.assertEqual(checked_index, index)
# Should fail - index not within bounds.
index = -1
self.assertRaises(Error, lambda: checkIndexWithinBounds(index, list))
index = 2
self.assertRaises(Error, lambda: checkIndexWithinBounds(index, list))
# Catch and check the error.
msg = "Custom Error msg."
try:
checkIndexWithinBounds(index, list, msg)
except Error as e:
error_msg = str(e)
self.assertEqual(error_msg, msg)
def testCheckSequence(self):
""" Test that the sequence checking works. """
# This is a sequence.
sequence = [1,2,3,'a',12.3, "ABC"]
checked_sequence = checkSequence(sequence)
self.assertEqual(checked_sequence, sequence)
# This also.
sequence = numpy.array([[12.0,1.3],[1.,4.3]])
checked_sequence = checkSequence(sequence)
self.assertAlmostEqual( numpy.linalg.norm(checked_sequence-sequence), 0.0, 10)
# And these.
sequence = "A"
checked_sequence = checkSequence(sequence)
self.assertEqual(checked_sequence, sequence)
sequence = []
checked_sequence = checkSequence(sequence)
self.assertEqual(checked_sequence, sequence)
# But this is not.
sequence = 1
self.assertRaises(Error, lambda: checkSequence(sequence))
def testCheckSequenceOfPositiveIntegers(self):
""" Test that the positive integer sequence checking works. """
# This is a valid.
sequence = [1,2,3,12]
checked_sequence = checkSequenceOfPositiveIntegers(sequence)
self.assertEqual(checked_sequence, sequence)
# This is not.
sequence = numpy.array([[1,1],[1,4]])
self.assertRaises(Error, lambda: checkSequenceOfPositiveIntegers(sequence))
# This is not.
sequence = [1,2,-4]
self.assertRaises(Error, lambda: checkSequenceOfPositiveIntegers(sequence))
# And this is.
sequence = numpy.array([1,1,1,4])
checked_sequence = checkSequenceOfPositiveIntegers(sequence)
self.assertAlmostEqual( numpy.linalg.norm(checked_sequence-sequence), 0.0, 10)
# But this is not.
sequence = [1.0,2.0,0.0]
self.assertRaises(Error, lambda: checkSequenceOfPositiveIntegers(sequence))
def testCheckSequenceOf(self):
""" Test that the general sequence checking works. """
# Make a classes to check.
class Dummy:
def __init__(self):
pass
class Dummy2:
def __init__(self):
pass
        # Set up a valid sequence.
sequence = [Dummy(),Dummy(), Dummy()]
checked_sequence = checkSequenceOf(sequence, Dummy, msg="Error error.")
# Check that it passes the test.
self.assertEqual(checked_sequence, sequence)
        # Set up an invalid sequence.
sequence = [Dummy(),Dummy2(), Dummy()]
# Check that it does not pass.
self.assertRaises(Error,
lambda : checkSequenceOf(sequence, Dummy))
        # Set up a sequence containing classes (not instances).
sequence = [Dummy(), Dummy, Dummy2]
self.assertRaises(Error,
lambda : checkSequenceOf(sequence, Dummy))
sequence = [Dummy]
self.assertRaises(Error,
lambda : checkSequenceOf(sequence, Dummy))
def testCheckTypes(self):
""" Test that the types checking works. """
# This is a valid types list.
types = ['A','a', """A""", "ABC"]
size = 4
checked_types = checkTypes(types, size)
self.assertEqual(checked_types, types)
# Wrong size.
size = 3
self.assertRaises(Error, lambda: checkTypes(types,size))
# Mixed types.
types = ['A','a', """A""", 2]
self.assertRaises(Error, lambda: checkTypes(types,size))
# Not a list.
types = "ABCDEfgH"
self.assertRaises(Error, lambda: checkTypes(types,8))
def testCheckPositiveInteger(self):
""" Test that the positive integer checking works. """
# Test pass.
integer0 = checkPositiveInteger(21, 1234, "integer0")
self.assertEqual(integer0, 21)
integer0 = checkPositiveInteger(0, 1234, "integer0")
self.assertEqual(integer0, 0)
# Test default.
integer0 = checkPositiveInteger(None, 1234, "integer0")
self.assertEqual(integer0, 1234)
# Test fail negative.
self.assertRaises( Error,
lambda: checkPositiveInteger(-1, 12, "fail") )
# Test fail wrong type.
self.assertRaises( Error,
lambda: checkPositiveInteger(1.1, 12, "fail") )
# Test fail wrong type.
self.assertRaises( Error,
lambda: checkPositiveInteger("1", 12, "fail") )
def testCheckPositiveFloat(self):
""" Test that the positive float checking works. """
# Test pass.
float0 = checkPositiveFloat(21.0, 1.234, "float0")
self.assertEqual(float0, 21.0)
float0 = checkPositiveFloat(0.0, 1.234, "float0")
self.assertEqual(float0, 0.0)
# Test default.
float0 = checkPositiveFloat(None, 1.234, "float0")
self.assertEqual(float0, 1.234)
# Test fail negative.
self.assertRaises( Error,
lambda: checkPositiveFloat(-1.0, 1.2, "fail") )
# Test fail wrong type.
self.assertRaises( Error,
lambda: checkPositiveFloat(1, 1.2, "fail") )
# Test fail wrong type.
self.assertRaises( Error,
lambda: checkPositiveFloat("1.1", 1.2, "fail") )
def testCheckAndNormaliseBucketEntry(self):
""" Check normalization of types information in the bucket format. """
# One A.
t = checkAndNormaliseBucketEntry("A")
self.assertEqual(t, [(1, "A")])
t = checkAndNormaliseBucketEntry(["A"])
self.assertEqual(t, [(1, "A")])
t = checkAndNormaliseBucketEntry([(1, "A")])
self.assertEqual(t, [(1, "A")])
# Two A.
t = checkAndNormaliseBucketEntry(["A", "A"])
self.assertEqual(t, [(2, "A")])
t = checkAndNormaliseBucketEntry((2, "A"))
self.assertEqual(t, [(2, "A")])
t = checkAndNormaliseBucketEntry([(2, "A")])
self.assertEqual(t, [(2, "A")])
t = checkAndNormaliseBucketEntry(["A", (1, "A")])
self.assertEqual(t, [(2, "A")])
t = checkAndNormaliseBucketEntry([(1, "A"), (1, "A")])
self.assertEqual(t, [(2, "A")])
# Three A.
t = checkAndNormaliseBucketEntry(["A", "A", "A"])
self.assertEqual(t, [(3, "A")])
t = checkAndNormaliseBucketEntry((3, "A"))
self.assertEqual(t, [(3, "A")])
t = checkAndNormaliseBucketEntry([(3, "A")])
self.assertEqual(t, [(3, "A")])
t = checkAndNormaliseBucketEntry(["A", (2, "A")])
self.assertEqual(t, [(3, "A")])
t = checkAndNormaliseBucketEntry(["A", (1, "A"), "A"])
self.assertEqual(t, [(3, "A")])
t = checkAndNormaliseBucketEntry([(2, "A"), (1, "A")])
self.assertEqual(t, [(3, "A")])
# Three A, four B.
t = checkAndNormaliseBucketEntry(["B", "B", "A", "A", "A", "B", "B"])
self.assertEqual(t, [(4, "B"), (3, "A")])
t = checkAndNormaliseBucketEntry(["A", "B", "B", "A", "A", "B", "B"])
self.assertEqual(t, [(3, "A"), (4, "B")])
t = checkAndNormaliseBucketEntry([(2, "B"), (3, "A"), "B", (1, "B")])
self.assertEqual(t, [(4, "B"), (3, "A")])
# Three A, four B, five C.
t = checkAndNormaliseBucketEntry(["B", "B", (2, "C"), "A", "A", "A", "B", "B", (2, "C"), "C"])
self.assertEqual(t, [(4, "B"), (5, "C"), (3, "A")])
# Wrong format.
self.assertRaises( Error, lambda: checkAndNormaliseBucketEntry(["B", "B", (2, "C", "C")]) )
self.assertRaises( Error, lambda: checkAndNormaliseBucketEntry(["B", 3, (7, "C")]) )
self.assertRaises( Error, lambda: checkAndNormaliseBucketEntry((7, "C", "C")) )
self.assertRaises( Error, lambda: checkAndNormaliseBucketEntry(("C", "C")) )
self.assertRaises( Error, lambda: checkAndNormaliseBucketEntry((7, 7)) )
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -2,279,521,522,377,841,000 | 36.389503 | 102 | 0.606206 | false | 3.715344 | true | false | false |
kdyq007/cmdb-api | core/ci_relation.py | 1 | 2400 | # -*- coding:utf-8 -*-
from flask import Blueprint
from flask import jsonify
from flask import request
from lib.ci import CIRelationManager
from lib.utils import get_page
from lib.utils import get_per_page
from lib.auth import auth_with_key
cirelation = Blueprint("cirelation", __name__)
@cirelation.route("/types", methods=["GET"])
def get_types():
manager = CIRelationManager()
return jsonify(relation_types=manager.relation_types)
@cirelation.route("/<int:first_ci>/second_cis", methods=["GET"])
def get_second_cis_by_first_ci(first_ci=None):
page = get_page(request.values.get("page", 1))
count = get_per_page(request.values.get("count"))
relation_type = request.values.get("relation_type", "contain")
manager = CIRelationManager()
numfound, total, second_cis = manager.get_second_cis(
first_ci, page=page, per_page=count, relation_type=relation_type)
return jsonify(numfound=numfound, total=total,
page=page, second_cis=second_cis)
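# Illustrative request/response shape (the URL prefix depends on how this
# blueprint is registered, so it is assumed here):
#   GET /cirelation/1/second_cis?page=1&count=25&relation_type=contain
#   -> {"numfound": ..., "total": ..., "page": 1, "second_cis": [...]}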
@cirelation.route("/<int:second_ci>/first_cis", methods=["GET"])
def get_first_cis_by_second_ci(second_ci=None):
page = get_page(request.values.get("page", 1))
count = get_per_page(request.values.get("count"))
relation_type = request.values.get("relation_type", "contain")
manager = CIRelationManager()
numfound, total, first_cis = manager.get_first_cis(
second_ci, per_page=count, page=page, relation_type=relation_type)
return jsonify(numfound=numfound, total=total,
page=page, first_cis=first_cis)
@cirelation.route("/<int:first_ci>/<int:second_ci>", methods=["POST"])
@auth_with_key
def create_ci_relation(first_ci=None, second_ci=None):
relation_type = request.values.get("relation_type", "contain")
manager = CIRelationManager()
res = manager.add(first_ci, second_ci, relation_type=relation_type)
return jsonify(cr_id=res)
@cirelation.route("/<int:cr_id>", methods=["DELETE"])
@auth_with_key
def delete_ci_relation(cr_id=None):
manager = CIRelationManager()
manager.delete(cr_id)
return jsonify(message="CIType Relation is deleted")
@cirelation.route("/<int:first_ci>/<int:second_ci>", methods=["DELETE"])
@auth_with_key
def delete_ci_relation_2(first_ci, second_ci):
manager = CIRelationManager()
manager.delete_2(first_ci, second_ci)
return jsonify(message="CIType Relation is deleted") | gpl-2.0 | -2,730,147,931,960,538,000 | 33.3 | 74 | 0.6975 | false | 3.230148 | false | false | false |
JianboTang/modified_GroundHog | fork_process/dataPreprocess/result_analysis/analysis_2.py | 1 | 3000 | import numpy
import pickle
readcmnt = open('../used/test/cmnt_inside.txt','r')
readtran = open('../used/test/cmnt_outside.txt','r');
def preprocess(line):
lline = list(line.decode("utf-8"));
lline = [x for x in lline if x != u' ']
del lline[-1]
return lline
def static(dictionary,lline):
for i in xrange(len(lline) - 1):
if lline[i] in dictionary:
if lline[i + 1] in dictionary[lline[i]]:
dictionary[lline[i]][lline[i + 1]] += 1
else:
dictionary[lline[i]][lline[i + 1]] = 1;
else:
dictionary[lline[i]] = {}
dictionary[lline[i]][lline[i + 1]] = 1;
return dictionary,len(lline)
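# Illustrative example of the per-line bigram counts built above: the
# character list [u'a', u'b', u'a', u'b'] yields
# {u'a': {u'b': 2}, u'b': {u'a': 1}} together with the line length 4.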
def fileStatic(fileHandle,count):
statDict = {}
number = 0;
i = 0;
while i < count:
line = fileHandle.readline();
if not line:
print "touch the end of file"
break
statDict,temp = static(statDict,preprocess(line))
number += temp
i += 1
print "total number is : ",number
return statDict
def extractDict(dict1,dict2):
common = [];
dict_x = []
dict_y = []
for x in dict1:
for y in dict1[x]:
if x in dict2 and y in dict2[x]:
if x not in dict_x:
dict_x.append(x)
if y not in dict_y:
dict_y.append(y)
common.append([x,y])
matrix1 = numpy.zeros((len(dict_x),len(dict_y)));
matrix2 = numpy.zeros((len(dict_x),len(dict_y)));
for i,x in enumerate(dict_x):
for j,y in enumerate(dict_y):
if x in dict1 and y in dict1[x]:
matrix1[i,j] = dict1[x][y];
            if x in dict2 and y in dict2[x]:
matrix2[i,j] = dict2[x][y];
return matrix1,matrix2
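# Row-wise cosine similarity between the two count matrices: for each first
# character, compare its successor-count rows from the two corpora (values
# near 1.0 mean near-identical successor distributions).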
def similarityMatrix(matrix1,matrix2):
similar = numpy.zeros(matrix1.shape[0])
for i in xrange(matrix1.shape[0]):
temp = numpy.zeros(1);
temp1 = numpy.zeros(1);
temp2 = numpy.zeros(1);
for j in xrange(matrix1.shape[1]):
temp += matrix1[i,j] * matrix2[i,j];
temp1 += matrix1[i,j] ** 2;
temp2 += matrix2[i,j] ** 2;
similar[i] = temp / (numpy.sqrt(temp1) * numpy.sqrt(temp2));
return similar
def main(count):
cmnt_dict = fileStatic(readcmnt,count);
tran_dict = fileStatic(readtran,count);
matrix1,matrix2 = extractDict(cmnt_dict,tran_dict);
# writeMatrix = open('matrix.pkl','w');
# pickle.dump(matrix1,writeMatrix);
# pickle.dump(matrix2,writeMatrix);
# writeMatrix.close();
# readMatrix = open('matrix.pkl','r');
# matrix1 = pickle.load(readMatrix)
# matrix2 = pickle.load(readMatrix);
similar = similarityMatrix(matrix1,matrix2);
print sum(matrix1)
print sum(matrix2)
print float(sum(similar >= 0.8)) / float(len(similar))
print float(sum(similar >= 0.5)) / float(len(similar))
if __name__ == '__main__':
main(1000000);
| bsd-3-clause | -4,999,212,649,414,244,000 | 31.967033 | 68 | 0.553 | false | 3.239741 | false | false | false |
ddw/python-tdl | examples/life.py | 1 | 5907 | #!/usr/bin/env python
import random
import time
import tdl
WIDTH = 80
HEIGHT = 40
class LifeBoard():
def __init__(self, width, height):
self.width = width
self.height = height
self.live_cells = set()
self.wrap = True
def set(self, x, y, value):
if value:
self.live_cells.add((x, y))
else:
self.live_cells.discard((x, y))
def set_batch(self, x, y, batch):
for y_, line in enumerate(batch):
for x_, char in enumerate(line):
self.set(x + x_, y + y_, char != ' ')
def get(self, x, y):
if(self.wrap is False
and not (0 <= x < self.width and 0 <= y < self.height)):
return False
return (x % self.width, y % self.height) in self.live_cells
def clear(self):
self.live_cells.clear()
def toggle(self, x, y):
self.live_cells.symmetric_difference_update([(x, y)])
def wrap_edges(self):
for x in range(-1, self.width + 1):
self.set(x, -1, self.get(x, -1))
self.set(x, self.height, self.get(x, self.height))
for y in range(self.height):
self.set(-1, y, self.get(-1, y))
self.set(self.width, y, self.get(self.width, y))
def get_neighbours(self, x, y):
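        # Count live neighbours by intersecting the live-cell set with the
        # eight coordinates surrounding (x, y).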
return len(self.live_cells & {(x - 1, y - 1), (x, y - 1),
(x + 1,y - 1), (x + 1, y),
(x + 1, y + 1), (x, y + 1),
(x - 1, y + 1), (x - 1, y)})
def rule(self, is_alive, neighbours):
"""
1. Any live cell with fewer than two live neighbours dies, as if caused
by under-population.
2. Any live cell with two or three live neighbours lives on to the next
generation.
3. Any live cell with more than three live neighbours dies, as if by
overcrowding.
4. Any dead cell with exactly three live neighbours becomes a live
cell, as if by reproduction.
"""
if is_alive:
return 2 <= neighbours <= 3
else:
return neighbours == 3
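    # Illustrative values: rule(True, 1) -> False (under-population),
    # rule(True, 2) and rule(True, 3) -> True, rule(True, 4) -> False
    # (overcrowding), and rule(False, 3) -> True (reproduction).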
def step(self):
self.wrap_edges()
next_generation = set()
for x in range(self.width):
for y in range(self.height):
if self.rule(self.get(x, y), self.get_neighbours(x, y)):
next_generation.add((x, y))
self.live_cells = next_generation
def main():
console = tdl.init(WIDTH, HEIGHT)
board = LifeBoard(WIDTH, HEIGHT - 1)
# The R-pentomino
#board.set_batch(WIDTH // 2 - 2,HEIGHT // 2 - 2,
# [' **',
# '** ',
# ' * '])
# Diehard
#board.set_batch(WIDTH // 2 - 5,HEIGHT // 2 - 2,
# [' * ',
# '** ',
# ' * ***'])
# Gosper glider gun
board.set_batch(1, 1,
[' ',
' * ',
' * * ',
' ** ** **',
' * * ** **',
'** * * ** ',
'** * * ** * * ',
' * * * ',
' * * ',
' ** '])
play = False
redraw = True
mouse_drawing = None
mouse_x = -1
mouse_y = -1
while True:
for event in tdl.event.get():
if event.type == 'QUIT':
return
elif event.type == 'KEYDOWN':
if event.key == 'SPACE':
play = not play
redraw = True
elif event.char.upper() == 'S':
board.step()
redraw = True
elif event.char.upper() == 'C':
board.clear()
redraw = True
elif event.char.upper() == 'W':
board.wrap = not board.wrap
redraw = True
elif event.type == 'MOUSEDOWN':
x, y, = event.cell
board.toggle(x, y)
mouse_drawing = event.cell
redraw = True
elif event.type == 'MOUSEUP':
mouse_drawing = None
elif event.type == 'MOUSEMOTION':
if(mouse_drawing and mouse_drawing != event.cell):
x, y = mouse_drawing = event.cell
board.toggle(x, y)
mouse_x, mouse_y = event.cell
redraw = True
if play and mouse_drawing is None:
board.step()
redraw = True
if redraw:
redraw = False
console.clear()
for x, y in board.live_cells:
console.draw_char(x, y, '*')
console.draw_rect(0, -1, None, None, None, bg=(64, 64, 80))
console.draw_str(0, -1, "Mouse:Toggle Cells, Space:%5s, [S]tep, [C]lear, [W]rap Turn %s" % (['Play', 'Pause'][play], ['On', 'Off'][board.wrap]), None, None)
if (mouse_x, mouse_y) in console:
console.draw_char(mouse_x, mouse_y,
None, (0, 0, 0), (255, 255, 255))
else:
time.sleep(0.01)
tdl.flush()
tdl.set_title("Conway's Game of Life - %i FPS" % tdl.get_fps())
if __name__ == '__main__':
main()
| bsd-2-clause | 8,701,432,477,316,274,000 | 34.371257 | 168 | 0.402912 | false | 3.951171 | false | false | false |
vinay-qa/vinayit-android-server-apk | py/test/selenium/webdriver/common/proxy_tests.py | 1 | 1561 | #!/usr/bin/python
# Copyright 2012 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS.
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from selenium.webdriver.common.proxy import Proxy
class ProxyTests(unittest.TestCase):
def testCanAddToDesiredCapabilities(self):
desired_capabilities = {}
proxy = Proxy()
proxy.http_proxy = 'some.url:1234'
proxy.ftp_proxy = 'ftp.proxy:1234'
proxy.no_proxy = 'localhost, foo.localhost'
proxy.sslProxy = 'ssl.proxy:1234'
proxy.autodetect = 'True'
proxy.add_to_capabilities(desired_capabilities)
expected_capabilities = {
'proxy': {
'proxyType': 'MANUAL',
'httpProxy': 'some.url:1234',
'ftpProxy': 'ftp.proxy:1234',
'noProxy': 'localhost, foo.localhost',
'sslProxy': 'ssl.proxy:1234',
'autodetect': 'True'
}
}
self.assertEqual(expected_capabilities, desired_capabilities)
| apache-2.0 | 6,617,160,217,035,852,000 | 32.934783 | 74 | 0.650865 | false | 4.107895 | false | false | false |
shiina/invariant-object-recognition | gabor.py | 1 | 4221 | import numpy as np
import matplotlib.pyplot as pl
import Image
import scipy.signal as sg
#some variable initializations
#resolution of gabor filter
resolution = 1.
#size of gabor filter
gsize = 30
#Number of gabor filter orientations with cosine in the gabor bank
N_Greal = 8
#Number of gabor filter orientations with sine in the gabor bank
N_Gimag = 0
#number of different wave vectors in the gabor bank
N_Size = 8
#total number of gabor filters
N_Gabor = N_Greal*N_Size+N_Gimag*N_Size
# return 2D Gabor Filter with cosine. Uses multivariate Gaussian with standard deviations "sigmax" and "sigmay" and has a mean of 0. Cosine has wave vector "k", phase "phi and is rotated around angle "theta". Filter has "size" as size with resolution "res".
def Gabor_real(size, sigmax, sigmay, k, phi, theta, res):
x,y = np.mgrid[-size/2:size/2:res,-size/2:size/2:res]
xrot = x*np.cos(theta) + y*np.sin(theta)
return (1/(2.*np.pi*sigmax*sigmay))*np.exp(-(x**2/(2.0*sigmax**2))-(y**2/(2.0*sigmay**2)))*np.cos((k*xrot)-phi)
# return 2D Gabor Filter with sine. Uses multivariate Gaussian with standard deviations "sigmax" and "sigmay" and has a mean of 0. Sine has wave vector "k", phase "phi and is rotated around angle "theta". Filter has "size" as size with resolution "res".
def Gabor_imag(size, sigmax, sigmay, k, phi, theta, res):
# return 2D Gabor Filter
x,y = np.mgrid[-size/2:size/2:res,-size/2:size/2:res]
xrot = x*np.cos(theta) + y*np.sin(theta)
return (1/(2.*np.pi*sigmax*sigmay))*np.exp(-(x**2/(2.0*sigmax**2))-(y**2/(2.0*sigmay**2)))*np.sin((k*xrot)-phi)
# return gabor bank of "n_real" cosine gabor filters and "n_imag" sine gabor filters with "n_size" wave vektors and size "size" and resolution "res". returns array of gabor filters with shape (N_Gabor,int(size/res),int(size/res) such that gabor_bank[i] is the i-th gabor filter. gabor_bank[0:nsize*n_real] contains the real gabor filters where gabor_bank[0:n_real] contains n_real differently sized filters of the same orientation and so on. gabor_bank[nsize*n_real:nsize*(n_real+n_imag)] contains the imaginary gabor filters.
def Gabor_Bank(n_real, n_imag, n_size, size, res):
#total number of gabor filters in the gabor bank
N_Gabor = n_real*n_size+n_imag*n_size
gabor_bank = np.zeros((N_Gabor,int(size/res),int(size/res)))
for i in range(n_real):
for j in range(n_size):
gabor_bank[i*n_size+j] = Gabor_real(size,j/4.+1/2.,j/4.+1/2.,n_size/2.+1-j/2.,0,i*np.pi/n_real,res)
for i in range(n_imag):
for j in range(n_size):
gabor_bank[i*n_size+j+n_real*n_size] = Gabor_imag(size,j/4.+1/4.,j/4.+1/4.,n_size/2.+1-j/2.,0,i*2*np.pi/n_imag,res)
return gabor_bank
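#example usage (a sketch kept as comments so importing this module stays
#side-effect free):
#  bank = Gabor_Bank(N_Greal, N_Gimag, N_Size, gsize, resolution)
#  Gabor_Plot(bank, 0)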
#plots the "N"-th gabor filter together with the next three sizes (assumes at least four sizes per orientation)
def Gabor_Plot(gabor_bank,N):
f,ar = pl.subplots(2,2)
ar[0,0].imshow(gabor_bank[N+0])
ar[0,1].imshow(gabor_bank[N+1])
ar[1,0].imshow(gabor_bank[N+2])
ar[1,1].imshow(gabor_bank[N+3])
f.show()
#reads png image with name "image_name". returns a 2D numpy array
def Read_Image(img_name):
img = Image.open(img_name).convert('LA')
    img = np.array(img)
    # 'LA' mode gives a (height, width, 2) array; keep only the luminance
    # channel so this returns the 2D array promised above (as needed by
    # imshow(cmap='gray') and convolve2d)
    img = img[:, :, 0]
    return img
#plots image after reading. also plots convolved image if given cimg[i] as argument
def Plot_Image(img):
pl.figure()
pl.imshow(img,cmap='gray')
pl.show()
#convolve data
def Convolve_Data(img,gabor_bank):
cimg = np.zeros((gabor_bank.shape[0],gabor_bank.shape[1]+img.shape[0]-1,gabor_bank.shape[2]+img.shape[1]-1))
for i in range(gabor_bank.shape[0]):
cimg[i]=sg.convolve2d(img, gabor_bank[i])
#return status of convolution (since it is soo slow)
print N_Gabor, i
return cimg
#write "data" into "filename". checks data after writing with assertion.
def Write_Data(data,filename):
with file(filename, 'w') as outfile:
for i in range(data.shape[0]):
np.savetxt(outfile, data[i])
new_data = np.loadtxt(filename)
new_data = new_data.reshape((data.shape[0],data.shape[1],data.shape[2]))
assert np.all(new_data == data)
def Read_Img_Database():
    # COIL-100 has 72 views per object, one every 5 degrees (an assumption
    # about the dataset layout; adjust the step if the files differ)
    images = []
    for i in range(1, 101):
        for j in range(0, 356, 5):
            images.append(Read_Image("coil-100/obj" + str(i) + "__" + str(j) + ".png"))
    return images
img=Read_Image('coil-100/obj1__100.png')
Plot_Image(img)
| lgpl-3.0 | 2,869,530,535,805,990,400 | 43.904255 | 526 | 0.695096 | false | 2.485866 | false | false | false |
googleapis/googleapis-gen | google/cloud/notebooks/v1beta1/notebooks-v1beta1-py/google/cloud/notebooks_v1beta1/services/notebook_service/pagers.py | 1 | 10947 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.notebooks_v1beta1.types import environment
from google.cloud.notebooks_v1beta1.types import instance
from google.cloud.notebooks_v1beta1.types import service
class ListInstancesPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.notebooks_v1beta1.types.ListInstancesResponse` object, and
provides an ``__iter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.notebooks_v1beta1.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., service.ListInstancesResponse],
request: service.ListInstancesRequest,
response: service.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.notebooks_v1beta1.types.ListInstancesRequest):
The initial request object.
response (google.cloud.notebooks_v1beta1.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[service.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[instance.Instance]:
for page in self.pages:
yield from page.instances
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
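# Typical usage (a sketch; the client method shown is assumed, not defined in
# this module):
#   pager = client.list_instances(request=service.ListInstancesRequest(...))
#   for inst in pager:  # transparently fetches further pages as needed
#       print(inst.name)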
class ListInstancesAsyncPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.notebooks_v1beta1.types.ListInstancesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.notebooks_v1beta1.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[service.ListInstancesResponse]],
request: service.ListInstancesRequest,
response: service.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.notebooks_v1beta1.types.ListInstancesRequest):
The initial request object.
response (google.cloud.notebooks_v1beta1.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[service.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[instance.Instance]:
async def async_generator():
async for page in self.pages:
for response in page.instances:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListEnvironmentsPager:
"""A pager for iterating through ``list_environments`` requests.
This class thinly wraps an initial
:class:`google.cloud.notebooks_v1beta1.types.ListEnvironmentsResponse` object, and
provides an ``__iter__`` method to iterate through its
``environments`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListEnvironments`` requests and continue to iterate
through the ``environments`` field on the
corresponding responses.
All the usual :class:`google.cloud.notebooks_v1beta1.types.ListEnvironmentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., service.ListEnvironmentsResponse],
request: service.ListEnvironmentsRequest,
response: service.ListEnvironmentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.notebooks_v1beta1.types.ListEnvironmentsRequest):
The initial request object.
response (google.cloud.notebooks_v1beta1.types.ListEnvironmentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListEnvironmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[service.ListEnvironmentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[environment.Environment]:
for page in self.pages:
yield from page.environments
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListEnvironmentsAsyncPager:
"""A pager for iterating through ``list_environments`` requests.
This class thinly wraps an initial
:class:`google.cloud.notebooks_v1beta1.types.ListEnvironmentsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``environments`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListEnvironments`` requests and continue to iterate
through the ``environments`` field on the
corresponding responses.
All the usual :class:`google.cloud.notebooks_v1beta1.types.ListEnvironmentsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[service.ListEnvironmentsResponse]],
request: service.ListEnvironmentsRequest,
response: service.ListEnvironmentsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.notebooks_v1beta1.types.ListEnvironmentsRequest):
The initial request object.
response (google.cloud.notebooks_v1beta1.types.ListEnvironmentsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListEnvironmentsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[service.ListEnvironmentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[environment.Environment]:
async def async_generator():
async for page in self.pages:
for response in page.environments:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| apache-2.0 | 7,427,238,236,461,898,000 | 40.465909 | 95 | 0.654791 | false | 4.56125 | false | false | false |
aggrent/cab | cab/forms.py | 1 | 3329 | from django import forms
from django.contrib import admin
from haystack.forms import SearchForm
from cab.models import Language, Snippet, SnippetFlag, VERSIONS
from registration.forms import RegistrationFormUniqueEmail
from taggit_autosuggest.widgets import TagAutoSuggest
from epiceditor.widgets import AdminEpicEditorWidget
from codemirror.widgets import CodeMirrorTextarea
haskell_textarea = CodeMirrorTextarea(mode="haskell", theme="ambiance", config={ 'fixedGutter': True, 'lineNumbers': False, 'viewportMargin': 5000 })
def validate_non_whitespace_only_string(value):
"""
    In addition to requiring a non-empty string, this validator also strips
    the value so that strings containing only whitespace count as empty.
"""
if not value or not value.strip():
raise forms.ValidationError(u'This field is required', code='required')
class SnippetForm(forms.ModelForm):
title = forms.CharField(
validators=[validate_non_whitespace_only_string])
description = forms.CharField(
validators=[validate_non_whitespace_only_string],
widget=AdminEpicEditorWidget)
code = forms.CharField(
validators=[validate_non_whitespace_only_string],
widget=haskell_textarea)
class Meta:
model = Snippet
exclude = ('author', 'bookmark_count', 'rating_score', 'version', 'language')
class SnippetFlagForm(forms.ModelForm):
class Meta:
model = SnippetFlag
fields = ('flag',)
class AdvancedSearchForm(SearchForm):
language = forms.ModelChoiceField(
queryset=Language.objects.all(), required=False)
version = forms.MultipleChoiceField(choices=VERSIONS, required=False)
minimum_pub_date = forms.DateTimeField(
widget=admin.widgets.AdminDateWidget, required=False)
minimum_bookmark_count = forms.IntegerField(required=False)
minimum_rating_score = forms.IntegerField(required=False)
def search(self):
# First, store the SearchQuerySet received from other processing.
sqs = super(AdvancedSearchForm, self).search()
if not self.is_valid():
return sqs
if self.cleaned_data['language']:
sqs = sqs.filter(language=self.cleaned_data['language'].name)
if self.cleaned_data['version']:
sqs = sqs.filter(
version__in=self.cleaned_data['version'])
if self.cleaned_data['minimum_pub_date']:
sqs = sqs.filter(
pub_date__gte=self.cleaned_data['minimum_pub_date'])
if self.cleaned_data['minimum_bookmark_count']:
sqs = sqs.filter(
bookmark_count__gte=self.cleaned_data['minimum_bookmark_count'])
if self.cleaned_data['minimum_rating_score']:
sqs = sqs.filter(
rating_score__gte=self.cleaned_data['minimum_rating_score'])
return sqs
class RegisterForm(RegistrationFormUniqueEmail):
your_name = forms.CharField(label='Your Name', required=False,
widget=forms.TextInput(attrs={
'autocomplete': 'off',
}))
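    # "your_name" acts as a honeypot: real users leave it blank, naive bots
    # fill it in, and clean() below rejects any submission that sets it.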
def clean(self):
if self.cleaned_data.get('your_name'):
raise forms.ValidationError('Please keep the Name field blank')
return self.cleaned_data
| bsd-3-clause | -2,306,287,639,573,231,000 | 35.184783 | 149 | 0.665966 | false | 4.176913 | false | false | false |
galaxor/Nodewatcher | generator/gennyd.py | 1 | 9945 | #!/usr/bin/python
#
# nodewatcher firmware generator daemon
#
# Copyright (C) 2009 by Jernej Kos <[email protected]>
#
# First parse options (this must be done here since they contain import paths
# that must be parsed before Django models can be imported)
import sys, os, re
from optparse import OptionParser
print "============================================================================"
print " nodewatcher firmware generator daemon "
print "============================================================================"
parser = OptionParser()
parser.add_option('--path', dest = 'path', help = 'Path that contains nodewatcher "web" Python module')
parser.add_option('--settings', dest = 'settings', help = 'Django settings to use')
parser.add_option('--destination', dest = 'destination', help = 'Firmware destination directory')
options, args = parser.parse_args()
if not options.path:
print "ERROR: Path specification is required!\n"
parser.print_help()
exit(1)
elif not options.settings:
print "ERROR: Settings specification is required!\n"
parser.print_help()
exit(1)
elif not options.destination:
print "ERROR: Firmware destination directory is required!\n"
parser.print_help()
exit(1)
# Setup import paths, since we are using Django models
sys.path.append(options.path)
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
# Django stuff
from django.core.mail import send_mail
from django.utils.translation import ugettext as _
from django.template import loader, Context
from django.conf import settings
# Other stuff
from beanstalk import serverconn
from beanstalk import job
from config_generator import OpenWrtConfig, portLayouts
import logging
import hashlib
from traceback import format_exc
import pwd
from zipfile import ZipFile, ZIP_DEFLATED
from base64 import urlsafe_b64encode
from glob import glob
WORKDIR = os.getcwd()
DESTINATION = options.destination
IMAGEBUILDERS = (
"imagebuilder.atheros",
"imagebuilder.brcm24",
"imagebuilder.broadcom",
"imagebuilder.ar71xx"
)
def no_unicodes(x):
"""
Converts all unicodes to str instances.
"""
for k, v in x.iteritems():
if isinstance(v, unicode):
x[k] = v.encode('utf8')
return x
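# Illustrative example: no_unicodes({u'ip': u'10.0.0.1'}) -> {'ip': '10.0.0.1'}
# with the value UTF-8 encoded (keys are left untouched).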
def generate_image(d):
"""
  Generates an image according to the given configuration.
"""
logging.debug(repr(d))
if d['imagebuilder'] not in IMAGEBUILDERS:
raise Exception("Invalid imagebuilder specified!")
x = OpenWrtConfig()
x.setUUID(d['uuid'])
x.setOpenwrtVersion(d['openwrt_ver'])
x.setArch(d['arch'])
x.setPortLayout(d['port_layout'])
x.setWifiIface(d['iface_wifi'], d['driver'], d['channel'])
x.setWifiAnt(d['rx_ant'], d['tx_ant'])
x.setLanIface(d['iface_lan'])
x.setNodeType("adhoc")
x.setPassword(d['root_pass'])
x.setHostname(d['hostname'])
x.setIp(d['ip'])
x.setSSID(d['ssid'])
# Add WAN interface and all subnets
if d['wan_dhcp']:
x.addInterface("wan", d['iface_wan'], init = True)
else:
x.addInterface("wan", d['iface_wan'], d['wan_ip'], d['wan_cidr'], d['wan_gw'], init = True)
for subnet in d['subnets']:
x.addSubnet(str(subnet['iface']), str(subnet['network']), subnet['cidr'], subnet['dhcp'], True)
x.setCaptivePortal(d['captive_portal'])
if d['vpn']:
x.setVpn(d['vpn_username'], d['vpn_password'], d['vpn_mac'], d['vpn_limit'])
if d['lan_wifi_bridge']:
x.enableLanWifiBridge()
if d['lan_wan_switch']:
x.switchWanToLan()
# Add optional packages
for package in d['opt_pkg']:
x.addPackage(package)
# Cleanup stuff from previous builds
os.chdir(WORKDIR)
os.system("rm -rf build/files/*")
os.system("rm -rf build/%s/bin/*" % d['imagebuilder'])
os.mkdir("build/files/etc")
x.generate("build/files/etc")
if d['only_config']:
# Just pack configuration and send it
prefix = hashlib.md5(os.urandom(32)).hexdigest()[0:16]
tempfile = os.path.join(DESTINATION, prefix + "-config.zip")
zip = ZipFile(tempfile, 'w', ZIP_DEFLATED)
os.chdir('build/files')
for root, dirs, files in os.walk("etc"):
for file in files:
zip.write(os.path.join(root, file))
zip.close()
# Generate checksum
f = open(tempfile, 'r')
checksum = hashlib.md5(f.read())
f.close()
    # We can take just the first 22 characters since checksums have a fixed size and can be reconstructed
filechecksum = urlsafe_b64encode(checksum.digest())[:22]
checksum = checksum.hexdigest()
result = "%s-%s-config-%s.zip" % (d['hostname'], d['router_name'], filechecksum)
destination = os.path.join(DESTINATION, result)
os.rename(tempfile, destination)
# Send an e-mail
t = loader.get_template('generator/email_config.txt')
c = Context({
'hostname' : d['hostname'],
'ip' : d['ip'],
'username' : d['vpn_username'],
'config' : result,
'checksum' : checksum,
'network' : { 'name' : settings.NETWORK_NAME,
'home' : settings.NETWORK_HOME,
'contact' : settings.NETWORK_CONTACT,
'description' : getattr(settings, 'NETWORK_DESCRIPTION', None)
},
'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None)
})
send_mail(
settings.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])),
t.render(c),
settings.EMAIL_IMAGE_GENERATOR_SENDER,
[d['email']],
fail_silently = False
)
else:
# Generate full image
x.build("build/%s" % d['imagebuilder'])
# Read image version
try:
f = open(glob('%s/build/%s/build_dir/target-*/root-*/etc/version' % (WORKDIR, d['imagebuilder']))[0], 'r')
version = f.read().strip()
version = re.sub(r'\W+', '_', version)
version = re.sub(r'_+', '_', version)
f.close()
except:
version = 'unknown'
# Get resulting image
files = []
for file, type in d['imagefiles']:
file = str(file)
source = "%s/build/%s/bin/%s" % (WORKDIR, d['imagebuilder'], file)
f = open(source, 'r')
checksum = hashlib.md5(f.read())
f.close()
      # We can take just the first 22 characters since checksums have a fixed size and can be reconstructed
filechecksum = urlsafe_b64encode(checksum.digest())[:22]
checksum = checksum.hexdigest()
ext = os.path.splitext(file)[1]
router_name = d['router_name'].replace('-', '')
result = "%s-%s-%s%s-%s%s" % (d['hostname'], router_name, version, ("-%s" % type if type else "-all"), filechecksum, ext)
destination = os.path.join(DESTINATION, result)
os.rename(source, destination)
files.append({ 'name' : result, 'checksum' : checksum })
# Send an e-mail
t = loader.get_template('generator/email.txt')
c = Context({
'hostname' : d['hostname'],
'ip' : d['ip'],
'username' : d['vpn_username'],
'files' : files,
'network' : { 'name' : settings.NETWORK_NAME,
'home' : settings.NETWORK_HOME,
'contact' : settings.NETWORK_CONTACT,
'description' : getattr(settings, 'NETWORK_DESCRIPTION', None)
},
'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None)
})
send_mail(
settings.EMAIL_SUBJECT_PREFIX + (_("Router images for %s/%s") % (d['hostname'], d['ip'])),
t.render(c),
settings.EMAIL_IMAGE_GENERATOR_SENDER,
[d['email']],
fail_silently = False
)
# Configure logger
logging.basicConfig(level = logging.DEBUG,
format = '%(asctime)s %(levelname)-8s %(message)s',
datefmt = '%a, %d %b %Y %H:%M:%S',
filename = os.path.join(WORKDIR, 'generator.log'),
filemode = 'a')
if settings.IMAGE_GENERATOR_USER:
# Change ownership for the build directory
os.system("chown -R {0}:{0} build".format(settings.IMAGE_GENERATOR_USER))
# Drop user privileges
try:
info = pwd.getpwnam(settings.IMAGE_GENERATOR_USER)
os.setgid(info.pw_gid)
os.setuid(info.pw_uid)
except:
print "ERROR: Unable to change to '{0}' user!".format(settings.IMAGE_GENERATOR_USER)
exit(1)
logging.info("nodewatcher firmware generator daemon v0.1 starting up...")
c = serverconn.ServerConn("127.0.0.1", 11300)
c.job = job.Job
c.use("generator")
logging.info("Connected to local beanstalkd instance.")
try:
while True:
j = c.reserve()
j.Finish()
try:
logging.info("Generating an image for '%s/%s'..." % (j.data['vpn_username'], j.data['ip']))
generate_image(no_unicodes(j.data))
logging.info("Image generation successful!")
except:
logging.error(format_exc())
logging.warning("Image generation has failed!")
# Send an e-mail
d = no_unicodes(j.data)
t = loader.get_template('generator/email_failed.txt')
ctx = Context({
'hostname' : d['hostname'],
'ip' : d['ip'],
'username' : d['vpn_username'],
'network' : { 'name' : settings.NETWORK_NAME,
'home' : settings.NETWORK_HOME,
'contact' : settings.NETWORK_CONTACT,
'description' : getattr(settings, 'NETWORK_DESCRIPTION', None)
},
'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None)
})
send_mail(
settings.EMAIL_SUBJECT_PREFIX + (_("Image generation failed for %s/%s") % (d['hostname'], d['ip'])),
t.render(ctx),
settings.EMAIL_IMAGE_GENERATOR_SENDER,
[d['email']],
fail_silently = False
)
except KeyboardInterrupt:
logging.info("Terminating due to user abort.")
except:
logging.error(format_exc())
logging.warning("We are going down!")
| agpl-3.0 | -8,465,503,166,224,846,000 | 31.713816 | 133 | 0.604424 | false | 3.554325 | true | false | false |
OCA/purchase-workflow | purchase_location_by_line/models/purchase.py | 1 | 2524 | # © 2016 Eficent Business and IT Consulting Services S.L.
# (<http://www.eficent.com>)
# © 2018 Hizbul Bahar <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
location_dest_id = fields.Many2one(
comodel_name='stock.location', string='Destination',
domain=[('usage', 'in', ['internal', 'transit'])])
@api.model
def _first_picking_copy_vals(self, key, lines):
"""The data to be copied to new pickings is updated with data from the
grouping key. This method is designed for extensibility, so that
other modules can store more data based on new keys."""
vals = super(PurchaseOrderLine, self)._first_picking_copy_vals(
key, lines)
for key_element in key:
            if 'location_dest_id' in key_element:
vals['location_dest_id'] = key_element['location_dest_id'].id
return vals
@api.model
def _get_group_keys(self, order, line, picking=False):
"""Define the key that will be used to group. The key should be
defined as a tuple of dictionaries, with each element containing a
dictionary element with the field that you want to group by. This
method is designed for extensibility, so that other modules can add
additional keys or replace them by others."""
key = super(PurchaseOrderLine, self)._get_group_keys(
order, line, picking=picking)
default_picking_location_id = line.order_id._get_destination_location()
default_picking_location = self.env['stock.location'].browse(
default_picking_location_id)
location = line.location_dest_id or default_picking_location
return key + ({'location_dest_id': location},)
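    # Lines sharing the same destination location therefore group into the
    # same picking; lines without an explicit destination fall back to the
    # order's default destination location.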
@api.multi
def _create_stock_moves(self, picking):
res = super(PurchaseOrderLine, self)._create_stock_moves(picking)
for line in self:
default_picking_location_id = \
line.order_id._get_destination_location()
default_picking_location = self.env['stock.location'].browse(
default_picking_location_id)
location = line.location_dest_id or default_picking_location
if location:
line.move_ids.filtered(lambda m: m.state != 'done').write(
{'location_dest_id': location.id})
return res
| agpl-3.0 | -5,849,242,406,780,434,000 | 43.245614 | 79 | 0.639572 | false | 3.959184 | false | false | false |
Ignotus/bookclub | app.py | 1 | 2062 | from flask import Flask, render_template, redirect, url_for, request
from flask_login import LoginManager, current_user
from flaskext.markdown import Markdown
from flask_assets import Environment, Bundle
route_modules = ["auth", "blog", "progress", "home", "calendar", "books"]
for module in route_modules:
exec("from routes.%s import %s" % (module, module))
from core.config import *
from core.db import db
from core.tables import User
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql://" + DB_USER + ":" + DB_PASSWORD + "@" + DB_HOST + "/" + DB
db.init_app(app)
Markdown(app)
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(userid):
user = User.query.get(int(userid))
if user:
return user
app.debug = DEBUG
app.secret_key = SECRET_KEY
@app.before_request
def before_request():
app.config["SERVER_NAME"] = request.host
@app.route("/")
def main():
if current_user.is_authenticated():
return redirect(url_for("home.main"))
return redirect(url_for("blog.blog_main"))
modules = [auth, blog, progress, home, calendar, books]
for module in modules:
app.register_blueprint(module)
@app.errorhandler(401)
def custom_401(error):
return render_template("unauthorized.html")
assets = Environment(app)
css = Bundle("css/blog.css", "css/style.css",
filters="cssmin", output="gen/style.min.css")
js_markdown = Bundle("js/to-markdown.js", "js/markdown.js",
filters="jsmin", output="gen/markdown.min.js")
css_tagsinput = Bundle("css/bootstrap-tagsinput.css", filters="cssmin",
output="gen/bootstrap-tagsinput.min.css")
js_tagsinput = Bundle("js/bootstrap-tagsinput.js", filters="jsmin",
output="gen/bootstrap-tagsinput.min.js")
assets.register("css_all", css)
assets.register("js_markdown", js_markdown)
assets.register("css_tagsinput", css_tagsinput)
assets.register("js_tagsinput", js_tagsinput)
if __name__ == "__main__":
app.run(port=PORT)
| mit | -5,505,833,248,113,537,000 | 26.493333 | 112 | 0.677983 | false | 3.2992 | false | false | false |
pombredanne/pywb | pywb/rewrite/url_rewriter.py | 1 | 5978 | from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit
from pywb.rewrite.wburl import WbUrl
from pywb.rewrite.cookie_rewriter import get_cookie_rewriter
#=================================================================
class UrlRewriter(object):
"""
Main pywb UrlRewriter which rewrites absolute and relative urls
to be relative to the current page, as specified via a WbUrl
instance and an optional full path prefix
"""
NO_REWRITE_URI_PREFIX = ('#', 'javascript:', 'data:',
'mailto:', 'about:', 'file:', '{')
PROTOCOLS = ('http:', 'https:', 'ftp:', 'mms:', 'rtsp:', 'wais:')
REL_SCHEME = ('//', r'\/\/', r'\\/\\/')
def __init__(self, wburl, prefix='', full_prefix=None, rel_prefix=None,
root_path=None, cookie_scope=None, rewrite_opts=None):
self.wburl = wburl if isinstance(wburl, WbUrl) else WbUrl(wburl)
self.prefix = prefix
self.full_prefix = full_prefix or prefix
self.rel_prefix = rel_prefix or prefix
self.root_path = root_path or '/'
if self.full_prefix and self.full_prefix.startswith(self.PROTOCOLS):
self.prefix_scheme = self.full_prefix.split(':')[0]
else:
self.prefix_scheme = None
self.prefix_abs = self.prefix and self.prefix.startswith(self.PROTOCOLS)
self.cookie_scope = cookie_scope
self.rewrite_opts = rewrite_opts or {}
if self.rewrite_opts.get('punycode_links'):
self.wburl._do_percent_encode = False
def rewrite(self, url, mod=None):
# if special protocol, no rewriting at all
if url.startswith(self.NO_REWRITE_URI_PREFIX):
return url
if (self.prefix and
self.prefix != '/' and
url.startswith(self.prefix)):
return url
if (self.full_prefix and
self.full_prefix != self.prefix and
url.startswith(self.full_prefix)):
return url
wburl = self.wburl
is_abs = url.startswith(self.PROTOCOLS)
scheme_rel = False
if url.startswith(self.REL_SCHEME):
is_abs = True
scheme_rel = True
# if prefix starts with a scheme
#if self.prefix_scheme:
# url = self.prefix_scheme + ':' + url
#url = 'http:' + url
# optimize: join if not absolute url, otherwise just use as is
if not is_abs:
new_url = self.urljoin(wburl.url, url)
else:
new_url = url
if mod is None:
mod = wburl.mod
final_url = self.prefix + wburl.to_str(mod=mod, url=new_url)
if not is_abs and self.prefix_abs and not self.rewrite_opts.get('no_match_rel'):
parts = final_url.split('/', 3)
final_url = '/'
if len(parts) == 4:
final_url += parts[3]
# experiment for setting scheme rel url
elif scheme_rel and self.prefix_abs:
final_url = final_url.split(':', 1)[1]
return final_url
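    # Illustrative example (values assumed, not from this module): with
    # prefix '/web/' and a wburl of '2014/http://example.com/page',
    # rewrite('img.png') joins the relative url against the page url and
    # returns '/web/2014/http://example.com/img.png'.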
def get_new_url(self, **kwargs):
return self.prefix + self.wburl.to_str(**kwargs)
def rebase_rewriter(self, new_url):
if new_url.startswith(self.prefix):
new_url = new_url[len(self.prefix):]
elif new_url.startswith(self.rel_prefix):
new_url = new_url[len(self.rel_prefix):]
new_wburl = WbUrl(new_url)
return self._create_rebased_rewriter(new_wburl, self.prefix)
def _create_rebased_rewriter(self, new_wburl, prefix):
return UrlRewriter(new_wburl, prefix)
def get_cookie_rewriter(self, scope=None):
# collection scope overrides rule scope?
if self.cookie_scope:
scope = self.cookie_scope
cls = get_cookie_rewriter(scope)
return cls(self)
def deprefix_url(self):
return self.wburl.deprefix_url(self.full_prefix)
def __repr__(self):
return "UrlRewriter('{0}', '{1}')".format(self.wburl, self.prefix)
@staticmethod
def urljoin(orig_url, url): # pragma: no cover
new_url = urljoin(orig_url, url)
if '../' not in new_url:
return new_url
# only needed in py2 as py3 urljoin resolves '../'
parts = urlsplit(new_url)
scheme, netloc, path, query, frag = parts
path_parts = path.split('/')
i = 0
n = len(path_parts) - 1
while i < n:
if path_parts[i] == '..':
del path_parts[i]
n -= 1
if i > 0:
del path_parts[i - 1]
n -= 1
i -= 1
else:
i += 1
if path_parts == ['']:
path = '/'
else:
path = '/'.join(path_parts)
parts = (scheme, netloc, path, query, frag)
new_url = urlunsplit(parts)
return new_url
#=================================================================
class SchemeOnlyUrlRewriter(UrlRewriter):
"""
A url rewriter which ensures that any urls have the same
scheme (http or https) as the base url.
Other urls/input is unchanged.
"""
def __init__(self, *args, **kwargs):
super(SchemeOnlyUrlRewriter, self).__init__(*args, **kwargs)
self.url_scheme = self.wburl.url.split(':')[0]
if self.url_scheme == 'https':
self.opposite_scheme = 'http'
else:
self.opposite_scheme = 'https'
def rewrite(self, url, mod=None):
if url.startswith(self.opposite_scheme + '://'):
url = self.url_scheme + url[len(self.opposite_scheme):]
return url
def get_new_url(self, **kwargs):
return kwargs.get('url', self.wburl.url)
def rebase_rewriter(self, new_url):
return self
def get_cookie_rewriter(self, scope=None):
return None
def deprefix_url(self):
return self.wburl.url
| gpl-3.0 | 326,196,317,442,210,560 | 30.967914 | 88 | 0.546002 | false | 3.795556 | false | false | false |