text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
import unittest
class TestExample(unittest.TestCase):
    """Skeleton test case demonstrating the unittest fixture hooks.

    Each hook only prints a banner so the execution order of
    setUpClass/setUp/tearDown/tearDownClass can be observed.
    """

    @classmethod
    def setUpClass(cls):
        # Runs once, before any test method in this class.
        banner = "**************************************** setUpClass ****************************************"
        print(banner)

    @classmethod
    def tearDownClass(cls):
        # Runs once, after all test methods in this class.
        banner = "************************************** tearDownClass ***************************************"
        print(banner)

    def setUp(self):
        # Runs before every individual test method.
        print("****** setUp *******")

    def tearDown(self):
        # Runs after every individual test method.
        print("***** tearDown *****")

    def _example(self):
        # Leading underscore keeps unittest from auto-discovering this method.
        print("This is a test example.")
| Ernestyj/PyStudy | DataScience/python/tools/test/test_example.py | Python | apache-2.0 | 577 | 0.003466 |
#!/usr/bin/env python3
"""
Check of automatic algorithm rollover scenario.
"""
import collections
import os
import shutil
import datetime
import random
import subprocess
from subprocess import check_call
from dnstest.utils import *
from dnstest.keys import Keymgr
from dnstest.test import Test
from dnstest.module import ModOnlineSign
def pregenerate_key(server, zone, alg):
    """Pre-generate a KSK of the given algorithm under a dummy zone name.

    The key is attached to a throwaway policy so it exists in the KASP DB
    before the real rollover starts.
    """
    # gen_key only needs an object with a ``name`` attribute.
    fake_zone = collections.namedtuple("FakeZone", "name")("nonexistent.zone.")
    server.gen_key(fake_zone, ksk=True, alg=alg, addtopolicy="blahblah")
# check zone if keys are present and used for signing
def check_zone(server, zone, dnskeys, dnskey_rrsigs, cdnskeys, soa_rrsigs, msg):
    """Verify the zone publishes the expected record counts.

    Queries DNSKEY, CDNSKEY and (DNSSEC) DNSKEY/SOA RRSIGs, logs the
    observed vs. expected counts, and raises a test error via set_err()
    for every mismatch, tagging it with ``msg``.

    NOTE(review): the ``zone`` argument is unused — all queries are
    hard-coded to "example.com"; confirm this is intentional for this
    single-zone test.
    """
    qdnskeys = server.dig("example.com", "DNSKEY", bufsize=4096)
    found_dnskeys = qdnskeys.count("DNSKEY")

    qdnskeyrrsig = server.dig("example.com", "DNSKEY", dnssec=True, bufsize=4096)
    found_rrsigs = qdnskeyrrsig.count("RRSIG")

    qcdnskey = server.dig("example.com", "CDNSKEY", bufsize=4096)
    found_cdnskeys = qcdnskey.count("CDNSKEY")

    qsoa = server.dig("example.com", "SOA", dnssec=True, bufsize=4096)
    found_soa_rrsigs = qsoa.count("RRSIG")

    check_log("DNSKEYs: %d (expected %d)" % (found_dnskeys, dnskeys))
    check_log("RRSIGs: %d (expected %d)" % (found_soa_rrsigs, soa_rrsigs))
    check_log("DNSKEY-RRSIGs: %d (expected %d)" % (found_rrsigs, dnskey_rrsigs))
    check_log("CDNSKEYs: %d (expected %d)" % (found_cdnskeys, cdnskeys))

    if found_dnskeys != dnskeys:
        set_err("BAD DNSKEY COUNT: " + msg)
        detail_log("!DNSKEYs not published and activated as expected: " + msg)

    if found_soa_rrsigs != soa_rrsigs:
        set_err("BAD RRSIG COUNT: " + msg)
        detail_log("!RRSIGs not published and activated as expected: " + msg)

    if found_rrsigs != dnskey_rrsigs:
        set_err("BAD DNSKEY RRSIG COUNT: " + msg)
        # Fix: this message previously duplicated the plain-RRSIG text above,
        # making the two failure modes indistinguishable in the log.
        detail_log("!DNSKEY RRSIGs not published and activated as expected: " + msg)

    if found_cdnskeys != cdnskeys:
        set_err("BAD CDNSKEY COUNT: " + msg)
        detail_log("!CDNSKEYs not published and activated as expected: " + msg)

    detail_log(SEP)
def wait_for_rrsig_count(t, server, rrtype, rrsig_count, timeout):
rtime = 0
while True:
qdnskeyrrsig = server.dig("example.com", rrtype, dnssec=True, bufsize=4096)
found_rrsigs = qdnskeyrrsig.count("RRSIG")
if found_rrsigs == rrsig_count:
break
rtime = rtime + 1
t.sleep(1)
if rtime > timeout:
break
def wait_for_dnskey_count(t, server, dnskey_count, timeout):
    """Poll the zone until the DNSKEY RRset reaches ``dnskey_count`` records.

    Sleeps one second between queries; silently gives up after roughly
    ``timeout`` seconds.
    """
    for _ in range(timeout + 1):
        response = server.dig("example.com", "DNSKEY", dnssec=True, bufsize=4096)
        if response.count("DNSKEY") == dnskey_count:
            return
        t.sleep(1)
def wait_for_cds_change(t, server, timeout):
    """Poll the CDS RRset until it differs from its initial content.

    Takes one snapshot up front, then re-queries every second; gives up
    silently after roughly ``timeout`` seconds.
    """
    initial = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
    for _ in range(timeout + 1):
        current = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
        if current != initial:
            return
        t.sleep(1)
def watch_alg_rollover(t, server, zone, before_keys, after_keys, desc, set_alg, key_len, submission_cb):
    """Drive one signing-algorithm rollover and verify the zone at each stage.

    Switches the onlinesign module to ``set_alg``/``key_len``, then walks the
    rollover through: pre-active, both-algorithms-active, KSK-ready, DS
    submission (``submission_cb``), post-active, and old-algorithm removal,
    calling check_zone() with the expected record counts at every step.
    """
    check_zone(server, zone, before_keys, 1, 1, 1, desc + ": initial keys")

    # Reconfigure the online-signing module to the new algorithm and key size.
    z = server.zones[zone[0].name];
    z.get_module("onlinesign").algorithm = set_alg
    z.get_module("onlinesign").key_size = key_len
    server.gen_confile()
    server.reload()

    # New algorithm starts signing the zone data (double SOA signatures).
    wait_for_rrsig_count(t, server, "SOA", 2, 20)
    check_zone(server, zone, before_keys, 1, 1, 2, desc + ": pre active")

    wait_for_dnskey_count(t, server, before_keys + after_keys, 20)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": both algorithms active")

    # wait for any change in CDS records
    CDS1 = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
    t.sleep(3)
    while CDS1 == str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset()):
        t.sleep(1)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": new KSK ready")

    # Push the new DS to the parent and let the server notice it.
    submission_cb()
    t.sleep(4)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": both still active")

    # Old-algorithm keys retire, then their signatures disappear.
    wait_for_dnskey_count(t, server, after_keys, 20)
    check_zone(server, zone, after_keys, 1, 1, 2, desc + ": post active")

    wait_for_rrsig_count(t, server, "SOA", 1, 20)
    check_zone(server, zone, after_keys, 1, 1, 1, desc + ": old alg removed")
def watch_ksk_rollover(t, server, zone, before_keys, after_keys, total_keys, desc, set_ksk_lifetime, submission_cb):
    """Drive one KSK/CSK rollover triggered by (temporarily) shortening the KSK lifetime.

    Verifies expected DNSKEY/RRSIG/CDNSKEY counts via check_zone() at each
    stage and uses ``submission_cb`` to submit the new DS to the parent.
    A ``set_ksk_lifetime`` of 0 keeps the configured lifetime.
    """
    check_zone(server, zone, before_keys, 1, 1, 1, desc + ": initial keys")

    z = server.zones[zone[0].name];
    orig_ksk_lifetime = z.get_module("onlinesign").ksk_life
    # Shorten the KSK lifetime to force the rollover to start.
    z.get_module("onlinesign").ksk_life = set_ksk_lifetime if set_ksk_lifetime > 0 else orig_ksk_lifetime
    server.gen_confile()
    server.reload()

    wait_for_dnskey_count(t, server, total_keys, 20)
    t.sleep(3)
    check_zone(server, zone, total_keys, 2, 1, 1, desc + ": published new")

    # Restore the original lifetime so only a single rollover happens.
    z.get_module("onlinesign").ksk_life = orig_ksk_lifetime
    server.gen_confile()
    server.reload()

    wait_for_cds_change(t, server, 20)
    expect_zone_rrsigs = (2 if before_keys == 1 and after_keys > 1 else 1) # there is an exception for CSK->KZSK rollover that we have double signatures for the zone. Sorry, we don't care...
    check_zone(server, zone, total_keys, 2, 1, expect_zone_rrsigs, desc + ": new KSK ready")

    submission_cb()
    t.sleep(4)
    if before_keys < 2 or after_keys > 1:
        check_zone(server, zone, total_keys, 2, 1, 1, desc + ": both still active")
    # else skip the test as we have no control on KSK and ZSK retiring asynchronously

    wait_for_dnskey_count(t, server, after_keys, 28)
    check_zone(server, zone, after_keys, 1, 1, 1, desc + ": old key removed")
# Test setup: a parent ("com.") and a child ("example.com.") Knot server,
# with the child signed by the onlinesign module.
t = Test(stress=False)
ModOnlineSign.check()

parent = t.server("knot")
parent_zone = t.zone("com.", storage=".")
t.link(parent_zone, parent)
parent.dnssec(parent_zone).enable = True

child = t.server("knot")
child_zone = t.zone("example.com.", storage=".")
t.link(child_zone, child)

def cds_submission():
    """Copy the child's current CDS record into the parent zone as a DS (via DDNS)."""
    cds = child.dig(ZONE, "CDS")
    cds_rdata = cds.resp.answer[0].to_rdataset()[0].to_text()
    up = parent.update(parent_zone)
    up.delete(ZONE, "DS")
    up.add(ZONE, 7, "DS", cds_rdata)
    up.send("NOERROR")

# Effectively disable zonefile flushing during the test (24h).
child.zonefile_sync = 24 * 60 * 60
child.dnssec(child_zone).ksk_sbm_check = [ parent ]
child.add_module(child_zone, ModOnlineSign("ECDSAP384SHA384", key_size="384", prop_delay=11, ksc = [ parent ],
                                           ksci = 2, ksk_shared=True, cds_publish="always",
                                           cds_digesttype=random.choice(["sha256", "sha384"])))

# parameters
ZONE = "example.com."

t.start()
child.zone_wait(child_zone)
cds_submission() # pass initially generated key to active state
t.sleep(4) # let the server accept the submission before forced reload

# Scenario 1: CSK lifetime-driven rollover with a pre-generated same-algorithm key.
pregenerate_key(child, child_zone, "ECDSAP384SHA384")
watch_ksk_rollover(t, child, child_zone, 1, 1, 2, "CSK rollover", 22, cds_submission)

# Scenario 2: algorithm rollover to a pre-generated ECDSAP256SHA256 key.
pregenerate_key(child, child_zone, "ECDSAP256SHA256")
watch_alg_rollover(t, child, child_zone, 1, 1, "CSK to CSK alg", "ECDSAP256SHA256", 256, cds_submission)

t.end()
| CZ-NIC/knot | tests-extra/tests/modules/onlinesign_rollovers/test.py | Python | gpl-3.0 | 7,474 | 0.006422 |
import numpy as np
import pytest
from numpy.testing import assert_allclose
try:
import scipy
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
import astropy.units as u
from astropy.timeseries.periodograms.lombscargle import LombScargle
from astropy.timeseries.periodograms.lombscargle._statistics import (fap_single, inv_fap_single,
METHODS)
from astropy.timeseries.periodograms.lombscargle.utils import convert_normalization, compute_chi2_ref
# Extra keyword arguments for specific FAP methods: bootstrap uses a fixed
# seed and a small bootstrap count to stay fast and deterministic in tests.
METHOD_KWDS = dict(bootstrap={'n_bootstraps': 20, 'random_seed': 42})
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']
def make_data(N=100, period=1, theta=(10, 2, 3), dy=1, rseed=0, units=False):
    """Generate some data for testing.

    Parameters
    ----------
    N : int
        Number of samples.
    period : float
        Period of the injected sinusoid.
    theta : sequence of 3 floats
        (offset, sine amplitude, cosine amplitude) of the signal.
        Note: default changed from a list to an equivalent tuple to avoid
        the shared-mutable-default pitfall; it is only ever indexed.
    dy : float
        Base scale of the (heteroscedastic) errors.
    rseed : int
        Seed for the random generator (deterministic output).
    units : bool
        If True, attach astropy units (days/mag) to the returned arrays.

    Returns
    -------
    t, y, dy, fmax : arrays and scalar (with units if ``units`` is True)
    """
    rng = np.random.RandomState(rseed)
    t = 5 * period * rng.rand(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    # Heteroscedastic errors in [0.5*dy, 1.5*dy); noise drawn afterwards so
    # the RNG stream order (rand, rand, randn) stays reproducible.
    dy = dy * (0.5 + rng.rand(N))
    y += dy * rng.randn(N)
    fmax = 5

    if units:
        return t * u.day, y * u.mag, dy * u.mag, fmax / u.day
    else:
        return t, y, dy, fmax
def null_data(N=1000, dy=1, rseed=0, units=False):
    """Generate data drawn from the null hypothesis (pure noise, no signal)."""
    rng = np.random.RandomState(rseed)
    t = 100 * rng.rand(N)
    # Heteroscedastic errors, then Gaussian noise scaled by them.
    errors = 0.5 * dy * (1 + rng.rand(N))
    y = errors * rng.randn(N)
    fmax = 40

    if not units:
        return t, y, errors, fmax
    return t * u.day, y * u.mag, errors * u.mag, fmax / u.day
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('units', [False, True])
def test_distribution(normalization, with_errors, units):
    """Periodogram power of null data should follow the theoretical distribution."""
    t, y, dy, fmax = null_data(units=units)

    if not with_errors:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    z = np.linspace(0, power.max(), 1000)

    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = ls.distribution(z_mid)
    cdf = ls.distribution(z, cumulative=True)
    if isinstance(dz, u.Quantity):
        dz = dz.value
    # Numerical derivative of the CDF should match the PDF.
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)

    # psd normalization without specified errors produces bad results
    if not (normalization == 'psd' and not with_errors):
        # Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
        midpoints = 0.5 * (bins[1:] + bins[:-1])
        pdf = ls.distribution(midpoints)
        assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_inverse_single(N, normalization):
    """fap_single and inv_fap_single should be exact inverses of each other."""
    fap_levels = np.linspace(0, 1, 11)
    round_trip = fap_single(inv_fap_single(fap_levels, N, normalization),
                            N, normalization)
    assert_allclose(fap_levels, round_trip)
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('units', [False, True])
def test_inverse_bootstrap(normalization, use_errs, units):
    """false_alarm_level and false_alarm_probability should round-trip (bootstrap method)."""
    t, y, dy, fmax = null_data(units=units)
    if not use_errs:
        dy = None

    fap = np.linspace(0, 1, 11)
    method = 'bootstrap'
    method_kwds = METHOD_KWDS['bootstrap']

    ls = LombScargle(t, y, dy, normalization=normalization)

    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method, method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)

    # atol = 1 / n_bootstraps
    assert_allclose(fap, fap_out, atol=0.05)
@pytest.mark.parametrize('method', sorted(set(METHODS) - {'bootstrap'}))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('units', [False, True])
def test_inverses(method, normalization, use_errs, N, units, T=5):
    """false_alarm_level and false_alarm_probability should round-trip for analytic methods."""
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    t, y, dy, fmax = make_data(N, rseed=543, units=units)
    if not use_errs:
        dy = None
    method_kwds = METHOD_KWDS.get(method, None)

    # Span ten decades of false-alarm probabilities.
    fap = np.logspace(-10, 0, 11)

    ls = LombScargle(t, y, dy, normalization=normalization)
    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method,
                             method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method,
                                         method_kwds=method_kwds)
    assert_allclose(fap, fap_out)
@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('units', [False, True])
def test_false_alarm_smoketest(method, normalization, units):
    """Basic sanity checks on FAP values: right length, bounded, decreasing in power."""
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)

    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    assert len(fap) == len(Z)
    if method != 'davies':
        # The Davies bound is not a true probability and may exceed 1.
        assert np.all(fap <= 1)
        assert np.all(fap[:-1] >= fap[1:])  # monotonically decreasing
@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('normalization', sorted(set(NORMALIZATIONS) - {'psd'}))
@pytest.mark.parametrize('units', [False, True])
def test_false_alarm_equivalence(method, normalization, use_errs, units):
    """FAP values should agree across normalizations after converting Z to 'standard'."""
    # Note: the PSD normalization is not equivalent to the others, in that it
    # depends on the absolute errors rather than relative errors. Because the
    # scaling contributes to the distribution, it cannot be converted directly
    # from any of the three normalized versions.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    if not use_errs:
        dy = None
    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)
    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    # Compute the equivalent Z values in the standard normalization
    # and check that the FAP is consistent
    Z_std = convert_normalization(Z, len(t),
                                  from_normalization=normalization,
                                  to_normalization='standard',
                                  chi2_ref=compute_chi2_ref(y, dy))
    ls = LombScargle(t, y, dy, normalization='standard')
    fap_std = ls.false_alarm_probability(Z_std, maximum_frequency=fmax,
                                         method=method, method_kwds=kwds)

    assert_allclose(fap, fap_std, rtol=0.1)
| MSeifert04/astropy | astropy/timeseries/periodograms/lombscargle/tests/test_statistics.py | Python | bsd-3-clause | 7,519 | 0.000532 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-21 19:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Book.slug``: add help text, verbose name, and the shown constraints."""

    # Applied on top of the auto-generated 0007 migration of the books app.
    dependencies = [
        ('books', '0007_auto_20170821_2052'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='slug',
            field=models.SlugField(help_text='wykorzystywane w adresie strony', max_length=100, unique=True, verbose_name='identyfikator'),
        ),
    ]
| ludwiktrammer/tikn | apps/books/migrations/0008_auto_20170821_2100.py | Python | gpl-3.0 | 534 | 0.001873 |
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.Results.Phirhoz
.. moduleauthor:: Hendrix Demers <[email protected]>
MCXRay phirhoz result file.
"""
# Script information for the file.
__author__ = "Hendrix Demers ([email protected])"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion informations for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules
# Globals and constants variables.
# Keys used in the internal ``_parameters`` dictionary of the Phirhoz class.
KEY_SYMBOL = "symbol"
KEY_SHELL = "shell"
KEY_INTENSITY = "Intensity"
KEY_DEPTHS_A = "depths_A"
KEY_VALUES = "values"
class Phirhoz(object):
    """One element/shell phi-rho-z (depth distribution) parsed from a MCXRay result file."""

    def __init__(self, symbol, shell):
        self._parameters = {}
        self.symbol = symbol
        self.shell = shell
        # Label that marks this element/shell section in the result file,
        # e.g. "Cu [Shell K],".
        self._label = "%s [Shell %s]," % (symbol, shell)

    def readFromLines(self, lines):
        """Parse the intensity line and the depth/value table from ``lines``.

        Scans forward for the line starting with this object's label and reads
        the intensity from its "... = value" tail, then consumes consecutive
        "depth value" pairs until the first line that does not parse.

        Returns the number of lines consumed (which includes the terminating
        non-numeric line, if any — preserved from the original behaviour).
        """
        indexLine = 0
        for line in lines[indexLine:]:
            indexLine += 1
            if line.startswith(self._label):
                items = line.split('=')
                self.intensity = float(items[-1])
                break

        self.depths_A = []
        self.values = []
        for _index in range(len(lines[indexLine:])):
            line = lines[indexLine]
            indexLine += 1
            try:
                items = line.split()
                depth_A = float(items[0])
                value = float(items[1])
                self.depths_A.append(depth_A)
                self.values.append(value)
            except (ValueError, IndexError):
                # Fix: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit. The first line that is not
                # two floats terminates the table.
                break

        return indexLine

    @property
    def symbol(self):
        # Chemical symbol of the element (e.g. "Cu").
        return self._parameters[KEY_SYMBOL]

    @symbol.setter
    def symbol(self, symbol):
        self._parameters[KEY_SYMBOL] = symbol

    @property
    def shell(self):
        # Atomic subshell label (e.g. "K").
        return self._parameters[KEY_SHELL]

    @shell.setter
    def shell(self, shell):
        self._parameters[KEY_SHELL] = shell

    @property
    def intensity(self):
        return self._parameters[KEY_INTENSITY]

    @intensity.setter
    def intensity(self, intensity):
        self._parameters[KEY_INTENSITY] = intensity

    @property
    def depths_A(self):
        # Depths in angstroms, parallel to ``values``.
        return self._parameters[KEY_DEPTHS_A]

    @depths_A.setter
    def depths_A(self, depths_A):
        self._parameters[KEY_DEPTHS_A] = depths_A

    @property
    def values(self):
        return self._parameters[KEY_VALUES]

    @values.setter
    def values(self, values):
        self._parameters[KEY_VALUES] = values
| drix00/pymcxray | pymcxray/FileFormat/Results/Phirhoz.py | Python | apache-2.0 | 2,697 | 0.002595 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.forms import ModelForm
from django.views.generic.list import ListView,View
import abc
import datetime
from django.utils import timezone
from django.core.validators import RegexValidator
from django.core.urlresolvers import reverse
'Tradução e estados para PT-BR'
from django.utils.translation import ugettext_lazy as _
#from django.core.exceptions import ValidationError
'Estas classes implementam os campos de Tutor do Subsistema Secretaria e sua respectivas regras de negócio.'
# Brazilian federative units as Django field choices: (stored value, label).
# The first entry is a blank placeholder shown before a selection is made.
STATE_CHOICES = (
    ('', '----'),
    ('AC', 'Acre'),
    ('AL', 'Alagoas'),
    ('AP', 'Amapá'),
    ('AM', 'Amazonas'),
    ('BA', 'Bahia'),
    ('CE', 'Ceará'),
    ('DF', 'Distrito Federal'),
    ('ES', 'Espírito Santo'),
    ('GO', 'Goiás'),
    ('MA', 'Maranhão'),
    ('MT', 'Mato Grosso'),
    ('MS', 'Mato Grosso do Sul'),
    ('MG', 'Minas Gerais'),
    ('PA', 'Pará'),
    ('PB', 'Paraíba'),
    ('PR', 'Paraná'),
    ('PE', 'Pernambuco'),
    ('PI', 'Piauí'),
    ('RJ', 'Rio de Janeiro'),
    ('RN', 'Rio Grande do Norte'),
    ('RS', 'Rio Grande do Sul'),
    ('RO', 'Rondônia'),
    ('RR', 'Roraima'),
    ('SC', 'Santa Catarina'),
    ('SP', 'São Paulo'),
    ('SE', 'Sergipe'),
    ('TO', 'Tocantins')
)

# Gender choices used by animal records.
GENERO_CHOICES = (
    ('', '----'),
    ('FE', 'Feminino'),
    ('MA', 'Masculino'),
)
class EnderecoAbs(models.Model):
    """Abstract address fields: street, number, district, city, ZIP and state."""
    _logradouro = models.CharField(verbose_name='Logradouro', max_length=200)
    _numero = models.PositiveSmallIntegerField(verbose_name='Número')
    _bairro = models.CharField(verbose_name='Bairro', max_length=20)
    _cidade = models.CharField(verbose_name='Cidade', max_length=200)
    _cep = models.CharField(verbose_name = 'CEP', max_length=15)
    _uf = models.CharField(verbose_name = 'UF', max_length=10, choices=STATE_CHOICES)

    class Meta:
        abstract = True


class AcoesEndereco(EnderecoAbs):
    """Adds textual representations for addresses.

    NOTE(review): uses the ``logradouro``/``numero`` properties defined only
    on the ``Endereco`` subclass — confirm this class is never used outside
    that hierarchy.
    """
    def __unicode__(self):
        return u'%s %s' % (self.logradouro, self.numero)

    def __str__(self):
        return u'%s %s' % (self.logradouro, self.numero)

    class Meta:
        verbose_name_plural = "Endereços"
        abstract = True


class Endereco(AcoesEndereco):
    """Exposes the underscored address fields through Python properties."""
    def _get_logradouro(self):
        return self._logradouro
    def _get_numero(self):
        return self._numero
    def _get_bairro(self):
        return self._bairro
    def _get_cidade(self):
        return self._cidade
    def _get_cep(self):
        return self._cep
    def _get_uf(self):
        return self._uf
    def _set_logradouro(self,logradouro):
        self._logradouro = logradouro
    def _set_numero(self,numero):
        self._numero = numero
    def _set_bairro(self,bairro):
        self._bairro = bairro
    def _set_cidade(self,cidade):
        self._cidade = cidade
    def _set_cep(self,cep):
        self._cep = cep
    def _set_uf(self,uf):
        self._uf = uf
    logradouro = property(_get_logradouro,_set_logradouro)
    numero = property(_get_numero,_set_numero)
    bairro = property(_get_bairro,_set_bairro)
    cidade = property(_get_cidade,_set_cidade)
    cep = property(_get_cep,_set_cep)
    uf = property(_get_uf,_set_uf)

    class Meta:
        abstract = True
class TelefoneAbs(models.Model):
    """Abstract phone fields with format validation (up to 15 digits, optional '+')."""
    telefone_fixo_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="O formato do número de telefone deve ser: '+999999999'. São Permitidos até 15 dígitos.")
    _telefone1 = models.CharField(validators=[telefone_fixo_regex],verbose_name='Telefone de Contato 1', max_length=15,blank=True)
    _telefone2 = models.CharField(validators=[telefone_fixo_regex],null = True,verbose_name='Telefone de Contato 2', max_length=15,blank=True)

    class Meta:
        abstract = True


class AcoesTelefone(TelefoneAbs):
    """Adds a textual representation for phones."""
    def __unicode__(self):
        # NOTE(review): ``self.telefone`` is not defined anywhere (only
        # telefone1/telefone2 exist), so calling this would raise
        # AttributeError — confirm the intended attribute before changing.
        return u'%s' % (self.telefone)

    class Meta:
        verbose_name_plural = "Telefones"
        abstract = True


class Telefone(AcoesTelefone):
    """Exposes the underscored phone fields through Python properties."""
    def _get_telefone1(self):
        return self._telefone1
    def _set_telefone1(self,telefone):
        # Fix: original assigned the undefined name 'telefone1', raising
        # NameError on any assignment to the ``telefone1`` property.
        self._telefone1 = telefone
    def _get_telefone2(self):
        return self._telefone2
    def _set_telefone2(self,telefone):
        # Fix: original assigned the undefined name 'telefone2' (NameError).
        self._telefone2 = telefone
    telefone1 = property(_get_telefone1,_set_telefone1)
    telefone2 = property(_get_telefone2,_set_telefone2)

    class Meta:
        abstract = True
class PessoaAbs(models.Model):
    """Abstract person: name, e-mail and CPF (Brazilian taxpayer id)."""
    _nome = models.CharField(verbose_name='Nome', max_length=50)
    _email = models.EmailField(verbose_name='E-Mail')
    _cpf = models.CharField(verbose_name='CPF', max_length=11)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    def _get_nome(self):
        return self._nome
    def _get_email(self):
        return self._email
    def _get_cpf(self):
        return self._cpf
    def _set_nome(self,nome):
        self._nome = nome
    def _set_email(self,email):
        self._email = email
    def _set_cpf(self,cpf):
        self._cpf = cpf
    nome = property(_get_nome,_set_nome)
    email = property(_get_email,_set_email)
    cpf = property(_get_cpf,_set_cpf)

    class Meta:
        abstract = True


class AcoesTutor(PessoaAbs):
    """Abstract marker adding the tutor verbose naming."""
    def __unicode__(self):
        return u'%s' % (self.nome)

    class Meta:
        verbose_name_plural = "Tutores"
        abstract = True


class Tutor(AcoesTutor):
    """Abstract tutor (owner); the concrete model is TutorEndTel."""
    class Meta:
        abstract = True
#mudar o nome para tutor_detalhe ou tutordetalhe ou tutordetalhes
class TutorEndTel(Tutor, Endereco, Telefone):
    """Concrete tutor model combining person, address and phone fields."""
    def get_absolute_url(self):
        return reverse('tutorendtel_detail', kwargs={'pk': self.pk})
class AnimalAbs(models.Model):
    """Abstract animal record tied to a tutor (owner)."""
    _nome = models.CharField(verbose_name='Nome', max_length=50)
    _rg = models.PositiveSmallIntegerField(verbose_name='RG', unique=True, blank = True)
    _especie = models.CharField(verbose_name='Espécie', max_length=50)
    _raca = models.CharField(verbose_name='Raça', max_length=50)
    # Public field (no property wrapper, unlike the underscored ones).
    sexo = models.CharField(verbose_name='Sexo', max_length=15, choices=GENERO_CHOICES)
    _nascimento = models.DateField(verbose_name='Data de Nascimento')
    _obito = models.DateField(verbose_name='Data de Óbito', null = True ,blank = True)
    _idade = models.PositiveSmallIntegerField(verbose_name='Idade')
    tutor = models.ForeignKey(TutorEndTel, on_delete = models.CASCADE, related_name='animais')

    class Meta:
        verbose_name_plural = "Animais"
        abstract = True

    def get_absolute_url(self):
        # NOTE(review): this uses URL name 'animal_detalhes' while the
        # concrete Animal subclass overrides it with 'animal_detail' —
        # confirm which one is current.
        return reverse('animal_detalhes', kwargs={'pk': self.pk})


class AcoesAnimal(AnimalAbs):
    """Adds textual representations for animals."""
    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    class Meta:
        abstract = True


class Animal(AcoesAnimal):
    """Concrete animal model exposing the underscored fields as properties."""
    def get_absolute_url(self):
        return reverse('animal_detail', kwargs={'pk': self.pk})

    def _get_nome(self):
        return self._nome
    def _get_rg(self):
        return self._rg
    def _get_especie(self):
        return self._especie
    def _get_raca(self):
        return self._raca
    def _get_nascimento(self):
        return self._nascimento
    def _get_obito(self):
        return self._obito
    def _get_idade(self):
        return self._idade
    def _set_nome(self,nome):
        self._nome = nome
    def _set_rg(self,rg):
        self._rg = rg
    def _set_especie(self,especie):
        self._especie = especie
    def _set_raca(self,raca):
        self._raca = raca
    def _set_nascimento(self,nascimento):
        self._nascimento = nascimento
    def _set_obito(self,obito):
        self._obito = obito
    def _set_idade(self,idade):
        self._idade = idade
    nome = property(_get_nome,_set_nome)
    rg = property(_get_rg,_set_rg)
    especie = property(_get_especie,_set_especie)
    raca = property(_get_raca,_set_raca)
    nascimento = property(_get_nascimento,_set_nascimento)
    idade = property(_get_idade,_set_idade)
    obito = property(_get_obito,_set_obito)
# referente a veterinario
class AcoesVeterinario(PessoaAbs):
    """Abstract marker adding veterinarian verbose naming."""
    class Meta:
        verbose_name_plural = "Veterinarios"
        abstract = True


class Veterinario(AcoesVeterinario):
    """Veterinarian: a person plus a CRMV professional registration."""
    _crmv = models.CharField(verbose_name='CRMV', max_length=10)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    def _get_crmv(self):
        return self._crmv
    def _set_crmv(self,crmv):
        self._crmv = crmv
    crmv = property(_get_crmv,_set_crmv)
# referente a tecnico
class AcoesTecnico(PessoaAbs):
    """Abstract marker adding technician verbose naming."""
    class Meta:
        verbose_name_plural = "Tecnicos"
        abstract = True


class Tecnico(AcoesTecnico):
    """Lab technician: a person plus a CRF professional registration."""
    _crf = models.CharField(verbose_name='CRF', max_length=10)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    def _get_crf(self):
        return self._crf
    def _set_crf(self,crf):
        self._crf = crf
    crf = property(_get_crf,_set_crf)
# classes para servico,consulta e exame
class AtendimentoAbs(models.Model):
    """Base for attendances (consultations and exams): date, diagnosis, client.

    NOTE(review): unlike the other base classes this one declares no
    ``Meta.abstract = True`` — as written Django creates a table for it and
    subclasses use multi-table inheritance; confirm that is intended.
    """
    _data = models.DateField(auto_now_add=True)
    _diagnostico = models.TextField(default = 'Pendente', blank = True, verbose_name='Diagnóstico', max_length=200)
    cliente = models.ForeignKey(TutorEndTel,on_delete=models.CASCADE, related_name='cliente_a_ser_atendido', null = True ,blank = True)

    def _get_data(self):
        return self._data
    def _get_diagnostico(self):
        return self._diagnostico
    def _get_cliente(self):
        return self.cliente
    def _set_diagnostico(self,diagnostico):
        self._diagnostico = diagnostico
    def _set_data(self,data):
        self._data = data
    diagnostico = property(_get_diagnostico,_set_diagnostico)
    data = property(_get_data,_set_data)


class ConsultaAbs (AtendimentoAbs):
    """Abstract consultation: links animal and veterinarian, tracks return visits."""
    _retorno = models.BooleanField()
    animal = models.ForeignKey(Animal, on_delete=models.CASCADE, related_name='a_ser_consultado')
    veterinario = models.ForeignKey(Veterinario, on_delete=models.CASCADE, related_name='realiza_consulta')
    _data_realizacao = models.DateField(verbose_name='Data Agendada')

    class Meta:
        abstract = True
        verbose_name_plural = "Consultas"


class AcoesConsulta(ConsultaAbs):
    """Placeholder layer for consultation actions."""
    class Meta:
        abstract = True


class Consulta (AcoesConsulta):
    """Concrete consultation exposing retorno/data_realizacao as properties."""
    def _get_retorno(self):
        return self._retorno
    def _set_retorno(self,retorno):
        self._retorno = retorno
    def _get_data_realizacao(self):
        return self._data_realizacao
    def _set_data_realizacao(self,data_realizacao):
        self._data_realizacao = data_realizacao
    retorno = property(_get_retorno,_set_retorno)
    data_realizacao = property(_get_data_realizacao,_set_data_realizacao)
#classes referentes a laboratório
class Laboratorio (models.Model):
    """Laboratory where exams are performed (name and location)."""
    _nome = models.CharField(verbose_name='Nome', max_length=50)
    _local = models.CharField(verbose_name='local', max_length=50)

    def get_absolute_url(self):
        return reverse('laboratorio_detail', kwargs={'pk': self.pk})

    def _get_nome(self):
        return self._nome
    def _get_local(self):
        return self._local
    def _set_nome(self,nome):
        self._nome = nome
    def _set_local(self,local):
        # Fix: original did ``self.local = local``, which re-invokes this
        # property setter and recurses infinitely on any assignment.
        self._local = local
    nome = property(_get_nome,_set_nome)
    local = property(_get_local,_set_local)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)
class ExameAbs (AtendimentoAbs):
    """Abstract exam: sample data plus links to animal, staff and laboratory."""
    animal = models.ForeignKey(Animal,null = True, blank = True,on_delete=models.CASCADE, related_name='mostrado_para_exame')
    veterinario = models.ForeignKey(Veterinario, on_delete=models.CASCADE, related_name='realiza_diagnostico')
    tecnico = models.ForeignKey(Tecnico, on_delete=models.CASCADE, related_name='realiza_exame', blank = True, null = True)
    _resultado = models.TextField(default = 'Pendente', blank = True, verbose_name='Resultado', max_length=200)
    observacoes = models.CharField(blank=True, null=True, verbose_name='Observações', max_length=200)
    numero_amostra = models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Número de amostra')
    estadoexame = models.NullBooleanField(null = True, blank = True, verbose_name='Estado do Exame')
    laboratorio = models.ForeignKey(Laboratorio, on_delete=models.CASCADE, related_name='exames', blank=True, null=True)

    class Meta:
        abstract = True
        verbose_name_plural = "Exames"


class AcoesExame(ExameAbs):
    """Exam state helpers.

    NOTE(review): ``estadoExame`` is declared as a @classmethod, so its first
    parameter ('veterinario') actually receives the class object when called
    normally — the vet/tech presence logic cannot work as written from a
    regular call site; confirm the intent before relying on it.
    """
    @classmethod
    def estadoExame(veterinario,tecnico,estadoexame):
        if tecnico != None:
            if veterinario != None:
                estadoExame = True
                return estadoExame
            else:
                estadoExame = False
                return estadoExame
        else:
            estadoExame = False
            return estadoExame

    class Meta:
        abstract = True


class Exame (AcoesExame):
    """Concrete exam exposing ``resultado`` as a property."""
    def get_absolute_url(self):
        return reverse('exame_detail', kwargs={'pk': self.pk})

    def _get_resultado(self):
        return self._resultado
    def _set_resultado(self,resultado):
        self._resultado = resultado
    resultado = property(_get_resultado,_set_resultado)
| adelinesharla/CtrlVET | cadastro/models.py | Python | gpl-3.0 | 12,567 | 0.05463 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('layers', '24_to_26'),
]
operations = [
migrations.CreateModel(
name='QGISServerLayer',
fields=[
('layer', models.OneToOneField(primary_key=True, serialize=False, to='layers.Layer')),
('base_layer_path', models.CharField(help_text=b'Location of the base layer.', max_length=100, verbose_name=b'Base Layer Path')),
],
),
]
| kartoza/geonode | geonode/qgis_server/migrations/0001_initial.py | Python | gpl-3.0 | 597 | 0.00335 |
"""
A set of built-in default checks for the platform heartbeat endpoint
Other checks should be included in their respective modules/djangoapps
"""
from datetime import datetime, timedelta
from time import sleep, time
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.db.utils import DatabaseError
from xmodule.modulestore.django import modulestore
from xmodule.exceptions import HeartbeatFailure
from .defaults import HEARTBEAT_CELERY_TIMEOUT
from .tasks import sample_task
# DEFAULT SYSTEM CHECKS
# Modulestore
# Modulestore
def check_modulestore():
    """ Check the modulestore connection

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    # This refactoring merely delegates to the default modulestore (which if it's mixed modulestore will
    # delegate to all configured modulestores) and a quick test of sql. A later refactoring may allow
    # any service to register itself as participating in the heartbeat. It's important that all implementation
    # do as little as possible but give a sound determination that they are ready.
    try:
        #@TODO Do we want to parse the output for split and mongo detail and return it?
        modulestore().heartbeat()
        return 'modulestore', True, u'OK'
    except HeartbeatFailure as fail:
        # The failure message becomes the check's detail string.
        return 'modulestore', False, unicode(fail)
def check_database():
    """ Check the database connection by attempting a no-op query

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT 1")
        cursor.fetchone()
        return 'sql', True, u'OK'
    except DatabaseError as fail:
        return 'sql', False, unicode(fail)
    finally:
        # Fix: the original never closed the cursor on either path.
        cursor.close()
# Caching
# Key/value pair written by check_cache_set and read back by check_cache_get.
CACHE_KEY = 'heartbeat-test'
CACHE_VALUE = 'abc123'
def check_cache_set():
    """ Check setting a cache value

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    try:
        # 30-second TTL: long enough for check_cache_get to read it back.
        cache.set(CACHE_KEY, CACHE_VALUE, 30)
    except Exception as fail:
        return 'cache_set', False, unicode(fail)
    return 'cache_set', True, u'OK'
def check_cache_get():
    """ Check getting a cache value

    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    try:
        cached = cache.get(CACHE_KEY)
    except Exception as fail:
        return 'cache_get', False, unicode(fail)
    # Value must round-trip exactly for the check to pass.
    if cached == CACHE_VALUE:
        return 'cache_get', True, u'OK'
    return 'cache_get', False, u'value check failed'
# Celery
def check_celery():
    """ Check running a simple asynchronous celery task
    Returns:
        (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode
        string of either "OK" or the failure message
    """
    now = time()
    datetimenow = datetime.now()
    # Absolute expiry for the probe task; HEARTBEAT_CELERY_TIMEOUT is the
    # module default, overridable via Django settings.
    expires = datetimenow + timedelta(seconds=getattr(settings, 'HEARTBEAT_CELERY_TIMEOUT', HEARTBEAT_CELERY_TIMEOUT))
    try:
        task = sample_task.apply_async(expires=expires)
        # Poll every 250ms until the task reports ready or the expiry passes.
        while expires > datetime.now():
            # NOTE(review): a task that finishes with a falsy result is never
            # treated as done here and will be reported as "expired" -- confirm
            # sample_task always returns a truthy value.
            if task.ready() and task.result:
                finished = str(time() - now)
                return 'celery', True, unicode({'time': finished})
            sleep(0.25)
        # NOTE(review): plain str literal, unlike the unicode messages
        # returned by the other checks in this module.
        return 'celery', False, "expired"
    except Exception as fail:
        return 'celery', False, unicode(fail)
| lduarte1991/edx-platform | openedx/core/djangoapps/heartbeat/default_checks.py | Python | agpl-3.0 | 4,002 | 0.004248 |
import argparse
import requests
import logging
import pip._internal
if __name__ == "__main__":
    # CLI: which package to inspect, which version counting back from the
    # newest, and whether pre-releases may be returned.
    parser = argparse.ArgumentParser(description='Get the nth version of a given package')
    parser.add_argument('--package', type=str, required=True, help='The PyPI you want to inspect')
    parser.add_argument('--nth_last_version', type=int, default=1, help='The nth last package will be retrieved')
    parser.add_argument('--prerelease', help='Get PreRelease Package Version', action='store_true')
    parser.add_argument('--debug', help='Print debug information', action='store_true')
    args = parser.parse_args()
    logger = logging.getLogger("PyPI_CLI")
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    if args.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug("Package: %s" % args.package)
        logger.debug("nth_last_version: %s" % args.nth_last_version)
        logger.debug("prerelease: %s" % args.prerelease)
        logger.debug("debug: %s" % args.debug)
    # NOTE(review): pip._internal is a private API with no stability
    # guarantee; this call is known to break across pip releases.
    finder = pip._internal.index.PackageFinder([], ['https://pypi.python.org/simple'], session=requests.Session())
    results = finder.find_all_candidates(args.package)
    tmp_versions = [str(p.version) for p in results]
    logger.debug("%s" % tmp_versions)
    # De-duplicate while preserving the original (oldest-to-newest) order.
    versions = list()
    for el in tmp_versions:
        if el not in versions:
            versions.append(el)
    # Walk backwards from the newest entry, counting acceptable versions,
    # until the requested nth one is reached.
    pos = -1
    nth_version = 1
    while True:
        # NOTE(review): raises IndexError when the package has fewer
        # acceptable versions than --nth_last_version requests.
        fetched_version = versions[pos]
        logger.debug("Version: %s" % fetched_version)
        if nth_version == args.nth_last_version:
            # Pre-release heuristic: substring match on "rc"/"a"/"b".
            # NOTE(review): this also skips any release whose version string
            # merely contains the letter 'a' or 'b' (e.g. post/dev tags).
            if args.prerelease or not ("rc" in fetched_version or "a" in fetched_version or "b" in fetched_version):
                break
            else:
                pos -= 1
                continue
        pos -= 1
        nth_version += 1
    print(fetched_version)
| zsdonghao/tensorlayer | docker/pypi_list.py | Python | apache-2.0 | 1,987 | 0.003523 |
# pylint: disable=C0103,R0801
import sqlalchemy
import migrate
# Shared metadata object bound to the engine inside upgrade()/downgrade().
meta = sqlalchemy.MetaData()
# define the previous state of tenants
# NOTE(review): column order in the Table below follows dict iteration
# order of tenant.values() -- verify this is acceptable on Python 2 where
# plain dicts are unordered.
tenant = {}
tenant['id'] = sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
                                 autoincrement=True)
tenant['name'] = sqlalchemy.Column('name', sqlalchemy.String(255), unique=True)
tenant['desc'] = sqlalchemy.Column('desc', sqlalchemy.String(255))
tenant['enabled'] = sqlalchemy.Column('enabled', sqlalchemy.Integer)
tenants = sqlalchemy.Table('tenants', meta, *tenant.values())
# this column will become unique/non-nullable after populating it
tenant_uid = sqlalchemy.Column('uid', sqlalchemy.String(255),
                               unique=False, nullable=True)
def upgrade(migrate_engine):
    # Bind the shared metadata to this engine before issuing DDL.
    meta.bind = migrate_engine
    # Add the nullable `uid` column declared above to the tenants table.
    migrate.create_column(tenant_uid, tenants)
    # sqlalchemy-migrate attaches the very same Column object to the table.
    assert tenants.c.uid is tenant_uid
def downgrade(migrate_engine):
    # Bind the shared metadata to this engine before issuing DDL.
    meta.bind = migrate_engine
    # Drop the `uid` column again, reverting to the previous schema.
    migrate.drop_column(tenant_uid, tenants)
    assert not hasattr(tenants.c, 'uid')
| HugoKuo/keystone-essex3 | keystone/backends/sqlalchemy/migrate_repo/versions/005_add_tenants_uid.py | Python | apache-2.0 | 996 | 0.002008 |
from pygame import *
from key_dict import *
''' The player class '''
class Cursor:
    """Player-controlled grid cursor used to place or remove tiles."""
    def __init__(self, x, y, size):
        # Position in grid (tile) coordinates.
        self.x = int(x)
        self.y = int(y)
        # Edge length of one tile in pixels; used by draw().
        self.size = size
        # Tiles moved per accepted key press.
        self.speed = 1
        # Seconds remaining before the next key press is accepted (debounce).
        self.cooldown = 0
        # Index of the currently selected entry in menu_block.
        self.block = 0
        # Mode flags: 'Build' True places tiles, False removes them.
        self.menu_switch = {'Build' : True}
        # Buildable structures, cycled through with the 'block' key.
        self.menu_block = {
            0 : 'Wall',
            1 : 'Heavy tower',
            2 : 'Light tower',
            3 : 'Torch',
            4 : 'Farm'}
    def check_border(self, level, location):
        # True when the single coordinate lies inside [0, map_size).
        # NOTE(review): the same bound is used for both x and y, so this
        # assumes a square map -- confirm against the level implementation.
        if location < 0 or location >= level.map_size:
            return False
        return True
    def update(self, keys, level, dt):
        # Tick down the input debounce timer, clamped at zero.
        self.cooldown -= 1 * dt
        if self.cooldown < 0:
            self.cooldown = 0
        # Tile currently under the cursor (row-major indexing into the map).
        tile = level.terrain_map[self.x + self.y * level.map_size]
        for key in KEY_DICT:
            if keys[key] and self.cooldown == 0:
                # Movement: one tile per press, kept inside the map bounds.
                if KEY_DICT[key] == 'left' and self.check_border(level, self.x - self.speed):
                    self.x -= self.speed
                if KEY_DICT[key] == 'right' and self.check_border(level, self.x + self.speed):
                    self.x += self.speed
                if KEY_DICT[key] == 'up' and self.check_border(level, self.y - self.speed):
                    self.y -= self.speed
                if KEY_DICT[key] == 'down' and self.check_border(level, self.y + self.speed):
                    self.y += self.speed
                # Toggles between building / building removal
                #if KEY_DICT[key] == 'switch':
                #    self.menu_switch['Build'] = not self.menu_switch['Build']
                # Cycle through the build menu, wrapping back to 0.
                if KEY_DICT[key] == 'block':
                    self.block += 1
                    if self.block >= len(self.menu_block):
                        self.block = 0
                if KEY_DICT[key] == 'action':
                    # Build mode: place the selected structure on a passable
                    # tile.  NOTE(review): gold is checked but not deducted
                    # here -- presumably create_tile charges it; confirm.
                    if self.menu_switch['Build'] and level.gold > 0:
                        if tile.passable:
                            level.create_tile(self.x, self.y, self.menu_block[self.block])
                    # Remove mode: tear down a structure and refund its price.
                    elif not self.menu_switch['Build']:
                        if not tile.passable:
                            level.break_tile(self.x, self.y)
                            level.gold += tile.tile_price
                # Re-arm the debounce after any handled key press.
                self.cooldown = 0.2
    def draw(self, screen, xoff, yoff):
        # White outline rectangle at the cursor's tile; offsets are in tiles.
        draw.rect(screen, (255, 255, 255), ((self.x + xoff) * self.size, (self.y + yoff) * self.size, self.size, self.size), int(self.size/(self.size/3)))
| erikjuhani/thefortressheart | cursor.py | Python | gpl-3.0 | 2,603 | 0.006915 |
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Runs tests for the current model and adapter """
from diplomacy_research.models.policy.tests.policy_adapter_test_setup import PolicyAdapterTestSetup
from diplomacy_research.models.policy.token_based import PolicyAdapter, BaseDatasetBuilder
from diplomacy_research.models.policy.token_based.v004_language_model import PolicyModel, load_args
from diplomacy_research.models.value.v001_val_relu_7 import ValueModel, load_args as load_value_args
from diplomacy_research.models.self_play.algorithms.a2c import Algorithm as A2CAlgo, load_args as a2c_args
from diplomacy_research.models.self_play.algorithms.ppo import Algorithm as PPOAlgo, load_args as ppo_args
from diplomacy_research.models.self_play.algorithms.reinforce import Algorithm as ReinforceAlgo,\
load_args as reinforce_args
from diplomacy_research.models.self_play.algorithms.tests.algorithm_test_setup import AlgorithmSetup
from diplomacy_research.utils.process import run_in_separate_process
# ----------- Testable Class --------------
class BaseTestClass(AlgorithmSetup):
    """ Tests the algorithm with the token_based policy model. """
    def __init__(self, algorithm_ctor, algo_load_args):
        """ Constructor
            :param algorithm_ctor: RL algorithm class under test (e.g. A2C, PPO).
            :param algo_load_args: Callable returning that algorithm's args.
        """
        # 'token_based' selects the policy flavour shared by all tests here.
        AlgorithmSetup.__init__(self, algorithm_ctor, algo_load_args, 'token_based')
    def get_policy_model(self):
        """ Returns the PolicyModel """
        return PolicyModel
    def get_policy_builder(self):
        """ Returns the Policy's BaseDatasetBuilder """
        return BaseDatasetBuilder
    def get_policy_adapter(self):
        """ Returns the PolicyAdapter """
        return PolicyAdapter
    def get_policy_load_args(self):
        """ Returns the policy args """
        return load_args()
# ----------- Launch Scripts --------------
def launch_a2c():
    """Run the full algorithm test suite with the A2C algorithm."""
    BaseTestClass(A2CAlgo, a2c_args).run_tests()
def launch_ppo():
    """Run the full algorithm test suite with the PPO algorithm."""
    BaseTestClass(PPOAlgo, ppo_args).run_tests()
def launch_reinforce():
    """Run the full algorithm test suite with the REINFORCE algorithm."""
    BaseTestClass(ReinforceAlgo, reinforce_args).run_tests()
def launch_adapter():
    """Run the policy adapter test suite for the token_based model."""
    setup_kwargs = dict(policy_model_ctor=PolicyModel,
                        value_model_ctor=ValueModel,
                        draw_model_ctor=None,
                        dataset_builder=BaseDatasetBuilder(),
                        policy_adapter_ctor=PolicyAdapter,
                        load_policy_args=load_args,
                        load_value_args=load_value_args,
                        load_draw_args=None,
                        strict=False)
    PolicyAdapterTestSetup(**setup_kwargs).run_tests()
# ----------- Tests --------------
def test_run_a2c():
    """ Runs the a2c test """
    # Executed in a child process for isolation; hard 240s timeout.
    run_in_separate_process(target=launch_a2c, timeout=240)
def test_run_ppo():
    """ Runs the ppo test """
    # Executed in a child process for isolation; hard 240s timeout.
    run_in_separate_process(target=launch_ppo, timeout=240)
def test_run_reinforce():
    """ Runs the reinforce test """
    # Executed in a child process for isolation; hard 240s timeout.
    run_in_separate_process(target=launch_reinforce, timeout=240)
def test_run_adapter():
    """ Runs the adapter test """
    # Executed in a child process for isolation; hard 240s timeout.
    run_in_separate_process(target=launch_adapter, timeout=240)
| diplomacy/research | diplomacy_research/models/policy/token_based/v004_language_model/tests/test_model.py | Python | mit | 4,186 | 0.005256 |
#!/usr/bin/env python3
# Developer virtualenv setup for Certbot client
"""Aids in creating a developer virtual environment for Certbot.
When this module is run as a script, it takes the arguments that should
be passed to pip to install the Certbot packages as command line
arguments. If no arguments are provided, all Certbot packages and their
development dependencies are installed. The virtual environment will be
created with the name "venv" in the current working directory. You can
change the name of the virtual environment by setting the environment
variable VENV_NAME.
"""
from __future__ import print_function
import glob
import os
import re
import shutil
import subprocess
import sys
import time
# Editable installs for every Certbot component plus dev extras; passed to
# pip verbatim when no explicit arguments are given on the command line.
REQUIREMENTS = [
    '-e acme[test]',
    '-e certbot[all]',
    '-e certbot-apache',
    '-e certbot-dns-cloudflare',
    '-e certbot-dns-cloudxns',
    '-e certbot-dns-digitalocean',
    '-e certbot-dns-dnsimple',
    '-e certbot-dns-dnsmadeeasy',
    '-e certbot-dns-gehirn',
    '-e certbot-dns-google',
    '-e certbot-dns-linode',
    '-e certbot-dns-luadns',
    '-e certbot-dns-nsone',
    '-e certbot-dns-ovh',
    '-e certbot-dns-rfc2136',
    '-e certbot-dns-route53',
    '-e certbot-dns-sakuracloud',
    '-e certbot-nginx',
    '-e certbot-compatibility-test',
    '-e certbot-ci',
    '-e letstest',
]
# On Windows: add the installer project, drop the POSIX-only Apache plugin
# and its compatibility tests.
if sys.platform == 'win32':
    REQUIREMENTS.append('-e windows-installer')
    REQUIREMENTS.remove('-e certbot-apache')
    REQUIREMENTS.remove('-e certbot-compatibility-test')
# Captures major/minor from a version string such as "3.8.1".
VERSION_PATTERN = re.compile(r'^(\d+)\.(\d+).*$')
class PythonExecutableNotFoundError(Exception):
    """Raised when no suitable Python executable can be located.

    NOTE(review): currently unused -- find_python_executable() raises
    RuntimeError instead; consider unifying the two.
    """
    pass
def find_python_executable() -> str:
    """
    Find a python executable compatible with Certbot (Python >= 3.6).

    Will test, in decreasing priority order:

    * the current Python interpreter
    * 'python3' then 'python' executables in PATH, if available
    * the Windows Python launcher 'py' executable in PATH, if available

    :rtype: str
    :return: the relevant python executable path
    :raise RuntimeError: if no relevant python executable path could be found
    """
    # First try: the interpreter currently running this script.
    if _check_version('{0}.{1}.{2}'.format(
            sys.version_info[0], sys.version_info[1], sys.version_info[2])):
        return sys.executable

    # Second try: python3/python executables found on PATH.
    for one_version in ('3', '',):
        try:
            one_python = 'python{0}'.format(one_version)
            output = subprocess.check_output([one_python, '--version'],
                                             universal_newlines=True, stderr=subprocess.STDOUT)
            if _check_version(output.strip().split()[1]):
                return subprocess.check_output([one_python, '-c',
                                                'import sys; sys.stdout.write(sys.executable);'],
                                               universal_newlines=True)
        except (subprocess.CalledProcessError, OSError):
            pass

    # Last try: the Windows Python launcher.
    try:
        output_version = subprocess.check_output(['py', '-3', '--version'],
                                                 universal_newlines=True, stderr=subprocess.STDOUT)
        if _check_version(output_version.strip().split()[1]):
            # Bug fix: the original referenced an undefined name `env_arg`
            # here, raising NameError whenever this fallback was reached.
            return subprocess.check_output(['py', '-3', '-c',
                                            'import sys; sys.stdout.write(sys.executable);'],
                                           universal_newlines=True)
    except (subprocess.CalledProcessError, OSError):
        pass

    # (The dead `python_executable_path` variable from the original has been
    # removed: reaching this point always means nothing suitable was found.)
    raise RuntimeError('Error, no compatible Python executable for Certbot could be found.')
def _check_version(version_str):
    """Return True when *version_str* denotes a Python version Certbot
    supports (3.6 or newer); print a notice and return False otherwise."""
    match = VERSION_PATTERN.search(version_str)
    if not match:
        return False
    if (int(match.group(1)), int(match.group(2))) >= (3, 6):
        return True
    print('Incompatible python version for Certbot found: {0}'.format(version_str))
    return False
def subprocess_with_print(cmd, env=None, shell=False):
    """Echo *cmd* to stdout, then run it with subprocess.check_call."""
    run_env = os.environ if env is None else env
    if isinstance(cmd, list):
        print('+ {0}'.format(subprocess.list2cmdline(cmd)))
    else:
        print(cmd)
    subprocess.check_call(cmd, env=run_env, shell=shell)
def subprocess_output_with_print(cmd, env=None, shell=False):
    """Echo *cmd* to stdout, run it, and return its captured output."""
    run_env = os.environ if env is None else env
    if isinstance(cmd, list):
        print('+ {0}'.format(subprocess.list2cmdline(cmd)))
    else:
        print(cmd)
    return subprocess.check_output(cmd, env=run_env, shell=shell)
def get_venv_python_path(venv_path):
    """Return the absolute path of the python executable inside *venv_path*.

    Checks the POSIX layout (bin/python) first, then the Windows layout
    (Scripts\\python.exe); raises ValueError when neither file exists.
    """
    for relative in ('bin/python', 'Scripts\\python.exe'):
        candidate = os.path.join(venv_path, relative)
        if os.path.isfile(candidate):
            return os.path.abspath(candidate)
    raise ValueError((
        'Error, could not find python executable in venv path {0}: is it a valid venv ?'
        .format(venv_path)))
def prepare_venv_path(venv_name):
    """Determine the venv path and prepare it for use.

    Removes any Python egg metadata from the current working directory and
    makes sure the chosen path is free.  The path used is the VENV_NAME
    environment variable when set, *venv_name* otherwise; a pre-existing
    directory at that path is renamed with a timestamped ``.bak`` suffix.

    :param str venv_name: name or path for the virtual environment when
        VENV_NAME is not set
    :returns: path where the virtual environment should be created
    :rtype: str
    """
    # Clean up stale egg metadata from the working directory.
    for egg_path in glob.glob('*.egg-info'):
        if os.path.isdir(egg_path):
            shutil.rmtree(egg_path)
        else:
            os.remove(egg_path)
    override = os.environ.get('VENV_NAME')
    if override:
        print('Creating venv at {0}'
              ' as specified in VENV_NAME'.format(override))
        venv_name = override
    # Keep the old environment around instead of deleting it.
    if os.path.isdir(venv_name):
        os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))
    return venv_name
def install_packages(venv_name, pip_args):
    """Install packages into the given venv and print activation help.

    :param str venv_name: name or path of the virtual environment
    :param pip_args: command line arguments handed to pip to install packages
    :type pip_args: `list` of `str`
    """
    # Use the venv's own interpreter so everything lands inside the venv.
    py_venv = get_venv_python_path(venv_name)
    subprocess_with_print([py_venv, os.path.abspath('tools/pipstrap.py')])
    subprocess_with_print([py_venv, os.path.abspath('tools/pip_install.py')] + list(pip_args))
    if os.path.isdir(os.path.join(venv_name, 'bin')):
        # Linux/OSX specific
        posix_rule = '-------------------------------------------------------------------'
        print(posix_rule)
        print('Please run the following command to activate developer environment:')
        print('source {0}/bin/activate'.format(venv_name))
        print(posix_rule)
    elif os.path.isdir(os.path.join(venv_name, 'Scripts')):
        # Windows specific
        win_rule = '---------------------------------------------------------------------------'
        print(win_rule)
        print('Please run one of the following commands to activate developer environment:')
        print('{0}\\Scripts\\activate.bat (for Batch)'.format(venv_name))
        print('.\\{0}\\Scripts\\Activate.ps1 (for Powershell)'.format(venv_name))
        print(win_rule)
    else:
        raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))
def create_venv(venv_path):
    """Create a Python virtual environment at venv_path.

    :param str venv_path: path where the venv should be created
    """
    subprocess_with_print([find_python_executable(), '-m', 'venv', venv_path])
def main(pip_args=None):
    """Create the developer venv and install the requested packages.

    :param pip_args: pip arguments to install; defaults to REQUIREMENTS.
    """
    venv_path = prepare_venv_path('venv')
    create_venv(venv_path)
    install_packages(venv_path, pip_args if pip_args else REQUIREMENTS)
if __name__ == '__main__':
    # Forward CLI arguments as pip install targets; with none given,
    # main() falls back to the full REQUIREMENTS list.
    main(sys.argv[1:])
| letsencrypt/letsencrypt | tools/venv.py | Python | apache-2.0 | 8,641 | 0.002315 |
# The name of the dashboard to be added to HORIZON['dashboards']. Required.
DASHBOARD = 'help_about'
DISABLED = False
# A list of applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = [
'openstack_dashboard.dashboards.help_about',
]
| ging/horizon | openstack_dashboard/enabled/_35_help_about.py | Python | apache-2.0 | 250 | 0 |
from django.conf.urls import url
from core.views.generic import get_dashboard, delete
from users.views.individuals import RegisterView
from users.views.base import LoginView, logout_user
from core.views.display import IndexView
# URL routes for the core app; names are referenced via reverse()/templates.
# NOTE(review): commented-out entries preserve an older index/delete scheme.
urlpatterns = [#url(r'^$', LoginView.as_view(), name='index'),
               url(r'^$', IndexView.as_view(), name='index'),
               url(r'^login$', LoginView.as_view(), name='login'),
               url(r'^logout$', logout_user, name='logout'),
               url(r'^register$', RegisterView.as_view(), name='register'),
               #url(r'^delete/(?P<content_type_id>\d+)/(?P<object_id>\d+)$', delete, name='delete'),
               url(r'^delete$', delete, name='delete'),
               url(r'^dashboard$', get_dashboard, name='dashboard')] | acuriel/nahuatilli | core/urls.py | Python | gpl-3.0 | 840 | 0.007143 |
from requests import HTTPError
from database import Database
import simplejson as json
db = Database.getDatabaseConnection()["cras"]
from log_session import LogSession
import datetime
class DB:
    """Thin Cloudant-backed persistence layer for users and log sessions.

    All methods are staticmethods operating on the module-level ``db``
    database handle; user documents are keyed by their user id (``_id``).
    Python 2 code (print statements, ``except Exception, e`` syntax).
    """
    def __init__(self):
        pass
    @staticmethod
    def add_user(user_id, user_name, mail,picture,fcm_token):
        # Create a fresh user document; the user id doubles as the _id.
        print ("DEBUG: adding user with data: " + user_id + " "+ user_name + " " + mail + " " + fcm_token)
        data = {
            "_id": user_id,
            "fcm_token" : fcm_token,
            "name": user_name,
            "mail": mail,
            "picture": picture,
            "supervise": [],
            "supervised_by" : [],
            "currently_monitoring" : [],
            "currently_monitored_by": "",
            "log_sessions": {}
        }
        try:
            db.create_document(data)
        except HTTPError:
            # Document with this _id already exists; returns `data` anyway.
            print "CloudantException: user already exists"
        return data
    @staticmethod
    def get_user_by_ID(user_ID):
        # Fetch a user document, or None when the lookup fails.
        # NOTE(review): naming is inconsistent with the module-level
        # get_user_by_id() helper, which does NOT catch lookup errors.
        try:
            return db[user_ID]
        except Exception:
            print "DB exception : User does not exists"
            return None
    @staticmethod
    def add_supervisor(user_id, other_id):
        # Create the symmetric supervise/supervised_by link and persist both.
        user = get_user_by_id(user_id)
        other_user = get_user_by_id(other_id)
        user["supervised_by"].append(other_id)
        other_user["supervise"].append(user_id)
        user.save()
        other_user.save()
    @staticmethod
    def get_user_supervise(user_id):
        # JSON list of users this user supervises, each annotated with a
        # "status" flag: True when that user is being monitored right now.
        currently_monitoring = db[user_id]["currently_monitoring"]
        user_arr = []
        for id in db[user_id]["supervise"]:
            current = False
            if id in currently_monitoring:
                current = True
            # user_arr.append({"user": get_user_by_id(id),
            #                  "status" : current})
            user = get_user_by_id(id).copy()
            user.update({"status":current})
            user_arr.append(user)
        return json.dumps(user_arr)
    @staticmethod
    def get_user_supervised_by(user_id):
        # JSON list of the full documents of this user's supervisors.
        user_arr = []
        for id in db[user_id]["supervised_by"]:
            user_arr.append(get_user_by_id(id))
        return json.dumps(user_arr)
    @staticmethod
    def get_user_name(id):
        return db[id]["name"]
    @staticmethod
    def update_monitoring_status(user_id, sup_id, monitoring,is_sup):
        # Start (monitoring=True) or stop a monitoring relationship.
        # `is_sup` tells which side of the relation `sup_id` plays when
        # stopping; on stop, the open log session gets an end_time stamp.
        user = db[user_id]
        sup = db[sup_id]
        if monitoring:
            user["currently_monitored_by"] = sup_id
            sup["currently_monitoring"].append(user_id)
        else:
            if is_sup:
                if sup_id in user["log_sessions"]:
                    num_of_logs = len(user["log_sessions"][sup_id])
                    user["log_sessions"][sup_id][num_of_logs-1].update({"end_time": str(datetime.datetime.now())})
                    # NOTE(review): this save() plus the unconditional
                    # save() below writes the document twice -- confirm the
                    # cloudant client tolerates the second write (revision
                    # conflicts are possible otherwise).
                    user.save()
                user["currently_monitoring"].remove(sup_id)
                sup["currently_monitored_by"] = ""
            else:
                if user_id in sup["log_sessions"]:
                    num_of_logs = len(sup["log_sessions"][user_id])
                    sup["log_sessions"][user_id][num_of_logs - 1].update({"end_time": str(datetime.datetime.now())})
                    sup.save()
                sup["currently_monitoring"].remove(user_id)
                user["currently_monitored_by"] = ""
        user.save()
        sup.save()
    @staticmethod
    def add_log_session(user_id,to_monitor_id):
        # Append a new LogSession (serialized to a plain dict) under the
        # monitored user's id, creating the list on first use.
        user = db[user_id]
        if to_monitor_id not in user["log_sessions"]:
            user["log_sessions"].update({to_monitor_id: []})
        user["log_sessions"][to_monitor_id].append(json.loads(LogSession(datetime.datetime.now(), to_monitor_id).toJSON()))
        try:
            user.save()
        except Exception,e:
            print e
    @staticmethod
    def get_currently_monitored_by(user_id):
        return db[user_id]["currently_monitored_by"]
    @staticmethod
    def get_logs(user_id,sup_id):
        # All log sessions this user holds for sup_id, or None when absent.
        log_sessions = db[user_id]["log_sessions"]
        if sup_id in log_sessions:
            return log_sessions[sup_id]
        else:
            print "There are no logs available for: " + sup_id
            return None
    @staticmethod
    def add_log_event(user_id, sup_id,event):
        # Append `event` to the most recent log session for sup_id.
        # NOTE(review): `user` and the repeated db[user_id] lookups may
        # yield distinct document objects -- verify the save() persists the
        # mutation made through db[user_id].
        user = get_user_by_id(user_id)
        log_sessions = db[user_id]["log_sessions"]
        if sup_id in log_sessions:
            try:
                last_session = db[user_id]["log_sessions"][sup_id][len(db[user_id]["log_sessions"][sup_id]) - 1]
                last_session["events"].append(event)
                db[user_id]["log_sessions"][sup_id][len(db[user_id]["log_sessions"][sup_id]) - 1] = last_session
                user.save()
            except Exception,e:
                print e
def get_user_by_id(user_id):
    # Fetch the raw Cloudant document for user_id.
    # NOTE(review): unlike DB.get_user_by_ID this does not catch lookup
    # errors; a missing user propagates as an exception.
    user = db[user_id]
    return user
def db_exists(user_id):
    # Despite the name, returns the user's JSON on success and False when
    # the lookup fails.  NOTE(review): verify callers only test truthiness.
    try:
        user = db[user_id]
    except Exception,e:
        return False
    print "DEBUG: the name is : " + user["name"]
    return user.json()
def get_fcm_token(user_id):
    # Firebase Cloud Messaging token stored on the user at registration.
    return db[user_id]["fcm_token"]
| tweiss1234/Cras | db_actions.py | Python | apache-2.0 | 5,095 | 0.005299 |
# -*- coding: utf-8 -*-
#
# This file is based upon the file generated by sphinx-quickstart. However,
# where sphinx-quickstart hardcodes values in this file that you input, this
# file has been changed to pull from your module's metadata module.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# Import project metadata
from ecs import metadata
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
              'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# show todos
# (renders ".. todo::" directives in the built docs instead of hiding them)
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = metadata.project
copyright = metadata.copyright
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = metadata.version
# The full version, including alpha/beta/rc tags.
release = metadata.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = metadata.project_no_spaces + 'doc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', metadata.project_no_spaces + '.tex',
metadata.project + ' Documentation', metadata.authors_string,
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', metadata.package, metadata.project + ' Documentation',
metadata.authors_string, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', metadata.project_no_spaces,
metadata.project + ' Documentation', metadata.authors_string,
metadata.project_no_spaces, metadata.description, 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
}
# Extra local configuration. This is useful for placing the class description
# in the class docstring and the __init__ parameter documentation in the
# __init__ docstring. See
# <http://sphinx-doc.org/ext/autodoc.html#confval-autoclass_content> for more
# information.
autoclass_content = 'both'
| GCStokum/ECS-Game | ecs-0.1/docs/source/conf.py | Python | mit | 8,695 | 0.00483 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.version import version
class GoogleCloudStorageCreateBucketOperator(BaseOperator):
    """
    Creates a new bucket. Google Cloud Storage uses a flat namespace,
    so you can't create a bucket with a name that is already in use.

    .. seealso::
        For more information, see Bucket Naming Guidelines:
        https://cloud.google.com/storage/docs/bucketnaming.html#requirements

    :param bucket_name: The name of the bucket. (templated)
    :type bucket_name: str
    :param resource: An optional dict with parameters for creating the bucket.
        For information on available parameters, see Cloud Storage API doc:
        https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
    :type resource: dict
    :param storage_class: This defines how objects in the bucket are stored
        and determines the SLA and the cost of storage (templated). Values include

        - ``MULTI_REGIONAL``
        - ``REGIONAL``
        - ``STANDARD``
        - ``NEARLINE``
        - ``COLDLINE``.

        If this value is not specified when the bucket is
        created, it will default to STANDARD.
    :type storage_class: str
    :param location: The location of the bucket. (templated)
        Object data for objects in the bucket resides in physical storage
        within this region. Defaults to US.

        .. seealso:: https://developers.google.com/storage/docs/bucket-locations

    :type location: str
    :param project_id: The ID of the GCP Project. (templated)
    :type project_id: str
    :param labels: User-provided labels, in key/value pairs.
    :type labels: dict
    :param google_cloud_storage_conn_id: The connection ID to use when
        connecting to Google cloud storage.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must
        have domain-wide delegation enabled.
    :type delegate_to: str

    **Example**::

        The following Operator would create a new bucket ``test-bucket``
        with ``MULTI_REGIONAL`` storage class in ``EU`` region ::

            CreateBucket = GoogleCloudStorageCreateBucketOperator(
                task_id='CreateNewBucket',
                bucket_name='test-bucket',
                storage_class='MULTI_REGIONAL',
                location='EU',
                labels={'env': 'dev', 'team': 'airflow'},
                google_cloud_storage_conn_id='airflow-service-account'
            )

    """
    template_fields = ('bucket_name', 'storage_class',
                       'location', 'project_id')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 bucket_name,
                 resource=None,
                 storage_class='MULTI_REGIONAL',
                 location='US',
                 project_id=None,
                 labels=None,
                 google_cloud_storage_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleCloudStorageCreateBucketOperator, self).__init__(*args, **kwargs)
        self.bucket_name = bucket_name
        self.resource = resource
        self.storage_class = storage_class
        self.location = location
        self.project_id = project_id
        self.labels = labels
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to

    def execute(self, context):
        """Create the bucket via the GCS hook.

        Works on a *copy* of ``self.labels``: the previous implementation
        mutated the user-supplied dict in place, which leaked the
        ``airflow-version`` label into a dict the caller may share between
        operators (and re-applied it on every task retry).
        """
        labels = dict(self.labels) if self.labels is not None else None
        if labels is not None:
            labels['airflow-version'] = 'v' + version.replace('.', '-').replace('+', '-')

        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
            delegate_to=self.delegate_to
        )

        hook.create_bucket(bucket_name=self.bucket_name,
                           resource=self.resource,
                           storage_class=self.storage_class,
                           location=self.location,
                           project_id=self.project_id,
                           labels=labels)
| artwr/airflow | airflow/contrib/operators/gcs_operator.py | Python | apache-2.0 | 5,140 | 0.000778 |
from rest_framework import serializers
from emotion_annotator.models import FrameEmotions
class FrameEmotionsSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing per-frame emotion annotations.

    Serializes the ``video``, ``frameTime`` and ``emotionType`` fields of a
    :class:`emotion_annotator.models.FrameEmotions` instance.
    """
    class Meta:
        model = FrameEmotions
        fields = ('video', 'frameTime', 'emotionType')
| dumoulinj/ers | ers_backend/emotion_annotator/serializers.py | Python | mit | 267 | 0.007491 |
import unittest
import copy
import gc
import rpy2.rinterface as rinterface
rinterface.initr()
class SexpTestCase(unittest.TestCase):
    """Unit tests for the base ``rinterface.Sexp`` wrapper.

    Uses ``assertEqual`` (``assertEquals`` is a deprecated alias removed in
    modern Python).
    """

    def testNew_invalid(self):
        """Sexp() must reject objects that are not already R SEXPs."""
        x = "a"
        self.assertRaises(ValueError, rinterface.Sexp, x)

    def testNew(self):
        """Wrapping an existing Sexp yields an R-identical object."""
        sexp = rinterface.baseenv.get("letters")
        sexp_new = rinterface.Sexp(sexp)

        idem = rinterface.baseenv.get("identical")
        self.assertTrue(idem(sexp, sexp_new)[0])

        sexp_new2 = rinterface.Sexp(sexp)
        self.assertTrue(idem(sexp, sexp_new2)[0])
        # The copies must stay valid after the original wrapper is deleted.
        del sexp
        self.assertTrue(idem(sexp_new, sexp_new2)[0])

    def testTypeof_get(self):
        """``typeof`` reflects the underlying R SEXP type."""
        sexp = rinterface.baseenv.get("letters")
        self.assertEqual(sexp.typeof, rinterface.STRSXP)

        sexp = rinterface.baseenv.get("pi")
        self.assertEqual(sexp.typeof, rinterface.REALSXP)

        sexp = rinterface.baseenv.get("plot")
        self.assertEqual(sexp.typeof, rinterface.CLOSXP)

    def testDo_slot(self):
        """``do_slot`` returns attribute values and raises on missing slots."""
        data_func = rinterface.baseenv.get("data")
        data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP))
        sexp = rinterface.globalenv.get("iris")
        names = sexp.do_slot("names")
        iris_names = ("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species")
        self.assertEqual(len(iris_names), len(names))
        for i, n in enumerate(iris_names):
            self.assertEqual(iris_names[i], names[i])
        self.assertRaises(LookupError, sexp.do_slot, "foo")

    def testDo_slot_assign(self):
        """``do_slot_assign`` overwrites an existing slot."""
        data_func = rinterface.baseenv.get("data")
        data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP))
        sexp = rinterface.globalenv.get("iris")
        iris_names = rinterface.StrSexpVector(['a', 'b', 'c', 'd', 'e'])
        sexp.do_slot_assign("names", iris_names)
        names = [x for x in sexp.do_slot("names")]
        self.assertEqual(['a', 'b', 'c', 'd', 'e'], names)

    def testDo_slot_assign_create(self):
        # test that assigning slots also creates the slot
        x = rinterface.IntSexpVector([1, 2, 3])
        x.do_slot_assign("foo", rinterface.StrSexpVector(["bar", ]))
        slot = x.do_slot("foo")
        self.assertEqual(1, len(slot))
        self.assertEqual("bar", slot[0])

    def testSexp_rsame_true(self):
        sexp_a = rinterface.baseenv.get("letters")
        sexp_b = rinterface.baseenv.get("letters")
        self.assertTrue(sexp_a.rsame(sexp_b))

    def testSexp_rsame_false(self):
        sexp_a = rinterface.baseenv.get("letters")
        sexp_b = rinterface.baseenv.get("pi")
        self.assertFalse(sexp_a.rsame(sexp_b))

    def testSexp_rsame_wrongType(self):
        sexp_a = rinterface.baseenv.get("letters")
        self.assertRaises(ValueError, sexp_a.rsame, 'foo')

    def testSexp_sexp(self):
        """``__sexp__`` capsules can be moved between wrappers."""
        sexp = rinterface.IntSexpVector([1, 2, 3])
        cobj = sexp.__sexp__
        sexp = rinterface.IntSexpVector([4, 5, 6, 7])
        self.assertEqual(4, len(sexp))
        sexp.__sexp__ = cobj
        self.assertEqual(3, len(sexp))

    def testSexp_sexp_wrongtypeof(self):
        """Assigning a capsule of a different SEXP type is rejected."""
        sexp = rinterface.IntSexpVector([1, 2, 3])
        cobj = sexp.__sexp__
        sexp = rinterface.StrSexpVector(['a', 'b'])
        self.assertEqual(2, len(sexp))
        self.assertRaises(ValueError, sexp.__setattr__, '__sexp__', cobj)

    def testSexp_sexp_destroyCobj(self):
        sexp = rinterface.IntSexpVector([1, 2, 3])
        cobj = sexp.__sexp__
        del cobj
        gc.collect()
        # no real test, just make sure that it does
        # not cause a segfault

    def testSexp_deepcopy(self):
        """Deep copies are equal in value but distinct R objects."""
        sexp = rinterface.IntSexpVector([1, 2, 3])
        self.assertEqual(0, sexp.named)
        rinterface.baseenv.get("identity")(sexp)
        self.assertEqual(2, sexp.named)
        sexp2 = sexp.__deepcopy__()
        self.assertEqual(sexp.typeof, sexp2.typeof)
        self.assertEqual(list(sexp), list(sexp2))
        self.assertFalse(sexp.rsame(sexp2))
        self.assertEqual(0, sexp2.named)
        # should be the same as above, but just in case:
        sexp3 = copy.deepcopy(sexp)
        self.assertEqual(sexp.typeof, sexp3.typeof)
        self.assertEqual(list(sexp), list(sexp3))
        self.assertFalse(sexp.rsame(sexp3))
        self.assertEqual(0, sexp3.named)
def suite():
    """Return a unittest suite containing all SexpTestCase tests."""
    # Use a local name that does not shadow the enclosing function.
    test_suite = unittest.TestLoader().loadTestsFromTestCase(SexpTestCase)
    return test_suite


if __name__ == '__main__':
    tr = unittest.TextTestRunner(verbosity=2)
    tr.run(suite())
| lbouma/Cyclopath | pyserver/bin/rpy2/rinterface/tests/test_Sexp.py | Python | apache-2.0 | 4,549 | 0.005935 |
from chainer import function
class Flatten(function.Function):

    """Reshape the sole input array into a one-dimensional vector."""

    def forward(self, inputs):
        # No inputs need to be retained for the backward pass.
        self.retain_inputs(())
        x, = inputs
        # Remember the original shape so backward can undo the flattening.
        self._in_shape = x.shape
        return x.ravel(),

    def backward(self, inputs, grads):
        gy, = grads
        # The gradient of a reshape is a reshape back to the input shape.
        return gy.reshape(self._in_shape),
def flatten(x):
    """Flatten a given array into one dimension.

    Args:
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable.

    Returns:
        ~chainer.Variable: Output variable flatten to one dimension.

    .. note::
       When you input a scalar array (i.e. the shape is ``()``),
       you can also get the one dimension array whose shape is ``(1,)``.

    .. admonition:: Example

        >>> x = np.array([[1, 2], [3, 4]])
        >>> x.shape
        (2, 2)
        >>> y = F.flatten(x)
        >>> y.shape
        (4,)
        >>> y.data
        array([1, 2, 3, 4])

        >>> x = np.arange(8).reshape(2, 2, 2)
        >>> x.shape
        (2, 2, 2)
        >>> y = F.flatten(x)
        >>> y.shape
        (8,)
        >>> y.data
        array([0, 1, 2, 3, 4, 5, 6, 7])

    """
    op = Flatten()
    return op(x)
| kashif/chainer | chainer/functions/array/flatten.py | Python | mit | 1,211 | 0 |
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.SimulationParameters
.. moduleauthor:: Hendrix Demers <[email protected]>
MCXRay simulation parameters input file.
"""
# Script information for the file.
__author__ = "Hendrix Demers ([email protected])"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Subversion informations for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"
# Standard library modules.
import copy
# Third party modules.
# Local modules.
# Project modules
import pymcxray.FileFormat.MCXRayModel as MCXRayModel
import pymcxray.FileFormat.Version as Version
# Globals and constants variables.
# Each KEY_* constant is the exact token that appears on the left of '=' in
# an MCXRay simulation-parameters file; they key the _parameters dict of
# SimulationParameters below.
KEY_BASE_FILENAME = "BaseFileName"
KEY_NUMBER_ELECTRONS = "ElectronNbr"
KEY_NUMBER_PHOTONS = "PhotonNbr"
KEY_NUMBER_WINDOWS = "WindowNbr"
KEY_NUMBER_FILMS_X = "FilmNbrX"
KEY_NUMBER_FILMS_Y = "FilmNbrY"
KEY_NUMBER_FILMS_Z = "FilmNbrZ"
# Only one of the next two is used, depending on the file-format version.
KEY_NUMBER_CHANNELS = "SpectraChannel"
KEY_ENERGY_CHANNEL_WIDTH = "EnergyChannelWidth"
KEY_SPECTRA_INTERPOLATION_MODEL = "SpectraInterpolation"
KEY_VOXEL_SIMPLIFICATION = "VoxelSimplification"
# Present from file-format version 1.4.4 onward.
KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR = "ElasticCrossSectionScalingFactor"
KEY_ENERGY_LOSS_SCALING_FACTOR = "EnergyLossScalingFactor"
class SimulationParameters(object):
    """Reader/writer for an MCXRay "simulation parameters" input file.

    Parameter values live in the ``self._parameters`` dict, keyed by the
    module-level ``KEY_*`` constants, and are exposed through properties.
    The set of keys read/written depends on the file-format version.
    """

    def __init__(self):
        self.version = copy.deepcopy(Version.CURRENT_VERSION)
        self._keys = self._createKeys()
        self._parameters = {}
        self.defaultValues()

    def _createKeys(self):
        """Return the ordered list of parameter keys valid for ``self.version``."""
        keys = []

        keys.append(KEY_BASE_FILENAME)
        keys.append(KEY_NUMBER_ELECTRONS)
        keys.append(KEY_NUMBER_PHOTONS)
        keys.append(KEY_NUMBER_WINDOWS)
        keys.append(KEY_NUMBER_FILMS_X)
        keys.append(KEY_NUMBER_FILMS_Y)
        keys.append(KEY_NUMBER_FILMS_Z)

        if self.version == Version.BEFORE_VERSION:
            keys.append(KEY_NUMBER_CHANNELS)
        else:
            keys.append(KEY_ENERGY_CHANNEL_WIDTH)

        keys.append(KEY_SPECTRA_INTERPOLATION_MODEL)
        keys.append(KEY_VOXEL_SIMPLIFICATION)

        if self.version >= Version.VERSION_1_4_4:
            keys.append(KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR)
            keys.append(KEY_ENERGY_LOSS_SCALING_FACTOR)

        return keys

    def defaultValues(self):
        """Reset every parameter to its default value."""
        baseFilenameRef = r"Results\McXRay"
        self.baseFilename = baseFilenameRef

        self.numberElectrons = 1000
        self.numberPhotons = 10000
        self.numberWindows = 64
        self.numberFilmsX = 128
        self.numberFilmsY = 128
        self.numberFilmsZ = 128
        self.numberChannels = 1024
        self.energyChannelWidth_eV = 5.0

        self.spectrumInterpolationModel = MCXRayModel.SpectrumInterpolationModel.TYPE_LINEAR_DOUBLE
        self.voxelSimplification = None

        self.elasticCrossSectionScalingFactor = 1.0
        self.energyLossScalingFactor = 1.0

    @staticmethod
    def _extractBool(text):
        """Parse a boolean serialized with ``"%s"`` formatting ("True"/"False").

        The previous implementation used the ``bool`` constructor, for which
        any non-empty string -- including "False" -- is true, so a written
        file could not round-trip.
        """
        return text.strip().lower() in ("true", "1")

    def _createExtractMethod(self):
        """Map each key to the callable that parses its text value."""
        extractMethods = {}

        extractMethods[KEY_BASE_FILENAME] = str
        extractMethods[KEY_NUMBER_ELECTRONS] = int
        extractMethods[KEY_NUMBER_PHOTONS] = int
        extractMethods[KEY_NUMBER_WINDOWS] = int
        extractMethods[KEY_NUMBER_FILMS_X] = int
        extractMethods[KEY_NUMBER_FILMS_Y] = int
        extractMethods[KEY_NUMBER_FILMS_Z] = int
        extractMethods[KEY_NUMBER_CHANNELS] = int
        extractMethods[KEY_ENERGY_CHANNEL_WIDTH] = float
        extractMethods[KEY_SPECTRA_INTERPOLATION_MODEL] = self._extractSpectrumInterpolationModel
        extractMethods[KEY_VOXEL_SIMPLIFICATION] = self._extractBool
        extractMethods[KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR] = float
        extractMethods[KEY_ENERGY_LOSS_SCALING_FACTOR] = float

        return extractMethods

    def _createFormatMethod(self):
        """Map each key to the %-format string used when writing it."""
        formatMethods = {}

        formatMethods[KEY_BASE_FILENAME] = "%s"
        formatMethods[KEY_NUMBER_ELECTRONS] = "%i"
        formatMethods[KEY_NUMBER_PHOTONS] = "%i"
        formatMethods[KEY_NUMBER_WINDOWS] = "%i"
        formatMethods[KEY_NUMBER_FILMS_X] = "%i"
        formatMethods[KEY_NUMBER_FILMS_Y] = "%i"
        formatMethods[KEY_NUMBER_FILMS_Z] = "%i"
        formatMethods[KEY_NUMBER_CHANNELS] = "%i"
        formatMethods[KEY_ENERGY_CHANNEL_WIDTH] = "%s"
        formatMethods[KEY_SPECTRA_INTERPOLATION_MODEL] = "%s"
        formatMethods[KEY_VOXEL_SIMPLIFICATION] = "%s"
        formatMethods[KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR] = "%.5f"
        formatMethods[KEY_ENERGY_LOSS_SCALING_FACTOR] = "%.5f"

        return formatMethods

    def _extractSpectrumInterpolationModel(self, text):
        """Parse the integer model id into a SpectrumInterpolationModel."""
        model = MCXRayModel.SpectrumInterpolationModel(int(text))
        return model

    def read(self, filepath):
        """Read parameters from *filepath* (the file version is read first)."""
        self.version.readFromFile(filepath)

        extractMethods = self._createExtractMethod()

        # ``with`` closes the handle; the old ``open(...).readlines()``
        # leaked the file object.
        with open(filepath, 'r') as inputFile:
            lines = inputFile.readlines()

        for line in lines:
            line = line.strip()

            for key in self._keys:
                if line.startswith(key):
                    items = line.split('=')
                    self._parameters[key] = extractMethods[key](items[-1])

    def write(self, filepath):
        """Write the parameters to *filepath* in MCXRay input-file format."""
        # ``with`` guarantees the file is flushed and closed even on error;
        # the old code never closed the handle.
        with open(filepath, 'w') as outputFile:
            self._writeHeader(outputFile)

            self.version.writeLine(outputFile)

            formatMethods = self._createFormatMethod()
            keys = self._createKeys()
            for key in keys:
                if key == KEY_SPECTRA_INTERPOLATION_MODEL:
                    value = formatMethods[key] % (self._parameters[key].getModel())
                else:
                    value = formatMethods[key] % (self._parameters[key])
                # Skip unset parameters ("%s" % None gives the string "None").
                if value is not None and value != "None":
                    line = "%s=%s\n" % (key, value)
                    outputFile.write(line)

    def _writeHeader(self, outputFile):
        """Write the comment banner matching the file-format version."""
        if self._parameters[KEY_VOXEL_SIMPLIFICATION] is not None:
            headerLines = [ "********************************************************************************",
                            "*** SIMULATION PARAMETERS",
                            "***",
                            "*** BaseFileName = All output files will be named using this term",
                            "*** ElectronNbr = Total number of electrons to simulate",
                            "*** PhotonNbr = Total number of photons to simulate in EDS",
                            "*** WindowNbr = Number of energy windows in PhiRo computations",
                            "*** FilmNbrX = Number of X layers in PhiRo computations",
                            "*** FilmNbrY = Number of Y layers in PhiRo computations",
                            "*** FilmNbrZ = Number of Z layers in PhiRo computations",
                            "*** SpectraChannel = Number of channels in spectraa",
                            "*** SpectraInterpolation = Interpolation type for spectras",
                            "*** VoxelSimplification = Use only middle voxel of trajectories to store energy",
                            "***",
                            "********************************************************************************"]
        elif self.version == Version.BEFORE_VERSION:
            headerLines = [ "********************************************************************************",
                            "*** SIMULATION PARAMETERS",
                            "***",
                            "*** BaseFileName = All output files will be named using this term",
                            "*** ElectronNbr = Total number of electrons to simulate",
                            "*** PhotonNbr = Total number of photons to simulate in EDS",
                            "*** WindowNbr = Number of energy windows in PhiRo computations",
                            "*** FilmNbrX = Number of X layers in PhiRo computations",
                            "*** FilmNbrY = Number of Y layers in PhiRo computations",
                            "*** FilmNbrZ = Number of Z layers in PhiRo computations",
                            "*** SpectraChannel = Number of channels in spectraa",
                            "*** SpectraInterpolation = Interpolation type for spectras",
                            "***",
                            "********************************************************************************"]
        elif self.version >= Version.VERSION_1_4_4:
            headerLines = [ "********************************************************************************",
                            "*** SIMULATION PARAMETERS",
                            "***",
                            "*** BaseFileName = All output files will be named using this term",
                            "*** ElectronNbr = Total number of electrons to simulate",
                            "*** PhotonNbr = Total number of photons to simulate in EDS",
                            "*** WindowNbr = Number of energy windows in Spectrum computations",
                            "*** FilmNbrX = Number of X layers in Spectrum computations",
                            "*** FilmNbrY = Number of Y layers in Spectrum computations",
                            "*** FilmNbrZ = Number of Z layers in Spectrum computations",
                            "*** EnergyChannelWidth in eV",
                            "*** SpectraInterpolation = Interpolation type for spectra",
                            "*** ElasticCrossSectionScalingFactor",
                            "*** EnergyLossScalingFactor",
                            "***",
                            "********************************************************************************"]
        else:
            headerLines = [ "********************************************************************************",
                            "*** SIMULATION PARAMETERS",
                            "***",
                            "*** BaseFileName = All output files will be named using this term",
                            "*** ElectronNbr = Total number of electrons to simulate",
                            "*** PhotonNbr = Total number of photons to simulate in EDS",
                            "*** WindowNbr = Number of energy windows in Spectrum computations",
                            "*** FilmNbrX = Number of X layers in Spectrum computations",
                            "*** FilmNbrY = Number of Y layers in Spectrum computations",
                            "*** FilmNbrZ = Number of Z layers in Spectrum computations",
                            "*** EnergyChannelWidth in eV",
                            "*** SpectraInterpolation = Interpolation type for spectra",
                            "***",
                            "********************************************************************************"]

        for line in headerLines:
            outputFile.write(line+'\n')

    # --- Simple accessors over the _parameters dict -------------------------

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, version):
        self._version = version

    @property
    def baseFilename(self):
        return self._parameters[KEY_BASE_FILENAME]

    @baseFilename.setter
    def baseFilename(self, baseFilename):
        self._parameters[KEY_BASE_FILENAME] = baseFilename

    @property
    def numberElectrons(self):
        return self._parameters[KEY_NUMBER_ELECTRONS]

    @numberElectrons.setter
    def numberElectrons(self, numberElectrons):
        self._parameters[KEY_NUMBER_ELECTRONS] = numberElectrons

    @property
    def numberPhotons(self):
        return self._parameters[KEY_NUMBER_PHOTONS]

    @numberPhotons.setter
    def numberPhotons(self, numberPhotons):
        self._parameters[KEY_NUMBER_PHOTONS] = numberPhotons

    @property
    def numberWindows(self):
        return self._parameters[KEY_NUMBER_WINDOWS]

    @numberWindows.setter
    def numberWindows(self, numberWindows):
        self._parameters[KEY_NUMBER_WINDOWS] = numberWindows

    @property
    def numberFilmsX(self):
        return self._parameters[KEY_NUMBER_FILMS_X]

    @numberFilmsX.setter
    def numberFilmsX(self, numberFilmsX):
        self._parameters[KEY_NUMBER_FILMS_X] = numberFilmsX

    @property
    def numberFilmsY(self):
        return self._parameters[KEY_NUMBER_FILMS_Y]

    @numberFilmsY.setter
    def numberFilmsY(self, numberFilmsY):
        self._parameters[KEY_NUMBER_FILMS_Y] = numberFilmsY

    @property
    def numberFilmsZ(self):
        return self._parameters[KEY_NUMBER_FILMS_Z]

    @numberFilmsZ.setter
    def numberFilmsZ(self, numberFilmsZ):
        self._parameters[KEY_NUMBER_FILMS_Z] = numberFilmsZ

    @property
    def numberChannels(self):
        return self._parameters[KEY_NUMBER_CHANNELS]

    @numberChannels.setter
    def numberChannels(self, numberChannels):
        self._parameters[KEY_NUMBER_CHANNELS] = numberChannels

    @property
    def energyChannelWidth_eV(self):
        return self._parameters[KEY_ENERGY_CHANNEL_WIDTH]

    @energyChannelWidth_eV.setter
    def energyChannelWidth_eV(self, energyChannelWidth_eV):
        self._parameters[KEY_ENERGY_CHANNEL_WIDTH] = energyChannelWidth_eV

    @property
    def spectrumInterpolationModel(self):
        return self._parameters[KEY_SPECTRA_INTERPOLATION_MODEL].getModel()

    @spectrumInterpolationModel.setter
    def spectrumInterpolationModel(self, spectrumInterpolationModel):
        self._parameters[KEY_SPECTRA_INTERPOLATION_MODEL] = MCXRayModel.SpectrumInterpolationModel(spectrumInterpolationModel)

    @property
    def voxelSimplification(self):
        return self._parameters.get(KEY_VOXEL_SIMPLIFICATION, None)

    @voxelSimplification.setter
    def voxelSimplification(self, voxelSimplification):
        self._parameters[KEY_VOXEL_SIMPLIFICATION] = voxelSimplification

    @property
    def elasticCrossSectionScalingFactor(self):
        return self._parameters[KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR]

    @elasticCrossSectionScalingFactor.setter
    def elasticCrossSectionScalingFactor(self, elasticCrossSectionScalingFactor):
        self._parameters[KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR] = elasticCrossSectionScalingFactor

    @property
    def energyLossScalingFactor(self):
        return self._parameters[KEY_ENERGY_LOSS_SCALING_FACTOR]

    @energyLossScalingFactor.setter
    def energyLossScalingFactor(self, energyLossScalingFactor):
        self._parameters[KEY_ENERGY_LOSS_SCALING_FACTOR] = energyLossScalingFactor
| drix00/pymcxray | pymcxray/FileFormat/SimulationParameters.py | Python | apache-2.0 | 15,335 | 0.004695 |
"""
Unit tests for email feature flag in new instructor dashboard.
Additionally tests that bulk email is always disabled for
non-Mongo backed courses, regardless of email feature flag, and
that the view is conditionally available when Course Auth is turned on.
"""
from __future__ import absolute_import
from django.urls import reverse
from opaque_keys.edx.keys import CourseKey
from six import text_type
from bulk_email.models import BulkEmailFlag, CourseAuthorization
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestNewInstructorDashboardEmailViewMongoBacked(SharedModuleStoreTestCase):
    """
    Check for email view on the new instructor dashboard
    for Mongo-backed courses
    """
    @classmethod
    def setUpClass(cls):
        """Create the shared course and precompute the dashboard URL and email-tab markup."""
        super(TestNewInstructorDashboardEmailViewMongoBacked, cls).setUpClass()
        cls.course = CourseFactory.create()
        # URL for instructor dash
        cls.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(cls.course.id)})
        # URL for email view
        cls.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>'
    def setUp(self):
        """Log an instructor in so the dashboard is accessible."""
        super(TestNewInstructorDashboardEmailViewMongoBacked, self).setUp()
        # Create instructor account
        instructor = AdminFactory.create()
        self.client.login(username=instructor.username, password="test")
    def tearDown(self):
        """Remove any BulkEmailFlag rows the test created."""
        super(TestNewInstructorDashboardEmailViewMongoBacked, self).tearDown()
        BulkEmailFlag.objects.all().delete()
    # In order for bulk email to work, we must have both the BulkEmailFlag.is_enabled()
    # set to True and for the course to be Mongo-backed.
    # The flag is enabled and the course is Mongo-backed (should work)
    def test_email_flag_true_mongo_true(self):
        """Email tab shows when the flag is on and no per-course auth is required."""
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
        # Assert that instructor email is enabled for this course - since REQUIRE_COURSE_EMAIL_AUTH is False,
        # all courses should be authorized to use email.
        self.assertTrue(BulkEmailFlag.feature_enabled(self.course.id))
        # Assert that the URL for the email view is in the response
        response = self.client.get(self.url)
        self.assertIn(self.email_link, response.content)
        send_to_label = '<div class="send_to_list">Send to:</div>'
        self.assertIn(send_to_label, response.content)
        self.assertEqual(response.status_code, 200)
    # The course is Mongo-backed but the flag is disabled (should not work)
    def test_email_flag_false_mongo_true(self):
        """Email tab is hidden when the bulk-email flag is off."""
        BulkEmailFlag.objects.create(enabled=False)
        # Assert that the URL for the email view is not in the response
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
    # Flag is enabled, but we require course auth and haven't turned it on for this course
    def test_course_not_authorized(self):
        """Email tab is hidden when course auth is required but not granted."""
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=True)
        # Assert that instructor email is not enabled for this course
        self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
        # Assert that the URL for the email view is not in the response
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
    # Flag is enabled, we require course auth and turn it on for this course
    def test_course_authorized(self):
        """Email tab appears once the course is authorized for bulk email."""
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=True)
        # Assert that instructor email is not enabled for this course
        self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
        # Assert that the URL for the email view is not in the response
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
        # Authorize the course to use email
        cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
        cauth.save()
        # Assert that instructor email is enabled for this course
        self.assertTrue(BulkEmailFlag.feature_enabled(self.course.id))
        # Assert that the URL for the email view is in the response
        response = self.client.get(self.url)
        self.assertIn(self.email_link, response.content)
    # Flag is disabled, but course is authorized
    def test_course_authorized_feature_off(self):
        """Course authorization alone is not enough when the global flag is off."""
        BulkEmailFlag.objects.create(enabled=False, require_course_email_auth=True)
        # Authorize the course to use email
        cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
        cauth.save()
        # Assert that this course is authorized for instructor email, but the feature is not enabled
        self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id))
        self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
        # Assert that the URL for the email view IS NOT in the response
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
class TestNewInstructorDashboardEmailViewXMLBacked(SharedModuleStoreTestCase):
    """
    Check that the email tab is never shown for non-Mongo (XML-backed) courses,
    regardless of the bulk-email feature flag.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE

    @classmethod
    def setUpClass(cls):
        """Precompute the course key, dashboard URL, and email-tab markup once."""
        super(TestNewInstructorDashboardEmailViewXMLBacked, cls).setUpClass()
        cls.course_key = CourseKey.from_string('edX/toy/2012_Fall')

        # URL for instructor dash
        cls.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(cls.course_key)})
        # URL for email view
        cls.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>'

    def setUp(self):
        """Log an instructor in so the dashboard is accessible.

        Note: ``self.url`` and ``self.email_link`` are already set in
        ``setUpClass``; the previous version redundantly re-assigned them
        here on every test.
        """
        super(TestNewInstructorDashboardEmailViewXMLBacked, self).setUp()

        # Create instructor account
        instructor = AdminFactory.create()
        self.client.login(username=instructor.username, password="test")

    def tearDown(self):
        """Remove any BulkEmailFlag rows the test created."""
        super(TestNewInstructorDashboardEmailViewXMLBacked, self).tearDown()
        BulkEmailFlag.objects.all().delete()

    # The flag is enabled, and since REQUIRE_COURSE_EMAIL_AUTH is False, all courses should
    # be authorized to use email. But the course is not Mongo-backed (should not work)
    def test_email_flag_true_mongo_false(self):
        """Email tab stays hidden for XML-backed courses even with the flag on."""
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)

    # The flag is disabled and the course is not Mongo-backed (should not work)
    def test_email_flag_false_mongo_false(self):
        """Email tab stays hidden with the flag off as well."""
        BulkEmailFlag.objects.create(enabled=False, require_course_email_auth=False)
        response = self.client.get(self.url)
        self.assertNotIn(self.email_link, response.content)
| jolyonb/edx-platform | lms/djangoapps/instructor/tests/test_email.py | Python | agpl-3.0 | 7,373 | 0.003119 |
from SimpleLexicon import SimpleLexicon
from LOTlib.Evaluation.EvaluationException import RecursionDepthException
class RecursiveLexicon(SimpleLexicon):
    """
    A lexicon where word meanings can call each other. Analogous to a RecursiveLOTHypothesis from a LOTHypothesis.

    To achieve this, we require the LOThypotheses in self.values to take a "recurse" call that is always passed in by
    default here on __call__ as the first argument.

    This throws a RecursionDepthException when it gets too deep.

    See Examples.EvenOdd
    """

    def __init__(self, recursive_depth_bound=10, *args, **kwargs):
        """
        :param recursive_depth_bound: maximum number of nested recursive
            calls allowed before RecursionDepthException is raised.
        """
        self.recursive_depth_bound = recursive_depth_bound
        SimpleLexicon.__init__(self, *args, **kwargs)

    def __call__(self, word, *args):
        """
        Evaluate *word* on *args*, passing ``self.recursive_call`` as the
        first argument so word meanings can call each other.  Resets the
        recursion-depth counter for this top-level evaluation.
        """
        self.recursive_call_depth = 0
        return self.value[word](self.recursive_call, *args)

    def recursive_call(self, word, *args):
        """
        Internal entry point for recursive calls; tracks the depth so an
        unbounded recursion is cut off with RecursionDepthException.
        """
        self.recursive_call_depth += 1
        if self.recursive_call_depth > self.recursive_depth_bound:
            raise RecursionDepthException

        return self.value[word](self.recursive_call, *args)
# flake8: noqa
"""
Public testing utility functions.
"""
from pandas.util.testing import (
assert_frame_equal, assert_index_equal, assert_series_equal)
| GuessWhoSamFoo/pandas | pandas/testing.py | Python | bsd-3-clause | 158 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-21 12:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Renames the old free-form 'emr_release' field to 'emr_release_version'
    # and re-introduces 'emr_release' as a nullable FK to the EMRRelease model
    # added in migration 0020.  Do not edit the operations: Django migration
    # files must stay stable once applied.

    dependencies = [("clusters", "0020_emr_release_model")]

    operations = [
        migrations.RenameField(
            model_name="cluster", old_name="emr_release", new_name="emr_release_version"
        ),
        migrations.AddField(
            model_name="cluster",
            name="emr_release",
            field=models.ForeignKey(
                blank=True,
                help_text='Different AWS EMR versions have different versions of software like Hadoop, Spark, etc. See <a href="http://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-whatsnew.html">what\'s new</a> in each.',
                null=True,
                # SET_NULL so deleting an EMRRelease does not cascade-delete clusters.
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="created_clusters",
                to="clusters.EMRRelease",
                verbose_name="EMR release",
            ),
        ),
    ]
| mozilla/telemetry-analysis-service | atmo/clusters/migrations/0021_rename_cluster_emr_release.py | Python | mpl-2.0 | 1,083 | 0.001847 |
from gettext import gettext as _
SECTION_ROOT = 'puppet'
DESC_ROOT = _('manage Puppet bindings')


def ensure_puppet_root(cli):
    """
    Verifies that the root of puppet-related commands exists in the CLI,
    creating it using constants from this module if it does not.

    :param cli: CLI instance being configured
    :type  cli: pulp.client.extensions.core.PulpCli
    :return: the existing or newly created puppet root section
    """
    existing = cli.find_section(SECTION_ROOT)
    if existing is not None:
        return existing
    return cli.create_section(SECTION_ROOT, DESC_ROOT)


def root_section(cli):
    """Return the puppet subsection of the CLI's root section."""
    top_level = cli.root_section
    return top_level.find_subsection(SECTION_ROOT)
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from config import config
# Flask extension singletons, created unbound here and attached to the app
# inside create_app() (the application-factory pattern).
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
login_manager = LoginManager()
# 'strong' invalidates the login session when the client identifier changes.
login_manager.session_protection = 'strong'
# Endpoint that @login_required redirects anonymous users to.
login_manager.login_view = 'auth.login'
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    :param config_name: key into the ``config`` dict (e.g. 'development',
        'testing', 'production') selecting the configuration class.
    :return: the configured Flask application with all extensions and
        blueprints registered.
    """
    app = Flask(__name__)
    # (Removed a leftover debug ``print`` statement that dumped the config
    # object to stdout on every factory call.)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)

    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')

    return app
| sb2gh/flask_login_1 | app/__init__.py | Python | mit | 988 | 0 |
import subprocess
def convert_chinese(text):
    """Convert *text* from Traditional (HK) to Simplified Chinese via opencc.

    Security fix: the old implementation interpolated *text* into a shell
    command (``echo '%s' | opencc ...``), so any input containing a single
    quote could break out and execute arbitrary shell commands.  The text is
    now fed to opencc on stdin with no shell involved.
    """
    proc = subprocess.run(
        ["opencc", "-c", "hk2s.json"],
        input=text,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,  # getoutput() merged stderr; keep that behavior
        universal_newlines=True,
    )
    # subprocess.getoutput() stripped the trailing newline; preserve that.
    return proc.stdout.rstrip("\n")
import sqlite3
from config import appConfig
def createTBLS(path=None):
    """Create the schema (links, tags and the assc join table) in the SQLite
    database at *path*.

    :param path: filesystem path of the SQLite database file
    :raises sqlite3.Error: if the schema cannot be created
    """
    conn = sqlite3.connect(path)
    try:
        cursor = conn.cursor()
        cursor.execute("""CREATE TABLE links(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                        name TEXT NOT NULL
                        );""")
        cursor.execute("""CREATE TABLE tags(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                        tag TEXT NOT NULL
                        );""")
        cursor.execute("""CREATE TABLE assc(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                        links_id INTEGER NOT NULL,
                        tags_id INTEGER NOT NULL,
                        FOREIGN KEY (links_id) REFERENCES links(id),
                        FOREIGN KEY (tags_id) REFERENCES tags(id)
                        );""")
        conn.commit()
    finally:
        # Previously the connection leaked whenever a CREATE TABLE failed;
        # always close it, success or not.
        conn.close()
# Script entry point: create the schema in the database configured in appConfig.
if __name__ == '__main__':
    try:
        path = appConfig.db_path
        print path  # Python 2 print statement; echoes the DB path being used
        createTBLS(str(path))
    except IOError as e:
        # NOTE(review): sqlite3 errors are not IOError subclasses, so schema
        # creation failures propagate uncaught -- confirm whether intended.
        print (str(e))
| Mohamad1994HD/LinkArchiever | app/createDBTables.py | Python | gpl-3.0 | 1,118 | 0.006261 |
"""This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2011 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import math
from World import World
class CellWorld(World):
    """Contains cells and animals that move between cells."""

    def __init__(self, canvas_size=500, cell_size=5, interactive=False):
        """Initializes the world.

        canvas_size: canvas width/height in pixels
        cell_size: edge length of one cell in canvas units
        interactive: if True, build the canvas and control widgets
        """
        World.__init__(self)
        self.title('CellWorld')
        self.canvas_size = canvas_size
        self.cell_size = cell_size
        # cells is a map from index tuples to Cell objects
        self.cells = {}
        if interactive:
            self.make_canvas()
            self.make_control()

    def make_canvas(self):
        """Creates the GUI."""
        self.canvas = self.ca(width=self.canvas_size,
                              height=self.canvas_size,
                              bg='white',
                              scale = [self.cell_size, self.cell_size])

    def make_control(self):
        """Adds GUI elements that allow the user to change the scale."""
        self.la(text='Click or drag on the canvas to create cells.')
        self.row([0,1,0])
        self.la(text='Cell size: ')
        self.cell_size_en = self.en(width=10, text=str(self.cell_size))
        self.bu(text='resize', command=self.rescale)
        self.endrow()

    def bind(self):
        """Creates bindings for the canvas."""
        self.canvas.bind('<ButtonPress-1>', self.click)
        self.canvas.bind('<B1-Motion>', self.click)

    def click(self, event):
        """Event handler for clicks and drags.

        It creates a new cell or toggles an existing cell.
        """
        # convert the button click coordinates to an index tuple
        x, y = self.canvas.invert([event.x, event.y])
        i, j = int(math.floor(x)), int(math.floor(y))
        # toggle the cell if it exists; create it otherwise
        cell = self.get_cell(i,j)
        if cell:
            cell.toggle()
        else:
            # BUGFIX: previously called make_cell(x, y) with the raw float
            # coordinates, so cells were stored under float keys and
            # get_cell(i, j) (integer keys) never found them again --
            # clicking an existing cell created a duplicate instead of
            # toggling it.
            self.make_cell(i, j)

    def make_cell(self, i, j):
        """Creates and returns a new cell at i,j."""
        cell = Cell(self, i, j)
        self.cells[i,j] = cell
        return cell

    def cell_bounds(self, i, j):
        """Return the bounds of the cell with indices i, j."""
        p1 = [i, j]
        p2 = [i+1, j]
        p3 = [i+1, j+1]
        p4 = [i, j+1]
        bounds = [p1, p2, p3, p4]
        return bounds

    def get_cell(self, i, j, default=None):
        """Gets the cell at i, j or returns the default value."""
        cell = self.cells.get((i,j), default)
        return cell

    # relative offsets of the Von Neumann / Moore neighborhoods
    four_neighbors = [(1,0), (-1,0), (0,1), (0,-1)]
    eight_neighbors = four_neighbors + [(1,1), (1,-1), (-1,1), (-1,-1)]

    def get_four_neighbors(self, cell, default=None):
        """Return the four Von Neumann neighbors of a cell."""
        return self.get_neighbors(cell, default, CellWorld.four_neighbors)

    def get_eight_neighbors(self, cell, default=None):
        """Returns the eight Moore neighbors of a cell."""
        return self.get_neighbors(cell, default, CellWorld.eight_neighbors)

    def get_neighbors(self, cell, default=None, deltas=[(0,0)]):
        """Return the neighbors of a cell.

        Args:
            cell: Cell
            deltas: a list of tuple offsets.
        """
        i, j = cell.indices
        cells = [self.get_cell(i+di, j+dj, default) for di, dj in deltas]
        return cells

    def rescale(self):
        """Event handler that rescales the world.

        Reads the new scale from the GUI,
        changes the canvas transform, and redraws the world.
        """
        cell_size = self.cell_size_en.get()
        cell_size = int(cell_size)
        self.canvas.transforms[0].scale = [cell_size, cell_size]
        self.redraw()

    def redraw(self):
        """Clears the canvas and redraws all cells and animals."""
        self.canvas.clear()
        for cell in self.cells.itervalues():
            cell.draw()
        for animal in self.animals:
            animal.draw()
class Cell(object):
    """A rectangular region in CellWorld"""
    def __init__(self, world, i, j):
        """Creates a cell in *world* at indices i, j and draws it unmarked."""
        self.world = world
        self.indices = i, j
        self.bounds = self.world.cell_bounds(i, j)
        # options used for a marked cell
        self.marked_options = dict(fill='black', outline='gray80')
        # options used for an unmarked cell
        self.unmarked_options = dict(fill='yellow', outline='gray80')
        self.marked = False
        self.draw()
    def draw(self):
        """Draw the cell."""
        if self.marked:
            options = self.marked_options
        else:
            options = self.unmarked_options
        # bounds returns all four corners, so slicing every other
        # element yields two opposing corners, which is what we
        # pass to Canvas.rectangle
        coords = self.bounds[::2]
        self.item = self.world.canvas.rectangle(coords, **options)
    def undraw(self):
        """Delete any items with this cell's tag."""
        self.item.delete()
        self.item = None
    def get_config(self, option):
        """Gets the configuration of this cell."""
        return self.item.cget(option)
    def config(self, **options):
        """Configure this cell with the given options."""
        self.item.config(**options)
    def mark(self):
        """Marks this cell."""
        self.marked = True
        self.config(**self.marked_options)
    def unmark(self):
        """Unmarks this cell."""
        self.marked = False
        self.config(**self.unmarked_options)
    def is_marked(self):
        """Checks whether this cell is marked."""
        return self.marked
    def toggle(self):
        """Toggles the state of this cell."""
        if self.is_marked():
            self.unmark()
        else:
            self.mark()
# Script entry point: run an interactive CellWorld; click or drag on the
# canvas to create/toggle cells.
if __name__ == '__main__':
    world = CellWorld(interactive=True)
    world.bind()
    world.mainloop()
| simontakite/sysadmin | pythonscripts/thinkpython/CellWorld.py | Python | gpl-2.0 | 6,080 | 0.004441 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file
import re
from os import path
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QTableWidgetItem
from PyQt5.QtWidgets import QCheckBox
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtNetwork import QNetworkProxy
from PyQt5.QtCore import *
import pds
import config
import helpdialog
import repodialog
import pmutils
import backend
from ui_settingsdialog import Ui_SettingsDialog
from pmutils import *
class SettingsTab(QObject):
    """Base class for one tab of the settings dialog.

    Subclasses override setupUi/connectSignals/initialize/save; the
    ``changed`` flag records whether the user modified anything so that
    save() is only invoked for tabs that actually changed.
    """
    def __init__(self, settings):
        QObject.__init__(self)
        self.settings = settings  # the SettingsDialog that owns this tab
        self.config = config.PMConfig()
        self.iface = backend.pm.Iface()
        self.setupUi()
        self.connectSignals()
        self.changed = False
    def markChanged(self):
        """Slot connected to the tab's widgets; flags pending modifications."""
        self.changed = True
    def setupUi(self):
        """Build the tab's widgets; overridden by subclasses."""
        pass
    def connectSignals(self):
        """Wire widget signals; overridden by subclasses."""
        pass
    def save(self):
        """Persist the tab's settings; overridden by subclasses."""
        pass
    def initialize(self):
        """(Re)load current values into the widgets; overridden by subclasses."""
        pass
class GeneralSettings(SettingsTab):
    """Tab with general UI/tray/update-check preferences."""
    def setupUi(self):
        """Assign icons to the repository list buttons and load values."""
        self.settings.moveUpButton.setIcon(KIcon("up"))
        self.settings.moveDownButton.setIcon(KIcon("down"))
        self.settings.addRepoButton.setIcon(KIcon(("list-add", "add")))
        self.settings.removeRepoButton.setIcon(KIcon(("list-remove", "remove")))
        self.initialize()
    def initialize(self):
        """Load the current PMConfig values into the widgets."""
        self.settings.onlyGuiApp.setChecked(self.config.showOnlyGuiApp())
        self.settings.showComponents.setChecked(self.config.showComponents())
        self.settings.showIsA.setChecked(self.config.showIsA())
        self.settings.intervalCheck.setChecked(self.config.updateCheck())
        self.settings.installUpdates.setChecked(self.config.installUpdatesAutomatically())
        self.settings.intervalSpin.setValue(self.config.updateCheckInterval())
        self.settings.systemTray.setChecked(self.config.systemTray())
        self.settings.hideIfNoUpdate.setChecked(self.config.hideTrayIfThereIsNoUpdate())
    def connectSignals(self):
        """Mark the tab dirty whenever any option widget changes."""
        self.settings.onlyGuiApp.toggled.connect(self.markChanged)
        self.settings.showComponents.toggled.connect(self.markChanged)
        self.settings.showIsA.toggled.connect(self.markChanged)
        self.settings.intervalCheck.toggled.connect(self.markChanged)
        self.settings.intervalSpin.valueChanged.connect(self.markChanged)
        self.settings.installUpdates.toggled.connect(self.markChanged)
        self.settings.systemTray.toggled.connect(self.markChanged)
        self.settings.hideIfNoUpdate.toggled.connect(self.markChanged)
    def save(self):
        """Write changed values back to PMConfig and emit refresh signals.

        Each comparison guards the corresponding signal so views only
        refresh for options that actually changed.
        """
        if not self.settings.onlyGuiApp.isChecked() == self.config.showOnlyGuiApp():
            self.config.setShowOnlyGuiApp(self.settings.onlyGuiApp.isChecked())
            self.settings.packagesChanged.emit()
        if not self.settings.showComponents.isChecked() == self.config.showComponents():
            self.config.setShowComponents(self.settings.showComponents.isChecked())
            self.settings.packageViewChanged.emit()
        if not self.settings.showIsA.isChecked() == self.config.showIsA():
            self.config.setShowIsA(self.settings.showIsA.isChecked())
            self.settings.packageViewChanged.emit()
        if not self.settings.systemTray.isChecked() == self.config.systemTray() or \
           not self.settings.intervalSpin.value() == self.config.updateCheckInterval() or \
           not self.settings.intervalCheck.isChecked() == self.config.updateCheck() or \
           not self.settings.hideIfNoUpdate.isChecked() == self.config.hideTrayIfThereIsNoUpdate():
            self.config.setSystemTray(self.settings.systemTray.isChecked())
            self.config.setUpdateCheck(self.settings.intervalCheck.isChecked())
            self.config.setUpdateCheckInterval(self.settings.intervalSpin.value())
            self.config.setHideTrayIfThereIsNoUpdate(self.settings.hideIfNoUpdate.isChecked())
            self.settings.traySettingChanged.emit()
        self.config.setInstallUpdatesAutomatically(self.settings.installUpdates.isChecked())
class CacheSettings(SettingsTab):
    """Tab with package-cache and bandwidth-limit settings (pisi.conf)."""
    def setupUi(self):
        self.initialize()
    def initialize(self):
        """Load cache/bandwidth settings from the pisi configuration."""
        config = self.iface.getConfig()
        cache = config.get("general", "package_cache")
        cache_limit = config.get("general", "package_cache_limit")
        cache_limit = int(cache_limit) if cache_limit else 0
        cache_dir = config.get("directories", "cached_packages_dir")
        cache_dir = str(cache_dir) if cache_dir else '/var/cache/pisi/packages'
        # If pisi.conf does not have it yet, default is use package cache
        if not cache or cache == "True":
            enableCache = True
        else:
            enableCache = False
        self.cacheEnabled = enableCache
        self.cacheSize = cache_limit
        self.settings.cacheGroup.setEnabled(self.cacheEnabled)
        self.settings.useCacheCheck.setChecked(enableCache)
        self.settings.useCacheSpin.setValue(cache_limit)
        self.settings.cacheDirPath.setText(cache_dir)
        bandwidth_limit = config.get("general", "bandwidth_limit")
        bandwidth_limit = int(bandwidth_limit) if bandwidth_limit else 0
        self.settings.useBandwidthLimit.setChecked(not bandwidth_limit == 0)
        self.settings.bandwidthSpin.setValue(bandwidth_limit)
    def connectSignals(self):
        """Wire buttons and mark the tab dirty on any option change."""
        self.settings.clearCacheButton.clicked.connect(self.clearCache)
        self.settings.selectCacheDir.clicked.connect(self.selectCacheDir)
        self.settings.useCacheCheck.toggled.connect(self.markChanged)
        self.settings.useCacheSpin.valueChanged.connect(self.markChanged)
        self.settings.useBandwidthLimit.toggled.connect(self.markChanged)
        self.settings.bandwidthSpin.valueChanged.connect(self.markChanged)
        self.settings.openCacheDir.clicked.connect(self.openCacheDir)
    def openCacheDir(self):
        """Open the cache directory in the desktop file manager."""
        cache_dir = unicode(self.settings.cacheDirPath.text())
        if path.exists(cache_dir):
            QDesktopServices.openUrl(QUrl("file://%s" % cache_dir, QUrl.TolerantMode))
    def selectCacheDir(self):
        """Let the user pick a new cache directory via a directory dialog."""
        selected_dir = QFileDialog.getExistingDirectory(self.settings, self.tr("Open Directory"), "/",
                                                        QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
        if not selected_dir == '':
            if not selected_dir == self.settings.cacheDirPath.text():
                self.settings.cacheDirPath.setText(selected_dir)
                self.markChanged()
    def clearCache(self):
        """Delete all cached packages after user confirmation."""
        if QMessageBox.Yes == QMessageBox.warning(self.settings,
                                                  self.tr("Warning"),
                                                  self.tr("All the cached packages will be deleted. Are you sure? "),
                                                  QMessageBox.Yes | QMessageBox.No):
            try:
                self.iface.clearCache(0)
            except Exception, e:
                self.settings.parent.cw.exceptionCaught(str(e))
    def save(self):
        """Persist cache limit, cache directory and bandwidth limit."""
        self.iface.setCacheLimit(self.settings.useCacheCheck.isChecked(), self.settings.useCacheSpin.value())
        self.iface.setConfig("directories", "cached_packages_dir", unicode(self.settings.cacheDirPath.text()))
        if self.settings.useBandwidthLimit.isChecked():
            self.iface.setConfig("general", "bandwidth_limit", str(self.settings.bandwidthSpin.value()))
        else:
            self.iface.setConfig("general", "bandwidth_limit", "0")
class RepositorySettings(SettingsTab):
    """Tab managing the ordered list of package repositories.

    The table has three columns: 0 = active checkbox, 1 = repo name,
    2 = repo address.
    """
    def setupUi(self):
        self.settings.repoListView.horizontalHeader().setStretchLastSection(True)
        self.settings.repoListView.verticalHeader().hide()
        self.settings.repoListView.setColumnWidth(0, 32)
        self.initialize(firstRun = True)
    def connectSignals(self):
        self.settings.addRepoButton.clicked.connect(self.addRepository)
        self.settings.removeRepoButton.clicked.connect(self.removeRepository)
        self.settings.moveUpButton.clicked.connect(self.moveUp)
        self.settings.moveDownButton.clicked.connect(self.moveDown)
        self.settings.repoListView.itemChanged.connect(self.markChanged)
    def get_repo_names(self):
        """Return the repo names currently shown, in table order."""
        repos = []
        for row in range(self.settings.repoListView.rowCount()):
            repos.append(unicode(self.settings.repoListView.item(row, 1).text()))
        return repos
    def initialize(self, firstRun = False):
        """(Re)populate the table from the backend's repository list."""
        self.repositories = self.iface.getRepositories(
                repos = None if firstRun else self.get_repo_names())
        self.__clear()
        for name, address in self.repositories:
            self.__insertRow(unicode(name), address)
    def __clear(self):
        # remove every row; removeRow(0) shifts rows up, so loop until empty
        while self.settings.repoListView.rowCount():
            self.settings.repoListView.removeRow(0)
    def __insertRow(self, repoName, repoAddress):
        """Append one repo row (checkbox + name + address) to the table."""
        currentRow = self.settings.repoListView.rowCount()
        self.settings.repoListView.insertRow(currentRow)
        checkbox = QCheckBox(self.settings.repoListView)
        checkbox.toggled.connect(self.markChanged)
        self.settings.repoListView.setCellWidget(currentRow, 0, checkbox)
        self.settings.repoListView.cellWidget(currentRow, 0).setChecked(self.iface.isRepoActive(repoName))
        repoNameItem = QTableWidgetItem()
        repoNameItem.setText(repoName)
        repoNameItem.setTextAlignment(Qt.AlignLeft|Qt.AlignVCenter)
        self.settings.repoListView.setItem(currentRow, 1, repoNameItem)
        repoAddressItem = QTableWidgetItem()
        repoAddressItem.setText(repoAddress)
        repoAddressItem.setTextAlignment(Qt.AlignLeft|Qt.AlignVCenter)
        self.settings.repoListView.setItem(currentRow, 2, repoAddressItem)
    def addRepository(self):
        """Open the add-repository dialog; __addRepository commits it."""
        self.repoDialog = repodialog.RepoDialog()
        self.repoDialog.buttonBox.accepted.connect(self.__addRepository)
        self.repoDialog.show()
    def __addRepository(self):
        """Validate the dialog's name/address and append the new repo row."""
        repoName = self.repoDialog.repoName.text()
        repoAddress = self.repoDialog.repoAddress.currentText()
        if not re.match("^[0-9%s\-\\_\\.\s]*$" % str(pmutils.letters()), str(repoName)) or str(repoName) == '':
            QMessageBox.warning(self.settings,
                                self.tr("Pisi Error"),
                                self.tr("Not a valid repository name"))
            return
        if not repoAddress.endsWith("xml") and not repoAddress.endsWith("xml.bz2") and not repoAddress.endsWith('xz'):
            QMessageBox.warning(self.settings,
                                self.tr("Pisi Error"),
                                self.tr('<qt>Repository address should end with xml or xml.bz2 or xz suffix.<p>Please try again.</qt>'))
            return
        self.__insertRow(repoName, repoAddress)
        self.markChanged()
    def removeRepository(self):
        self.settings.repoListView.removeRow(self.settings.repoListView.currentRow())
        self.markChanged()
    def __setRow(self, row, rowItems):
        # place a previously taken row's items back into *row*
        for col in range(self.settings.repoListView.columnCount()):
            self.settings.repoListView.setItem(row, col, rowItems[col])
    def __takeRow(self, row):
        # detach and return all items of *row* (cell widgets stay in place)
        rowItems = []
        for col in range(self.settings.repoListView.columnCount()):
            rowItems.append(self.settings.repoListView.takeItem(row, col))
        return rowItems
    def __move(self, up):
        """Swap the current row with its neighbor above (up) or below.

        The checkbox cell widgets are not moved; only their checked states
        are exchanged, because cell widgets belong to the table positions.
        """
        srcRow = self.settings.repoListView.currentRow()
        dstRow = srcRow - 1 if up else srcRow + 1
        if dstRow < 0 or dstRow >= self.settings.repoListView.rowCount():
            return
        srcRowChecked = self.settings.repoListView.cellWidget(srcRow, 0).checkState()
        dstRowChecked = self.settings.repoListView.cellWidget(dstRow, 0).checkState()
        srcItems = self.__takeRow(srcRow)
        destItems = self.__takeRow(dstRow)
        self.__setRow(srcRow, destItems)
        self.__setRow(dstRow, srcItems)
        self.settings.repoListView.cellWidget(srcRow, 0).setCheckState(dstRowChecked)
        self.settings.repoListView.cellWidget(dstRow, 0).setCheckState(srcRowChecked)
        self.settings.repoListView.setCurrentItem(srcItems[1])
        self.markChanged()
    def moveUp(self):
        self.__move(True)
    def moveDown(self):
        self.__move(False)
    def getRepo(self, row):
        """Return (name, address, active) for the given table row."""
        active = self.settings.repoListView.cellWidget(row, 0).checkState() == Qt.Checked
        name = self.settings.repoListView.item(row, 1).text()
        address = self.settings.repoListView.item(row, 2).text()
        return (str(name), str(address), active)
    def save(self):
        """Push the table's repo list, activity flags and order to the backend."""
        repos = []
        activities = {}
        for row in range(self.settings.repoListView.rowCount()):
            name, address, active = self.getRepo(row)
            repos.append((name, address))
            activities[name]=active
        self.iface.setRepositories(repos)
        self.iface.setRepoActivities(activities)
        self.iface.updateRepositories(self.get_repo_names())
class ProxySettings(SettingsTab):
    """Tab with HTTP/HTTPS/FTP proxy settings, persisted in pisi.conf."""
    def setupUi(self):
        self.initialize()
    def initialize(self):
        """Load proxy URLs from the pisi configuration into the widgets."""
        self.settings.useProxy.setChecked(False)
        self.settings.useDe.hide()
        self.clear()
        proxyInUse = False
        config = self.iface.getConfig()
        https = config.get("general", "https_proxy")
        if https and https != "None":
            items = parse_proxy(https)
            self.settings.httpsProxy.setText(items['host'])
            self.settings.httpsProxyPort.setValue(int(items['port']))
            proxyInUse = True
        ftp = config.get("general", "ftp_proxy")
        if ftp and ftp != "None":
            items = parse_proxy(ftp)
            self.settings.ftpProxy.setText(items['host'])
            self.settings.ftpProxyPort.setValue(int(items['port']))
            proxyInUse = True
        http = config.get("general", "http_proxy")
        if http and http != "None":
            items = parse_proxy(http)
            self.settings.httpProxy.setText(items['host'])
            self.settings.httpProxyPort.setValue(int(items['port']))
            proxyInUse = True
        if proxyInUse:
            self.settings.useProxy.setChecked(True)
            # NOTE(review): 'items' here is whichever proxy was parsed last,
            # so the credential fields come from that one entry -- confirm
            # all proxies are expected to share the same credentials.
            if items['domain']:
                self.settings.domainProxy.setText(items['domain'])
            if items['user']:
                self.settings.userProxy.setText(items['user'])
            if items['pass']:
                self.settings.passwordProxy.setText(items['pass'])
    def connectSignals(self):
        self.settings.useHttpForAll.linkActivated.connect(self.useHttpForAll)
        self.settings.httpProxy.textChanged.connect(self.markChanged)
        self.settings.httpProxyPort.valueChanged.connect(self.markChanged)
        self.settings.httpsProxy.textChanged.connect(self.markChanged)
        self.settings.httpsProxyPort.valueChanged.connect(self.markChanged)
        self.settings.ftpProxy.textChanged.connect(self.markChanged)
        self.settings.ftpProxyPort.valueChanged.connect(self.markChanged)
        self.settings.userProxy.textChanged.connect(self.markChanged)
        self.settings.passwordProxy.textChanged.connect(self.markChanged)
        self.settings.domainProxy.textChanged.connect(self.markChanged)
        self.settings.useProxy.toggled.connect(self.markChanged)
        self.settings.useProxy.toggled.connect(self.checkDeSettings)
        self.settings.useDe.linkActivated.connect(self.getSettingsFromDe)
    def useHttpForAll(self, link):
        """Copy the HTTP proxy host/port into the HTTPS and FTP fields."""
        self.settings.httpsProxy.setText(self.settings.httpProxy.text())
        self.settings.httpsProxyPort.setValue(self.settings.httpProxyPort.value())
        self.settings.ftpProxy.setText(self.settings.httpProxy.text())
        self.settings.ftpProxyPort.setValue(self.settings.httpProxyPort.value())
    def clear(self):
        """Blank out every proxy field."""
        self.settings.httpProxy.setText("")
        self.settings.httpProxyPort.setValue(0)
        self.settings.userProxy.setText("")
        self.settings.passwordProxy.setText("")
        self.settings.domainProxy.setText("")
        self.settings.httpsProxy.setText("")
        self.settings.httpsProxyPort.setValue(0)
        self.settings.ftpProxy.setText("")
        self.settings.ftpProxyPort.setValue(0)
    def checkDeSettings(self, toggled):
        """Show the 'use desktop settings' link only when KDE has a proxy."""
        self.settings.useDe.setVisible(self.getSettingsFromDe(just_check = True) and toggled)
    def getSettingsFromDe(self, link = '', just_check = False):
        """Read proxy settings from KDE's kioslaverc.

        With just_check=True only reports whether a proxy is configured;
        otherwise fills the widgets. Returns True if KDE has a proxy set.
        """
        cf = path.join(Pds.config_path, 'share/config/kioslaverc')
        config = Pds.parse(cf, force=True)
        proxyType = config.value('Proxy Settings/ProxyType').toString()
        if proxyType:
            if int(proxyType) > 0:
                if just_check:
                    return True
                items = parse_proxy(config.value('Proxy Settings/httpProxy').toString())
                self.settings.httpProxy.setText(items['host'])
                self.settings.httpProxyPort.setValue(int(items['port']))
                items = parse_proxy(config.value('Proxy Settings/httpsProxy').toString())
                self.settings.httpsProxy.setText(items['host'])
                self.settings.httpsProxyPort.setValue(int(items['port']))
                items = parse_proxy(config.value('Proxy Settings/ftpProxy').toString())
                self.settings.ftpProxy.setText(items['host'])
                self.settings.ftpProxyPort.setValue(int(items['port']))
                return True
        return False
    def save(self):
        """Write the proxy URLs (with optional credentials) back to pisi.conf."""
        httpProxy, httpProxyPort = self.settings.httpProxy.text().split('://')[-1], self.settings.httpProxyPort.value()
        httpsProxy, httpsProxyPort = self.settings.httpsProxy.text().split('://')[-1], self.settings.httpsProxyPort.value()
        ftpProxy, ftpProxyPort = self.settings.ftpProxy.text().split('://')[-1], self.settings.ftpProxyPort.value()
        userProxy = self.settings.userProxy.text()
        passProxy = self.settings.passwordProxy.text()
        domainProxy = self.settings.domainProxy.text()
        if not self.settings.useProxy.isChecked():
            # disabled: store "None" for every scheme and blank the fields
            httpProxy = httpsProxy = ftpProxy = None
            self.clear()
        if userProxy and passProxy:
            auth = '%s:%s@' % (userProxy, passProxy)
            if domainProxy:
                auth = '%s\%s:%s@' % (domainProxy, userProxy, passProxy)
        else:
            auth = ''
        self.iface.setConfig("general", "http_proxy", "None" if not httpProxy else "http://%s%s:%s" % (auth, httpProxy, httpProxyPort))
        self.iface.setConfig("general", "https_proxy", "None" if not httpsProxy else "https://%s%s:%s" % (auth, httpsProxy, httpsProxyPort))
        self.iface.setConfig("general", "ftp_proxy", "None" if not ftpProxy else "ftp://%s%s:%s" % (auth, ftpProxy, ftpProxyPort))
class SettingsDialog(QDialog, Ui_SettingsDialog):
    """Top-level settings dialog aggregating the four settings tabs."""
    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.connectSignals()
        self.parent = parent
        # each tab registers itself against this dialog via SettingsTab
        self.generalSettings = GeneralSettings(self)
        self.cacheSettings = CacheSettings(self)
        self.repositorySettings = RepositorySettings(self)
        self.proxySettings = ProxySettings(self)
    def connectSignals(self):
        self.buttonOk.clicked.connect(self.saveSettings)
        self.buttonCancel.clicked.connect(self.cancelSettings)
        self.buttonHelp.clicked.connect(self.showHelp)
    def cancelSettings(self):
        """Discard edits by reloading every tab, then close the dialog."""
        for tab in (self.generalSettings, self.cacheSettings, \
                    self.repositorySettings, self.proxySettings):
            tab.initialize()
        self.reject()
    def saveSettings(self):
        """Save each modified tab; errors are reported but do not abort
        saving of the remaining tabs."""
        for settings in [self.generalSettings, self.cacheSettings, self.repositorySettings, self.proxySettings]:
            try:
                if settings.changed:
                    settings.save()
            except Exception, e:
                self.parent.cw.exceptionCaught(str(e))
            finally:
                if settings.changed:
                    settings.initialize()
                    settings.changed = False
        self.config = config.PMConfig()
    def showHelp(self):
        helpDialog = helpdialog.HelpDialog(self, helpdialog.PREFERENCES)
        helpDialog.show()
| PisiLinux-PyQt5Port/package-manager | src/settingsdialog.py | Python | gpl-2.0 | 20,739 | 0.003809 |
#!/usr/bin/python3
import os, sys, random
import argparse
# this script processes all the log simulations in one dir, and writes the values of one particular attribute into one single file.
def prepareProcess(inputDir, simulationFile, separator, output, attribute):
    """Write the skeleton of the aggregated output file and locate *attribute*.

    Creates *output* containing a 'var' header followed by one 'step<N>' row
    per data line of *simulationFile* (N is the first column of each line),
    and returns the column index of *attribute* in the simulation's header.

    :raises ValueError: if *attribute* is not a column of the header
    """
    with open(inputDir + '/' + simulationFile, 'r') as simulation, \
         open(output, 'w') as out:
        out.write('var' + '\n')
        # strip the newline first so *attribute* is found even when it is
        # the last column (the old code compared against 'name\n' and failed)
        header = simulation.readline().strip('\n').split(separator)
        try:
            indexAttribute = header.index(attribute)
        except ValueError:
            raise ValueError('attribute %r not found in %s' % (attribute, simulationFile))
        for line in simulation:
            out.write('step' + line.split(separator)[0] + '\n')
    return indexAttribute
def processSimulation(inputDir, simulationFile, separator, outputName, attributeIndex):
    """Append the attribute column of one simulation file to the aggregated
    output, rewriting *outputName* with one extra column.

    The new column is headed by the simulation file name and holds the value
    at *attributeIndex* for each time step.
    """
    # Write the temp file next to the output and rename within the same
    # directory: the old bare 'tmp' in the CWD collided between runs and
    # os.rename could fail across filesystems (EXDEV).
    tmpName = outputName + '.tmp'
    with open(outputName, 'r') as output, \
         open(tmpName, 'w') as outputTmp, \
         open(inputDir + '/' + simulationFile, 'r') as simulation:
        # header row gains one column named after the simulation file
        outputTmp.write(output.readline().strip('\n') + separator + simulationFile + '\n')
        simulation.readline()  # skip the simulation's own header
        for simulationLine in simulation:
            # strip('\n') keeps last-column values from embedding a newline
            value = simulationLine.split(separator)[attributeIndex].strip('\n')
            outputTmp.write(output.readline().strip('\n') + separator + value + '\n')
    os.rename(tmpName, outputName)
def main():
    """Merge the chosen attribute column of every .csv under --input into
    one --output file, one column per simulation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', default='input', help='directory where simulated files are stored')
    parser.add_argument('-o', '--output', default='results.csv', help='output file')
    parser.add_argument('-s', '--separator', default=';', help='separator token between values')
    parser.add_argument('-a', '--attribute', default='Number of agents', help='name of the attribute column to process')
    args = parser.parse_args()
    # truncate the output so repeated runs start from a clean file
    outputFile = open(args.output, 'w')
    outputFile.close()
    header = 0  # becomes 1 once the output skeleton has been written
    for root, dirs, simulationFiles in os.walk(args.input):
        # NOTE(review): files are always opened relative to args.input, so
        # .csv files in nested subdirectories would fail -- confirm the
        # input directory is expected to be flat.
        for simulationFile in simulationFiles:
            if not simulationFile.endswith('.csv'):
                continue
            if header == 0:
                attributeIndex = prepareProcess(args.input, simulationFile, args.separator, args.output, args.attribute)
                header = 1
            # BUGFIX: was a Python 2 print statement, a SyntaxError under
            # the file's declared #!/usr/bin/python3 interpreter.
            print('processing simulation results in file: ' + simulationFile)
            processSimulation(args.input, simulationFile, args.separator, args.output, attributeIndex)

if __name__ == "__main__":
    main()
| montanier/pandora | bin/joinResults.py | Python | lgpl-3.0 | 2,368 | 0.029561 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench Arguments Warnings
=====================================
"""
# Imports
import unittest
from modules.testArgumentError import testArgumentError
def main():
    """Build a suite from the testArgumentError case and run it verbosely."""
    suite = unittest.TestLoader().loadTestsFromTestCase(testArgumentError)
    unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
    main()
| mF2C/COMPSs | tests/sources/python/8_argument_error/src/args_error.py | Python | apache-2.0 | 392 | 0.002551 |
import os
from configurations import values
from boto.s3.connection import OrdinaryCallingFormat
from {{cookiecutter.app_name}}.config.common import Common
try:
# Python 2.x
import urlparse
except ImportError:
# Python 3.x
from urllib import parse as urlparse
class Production(Common):
    """Production settings (django-configurations): secure cookies/headers,
    Postgres, S3 media storage, whitenoise static files, Redis cache and RQ.
    """
    # Honor the 'X-Forwarded-Proto' header for request.is_secure()
    # https://devcenter.heroku.com/articles/getting-started-with-django
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    INSTALLED_APPS = Common.INSTALLED_APPS
    # Read from the DJANGO_SECRET_KEY environment variable; the app refuses
    # to start without it (values.SecretValue raises when unset).
    SECRET_KEY = values.SecretValue()
    # Postgres
    DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.app_name}}')
    # django-secure
    # http://django-secure.readthedocs.org/en/v0.1.2/settings.html
    INSTALLED_APPS += ("djangosecure", )
    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)
    # Site
    # https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    ALLOWED_HOSTS = ["*"]
    INSTALLED_APPS += ("gunicorn", )
    # Template
    # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
    # Media files
    # http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += ('storages',)
    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
    AWS_ACCESS_KEY_ID = values.Value('DJANGO_AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = values.Value('DJANGO_AWS_SECRET_ACCESS_KEY')
    AWS_STORAGE_BUCKET_NAME = values.Value('DJANGO_AWS_STORAGE_BUCKET_NAME')
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False
    MEDIA_URL = 'https://s3.amazonaws.com/{}/'.format(AWS_STORAGE_BUCKET_NAME)
    AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
    # https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching#cache-control
    # Response can be cached by browser and any intermediary caches (i.e. it is "public") for up to 1 day
    # 86400 = (60 seconds x 60 minutes x 24 hours)
    AWS_HEADERS = {
        'Cache-Control': 'max-age=86400, s-maxage=86400, must-revalidate',
    }
    # Static files
    STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
    # Caching
    redis_url = urlparse.urlparse(os.environ.get('REDISTOGO_URL', 'redis://localhost:6379'))
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': '{}:{}'.format(redis_url.hostname, redis_url.port),
            'OPTIONS': {
                'DB': 0,
                'PASSWORD': redis_url.password,
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 50,
                    'timeout': 20,
                }
            }
        }
    }
    # Django RQ production settings
    RQ_QUEUES = {
        'default': {
            'URL': os.getenv('REDISTOGO_URL', 'redis://localhost:6379'),
            'DB': 0,
            'DEFAULT_TIMEOUT': 500,
        },
    }
    # rendition creation is done ahead of time in production, never on demand
    Common.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand'] = False
| amitassaraf/angular2-django-boilerplate | {{cookiecutter.github_repository_name}}/src/{{cookiecutter.app_name}}/config/production.py | Python | mit | 3,763 | 0.000797 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.basic import AnsibleModule
import git
import itertools
import multiprocessing
import os
import signal
import time
DOCUMENTATION = """
---
module: git_requirements
short_description: Module to run a multithreaded git clone
options:
repo_info:
description:
- List of repo information dictionaries containing at
a minimum a key entry "src" with the source git URL
to clone for each repo. In these dictionaries, one
can further specify:
"path" - destination clone location
"version" - git version to checkout
"refspec" - git refspec to checkout
"depth" - clone depth level
"force" - require git clone uses "--force"
default_path:
description:
Default git clone path (str) in case not
specified on an individual repo basis in
repo_info. Defaults to "master". Not
required.
default_version:
description:
Default git version (str) in case not
specified on an individual repo basis in
repo_info. Defaults to "master". Not
required.
default_refspec:
description:
Default git repo refspec (str) in case not
specified on an individual repo basis in
repo_info. Defaults to "". Not required.
default_depth:
description:
Default clone depth (int) in case not specified
on an individual repo basis. Defaults to 10.
Not required.
retries:
description:
Integer number of retries allowed in case of git
clone failure. Defaults to 1. Not required.
delay:
description:
Integer time delay (seconds) between git clone
retries in case of failure. Defaults to 0. Not
required.
force:
description:
Boolean. Apply --force flags to git clones wherever
possible. Defaults to False. Not required.
core_multiplier:
description:
Integer multiplier on the number of cores
present on the machine to use for
multithreading. For example, on a 2 core
machine, a multiplier of 4 would use 8
threads. Defaults to 4. Not required.
"""
EXAMPLES = r"""
- name: Clone repos
git_requirements:
repo_info: "[{'src':'https://github.com/ansible/',
'name': 'ansible'
'dest': '/etc/opt/ansible'}]"
"""
def init_signal():
    # Pool-worker initializer: ignore Ctrl-C in workers so only the parent
    # process handles KeyboardInterrupt.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def check_out_version(repo, version, pull=False, force=False,
                      refspec=None, tag=False, depth=10):
    """Fetch and check out *version* in the GitPython *repo*.

    Optionally cleans untracked files (when force) and pulls after the
    checkout. Returns a list of error strings; an empty list means success.
    """
    try:
        repo.git.fetch(tags=tag, force=force, refspec=refspec, depth=depth)
    except Exception as e:
        return ["Failed to fetch %s\n%s" % (repo.working_dir, str(e))]
    try:
        repo.git.checkout(version, force=force)
    except Exception as e:
        return [
            "Failed to check out version %s for %s\n%s" %
            (version, repo.working_dir, str(e))]
    # only clean when forcing; otherwise leave local modifications alone
    if repo.is_dirty(untracked_files=True) and force:
        try:
            repo.git.clean(force=force)
        except Exception as e:
            return [
                "Failed to clean up repository% s\n%s" %
                (repo.working_dir, str(e))]
    if pull:
        try:
            repo.git.pull(force=force, refspec=refspec, depth=depth)
        except Exception as e:
            return ["Failed to pull repo %s\n%s" % (repo.working_dir, str(e))]
    return []
def pull_wrapper(info):
role_info = info
retries = info[1]["retries"]
delay = info[1]["delay"]
for i in range(retries):
success = pull_role(role_info)
if success:
return True
else:
time.sleep(delay)
info[2].append(["Role {0} failed after {1} retries\n".format(role_info[0],
retries)])
return False
def pull_role(info):
role, config, failures = info
required_version = role["version"]
version_hash = False
if 'version' in role:
# If the version is the length of a hash then treat is as one
if len(required_version) == 40:
version_hash = True
def get_repo(dest):
try:
return git.Repo(dest)
except Exception:
failtxt = "Role in {0} is broken/not a git repo.".format(
role["dest"])
failtxt += "Please delete or fix it manually"
failures.append(failtxt)
return False
# if repo exists
if os.path.exists(role["dest"]):
repo = get_repo(role["dest"])
if not repo:
return False # go to next role
repo_url = list(repo.remote().urls)[0]
if repo_url != role["src"]:
repo.remote().set_url(role["src"])
# if they want master then fetch, checkout and pull to stay at latest
# master
if required_version == "master":
fail = check_out_version(repo, required_version, pull=True,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
# If we have a hash then reset it to
elif version_hash:
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
else:
# describe can fail in some cases so be careful:
try:
current_version = repo.git.describe(tags=True)
except Exception:
current_version = ""
if current_version == required_version and not config["force"]:
fail = []
pass
else:
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"],
tag=True)
else:
try:
# If we have a hash id then treat this a little differently
if version_hash:
git.Repo.clone_from(role["src"], role["dest"],
branch='master',
no_single_branch=True,
depth=role["depth"])
repo = get_repo(role["dest"])
if not repo:
return False # go to next role
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
else:
git.Repo.clone_from(role["src"], role["dest"],
branch=required_version,
depth=role["depth"],
no_single_branch=True)
fail = []
except Exception as e:
fail = ('Failed cloning repo %s\n%s' % (role["dest"], str(e)))
if fail == []:
return True
else:
failures.append(fail)
return False
def set_default(dictionary, key, defaults):
if key not in dictionary.keys():
dictionary[key] = defaults[key]
def main():
# Define variables
failures = multiprocessing.Manager().list()
# Data we can pass in to the module
fields = {
"repo_info": {"required": True, "type": "list"},
"default_path": {"required": True,
"type": "str"},
"default_version": {"required": False,
"type": "str",
"default": "master"},
"default_refspec": {"required": False,
"type": "str",
"default": None},
"default_depth": {"required": False,
"type": "int",
"default": 10},
"retries": {"required": False,
"type": "int",
"default": 1},
"delay": {"required": False,
"type": "int",
"default": 0},
"force": {"required": False,
"type": "bool",
"default": False},
"core_multiplier": {"required": False,
"type": "int",
"default": 4},
}
# Pull in module fields and pass into variables
module = AnsibleModule(argument_spec=fields)
git_repos = module.params['repo_info']
defaults = {
"path": module.params["default_path"],
"depth": module.params["default_depth"],
"version": module.params["default_version"],
"refspec": module.params["default_refspec"]
}
config = {
"retries": module.params["retries"],
"delay": module.params["delay"],
"force": module.params["force"],
"core_multiplier": module.params["core_multiplier"]
}
# Set up defaults
for repo in git_repos:
for key in ["path", "refspec", "version", "depth"]:
set_default(repo, key, defaults)
if "name" not in repo.keys():
repo["name"] = os.path.basename(repo["src"])
repo["dest"] = os.path.join(repo["path"], repo["name"])
# Define varibles
failures = multiprocessing.Manager().list()
core_count = multiprocessing.cpu_count() * config["core_multiplier"]
# Load up process and pass in interrupt and core process count
p = multiprocessing.Pool(core_count, init_signal)
clone_success = p.map(pull_wrapper, zip(git_repos,
itertools.repeat(config),
itertools.repeat(failures)),
chunksize=1)
p.close()
success = all(i for i in clone_success)
if success:
module.exit_json(msg=str(git_repos), changed=True)
else:
module.fail_json(msg=("Module failed"), meta=failures)
if __name__ == '__main__':
main()
| stackforge/os-ansible-deployment | playbooks/library/git_requirements.py | Python | apache-2.0 | 10,270 | 0 |
import os;
f = open('depsVerified', 'w');
f.write('ok');
f.close(); | krahman/node-java | touch.py | Python | mit | 68 | 0.073529 |
from django.db import models
from django.utils import timezone
# Create your models here.
def formatDateTime(dateTime):
return timezone.localtime(dateTime).strftime("%Y-%m-%d %H:%M:%S")
class Beacon(models.Model):
macAddr = models.CharField(max_length=20, unique=True)
uuid = models.UUIDField(editable=False)
major = models.CharField(max_length=10, null=False)
minor = models.CharField(max_length=10, null=False)
def getDict(self):
dict = {}
dict['macAddr'] = self.macAddr
dict['uuid'] = str(self.uuid)
dict['major'] = self.major
dict['minor'] = self.minor
return dict
class Meta:
unique_together = ('uuid', 'major', 'minor')
class DetectorDevice(models.Model):
""" device which detects beacons, now only cellphones """
externalId = models.CharField(max_length=32, unique=True)
def getDict(self):
dict = {}
dict['deviceId'] = self.externalId
return dict
def __str__(self):
return self.externalId
class BeaconLog(models.Model):
time = models.DateTimeField(null=False)
rssi = models.IntegerField(null=False)
measurePower = models.IntegerField(null=False)
beacon = models.ForeignKey(Beacon, on_delete=models.CASCADE)
device = models.ForeignKey(DetectorDevice, on_delete=models.CASCADE)
def __str__(self):
return "time: {} | rssi: {} | measurePower: {}".format(
self.time, self.rssi, self.measurePower)
def getDict(self):
dict = {}
dict['time'] = formatDateTime(self.time)
dict['rssi'] = self.rssi
dict['measurePower'] = self.measurePower
return dict
class Event(models.Model):
time = models.DateTimeField(null=False)
event = models.TextField(null=False)
device = models.ForeignKey(DetectorDevice, on_delete=models.CASCADE)
def getDict(self):
dict = {}
dict['time'] = formatDateTime(self.time)
dict['event'] = self.event
return dict
| SmartcitySantiagoChile/onlineGPS | beacon/models.py | Python | mit | 2,058 | 0.006317 |
import json
import os
from processes.postgres import Postgres
from processes.gather_exception import GatherException
try:
DB_SERVER = os.environ['DB_SERVER']
DB_PORT = os.environ['DB_PORT']
DB_DATABASE = os.environ['DB_DATABASE']
DB_USER = os.environ['DB_USER']
DB_PASSWORD = os.environ['DB_PASSWORD']
except KeyError:
try:
from processes.GLOBALS import DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD
except ImportError:
print("No parameters provided")
exit()
class Main(object):
def __init__(self):
self.pg = Postgres(DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD)
self.source_topic = 'youtube'
self.destination_topic = 'movies'
def run(self, data):
"""
This inserts the relevant json information
into the table kino.movies.
:param data: json data holding information on films.
"""
imdb_id = data['imdb_id']
omdb_movie_data = data['omdb_main']
tmdb_movie_data = data['tmdb_main']
sql = """insert into kino.languages(language)
select y.language
from json_to_recordset(%s) x (original_language varchar(1000))
join kino.iso2language y
on x.original_language = y.iso3166
where language not in (select language
from kino.languages)"""
self.pg.pg_cur.execute(sql, (json.dumps(tmdb_movie_data),))
self.pg.pg_conn.commit()
# We delete our record from kino.movies first.
# Due to foreign keys with 'on delete cascade', this clears all records from
# the database associated with that imdb_id.
sql = """delete from kino.movies
where imdb_id = '{0}'""".format(imdb_id)
self.pg.pg_cur.execute(sql)
self.pg.pg_conn.commit()
# We also delete any records in errored attached to this imdb_id, as
# we have successfully gathered information for the film.
sql = """delete from kino.errored
where imdb_id = '{0}'""".format(imdb_id)
self.pg.pg_cur.execute(sql)
self.pg.pg_conn.commit()
sql = """insert into kino.movies (imdb_id, title, runtime, rated, released, orig_language, plot, tstamp)
select x.imdb_id
, y.title
, y.runtime
, x.rated
, y.release_date::date
, z.language
, y.plot
, CURRENT_DATE
from json_to_recordset(%s) x ( imdb_id varchar(15), rated varchar(10) )
join json_to_recordset(%s) y ( imdb_id varchar(15), title varchar(1000), runtime integer
, release_date date, plot varchar(4000), original_language varchar(1000))
on x.imdb_id = y.imdb_id
join kino.iso2language z
on y.original_language = z.iso3166
"""
self.pg.pg_cur.execute(sql, (json.dumps(omdb_movie_data), json.dumps(tmdb_movie_data)))
if self.pg.pg_cur.rowcount != 1:
raise GatherException(omdb_movie_data[0]['imdb_id'], 'No insert into movies, most likely due to a new language')
self.pg.pg_conn.commit()
sql = """insert into kino.kino_ratings (imdb_id, rating) values (%s, 3) on conflict do nothing"""
self.pg.pg_cur.execute(sql, (imdb_id,))
self.pg.pg_conn.commit()
return data
| kinoreel/kino-gather | processes/insert_movies.py | Python | mit | 3,618 | 0.003317 |
"""A modest set of tools to work with Django models."""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
# with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='sculpt.model_tools',
version='0.1',
description='A modest set of tools to work with Django models.',
long_description='',
url='https://github.com/damienjones/sculpt-model-tools',
author='Damien M. Jones',
author_email='[email protected]',
license='LGPLv2',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='',
packages=find_packages(),
install_requires=[
'sculpt-common>=0.2',
],
# package_data={},
# data_files=[],
# entry_points={},
# console_scripts={},
)
| damienjones/sculpt-model-tools | setup.py | Python | lgpl-2.1 | 1,198 | 0.022538 |
from JumpScale import j
descr = """
This jumpscript returns network info
"""
category = "monitoring"
organization = "jumpscale"
author = "[email protected]"
license = "bsd"
version = "1.0"
roles = []
def action():
return j.sal.nettools.getNetworkInfo()
if __name__ == "__main__":
print(action())
| Jumpscale/jumpscale_core8 | apps/agentcontroller/jumpscripts/jumpscale/network_info.py | Python | apache-2.0 | 312 | 0.003205 |
# pack.py -- For dealing with packed git objects.
# Copyright (C) 2007 James Westby <[email protected]>
# Copyright (C) 2008-2013 Jelmer Vernooij <[email protected]>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Classes for dealing with packed git objects.
A pack is a compact representation of a bunch of objects, stored
using deltas where possible.
They have two parts, the pack file, which stores the data, and an index
that tells you where the data is.
To find an object you look in all of the index files 'til you find a
match for the object name. You then use the pointer got from this as
a pointer in to the corresponding packfile.
"""
from collections import defaultdict
import binascii
from io import BytesIO, UnsupportedOperation
from collections import (
deque,
)
import difflib
import struct
from itertools import chain
try:
from itertools import imap, izip
except ImportError:
# Python3
imap = map
izip = zip
import os
import sys
from hashlib import sha1
from os import (
SEEK_CUR,
SEEK_END,
)
from struct import unpack_from
import zlib
try:
import mmap
except ImportError:
has_mmap = False
else:
has_mmap = True
# For some reason the above try, except fails to set has_mmap = False for plan9
if sys.platform == 'Plan9':
has_mmap = False
from dulwich.errors import ( # noqa: E402
ApplyDeltaError,
ChecksumMismatch,
)
from dulwich.file import GitFile # noqa: E402
from dulwich.lru_cache import ( # noqa: E402
LRUSizeCache,
)
from dulwich.objects import ( # noqa: E402
ShaFile,
hex_to_sha,
sha_to_hex,
object_header,
)
# Pack object type numbers for deltified objects: an OFS_DELTA identifies its
# base by a byte offset within the same pack, while a REF_DELTA identifies it
# by the base's 20-byte binary SHA.
OFS_DELTA = 6
REF_DELTA = 7
DELTA_TYPES = (OFS_DELTA, REF_DELTA)
# Default delta search window size; presumably mirrors git's pack.window
# setting — confirm at the use site (delta compression code).
DEFAULT_PACK_DELTA_WINDOW_SIZE = 10
def take_msb_bytes(read, crc32=None):
    """Read bytes marked with most significant bit.

    Bytes are consumed one at a time until a byte without the MSB
    (continuation bit) set is seen; that final byte is included in the
    result.

    :param read: Read function
    :param crc32: Optional starting CRC32 to update with each byte read;
        if None, no CRC is computed.
    :return: Tuple of (list of byte values read, updated crc32 or None)
    """
    ret = []
    while True:
        b = read(1)
        if crc32 is not None:
            crc32 = binascii.crc32(b, crc32)
        value = ord(b[:1])
        ret.append(value)
        if not value & 0x80:
            # Continuation bit clear: this was the last byte of the run.
            break
    return ret, crc32
class UnpackedObject(object):
    """Class encapsulating an object unpacked from a pack file.

    These objects should only be created from within unpack_object. Most
    members start out as empty and are filled in at various points by
    read_zlib_chunks, unpack_object, DeltaChainIterator, etc.

    End users of this object should take care that the function they're getting
    this object from is guaranteed to set the members they need.
    """

    __slots__ = [
      'offset',         # Offset in its pack.
      '_sha',           # Cached binary SHA.
      'obj_type_num',   # Type of this object.
      'obj_chunks',     # Decompressed and delta-resolved chunks.
      'pack_type_num',  # Type of this object in the pack (may be a delta).
      'delta_base',     # Delta base offset or SHA.
      'comp_chunks',    # Compressed object chunks.
      'decomp_chunks',  # Decompressed object chunks.
      'decomp_len',     # Decompressed length of this object.
      'crc32',          # CRC32.
      ]

    # TODO(dborowitz): read_zlib_chunks and unpack_object could very well be
    # methods of this object.
    def __init__(self, pack_type_num, delta_base, decomp_len, crc32):
        """Initialize an unpacked object.

        :param pack_type_num: Type number of the object as stored in the
            pack; may be one of DELTA_TYPES.
        :param delta_base: Delta base (pack offset or binary SHA) for delta
            types, or None otherwise.
        :param decomp_len: Expected decompressed length in bytes.
        :param crc32: Starting CRC32 of the compressed data, or None if no
            CRC32 is being computed.
        """
        self.offset = None
        self._sha = None
        self.pack_type_num = pack_type_num
        self.delta_base = delta_base
        self.comp_chunks = None
        self.decomp_chunks = []
        self.decomp_len = decomp_len
        self.crc32 = crc32

        if pack_type_num in DELTA_TYPES:
            # Real type and contents are unknown until the delta chain is
            # resolved.
            self.obj_type_num = None
            self.obj_chunks = None
        else:
            self.obj_type_num = pack_type_num
            # Non-delta objects are fully described by their decompressed
            # chunks, so alias them directly.  (A redundant re-assignment of
            # delta_base that used to live here was removed; it is already
            # set unconditionally above.)
            self.obj_chunks = self.decomp_chunks

    def sha(self):
        """Return the binary SHA of this object."""
        if self._sha is None:
            # Cache the digest; obj_sha walks all chunks.
            self._sha = obj_sha(self.obj_type_num, self.obj_chunks)
        return self._sha

    def sha_file(self):
        """Return a ShaFile from this object."""
        return ShaFile.from_raw_chunks(self.obj_type_num, self.obj_chunks)

    # Only provided for backwards compatibility with code that expects either
    # chunks or a delta tuple.
    def _obj(self):
        """Return the decompressed chunks, or (delta base, delta chunks)."""
        if self.pack_type_num in DELTA_TYPES:
            return (self.delta_base, self.decomp_chunks)
        else:
            return self.decomp_chunks

    def __eq__(self, other):
        # Structural equality: every slot must match.
        if not isinstance(other, UnpackedObject):
            return False
        for slot in self.__slots__:
            if getattr(self, slot) != getattr(other, slot):
                return False
        return True

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        data = ['%s=%r' % (s, getattr(self, s)) for s in self.__slots__]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(data))
# Default read chunk size, in bytes, for streaming zlib decompression.
_ZLIB_BUFSIZE = 4096
def read_zlib_chunks(read_some, unpacked, include_comp=False,
                     buffer_size=_ZLIB_BUFSIZE):
    """Read zlib data from a buffer.

    This function requires that the buffer have additional data following the
    compressed data, which is guaranteed to be the case for git pack files.

    :param read_some: Read function that returns at least one byte, but may
        return less than the requested size.
    :param unpacked: An UnpackedObject to write result data to. If its crc32
        attr is not None, the CRC32 of the compressed bytes will be computed
        using this starting CRC32.
        After this function, will have the following attrs set:
        * comp_chunks    (if include_comp is True)
        * decomp_chunks
        * decomp_len
        * crc32
    :param include_comp: If True, include compressed data in the result.
    :param buffer_size: Size of the read buffer.
    :return: Leftover unused data from the decompression.
    :raise zlib.error: if a decompression error occurred.
    """
    if unpacked.decomp_len <= -1:
        raise ValueError('non-negative zlib data stream size expected')
    decomp_obj = zlib.decompressobj()
    comp_chunks = []
    decomp_chunks = unpacked.decomp_chunks
    decomp_len = 0
    crc32 = unpacked.crc32
    while True:
        add = read_some(buffer_size)
        if not add:
            raise zlib.error('EOF before end of zlib stream')
        comp_chunks.append(add)
        decomp = decomp_obj.decompress(add)
        decomp_len += len(decomp)
        decomp_chunks.append(decomp)
        unused = decomp_obj.unused_data
        if unused:
            # Bytes past the end of the zlib stream mark the end of this
            # object; trim them from the last compressed chunk and the CRC.
            left = len(unused)
            if crc32 is not None:
                crc32 = binascii.crc32(add[:-left], crc32)
            if include_comp:
                comp_chunks[-1] = add[:-left]
            break
        elif crc32 is not None:
            crc32 = binascii.crc32(add, crc32)
    if crc32 is not None:
        # Normalize to an unsigned 32-bit value.
        crc32 &= 0xffffffff
    if decomp_len != unpacked.decomp_len:
        raise zlib.error('decompressed data does not match expected size')
    unpacked.crc32 = crc32
    if include_comp:
        unpacked.comp_chunks = comp_chunks
    return unused
def iter_sha1(iter):
    """Return the hexdigest of the SHA1 over a set of names.

    :param iter: Iterator over string objects
    :return: 40-byte hex sha1 digest
    """
    digest = sha1()
    for name_bytes in iter:
        digest.update(name_bytes)
    return digest.hexdigest().encode('ascii')
def load_pack_index(path):
    """Load an index file by path.

    :param path: Path to the index file
    :return: A PackIndex loaded from the given path
    """
    with GitFile(path, 'rb') as f:
        return load_pack_index_file(path, f)
def _load_file_contents(f, size=None):
    """Load the contents of *f*, using a read-only mmap when possible.

    :param f: File-like object. If it exposes a real file descriptor its
        contents may be memory-mapped rather than read into memory.
    :param size: Expected size in bytes; determined via fstat if not given.
    :return: Tuple of (contents, size) where contents is either a mmap
        object or a bytes object.
    """
    try:
        fd = f.fileno()
    except (UnsupportedOperation, AttributeError):
        fd = None
    # Attempt to use mmap if possible
    if fd is not None:
        if size is None:
            size = os.fstat(fd).st_size
        if has_mmap:
            try:
                contents = mmap.mmap(fd, size, access=mmap.ACCESS_READ)
            except mmap.error:
                # Perhaps a socket?
                pass
            else:
                return contents, size
    # Fall back to a plain read (no fd, no mmap support, or mmap failed).
    contents = f.read()
    size = len(contents)
    return contents, size
def load_pack_index_file(path, f):
    """Load an index file from a file-like object.

    :param path: Path for the index file
    :param f: File-like object
    :return: A PackIndex loaded from the given file
    """
    contents, size = _load_file_contents(f)
    if contents[:4] != b'\377tOc':
        # No v2+ magic marker, so this must be a version 1 index.
        return PackIndex1(path, file=f, contents=contents, size=size)
    (version,) = struct.unpack(b'>L', contents[4:8])
    if version != 2:
        raise KeyError('Unknown pack index format %d' % version)
    return PackIndex2(path, file=f, contents=contents, size=size)
def bisect_find_sha(start, end, sha, unpack_name):
    """Find a SHA in a data blob with sorted SHAs.

    Standard binary search over the inclusive index range [start, end],
    fetching candidate SHAs through *unpack_name*.

    :param start: Start index of range to search
    :param end: End index of range to search
    :param sha: Sha to find
    :param unpack_name: Callback to retrieve SHA by index
    :return: Index of the SHA, or None if it wasn't found
    """
    assert start <= end
    lo, hi = start, end
    while lo <= hi:
        mid = (lo + hi) // 2
        candidate = unpack_name(mid)
        if candidate == sha:
            return mid
        if candidate < sha:
            lo = mid + 1
        else:
            hi = mid - 1
    return None
class PackIndex(object):
    """An index in to a packfile.

    Given a sha id of an object a pack index can tell you the location in the
    packfile of that object if it has it.
    """

    def __eq__(self, other):
        # Two indexes are considered equal when they list the same object
        # names in the same order; offsets and CRC32s are not compared.
        if not isinstance(other, PackIndex):
            return False
        for (name1, _, _), (name2, _, _) in izip(self.iterentries(),
                                                 other.iterentries()):
            if name1 != name2:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        """Return the number of entries in this pack index."""
        raise NotImplementedError(self.__len__)

    def __iter__(self):
        """Iterate over the SHAs in this pack."""
        return imap(sha_to_hex, self._itersha())

    def iterentries(self):
        """Iterate over the entries in this pack index.

        :return: iterator over tuples with object name, offset in packfile and
            crc32 checksum.
        """
        raise NotImplementedError(self.iterentries)

    def get_pack_checksum(self):
        """Return the SHA1 checksum stored for the corresponding packfile.

        :return: 20-byte binary digest
        """
        raise NotImplementedError(self.get_pack_checksum)

    def object_index(self, sha):
        """Return the index in to the corresponding packfile for the object.

        Given the name of an object it will return the offset that object
        lives at within the corresponding pack file. If the pack file doesn't
        have the object then None will be returned.
        """
        if len(sha) == 40:
            # A 40-byte value is a hex SHA; convert to binary before lookup.
            sha = hex_to_sha(sha)
        return self._object_index(sha)

    def object_sha1(self, index):
        """Return the SHA1 corresponding to the index in the pack file.
        """
        # PERFORMANCE/TODO(jelmer): Avoid scanning entire index
        for (name, offset, crc32) in self.iterentries():
            if offset == index:
                return name
        else:
            # for/else: reached only when no entry matched the offset.
            raise KeyError(index)

    def _object_index(self, sha):
        """See object_index.

        :param sha: A *binary* SHA string. (20 characters long)_
        """
        raise NotImplementedError(self._object_index)

    def objects_sha1(self):
        """Return the hex SHA1 over all the shas of all objects in this pack.

        :note: This is used for the filename of the pack.
        """
        return iter_sha1(self._itersha())

    def _itersha(self):
        """Yield all the SHA1's of the objects in the index, sorted."""
        raise NotImplementedError(self._itersha)
class MemoryPackIndex(PackIndex):
    """Pack index that is stored entirely in memory."""

    def __init__(self, entries, pack_checksum=None):
        """Create a new MemoryPackIndex.

        :param entries: Sequence of name, idx, crc32 (sorted)
        :param pack_checksum: Optional pack checksum
        """
        self._by_sha = {}
        self._by_index = {}
        for name, idx, crc32 in entries:
            self._by_sha[name] = idx
            self._by_index[idx] = name
        self._entries = entries
        self._pack_checksum = pack_checksum

    def get_pack_checksum(self):
        """Return the pack checksum supplied at construction (may be None)."""
        return self._pack_checksum

    def __len__(self):
        """Return the number of entries in this pack index."""
        return len(self._entries)

    def _object_index(self, sha):
        """Return the pack offset for the object with the given binary SHA.

        :raise KeyError: if the SHA is not present in this index.
        """
        # BUGFIX: _by_sha maps name -> offset (an int).  The previous
        # implementation returned self._by_sha[sha][0], which raised
        # TypeError on every successful lookup.
        return self._by_sha[sha]

    def object_sha1(self, index):
        """Return the binary SHA for the object at the given pack offset."""
        return self._by_index[index]

    def _itersha(self):
        """Iterate over the binary SHAs in this index."""
        return iter(self._by_sha)

    def iterentries(self):
        """Iterate over the (name, offset, crc32) entries."""
        return iter(self._entries)
class FilePackIndex(PackIndex):
    """Pack index that is based on a file.

    To do the loop it opens the file, and indexes first 256 4 byte groups
    with the first byte of the sha id. The value in the four byte group indexed
    is the end of the group that shares the same starting byte. Subtract one
    from the starting byte and index again to find the start of the group.

    The values are sorted by sha id within the group, so do the math to find
    the start and end offset and then bisect in to find if the value is
    present.
    """

    def __init__(self, filename, file=None, contents=None, size=None):
        """Create a pack index object.

        Provide it with the name of the index file to consider, and it will map
        it whenever required.
        """
        self._filename = filename
        # Take the size now, so it can be checked each time we map the file to
        # ensure that it hasn't changed.
        if file is None:
            self._file = GitFile(filename, 'rb')
        else:
            self._file = file
        if contents is None:
            self._contents, self._size = _load_file_contents(self._file, size)
        else:
            self._contents, self._size = (contents, size)

    @property
    def path(self):
        """Path of the index file on disk."""
        return self._filename

    def __eq__(self, other):
        # Quick optimization: unequal fan-out tables imply unequal indexes,
        # so we can skip the per-entry comparison done by the base class.
        if (isinstance(other, FilePackIndex) and
                self._fan_out_table != other._fan_out_table):
            return False
        return super(FilePackIndex, self).__eq__(other)

    def close(self):
        """Close the underlying file and, if it supports it, the mapping."""
        self._file.close()
        if getattr(self._contents, "close", None) is not None:
            self._contents.close()

    def __len__(self):
        """Return the number of entries in this pack index."""
        # The last fan-out entry is the cumulative count over all 256
        # buckets, i.e. the total number of objects.
        return self._fan_out_table[-1]

    def _unpack_entry(self, i):
        """Unpack the i-th entry in the index file.

        :return: Tuple with object name (SHA), offset in pack file and CRC32
            checksum (if known).
        """
        raise NotImplementedError(self._unpack_entry)

    def _unpack_name(self, i):
        """Unpack the i-th name from the index file."""
        raise NotImplementedError(self._unpack_name)

    def _unpack_offset(self, i):
        """Unpack the i-th object offset from the index file."""
        raise NotImplementedError(self._unpack_offset)

    def _unpack_crc32_checksum(self, i):
        """Unpack the crc32 checksum for the ith object from the index file.
        """
        raise NotImplementedError(self._unpack_crc32_checksum)

    def _itersha(self):
        # Yield binary SHAs in index order (sorted, by the file format).
        for i in range(len(self)):
            yield self._unpack_name(i)

    def iterentries(self):
        """Iterate over the entries in this pack index.

        :return: iterator over tuples with object name, offset in packfile and
            crc32 checksum.
        """
        for i in range(len(self)):
            yield self._unpack_entry(i)

    def _read_fan_out_table(self, start_offset):
        # The fan-out table is 256 big-endian 32-bit cumulative counts, one
        # per possible first byte of a SHA.
        ret = []
        for i in range(0x100):
            fanout_entry = self._contents[
                start_offset+i*4:start_offset+(i+1)*4]
            ret.append(struct.unpack('>L', fanout_entry)[0])
        return ret

    def check(self):
        """Check that the stored checksum matches the actual checksum."""
        actual = self.calculate_checksum()
        stored = self.get_stored_checksum()
        if actual != stored:
            raise ChecksumMismatch(stored, actual)

    def calculate_checksum(self):
        """Calculate the SHA1 checksum over this pack index.

        :return: This is a 20-byte binary digest
        """
        # Everything except the trailing 20-byte checksum is covered.
        return sha1(self._contents[:-20]).digest()

    def get_pack_checksum(self):
        """Return the SHA1 checksum stored for the corresponding packfile.

        :return: 20-byte binary digest
        """
        return bytes(self._contents[-40:-20])

    def get_stored_checksum(self):
        """Return the SHA1 checksum stored for this index.

        :return: 20-byte binary digest
        """
        return bytes(self._contents[-20:])

    def _object_index(self, sha):
        """See object_index.

        :param sha: A *binary* SHA string. (20 characters long)_
        """
        assert len(sha) == 20
        idx = ord(sha[:1])
        if idx == 0:
            start = 0
        else:
            start = self._fan_out_table[idx-1]
        end = self._fan_out_table[idx]
        # Entries within a fan-out bucket are sorted, so bisect for the SHA.
        i = bisect_find_sha(start, end, sha, self._unpack_name)
        if i is None:
            raise KeyError(sha)
        return self._unpack_offset(i)
class PackIndex1(FilePackIndex):
    """Version 1 Pack Index file.

    The v1 layout is a 256-entry fan-out table followed by 24-byte records,
    each holding a 4-byte pack offset and a 20-byte binary SHA.
    """

    def __init__(self, filename, file=None, contents=None, size=None):
        super(PackIndex1, self).__init__(filename, file, contents, size)
        self.version = 1
        self._fan_out_table = self._read_fan_out_table(0)

    def _record_start(self, i):
        """Return the byte offset of the i-th 24-byte index record."""
        return (0x100 * 4) + (i * 24)

    def _unpack_entry(self, i):
        (offset, name) = unpack_from('>L20s', self._contents,
                                     self._record_start(i))
        return (name, offset, None)

    def _unpack_name(self, i):
        # The SHA follows the 4-byte offset within the record.
        base = self._record_start(i) + 4
        return self._contents[base:base + 20]

    def _unpack_offset(self, i):
        (offset,) = unpack_from('>L', self._contents, self._record_start(i))
        return offset

    def _unpack_crc32_checksum(self, i):
        # v1 index files do not store CRC32 checksums.
        return None
class PackIndex2(FilePackIndex):
    """Version 2 Pack Index file."""

    def __init__(self, filename, file=None, contents=None, size=None):
        super(PackIndex2, self).__init__(filename, file, contents, size)
        if self._contents[:4] != b'\377tOc':
            raise AssertionError('Not a v2 pack index file')
        (self.version, ) = unpack_from(b'>L', self._contents, 4)
        if self.version != 2:
            raise AssertionError('Version was %d' % self.version)
        # v2 layout after the 8-byte header: 256-entry fan-out table, then a
        # table of 20-byte SHAs, a table of 4-byte CRC32s, a table of 4-byte
        # pack offsets, and finally a table of 8-byte offsets for entries
        # whose pack offset does not fit in 31 bits.
        self._fan_out_table = self._read_fan_out_table(8)
        self._name_table_offset = 8 + 0x100 * 4
        self._crc32_table_offset = self._name_table_offset + 20 * len(self)
        self._pack_offset_table_offset = (self._crc32_table_offset +
                                          4 * len(self))
        self._pack_offset_largetable_offset = (
            self._pack_offset_table_offset + 4 * len(self))

    def _unpack_entry(self, i):
        """Return (name, pack offset, crc32) for the i-th entry."""
        return (self._unpack_name(i), self._unpack_offset(i),
                self._unpack_crc32_checksum(i))

    def _unpack_name(self, i):
        """Return the 20-byte binary SHA of the i-th entry."""
        offset = self._name_table_offset + i * 20
        return self._contents[offset:offset+20]

    def _unpack_offset(self, i):
        """Return the pack offset of the i-th entry."""
        offset = self._pack_offset_table_offset + i * 4
        offset = unpack_from('>L', self._contents, offset)[0]
        if offset & (2**31):
            # The high bit flags an index into the large-offset table; the
            # remaining 31 bits select the 8-byte entry holding the real
            # 64-bit offset.
            offset = (
                self._pack_offset_largetable_offset +
                (offset & (2 ** 31 - 1)) * 8)
            offset = unpack_from('>Q', self._contents, offset)[0]
        return offset

    def _unpack_crc32_checksum(self, i):
        """Return the CRC32 stored for the i-th entry."""
        return unpack_from('>L', self._contents,
                           self._crc32_table_offset + i * 4)[0]
def read_pack_header(read):
    """Read the header of a pack file.

    :param read: Read function
    :return: Tuple of (pack version, number of objects). If no data is
        available to read, returns (None, None).
    """
    header = read(12)
    if not header:
        return None, None
    magic = header[:4]
    if magic != b'PACK':
        raise AssertionError('Invalid pack header %r' % header)
    (version,) = unpack_from(b'>L', header, 4)
    if version not in (2, 3):
        raise AssertionError('Version was %d' % version)
    (num_objects,) = unpack_from(b'>L', header, 8)
    return (version, num_objects)
def chunks_length(chunks):
    """Return the total byte length of *chunks*.

    :param chunks: Either a bytes object or an iterable of byte chunks.
    :return: Total length in bytes.
    """
    if isinstance(chunks, bytes):
        return len(chunks)
    return sum(len(chunk) for chunk in chunks)
def unpack_object(read_all, read_some=None, compute_crc32=False,
                  include_comp=False, zlib_bufsize=_ZLIB_BUFSIZE):
    """Unpack a Git object.

    :param read_all: Read function that blocks until the number of requested
        bytes are read.
    :param read_some: Read function that returns at least one byte, but may not
        return the number of bytes requested.
    :param compute_crc32: If True, compute the CRC32 of the compressed data. If
        False, the returned CRC32 will be None.
    :param include_comp: If True, include compressed data in the result.
    :param zlib_bufsize: An optional buffer size for zlib operations.
    :return: A tuple of (unpacked, unused), where unused is the unused data
        leftover from decompression, and unpacked in an UnpackedObject with
        the following attrs set:

        * obj_chunks     (for non-delta types)
        * pack_type_num
        * delta_base     (for delta types)
        * comp_chunks    (if include_comp is True)
        * decomp_chunks
        * decomp_len
        * crc32          (if compute_crc32 is True)
    """
    if read_some is None:
        read_some = read_all
    if compute_crc32:
        crc32 = 0
    else:
        crc32 = None

    # The header is a run of MSB-continued bytes: the low 3 bits of the
    # first byte's upper nibble hold the type, and the size is accumulated
    # 7 bits at a time (4 bits from the first byte).
    # Note: renamed from 'bytes' to avoid shadowing the builtin, and the
    # dead 'raw_base' accumulator (never read) has been removed.
    header, crc32 = take_msb_bytes(read_all, crc32=crc32)
    type_num = (header[0] >> 4) & 0x07
    size = header[0] & 0x0f
    for i, byte in enumerate(header[1:]):
        size += (byte & 0x7f) << ((i * 7) + 4)

    if type_num == OFS_DELTA:
        base_bytes, crc32 = take_msb_bytes(read_all, crc32=crc32)
        if base_bytes[-1] & 0x80:
            raise AssertionError
        # Accumulate the base offset 7 bits at a time; each continuation
        # byte first adds one to the running value before shifting.
        delta_base_offset = base_bytes[0] & 0x7f
        for byte in base_bytes[1:]:
            delta_base_offset += 1
            delta_base_offset <<= 7
            delta_base_offset += (byte & 0x7f)
        delta_base = delta_base_offset
    elif type_num == REF_DELTA:
        # REF_DELTA bases are identified by a raw 20-byte SHA.
        delta_base = read_all(20)
        if compute_crc32:
            crc32 = binascii.crc32(delta_base, crc32)
    else:
        delta_base = None

    unpacked = UnpackedObject(type_num, delta_base, size, crc32)
    unused = read_zlib_chunks(read_some, unpacked, buffer_size=zlib_bufsize,
                              include_comp=include_comp)
    return unpacked, unused
def _compute_object_size(value):
    """Compute the size of a unresolved object for use with LRUSizeCache.

    :param value: Tuple of (type number, object); for delta types the
        object is a (base, chunks) pair and only the chunks are counted.
    """
    num, obj = value
    chunks = obj[1] if num in DELTA_TYPES else obj
    return chunks_length(chunks)
class PackStreamReader(object):
    """Class to read a pack stream.
    The pack is read from a ReceivableProtocol using read() or recv() as
    appropriate.
    """
    def __init__(self, read_all, read_some=None, zlib_bufsize=_ZLIB_BUFSIZE):
        # read_all must block until exactly the requested number of bytes is
        # available; read_some may return fewer (but at least one byte).
        self.read_all = read_all
        if read_some is None:
            self.read_some = read_all
        else:
            self.read_some = read_some
        # Running SHA-1 over everything read so far, excluding the trailing
        # 20 bytes (the pack checksum), which are held back in _trailer.
        self.sha = sha1()
        self._offset = 0
        self._rbuf = BytesIO()
        # trailer is a deque to avoid memory allocation on small reads
        self._trailer = deque()
        self._zlib_bufsize = zlib_bufsize
    def _read(self, read, size):
        """Read up to size bytes using the given callback.
        As a side effect, update the verifier's hash (excluding the last 20
        bytes read).
        :param read: The read callback to read from.
        :param size: The maximum number of bytes to read; the particular
            behavior is callback-specific.
        """
        data = read(size)
        # maintain a trailer of the last 20 bytes we've read
        n = len(data)
        self._offset += n
        tn = len(self._trailer)
        if n >= 20:
            # This read alone covers a full trailer: hash the old trailer
            # bytes and keep the last 20 bytes of the new data instead.
            to_pop = tn
            to_add = 20
        else:
            # Keep the combined held-back tail at no more than 20 bytes.
            to_pop = max(n + tn - 20, 0)
            to_add = n
        self.sha.update(
            bytes(bytearray([self._trailer.popleft() for _ in range(to_pop)])))
        self._trailer.extend(data[-to_add:])
        # hash everything but the trailer
        self.sha.update(data[:-to_add])
        return data
    def _buf_len(self):
        # Number of unread bytes remaining in the internal read buffer.
        buf = self._rbuf
        start = buf.tell()
        buf.seek(0, SEEK_END)
        end = buf.tell()
        buf.seek(start)
        return end - start
    @property
    def offset(self):
        # Logical stream position: bytes consumed from the wire minus what
        # is still sitting unread in the buffer.
        return self._offset - self._buf_len()
    def read(self, size):
        """Read, blocking until size bytes are read."""
        buf_len = self._buf_len()
        if buf_len >= size:
            return self._rbuf.read(size)
        buf_data = self._rbuf.read()
        self._rbuf = BytesIO()
        return buf_data + self._read(self.read_all, size - buf_len)
    def recv(self, size):
        """Read up to size bytes, blocking until one byte is read."""
        buf_len = self._buf_len()
        if buf_len:
            data = self._rbuf.read(size)
            # Buffer fully drained; reset it so offset accounting stays right.
            if size >= buf_len:
                self._rbuf = BytesIO()
            return data
        return self._read(self.read_some, size)
    def __len__(self):
        return self._num_objects
    def read_objects(self, compute_crc32=False):
        """Read the objects in this pack file.
        :param compute_crc32: If True, compute the CRC32 of the compressed
            data. If False, the returned CRC32 will be None.
        :return: Iterator over UnpackedObjects with the following members set:
            offset
            obj_type_num
            obj_chunks (for non-delta types)
            delta_base (for delta types)
            decomp_chunks
            decomp_len
            crc32 (if compute_crc32 is True)
        :raise ChecksumMismatch: if the checksum of the pack contents does not
            match the checksum in the pack trailer.
        :raise zlib.error: if an error occurred during zlib decompression.
        :raise IOError: if an error occurred writing to the output file.
        """
        pack_version, self._num_objects = read_pack_header(self.read)
        if pack_version is None:
            return
        for i in range(self._num_objects):
            offset = self.offset
            unpacked, unused = unpack_object(
                self.read, read_some=self.recv, compute_crc32=compute_crc32,
                zlib_bufsize=self._zlib_bufsize)
            unpacked.offset = offset
            # prepend any unused data to current read buffer
            buf = BytesIO()
            buf.write(unused)
            buf.write(self._rbuf.read())
            buf.seek(0)
            self._rbuf = buf
            yield unpacked
        if self._buf_len() < 20:
            # If the read buffer is full, then the last read() got the whole
            # trailer off the wire. If not, it means there is still some of the
            # trailer to read. We need to read() all 20 bytes; N come from the
            # read buffer and (20 - N) come from the wire.
            self.read(20)
        # _trailer now holds the stored pack checksum; compare it against the
        # SHA-1 of everything that preceded it.
        pack_sha = bytearray(self._trailer)
        if pack_sha != self.sha.digest():
            raise ChecksumMismatch(sha_to_hex(pack_sha), self.sha.hexdigest())
class PackStreamCopier(PackStreamReader):
    """Class to verify a pack stream as it is being read.
    The pack is read from a ReceivableProtocol using read() or recv() as
    appropriate and written out to the given file-like object.
    """
    def __init__(self, read_all, read_some, outfile, delta_iter=None):
        """Initialize the copier.
        :param read_all: Read function that blocks until the number of
            requested bytes are read.
        :param read_some: Read function that returns at least one byte, but may
            not return the number of bytes requested.
        :param outfile: File-like object to write output through.
        :param delta_iter: Optional DeltaChainIterator to record deltas as we
            read them.
        """
        super(PackStreamCopier, self).__init__(read_all, read_some=read_some)
        self.outfile = outfile
        self._delta_iter = delta_iter
    def _read(self, read, size):
        """Read data via the base class and tee every byte to the output file."""
        data = super(PackStreamCopier, self)._read(read, size)
        self.outfile.write(data)
        return data
    def verify(self):
        """Verify a pack stream and write it to the output file.
        See PackStreamReader.iterobjects for a list of exceptions this may
        throw.
        """
        # Draining read_objects() both copies the stream (via _read) and
        # checks the trailing checksum; record deltas if an iterator was given.
        recorder = self._delta_iter.record if self._delta_iter else None
        for unpacked in self.read_objects():
            if recorder is not None:
                recorder(unpacked)
def obj_sha(type, chunks):
    """Return the binary SHA-1 digest for an object with the given numeric
    type and content chunks."""
    digest = sha1()
    digest.update(object_header(type, chunks_length(chunks)))
    # A plain bytes payload is hashed as a single chunk.
    pieces = [chunks] if isinstance(chunks, bytes) else chunks
    for piece in pieces:
        digest.update(piece)
    return digest.digest()
def compute_file_sha(f, start_ofs=0, end_ofs=0, buffer_size=1 << 16):
    """Hash a portion of a file into a new SHA.
    :param f: A file-like object to read from that supports seek().
    :param start_ofs: The offset in the file to start reading at.
    :param end_ofs: The offset in the file to end reading at, relative to the
        end of the file.
    :param buffer_size: A buffer size for reading.
    :return: A new SHA object updated with data read from the file.
    :raise AssertionError: if the requested range lies outside the file, or
        the file turns out to be shorter than expected while reading.
    """
    sha = sha1()
    f.seek(0, SEEK_END)
    length = f.tell()
    if (end_ofs < 0 and length + end_ofs < start_ofs) or end_ofs > length:
        raise AssertionError(
            "Attempt to read beyond file length. "
            "start_ofs: %d, end_ofs: %d, file length: %d" % (
                start_ofs, end_ofs, length))
    todo = length + end_ofs - start_ofs
    f.seek(start_ofs)
    while todo:
        data = f.read(min(todo, buffer_size))
        if not data:
            # Without this guard a short read (e.g. a truncated file) would
            # leave todo > 0 forever and spin in this loop.
            raise AssertionError('Short read of %d bytes' % todo)
        sha.update(data)
        todo -= len(data)
    return sha
class PackData(object):
    """The data contained in a packfile.
    Pack files can be accessed both sequentially for exploding a pack, and
    directly with the help of an index to retrieve a specific object.
    The objects within are either complete or a delta against another.
    The header is variable length. If the MSB of each byte is set then it
    indicates that the subsequent byte is still part of the header.
    For the first byte the next MS bits are the type, which tells you the type
    of object, and whether it is a delta. The LS byte is the lowest bits of the
    size. For each subsequent byte the LS 7 bits are the next MS bits of the
    size, i.e. the last byte of the header contains the MS bits of the size.
    For the complete objects the data is stored as zlib deflated data.
    The size in the header is the uncompressed object size, so to uncompress
    you need to just keep feeding data to zlib until you get an object back,
    or it errors on bad data. This is done here by just giving the complete
    buffer from the start of the deflated object on. This is bad, but until I
    get mmap sorted out it will have to do.
    Currently there are no integrity checks done. Also no attempt is made to
    try and detect the delta case, or a request for an object at the wrong
    position. It will all just throw a zlib or KeyError.
    """
    def __init__(self, filename, file=None, size=None):
        """Create a PackData object representing the pack in the given filename.
        The file must exist and stay readable until the object is disposed of.
        It must also stay the same size. It will be mapped whenever needed.
        Currently there is a restriction on the size of the pack as the python
        mmap implementation is flawed.
        :param filename: Path of the pack file on disk.
        :param file: Optional already-open file object to use instead of
            opening filename.
        :param size: Optional size of the pack file, if already known.
        """
        self._filename = filename
        self._size = size
        self._header_size = 12
        if file is None:
            self._file = GitFile(self._filename, 'rb')
        else:
            self._file = file
        (version, self._num_objects) = read_pack_header(self._file.read)
        # Cache of resolved objects keyed by pack offset, bounded by total
        # cached size (~20 MB).
        self._offset_cache = LRUSizeCache(
            1024*1024*20, compute_size=_compute_object_size)
        # Back-pointer to the owning Pack (set by Pack.data); needed to
        # resolve ref deltas via the pack index / external refs.
        self.pack = None
    @property
    def filename(self):
        return os.path.basename(self._filename)
    @property
    def path(self):
        return self._filename
    @classmethod
    def from_file(cls, file, size):
        # Alternate constructor wrapping an already-open file object.
        return cls(str(file), file=file, size=size)
    @classmethod
    def from_path(cls, path):
        return cls(filename=path)
    def close(self):
        self._file.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def _get_size(self):
        # Lazily determine the file size, sanity-checking that the file is at
        # least large enough to hold a pack header.
        if self._size is not None:
            return self._size
        self._size = os.path.getsize(self._filename)
        if self._size < self._header_size:
            errmsg = ('%s is too small for a packfile (%d < %d)' %
                      (self._filename, self._size, self._header_size))
            raise AssertionError(errmsg)
        return self._size
    def __len__(self):
        """Returns the number of objects in this pack."""
        return self._num_objects
    def calculate_checksum(self):
        """Calculate the checksum for this pack.
        :return: 20-byte binary SHA1 digest
        """
        # Hash the whole file except the trailing 20-byte stored checksum.
        return compute_file_sha(self._file, end_ofs=-20).digest()
    def get_ref(self, sha):
        """Get the object for a ref SHA, only looking in this pack."""
        # TODO: cache these results
        if self.pack is None:
            raise KeyError(sha)
        try:
            offset = self.pack.index.object_index(sha)
        except KeyError:
            offset = None
        # Real offsets are always >= the 12-byte header, so a falsy offset
        # means "not in this pack": try the pack's external ref resolver.
        if offset:
            type, obj = self.get_object_at(offset)
        elif self.pack is not None and self.pack.resolve_ext_ref:
            type, obj = self.pack.resolve_ext_ref(sha)
        else:
            raise KeyError(sha)
        return offset, type, obj
    def resolve_object(self, offset, type, obj, get_ref=None):
        """Resolve an object, possibly resolving deltas when necessary.
        :param offset: Offset in the pack of the object to resolve.
        :param type: Pack type number of the object at that offset.
        :param obj: Raw object data (for deltas, the delta payload).
        :param get_ref: Optional callback to resolve ref delta bases;
            defaults to self.get_ref.
        :return: Tuple with object type and contents.
        """
        # Walk down the delta chain, building a stack of deltas to reach
        # the requested object.
        base_offset = offset
        base_type = type
        base_obj = obj
        delta_stack = []
        while base_type in DELTA_TYPES:
            prev_offset = base_offset
            if get_ref is None:
                get_ref = self.get_ref
            if base_type == OFS_DELTA:
                (delta_offset, delta) = base_obj
                # TODO: clean up asserts and replace with nicer error messages
                # Offset deltas store a backwards distance to their base.
                base_offset = base_offset - delta_offset
                base_type, base_obj = self.get_object_at(base_offset)
                assert isinstance(base_type, int)
            elif base_type == REF_DELTA:
                (basename, delta) = base_obj
                assert isinstance(basename, bytes) and len(basename) == 20
                # base_offset may come back None for externally-resolved refs.
                base_offset, base_type, base_obj = get_ref(basename)
                assert isinstance(base_type, int)
            delta_stack.append((prev_offset, base_type, delta))
        # Now grab the base object (mustn't be a delta) and apply the
        # deltas all the way up the stack.
        chunks = base_obj
        for prev_offset, delta_type, delta in reversed(delta_stack):
            chunks = apply_delta(chunks, delta)
            # TODO(dborowitz): This can result in poor performance if
            # large base objects are separated from deltas in the pack.
            # We should reorganize so that we apply deltas to all
            # objects in a chain one after the other to optimize cache
            # performance.
            if prev_offset is not None:
                self._offset_cache[prev_offset] = base_type, chunks
        return base_type, chunks
    def iterobjects(self, progress=None, compute_crc32=True):
        """Iterate over (offset, pack type number, object, crc32) tuples for
        every object in this pack, in pack order."""
        self._file.seek(self._header_size)
        for i in range(1, self._num_objects + 1):
            offset = self._file.tell()
            unpacked, unused = unpack_object(
                self._file.read, compute_crc32=compute_crc32)
            if progress is not None:
                progress(i, self._num_objects)
            yield (offset, unpacked.pack_type_num, unpacked._obj(),
                   unpacked.crc32)
            # Back up over unused data.
            self._file.seek(-len(unused), SEEK_CUR)
    def _iter_unpacked(self):
        # TODO(dborowitz): Merge this with iterobjects, if we can change its
        # return type.
        self._file.seek(self._header_size)
        if self._num_objects is None:
            return
        for _ in range(self._num_objects):
            offset = self._file.tell()
            unpacked, unused = unpack_object(
                self._file.read, compute_crc32=False)
            unpacked.offset = offset
            yield unpacked
            # Back up over unused data.
            self._file.seek(-len(unused), SEEK_CUR)
    def iterentries(self, progress=None):
        """Yield entries summarizing the contents of this pack.
        :param progress: Progress function, called with current and total
            object count.
        :return: iterator of tuples with (sha, offset, crc32)
        """
        num_objects = self._num_objects
        resolve_ext_ref = (
            self.pack.resolve_ext_ref if self.pack is not None else None)
        indexer = PackIndexer.for_pack_data(
            self, resolve_ext_ref=resolve_ext_ref)
        for i, result in enumerate(indexer):
            if progress is not None:
                progress(i, num_objects)
            yield result
    def sorted_entries(self, progress=None):
        """Return entries in this pack, sorted by SHA.
        :param progress: Progress function, called with current and total
            object count
        :return: List of tuples with (sha, offset, crc32)
        """
        ret = sorted(self.iterentries(progress=progress))
        return ret
    def create_index_v1(self, filename, progress=None):
        """Create a version 1 file for this data file.
        :param filename: Index filename.
        :param progress: Progress report function
        :return: Checksum of index file
        """
        entries = self.sorted_entries(progress=progress)
        with GitFile(filename, 'wb') as f:
            return write_pack_index_v1(f, entries, self.calculate_checksum())
    def create_index_v2(self, filename, progress=None):
        """Create a version 2 index file for this data file.
        :param filename: Index filename.
        :param progress: Progress report function
        :return: Checksum of index file
        """
        entries = self.sorted_entries(progress=progress)
        with GitFile(filename, 'wb') as f:
            return write_pack_index_v2(f, entries, self.calculate_checksum())
    def create_index(self, filename, progress=None,
                     version=2):
        """Create an index file for this data file.
        :param filename: Index filename.
        :param progress: Progress report function
        :param version: Index format version to write (1 or 2).
        :return: Checksum of index file
        """
        if version == 1:
            return self.create_index_v1(filename, progress)
        elif version == 2:
            return self.create_index_v2(filename, progress)
        else:
            raise ValueError('unknown index format %d' % version)
    def get_stored_checksum(self):
        """Return the expected checksum stored in this pack."""
        # The last 20 bytes of a pack file are its SHA-1 checksum.
        self._file.seek(-20, SEEK_END)
        return self._file.read(20)
    def check(self):
        """Check the consistency of this pack."""
        actual = self.calculate_checksum()
        stored = self.get_stored_checksum()
        if actual != stored:
            raise ChecksumMismatch(stored, actual)
    def get_compressed_data_at(self, offset):
        """Given offset in the packfile return compressed data that is there.
        Using the associated index the location of an object can be looked up,
        and then the packfile can be asked directly for that object using this
        function.
        """
        assert offset >= self._header_size
        self._file.seek(offset)
        unpacked, _ = unpack_object(self._file.read, include_comp=True)
        return (unpacked.pack_type_num, unpacked.delta_base,
                unpacked.comp_chunks)
    def get_object_at(self, offset):
        """Given an offset in to the packfile return the object that is there.
        Using the associated index the location of an object can be looked up,
        and then the packfile can be asked directly for that object using this
        function.
        """
        try:
            return self._offset_cache[offset]
        except KeyError:
            pass
        assert offset >= self._header_size
        self._file.seek(offset)
        unpacked, _ = unpack_object(self._file.read)
        return (unpacked.pack_type_num, unpacked._obj())
class DeltaChainIterator(object):
    """Abstract iterator over pack data based on delta chains.
    Each object in the pack is guaranteed to be inflated exactly once,
    regardless of how many objects reference it as a delta base. As a result,
    memory usage is proportional to the length of the longest delta chain.
    Subclasses can override _result to define the result type of the iterator.
    By default, results are UnpackedObjects with the following members set:
    * offset
    * obj_type_num
    * obj_chunks
    * pack_type_num
    * delta_base (for delta types)
    * comp_chunks (if _include_comp is True)
    * decomp_chunks
    * decomp_len
    * crc32 (if _compute_crc32 is True)
    """
    _compute_crc32 = False
    _include_comp = False
    def __init__(self, file_obj, resolve_ext_ref=None):
        self._file = file_obj
        self._resolve_ext_ref = resolve_ext_ref
        # Offsets of offset-delta objects, keyed by their base's pack offset.
        self._pending_ofs = defaultdict(list)
        # Offsets of ref-delta objects, keyed by their base's binary SHA.
        self._pending_ref = defaultdict(list)
        # (offset, type_num) pairs for full (non-delta) objects; chain roots.
        self._full_ofs = []
        self._shas = {}
        # SHAs of bases that had to be resolved outside this pack.
        self._ext_refs = []
    @classmethod
    def for_pack_data(cls, pack_data, resolve_ext_ref=None):
        # Build an iterator pre-populated with all objects in pack_data.
        walker = cls(None, resolve_ext_ref=resolve_ext_ref)
        walker.set_pack_data(pack_data)
        for unpacked in pack_data._iter_unpacked():
            walker.record(unpacked)
        return walker
    def record(self, unpacked):
        """File an object under the base it needs, or as a chain root."""
        type_num = unpacked.pack_type_num
        offset = unpacked.offset
        if type_num == OFS_DELTA:
            base_offset = offset - unpacked.delta_base
            self._pending_ofs[base_offset].append(offset)
        elif type_num == REF_DELTA:
            self._pending_ref[unpacked.delta_base].append(offset)
        else:
            self._full_ofs.append((offset, type_num))
    def set_pack_data(self, pack_data):
        self._file = pack_data._file
    def _walk_all_chains(self):
        # Full objects root the chains; every delta hangs off one of them
        # (directly or transitively) or off an external ref.
        for offset, type_num in self._full_ofs:
            for result in self._follow_chain(offset, type_num, None):
                yield result
        for result in self._walk_ref_chains():
            yield result
        assert not self._pending_ofs
    def _ensure_no_pending(self):
        if self._pending_ref:
            raise KeyError([sha_to_hex(s) for s in self._pending_ref])
    def _walk_ref_chains(self):
        if not self._resolve_ext_ref:
            self._ensure_no_pending()
            return
        for base_sha, pending in sorted(self._pending_ref.items()):
            # The entry may already have been popped while following an
            # earlier chain; skip it in that case.
            if base_sha not in self._pending_ref:
                continue
            try:
                type_num, chunks = self._resolve_ext_ref(base_sha)
            except KeyError:
                # Not an external ref, but may depend on one. Either it will
                # get popped via a _follow_chain call, or we will raise an
                # error below.
                continue
            self._ext_refs.append(base_sha)
            self._pending_ref.pop(base_sha)
            for new_offset in pending:
                for result in self._follow_chain(new_offset, type_num, chunks):
                    yield result
        self._ensure_no_pending()
    def _result(self, unpacked):
        # Subclass hook: transform each inflated object before yielding it.
        return unpacked
    def _resolve_object(self, offset, obj_type_num, base_chunks):
        # Inflate the object at offset; if base_chunks is given, the object
        # must be a delta and is resolved against that base.
        self._file.seek(offset)
        unpacked, _ = unpack_object(
            self._file.read, include_comp=self._include_comp,
            compute_crc32=self._compute_crc32)
        unpacked.offset = offset
        if base_chunks is None:
            assert unpacked.pack_type_num == obj_type_num
        else:
            assert unpacked.pack_type_num in DELTA_TYPES
            unpacked.obj_type_num = obj_type_num
            unpacked.obj_chunks = apply_delta(base_chunks,
                                              unpacked.decomp_chunks)
        return unpacked
    def _follow_chain(self, offset, obj_type_num, base_chunks):
        # Unlike PackData.get_object_at, there is no need to cache offsets as
        # this approach by design inflates each object exactly once.
        todo = [(offset, obj_type_num, base_chunks)]
        for offset, obj_type_num, base_chunks in todo:
            unpacked = self._resolve_object(offset, obj_type_num, base_chunks)
            yield self._result(unpacked)
            # Everything that used this object as a base is now unblocked.
            unblocked = chain(self._pending_ofs.pop(unpacked.offset, []),
                              self._pending_ref.pop(unpacked.sha(), []))
            todo.extend(
                (new_offset, unpacked.obj_type_num, unpacked.obj_chunks)
                for new_offset in unblocked)
    def __iter__(self):
        return self._walk_all_chains()
    def ext_refs(self):
        return self._ext_refs
class PackIndexer(DeltaChainIterator):
    """Delta chain iterator that yields index entries."""
    # Index entries include the CRC32 of the compressed data, so ask the
    # base class to compute it while unpacking.
    _compute_crc32 = True
    def _result(self, unpacked):
        # (binary object sha, offset in the pack, crc32 of compressed data)
        return unpacked.sha(), unpacked.offset, unpacked.crc32
class PackInflater(DeltaChainIterator):
    """Delta chain iterator that yields ShaFile objects."""
    def _result(self, unpacked):
        # Convert each fully-inflated object into its ShaFile representation.
        return unpacked.sha_file()
class SHA1Reader(object):
    """Wrapper for file-like object that remembers the SHA1 of its data."""
    def __init__(self, f):
        self.f = f
        self.sha1 = sha1(b'')
    def read(self, num=None):
        """Read from the wrapped file, folding the data into the digest."""
        chunk = self.f.read(num)
        self.sha1.update(chunk)
        return chunk
    def check_sha(self):
        """Verify the next 20 file bytes against the running digest.
        :raise ChecksumMismatch: if the stored checksum differs.
        """
        expected = self.f.read(20)
        if expected != self.sha1.digest():
            raise ChecksumMismatch(self.sha1.hexdigest(), sha_to_hex(expected))
    def close(self):
        return self.f.close()
    def tell(self):
        return self.f.tell()
class SHA1Writer(object):
    """Wrapper for file-like object that remembers the SHA1 of its data."""
    def __init__(self, f):
        self.f = f
        # Total number of bytes written so far (including any trailer).
        self.length = 0
        self.sha1 = sha1(b'')
    def write(self, data):
        """Write data, updating both the running digest and the byte count."""
        self.sha1.update(data)
        self.f.write(data)
        self.length += len(data)
    def write_sha(self):
        """Append the 20-byte digest of everything written so far.
        :return: The binary SHA-1 digest that was written.
        """
        digest = self.sha1.digest()
        assert len(digest) == 20
        self.f.write(digest)
        self.length += len(digest)
        return digest
    def close(self):
        """Write the trailing digest and close the underlying file."""
        digest = self.write_sha()
        self.f.close()
        return digest
    def offset(self):
        return self.length
    def tell(self):
        return self.f.tell()
def pack_object_header(type_num, delta_base, size):
    """Create a pack object header for the given object info.
    :param type_num: Numeric type of the object.
    :param delta_base: Delta base offset or ref, or None for whole objects.
    :param size: Uncompressed object size.
    :return: A header for a packed object, as a bytearray.
    """
    header = bytearray()
    # First byte: 3 bits of type plus the low 4 bits of the size; the MSB on
    # every byte marks a continuation.
    c = (type_num << 4) | (size & 15)
    size >>= 4
    while True:
        if not size:
            header.append(c)
            break
        header.append(c | 0x80)
        c = size & 0x7f
        size >>= 7
    if type_num == OFS_DELTA:
        # Offset deltas encode the base distance big-endian, 7 bits per byte,
        # with an off-by-one adjustment on all but the last byte.
        base_bytes = [delta_base & 0x7f]
        delta_base >>= 7
        while delta_base:
            delta_base -= 1
            base_bytes.insert(0, 0x80 | (delta_base & 0x7f))
            delta_base >>= 7
        header.extend(base_bytes)
    elif type_num == REF_DELTA:
        # Ref deltas carry the full 20-byte SHA of their base.
        assert len(delta_base) == 20
        header += delta_base
    return bytearray(header)
def write_pack_object(f, type, object, sha=None):
    """Write pack object to a file.
    :param f: File to write to
    :param type: Numeric type of the object
    :param object: Object to write; for delta types, a (delta base, data)
        tuple
    :param sha: Optional SHA-1 hash object to update with the written bytes
    :return: CRC32 checksum of the header and compressed data written
    """
    # NOTE: the docstring previously claimed this returned a (offset, crc32)
    # tuple; it has only ever returned the crc32.
    if type in DELTA_TYPES:
        delta_base, object = object
    else:
        delta_base = None
    header = bytes(pack_object_header(type, delta_base, len(object)))
    comp_data = zlib.compress(object)
    crc32 = 0
    for data in (header, comp_data):
        f.write(data)
        if sha is not None:
            sha.update(data)
        crc32 = binascii.crc32(data, crc32)
    # binascii.crc32 may be signed; mask to the unsigned 32-bit value.
    return crc32 & 0xffffffff
def write_pack(filename, objects, deltify=None, delta_window_size=None):
    """Write a new pack data file and its matching index.
    :param filename: Path to the new pack file (without .pack extension)
    :param objects: Iterable of (object, path) tuples to write.
        Should provide __len__
    :param deltify: Whether to deltify pack objects
    :param delta_window_size: Delta window size
    :return: Tuple with checksum of pack file and index file
    """
    with GitFile(filename + '.pack', 'wb') as f:
        entries, data_sum = write_pack_objects(
            f, objects, delta_window_size=delta_window_size, deltify=deltify)
    # Flatten {sha: (offset, crc32)} into name-sorted (sha, offset, crc32).
    index_entries = sorted(
        (name, offset, crc32) for (name, (offset, crc32)) in entries.items())
    with GitFile(filename + '.idx', 'wb') as f:
        return data_sum, write_pack_index_v2(f, index_entries, data_sum)
def write_pack_header(f, num_objects):
    """Write a version-2 pack header for the given number of objects.
    :param f: File-like object to write to
    :param num_objects: Number of objects that will follow in the pack
    """
    # Magic, pack version (always 2), then the object count -- big-endian.
    f.write(struct.pack(b'>4sLL', b'PACK', 2, num_objects))
def deltify_pack_objects(objects, window_size=None):
    """Generate deltas for pack objects.
    :param objects: An iterable of (object, path) tuples to deltify.
    :param window_size: Window size; None for default
    :return: Iterator over type_num, object id, delta_base, content
        delta_base is None for full text entries
    """
    # TODO(jelmer): Use threads
    if window_size is None:
        window_size = DEFAULT_PACK_DELTA_WINDOW_SIZE
    # Order objects by type, path, and descending raw size (the "magic Linus
    # heuristic") so similar objects land near each other in the window.
    by_magic = sorted(
        (obj.type_num, path, -obj.raw_length(), obj) for obj, path in objects)
    possible_bases = deque()
    for type_num, path, neg_length, o in by_magic:
        raw = o.as_raw_string()
        # Start from the full text; keep the smallest representation found.
        winner = raw
        winner_base = None
        for base in possible_bases:
            if base.type_num != type_num:
                continue
            delta = create_delta(base.as_raw_string(), raw)
            if len(delta) < len(winner):
                winner_base = base.sha().digest()
                winner = delta
        yield type_num, o.sha().digest(), winner_base, winner
        # Slide the window: newest object in front, oldest dropped off.
        possible_bases.appendleft(o)
        while len(possible_bases) > window_size:
            possible_bases.pop()
def pack_objects_to_data(objects):
    """Create pack data from objects
    :param objects: Pack objects; must support len()
    :return: Tuple of (object count, iterator over
        (type_num, sha digest, delta base, raw string) tuples)
    """
    count = len(objects)
    def _entries():
        # Full-text entries only: delta base is always None here.
        for o, path in objects:
            yield (o.type_num, o.sha().digest(), None, o.as_raw_string())
    return (count, _entries())
def write_pack_objects(f, objects, delta_window_size=None, deltify=None):
    """Write a new pack data file.
    :param f: File to write to
    :param objects: Iterable of (object, path) tuples to write.
        Should provide __len__
    :param delta_window_size: Sliding window size for searching for deltas;
        None for the default size.
    :param deltify: Whether to deltify objects
    :return: Dict mapping id -> (offset, crc32 checksum), pack checksum
    """
    if deltify is None:
        # PERFORMANCE/TODO(jelmer): This should be enabled but is *much* too
        # slow at the moment.
        deltify = False
    if deltify:
        count = len(objects)
        contents = deltify_pack_objects(objects, delta_window_size)
    else:
        count, contents = pack_objects_to_data(objects)
    return write_pack_data(f, count, contents)
def write_pack_data(f, num_records, records, progress=None):
    """Write a new pack data file.
    :param f: File to write to
    :param num_records: Number of records
    :param records: Iterator over type_num, object_id, delta_base, raw
    :param progress: Function to report progress to
    :return: Dict mapping id -> (offset, crc32 checksum), pack checksum
    """
    entries = {}
    writer = SHA1Writer(f)
    write_pack_header(writer, num_records)
    for i, (type_num, object_id, delta_base, raw) in enumerate(records):
        if progress is not None:
            progress((
                'writing pack data: %d/%d\r' %
                (i, num_records)).encode('ascii'))
        offset = writer.offset()
        if delta_base is not None:
            # Prefer an offset delta when the base is already in this pack;
            # otherwise fall back to a ref delta against its SHA.
            base_entry = entries.get(delta_base)
            if base_entry is None:
                type_num = REF_DELTA
                raw = (delta_base, raw)
            else:
                type_num = OFS_DELTA
                raw = (offset - base_entry[0], raw)
        crc32 = write_pack_object(writer, type_num, raw)
        entries[object_id] = (offset, crc32)
    return entries, writer.write_sha()
def write_pack_index_v1(f, entries, pack_checksum):
    """Write a new pack index file.
    :param f: A file-like object to write to
    :param entries: List of tuples with object name (sha), offset_in_pack,
        and crc32_checksum.
    :param pack_checksum: Checksum of the pack file.
    :return: The SHA of the written index file
    """
    writer = SHA1Writer(f)
    # Count how many object names start with each possible first byte.
    fan_out = defaultdict(int)
    for (name, _, _) in entries:
        fan_out[ord(name[:1])] += 1
    # Write the 256-entry cumulative fan-out table.
    for i in range(0x100):
        writer.write(struct.pack('>L', fan_out[i]))
        fan_out[i + 1] += fan_out[i]
    # v1 entries are (offset, name) pairs; only 32-bit offsets fit.
    for (name, offset, _) in entries:
        if not (offset <= 0xffffffff):
            raise TypeError("pack format 1 only supports offsets < 2Gb")
        writer.write(struct.pack('>L20s', offset, name))
    assert len(pack_checksum) == 20
    writer.write(pack_checksum)
    return writer.write_sha()
def _delta_encode_size(size):
ret = bytearray()
c = size & 0x7f
size >>= 7
while size:
ret.append(c | 0x80)
c = size & 0x7f
size >>= 7
ret.append(c)
return ret
# The length of a single delta-compression copy operation in version 2 packs
# is limited to 64K. To copy more, we emit several copy operations. Version 3
# packs allow 24-bit lengths in copy operations, but we always make version 2
# packs.
_MAX_COPY_LEN = 0xffff
def _encode_copy_operation(start, length):
scratch = []
op = 0x80
for i in range(4):
if start & 0xff << i*8:
scratch.append((start >> i*8) & 0xff)
op |= 1 << i
for i in range(2):
if length & 0xff << i*8:
scratch.append((length >> i*8) & 0xff)
op |= 1 << (4+i)
return bytearray([op] + scratch)
def create_delta(base_buf, target_buf):
    """Use python difflib to work out how to transform base_buf to target_buf.
    :param base_buf: Base buffer
    :param target_buf: Target buffer
    :return: The delta instructions as bytes
    """
    assert isinstance(base_buf, bytes)
    assert isinstance(target_buf, bytes)
    delta = bytearray()
    # Delta header: varint-encoded source then target size.
    delta += _delta_encode_size(len(base_buf))
    delta += _delta_encode_size(len(target_buf))
    matcher = difflib.SequenceMatcher(a=base_buf, b=target_buf)
    for opcode, i1, i2, j1, j2 in matcher.get_opcodes():
        # Deletions need no instruction: the target simply never copies
        # that range of the base.
        if opcode == 'equal':
            # Copy a matching range from the base, split into chunks no
            # larger than the v2 pack copy limit.
            copy_start = i1
            remaining = i2 - i1
            while remaining > 0:
                step = min(remaining, _MAX_COPY_LEN)
                delta += _encode_copy_operation(copy_start, step)
                copy_start += step
                remaining -= step
        if opcode in ('replace', 'insert'):
            # Emit literal bytes from the target, at most 127 per
            # instruction (the length lives in the opcode byte itself).
            count = j2 - j1
            pos = j1
            while count > 127:
                delta.append(127)
                delta += bytearray(target_buf[pos:pos + 127])
                count -= 127
                pos += 127
            delta.append(count)
            delta += bytearray(target_buf[pos:pos + count])
    return bytes(delta)
def apply_delta(src_buf, delta):
    """Based on the similar function in git's patch-delta.c.
    :param src_buf: Source buffer
    :param delta: Delta instructions
    :return: List of chunks making up the reconstructed target
    :raise ApplyDeltaError: on a malformed delta or a size mismatch
    """
    if not isinstance(src_buf, bytes):
        src_buf = b''.join(src_buf)
    if not isinstance(delta, bytes):
        delta = b''.join(delta)
    out = []
    index = 0
    delta_length = len(delta)
    def get_delta_header_size(delta, index):
        # Decode a little-endian base-128 varint starting at index; the MSB
        # of each byte marks continuation.
        size = 0
        i = 0
        while delta:
            cmd = ord(delta[index:index+1])
            index += 1
            size |= (cmd & ~0x80) << i
            i += 7
            if not cmd & 0x80:
                break
        return size, index
    src_size, index = get_delta_header_size(delta, index)
    dest_size, index = get_delta_header_size(delta, index)
    assert src_size == len(src_buf), '%d vs %d' % (src_size, len(src_buf))
    while index < delta_length:
        cmd = ord(delta[index:index+1])
        index += 1
        if cmd & 0x80:
            # Copy instruction: the low 4 bits of cmd select which offset
            # bytes follow, the next 3 bits select which size bytes follow.
            cp_off = 0
            for i in range(4):
                if cmd & (1 << i):
                    x = ord(delta[index:index+1])
                    index += 1
                    cp_off |= x << (i * 8)
            cp_size = 0
            # Version 3 packs can contain copy sizes larger than 64K.
            for i in range(3):
                if cmd & (1 << (4+i)):
                    x = ord(delta[index:index+1])
                    index += 1
                    cp_size |= x << (i * 8)
            # An all-zero size encodes 64K.
            if cp_size == 0:
                cp_size = 0x10000
            if (cp_off + cp_size < cp_size or
                cp_off + cp_size > src_size or
                cp_size > dest_size):
                # Out-of-range copy: stop processing, mirroring the silent
                # break in git's patch-delta.c; the final size check below
                # will then report the error.
                break
            out.append(src_buf[cp_off:cp_off+cp_size])
        elif cmd != 0:
            # Literal instruction: the next cmd bytes are inserted verbatim.
            out.append(delta[index:index+cmd])
            index += cmd
        else:
            raise ApplyDeltaError('Invalid opcode 0')
    if index != delta_length:
        raise ApplyDeltaError('delta not empty: %r' % delta[index:])
    if dest_size != chunks_length(out):
        raise ApplyDeltaError('dest size incorrect')
    return out
def write_pack_index_v2(f, entries, pack_checksum):
    """Write a new pack index file.
    :param f: File-like object to write to
    :param entries: List of tuples with object name (sha), offset_in_pack, and
        crc32_checksum.
    :param pack_checksum: Checksum of the pack file.
    :return: The SHA of the index file written
    """
    writer = SHA1Writer(f)
    writer.write(b'\377tOc')
    writer.write(struct.pack('>L', 2))
    # Cumulative fan-out table over the first byte of each object name.
    fan_out = defaultdict(int)
    for (name, _, _) in entries:
        fan_out[ord(name[:1])] += 1
    largetable = []
    for i in range(0x100):
        writer.write(struct.pack(b'>L', fan_out[i]))
        fan_out[i + 1] += fan_out[i]
    # v2 layout: all names, then all CRC32s, then all offsets.
    for (name, _, _) in entries:
        writer.write(name)
    for (_, _, entry_checksum) in entries:
        writer.write(struct.pack(b'>L', entry_checksum))
    for (_, offset, _) in entries:
        if offset < 2**31:
            writer.write(struct.pack(b'>L', offset))
        else:
            # Large offsets go into a separate 64-bit table; the 32-bit slot
            # stores the table index with the high bit set.
            writer.write(struct.pack(b'>L', 2**31 + len(largetable)))
            largetable.append(offset)
    for offset in largetable:
        writer.write(struct.pack(b'>Q', offset))
    assert len(pack_checksum) == 20
    writer.write(pack_checksum)
    return writer.write_sha()
# Default pack index writer: version 2 (CRC32s and 64-bit offset support).
write_pack_index = write_pack_index_v2
class Pack(object):
    """A Git pack object.

    Wraps a pack data file (``*.pack``) and its index (``*.idx``) behind a
    single object.  Both files are loaded lazily on first use.
    """

    def __init__(self, basename, resolve_ext_ref=None):
        """Create a pack backed by ``<basename>.pack`` / ``<basename>.idx``.

        :param basename: Path prefix of the on-disk pack (no extension)
        :param resolve_ext_ref: Optional callable used to resolve external
            (thin-pack) object references
        """
        self._basename = basename
        self._data = None
        self._idx = None
        self._idx_path = self._basename + '.idx'
        self._data_path = self._basename + '.pack'
        # Loaders are lazy: merely constructing a Pack performs no I/O.
        self._data_load = lambda: PackData(self._data_path)
        self._idx_load = lambda: load_pack_index(self._idx_path)
        self.resolve_ext_ref = resolve_ext_ref

    @classmethod
    def from_lazy_objects(cls, data_fn, idx_fn):
        """Create a new pack object from callables to load pack data and
        index objects."""
        # Use cls (the original named this parameter "self" and hardcoded
        # Pack) so subclasses construct instances of themselves.
        ret = cls('')
        ret._data_load = data_fn
        ret._idx_load = idx_fn
        return ret

    @classmethod
    def from_objects(cls, data, idx):
        """Create a new pack object from pack data and index objects."""
        ret = cls('')
        ret._data_load = lambda: data
        ret._idx_load = lambda: idx
        return ret

    def name(self):
        """The SHA over the SHAs of the objects in this pack."""
        return self.index.objects_sha1()

    @property
    def data(self):
        """The pack data object being used."""
        if self._data is None:
            self._data = self._data_load()
            self._data.pack = self
            # Verify index and data describe the same pack before use.
            self.check_length_and_checksum()
        return self._data

    @property
    def index(self):
        """The index being used.

        :note: This may be an in-memory index
        """
        if self._idx is None:
            self._idx = self._idx_load()
        return self._idx

    def close(self):
        if self._data is not None:
            self._data.close()
        if self._idx is not None:
            self._idx.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def __eq__(self, other):
        return isinstance(self, type(other)) and self.index == other.index

    def __len__(self):
        """Number of entries in this pack."""
        return len(self.index)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self._basename)

    def __iter__(self):
        """Iterate over all the sha1s of the objects in this pack."""
        return iter(self.index)

    def check_length_and_checksum(self):
        """Sanity check the length and checksum of the pack index and data."""
        assert len(self.index) == len(self.data)
        idx_stored_checksum = self.index.get_pack_checksum()
        data_stored_checksum = self.data.get_stored_checksum()
        if idx_stored_checksum != data_stored_checksum:
            raise ChecksumMismatch(sha_to_hex(idx_stored_checksum),
                                   sha_to_hex(data_stored_checksum))

    def check(self):
        """Check the integrity of this pack.

        :raise ChecksumMismatch: if a checksum for the index or data is wrong
        """
        self.index.check()
        self.data.check()
        for obj in self.iterobjects():
            obj.check()
        # TODO: object connectivity checks

    def get_stored_checksum(self):
        return self.data.get_stored_checksum()

    def __contains__(self, sha1):
        """Check whether this pack contains a particular SHA1."""
        try:
            self.index.object_index(sha1)
            return True
        except KeyError:
            return False

    def get_raw_unresolved(self, sha1):
        """Get raw unresolved data for a SHA.

        :param sha1: SHA to return data for
        :return: Tuple with pack object type, delta base (if applicable),
            list of data chunks
        """
        offset = self.index.object_index(sha1)
        (obj_type, delta_base, chunks) = self.data.get_compressed_data_at(
                offset)
        if obj_type == OFS_DELTA:
            # Convert the relative offset into the base object's hex SHA so
            # callers receive a self-contained REF_DELTA reference.
            delta_base = sha_to_hex(
                    self.index.object_sha1(offset - delta_base))
            obj_type = REF_DELTA
        return (obj_type, delta_base, chunks)

    def get_raw(self, sha1):
        offset = self.index.object_index(sha1)
        obj_type, obj = self.data.get_object_at(offset)
        type_num, chunks = self.data.resolve_object(offset, obj_type, obj)
        return type_num, b''.join(chunks)

    def __getitem__(self, sha1):
        """Retrieve the specified SHA1."""
        # Local renamed from "type", which shadowed the builtin.
        type_num, uncomp = self.get_raw(sha1)
        return ShaFile.from_raw_string(type_num, uncomp, sha=sha1)

    def iterobjects(self):
        """Iterate over the objects in this pack."""
        return iter(PackInflater.for_pack_data(
            self.data, resolve_ext_ref=self.resolve_ext_ref))

    def pack_tuples(self):
        """Provide an iterable for use with write_pack_objects.

        :return: Object that can iterate over (object, path) tuples
            and provides __len__
        """

        class PackTupleIterable(object):

            def __init__(self, pack):
                self.pack = pack

            def __len__(self):
                return len(self.pack)

            def __iter__(self):
                return ((o, None) for o in self.pack.iterobjects())

        return PackTupleIterable(self)

    def keep(self, msg=None):
        """Add a .keep file for the pack, preventing git from garbage collecting it.

        :param msg: A message written inside the .keep file; can be used later
            to determine whether or not a .keep file is obsolete.
        :return: The path of the .keep file, as a string.
        """
        keepfile_name = '%s.keep' % self._basename
        with GitFile(keepfile_name, 'wb') as keepfile:
            if msg:
                keepfile.write(msg)
                keepfile.write(b'\n')
        return keepfile_name
try:
    # Prefer the C implementations of these hot-path helpers when the
    # compiled extension is available; otherwise keep the pure-Python
    # versions defined earlier in this module (hence the F811 suppression).
    from dulwich._pack import apply_delta, bisect_find_sha  # noqa: F811
except ImportError:
    pass
| muhkuh-sys/org.muhkuh.tools-flasher | jonchki/dulwich/pack.py | Python | gpl-2.0 | 68,723 | 0.000029 |
#!/usr/bin/env python
"""
Copyright 2010-2018 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This module defines the configuration parameters for the BBToolbox script
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
# Import Broadband modules
import cc
import bband_utils
class BBToolboxCfg(object):
    """
    Define the configuration parameters for the SDSU BBToolbox program
    """
    # Kept for backward compatibility with code that reads the class
    # attribute; every instance now gets its own dict in __init__ so
    # instances never share parsed state (the class-level {} is mutable).
    cfgdict = {}

    def getval(self, attr):
        """
        Return the value stored for *attr* in the parsed source file,
        printing an error and exiting the program if it is missing.
        """
        try:
            val = self.cfgdict[attr]
        except KeyError:
            print("Invalid Source File - Missing attribute: %s" % (attr))
            print("Exiting")
            sys.exit(1)
        return val

    def parse_src(self, a_srcfile):
        """
        This function calls bband_utils's parse property file function
        to get a dictionary of key, value pairs and then looks for a
        the parameters needed by bbtoolbox
        """
        self.cfgdict = bband_utils.parse_properties(a_srcfile)

        # Required parameters: getval() exits if any is missing.
        val = self.getval("depth_to_top")
        self.DEPTH_TO_TOP = float(val)

        val = self.getval("fault_length")
        self.LENGTH = float(val)

        val = self.getval("dip")
        self.DIP = float(val)

        val = self.getval("rake")
        self.RAKE = float(val)

        val = self.getval("hypo_along_stk")
        self.HYPO_ALONG_STK = float(val)

        val = self.getval("hypo_down_dip")
        self.HYPO_DOWN_DIP = float(val)

        val = self.getval("magnitude")
        self.MAG = float(val)

        val = self.getval("seed")
        # Seeds may be written as "123.0" in the source file.
        self.SEED = int(float(val))

        # Now look for the optional grid parameters
        if 'grid_x' in self.cfgdict:
            self.grid_x = float(self.getval("grid_x"))
        if 'grid_y' in self.cfgdict:
            self.grid_y = float(self.getval("grid_y"))
        if 'grid_z' in self.cfgdict:
            self.grid_z = float(self.getval("grid_z"))

        #
        # Read parameters out of the source file to obtain parameters
        # needed by the BBcoda codes
        #
        fcodes = cc.find_fx_fy_fz(self.HYPO_ALONG_STK,
                                  self.LENGTH,
                                  self.DIP,
                                  self.HYPO_DOWN_DIP,
                                  self.DEPTH_TO_TOP)
        self.fsx = fcodes[0]
        self.fsy = fcodes[1]
        self.fsz = fcodes[2]
        #print ("ETH conversion from hypalongstk: "
        #       "%f flength: %f dip: %f hypdowndip: %f depthtotop: %f\n" %
        #       (self.HYPO_ALONG_STK,
        #        self.LENGTH,
        #        self.DIP,
        #        self.HYPO_DOWN_DIP,
        #        self.DEPTH_TO_TOP))
        #print ("resulting fsx: %f fxy: %f fsz: %s\n" % (self.fsx,
        #                                                self.fsy,
        #                                                self.fsz))

    def calculate_stress(self):
        """
        This function calculates the stress parameters for SDSU based
        on the depth of the fault. These values are calibrated for use
        in Eastern North America
        """
        # Linear depth-to-stress relation, converted from MPa to Pa.
        stress = 16.0 * self.DEPTH_TO_TOP + 225
        stress = stress * 10**6
        return stress

    def __init__(self, a_srcfile=None):
        """
        Set up some parameters for BBToolbox
        """
        # Per-instance dict (see class attribute comment above).
        self.cfgdict = {}
        self.MAG = None
        self.grid_x = None
        self.grid_y = None
        self.grid_z = 125.0
        self.copy_lf_seismograms = True

        # Parse src file, if given
        if a_srcfile:
            self.parse_src(a_srcfile)

        self.MODALITY = 1
        # GS_FLAG: Don't change it here, override it in the velocity
        # model config file using a 'CODEBASE_SDSU_GS_FLAG = XXX' line
        # 1: Western US (active region),
        # 2: Eastern NA (stable region),
        # 3: Japan
        self.GS_FLAG = 1
        # NGAW_FLAG: Don't change it here, override it in the velocity
        # model config file using a 'CODEBASE_SDSU_NGAW_FLAG = XXX' line
        # 1: NGA-WEST1
        # 2: NGA-WEST2
        self.NGAW_FLAG = 2
        self.KAPPA = 0.04
        self.Q_CODA = 150.0
        self.FDEC = 0.8
        self.AFAC = 41.0
        self.BFAC = 34.0
        self.SOURCE_MECH = "rs"
        self.SOURCE_FUNC = "dreg"
        self.VERBOSE = "on"
        self.TR_SCA = 0.075
        self.STR_FAC = 50.e6
        # 06/10/11: Sandarsh MK
        # Note: Setting FMAX = 20.00 Hz will
        # cause BBtoolbox to produce NaNs in 000 and 090 seismograms.
        self.FMAX = 100.00
if __name__ == "__main__":
    # Smoke test: build a default config and report which script ran.
    BBCODA2 = BBToolboxCfg()
    print("Created Test Config Class: %s" % (os.path.basename(sys.argv[0])))
| SCECcode/BBP | bbp/comps/bbtoolbox_cfg.py | Python | apache-2.0 | 5,200 | 0.000769 |
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
#
# $Id: test_LD_X_decr.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the LD_X_decr opcode.
"""
import base_test
from registers import Reg, SREG
# Opcode-specific failure type so test reports name the failing opcode.
class LD_X_decr_TestFail(base_test.TestFail): pass
class base_LD_X_decr(base_test.opcode_test):
	"""Generic test case for testing LD_X_decr opcode.

	LD_X_decr - Load Indirect from data space to Register using index X and
	pre decrement X.

	Operation: X <- X - 1 then Rd <- (X)

	opcode is '1001 000d dddd 1110' where 0 <= d <= 31 and d != {26,27}

	Only registers PC, R26, R27 and Rd should be changed.
	"""
	def setup(self):
		# Set the register values
		self.setup_regs[self.Rd] = 0
		# X is a 16-bit pointer split across R26 (low byte) / R27 (high byte).
		self.setup_regs[Reg.R26] = (self.X & 0xff)
		self.setup_regs[Reg.R27] = ((self.X >> 8) & 0xff)

		# set up the val in memory (memory is read after X is decremented,
		# thus we need to write to memory _at_ X - 1)
		self.mem_byte_write( self.X - 1, self.Vd )

		# Return the raw opcode
		# Base pattern 1001 000d dddd 1110 with Rd in bits 4-8.
		return 0x900E | (self.Rd << 4)

	def analyze_results(self):
		self.reg_changed.extend( [self.Rd, Reg.R26, Reg.R27] )
		
		# check that result is correct
		expect = self.Vd
		got = self.anal_regs[self.Rd]
		
		if expect != got:
			self.fail('LD_X_decr: expect=%02x, got=%02x' % (expect, got))

		# check that X was decremented
		expect = self.X - 1
		# Reassemble the 16-bit X pointer from its two register halves.
		got = (self.anal_regs[Reg.R26] & 0xff) | ((self.anal_regs[Reg.R27] << 8) & 0xff00)

		if expect != got:
			self.fail('LD_X_decr X not decr: expect=%04x, got=%04x' % (expect, got))
#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """
class LD_X_decr_r%02d_X%04x_v%02x_TestFail(LD_X_decr_TestFail): pass

class test_LD_X_decr_r%02d_X%04x_v%02x(base_LD_X_decr):
	Rd = %d
	X = 0x%x
	Vd = 0x%x
	def fail(self,s):
		raise LD_X_decr_r%02d_X%04x_v%02x_TestFail, s
"""

#
# automagically generate the test_LD_X_decr_rNN_vXX class definitions.
#
# Operation is undefined for d = 26 and d = 27.
#
# NOTE(review): Python 2 only -- range() objects cannot be concatenated on
# Python 3, "exec code" is the statement form, and the template emits the
# Python 2 "raise Class, arg" syntax.  Porting would need list(range(...)),
# exec(code) and "raise Class(arg)".
code = ''
for d in range(0,26)+range(28,32):
	for x in (0x10f, 0x1ff):
		for v in (0xaa, 0x55):
			args = (d,x,v)*4
			code += template % args
exec code
| zouppen/simulavr | regress/test_opcodes/test_LD_X_decr.py | Python | gpl-2.0 | 3,080 | 0.017857 |
import sys
import lofarpipe.support.lofaringredient as ingredient
from lofarpipe.support.baserecipe import BaseRecipe
from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
from lofarpipe.support.remotecommand import ComputeJob
from lofarpipe.support.data_map import DataMap, validate_data_maps, \
align_data_maps
class imager_finalize(BaseRecipe, RemoteCommandRecipeMixIn):
    """
    The Imager_finalizer performs a number of steps needed for integrating the
    msss_imager_pipeline in the LOFAR framework: It places the image on the
    output location in the correcy image type (hdf5).
    It also adds some meta data collected from the individual measurement sets
    and the found data.

    This recipe does not have positional commandline arguments
    """
    # Command-line ingredients; each entry maps an option name to a
    # validated field (file path, float, executable, ...).
    inputs = {
        'awimager_output_map': ingredient.FileField(
            '--awimager-output-mapfile',
            help = """Mapfile containing (host, path) pairs of created sky
                   images """
        ),
        'ms_per_image_map': ingredient.FileField(
            '--ms-per-image-map',
            help = '''Mapfile containing (host, path) pairs of mapfiles used
                   to create image on that node'''
        ),
        'sourcelist_map': ingredient.FileField(
            '--sourcelist-map',
            help = '''mapfile containing (host, path) pairs to a list of sources
                   found in the image'''
        ),
        'sourcedb_map': ingredient.FileField(
            '--sourcedb_map',
            help = '''mapfile containing (host, path) pairs to a db of sources
                   found in the image'''
        ),
        'target_mapfile': ingredient.FileField(
            '--target-mapfile',
            help = "Mapfile containing (host, path) pairs to the concatenated and"
            "combined measurement set, the source for the actual sky image"
        ),
        'minbaseline': ingredient.FloatField(
            '--minbaseline',
            help = '''Minimum length of the baseline used for the images'''
        ),
        'maxbaseline': ingredient.FloatField(
            '--maxbaseline',
            help = '''Maximum length of the baseline used for the images'''
        ),
        'output_image_mapfile': ingredient.FileField(
            '--output-image-mapfile',
            help = '''mapfile containing (host, path) pairs with the final
                   output image (hdf5) location'''
        ),
        'processed_ms_dir': ingredient.StringField(
            '--processed-ms-dir',
            help = '''Path to directory for processed measurment sets'''
        ),
        'fillrootimagegroup_exec': ingredient.ExecField(
            '--fillrootimagegroup_exec',
            help = '''Full path to the fillRootImageGroup executable'''
        ),
        'placed_image_mapfile': ingredient.FileField(
            '--placed-image-mapfile',
            help = "location of mapfile with proced and correctly placed,"
                " hdf5 images"
        )
    }

    outputs = {
        'placed_image_mapfile': ingredient.StringField()
    }

    def go(self):
        """
        Steps:

        1. Load and validate the input datamaps
        2. Run the node parts of the recipe
        3. Validate node output and format the recipe output
        """
        super(imager_finalize, self).go()
        # *********************************************************************
        # 1. Load the datamaps
        awimager_output_map = DataMap.load(
                                self.inputs["awimager_output_map"])
        ms_per_image_map = DataMap.load(
                                    self.inputs["ms_per_image_map"])
        sourcelist_map = DataMap.load(self.inputs["sourcelist_map"])
        sourcedb_map = DataMap.load(self.inputs["sourcedb_map"])
        target_mapfile = DataMap.load(self.inputs["target_mapfile"])
        output_image_mapfile = DataMap.load(
                                    self.inputs["output_image_mapfile"])
        processed_ms_dir = self.inputs["processed_ms_dir"]
        fillrootimagegroup_exec = self.inputs["fillrootimagegroup_exec"]

        # Align the skip fields
        # (an entry skipped in any map must be skipped in all of them)
        align_data_maps(awimager_output_map, ms_per_image_map,
                sourcelist_map, target_mapfile, output_image_mapfile,
                sourcedb_map)

        # Set the correct iterator
        sourcelist_map.iterator = awimager_output_map.iterator = \
            ms_per_image_map.iterator = target_mapfile.iterator = \
            output_image_mapfile.iterator = sourcedb_map.iterator = \
                DataMap.SkipIterator

        # *********************************************************************
        # 2. Run the node side of the recipe
        command = " python3 %s" % (self.__file__.replace("master", "nodes"))
        jobs = []
        for  (awimager_output_item, ms_per_image_item, sourcelist_item,
              target_item, output_image_item, sourcedb_item) in zip(
                  awimager_output_map, ms_per_image_map, sourcelist_map,
                  target_mapfile, output_image_mapfile, sourcedb_map):
            # collect the files as argument
            arguments = [awimager_output_item.file,
                         ms_per_image_item.file,
                         sourcelist_item.file,
                         target_item.file,
                         output_image_item.file,
                         self.inputs["minbaseline"],
                         self.inputs["maxbaseline"],
                         processed_ms_dir,
                         fillrootimagegroup_exec,
                         self.environment,
                         sourcedb_item.file]

            # NOTE(review): "folowing" typo below is inside a runtime log
            # string; left unchanged here.
            self.logger.info(
                "Starting finalize with the folowing args: {0}".format(
                                                                    arguments))
            jobs.append(ComputeJob(target_item.host, command, arguments))

        self._schedule_jobs(jobs)

        # *********************************************************************
        # 3. Validate the performance of the node script and assign output
        succesful_run = False
        for (job, output_image_item) in  zip(jobs, output_image_mapfile):
            if not "hdf5" in job.results:
                # If the output failed set the skip to True
                output_image_item.skip = True
            else:
                succesful_run = True
                # signal that we have at least a single run finished ok.
                # No need to set skip in this case

        if not succesful_run:
            self.logger.warn("Failed finalizer node run detected")
            return 1

        output_image_mapfile.save(self.inputs['placed_image_mapfile'])
        self.logger.debug(
           "Wrote mapfile containing placed hdf5 images: {0}".format(
                           self.inputs['placed_image_mapfile']))
        self.outputs["placed_image_mapfile"] = self.inputs[
                                                    'placed_image_mapfile']

        return 0
if __name__ == '__main__':
    # Allow the recipe to be run stand-alone from the command line.
    sys.exit(imager_finalize().main())
| kernsuite-debian/lofar | CEP/Pipeline/recipes/sip/master/imager_finalize.py | Python | gpl-3.0 | 7,155 | 0.004612 |
"""
Functions and decorators for making sure the parameters they work on are of
iterable types.
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import functools
import numbers
def is_integral(obj):
    """
    Determine whether the passed in object is a number of integral type.
    """
    # numbers.Integral covers int, bool and (on Python 2) long.
    return isinstance(obj, numbers.Integral)
def is_string(obj):
    """
    Determine if the passed in object is a string.
    """
    # ``basestring`` exists only on Python 2; fall back to ``str`` on 3.
    try:
        string_type = basestring
    except NameError:
        string_type = str
    return isinstance(obj, string_type)
def is_iterable(obj):
    """
    Determine if the passed in object is an iterable, but not a string or dict.
    """
    # Strings and dicts are iterable but are deliberately excluded here.
    if isinstance(obj, dict) or is_string(obj):
        return False
    return hasattr(obj, '__iter__')
def as_iterable(params=None):
    """
    Make sure the marked parameters are iterable. In case a single-unwrapped
    parameter is found among them (e.g. an int, string, ...), wrap it in a
    list and forward like that to the wrapped function. The marked parameters,
    if not explicitly specified, defaults to the 1st argument (``args[1]``).

    ``params`` may mix integer positional indexes and keyword-argument names.
    """
    # set up default converter and separate positional from keyword arguments
    params = params or [1]
    indexes = [i for i in params if is_integral(i)]
    keys = [k for k in params if is_string(k)]

    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # patch positional arguments, if needed
            if indexes:
                # copy `args` into a new list and wrap it's elements in a
                # list on the specified indexes, which are not iterables
                # themselves
                args = [[x] if i in indexes and not is_iterable(x) else x
                        for (i, x) in enumerate(args)]
            # patch keyword arguments, if needed
            for key in keys:
                # BUGFIX: skip keys the caller did not pass instead of
                # raising KeyError, so marked keyword args stay optional.
                if key in kwargs and not is_iterable(kwargs[key]):
                    kwargs[key] = [kwargs[key]]
            # invoke ``fn`` with patched parameters
            return fn(*args, **kwargs)
        return wrapper
    return decorator
| Outernet-Project/librarian | librarian/core/utils/iterables.py | Python | gpl-3.0 | 2,296 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright 2011 Yaşar Arabacı
This file is part of packagequiz.
packagequiz is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import pyalpm
from pycman import config
import question as q
from random import choice, randint
from sys import modules
config.init_with_config("/etc/pacman.conf")
localdb = pyalpm.get_localdb()
#questionTypes= (q.definition,q.depends,
#              q.fileOwner,q.installedSize,
#              q.packager)
# Collect every class exported by the question module, then keep only
# proper subclasses of q.Question (the abstract base itself is excluded).
types = [getattr(q, t) for t in dir(q) if str(type(getattr(q, t))) == "<class 'type'>"]
questionTypes = [qtype for qtype in types if (issubclass(qtype, q.Question) and qtype is not q.Question)]
del(types)
def getRandomQuestion(package=None, numWrongAnswers=3):
    """Returns a tuple with size of 4: first question text,
    second correct answer, third list of wrong answers, fourth the
    question's point value
    @param package: A pyalpm.package type
    @param numWrongAnswers: integer
    @return: tuple
    """
    qToReturn = None
    if package == None:
        package = getRandomPackage()
    global questionTypes
    # Generator functions for each question type live in this module as
    # "_<type>" functions; they may return None (e.g. no usable answers
    # for this package), in which case another question type is tried.
    while not qToReturn:
        qtype = choice(questionTypes)
        question = qtype(package)
        func = getattr(modules[__name__], "_" + question.type)
        qToReturn = func(question, numWrongAnswers)
    return qToReturn
def getRandomPackage(exception=None):
    """
    Return a random package
    @ param exception: list of package names to exclude (optional)
    @ return: a package
    """
    global localdb
    # BUGFIX: avoid a mutable default argument; None means "no exclusions".
    if exception is None:
        exception = []
    package = choice(localdb.pkgcache)
    # Re-draw until the chosen package is not excluded.  NOTE(review):
    # assumes localdb contains at least one package outside *exception*.
    while package.name in exception:
        package = choice(localdb.pkgcache)
    return package
def qgenerator(function):
    """
    Decorator factory: wraps a wrong-answer generator *function* and
    returns a callable producing (text, correct_answer, wrong_answers,
    points) tuples, or None when the question has no usable answer.
    """
    def generate(question, numWrongAnswers=3):
        answer_pool = question.correctAnswer
        if answer_pool is None:
            return None
        if isinstance(answer_pool, list):
            if not answer_pool:
                return None
            correct = choice(answer_pool)
        else:
            correct = answer_pool
        # Collect distinct, non-None wrong answers from the wrapped function.
        wrong = []
        while len(wrong) < numWrongAnswers:
            candidate = function(question, numWrongAnswers)
            if candidate is not None and candidate not in wrong:
                wrong.append(candidate)
        return (question.text, correct, wrong, question.points)
    return generate
@qgenerator
def _definition(question, numWrongAnswers=3):
    # Wrong answers: descriptions of random packages other than the
    # question's own package.
    return getRandomPackage([question.package.name]).desc
@qgenerator
def _depends(question, numWrongAnswers=3):
    # Wrong answers: random packages (excluding the correct one),
    # rendered as "name(description)".
    pkg = getRandomPackage([question.correctAnswer])
    return pkg.name + "(" + pkg.desc + ")"
def _requiredBy(question, numWrongAnswers=3):
    # Not wrapped in @qgenerator: here correctAnswer holds package *names*,
    # so both the correct and the wrong answers need a description lookup
    # ("name(description)") that the generic generator does not perform.
    global localdb
    if len(question.correctAnswer) > 0:
        correct_answer_name = choice(question.correctAnswer)
        correct_answer_package = localdb.get_pkg(correct_answer_name)
        correct_answer = correct_answer_name + "(" + correct_answer_package.desc + ")"
    else:
        return None
    wrong_answers = []
    while len(wrong_answers) < numWrongAnswers:
        # Exclude every correct-answer package from the wrong answers.
        pkg = getRandomPackage([pkg for pkg in question.correctAnswer])
        answer = pkg.name + "(" + pkg.desc + ")"
        if answer not in wrong_answers and answer is not None:
            wrong_answers.append(answer)
    return (question.text, correct_answer, wrong_answers,question.points)
#@qgenerator
#def _installedSize(question, numWrongAnswers=3):
# (type(question.correctAnswer))
# while True:
# rand = randint(int(question.correctAnswer * 0.1), int(question.correctAnswer * 1.9))
# (rand)
# (type(rand))
# if rand != question.correctAnswer:
# return rand
#
#@qgenerator
#def _maintainer(question, numWrongAnswers=3):
# while True:
# rand_pack = getRandomPackage()
# if rand_pack.packager != question.correctAnswer:
# return rand_pack.packager
#
#@qgenerator
#def _fileOwner(question, numWrongAnswers=3):
#
# return getRandomPackage([question.correctAnswer]).name
if __name__ == "__main__":
    # BUGFIX: the original "(getRandomQuestion())" evaluated the question
    # and discarded it (a stripped print); actually print the sample.
    print(getRandomQuestion())
| yasar11732/arch-package-quiz | packagequiz/questionGenerator.py | Python | gpl-3.0 | 4,801 | 0.005626 |
# -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2017 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
import functools
import time

from gevent import lock

from bliss.config.conductor.client import Lock
from bliss.config.channels import Cache
from bliss.config.settings import HashObjSetting
from bliss.common.switch import Switch as BaseSwitch
class ShutterSwitch(BaseSwitch):
    """Adapter exposing open/close/status callbacks as a two-state
    (OPEN/CLOSED) Switch, for use as a shutter's external control."""

    def __init__(self, set_open, set_closed, is_opened):
        # Unique name per instance; no static configuration is needed.
        BaseSwitch.__init__(self, "ShutterSwitch"+str(id(self)), {})

        self._set_open = set_open          # callable: open the shutter
        self._set_closed = set_closed      # callable: close the shutter
        self._is_opened = is_opened        # callable: -> truthy if open

    def _states_list(self):
        return ["OPEN", "CLOSED"]

    def _set(self, state):
        if state == "OPEN":
            return self._set_open()
        else:
            return self._set_closed()

    def _get(self):
        if self._is_opened():
            return "OPEN"
        else:
            return "CLOSED"
class Shutter(object):
    MANUAL,EXTERNAL,CONFIGURATION = range(3) # modes
    MODE2STR = {MANUAL: ("MANUAL", "Manual mode"),
                EXTERNAL: ("EXTERNAL", "External trigger mode"),
                CONFIGURATION: ("CONFIGURATION", "Configuration mode"),
                }
    OPEN,CLOSED,UNKNOWN = range(3) # state
    STATE2STR = { OPEN: ("OPEN", "Shutter is open"),
                  CLOSED: ("CLOSED", "Shutter is closed"),
                  UNKNOWN: ("UNKNOWN", "Unknown shutter state"),
                  }
    """
    Generic shutter object

    This interface should be used for all type of shutter (motor,fast...)

    You may want to link this shutter with an external
    control i.e: wago,musst.... in that case you have to put
    in configuration **external-control** with the object reference.
    This external control should be compatible with the Switch object
    and have an OPEN/CLOSED states.
    """

    def lazy_init(func):
        # Run software/hardware init once, then serialize the wrapped call
        # through the beacon Lock.
        @functools.wraps(func)
        def func_wrapper(self,*args,**kwargs):
            self.init()
            with Lock(self):
                return func(self,*args,**kwargs)
        return func_wrapper

    def __init__(self,name,config):
        self.__name = name
        self.__config = config
        self._external_ctrl = config.get('external-control')
        self.__settings = HashObjSetting('shutter:%s' % name)
        self.__initialized_hw = Cache(self,"initialized",
                                      default_value = False)
        self.__state = Cache(self,"state",
                             default_value = Shutter.UNKNOWN)
        self._init_flag = False
        self.__lock = lock.Semaphore()

    def init(self):
        """
        initialize the shutter in the current mode.
        this is method is called by lazy_init
        """
        if self._external_ctrl is not None:
            # Check if the external control is compatible
            # with a switch object and if it has open/close state
            ext_ctrl = self._external_ctrl
            name = ext_ctrl.name if hasattr(ext_ctrl,'name') else "unknown"
            try:
                states = ext_ctrl.states_list()
                ext_ctrl.set
                ext_ctrl.get
            except AttributeError:
                raise ValueError('external-ctrl : {0} is not compatible '
                                 'with a switch object'.format(name))
            else:
                if(not 'OPEN' in states or
                   not 'CLOSED' in states):
                    raise ValueError("external-ctrl : {0} doesn't"
                                     " have 'OPEN' and 'CLOSED' states".format(name))

        if not self._init_flag:
            self._init_flag = True
            try:
                self._init()
                with Lock(self):
                    with self.__lock:
                        if not self.__initialized_hw.value:
                            self._initialize_hardware()
                            self.__initialized_hw.value = True
            except:
                self._init_flag = False
                raise

    def _init(self):
        """
        This method should contains all software initialization
        like communication, internal state...
        """
        raise NotImplementedError

    def _initialize_hardware(self):
        """
        This method should contains all commands needed to
        initialize the hardware.
        It's will be call only once (by the first client).
        """
        pass

    @property
    def name(self):
        return self.__name

    @property
    def config(self):
        return self.__config

    @property
    def settings(self):
        return self.__settings

    @property
    def mode(self):
        """
        shutter mode can be MANUAL,EXTERNAL,CONFIGURATION

        In CONFIGURATION mode, shutter can't be opened/closed.
        **CONFIGURATION** could mean that the shutter is in tuning mode
        i.e: changing open/close position in case of a motor.

        In EXTERNAL mode, the shutter will be controlled
        through the external-control handler.
        If no external control is configured open/close
        won't be authorized.
        """
        return self.__settings.get('mode',Shutter.MANUAL)

    @mode.setter
    def mode(self,value):
        if value not in self.MODE2STR:
            raise ValueError("Mode can only be: %s" %\
                             ','.join((x[0] for x in self.MODE2STR.values())))
        self.init()
        self._set_mode(value)
        if value in (self.CONFIGURATION,self.EXTERNAL):
            # Can't cache the state if external or configuration
            self.__state.value = self.UNKNOWN
        self.__settings['mode'] = value

    @property
    def state(self):
        self.init()
        mode = self.mode
        if mode == self.MANUAL and self.__state.value == self.UNKNOWN:
            return_state = self._state()
            self.__state.value = return_state
            return return_state
        else:
            if mode == self.EXTERNAL:
                if self.external_control is not None:
                    switch_state = self.external_control.get()
                    return self.OPEN if switch_state == "OPEN" else self.CLOSED
                else:
                    return self.UNKNOWN
            elif mode == self.CONFIGURATION:
                return self.UNKNOWN
            return self.__state.value

    def _state(self):
        raise NotImplementedError

    @property
    def state_string(self):
        return self.STATE2STR.get(self.state,self.STATE2STR[self.UNKNOWN])

    @property
    def external_control(self):
        return self._external_ctrl

    @lazy_init
    def opening_time(self):
        """
        Return the opening time if available or None
        """
        return self._opening_time()

    def _opening_time(self):
        return self.__settings.get('opening_time')

    @lazy_init
    def closing_time(self):
        """
        Return the closing time if available or None
        """
        return self._closing_time()

    def _closing_time(self):
        return self.__settings.get('closing_time')

    def measure_open_close_time(self):
        """
        This small procedure will in basic usage do an open and close
        of the shutter to measure the opening and closing time.
        Those timing will be register into the settings.
        returns (opening,closing) time
        """
        # BUGFIX: *mode* is a property -- the original called self.mode()
        # (TypeError: 'int' object is not callable), assigned the mode by
        # calling it, and returned the undefined names open_time/close_time.
        previous_mode = self.mode
        try:
            if previous_mode != self.MANUAL:
                self.mode = self.MANUAL
            opening_time, closing_time = self._measure_open_close_time()
            self.__settings['opening_time'] = opening_time
            self.__settings['closing_time'] = closing_time
            return opening_time, closing_time
        finally:
            if previous_mode != self.MANUAL:
                self.mode = previous_mode

    def _measure_open_close_time(self):
        """
        This method can be overloaded if needed.
        Basic timing using wall-clock time around open() and close().
        """
        self.close()            # ensure it's closed
        start_time = time.time()
        self.open()
        opening_time = time.time() - start_time

        start_time = time.time()
        self.close()
        closing_time = time.time() - start_time
        return opening_time,closing_time

    @lazy_init
    def open(self):
        mode = self.mode
        if mode == self.EXTERNAL:
            if self._external_ctrl is None:
                raise RuntimeError("Can't open the shutter because no "
                                   "external-control is configured")
            else:
                return self._external_ctrl.set("OPEN")
        elif mode != self.MANUAL:
            raise RuntimeError("Can't open the shutter, in %s" %\
                               self.MODE2STR.get(mode,"Unknown"))
        return self._open()

    def _open(self):
        raise NotImplementedError

    @lazy_init
    def close(self):
        mode = self.mode
        if mode == self.EXTERNAL:
            if self._external_ctrl is None:
                raise RuntimeError("Can't close the shutter because no "
                                   "external-control is configured")
            else:
                return self._external_ctrl.set("CLOSED")
        elif mode != self.MANUAL:
            raise RuntimeError("Can't close the shutter, in %s" %\
                               self.MODE2STR.get(mode,"Unknown"))
        return self._close()

    def _close(self):
        raise NotImplementedError

    def set_external_control(self, set_open, set_closed, is_opened):
        """
        Programmatically set shutter in external control mode,
        and create _external_ctrl switch using callback functions
        """
        if not all(map(callable, (set_open, set_closed, is_opened))):
            raise TypeError("%s.set_external_control: set_open, set_closed, is_opened functions must be callable" % self.name)
        switch = ShutterSwitch(set_open, set_closed, is_opened)
        self._external_ctrl = switch
        self.init()
| tiagocoutinho/bliss | bliss/common/shutter.py | Python | lgpl-3.0 | 10,215 | 0.004307 |
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
import argparse
import json
import os
import re
import urllib.request
# Maven coordinates of the artifact this 3pp package fetches.
_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'io/github/java-diff-utils'
_MODULE_NAME = 'java-diff-utils'
_FILE_EXT = 'jar'
# Pin a specific upstream version here to bypass <latest> detection.
_OVERRIDE_LATEST = None
# Chromium-local patch revision appended to the upstream version.
_PATCH_VERSION = 'cr1'
def do_latest():
    """Print the latest upstream version, suffixed with our patch version."""
    if _OVERRIDE_LATEST is not None:
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return
    metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    xml_text = urllib.request.urlopen(metadata_url).read().decode('utf-8')
    # Deliberately parse with regular expressions rather than an XML parser:
    # the stdlib parsers are susceptible to maliciously crafted XML, and a
    # regex is enough for the fields extracted here.
    latest_match = re.search('<latest>([^<]+)</latest>', xml_text)
    if latest_match:
        version = latest_match.group(1)
    else:
        # No <latest> element; hope the versions are sorted and the last
        # listed one is the newest (as is commonly the case).
        version = re.findall('<version>([^<]+)</version>', xml_text)[-1]
    print(version + f'.{_PATCH_VERSION}')
def get_download_url(version):
    """Print a partial 3pp manifest (JSON) with the artifact download URL."""
    # Drop the Chromium-local "crN" patch suffix before building the
    # upstream URL; upstream only knows the bare version.
    version_no_patch, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = version_no_patch
    file_url = (f'{_REPO_URL}/{_GROUP_NAME}/{_MODULE_NAME}/{version}/'
                f'{_MODULE_NAME}-{version}.{_FILE_EXT}')
    file_name = file_url.rsplit('/', 1)[-1]
    print(json.dumps({
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }))
def main():
    # Sub-commands follow the 3pp fetch protocol: "latest" prints the
    # version to build, "get_url" prints a manifest for that version
    # (passed by the recipe via the _3PP_VERSION environment variable).
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers()

    latest = sub.add_parser('latest')
    latest.set_defaults(func=lambda _opts: do_latest())

    download = sub.add_parser('get_url')
    download.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    opts = ap.parse_args()
    opts.func(opts)
if __name__ == '__main__':
    # Entry point when invoked by the 3pp recipe.
    main()
| chromium/chromium | third_party/android_deps/libs/io_github_java_diff_utils_java_diff_utils/3pp/fetch.py | Python | bsd-3-clause | 2,503 | 0 |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.utils.translation import ugettext as _
class UserProfile(models.Model):
    '''
    Per-user profile data attached one-to-one to ``auth.User``.

    username: the unique login name; may be NULL (set to None to dodge the
        unique constraint when empty).
    nickname: a mutable display name; duplicates are allowed.
    '''
    user = models.OneToOneField(User, unique=True, related_name='profile', verbose_name=_('用户'))
    username = models.CharField(blank=True, null=True, unique=True, max_length=255, verbose_name=_('用户名(唯一)'))
    nickname = models.CharField(blank=True, max_length=255, db_index=True, default='', verbose_name=_('昵称(可变)'))
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('创建日期'))
    updated_at = models.DateTimeField(auto_now=True, verbose_name=_('修改日期'))

    class Meta:
        db_table = "auth_userprofile"
        verbose_name = _('用户附加信息')
        verbose_name_plural = _('用户附加信息')
@receiver(pre_save, sender=User)
def pre_save_user_handler(sender, instance, **kwargs):
    '''
    Before saving a User: when the EMAIL_AS_USERNAME feature is enabled,
    mirror the username into the email field so the two always match.
    '''
    if settings.FEATURES.get('EMAIL_AS_USERNAME'):
        # Sync when email is empty or diverges (ignoring surrounding spaces).
        if not instance.email or instance.email.strip() != instance.username.strip():
            instance.email = instance.username
@receiver(post_save, sender=User)
def post_save_user_handler(sender, instance, created, **kwargs):
    '''
    After saving a User, ensure a UserProfile row exists for it, creating
    one on first save.
    '''
    try:
        profile = instance.profile
    except UserProfile.DoesNotExist:
        profile = UserProfile(user=instance)
    profile.save()
@receiver(pre_save, sender=UserProfile)
def pre_save_userprofile_handler(sender, instance, **kwargs):
    '''
    Before saving a profile: if the username is empty, store None instead
    so the unique constraint is not triggered by empty strings.
    '''
    if not instance.username:
        instance.username = None
| duoduo369/django-scaffold | myauth/models.py | Python | mit | 1,991 | 0.00387 |
from __future__ import print_function
import sys
sys.path.append('..') # help python find cyton.py relative to scripts folder
from openbci import cyton as bci
import logging
import time
def printData(sample):
    """Streaming callback: dump one OpenBCI sample to stdout."""
    # os.system('clear')
    separator = "----------------"
    print(separator)
    print("%f" % (sample.id))
    print(sample.channel_data)
    print(sample.aux_data)
    print(separator)
if __name__ == '__main__':
    # Serial device of the Cyton dongle; alternates kept for other boards.
    # port = '/dev/tty.OpenBCI-DN008VTF'
    port = '/dev/tty.usbserial-DB00JAM0'
    # port = '/dev/tty.OpenBCI-DN0096XA'
    # NOTE(review): baud is defined but never passed to OpenBCICyton below;
    # the board presumably uses its default baud rate — confirm.
    baud = 115200
    logging.basicConfig(filename="test.log", format='%(asctime)s - %(levelname)s : %(message)s', level=logging.DEBUG)
    logging.info('---------LOG START-------------')
    board = bci.OpenBCICyton(port=port, scaled_output=False, log=True)
    print("Board Instantiated")
    # 'v' resets the board / prints register settings.
    # NOTE(review): on Python 3 pyserial expects bytes (b'v') here — confirm.
    board.ser.write('v')
    time.sleep(10)
    # Stream indefinitely, printing each sample via printData.
    board.start_streaming(printData)
    board.print_bytes_in()
| OpenBCI/OpenBCI_Python | scripts/test.py | Python | mit | 937 | 0.004269 |
from src import model as mdl
class LaTeXPrinter(object):
    """Base class for writers that render model data to a LaTeX file."""

    def __init__(self, target_file_path):
        self._target_file_path = target_file_path

    def run(self):
        """Generate the LaTeX source and write it to the target file."""
        with open(self._target_file_path, 'w') as output:
            output.write(self._generate_text())

    def _generate_text(self):
        """Return the LaTeX source as a string; subclasses must implement."""
        raise NotImplementedError('Override me!')
class TablePrinter(LaTeXPrinter):
    """Template-method base for longtable printers: subclasses supply the
    column layout, headers, rows, caption and label."""

    def __init__(self, target_file_path):
        super(TablePrinter, self).__init__(target_file_path)

    def _generate_text(self):
        # Alternate row colours starting from the third row.
        text = '\\rowcolors{3}{aubergine}{white}\n'
        text += self._get_table_definition()
        text += '\\toprule\n'
        text += self._get_headers()
        # \endhead repeats the header on every page of the longtable.
        text += '\\midrule\n\\endhead\n'
        for element in self._get_content():
            # Each element is an iterable of cell strings for one row.
            text += ' & '.join(element) + '\\\\\n'
        text += '\\bottomrule\n'
        caption, label = self._get_caption_and_label()
        text += ('\\rowcolor{white}' + '\\caption{' + caption +
                 '}\\label{' + label + '}\n')
        text += '\\end{longtable}\n'
        return text

    def _get_table_definition(self):
        raise NotImplementedError('Override me!')

    def _get_headers(self):
        raise NotImplementedError('Override me!')

    def _get_content(self):
        """Returns an iterable of 3-tuples with the ID, the description and the
        parent of the item that needs to be printed.
        """
        raise NotImplementedError('Override me!')

    def _get_caption_and_label(self):
        """Returns the caption and label of the table to print.
        """
        raise NotImplementedError('Override me!')
class UseCaseTablePrinter(TablePrinter):
    """Prints the summary table of all use cases."""

    def __init__(self, target_file_path):
        super(UseCaseTablePrinter, self).__init__(target_file_path)
        self._uc_id_list = mdl.dal.get_all_use_case_ids()

    def _get_table_definition(self):
        return '\\begin{longtable}{lp{.5\\textwidth}l}\n'

    def _get_headers(self):
        # The header row must end with a LaTeX row break ('\\'), i.e. four
        # backslashes in the Python literal, as in the sibling printers; the
        # previous '\\\n' emitted a single backslash before the newline.
        return ('\\sffamily\\bfseries ID & \\sffamily\\bfseries Descrizione '
                '& \\sffamily\\bfseries Padre\\\\\n')

    def _get_content(self):
        """Returns an iterable (generator) containing a 3-tuple with the
        ID, description and parent of every use case.
        """
        for uc_id in self._uc_id_list:
            uc = mdl.dal.get_use_case(uc_id)
            yield (uc.uc_id, uc.description, uc.parent_id or '--')

    def _get_caption_and_label(self):
        return ('Prospetto riepilogativo dei casi d\'uso', 'tab:uclist')
class RequirementTablePrinter(TablePrinter):
    """Prints the table of requirements for one (type, priority) pair."""

    # Italian names for requirement types; anything else is 'qualitativi'.
    _TYPE_NAMES = {'F': 'funzionali', 'D': 'dichiarativi', 'P': 'prestazionali'}
    # Italian names for priorities; anything else is 'desiderabili'.
    _PRIORITY_NAMES = {'O': 'obbligatori', 'F': 'facoltativi'}

    def __init__(self, req_type, priority, target_file_path):
        super(RequirementTablePrinter, self).__init__(target_file_path)
        self._req_type = req_type
        self._priority = priority
        self._req_id_list = mdl.dal.get_all_requirement_ids_spec(
            req_type, priority)

    def _get_table_definition(self):
        return '\\begin{longtable}{lp{.5\\textwidth}ll}\n'

    def _get_headers(self):
        return ('\\sffamily\\bfseries ID & \\sffamily\\bfseries Descrizione & '
                '\\sffamily\\bfseries Fonte & '
                '\\sffamily\\bfseries Padre\\\\\n')

    def _get_content(self):
        for req_id in self._req_id_list:
            req = mdl.dal.get_requirement(req_id)
            source = mdl.dal.get_source(req.source_id)
            yield (req.req_id, req.description, source.name,
                   req.parent_id or '--')

    def _get_caption_and_label(self):
        type_name = self._TYPE_NAMES.get(self._req_type, 'qualitativi')
        priority_name = self._PRIORITY_NAMES.get(self._priority, 'desiderabili')
        caption = 'Elenco dei requisiti {0} {1}.'.format(type_name,
                                                         priority_name)
        label = 'tab:reqlist{0}{1}'.format(self._req_type, self._priority)
        return (caption, label)
class UseCaseRequirementTrackPrinter(TablePrinter):
    """Prints the use case -> requirements traceability table."""

    def __init__(self, target_file_path):
        super(UseCaseRequirementTrackPrinter, self).__init__(target_file_path)
        self._uc_id_list = mdl.dal.get_all_use_case_ids()

    def _get_table_definition(self):
        # '\\textwidth' needs an escaped backslash: the previous literal
        # '\textwidth' contained '\t', i.e. a raw TAB character, breaking
        # the emitted LaTeX column specification.
        return '\\begin{longtable}{lp{.8\\textwidth}}\n'

    def _get_headers(self):
        return ('\\sffamily\\bfseries Caso d\'uso & '
                '\\sffamily\\bfseries Requisiti associati\\\\\n')

    def _get_content(self):
        # One row per use case: its ID and a comma-separated requirement list.
        for uc_id in self._uc_id_list:
            req_ids = mdl.dal.get_use_case_associated_requirements(uc_id)
            yield (uc_id, ', '.join(req_ids))

    def _get_caption_and_label(self):
        return ('Tracciamento requisiti -- casi d\'uso.', 'tab:ucreqtrack')
| diegoberaldin/PyRequirementManager | src/controller/printers.py | Python | gpl-3.0 | 4,793 | 0.000417 |
#import logging
#logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S',
# format='%(asctime)s %(levelname)s: %(message)s')
import unittest
import SocketServer, socket
import random, time
import threading
import cStringIO
from datetime import datetime
from shapy import register_settings
register_settings('tests.emulation.settings')
from shapy.emulation.shaper import Shaper
from tests.mixins import ShaperMixin, ServerMixin
from tests.utils import total_seconds
class TestCWCShaping(unittest.TestCase, ShaperMixin, ServerMixin):
    """End-to-end check that the emulated shaper delays a TCP transfer by the
    expected time in both directions, and that the IFB qdisc statistics
    account for the transmitted bytes."""

    # Size of the random payload sent in each direction (0.5 MB).
    filesize = 2**19 # 0.5MB

    def setUp(self):
        self.server_addr = ('127.0.0.2', 55000)
        self.client_addr = ('127.0.0.3', 55001)
        # shaping init
        ShaperMixin.setUp(self)
        ServerMixin.run_server(self)

        # Random payload so nothing along the path can compress it.
        with open('/dev/urandom', 'rb') as f:
            self.randomfile = bytearray(f.read(self.filesize))

    def test_transfer(self):
        self.sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR: http://stackoverflow.com/questions/3229860/what-is-the-meaning-of-so-reuseaddr-setsockopt-option-linux
        s = self.sock_client
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(self.client_addr)
        s.connect(self.server_addr)
        start = datetime.now()
        # client -> server
        sent = 0
        while sent < self.filesize:
            sent += s.send(self.randomfile[sent:sent+4096])

        # We have to wait until the server finishes reading data from its socket
        # and closes the connection.
        rcvd = s.recv(1024)
        delay = total_seconds(datetime.now() - start)
        #delay = delta.seconds + delta.microseconds/float(10**6)
        # Measured duration must match the shaper's predicted transfer time.
        tt = self.estimate_transfer_time(self.filesize, self.client_addr[0],
                                         self.server_addr[0])
        self.assertAlmostEqual(delay, tt, delta=0.4)

        # server -> client
        start = datetime.now()
        while len(rcvd) < self.filesize:
            rcvd += s.recv(1024)
        delay = total_seconds(datetime.now() - start)
        tt = self.estimate_transfer_time(self.filesize, self.server_addr[0],
                                         self.client_addr[0])
        self.assertAlmostEqual(delay, tt, delta=0.4)

        # statistics of qdiscs on IFB must correctly reflect the transmitted data
        self._test_traffic()

        s.close()

    def _test_traffic(self):
        # (sent, received) byte counters per shaped address.
        c = self.sh.get_traffic(self.client_addr[0])
        s = self.sh.get_traffic(self.server_addr[0])
        # qdisc statistics reflect all traffic, including header of each layer,
        # not only filesize
        delta = self.filesize/100
        self.assertAlmostEqual(c[0], self.filesize, delta=delta)
        self.assertAlmostEqual(c[1], self.filesize, delta=delta)
        self.assertAlmostEqual(s[0], self.filesize, delta=delta)
        self.assertAlmostEqual(s[1], self.filesize, delta=delta)

    def tearDown(self):
        # test_transfer may have failed before creating the client socket.
        if hasattr(self, 'sock_client'):
            self.sock_client.close()
        ShaperMixin.tearDown(self)
| praus/shapy | tests/emulation/test_shaping.py | Python | mit | 3,245 | 0.010478 |
import os
import jug.backends.redis_store
import jug.backends.file_store
import jug.backends.dict_store
from jug.backends.redis_store import redis
import pytest
# Only exercise the redis backend when explicitly requested via TEST_REDIS.
if not os.getenv('TEST_REDIS'):
    redis = None

# Use the real redis.ConnectionError when the client is available; otherwise
# fall back to an exception type these tests will never actually catch.
try:
    redisConnectionError = redis.ConnectionError
except:
    redisConnectionError = SystemError
@pytest.fixture(scope='function', params=['file', 'dict', 'redis'])
def store(tmpdir, request):
    # Parametrized over all three backends so every test runs against each.
    if request.param == 'file':
        tmpdir = str(tmpdir)
        yield jug.backends.file_store.file_store(tmpdir)
        # The file backend leaves files on disk; wipe them after the test.
        jug.backends.file_store.file_store.remove_store(tmpdir)
    elif request.param == 'dict':
        # Purely in-memory backend: nothing to clean up.
        yield jug.backends.dict_store.dict_store()
    elif request.param == 'redis':
        if redis is None:
            pytest.skip()
        try:
            # NOTE(review): this uses jug.redis_store, while the import above
            # is jug.backends.redis_store — confirm the alias exists.
            st = jug.redis_store.redis_store('redis:')
            yield st
            st.close()
        except redisConnectionError:
            pytest.skip()
def test_load_get(store):
    """A dumped value can be detected, listed, loaded back and removed."""
    assert len(list(store.list())) == 0
    key = b'jugisbestthingever'
    assert not store.can_load(key)
    # Named 'payload' instead of shadowing the builtin 'object'.
    payload = list(range(232))
    store.dump(payload, key)
    assert store.can_load(key)
    assert store.load(key) == payload

    flist = list(store.list())
    assert len(flist) == 1
    assert flist[0] == key

    store.remove(key)
    assert not store.can_load(key)
def test_lock(store):
    """Only one handle holds a lock at a time; release hands it over."""
    key = b'jugisbestthingever'
    assert not list(store.listlocks())

    first = store.getlock(key)
    assert not first.is_locked()
    assert first.get()
    assert not first.get()

    second = store.getlock(key)
    assert not second.get()
    assert len(list(store.listlocks())) == 1

    first.release()
    assert second.get()
    second.release()
def test_lock_remove(store):
    """remove_locks() clears locks left behind, e.g. by crashed workers."""
    key = b'jugisbestthingever'
    assert not list(store.listlocks())

    lk = store.getlock(key)
    assert not lk.is_locked()
    assert lk.get()
    assert not lk.get()
    assert len(list(store.listlocks())) == 1

    store.remove_locks()
    assert not list(store.listlocks())
def test_lock_fail(store):
    """A failed lock reports is_failed() until its lock entry is removed."""
    key = b'jugisbestthingever'
    assert not list(store.listlocks())

    lk = store.getlock(key)
    assert not lk.is_locked()
    assert lk.get()
    assert not lk.get()

    lk.fail()
    assert lk.is_failed()
    assert len(list(store.listlocks())) == 1

    store.remove_locks()
    assert not lk.is_failed()
    assert not list(store.listlocks())
def test_lock_fail_other(store):
    """is_failed() is visible even from a handle that never held the lock."""
    key = b'jugisbestthingever'
    assert not list(store.listlocks())

    holder = store.getlock(key)
    waiter = store.getlock(key)
    assert not holder.is_locked()
    assert not waiter.is_locked()

    assert holder.get()
    assert not waiter.get()
    assert not holder.is_failed()
    assert not waiter.is_failed()

    holder.fail()
    assert waiter.is_failed()
    assert len(list(store.listlocks())) == 1

    store.remove_locks()
    assert not holder.is_failed()
    assert not waiter.is_failed()
    assert not list(store.listlocks())
def test_numpy_array(tmpdir):
    """A numpy array survives a dump/load round-trip in the file store."""
    try:
        import numpy as np
    except ImportError:
        pytest.skip()
    store = jug.backends.file_store.file_store(str(tmpdir))
    key = 'mykey'
    arr = (np.arange(100) % 17).reshape((10, 10))
    store.dump(arr, key)
    assert np.all(store.load(key) == arr)
    store.remove(key)
    store.close()
def test_numpy_array_no_compress(tmpdir):
    """Round-trip also works with numpy compression disabled."""
    try:
        import numpy as np
    except ImportError:
        pytest.skip()
    store = jug.backends.file_store.file_store(str(tmpdir), compress_numpy=False)
    key = 'mykey'
    arr = (np.arange(100) % 17).reshape((10, 10))
    store.dump(arr, key)
    assert np.all(store.load(key) == arr)
    store.remove(key)
    store.close()
| luispedro/jug | jug/tests/test_store.py | Python | mit | 3,891 | 0.003084 |
#!/usr/bin/python3
import sys
def process_import(filename, statement):
    """Emit a DOT edge from *filename* to each module in an 'import a, b' line."""
    modules = statement.replace(",", " ").split()
    # modules[0] is the 'import' keyword itself.
    for module in modules[1:]:
        print('"%s" -> "%s"' % (filename, module))
def process_from(filename, statement):
    """Emit DOT edges for a 'from pkg import a, b' line: file -> pkg -> name."""
    modules = statement.replace(",", " ").split()
    # Layout is: ['from', <package>, 'import', <name>, <name>, ...]
    main_module = modules[1]
    for module in modules[3:]:
        print('"%s" -> "%s" -> "%s"' % (filename, main_module, module))
def print_header():
    """Open the DOT digraph and set a tall aspect ratio."""
    print("digraph WeCase {")
    print("ratio=2")
def print_footer():
    """Close the DOT digraph opened by print_header()."""
    print("}")
print_header()
# Read a listing from stdin: a filename line ending in '.py' followed by the
# import statements found in that file; emit one DOT edge per import.
for line in sys.stdin:
    line = line.replace("\n", "")
    if line.endswith(".py"):
        filename = line
    else:
        # NOTE(review): 'filename' is undefined if the stream does not start
        # with a '.py' line — this assumes well-formed input; confirm upstream.
        if line.startswith("import"):
            process_import(filename, line)
        elif line.startswith("from"):
            process_from(filename, line)
print_footer()
| WeCase/WeCase | utils/depgraph.py | Python | gpl-3.0 | 898 | 0.004454 |
"""
A pretty-printing dump function for the ast module. The code was copied from
the ast.dump function and modified slightly to pretty-print.
Alex Leone (acleone ~AT~ gmail.com), 2010-01-30
"""
from ast import *
def dump(node, annotate_fields=True, include_attributes=False, indent='  '):
    """
    Return a formatted dump of the tree in *node*. This is mainly useful for
    debugging purposes. The returned string will show the names and the values
    for fields. This makes the code impossible to evaluate, so if evaluation is
    wanted *annotate_fields* must be set to False. Attributes such as line
    numbers and column offsets are not dumped by default. If this is wanted,
    *include_attributes* can be set to True.
    """
    def _format(node, level=0):
        # AST nodes render as ClassName(field=value, ...) on one line;
        # lists render one element per line, indented two levels deeper.
        if isinstance(node, AST):
            fields = [(a, _format(b, level)) for a, b in iter_fields(node)]
            if include_attributes and node._attributes:
                fields.extend([(a, _format(getattr(node, a), level))
                               for a in node._attributes])
            return ''.join([
                node.__class__.__name__,
                '(',
                ', '.join(('%s=%s' % field for field in fields)
                           if annotate_fields else
                           (b for a, b in fields)),
                ')'])
        elif isinstance(node, list):
            lines = ['[']
            lines.extend((indent * (level + 2) + _format(x, level + 2) + ','
                         for x in node))
            if len(lines) > 1:
                # Closing bracket on its own line when the list is non-empty.
                lines.append(indent * (level + 1) + ']')
            else:
                lines[-1] += ']'
            return '\n'.join(lines)
        # Leaf values (strings, numbers, None) use their repr.
        return repr(node)
    if isinstance(node, list):
        return '\n'.join(_format(n) for n in node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)
if __name__ == '__main__':
    import sys
    # Pretty-print the AST of every file given on the command line.
    for filename in sys.argv[1:]:
        print('=' * 50)
        print('AST tree for', filename)
        print('=' * 50)
        f = open(filename, 'r')
        fstr = f.read()
        f.close()
        print(dump(parse(fstr, filename=filename), include_attributes=True))
        print()
# flake8: noqa: F401
from pandas.core.arrays.sparse.accessor import SparseAccessor, SparseFrameAccessor
from pandas.core.arrays.sparse.array import (
BlockIndex,
IntIndex,
SparseArray,
_make_index,
)
from pandas.core.arrays.sparse.dtype import SparseDtype
| TomAugspurger/pandas | pandas/core/arrays/sparse/__init__.py | Python | bsd-3-clause | 273 | 0.003663 |
from stard.services import BaseService
class Service(BaseService):
    """Sample parent service whose only dependency is the 'child' service."""
    def init_service(self):
        # Declare a single child unit to be managed alongside this service.
        self.children = {self.service('child')}
| DexterLB/stard | src/stard/test_samples/father.py | Python | mit | 144 | 0.006944 |
# -*- coding: utf-8 -*-
from itertools import chain
from django.contrib.sites.models import Site
from django.core.urlresolvers import NoReverseMatch, reverse_lazy
from django.forms.widgets import Select, MultiWidget, TextInput
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from cms.forms.utils import get_site_choices, get_page_choices
from cms.models import Page, PageUser
from cms.templatetags.cms_admin import CMS_ADMIN_ICON_BASE
from cms.utils.compat.dj import force_unicode
class PageSelectWidget(MultiWidget):
    """A widget that allows selecting a page by first selecting a site and then
    a page on that site in a two step process.
    """
    def __init__(self, site_choices=None, page_choices=None, attrs=None):
        if attrs is not None:
            self.attrs = attrs.copy()
        else:
            self.attrs = {}
        self.choices = []
        # Three sub-selects: site, visible page list, hidden full page list.
        super(PageSelectWidget, self).__init__((Select, Select, Select), attrs)

    def decompress(self, value):
        """
        receives a page_id in value and returns the site_id and page_id
        of that page or the current site_id and None if no page_id is given.
        """
        if value:
            page = Page.objects.get(pk=value)
            site = page.site
            return [site.pk, page.pk, page.pk]
        site = Site.objects.get_current()
        return [site.pk,None,None]

    def _has_changed(self, initial, data):
        # THIS IS A COPY OF django.forms.widgets.Widget._has_changed()
        # (except for the first if statement)
        """
        Return True if data differs from initial.
        """
        # For purposes of seeing whether something has changed, None is
        # the same as an empty string, if the data or inital value we get
        # is None, replace it w/ u''.
        if data is None or (len(data)>=2 and data[1] in [None,'']):
            data_value = u''
        else:
            data_value = data
        if initial is None:
            initial_value = u''
        else:
            initial_value = initial
        if force_unicode(initial_value) != force_unicode(data_value):
            return True
        return False

    def render(self, name, value, attrs=None):
        # THIS IS A COPY OF django.forms.widgets.MultiWidget.render()
        # (except for the last line)

        # value is a list of values, each corresponding to a widget
        # in self.widgets.

        # Rebuild the sub-widgets on every render so the site/page choices
        # reflect the current database state.
        site_choices = get_site_choices()
        page_choices = get_page_choices()
        self.site_choices = site_choices
        self.choices = page_choices
        self.widgets = (Select(choices=site_choices ),
                   Select(choices=[('', '----')]),
                   Select(choices=self.choices, attrs={'style': "display:none;"} ),
        )

        if not isinstance(value, list):
            value = self.decompress(value)
        output = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)
        for i, widget in enumerate(self.widgets):
            try:
                widget_value = value[i]
            except IndexError:
                widget_value = None
            if id_:
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
            output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
        # Inline JS: copies the hidden per-site page optgroup into the visible
        # select whenever the site selection changes.
        output.append(r'''<script type="text/javascript">
(function($) {
    var handleSiteChange = function(site_name, selected_id) {
        $("#id_%(name)s_1 optgroup").remove();
        var myOptions = $("#id_%(name)s_2 optgroup[label='" + site_name + "']").clone();
        $("#id_%(name)s_1").append(myOptions);
        $("#id_%(name)s_1").change();
    };
    var handlePageChange = function(page_id) {
        if (page_id) {
            $("#id_%(name)s_2 option").removeAttr('selected');
            $("#id_%(name)s_2 option[value=" + page_id + "]").attr('selected','selected');
        } else {
            $("#id_%(name)s_2 option[value=]").attr('selected','selected');
        };
    };
    $("#id_%(name)s_0").change(function(){
        var site_label = $("#id_%(name)s_0").children(":selected").text();
        handleSiteChange( site_label );
    });
    $("#id_%(name)s_1").change(function(){
        var page_id = $(this).find('option:selected').val();
        handlePageChange( page_id );
    });
    $(function(){
        handleSiteChange( $("#id_%(name)s_0").children(":selected").text() );
        $("#add_id_%(name)s").hide();
    });
})(django.jQuery);
</script>''' % {'name': name})
        return mark_safe(self.format_output(output))

    def format_output(self, rendered_widgets):
        return u' '.join(rendered_widgets)
class PageSmartLinkWidget(TextInput):
    """Text input enhanced with a select2 autocomplete that searches pages
    via an AJAX view and also accepts free-form URLs as new entries."""

    def __init__(self, attrs=None, ajax_view=None):
        super(PageSmartLinkWidget, self).__init__(attrs)
        self.ajax_url = self.get_ajax_url(ajax_view=ajax_view)

    def get_ajax_url(self, ajax_view):
        # Resolve the view name eagerly so misconfiguration fails at startup.
        try:
            return reverse_lazy(ajax_view)
        except NoReverseMatch:
            raise Exception(
                'You should provide an ajax_view argument that can be reversed to the PageSmartLinkWidget'
            )

    def render(self, name=None, value=None, attrs=None):
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)
        # NOTE(review): self.language is read here but never set in __init__;
        # it is expected to be assigned by the form field before rendering —
        # confirm against the field code.
        output = [r'''<script type="text/javascript">
(function($){
    $(function(){
        $("#%(element_id)s").select2({
            placeholder: "%(placeholder_text)s",
            allowClear: true,
            minimumInputLength: 3,
            ajax: {
                url: "%(ajax_url)s",
                dataType: 'json',
                data: function (term, page) {
                    return {
                        q: term, // search term
                        language_code: '%(language_code)s'
                    };
                },
                results: function (data, page) {
                    return {
                        more: false,
                        results: $.map(data, function(item, i){
                            return {
                                'id':item.redirect_url,
                                'text': item.title + ' (/' + item.path + ')'}
                            }
                        )
                    };
                }
            },
            // Allow creation of new entries
            createSearchChoice:function(term, data) { if ($(data).filter(function() { return this.text.localeCompare(term)===0; }).length===0) {return {id:term, text:term};} },
            multiple: false,
            initSelection : function (element, callback) {
                var initialValue = element.val()
                callback({id:initialValue, text: initialValue});
            }
        });
    })
})(django.jQuery);
</script>''' % {
            'element_id': id_,
            'placeholder_text': final_attrs.get('placeholder_text', ''),
            'language_code': self.language,
            'ajax_url': force_unicode(self.ajax_url)
        }]
        output.append(super(PageSmartLinkWidget, self).render(name, value, attrs))
        return mark_safe(u''.join(output))

    class Media:
        css = {
            'all': ('cms/js/select2/select2.css',
                    'cms/js/select2/select2-bootstrap.css',)
        }
        js = (#'cms/js/libs/jquery.min.js',
              'cms/js/select2/select2.js',)
class UserSelectAdminWidget(Select):
    """Special widget used in page permission inlines, because we have to render
    an add user (plus) icon, but point it somewhere else - to special user creation
    view, which is accessible only if user haves "add user" permissions.

    Current user should be assigned to widget in form constructor as an user
    attribute.
    """
    def render(self, name, value, attrs=None, choices=()):
        output = [super(UserSelectAdminWidget, self).render(name, value, attrs, choices)]
        # Only show the "+" icon when the current user may create PageUsers.
        if hasattr(self, 'user') and (self.user.is_superuser or \
            self.user.has_perm(PageUser._meta.app_label + '.' + PageUser._meta.get_add_permission())):
            # append + icon
            add_url = '../../../cms/pageuser/add/'
            output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
                    (add_url, name))
            output.append(u'<img src="%sicon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (CMS_ADMIN_ICON_BASE, _('Add Another')))
        return mark_safe(u''.join(output))
class AppHookSelect(Select):
    """Special widget used for the App Hook selector in the Advanced Settings
    of the Page Admin. It adds support for a data attribute per option and
    includes supporting JS into the page.
    """

    class Media:
        js = ('cms/js/modules/cms.base.js', 'cms/js/modules/cms.app_hook_select.js', )

    def __init__(self, attrs=None, choices=(), app_namespaces={}):
        # Mapping of app hook value -> namespace, exposed as data-namespace.
        self.app_namespaces = app_namespaces
        super(AppHookSelect, self).__init__(attrs, choices)

    def render_option(self, selected_choices, option_value, option_label):
        if option_value is None:
            option_value = ''
        option_value = force_text(option_value)
        if option_value in selected_choices:
            selected_html = mark_safe(' selected="selected"')
            if not self.allow_multiple_selected:
                # Only allow for a single selection.
                selected_choices.remove(option_value)
        else:
            selected_html = ''

        # Attach the namespace (if any) so the JS can react to selection.
        if option_value in self.app_namespaces:
            data_html = mark_safe(' data-namespace="%s"' % self.app_namespaces[option_value])
        else:
            data_html = ''

        return '<option value="%s"%s%s>%s</option>' % (
            option_value,
            selected_html,
            data_html,
            force_text(option_label),
        )

    def render_options(self, choices, selected_choices):
        selected_choices = set(force_text(v) for v in selected_choices)
        output = []
        for option_value, option_label in chain(self.choices, choices):
            output.append(self.render_option(selected_choices, option_value, option_label))
        return '\n'.join(output)
| amaozhao/basecms | cms/forms/widgets.py | Python | mit | 10,371 | 0.004339 |
from django.forms import Media
from wagtail.admin.staticfiles import versioned_static
# Feature objects: these are mapped to feature identifiers within the rich text
# feature registry (wagtail.core.rich_text.features). Each one implements
# a `construct_options` method which modifies an options dict as appropriate to
# enable that feature.
# Additionally, a Feature object defines a media property
# (https://docs.djangoproject.com/en/stable/topics/forms/media/) to specify css/js
# files to import when the feature is active.
class Feature:
    """Base rich-text feature: records the js/css assets the feature needs
    and exposes them as a Django Media object with versioned URLs."""

    def __init__(self, js=None, css=None):
        self.js = js or []
        self.css = css or {}

    @property
    def media(self):
        versioned_js = [versioned_static(path) for path in self.js]
        versioned_css = {
            media_type: [versioned_static(path) for path in paths]
            for media_type, paths in self.css.items()
        }
        return Media(js=versioned_js, css=versioned_css)
class BooleanFeature(Feature):
    """
    A feature which is enabled by a boolean flag at the top level of
    the options dict
    """
    def __init__(self, option_name, **kwargs):
        super().__init__(**kwargs)
        # Name of the top-level options key that switches this feature on.
        self.option_name = option_name

    def construct_options(self, options):
        # Enabling is just setting the flag; there is no per-feature payload.
        options[self.option_name] = True
class ListFeature(Feature):
    """
    Abstract class for features that are defined in a list within the options dict.
    Subclasses must define option_name
    """
    def __init__(self, data, **kwargs):
        super().__init__(**kwargs)
        # The entry this feature contributes to the option list.
        self.data = data

    def construct_options(self, options):
        # setdefault creates the list on first use and appends either way,
        # replacing the former explicit "if key not in options" dance.
        options.setdefault(self.option_name, []).append(self.data)
class EntityFeature(ListFeature):
    """A feature which is listed in the entityTypes list of the options"""
    # Key under which Draftail expects entity definitions.
    option_name = 'entityTypes'
class BlockFeature(ListFeature):
    """A feature which is listed in the blockTypes list of the options"""
    # Key under which Draftail expects block type definitions.
    option_name = 'blockTypes'
class InlineStyleFeature(ListFeature):
    """A feature which is listed in the inlineStyles list of the options"""
    # Key under which Draftail expects inline style definitions.
    option_name = 'inlineStyles'
| timorieber/wagtail | wagtail/admin/rich_text/editors/draftail/features.py | Python | bsd-3-clause | 2,173 | 0.001381 |
from contextlib import closing
from flask import current_app
from summer.app import create_app
from summer.db.connect import connect_db
def init_db():
    """(Re)create the database schema by executing summer/schema.sql
    inside an application context."""
    app = create_app('product')
    app.app_context().push()
    with closing(connect_db()) as db:
        with open('./summer/schema.sql', mode='r') as schema_file:
            db.cursor().executescript(schema_file.read())
        db.commit()
if __name__ == '__main__':
init_db()
| zhongql/summer | tool/initdb.py | Python | mit | 448 | 0.002232 |
from Model import *
| joshrule/LOTlib | LOTlib/Projects/NumberGame/__init__.py | Python | gpl-3.0 | 21 | 0 |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import json
import logging
from flask import make_response, request, Response, current_app
from werkzeug.routing import BaseConverter
from netman.api import NETMAN_API_VERSION
from netman.core.objects.exceptions import UnknownResource, Conflict, InvalidValue
def to_response(fn):
    """Decorator turning an API method's ``(code, data)`` result into a Flask
    response, mapping domain exceptions to HTTP status codes:
    InvalidValue -> 400, UnknownResource -> 404, Conflict -> 409,
    NotImplementedError -> 501, anything else -> 500.
    """
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        try:
            result = fn(self, *args, **kwargs)
            if isinstance(result, Response):
                # The handler built a full response itself; pass it through
                # untouched (skips the logging/version headers below).
                return result
            else:
                code, data = result
                if data is not None:
                    response = json_response(data, code)
                else:
                    response = make_response("", code)
        except InvalidValue as e:
            response = exception_to_response(e, 400)
        except UnknownResource as e:
            response = exception_to_response(e, 404)
        except Conflict as e:
            response = exception_to_response(e, 409)
        except NotImplementedError as e:
            response = exception_to_response(e, 501)
        except Exception as e:
            # Unexpected failure: log the traceback, answer 500.
            logging.exception(e)
            response = exception_to_response(e, 500)

        self.logger.info("Responding {} : {}".format(response.status_code, response.data))

        # Version negotiation: answer with the lowest of the client's maximum
        # supported version and ours.
        if 'Netman-Max-Version' in request.headers:
            response.headers['Netman-Version'] = min(
                float(request.headers['Netman-Max-Version']),
                NETMAN_API_VERSION)

        return response

    return wrapper
def exception_to_response(exception, code):
    """Serialize *exception* into a JSON error response with HTTP *code*.

    When the client sends the "Netman-Verbose-Errors" header, the exception's
    module and class names are included so it can be identified remotely;
    otherwise an empty message is replaced with a generic description.
    """
    data = {'error': str(exception)}
    if "Netman-Verbose-Errors" in request.headers:
        if hasattr(exception, "__module__"):
            data["error-module"] = exception.__module__
        data["error-class"] = exception.__class__.__name__
    else:
        if data['error'] == "":
            if hasattr(exception, "__module__"):
                data['error'] = "Unexpected error: {}.{}".format(exception.__module__, exception.__class__.__name__)
            else:
                data['error'] = "Unexpected error: {}".format(exception.__class__.__name__)

    # json_response already sets the status code; the former second
    # ``response.status_code = code`` assignment was redundant.
    return json_response(data, code)
def json_response(data, code):
    """Build a JSON response with the given payload and HTTP status code."""
    body = json.dumps(data, indent=None)
    response = current_app.response_class(
        body, mimetype='application/json; charset=UTF-8')
    response.status_code = code
    return response
class RegexConverter(BaseConverter):
    """URL converter whose matching pattern is supplied in the route rule."""
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        # The first converter argument is used verbatim as the regex.
        self.regex = items[0]
class BadRequest(InvalidValue):
    # Maps to HTTP 400 via the InvalidValue branch in to_response.
    pass
class MultiContext(object):
    """Compose several context-manager factories over the same switch API.

    Each *context* class is instantiated with *switch_api* and given the
    request *parameters* via ``process``; all instances are then entered and
    exited together.
    """
    def __init__(self, switch_api, parameters, *contexts):
        self.context_instances = []
        for context in contexts:
            obj = context(switch_api)
            obj.process(parameters)
            self.context_instances.append(obj)
        self.parameters = parameters

    def __enter__(self):
        # Yields the list of values produced by each child __enter__.
        return [(obj.__enter__()) for obj in self.context_instances]

    def __exit__(self, type_, value, traceback):
        # NOTE(review): exits in creation order and ignores child return
        # values, so exceptions always propagate — confirm that is intended.
        for context in self.context_instances:
            context.__exit__(type_, value, traceback)
| idjaw/netman | netman/api/api_utils.py | Python | apache-2.0 | 3,822 | 0.00157 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwtool(AutotoolsPackage):
    """bwtool is a command-line utility for bigWig files."""

    homepage = "https://github.com/CRG-Barcelona/bwtool"
    url = "https://github.com/CRG-Barcelona/bwtool/archive/1.0.tar.gz"

    version('1.0', 'cdd7a34ae457b587edfe7dc8a0bdbedd')

    # Build requires headers/libraries from the companion libbeato project.
    depends_on('libbeato')
| krafczyk/spack | var/spack/repos/builtin/packages/bwtool/package.py | Python | lgpl-2.1 | 1,568 | 0.000638 |
import bleach
from pyramid.config import Configurator
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
from .utils import load_local_settings
from sqlalchemy_i18n.manager import translation_manager
from .security import (
RootFactory,
group_membership,
)
from .views.task import check_task_expiration
from apscheduler.schedulers.background import BackgroundScheduler
# Module-level background scheduler: started at import time so that
# main() below can register the periodic task-expiration job on it.
scheduler = BackgroundScheduler()
scheduler.start()
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.

    Configures templating, the SQLAlchemy engine, authentication and
    authorization policies, the session factory, all URL routes, i18n,
    and the periodic task-expiration job before building the WSGI app.
    """
    settings['mako.directories'] = 'osmtm:templates'
    # Merge deployment-specific overrides (local.ini-style) into settings.
    load_local_settings(settings)
    # Bind the SQLAlchemy session and metadata to the configured engine.
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine
    # Cookie-ticket authentication; group_membership resolves principals.
    authn_policy = AuthTktAuthenticationPolicy(
        secret='super_secret',
        callback=group_membership)
    authz_policy = ACLAuthorizationPolicy()
    config = Configurator(settings=settings,
                          root_factory=RootFactory,
                          authentication_policy=authn_policy,
                          authorization_policy=authz_policy)
    # fixes backwards incompatibilities when running Pyramid 1.5a
    # https://pypi.python.org/pypi/pyramid#features
    config.include('pyramid_mako')
    # pyramid_tm uses the transaction module to begin/commit/rollback
    # transaction when requests begin/end.
    config.include('pyramid_tm')
    # enable exception logger
    config.include('pyramid_exclog')
    session_factory = UnencryptedCookieSessionFactoryConfig('itsasecret')
    config.set_session_factory(session_factory)
    config.add_static_view('static', 'static', cachebust=True)
    # -- general / auth routes -----------------------------------------
    config.add_route('home', '/')
    config.add_route('home_json', '/projects.json')
    config.add_route('about', '/about')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('oauth_callback', '/oauth_callback')
    # -- project routes ------------------------------------------------
    config.add_route('project_new', '/project/new')
    config.add_route('project_new_grid', '/project/new/grid')
    config.add_route('project_new_arbitrary', '/project/new/arbitrary')
    config.add_route('project_grid_simulate', '/project/grid_simulate')
    config.add_route('project_json', '/project/{project:\d+}.json')
    config.add_route('project', '/project/{project:\d+}')
    config.add_route('project_edit', '/project/{project:\d+}/edit')
    config.add_route('project_publish', '/project/{project:\d+}/publish')
    config.add_route('project_check_for_update',
                     '/project/{project:\d+}/check_for_updates')
    config.add_route('project_contributors',
                     '/project/{project:\d+}/contributors', xhr=True)
    config.add_route('project_stats', '/project/{project:\d+}/stats')
    config.add_route('project_tasks_json', '/project/{project:\d+}/tasks.json')
    config.add_route('project_user_add', '/project/{project:\d+}/user/{user}',
                     request_method="PUT")
    config.add_route('project_user_delete',
                     '/project/{project:\d+}/user/{user}',
                     request_method="DELETE")
    config.add_route('project_preset', '/project/{project:\d+}/preset')
    config.add_route('project_users', '/project/{project:\d+}/users')
    config.add_route('project_invalidate_all',
                     '/project/{project:\d+}/invalidate_all')
    config.add_route('project_message_all',
                     '/project/{project:\d+}/message_all')
    # -- task routes (mostly XHR endpoints used by the task UI) --------
    config.add_route('task_random', '/project/{project:\d+}/random', xhr=True)
    config.add_route('task_empty', '/project/{project:\d+}/task/empty',
                     xhr=True)
    config.add_route('task_xhr', '/project/{project:\d+}/task/{task:\d+}',
                     xhr=True)
    config.add_route('task_done',
                     '/project/{project:\d+}/task/{task:\d+}/done', xhr=True)
    config.add_route('task_lock',
                     '/project/{project:\d+}/task/{task:\d+}/lock', xhr=True)
    config.add_route('task_unlock',
                     '/project/{project:\d+}/task/{task:\d+}/unlock', xhr=True)
    config.add_route('task_split',
                     '/project/{project:\d+}/task/{task:\d+}/split', xhr=True)
    config.add_route('task_validate',
                     '/project/{project:\d+}/task/{task:\d+}/validate',
                     xhr=True)
    config.add_route('task_cancel_done',
                     '/project/{project:\d+}/task/{task:\d+}/cancel_done',
                     xhr=True)
    config.add_route('task_comment',
                     '/project/{project:\d+}/task/{task:\d+}/comment',
                     xhr=True)
    config.add_route('task_gpx', '/project/{project:\d+}/task/{task:\d+}.gpx')
    config.add_route('task_osm', '/project/{project:\d+}/task/{task:\d+}.osm')
    config.add_route('task_assign',
                     '/project/{project:\d+}/task/{task:\d+}/user/{user}',
                     xhr=True)
    config.add_route('task_assign_delete',
                     '/project/{project:\d+}/task/{task:\d+}/user', xhr=True,
                     request_method="DELETE")
    config.add_route('task_difficulty',
                     '/project/{project:\d+}/task/{task:\d+}/difficulty/' +
                     '{difficulty:\d+}', xhr=True)
    config.add_route('task_difficulty_delete',
                     '/project/{project:\d+}/task/{task:\d+}/difficulty',
                     xhr=True, request_method='DELETE')
    config.add_route('task_users',
                     '/project/{project:\d+}/task/{task:\d+}/users')
    # -- label routes --------------------------------------------------
    config.add_route('labels', '/labels')
    config.add_route('label_new', '/label/new')
    config.add_route('label_edit', '/label/{label:\d+}/edit')
    config.add_route('label_delete', '/label/{label:\d+}/delete')
    # -- user routes ---------------------------------------------------
    config.add_route('users', '/users')
    config.add_route('users_json', '/users.json')
    config.add_route('user_messages', '/user/messages')
    config.add_route('user_messages_check', '/user/messages/check')
    config.add_route('user', '/user/{username}')
    config.add_route('user_admin', '/user/{id:\d+}/admin')
    config.add_route('user_project_manager', '/user/{id:\d+}/project_manager')
    config.add_route('user_validator', '/user/{id:\d+}/validator')
    config.add_route('user_experienced_mapper',
                     '/user/{id:\d+}/experienced_mapper')
    config.add_route('user_prefered_editor',
                     '/user/prefered_editor/{editor}', xhr=True)
    config.add_route('user_prefered_language',
                     '/user/prefered_language/{language}', xhr=True)
    # -- license / message / misc routes -------------------------------
    config.add_route('licenses', '/licenses')
    config.add_route('license_new', '/license/new')
    config.add_route('license', '/license/{license:\d+}')
    config.add_route('license_edit', '/license/{license:\d+}/edit')
    config.add_route('license_delete', '/license/{license:\d+}/delete')
    config.add_route('message_read', '/message/read/{message:\d+}')
    config.add_route('markdown_ref', '/markdown_ref')
    # i18n: translation directories and the locale negotiation strategy.
    config.add_translation_dirs('osmtm:locale')
    config.set_locale_negotiator('osmtm.i18n.custom_locale_negotiator')
    translation_manager.options.update({
        'locales': settings['available_languages'].split(),
        'get_locale_fallback': True
    })
    config.scan(ignore=['osmtm.tests', 'osmtm.scripts'])
    # Allow <p> and <pre> tags to survive bleach sanitization globally.
    bleach.ALLOWED_TAGS.append(u'p')
    bleach.ALLOWED_TAGS.append(u'pre')
    # Schedule the recurring job that releases expired task locks.
    check_expiration_interval = int(
        settings.get('check_expiration_interval', 5)
    )
    scheduler.add_job(check_task_expiration, 'interval',
                      seconds=check_expiration_interval,
                      replace_existing=True)
    return config.make_wsgi_app()
| ethan-nelson/osm-tasking-manager2 | osmtm/__init__.py | Python | bsd-2-clause | 7,927 | 0.007191 |
# DJANGO 1.10.5 LOCAL SETTINGS
# https://docs.djangoproject.com/en/1.10/topics/settings/
# ==================================================================================================
# Pull in every shared setting, then override for local development below.
from .base import *
# Local development only: never deploy with DEBUG enabled.
DEBUG = True
# APP CONFIGURATION
# https://docs.djangoproject.com/en/1.10/ref/applications
# ==================================================================================================
# Add your local apps here
INSTALLED_APPS += []
| justinmnoor/geodjangotemplate | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings/local.py | Python | mit | 457 | 0.002188 |
"""
Created on Mar 8, 2014
@author: jtaghiyar
"""
from helpers import validate_argument
import logging
class Plumber(object):
    """
    pipe components into a pipeline based on the given configuration and
    generate a python script.
    """
    def __init__(self, pipeline_script, workflow=None):
        ## the file where the resultant script is written
        self.pipeline_script = pipeline_script
        if workflow is not None:
            self.wf = workflow
    @property
    def wf(self):
        # Workflow object; setting it (below) also derives all per-task
        # lookup tables used during script generation.
        return self._wf
    @wf.setter
    def wf(self, value):
        self._wf = value
        self._wf.inflate()
        # Task tags in breadth-first (dependency-respecting) order.
        self.tags = [t for t in self.wf.bfs()]
        # Per-task scheduling attributes keyed by tag.
        self.modes = dict((n.tag,n.use_cluster) for n in self.wf.nodes.values())
        self.mems = dict((n.tag,n.memory) for n in self.wf.nodes.values())
        self.num_cpus = dict((n.tag,n.num_cpus) for n in self.wf.nodes.values())
        # Filled in lazily by the _get_* methods before script generation.
        self.parent_tasks = {}
        self.io_connections = {}
        self.input_arguments = {}
        self.decors = {}
        self.func_strs = {}
        self.component_names = [n.component_name for n in self.wf.nodes.values()]
        # Import spec per component: 'component_main as <name>_main'.
        # Breakpoints are virtual tasks and have no importable component.
        self.import_components = {k:['component_main as ' + k + '_main']
                                  for k in self.component_names if k != 'breakpoint'}
    def make_script(self, sample_id):
        """Generate the complete runnable pipeline script for *sample_id*."""
        logging.info('making pipeline for %s ...' % (sample_id))
        import_pymod = {
            'os' :[],
            'sys' :[],
            'time' :[],
            'ruffus' :[],
            'traceback' :[],
            'multiprocessing' :['Queue'],
        }
        import_factory = {
            'kronos.pipelineui' :[],
            'kronos.run_manager' :['RunManager'],
            'kronos.job_manager' :[
                'DrmaaJobManager',
                'SgeJobManager',
                'LocalJobManager'
            ],
            'kronos.utils' :['Task'],
            'kronos.helpers' :[
                'JobFailureError',
                'flushqueue'
            ],
            'kronos.logger' :[
                'PipelineLogger',
                'LogWarnErr',
                'LogInfo'
            ],
        }
        # Derive all per-task metadata first, then emit the script sections
        # in their fixed order.
        self._get_parent_tasks()
        self._get_io_connections()
        self._get_input_arguments()
        self._get_decorators()
        self._get_function_signatures()
        self._write_importing(import_pymod, comment="import python modules as well as pipelinui")
        self._write_importing(import_factory, comment="import factory modules")
        self._write_initilization()
        self._write_env_preparation()
        self._write_importing(self.import_components, comment="import components")
        self._write_generating_tasks()
        self._write_ruffus_pipeline()
        self._write_last_task()
        self._write_main()
        logging.info('successfully completed.')
    def paste_pipelines(self, pipelines, config_file):
        """paste all the pipelines scripts into a single one."""
        ## store ruffus tasks in a list
        task_funcs = []
        import_dict = {
            'os' :[],
            'sys' :[],
            'ruffus' :[],
            'subprocess' :[],
            'datetime' :['datetime'],
            'multiprocessing' :['Queue'],
            'kronos.pipelineui' :[],
            'kronos.helpers' :[
                'make_dir',
                'kill_jobs',
                'flushqueue',
                'make_intermediate_cmd_args',
                'KeywordsManager'
            ],
            'kronos.logger' :[
                'PipelineLogger',
                'LogWarnErr',
                'LogInfo'
            ],
            'kronos.kronos_version' :['kronos_version']
        }
        self._write_config_file(config_file)
        self._write_importing(import_dict)
        self._write_argument_validation()
        self._write_logger()
        self._print(comment="ruffus pipeline")
        # Emit one ruffus task per sub-pipeline; each task launches the
        # corresponding intermediate script in a subprocess.
        for i,p in enumerate(pipelines):
            n = p.pipeline_name
            s = p.sample_id
            # NOTE(review): this local `cmd` is built but never used below —
            # the generated code assembles its own command string. Looks like
            # dead code; confirm before removing.
            cmd = '"{python_installation} {4}/{0}.py '
            cmd += '--pipeline_name {0} --working_dir {working_dir} '
            cmd += '--components_dir {components_dir} --run_id {1} '
            cmd += '--drmaa_library_path {drmaa_library_path} '
            cmd += "--sample_id {2} --qsub_options '{qsub_options}' "
            cmd += '--num_jobs {num_jobs} --log_file {3} '
            cmd += '--extension {extension}" '
            cmd += '.format(pipeline_name, run_id, sample_id, log_file, intermediate_path, **args)'
            self._print(message="@ruffus.follows()")
            self._print(message="@LogWarnErr(l)")
            self._print(message="@LogInfo(l)")
            self._print(message="def task_{0}(pipeline_name='{1}'):".format(i,n))
            self._print(message="sample_id = '{}'".format(s), tab=1)
            self._print(message="intermediate_path = os.path.join(os.path.dirname(sys.argv[0]),'intermediate_pipeline_scripts')", tab=1)
            self._print(message="pipeline_script = '{0}/{1}.py'.format(intermediate_path, pipeline_name)", nl=True, tab=1)
            self._print(message="args['pipeline_name'] = pipeline_name", tab=1)
            self._print(message="args['run_id'] = run_id", tab=1)
            self._print(message="args['sample_id'] = sample_id", tab=1)
            self._print(message="args['log_file'] = log_file", nl=True, tab=1)
            self._print(message="km = KeywordsManager(pipeline_name, run_id, sample_id, args['working_dir'])", tab=1)
            self._print(message="old_script_content = open(pipeline_script, 'r').read()", tab=1)
            self._print(message="new_script_content = km.replace_keywords(old_script_content)", tab=1)
            self._print(message="f = open(pipeline_script, 'w')", tab=1)
            self._print(message="f.write(new_script_content)", tab=1)
            self._print(message="f.close()", nl=True, tab=1)
            self._print(message="cmd = '{0} {1}'.format(args['python_installation'], pipeline_script)", tab=1)
            self._print(message="cmd_args = make_intermediate_cmd_args(args)", tab=1)
            self._print(message="cmd = cmd + ' ' + ' '.join(cmd_args)", tab=1)
            self._print(message="print 'running {} pipeline with command: %s' % (cmd)".format(n), nl=True, tab=1)
            self._print(message="proc = subprocess.Popen(cmd, shell=True)", tab=1)
            self._print(message="job_ids.put(proc.pid)", tab=1)
            self._print(message="try:", tab=1)
            self._print(message="cmdout, cmderr = proc.communicate()", tab=2)
            self._print(message="job_rcs.put(proc.returncode)", tab=2)
            self._print(message="except:", tab=1)
            self._print(message="cmd = 'kill %s' % (proc.pid)", tab=2)
            self._print(message="os.system(cmd)", tab=2)
            self._print(message="finally:", tab=1)
            self._print(message="print '{} pipeline finished with exit code %s' % (proc.returncode)".format(n), nl=True, tab=2)
            self._print(message="if cmdout is not None:", tab=1)
            self._print(message="print >> sys.stdout, cmdout", nl=True, tab=2)
            self._print(message="if cmderr is not None:", tab=1)
            self._print(message="print >> sys.stderr, cmderr", nl=True, tab=2)
            task_funcs.append('task_{}'.format(i))
        last = ','.join(task_funcs)
        self._print(comment="running pipeline")
        # Exit-code convention in the generated runner: 99 == stopped by
        # breakpoints, 98 == some sub-pipeline failed, -1 == unexpected error.
        self._print(message="try:")
        self._print(message="ruffus.pipeline_run([{}], multiprocess=args['num_pipelines'])".format(last), tab=1)
        self._print(message="lrc = flushqueue(job_rcs)", tab=1)
        self._print(message="if len(lrc) == len(filter(lambda x: x == 99, lrc)):", tab=1)
        self._print(message="print 'pipeline successfully stopped by breakpoints.'", tab=2)
        self._print(message="rc = 99", tab=2)
        self._print(message="elif not all(rc == 0 for rc in lrc):", tab=1)
        self._print(message="rc = 98", nl=True, tab=2)
        self._print(message="except:")
        self._print(message="rc = -1", tab=1)
        self._print(message="exc_type, exc_obj, exc_tb = sys.exc_info()", tab=1)
        self._print(message="print >> sys.stderr, '{0} pipeline failed due to error: {1}, {2}'.format(pipeline_name, exc_type, exc_obj)", tab=1)
        self._print(message="kill_jobs(job_ids)", nl=True, tab=1)
        self._print(message="finally:")
        self._print(message="pl.log_pipeline_footer(l)", tab=1)
        self._print(message="pl.log_info(l, '{0} pipeline finished with exit code {1}. Please check the logs.'.format(pipeline_name, rc))", tab=1)
        self._print(message="sys.exit(rc)", nl=True, tab=1)
        logging.info('successfully pasted pipelines.')
    def _get_parent_tasks(self):
        """Map each tag to a comma-joined list of its parent task functions."""
        for t in self.tags:
            ptasks = ["{0}_{1}_function".format(self.wf.nodes[p].component_name, p)
                      for p in self.wf.nodes[t].dependencies]
            ptasks = ", ".join(ptasks)
            self.parent_tasks[t] = ptasks
    def _get_io_connections(self):
        """Build the keyword-argument string wiring task outputs to inputs."""
        group_ioc = lambda n, p: [ioc for ioc in n.io_connections if ioc.stop_param == p]
        for t in self.tags:
            node = self.wf.nodes[t]
            iocs = dict((ioc.stop_param, group_ioc(node, ioc.stop_param))
                        for ioc in node.io_connections)
            iostr = []
            for k, v_list in iocs.items():
                ## prepend key k (parameter name) with '__pipeline__' to avoid
                ## accidental collision with python reserved keywords
                k = '__pipeline__' + k
                ## v_list is a list of tuples (tag, param)
                ## change it to list of [tag_component.args.param]
                v = [i.start_node + "_component.args." + i.start_param for i in v_list]
                if len(v) == 1:
                    ## make sure merge node always has a list as its input.
                    if '_MERGER_' in t:
                        v = '[' + v[0] + ']'
                    else:
                        v = v[0]
                ## change the list to a string like '[1,2,...]'
                else:
                    v = '[' + ', '.join(v) + ']'
                ## paste [k, v] pair together to generate the string
                ## '__pipeline__k=[__tagname__.component.args.v]'
                iostr.append("=".join([k, v]))
            self.io_connections[t] = ", ".join(iostr)
    def _get_input_arguments(self):
        """Build the literal keyword-argument string for each task's inputs."""
        for t in self.tags:
            d = self.wf.nodes[t].input_arguments
            c = self.wf.nodes[t].component_name
            # '__OPTIONAL__' placeholders are dropped; everything else is
            # validated and rendered with repr() so it survives as source.
            astr = ["=".join(['__pipeline__' + k, repr(validate_argument(v,k,c))])
                    for k,v in d.iteritems() if v != '__OPTIONAL__']
            self.input_arguments[t] = ", ".join(astr)
    def _get_decorators(self):
        """Compose the ruffus decorator stack emitted above each task."""
        for t in self.tags:
            c = self.wf.nodes[t].component_name
            dp = self.wf.nodes[t].dependencies
            decor = "@ruffus.follows(*[{0}])\n".format(self.parent_tasks[t])
            if c == 'breakpoint':
                decor += "@ruffus.parallel('{0}', '{1}', {2})\n".format(c, t, dp)
            else:
                decor += "@ruffus.parallel({0}_component.component_name, '{0}', {1})\n".format(t, dp)
                # Skip the task when its sentinel file already exists.
                decor += "@ruffus.check_if_uptodate(rm.sentinel_file_exists)\n"
            decor += "@LogWarnErr(l)\n"
            decor += "@LogInfo(l)"
            self.decors[t] = decor
    def _get_exception_handler(self, tag):
        """Return the try/except block run inside the generated task body.

        Uses the local or cluster job manager depending on the task's
        use_cluster flag; breakpoints raise KeyboardInterrupt instead.
        """
        mode = self.modes[tag]
        mem = self.mems[tag]
        ncpu = self.num_cpus[tag]
        newline = '\n'
        indent = ' ' * 4
        if self.wf.nodes[tag].component_name == 'breakpoint':
            expt_str = ("{ind}rm.generate_sentinel_file(task_name){nl}"
                        "{ind}raise KeyboardInterrupt('breakpoint')"
                        ).format(ind=indent, nl=newline)
            return expt_str
        if not mode:
            expt_str = ("{ind}try:{nl}{ind}{ind}rc = ljm.run_job"
                        "(cmd=run_script, job_name=job_name){nl}"
                        ).format(ind=indent, nl=newline)
        else:
            expt_str = ("{ind}try:{nl}{ind}{ind}rc = cjm.run_job"
                        "(cmd=run_script, mem='{mem}', ncpus={ncpu}, job_name=job_name)"
                        "{nl}").format(mem=mem, ncpu=ncpu, ind=indent, nl=newline)
        expt_str += ("{ind}{ind}job_rcs.put(rc){nl}"
                     "{ind}{ind}if rc == 0:{nl}"
                     "{ind}{ind}{ind}rm.generate_sentinel_file(task_name){nl}"
                     ).format(ind=indent, nl=newline)
        expt_str += ("{ind}except KeyboardInterrupt:{nl}"
                     "{ind}{ind}raise{nl}"
                     "{ind}except:{nl}"
                     "{ind}{ind}job_rcs.put(98){nl}"
                     "{ind}{ind}traceback.print_exc()").format(ind=indent, nl=newline)
        return expt_str
    def _get_function_signatures(self):
        """Build the full source text of each generated task function."""
        newline = '\n'
        indent = ' ' * 4
        for t in self.tags:
            c = self.wf.nodes[t].component_name
            chunk = self.wf.nodes[t].chunk
            bp = self.wf.nodes[t].boilerplate
            ## print it with quotations if string
            if isinstance(chunk, str):
                chunk = repr(chunk)
            if isinstance(bp, str):
                bp = repr(bp)
            func_str = ("def {0}_{1}_function(*inargs):{nl}"
                        "{ind}component_name, task_name, _ = inargs{nl}"
                        "{ind}print '%s for %s started in %s pipeline' % "
                        "(task_name, component_name, args.pipeline_name)"
                        "{nl}").format(c, t, ind=indent, nl=newline)
            if c == 'breakpoint':
                func_str += ("{ind}print 'breakpoint happened in %s' % (task_name)"
                             "{nl}").format(ind=indent, nl=newline)
            else:
                func_str += ("{ind}run_script = rm.generate_script({0}_task, {1}, {2}){nl}"
                             "{ind}job_name = rm.get_filename_prefix(task_name){nl}"
                             "{ind}time.sleep(1)"
                             "{nl}").format(t, chunk, bp, ind=indent, nl=newline)
            func_str += self._get_exception_handler(t)
            self.func_strs[t] = func_str
    def _print(self, message=None, comment=None, nl=False, tab=None):
        """Write a line (optionally indented), a banner comment, or a blank
        line to the output script."""
        try:
            if tab:
                self.pipeline_script.write(" " * 4 * tab)
            if message:
                self.pipeline_script.write(message + '\n')
            if comment:
                comment_str = "#" + "=" * 80 + "\n#" + comment + "\n#" + "-" * 80
                self.pipeline_script.write(comment_str + '\n')
            if nl:
                self.pipeline_script.write('\n')
        except:
            raise Exception("failed to write to %s" % self.pipeline_script)
    def _write_importing(self, import_dict, comment='import modules'):
        """Emit import statements from a {module: [names]} mapping."""
        self._print(comment=comment)
        for k, v in import_dict.iteritems():
            if len(v) == 0:
                self._print(message = "import {0}".format(k))
            else:
                v = ", ".join(v)
                self._print(message = "from {0} import {1}".format(k,v))
        self._print(nl=True)
    def _write_initilization(self):
        """Emit run-manager, job-manager and logger setup code."""
        self._print(comment="initialization")
        self._print(message="args = kronos.pipelineui.args")
#         self._print(message="sample_id = args.sample_id")
        self._print(message="rm = RunManager(args.run_id, args.pipeline_name, args.working_dir)")
        # Only instantiate the job managers the workflow actually needs.
        if not all(self.modes.values()):
            self._print(message="ljm = LocalJobManager(rm.logs_dir, rm.outputs_dir)")
        if any(self.modes.values()):
            self._print(message="if args.job_scheduler.upper() == 'SGE':")
            self._print(message="cjm = SgeJobManager(rm.logs_dir, rm.outputs_dir, args.qsub_options)", tab=1)
            self._print(message="elif args.job_scheduler.upper() == 'DRMAA':")
            self._print(message="try:", tab=1)
            self._print(message="cjm = DrmaaJobManager(args.drmaa_library_path, rm.logs_dir, rm.outputs_dir, args.qsub_options)", tab=2)
            self._print(message="except:", tab=1)
            self._print(message="print >> sys.stderr, 'failed to load DrmaaJobManager'", tab=2)
            self._print(message="traceback.print_exc()", tab=2)
            self._print(message="else:")
            self._print(message="print >> sys.stderr, 'invalid job_scheduler: {}'.format(args.job_scheduler)", tab=1)
        self._print(message="pl = PipelineLogger()")
        self._print(message="l = pl.get_logger(args.pipeline_name, args.log_file)")
        self._print(nl=True)
    def _write_env_preparation(self):
        """Emit sys.path setup and the shared return-code queue."""
        self._print(comment="environment preparations")
        self._print(message="sys.path.insert(0, args.components_dir)")
        self._print(message="job_rcs = Queue()", nl=True)
    def _write_generating_tasks(self):
        """Emit the Component/Task construction code for every task."""
        self._print(comment="generating tasks")
        for t in self.tags:
            c = self.wf.nodes[t].component_name
            if c == 'breakpoint':
                continue
            a = self.input_arguments[t]
            i = self.io_connections[t]
            e = self.wf.nodes[t].env_vars
            #update task requirements based on the GENERAL requirements
            reqs = self.wf.nodes[t].requirements
            for k,v in reqs.iteritems():
                if not v:
                    reqs[k] = self.wf.general_section.get(k)
            #update requirements of implicit merge
            if '__MERGER__' in t:
                reqs = self.wf.general_section
            self._print(message="{0}_component = {1}_main.Component('{1}', component_parent_dir=args.components_dir)".format(t, c))
            self._print(message="{0}_task = Task('{0}', {0}_component)".format(t))
            self._print(message="{0}_task.update_comp_args({1})".format(t, ", ".join([a,i])))
            self._print(message="{0}_prefix = rm.get_filename_prefix('{0}')".format(t))
            self._print(message="{0}_task.update_comp_output_filenames({0}_prefix, rm.outputs_dir, args.no_prefix)".format(t))
            self._print(message="{0}_task.update_comp_env_vars({1})".format(t, e))
#             self._print(message="{0}_task.update_comp_reqs({1})".format(t, self.wf.general_section))
            self._print(message="{0}_task.update_comp_reqs({1})".format(t, reqs))
            self._print(nl=True)
    def _write_ruffus_pipeline(self):
        """Emit each task's decorator stack followed by its function body."""
        self._print(comment="ruffus pipeline")
        for t in self.tags:
            self._print(message=self.decors[t])
            s = self.func_strs[t]
            self._print(s)
            self._print(nl=True)
    def _write_last_task(self):
        """Emit the no-op terminal task that follows every leaf task."""
        leaf_nodes_str = ""
        leafs = self.wf.leafs
        for i, l in enumerate(leafs):
            l = self.wf.nodes[l].component_name + '_' + l
            leafs[i] = l
            leaf_nodes_str += "{%s}_function, " % (i)
        leaf_nodes_str = leaf_nodes_str.format(*leafs)
        message = "@ruffus.follows(*[{}])\n".format(leaf_nodes_str)
        message += "def __last_task___function():\n"
        message += "    pass"
        self._print(message=message, nl=True)
    def _write_main(self):
        """Emit the generated script's main body: run or print the pipeline,
        aggregate exit codes, and handle breakpoints/failures."""
        user_colour_scheme = {
            "colour_scheme_index" :1,
            "Pipeline" :{"fontcolor" : '"#FF3232"' },
            "Key" :{"fontcolor" : "Red",
                    "fillcolor" : '"#F6F4F4"' },
            "Task to run" :{"linecolor" : '"#0044A0"' },
            "Final target" :{"fillcolor" : '"#EFA03B"',
                             "fontcolor" : "black",
                             "dashed" : 0
                             }
        }
        printout_cmd = "ruffus.pipeline_printout_graph({0}, {1}, [__last_task___function], "
        printout_cmd += "draw_vertically = {2}, no_key_legend = {3}, user_colour_scheme = {4})"
        printout_cmd = printout_cmd.format("args.pipeline_name + '.' + args.extension",
                                           "args.extension",
                                           "args.draw_vertically",
                                           "args.no_key_legend",
                                           user_colour_scheme)
        self._print(comment="main body")
        self._print(message="try:")
        self._print(message="if not args.print_only:", tab=1)
        self._print(message="ruffus.pipeline_run(__last_task___function, multithread=args.num_jobs, verbose=0)", tab=2)
        self._print(message="else:", tab=1)
        self._print(message="cwd = os.getcwd()", tab=2)
        self._print(message="os.chdir(rm.pipeline_dir)", tab=2)
        self._print(message=printout_cmd, tab=2)
        self._print(message="os.chdir(cwd)", tab=2, nl=True)
        self._print(message="lrc = flushqueue(job_rcs)", tab=1)
        self._print(message="if all(rc == 0 for rc in lrc):", tab=1)
        self._print(message="EXIT_CODE = 0", tab=2)
        self._print(message="else:", tab=1)
        self._print(message="EXIT_CODE = 98", tab=2, nl=True)
        self._print(message="except:")
        self._print(message="exc_type, exc_obj, exc_tb = sys.exc_info()", tab=1)
        self._print(message="##exception object is of type <class 'ruffus.ruffus_exceptions.RethrownJobError'>.", tab=1)
        self._print(message="##exc_obj.args[0][3] gives the message in the original exception.", tab=1)
        self._print(message="if exc_obj.args[0][3] == '(breakpoint)':", tab=1)
        self._print(message="print 'breakpoint happened in %s pipeline' % (args.pipeline_name)", tab=2)
        if not all(self.modes.values()):
            self._print(message="ljm.kill_all()", tab=2)
        if any(self.modes.values()):
            self._print(message="try:", tab=2)
            self._print(message="cjm.kill_all()", tab=3)
            self._print(message="except:", tab=2)
            self._print(message="pass", tab=3)
        self._print(message="EXIT_CODE = 99", nl=True, tab=2)
        self._print(message="else:", tab=1)
        self._print(message="print >> sys.stderr, '{0} pipeline failed due to error: {1}, {2}'.format(args.pipeline_name, exc_type, exc_obj)", tab=2)
        if not all(self.modes.values()):
            self._print(message="ljm.kill_all()", tab=2)
        if any(self.modes.values()):
            self._print(message="try:", tab=2)
            self._print(message="cjm.kill_all()", tab=3)
            self._print(message="except:", tab=2)
            self._print(message="pass", tab=3)
        self._print(message="EXIT_CODE = -1", nl=True, tab=2)
        self._print(message="finally:")
        self._print(message="print '{0} pipeline finished with exit code {1}'.format(args.pipeline_name, EXIT_CODE)", tab=1)
        self._print(message="sys.exit(EXIT_CODE)", tab=1)
    def _write_logger(self):
        """Emit logger construction for the pasted master script."""
        self._print(comment="logger initialization")
        self._print(message="pl = PipelineLogger()")
        self._print(message="l = pl.get_logger(pipeline_name, log_file)")
        self._print(message="pl.log_pipeline_header(l, args, pipeline_name, run_id, kronos_version)", nl=True)
        self._print(message="args = vars(kronos.pipelineui.args)", nl=True)
    def _write_argument_validation(self):
        """Emit CLI-argument validation and working-directory setup code."""
        self._print(comment="argument validation")
        self._print(message="job_ids = Queue()")
        self._print(message="job_rcs = Queue()")
        self._print(message="rc = 0")
        self._print(message="args = kronos.pipelineui.args", nl=True)
        self._print(message="args.components_dir = os.path.abspath(args.components_dir)", nl=True)
        self._print(message="if args.pipeline_name is None:")
        self._print(message="pipeline_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]", nl=True, tab=1)
        self._print(message="else:")
        self._print(message="pipeline_name = args.pipeline_name", nl=True, tab=1)
        self._print(message="if args.run_id is None:")
        self._print(message="run_id = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')", nl=True, tab=1)
        self._print(message="else:")
        self._print(message="run_id = args.run_id", nl=True, tab=1)
        self._print(message="make_dir(args.working_dir)")
        self._print(message="args.working_dir = os.path.join(args.working_dir, run_id)")
        self._print(message="make_dir(args.working_dir)", nl=True)
        self._print(message="if args.log_file is None:")
        self._print(message="log_file = os.path.join(args.working_dir, '_'.join([pipeline_name, run_id]) + '.log')", nl=True, tab=1)
        self._print(message="else:")
        self._print(message="log_file = os.path.join(args.working_dir, '_'.join([args.log_file, run_id]) + '.log')", nl=True, tab=1)
        self._print(comment="make a copy of the config file")
        self._print(message="cfile = os.path.join(args.working_dir, pipeline_name + '_' + run_id + '.yaml')")
        self._print(message="with open(cfile, 'w') as cf:")
        self._print(message="cf.write(config_file_content)", nl=True, tab=1)
#         self._print(comment="limit the maximum number of simultaneous jobs")
#         self._print(message="max_num_total = 500")
#         self._print(message="max_num_pips = 10")
#         self._print(message="max_num_jobs = int(max_num_total / max_num_pips)")
# #         self._print(message="numok = lambda: args.num_pipelines * args.num_jobs <= max_num_total", nl=True)
# #         self._print(message="if not numok():")
# #         self._print(message="if args.num_pipelines > max_num_pips:", tab=1)
# #         self._print(message="args.num_pipelines = max_num_pips", tab=2)
# #         self._print(message="if not numok():", tab=2)
# #         self._print(message="args.num_jobs = max_num_jobs", tab=3)
# #         self._print(message="else:", tab=1)
# #         self._print(message="args.num_jobs = max_num_jobs", tab=2)
#         self._print(message="if args.num_pipelines > max_num_pips:")
#         self._print(message="args.num_pipelines = max_num_pips", tab=1)
#         self._print(message="if args.num_jobs > max_num_jobs:")
#         self._print(message="args.num_jobs = max_num_jobs", tab=1)
    def _write_config_file(self, config_file):
        """ print the content of the config file."""
        with open(config_file, 'r') as cfile:
            self._print(comment="config file content")
            self._print(message="config_file_content = '''")
            lines = cfile.readlines()
            for l in lines:
                self._print(message=l.strip('\n'))
            self._print(message="'''")
            self._print(nl=True)
| jtaghiyar/kronos | kronos/plumber.py | Python | mit | 28,257 | 0.008635 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import urlparse
def uc2utf8(input):
    """Encode *input* to a UTF-8 byte string when it is a ``unicode`` object.

    Python 2 helper: any non-``unicode`` value (already-encoded ``str``,
    None, ...) is returned unchanged.
    """
    ## argh! this feels wrong, but seems to be needed.
    # isinstance() instead of an exact type() comparison: also handles
    # unicode subclasses and is the idiomatic type check.
    if isinstance(input, unicode):
        return input.encode('utf-8')
    else:
        return input
class URL:
"""
This class is for wrapping URLs into objects. It's used
internally in the library, end users should not need to know
anything about this class. All methods that accept URLs can be
fed either with an URL object, a string or an urlparse.ParsedURL
object.
Addresses may be one out of three:
1) a path relative to the DAV-root, i.e. "someuser/calendar" may
refer to
"http://my.davical-server.example.com/pycaldav.php/someuser/calendar".
2) an absolute path, i.e. "/pycaldav.php/someuser/calendar"
3) a fully qualified URL,
i.e. "http://someuser:[email protected]/pycaldav.php/someuser/calendar".
Remark that hostname, port, user, pass is typically given when
instantiating the DAVClient object and cannot be overridden later.
As of 2013-11, some methods in the pycaldav library expected strings
and some expected urlparse.ParseResult objects, some expected
fully qualified URLs and most expected absolute paths. The purpose
of this class is to ensure consistency and at the same time
maintaining backward compatibility. Basically, all methods should
accept any kind of URL.
"""
def __init__(self, url):
if isinstance(url, urlparse.ParseResult) or isinstance(url, urlparse.SplitResult):
self.url_parsed = url
self.url_raw = None
else:
self.url_raw = url
self.url_parsed = None
def __nonzero__(self):
if self.url_raw or self.url_parsed:
return True
else:
return False
def __ne__(self, other):
return not self == other
def __eq__(self, other):
if str(self) == str(other):
return True
## The URLs could have insignificant differences
me = self.canonical()
if hasattr(other, 'canonical'):
other = other.canonical()
return str(me) == str(other)
## TODO: better naming? Will return url if url is already an URL
## object, else will instantiate a new URL object
@classmethod
def objectify(self, url):
if url is None:
return None
if isinstance(url, URL):
return url
else:
return URL(url)
## To deal with all kind of methods/properties in the ParseResult
## class
def __getattr__(self, attr):
if self.url_parsed is None:
self.url_parsed = urlparse.urlparse(self.url_raw)
if hasattr(self.url_parsed, attr):
return getattr(self.url_parsed, attr)
else:
return getattr(self.__unicode__(), attr)
## returns the url in text format
def __str__(self):
return self.__unicode__().encode('utf-8')
## returns the url in text format
def __unicode__(self):
if self.url_raw is None:
self.url_raw = self.url_parsed.geturl()
if isinstance(self.url_raw, unicode):
return self.url_raw
else:
return unicode(self.url_raw, 'utf-8')
def __repr__(self):
return "URL(%s)" % str(self)
def is_auth(self):
return self.username is not None
def unauth(self):
if not self.is_auth():
return self
return URL.objectify(urlparse.ParseResult(
self.scheme, '%s:%s' % (self.hostname, self.port),
self.path.replace('//', '/'), self.params, self.query, self.fragment))
def canonical(self):
"""
a canonical URL ... remove authentication details, make sure there
are no double slashes, and to make sure the URL is always the same,
run it through the urlparser
"""
url = self.unauth()
## this is actually already done in the unauth method ...
if '//' in url.path:
raise NotImplementedError("remove the double slashes")
## TODO: optimize - we're going to burn some CPU cycles here
if url.endswith('/'):
url = URL.objectify(str(url)[:-1])
## This looks like a noop - but it may have the side effect
## that urlparser be run (actually not - unauth ensures we
## have an urlparse.ParseResult object)
url.scheme
## make sure to delete the string version
url.url_raw = None
return url
    def join(self, path):
        """
        assumes this object is the base URL or base path. If the path
        is relative, it should be appended to the base. If the path
        is absolute, it should be added to the connection details of
        self. If the path already contains connection details and the
        connection details differ from self, raise an error.
        """
        if not path:
            return self
        path = URL.objectify(path)
        ## Refuse to join URLs that point at different endpoints.
        if (
            (path.scheme and self.scheme and path.scheme != self.scheme)
            or
            (path.hostname and self.hostname and path.hostname != self.hostname)
            or
            (path.port and self.port and path.port != self.port)
        ):
            raise ValueError("%s can't be joined with %s" % (self, path))
        ## NOTE(review): path.path is assumed non-empty here; a URL with
        ## connection details but an empty path would raise IndexError --
        ## confirm callers never pass one.
        if path.path[0] == '/':
            ## Absolute path: replaces the base path entirely.
            ret_path = uc2utf8(path.path)
        else:
            ## Relative path: append to the base, avoiding a double slash.
            sep = "/"
            if self.path.endswith("/"):
                sep = ""
            ret_path = "%s%s%s" % (self.path, sep, uc2utf8(path.path))
        return URL(urlparse.ParseResult(
            self.scheme or path.scheme, self.netloc or path.netloc, ret_path, path.params, path.query, path.fragment))
def make(url):
"""Backward compatibility"""
return URL.objectify(url)
| wasw100/pycaldav | pycaldav/lib/url.py | Python | gpl-3.0 | 5,906 | 0.004233 |
import notorm
import momoko
from tornado import gen
import psycopg2.extras
class AsyncRecord(notorm.record):
    """notorm record with non-blocking persistence via momoko/tornado.

    Both update() and save() are tornado coroutines and must be yielded
    (or awaited) by their callers.
    """

    @gen.coroutine
    def update(self, **args):
        """Set the given attributes on self and persist with update_qry."""
        for k, v in args.items():
            setattr(self, k, v)
        yield notorm.db.execute(
            self.update_qry,
            self._asdict(),
            cursor_factory=psycopg2.extras.NamedTupleCursor)

    @gen.coroutine
    def save(self):
        """Insert the record, or update it when it already has an id."""
        if self.id:
            ## Bug fix: update() is a coroutine; without `yield` it ran
            ## fire-and-forget, so save() returned before the UPDATE
            ## finished and any database error was silently dropped.
            yield self.update()
        else:
            cursor = yield notorm.db.execute(
                self.insert_qry,
                self.__dict__,
                cursor_factory=psycopg2.extras.NamedTupleCursor)
            results = cursor.fetchone()
            if results:
                ## First column of the RETURNING clause is the new key.
                self.id = results[0]
| subssn21/notorm | notorm/momoko.py | Python | mit | 919 | 0.015234 |
from pylons import tmpl_context as c
from adhocracy.lib.auth import can
from util import render_tile, BaseTile
class VariantRow(object):
    """Presentation wrapper for one text variant inside a SelectionTile.

    Resolves the variant's display text at construction time: while the
    proposal's adoption poll is running ("frozen"), the text as of the
    poll's begin time is shown; otherwise the current head text.
    """

    def __init__(self, tile, variant, poll):
        self.tile = tile
        self.variant = variant
        self.poll = poll
        page = tile.selection.page
        if tile.frozen:
            frozen_at = tile.selection.proposal.adopt_poll.begin_time
            self.text = page.variant_at(variant, frozen_at)
        else:
            self.text = page.variant_head(variant)

    @property
    def selected(self):
        # Is this the variant currently chosen for the selection?
        return self.variant == self.tile.selected

    @property
    def show(self):
        # Frozen tiles only display the chosen variant.
        return self.selected or not self.tile.frozen

    @property
    def can_edit(self):
        # Editing is disabled entirely while frozen.
        return (not self.tile.frozen) and \
            can.variant.edit(self.tile.selection.page, self.variant)

    @property
    def num_comments(self):
        return len(self.tile.selection.page.variant_comments(self.variant))
class SelectionTile(BaseTile):
    ## Tile for a proposal's selection of a norm page, listing the
    ## competing text variants that can be voted on.
    def __init__(self, selection):
        self.selection = selection
        self.selected = selection.selected
        ## (variant, poll) pairs for this selection
        self.variant_polls = self.selection.variant_polls
    @property
    def has_variants(self):
        ## NOTE(review): returns True when there are *fewer* than two
        ## variants (i.e. nothing beyond the default) -- the name suggests
        ## the opposite; confirm against template usage before changing.
        return len(self.selection.page.variants) < 2
    @property
    def num_variants(self):
        ## Number of variants excluding the default/head variant.
        return len(self.selection.page.variants) - 1
    @property
    def selected_text(self):
        variant = self.selected
        if self.frozen:
            ## Frozen: show the text as it was when the adoption poll began.
            freeze_time = self.selection.proposal.adopt_poll.begin_time
            return self.selection.page.variant_at(variant, freeze_time)
        else:
            return self.selection.page.variant_head(variant)
    @property
    def selected_num_comments(self):
        return len(self.selection.page.variant_comments(self.selected))
    @property
    def frozen(self):
        ## Variants may not change while the proposal's adoption poll runs.
        return self.selection.proposal.is_adopt_polling()
    def variant_rows(self):
        ## Generator of VariantRow view models, one per variant poll.
        for (variant, poll) in self.variant_polls:
            row = VariantRow(self, variant, poll)
            yield row
    @property
    def show_new_variant_link(self):
        if self.frozen:
            return False
        return can.norm.edit(self.selection.page, 'any')
return can.norm.edit(self.selection.page, 'any')
def row(selection):
    """Render the row template for *selection*; '' for missing/deleted."""
    if not selection or selection.is_deleted():
        return ""
    return render_tile('/selection/tiles.html', 'row',
                       SelectionTile(selection),
                       selection=selection, user=c.user, cached=True)
def variants(selection, tile=None):
    """Render the variants template; builds a SelectionTile unless one
    is supplied by the caller."""
    return render_tile('/selection/tiles.html', 'variants',
                       tile if tile is not None else SelectionTile(selection),
                       selection=selection, user=c.user, cached=True)
| DanielNeugebauer/adhocracy | src/adhocracy/lib/tiles/selection_tiles.py | Python | agpl-3.0 | 2,711 | 0.000369 |
from __future__ import absolute_import
# Start a Celery worker by executing:
# celery -A proj worker -l info
# Import available tasks
from proj.tasks import add, mul, xsum, fib
# Test short-running tasks
# (delay() only enqueues the task and returns immediately; results are
# observed on the worker side)
add.delay(2, 2)
mul.delay(10, 12)
xsum.delay(range(100))
fib.delay(10)
# Test medium-running tasks
# (queued three times so several workers can be watched in parallel)
fib.delay(35)
fib.delay(35)
fib.delay(35)
| tjanez/celery-demo-app | test/test.py | Python | gpl-3.0 | 350 | 0.002857 |
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy.test import helper
class ETagTest(helper.CPWebCase):
    """Functional tests for the tools.etags tool: ETag autotagging plus
    If-Match / If-None-Match conditional request handling."""
    @staticmethod
    def setup_server():
        ## Mount a small app with etags autotags on, so every response
        ## carries a generated ETag header.
        class Root:
            @cherrypy.expose
            def resource(self):
                return "Oh wah ta goo Siam."
            @cherrypy.expose
            def fail(self, code):
                ## Raise an arbitrary redirect/error from inside the
                ## handler to exercise etag validation on error paths.
                code = int(code)
                if 300 <= code <= 399:
                    raise cherrypy.HTTPRedirect([], code)
                else:
                    raise cherrypy.HTTPError(code)
            @cherrypy.expose
            # In Python 3, tools.encode is on by default
            @cherrypy.config(**{'tools.encode.on': True})
            def unicoded(self):
                ## Non-ASCII body: the etag must be computed on the
                ## encoded bytes.
                return ntou('I am a \u1ee4nicode string.', 'escape')
        conf = {'/': {'tools.etags.on': True,
                      'tools.etags.autotags': True,
                      }}
        cherrypy.tree.mount(Root(), config=conf)
    def test_etags(self):
        ## Fetch once to learn the resource's generated ETag.
        self.getPage("/resource")
        self.assertStatus('200 OK')
        self.assertHeader('Content-Type', 'text/html;charset=utf-8')
        self.assertBody('Oh wah ta goo Siam.')
        etag = self.assertHeader('ETag')
        # Test If-Match (both valid and invalid)
        self.getPage("/resource", headers=[('If-Match', etag)])
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "*")])
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "*")], method="POST")
        self.assertStatus("200 OK")
        self.getPage("/resource", headers=[('If-Match', "a bogus tag")])
        self.assertStatus("412 Precondition Failed")
        # Test If-None-Match (both valid and invalid)
        self.getPage("/resource", headers=[('If-None-Match', etag)])
        self.assertStatus(304)
        ## Unsafe method with matching If-None-Match must be refused.
        self.getPage("/resource", method='POST',
                     headers=[('If-None-Match', etag)])
        self.assertStatus("412 Precondition Failed")
        self.getPage("/resource", headers=[('If-None-Match', "*")])
        self.assertStatus(304)
        self.getPage("/resource", headers=[('If-None-Match', "a bogus tag")])
        self.assertStatus("200 OK")
    def test_errors(self):
        self.getPage("/resource")
        self.assertStatus(200)
        etag = self.assertHeader('ETag')
        # Test raising errors in page handler
        ## The status raised by the handler must survive etag processing.
        self.getPage("/fail/412", headers=[('If-Match', etag)])
        self.assertStatus(412)
        self.getPage("/fail/304", headers=[('If-Match', etag)])
        self.assertStatus(304)
        self.getPage("/fail/412", headers=[('If-None-Match', "*")])
        self.assertStatus(412)
        self.getPage("/fail/304", headers=[('If-None-Match', "*")])
        self.assertStatus(304)
    def test_unicode_body(self):
        ## The etag of an encoded unicode body must be stable across
        ## requests, so a conditional re-fetch yields the same tag.
        self.getPage("/unicoded")
        self.assertStatus(200)
        etag1 = self.assertHeader('ETag')
        self.getPage("/unicoded", headers=[('If-Match', etag1)])
        self.assertStatus(200)
        self.assertHeader('ETag', etag1)
| heytcass/homeassistant-config | deps/cherrypy/test/test_etags.py | Python | mit | 3,093 | 0 |
import unittest
import matmath
import numpy as np
import math
class TestMatrix(unittest.TestCase):
    ## Unit tests for the matmath rotation/quaternion helpers.  Points are
    ## homogeneous row vectors [x, y, z, 1] multiplied as pt.dot(matrix).
    def testRotX(self):
        ## 90-degree rotation about X: x fixed, y -> z, z -> -y.
        mat = matmath.xRotationMatrix(math.radians(90))
        pt = np.array([1, 0, 0, 1])
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [1, 0, 0, 1])
        pt = np.array([0, 1, 0, 1])
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [0, 0, 1, 1])
        pt = np.array([0, 0, 1, 1])
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [0, -1, 0, 1])
    def testRotY(self):
        ## 90-degree rotation about Y maps z onto x.
        pt = np.array([0, 0, 1, 1])
        mat = matmath.yRotationMatrix(math.radians(90))
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [1, 0, 0, 1])
    def testRotZ(self):
        ## 90-degree rotation about Z maps x onto y.
        pt = np.array([1, 0, 0, 1])
        mat = matmath.zRotationMatrix(math.radians(90))
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [0, 1, 0, 1])
    def testQuaternionMatrix(self):
        ## Axis-angle -> quaternion -> rotation matrix must agree with the
        ## directly constructed rotation matrix for each principal axis.
        q = matmath.axisAngleToQuaternion([1, 0, 0], np.radians(90))
        qmat = matmath.quaternionToRotationMatrix(q)
        rmat = matmath.xRotationMatrix(math.radians(90))
        np.testing.assert_almost_equal(qmat, rmat)
        q = matmath.axisAngleToQuaternion([0, 1, 0], np.radians(90))
        qmat = matmath.quaternionToRotationMatrix(q)
        rmat = matmath.yRotationMatrix(math.radians(90))
        np.testing.assert_almost_equal(qmat, rmat)
        q = matmath.axisAngleToQuaternion([0, 0, 1], np.radians(90))
        qmat = matmath.quaternionToRotationMatrix(q)
        rmat = matmath.zRotationMatrix(math.radians(90))
        np.testing.assert_almost_equal(qmat, rmat)
    def testMultipleRotates(self):
        ## Composed rotation (X then Z via matrix product) applied to a
        ## unit z vector.
        r1 = matmath.xRotationMatrix(np.radians(90))
        r2 = matmath.zRotationMatrix(np.radians(90))
        mat = r1.dot(r2)
        pt = np.array([0, 0, 1, 1])
        npt = pt.dot(mat)
        np.testing.assert_almost_equal(npt, [1, 0, 0, 1])
    def test2M(self):
        ## NOTE(review): this case only prints the transformed points and
        ## asserts nothing -- it documents the PC -> SoS -> device pipeline
        ## rather than testing it; consider adding assertions.
        ## (Python 2 print statements; file predates print_function.)
        # 2 Meters away depth scan
        pt = np.array([0, 0, 2, 1])
        print "PC", pt
        mat = matmath.pcToSoSMatrix()
        npt = pt.dot(mat)
        print "SoS ", npt
        trans = np.array([0, 0, 0])
        quaternion = matmath.axisAngleToQuaternion([1, 0, 0], np.radians(90))
        mat = matmath.getPC2WorldMatrix(trans, quaternion)
        npt = pt.dot(mat)
        print "Device", npt
        pt = np.array([0, 1, 2, 1])
        print "PC", pt
        mat = matmath.pcToSoSMatrix()
        npt = pt.dot(mat)
        print "SoS ", npt
        mat = matmath.getPC2WorldMatrix(trans, quaternion)
        npt = pt.dot(mat)
        print "Device", npt
| daryllstrauss/tango | test_mat.py | Python | mit | 2,661 | 0.001127 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Voropp(MakefilePackage):
    """Voro++ is a open source software library for the computation of the
    Voronoi diagram, a widely-used tessellation that has applications in many
    scientific fields."""
    homepage = "http://math.lbl.gov/voro++/about.html"
    url = "http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz"
    variant('pic', default=True,
            description='Position independent code')
    version('0.4.6', sha256='ef7970071ee2ce3800daa8723649ca069dc4c71cc25f0f7d22552387f3ea437e')
    def edit(self, spec, prefix):
        ## Patch config.mk in place: point the build at Spack's C++
        ## compiler wrapper and at the Spack install prefix.
        filter_file(r'CC=g\+\+',
                    'CC={0}'.format(self.compiler.cxx),
                    'config.mk')
        filter_file(r'PREFIX=/usr/local',
                    'PREFIX={0}'.format(self.prefix),
                    'config.mk')
        # We can safely replace the default CFLAGS which are:
        # CFLAGS=-Wall -ansi -pedantic -O3
        cflags = ''
        if '+pic' in spec:
            cflags += self.compiler.cc_pic_flag
        filter_file(r'CFLAGS=.*',
                    'CFLAGS={0}'.format(cflags),
                    'config.mk')
| iulian787/spack | var/spack/repos/builtin/packages/voropp/package.py | Python | lgpl-2.1 | 1,349 | 0.001483 |
# Copyright (c) 2012 Roberto Alsina y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from docutils import nodes
from docutils.parsers.rst import Directive, directives
CODE = """\
<iframe width="{width}"
height="{height}"
src="http://www.youtube.com/embed/{yid}?rel=0&hd=1&wmode=transparent"
></iframe>"""
class Youtube(Directive):
    """reST directive that embeds a YouTube player iframe.

    Usage::

        .. youtube:: lyViVmaBQDg
           :height: 400
           :width: 600
    """
    has_content = True
    required_arguments = 1
    option_spec = {
        "width": directives.positive_int,
        "height": directives.positive_int,
    }

    def run(self):
        self.check_content()
        # Start from the defaults, then let user-supplied
        # :width:/:height: options win.
        settings = {'yid': self.arguments[0], 'width': 425, 'height': 344}
        settings.update(self.options)
        html = CODE.format(**settings)
        return [nodes.raw('', html, format='html')]

    def check_content(self):
        # Directive content was only ever used for the deprecated
        # "key=value" option style; reject it with a warning.
        if not self.content:
            return
        raise self.warning("This directive does not accept content. The "
                           "'key=value' format for options is deprecated, "
                           "use ':key: value' instead")
directives.register_directive('youtube', Youtube)
| servalproject/nikola | nikola/plugins/compile_rest/youtube.py | Python | mit | 2,306 | 0 |
def process(self):
    """Derive glyph names for the Gujarati Unicode block.

    Strips script/category noise words, applies the shared Indic and
    number helpers, then lowercases, compresses and prefixes the names
    with the script tag.
    """
    # GUJARATI VOWEL SIGN CANDRA E
    # GUJARATI VOWEL CANDRA E
    for noise in ("GUJARATI", "LETTER", "DIGIT"):
        self.edit(noise)
    self.processAs("Helper Indic")
    # Ordered rename/strip rules; order matters ("VOWEL SIGN" before
    # "VOWEL" and "SIGN").
    for args in (
        ("VOWEL SIGN", "sign"),
        ("VOWEL",),
        ("SIGN",),
        ("THREE-DOT NUKTA ABOVE", "threedotnuktaabove"),
        ("TWO-CIRCLE NUKTA ABOVE", "twocirclenuktaabove"),
    ):
        self.edit(*args)
    self.processAs("Helper Numbers")
    self.lower()
    self.compress()
    self.scriptPrefix()
if __name__ == "__main__":
    from glyphNameFormatter.exporters import printRange
    from glyphNameFormatter.tools import debug
    ## Dump the generated names for the whole Gujarati range, then show
    ## the derivation details for the single code point U+0AFA.
    printRange("Gujarati")
    debug(0x0AFA)
# -*- mode: python; indent-tabs-mode: nil; tab-width: 2 -*-
"""
aria_api.py - implements handlers which are for the Aria to talk to helvetic.
"""
from __future__ import absolute_import
from base64 import b16encode
from crc16 import crc16xmodem
from datetime import timedelta
from decimal import Decimal
from django.contrib.auth.models import User
from django.db import transaction
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from string import hexdigits
import struct
from time import time
from ..models import AuthorisationToken, Measurement, Scale, utcnow
class ScaleValidateView(View):
    """Handler for the scale's periodic token-validation ping.

    Context: https://github.com/micolous/helvetic/issues/1 -- the scale
    sometimes re-checks that it authenticated with the correct token.
    Registration itself is handled by ScaleRegisterView, so this
    endpoint always reports success ("T"); the real upstream service
    answers "F" on error.
    """

    def get(self, request):
        return HttpResponse('T')
class ScaleRegisterView(View):
    ## One-time registration endpoint: the scale presents its serial
    ## number (MAC address), wifi SSID and a single-use authorisation
    ## token; on success a Scale row is created for the token's owner.
    def get(self, request):
        if 'serialNumber' not in request.GET:
            return HttpResponseBadRequest('serialNumber missing')
        if 'token' not in request.GET:
            return HttpResponseBadRequest('token missing')
        if 'ssid' not in request.GET:
            return HttpResponseBadRequest('ssid missing')
        serial = request.GET['serialNumber'].upper()
        token = request.GET['token']
        ssid = request.GET['ssid']
        ## The serial is a MAC address rendered as 12 hex digits.
        if len(serial) != 12:
            return HttpResponseBadRequest('serialNumber must be 12 bytes')
        if any(((x not in hexdigits) for x in serial)):
            return HttpResponseBadRequest('serial must only contain hex')
        # Lookup the authorisation token
        auth_token = AuthorisationToken.lookup_token(token)
        if auth_token is None:
            return HttpResponseForbidden('Bad auth token')
        owner = auth_token.user
        # Delete the token.
        ## Tokens are single-use; consume it before registering.
        auth_token.delete()
        # Register the Aria
        scale = Scale.objects.create(
            hw_address=serial,
            ssid=ssid,
            owner=owner,
        )
        # Only return 200 OK
        return HttpResponse('')
class ScaleUploadView(View):
    """Receives the Aria's binary measurement upload (protocol v3) and
    replies with the scale configuration and known user profiles.
    """

    @method_decorator(csrf_exempt)
    @method_decorator(transaction.atomic)
    def dispatch(self, *args, **kwargs):
        # No CSRF token from the device; the whole upload commits or
        # rolls back as one transaction.
        return super(ScaleUploadView, self).dispatch(*args, **kwargs)

    def post(self, request):
        now = utcnow()
        body = request.body

        # Version 3 protocol: header is protocol version, battery %,
        # 6-byte MAC address and a 16-byte auth code (30 bytes total).
        proto_ver, battery_pc, mac, auth_code = struct.unpack('<LL6s16s', body[:30])
        body = body[30:]
        if proto_ver != 3:
            return HttpResponseBadRequest('Unknown protocol version: %d' % proto_ver)
        if battery_pc > 100 or battery_pc < 0:
            return HttpResponseBadRequest('Battery percentage must be 0..100 (got %d)' % battery_pc)

        # Store MAC and auth code as uppercase hex strings.
        mac, auth_code = [b16encode(x) for x in (mac, auth_code)]

        scale = None
        try:
            scale = Scale.objects.get(hw_address=mac)
        except Scale.DoesNotExist:
            return HttpResponseBadRequest('Unknown scale: %s' % mac)

        # Check authcode: the first upload stores it, later uploads must
        # present the same value.
        if scale.auth_code is None or scale.auth_code == '':
            scale.auth_code = auth_code
        elif scale.auth_code != auth_code:
            return HttpResponseForbidden('Invalid auth code')

        scale.battery_percent = battery_pc

        fw_ver, unknown2, scale_now, measurement_count = struct.unpack('<LLLL', body[:16])
        body = body[16:]
        scale.fw_version = fw_ver
        scale.save()

        # Each measurement record is 32 bytes.
        for x in range(measurement_count):
            if len(body) < 32:
                return HttpResponseBadRequest('Measurement truncated.')
            id2, imp, weight, ts, uid, fat1, covar, fat2 = \
                struct.unpack('<LLLLLLLL', body[:32])

            # Record the measurement
            # Look up the owner of this measurement (uid 0 = guest).
            if uid == 0:
                measured_user = None
            else:
                try:
                    measured_user = User.objects.get(id=uid)
                # Bug fix: Django raises User.DoesNotExist on a failed
                # get(); "User.NotFound" is not a real attribute and the
                # old except clause itself raised AttributeError.
                except User.DoesNotExist:
                    measured_user = None

            # The scale timestamps with its own clock; convert to server
            # time using the offset between scale_now and ts.
            Measurement.objects.create(
                user=measured_user,
                scale=scale,
                when=now - timedelta(seconds=scale_now - ts),
                weight=weight,
                body_fat=Decimal(fat1) / Decimal(1000),
            )
            body = body[32:]

        # Formulate a response
        scale_users = scale.users.all()
        response = struct.pack('<LBBBL',
            int(time()),  # Fill with current time, to account for processing delay
            scale.unit,
            0x32,  # status = configured
            0x01,  # unknown
            len(scale_users)
        )

        # Insert user info
        for profile in scale_users:
            last_weight = min_var = max_var = 0
            last_measurement = profile.latest_measurement()
            if last_measurement is not None:
                # Round down to whole units and allow a +/- 4000
                # recognition window around the last known weight.
                last_weight = ((last_measurement.weight) // 1000) * 1000
                min_var = last_weight - 4000
                if min_var < 0:
                    min_var = 0
                max_var = last_weight + 4000

            response += struct.pack('<L16x20sLLLBLLLLLL',
                profile.user.id,
                profile.short_name_formatted(),
                min_var,
                max_var,
                profile.age(),
                profile.gender,
                profile.height,
                0,  # some weight
                0,  # body fat
                0,  # covariance
                0,  # another weight
                0,  # timestamp
            )

        response = response + struct.pack('<LLL',
            0,  # always 0
            3,  # update status: no
            0,  # unknown
        )
        # Trailer: fixed 0x19 plus 0x4d per user record, then CRC16.
        trailer = 0x19 + (len(scale_users) * 0x4d)
        response = response + struct.pack('<HH',
            crc16xmodem(response),  # checksum
            trailer,
        )

        hr = HttpResponse(response)
        # Content-Length is a required element
        hr['Content-Length'] = str(len(response))
        return hr
| micolous/helvetic | helvetic/views/aria_api.py | Python | agpl-3.0 | 5,411 | 0.030309 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import sys, os, TestUtil, shlex
from threading import Thread
#
# Set nreplicas to a number N to test replication with N replicas.
#
#nreplicas=0
nreplicas=1
# Base client endpoint port of the master registry; replica i listens on
# iceGridPort + i (see getDefaultLocatorProperty / startIceGridRegistry).
iceGridPort = 12010;
# Options passed to every icegridnode process started by this helper
# (tracing off, single-threaded server pool, ready banner enabled).
nodeOptions = r' --Ice.Warn.Connections=0' + \
              r' --IceGrid.Node.Endpoints=default' + \
              r' --IceGrid.Node.WaitTime=240' + \
              r' --Ice.ProgramName=icegridnode' + \
              r' --IceGrid.Node.Trace.Replica=0' + \
              r' --IceGrid.Node.Trace.Activator=0' + \
              r' --IceGrid.Node.Trace.Adapter=0' + \
              r' --IceGrid.Node.Trace.Server=0' + \
              r' --IceGrid.Node.ThreadPool.SizeWarn=0' + \
              r' --IceGrid.Node.PrintServersReady=node1' + \
              r' --Ice.NullHandleAbort' + \
              r' --Ice.ThreadPool.Server.Size=0' + \
              r' --Ice.ServerIdleTime=0'
# Options shared by the master registry and all replicas (null permission
# verifiers so tests need no credentials, default endpoints, tracing off).
registryOptions = r' --Ice.Warn.Connections=0' + \
                  r' --IceGrid.Registry.PermissionsVerifier=IceGrid/NullPermissionsVerifier' + \
                  r' --IceGrid.Registry.AdminPermissionsVerifier=IceGrid/NullPermissionsVerifier' + \
                  r' --IceGrid.Registry.SSLPermissionsVerifier=IceGrid/NullSSLPermissionsVerifier' + \
                  r' --IceGrid.Registry.AdminSSLPermissionsVerifier=IceGrid/NullSSLPermissionsVerifier' + \
                  r' --IceGrid.Registry.Server.Endpoints=default' + \
                  r' --IceGrid.Registry.Internal.Endpoints=default' + \
                  r' --IceGrid.Registry.SessionManager.Endpoints=default' + \
                  r' --IceGrid.Registry.AdminSessionManager.Endpoints=default' + \
                  r' --IceGrid.Registry.Trace.Session=0' + \
                  r' --IceGrid.Registry.Trace.Application=0' + \
                  r' --IceGrid.Registry.Trace.Node=0' + \
                  r' --IceGrid.Registry.Trace.Replica=0' + \
                  r' --IceGrid.Registry.Trace.Adapter=0' + \
                  r' --IceGrid.Registry.Trace.Object=0' + \
                  r' --IceGrid.Registry.Trace.Server=0' + \
                  r' --IceGrid.Registry.Trace.Locator=0' + \
                  r' --Ice.ThreadPool.Server.Size=0 ' + \
                  r' --Ice.ThreadPool.Client.SizeWarn=0' + \
                  r' --IceGrid.Registry.Client.ThreadPool.SizeWarn=0' + \
                  r' --Ice.ServerIdleTime=0' + \
                  r' --IceGrid.Registry.DefaultTemplates="' + \
                  os.path.abspath(os.path.join(TestUtil.toplevel, "cpp", "config", "templates.xml") + '"')
def getDefaultLocatorProperty():
    """Build the --Ice.Default.Locator option string listing the endpoint
    of the master registry plus every configured replica.

    Endpoints use consecutive ports starting at iceGridPort.
    """
    # idiom fix: the old local variable shadowed the builtin `property`;
    # the manual while-loop is replaced by a join over range().
    endpoints = "".join(
        ':default -p %d' % (iceGridPort + i) for i in range(nreplicas + 1))
    return ' --Ice.Default.Locator="IceGrid/Locator%s"' % endpoints
def startIceGridRegistry(testdir, dynamicRegistration = False):
    ## Start the master registry plus `nreplicas` replicas, each with a
    ## clean database directory under testdir/db, and return the list of
    ## started server processes (master first).
    iceGrid = TestUtil.getIceGridRegistry()
    command = ' --nowarn ' + registryOptions
    if dynamicRegistration:
        command += r' --IceGrid.Registry.DynamicRegistration'
    procs = []
    i = 0
    while i < (nreplicas + 1):
        ## i == 0 is the master; replicas are named replica-1, replica-2...
        if i == 0:
            name = "registry"
        else:
            name = "replica-" + str(i)
        dataDir = os.path.join(testdir, "db", name)
        if not os.path.exists(dataDir):
            os.mkdir(dataDir)
        else:
            cleanDbDir(dataDir)
        sys.stdout.write("starting icegrid " + name + "... ")
        sys.stdout.flush()
        ## Each instance listens on iceGridPort + i (must match
        ## getDefaultLocatorProperty).
        cmd = command + ' ' + TestUtil.getQtSqlOptions('IceGrid') + \
              r' --Ice.ProgramName=' + name + \
              r' --IceGrid.Registry.Client.Endpoints="default -p ' + str(iceGridPort + i) + '" ' + \
              r' --IceGrid.Registry.Data="' + dataDir + '" '
        if i > 0:
            ## Replicas need a name and the locator of the master.
            cmd += r' --IceGrid.Registry.ReplicaName=' + name + ' ' + getDefaultLocatorProperty()
        driverConfig = TestUtil.DriverConfig("server")
        driverConfig.lang = "cpp"
        proc = TestUtil.startServer(iceGrid, cmd, driverConfig, count = 5)
        procs.append(proc)
        print("ok")
        i = i + 1
    return procs
def shutdownIceGridRegistry(procs):
    """Shut down all registry replicas (highest-numbered first), then the
    master registry, and wait for every process in *procs* to exit
    successfully."""
    for index in range(nreplicas, 0, -1):
        sys.stdout.write("shutting down icegrid replica-" + str(index) + "... ")
        sys.stdout.flush()
        iceGridAdmin("registry shutdown replica-" + str(index))
        print("ok")
    sys.stdout.write("shutting down icegrid registry... ")
    sys.stdout.flush()
    iceGridAdmin("registry shutdown")
    print("ok")
    for proc in procs:
        proc.waitTestSuccess()
def iceGridNodePropertiesOverride():
    #
    # Create property overrides from command line options.
    #
    ## Converts "--Key=value" command-line properties into the
    ## space-separated "Key=value" form expected by
    ## IceGrid.Node.PropertiesOverride.  Values containing spaces are
    ## wrapped in escaped quotes.
    overrideOptions = ''
    for opt in shlex.split(TestUtil.getCommandLineProperties("", TestUtil.DriverConfig("server"))):
        opt = opt.strip().replace("--", "")
        index = opt.find("=")
        if index == -1:
            ## A flag without a value is treated as boolean true.
            overrideOptions += ("%s=1 ") % opt
        else:
            key = opt[0:index]
            value = opt[index + 1:]
            if(value.find(' ') == -1):
                overrideOptions += ("%s=%s ") % (key, value)
            else:
                #
                # NOTE: We need 2 backslash before the quote to run the
                # C# test/IceGrid/simple test with SSL.
                #
                overrideOptions += ("%s=\\\"%s\\\" ") % (key, value.replace('"', '\\\\\\"'))
    return overrideOptions
def startIceGridNode(testdir):
    ## Start a single icegridnode ("localnode") with a clean db directory
    ## and server property overrides derived from the command line;
    ## returns the started process once adapter 'node1' is ready.
    iceGrid = TestUtil.getIceGridNode()
    dataDir = os.path.join(testdir, "db", "node")
    if not os.path.exists(dataDir):
        os.mkdir(dataDir)
    else:
        cleanDbDir(dataDir)
    ## The override string is quoted as a whole; see
    ## iceGridNodePropertiesOverride for the inner escaping rules.
    overrideOptions = '" ' + iceGridNodePropertiesOverride()
    overrideOptions += ' Ice.ServerIdleTime=0 Ice.PrintProcessId=0 Ice.PrintAdapterReady=0"'
    sys.stdout.write("starting icegrid node... ")
    sys.stdout.flush()
    command = r' --nowarn ' + nodeOptions + getDefaultLocatorProperty() + \
              r' --IceGrid.Node.Data="' + dataDir + '"' \
              r' --IceGrid.Node.Name=localnode' + \
              r' --IceGrid.Node.PropertiesOverride=' + overrideOptions
    driverConfig = TestUtil.DriverConfig("server")
    driverConfig.lang = "cpp"
    proc = TestUtil.startServer(iceGrid, command, driverConfig, adapter='node1')
    print("ok")
    return proc
def iceGridAdmin(cmd, ignoreFailure = False):
    ## Run a single icegridadmin command against the grid and return the
    ## process output buffer; exits the script on failure unless
    ## ignoreFailure is set.
    ## NOTE(review): the local variable shadows this function's own name,
    ## which is harmless here but easy to trip over when editing.
    iceGridAdmin = TestUtil.getIceGridAdmin()
    ## The null permissions verifiers accept any credentials; "shutdown"
    ## is used for registry shutdown, "admin1" for everything else.
    user = r"admin1"
    if cmd == "registry shutdown":
        user = r"shutdown"
    command = getDefaultLocatorProperty() + r" --IceGridAdmin.Username=" + user + " --IceGridAdmin.Password=test1 " + \
              r' -e "' + cmd + '"'
    if TestUtil.appverifier:
        TestUtil.setAppVerifierSettings([TestUtil.getIceGridAdmin()])
    driverConfig = TestUtil.DriverConfig("client")
    driverConfig.lang = "cpp"
    proc = TestUtil.startClient(iceGridAdmin, command, driverConfig)
    status = proc.wait()
    if TestUtil.appverifier:
        TestUtil.appVerifierAfterTestEnd([TestUtil.getIceGridAdmin()])
    if not ignoreFailure and status:
        print(proc.buf)
        sys.exit(1)
    return proc.buf
def killNodeServers():
    """Disable and force-kill (SIGKILL) every server known to the grid,
    ignoring failures for servers that are already gone."""
    for entry in iceGridAdmin("server list"):
        name = entry.strip()
        iceGridAdmin("server disable " + name, True)
        iceGridAdmin("server signal " + name + " SIGKILL", True)
def iceGridTest(application, additionalOptions = "", applicationOptions = ""):
    """Full IceGrid test cycle: start registry+node, optionally deploy an
    application descriptor, run the default client, then tear down."""
    testdir = os.getcwd()
    if not TestUtil.isWin32() and os.getuid() == 0:
        # Bug fix: a bare `print` expression is a silent no-op on
        # Python 3 (it just evaluates the function object); print("")
        # emits the intended blank line on both Python 2 and 3.
        print("")
        print("*** can't run test as root ***")
        print("")
        return
    if TestUtil.getDefaultMapping() == "java":
        os.environ['CLASSPATH'] = os.path.join(os.getcwd(), "classes") + os.pathsep + os.environ.get("CLASSPATH", "")
    client = TestUtil.getDefaultClientFile()
    if TestUtil.getDefaultMapping() != "java":
        client = os.path.join(testdir, client)
    clientOptions = ' ' + getDefaultLocatorProperty() + ' ' + additionalOptions
    targets = []
    if TestUtil.appverifier:
        targets = [client, TestUtil.getIceGridNode(), TestUtil.getIceGridRegistry()]
        TestUtil.setAppVerifierSettings(targets)
    registryProcs = startIceGridRegistry(testdir)
    iceGridNodeProc = startIceGridNode(testdir)
    if application != "":
        # Deploy the XML application descriptor, substituting the test
        # directory and the Ice binary directory.
        sys.stdout.write("adding application... ")
        sys.stdout.flush()
        iceGridAdmin("application add -n '" + os.path.join(testdir, application) + "' " + \
                     "test.dir='" + testdir + "' ice.bindir='" + TestUtil.getCppBinDir() + "' " + applicationOptions)
        print("ok")
    sys.stdout.write("starting client... ")
    sys.stdout.flush()
    clientProc = TestUtil.startClient(client, clientOptions, TestUtil.DriverConfig("client"), startReader = False)
    print("ok")
    clientProc.startReader()
    clientProc.waitTestSuccess()
    if application != "":
        sys.stdout.write("remove application... ")
        sys.stdout.flush()
        iceGridAdmin("application remove Test")
        print("ok")
    sys.stdout.write("shutting down icegrid node... ")
    sys.stdout.flush()
    iceGridAdmin("node shutdown localnode")
    print("ok")
    shutdownIceGridRegistry(registryProcs)
    iceGridNodeProc.waitTestSuccess()
    if TestUtil.appverifier:
        TestUtil.appVerifierAfterTestEnd(targets)
def iceGridClientServerTest(additionalClientOptions, additionalServerOptions):
    ## Variant of iceGridTest without a node: only the registry (with
    ## dynamic registration enabled) plus the test's own default server
    ## and client executables.
    testdir = os.getcwd()
    server = TestUtil.getDefaultServerFile()
    client = TestUtil.getDefaultClientFile()
    if TestUtil.getDefaultMapping() != "java":
        server = os.path.join(testdir, server)
        client = os.path.join(testdir, client)
    if TestUtil.getDefaultMapping() == "java":
        os.environ['CLASSPATH'] = os.path.join(os.getcwd(), "classes") + os.pathsep + os.environ.get("CLASSPATH", "")
    targets = []
    if TestUtil.appverifier:
        targets = [client, server, TestUtil.getIceGridRegistry()]
        TestUtil.setAppVerifierSettings(targets)
    ## Both sides are pointed at the registry via the default locator.
    clientOptions = getDefaultLocatorProperty() + ' ' + additionalClientOptions
    serverOptions = getDefaultLocatorProperty() + ' ' + additionalServerOptions
    registryProcs = startIceGridRegistry(testdir, True)
    sys.stdout.write("starting server... ")
    sys.stdout.flush()
    serverProc= TestUtil.startServer(server, serverOptions, TestUtil.DriverConfig("server"))
    print("ok")
    sys.stdout.write("starting client... ")
    sys.stdout.flush()
    clientProc = TestUtil.startClient(client, clientOptions, TestUtil.DriverConfig("client"))
    print("ok")
    clientProc.waitTestSuccess()
    serverProc.waitTestSuccess()
    shutdownIceGridRegistry(registryProcs)
    if TestUtil.appverifier:
        TestUtil.appVerifierAfterTestEnd(targets)
def cleanDbDir(path):
    """Recursively empty *path*, keeping every .gitignore file.

    Subdirectories are cleaned first and then removed; removal is
    skipped (OSError) when a directory is not empty because it still
    holds its own .gitignore.
    """
    for entry in os.listdir(path):
        if entry == ".gitignore":
            continue
        full = os.path.join(path, entry)
        if os.path.isdir(full):
            cleanDbDir(full)
            try:
                os.rmdir(full)
            except OSError:
                # Directory still contains a .gitignore -- leave it.
                pass
        else:
            os.remove(full)
print("My script") | ildar-band/rd90 | rd90.py | Python | mit | 18 | 0.055556 |
# $Id$
#
import inc_const as const
## Launch one pjsua instance: no sound device, a single call slot, and
## the target URI supplied by the SIPp test driver.
PJSUA = ["--null-audio --max-calls=1 $SIPP_URI"]
## Expected outcome for instance 0: the call reaches the CONFIRMED state.
## NOTE(review): the third field ("v") is presumably the key sequence the
## driver sends to pjsua once the expectation matches -- confirm against
## the test framework.
PJSUA_EXPECTS = [[0, const.STATE_CONFIRMED, "v"]]
| nigusgirma/https-svn.pjsip.org-repos-pjproject-trunk- | tests/pjsua/scripts-sipp/uas-answer-200-reinvite-without-sdp.py | Python | gpl-2.0 | 136 | 0 |
import configparser
import json
import socket
import time
from datetime import datetime

import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
'''
Author: GYzheng, [email protected]
###Server side
We have two topic, one is from client to server, the other one is from client to server
1. Server->Client : sc_topic
2. Client->Server : cs_topic
'''
class command_handler:
    """Server side of the MQTT remote-control channel.

    Two topics are derived from *topic*: 'sc_<topic>' carries commands
    from this server to the clients, 'cs_<topic>' carries results from
    the clients back to the server.
    """

    def __init__(self, host, port, topic):
        self.host = host
        self.port = int(port)
        self.sc_topic = 'sc_' + topic
        self.cs_topic = 'cs_' + topic
        self.get_host_info()
        self.subscribe_msg()

    def send_command(self, cmd):
        """Broadcast *cmd* to all clients with status 'run'."""
        msg = self.json_generator(cmd, 'run')  # cmd, status
        self.send_msg(msg)

    def get_host_info(self):
        # Cache this host's name and IP; both are embedded in messages.
        self.host_name = socket.gethostname()
        self.host_ip = socket.gethostbyname(socket.gethostname())

    def subscribe_msg(self):
        """Connect to the broker and subscribe to client results.

        Blocks until the broker acknowledges the connection.
        """
        self.subscriber = mqtt.Client()
        self.subscriber.on_connect = self.on_connect
        self.subscriber.on_message = self.on_message
        # Flipped to True by on_connect once the broker acks.
        self.is_connect = False
        self.subscriber.connect(self.host, self.port)  # keep_alive=60
        self.subscriber.loop_start()
        # Fix: sleep between polls instead of busy-spinning, which
        # burned a full CPU core while waiting for the connection.
        while not self.is_connect:
            time.sleep(0.01)

    def send_msg(self, msg):
        publish.single(self.sc_topic, msg, hostname=self.host, port=self.port)

    def on_connect(self, client, userdata, flags, rc):
        self.is_connect = True
        # subscribe data from clients
        client.subscribe(self.cs_topic)

    def on_message(self, client, user_data, msg):
        """Print a client's result message; ignore malformed payloads."""
        try:
            tmp = json.loads(msg.payload.decode('utf-8', 'ignore'))
            client_name = tmp['name']
            client_ip = tmp['ip']
            client_status = tmp['status']
            client_result = tmp['result']
            print(client_name + "," + client_ip + "," + client_status)
            print(client_result)
        # Fix: catch only JSON decode errors (ValueError) and missing
        # keys; the old bare `except:` also swallowed KeyboardInterrupt
        # and genuine bugs.
        except (ValueError, KeyError):
            print("Not Json format!")

    def json_generator(self, cmd, status):
        """Serialise a command message: name/ip/timestamp/status/cmd."""
        msg = json.dumps({'name': self.host_name,
                          'ip': self.host_ip,
                          'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                          'status': status,
                          'cmd': cmd})
        return msg
#main function
if __name__ == "__main__":
    ## Broker connection details come from server.conf, section
    ## [server.conf]: broker_ip, broker_port and the topic suffix.
    config = configparser.ConfigParser()
    config.read('server.conf')
    broker_ip = config['server.conf']['broker_ip']
    broker_port = config['server.conf']['broker_port']
    topic = config['server.conf']['topic']
    ch = command_handler(broker_ip,broker_port,topic);
    print("Server start! Broker IP = "+broker_ip+", Broker PORT = "+broker_port+", topic = "+topic)
    ## Simple REPL: every line typed is broadcast to the clients.
    while True:
        cmd = input("Please input command:\n")
        ch.send_command(cmd)
        pass
| iamgyz/remote-system-control | server.py | Python | mit | 2,750 | 0.020364 |
from bravado_core.spec import Spec
import mock
from pyramid.config import Configurator
from pyramid.registry import Registry
import pytest
from swagger_spec_validator.common import SwaggerValidationError
import pyramid_swagger
from pyramid_swagger.model import SwaggerSchema
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
@mock.patch('pyramid_swagger.get_swagger_schema')
@mock.patch('pyramid_swagger.get_swagger_spec')
def test_disable_api_doc_views(_1, _2, mock_register):
    ## Decorators apply bottom-up: _1 is get_swagger_spec, _2 is
    ## get_swagger_schema and mock_register is register_api_doc_endpoints.
    settings = {
        'pyramid_swagger.enable_api_doc_views': False,
        'pyramid_swagger.enable_swagger_spec_validation': False,
    }
    mock_config = mock.Mock(
        spec=Configurator,
        registry=mock.Mock(spec=Registry, settings=settings))
    pyramid_swagger.includeme(mock_config)
    ## With the api doc views disabled, no endpoints may be registered.
    assert not mock_register.called
def test_bad_schema_validated_on_include():
    """includeme() must reject an invalid schema while validation is on."""
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/',
        'pyramid_swagger.enable_swagger_spec_validation': True,
    }
    config = mock.Mock(registry=mock.Mock(settings=settings))
    with pytest.raises(SwaggerValidationError):
        pyramid_swagger.includeme(config)
    # TODO: Figure out why this assertion fails on travis
    # assert "'info' is a required property" in str(excinfo.value)
@mock.patch('pyramid_swagger.get_swagger_spec')
def test_bad_schema_not_validated_if_spec_validation_is_disabled(_):
    """A broken schema is tolerated when spec validation is switched off."""
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/',
        'pyramid_swagger.enable_swagger_spec_validation': False,
    }
    registry = mock.Mock(settings=settings)
    pyramid_swagger.includeme(mock.Mock(spec=Configurator, registry=registry))
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_12_only(mock_register):
    """Only the 1.2 schema is built when swagger_versions is ['1.2']."""
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['1.2'],
    }
    config = mock.Mock(registry=mock.Mock(settings=settings))
    pyramid_swagger.includeme(config)
    assert mock_register.call_count == 1
    assert isinstance(settings['pyramid_swagger.schema12'], SwaggerSchema)
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_20_only(mock_register):
    """Only the 2.0 spec is built when swagger_versions is ['2.0']."""
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['2.0'],
    }
    config = mock.Mock(registry=mock.Mock(settings=settings))
    pyramid_swagger.includeme(config)
    assert mock_register.call_count == 1
    assert isinstance(settings['pyramid_swagger.schema20'], Spec)
    assert not settings['pyramid_swagger.schema12']
@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_12_and_20(mock_register):
    """Both schemas are built -- and endpoints registered twice -- when
    swagger_versions lists 1.2 and 2.0."""
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.swagger_versions': ['1.2', '2.0'],
    }
    config = mock.Mock(registry=mock.Mock(settings=settings))
    pyramid_swagger.includeme(config)
    assert mock_register.call_count == 2
    assert isinstance(settings['pyramid_swagger.schema12'], SwaggerSchema)
    assert isinstance(settings['pyramid_swagger.schema20'], Spec)
| analogue/pyramid_swagger | tests/includeme_test.py | Python | bsd-3-clause | 3,308 | 0 |
# -*- coding: utf-8 -*-
import cStringIO
import datetime
from itertools import islice
import json
import xml.etree.ElementTree as ET
import logging
import re
import werkzeug.utils
import urllib2
import werkzeug.wrappers
from PIL import Image
import openerp
from openerp.addons.web.controllers.main import WebClient
from openerp.addons.web import http
from openerp.http import request, STATIC_CACHE
from openerp.tools import image_save_for_web
logger = logging.getLogger(__name__)
# Completely arbitrary limits
MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT = IMAGE_LIMITS = (1024, 768)
LOC_PER_SITEMAP = 45000
SITEMAP_CACHE_TIME = datetime.timedelta(hours=12)
class Website(openerp.addons.web.controllers.main.Home):
    """Frontend controllers of the OpenERP website module.

    Covers public page rendering, language switching, robots/sitemap
    generation, and the JSON/HTTP endpoints used by the inline website
    editor (page creation, themes, translations, image uploads, server
    actions).

    Note: this is Python 2 code (``except X, e`` syntax, ``unicode`` and
    ``basestring`` builtins).
    """
    #------------------------------------------------------
    # View
    #------------------------------------------------------
    @http.route('/', type='http', auth="public", website=True)
    def index(self, **kw):
        """Render the homepage, or follow the first top-menu entry when it
        points somewhere other than the homepage."""
        page = 'homepage'
        try:
            main_menu = request.registry['ir.model.data'].get_object(request.cr, request.uid, 'website', 'main_menu')
        except Exception:
            # Menu may be missing (e.g. during install); fall back to homepage.
            pass
        else:
            first_menu = main_menu.child_id and main_menu.child_id[0]
            if first_menu:
                if not (first_menu.url.startswith(('/page/', '/?', '/#')) or (first_menu.url=='/')):
                    return request.redirect(first_menu.url)
                if first_menu.url.startswith('/page/'):
                    return request.registry['ir.http'].reroute(first_menu.url)

        return self.page(page)

    @http.route(website=True, auth="public")
    def web_login(self, *args, **kw):
        """Expose the standard login screen inside the website layout."""
        # TODO: can't we just put auth=public, ... in web client ?
        return super(Website, self).web_login(*args, **kw)

    @http.route('/website/lang/<lang>', type='http', auth="public", website=True, multilang=False)
    def change_lang(self, lang, r='/', **kwargs):
        """Switch the visitor language and redirect to *r*, remembering the
        choice in the ``website_lang`` cookie."""
        if lang == 'default':
            lang = request.website.default_lang_code
            r = '/%s%s' % (lang, r or '/')
        # NOTE(review): r is always truthy after the formatting above, so the
        # ('/%s' % lang) fallback only matters when r='' was passed explicitly.
        redirect = werkzeug.utils.redirect(r or ('/%s' % lang), 303)
        redirect.set_cookie('website_lang', lang)
        return redirect

    @http.route('/page/<page:page>', type='http', auth="public", website=True)
    def page(self, page, **opt):
        """Render a CMS page template; publishers get an editable 404 page
        when the template does not exist, visitors get a plain 404."""
        values = {
            'path': page,
        }
        # /page/website.XXX --> /page/XXX
        if page.startswith('website.'):
            return request.redirect('/page/' + page[8:], code=301)
        elif '.' not in page:
            page = 'website.%s' % page

        try:
            request.website.get_template(page)
        except ValueError, e:
            # page not found
            if request.website.is_publisher():
                page = 'website.page_404'
            else:
                return request.registry['ir.http']._handle_exception(e, 404)

        return request.render(page, values)

    @http.route(['/robots.txt'], type='http', auth="public")
    def robots(self):
        """Serve robots.txt rendered from the website.robots template."""
        return request.render('website.robots', {'url_root': request.httprequest.url_root}, mimetype='text/plain')

    @http.route('/sitemap.xml', type='http', auth="public", website=True)
    def sitemap_xml_index(self):
        """Serve /sitemap.xml, regenerating it when the cached copy stored in
        ir.attachment is older than SITEMAP_CACHE_TIME.

        Sites with more than LOC_PER_SITEMAP URLs are split into
        /sitemap-<n>.xml files referenced by a sitemap index document.
        """
        cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context
        ira = request.registry['ir.attachment']
        iuv = request.registry['ir.ui.view']
        mimetype ='application/xml;charset=utf-8'
        content = None

        def create_sitemap(url, content):
            # Persist a generated sitemap as a binary attachment served at *url*.
            ira.create(cr, uid, dict(
                datas=content.encode('base64'),
                mimetype=mimetype,
                type='binary',
                name=url,
                url=url,
            ), context=context)

        sitemap = ira.search_read(cr, uid, [('url', '=' , '/sitemap.xml'), ('type', '=', 'binary')], ('datas', 'create_date'), context=context)
        if sitemap:
            # Check if stored version is still valid
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            create_date = datetime.datetime.strptime(sitemap[0]['create_date'], server_format)
            delta = datetime.datetime.now() - create_date
            if delta < SITEMAP_CACHE_TIME:
                content = sitemap[0]['datas'].decode('base64')

        if not content:
            # Remove all sitemaps in ir.attachments as we're going to regenerated them
            sitemap_ids = ira.search(cr, uid, [('url', '=like' , '/sitemap%.xml'), ('type', '=', 'binary')], context=context)
            if sitemap_ids:
                ira.unlink(cr, uid, sitemap_ids, context=context)

            pages = 0
            first_page = None
            locs = request.website.sudo(user=request.website.user_id.id).enumerate_pages()
            while True:
                start = pages * LOC_PER_SITEMAP
                # NOTE(review): `locs` is a generator already advanced by the
                # previous iteration, so slicing from `start` (instead of 0)
                # appears to skip LOC_PER_SITEMAP entries per extra page --
                # confirm against upstream before relying on multi-page output.
                values = {
                    'locs': islice(locs, start, start + LOC_PER_SITEMAP),
                    'url_root': request.httprequest.url_root[:-1],
                }
                urls = iuv.render(cr, uid, 'website.sitemap_locs', values, context=context)
                if urls.strip():
                    page = iuv.render(cr, uid, 'website.sitemap_xml', dict(content=urls), context=context)
                    if not first_page:
                        first_page = page
                    pages += 1
                    create_sitemap('/sitemap-%d.xml' % pages, page)
                else:
                    break
            if not pages:
                return request.not_found()
            elif pages == 1:
                content = first_page
            else:
                # Sitemaps must be split in several smaller files with a sitemap index
                content = iuv.render(cr, uid, 'website.sitemap_index_xml', dict(
                    pages=range(1, pages + 1),
                    url_root=request.httprequest.url_root,
                ), context=context)
            create_sitemap('/sitemap.xml', content)

        return request.make_response(content, [('Content-Type', mimetype)])

    @http.route('/website/info', type='http', auth="public", website=True)
    def website_info(self):
        """Render the technical information page listing installed apps and
        modules, plus the server version."""
        try:
            request.website.get_template('website.info').name
        except Exception, e:
            return request.registry['ir.http']._handle_exception(e, 404)
        irm = request.env()['ir.module.module'].sudo()
        apps = irm.search([('state','=','installed'),('application','=',True)])
        modules = irm.search([('state','=','installed'),('application','=',False)])
        values = {
            'apps': apps,
            'modules': modules,
            'version': openerp.service.common.exp_version()
        }
        return request.render('website.info', values)

    #------------------------------------------------------
    # Edit
    #------------------------------------------------------
    @http.route('/website/add/<path:path>', type='http', auth="user", website=True)
    def pagenew(self, path, noredirect=False, add_menu=None):
        """Create a new CMS page (optionally linked from the main menu) and
        redirect to it, or return its URL as plain text when *noredirect*."""
        xml_id = request.registry['website'].new_page(request.cr, request.uid, path, context=request.context)
        if add_menu:
            model, id = request.registry["ir.model.data"].get_object_reference(request.cr, request.uid, 'website', 'main_menu')
            request.registry['website.menu'].create(request.cr, request.uid, {
                    'name': path,
                    'url': "/page/" + xml_id,
                    'parent_id': id,
                }, context=request.context)

        # Reverse action in order to allow shortcut for /page/<website_xml_id>
        url = "/page/" + re.sub(r"^website\.", '', xml_id)
        if noredirect:
            return werkzeug.wrappers.Response(url, mimetype='text/plain')
        return werkzeug.utils.redirect(url)

    @http.route('/website/theme_change', type='http', auth="user", website=True)
    def theme_change(self, theme_id=False, **kwargs):
        """Deactivate every view inheriting the theme template, then activate
        the one identified by *theme_id* (a 'module.xml_id' string)."""
        imd = request.registry['ir.model.data']
        Views = request.registry['ir.ui.view']

        _, theme_template_id = imd.get_object_reference(
            request.cr, request.uid, 'website', 'theme')
        views = Views.search(request.cr, request.uid, [
            ('inherit_id', '=', theme_template_id),
        ], context=request.context)
        Views.write(request.cr, request.uid, views, {
            'active': False,
        }, context=dict(request.context or {}, active_test=True))

        if theme_id:
            module, xml_id = theme_id.split('.')
            _, view_id = imd.get_object_reference(
                request.cr, request.uid, module, xml_id)
            Views.write(request.cr, request.uid, [view_id], {
                'active': True
            }, context=dict(request.context or {}, active_test=True))

        return request.render('website.themes', {'theme_changed': True})

    @http.route(['/website/snippets'], type='json', auth="public", website=True)
    def snippets(self):
        """Render the building-block snippets palette for the editor."""
        return request.website._render('website.snippets')

    @http.route('/website/reset_templates', type='http', auth='user', methods=['POST'], website=True)
    def reset_template(self, templates, redirect='/'):
        """Reset the selected view templates to their module defaults by
        clearing ``noupdate`` and immediately upgrading the owning modules."""
        templates = request.httprequest.form.getlist('templates')
        modules_to_update = []
        for temp_id in templates:
            view = request.registry['ir.ui.view'].browse(request.cr, request.uid, int(temp_id), context=request.context)
            # User-created pages have no module default to reset to.
            if view.page:
                continue
            view.model_data_id.write({
                'noupdate': False
            })
            if view.model_data_id.module not in modules_to_update:
                modules_to_update.append(view.model_data_id.module)
        if modules_to_update:
            module_obj = request.registry['ir.module.module']
            module_ids = module_obj.search(request.cr, request.uid, [('name', 'in', modules_to_update)], context=request.context)
            if module_ids:
                module_obj.button_immediate_upgrade(request.cr, request.uid, module_ids, context=request.context)
        return request.redirect(redirect)

    @http.route('/website/customize_template_get', type='json', auth='user', website=True)
    def customize_template_get(self, xml_id, full=False):
        """Return the customizable views attached to *xml_id*."""
        return request.registry["ir.ui.view"].customize_template_get(
            request.cr, request.uid, xml_id, full=full, context=request.context)

    @http.route('/website/get_view_translations', type='json', auth='public', website=True)
    def get_view_translations(self, xml_id, lang=None):
        """Return the translations of a view for *lang* (defaults to the
        context language)."""
        lang = lang or request.context.get('lang')
        return request.registry["ir.ui.view"].get_view_translations(
            request.cr, request.uid, xml_id, lang=lang, context=request.context)

    @http.route('/website/set_translations', type='json', auth='public', website=True)
    def set_translations(self, data, lang):
        """Create or update view translations edited inline.

        *data* maps view ids to lists of dicts carrying ``initial_content``,
        ``new_content``, an optional ``translation_id`` and optional gengo
        metadata.
        """
        irt = request.registry.get('ir.translation')
        for view_id, trans in data.items():
            view_id = int(view_id)

            for t in trans:
                initial_content = t['initial_content'].strip()
                new_content = t['new_content'].strip()
                tid = t['translation_id']
                if not tid:
                    # No known translation record: look one up by source text.
                    old_trans = irt.search_read(
                        request.cr, request.uid,
                        [
                            ('type', '=', 'view'),
                            ('res_id', '=', view_id),
                            ('lang', '=', lang),
                            ('src', '=', initial_content),
                        ])
                    if old_trans:
                        tid = old_trans[0]['id']

                if tid:
                    vals = {'value': new_content}
                    irt.write(request.cr, request.uid, [tid], vals)
                else:
                    new_trans = {
                        'name': 'website',
                        'res_id': view_id,
                        'lang': lang,
                        'type': 'view',
                        'source': initial_content,
                        'value': new_content,
                    }
                    if t.get('gengo_translation'):
                        new_trans['gengo_translation'] = t.get('gengo_translation')
                        new_trans['gengo_comment'] = t.get('gengo_comment')
                    irt.create(request.cr, request.uid, new_trans)
        return True

    @http.route('/website/translations', type='json', auth="public", website=True)
    def get_website_translations(self, lang):
        """Return the JS translation terms of every installed website* module."""
        module_obj = request.registry['ir.module.module']
        module_ids = module_obj.search(request.cr, request.uid, [('name', 'ilike', 'website'), ('state', '=', 'installed')], context=request.context)
        modules = [x['name'] for x in module_obj.read(request.cr, request.uid, module_ids, ['name'], context=request.context)]
        return WebClient().translations(mods=modules, lang=lang)

    @http.route('/website/attach', type='http', auth='user', methods=['POST'], website=True)
    def attach(self, func, upload=None, url=None, disable_optimization=None):
        """Create an ir.attachment from an uploaded image or an external URL
        and call back the JS function *func* in the editor iframe with the
        resulting website URL (or an error message)."""
        Attachments = request.registry['ir.attachment']

        website_url = message = None
        if not upload:
            website_url = url
            name = url.split("/").pop()
            attachment_id = Attachments.create(request.cr, request.uid, {
                'name': name,
                'type': 'url',
                'url': url,
                'res_model': 'ir.ui.view',
            }, request.context)
        else:
            try:
                image_data = upload.read()
                image = Image.open(cStringIO.StringIO(image_data))
                w, h = image.size
                if w*h > 42e6: # Nokia Lumia 1020 photo resolution
                    raise ValueError(
                        u"Image size excessive, uploaded images must be smaller "
                        u"than 42 million pixel")

                if not disable_optimization and image.format in ('PNG', 'JPEG'):
                    image_data = image_save_for_web(image)

                attachment_id = Attachments.create(request.cr, request.uid, {
                    'name': upload.filename,
                    'datas': image_data.encode('base64'),
                    'datas_fname': upload.filename,
                    'res_model': 'ir.ui.view',
                }, request.context)

                [attachment] = Attachments.read(
                    request.cr, request.uid, [attachment_id], ['website_url'],
                    context=request.context)
                website_url = attachment['website_url']
            except Exception, e:
                logger.exception("Failed to upload image to attachment")
                message = unicode(e)

        return """<script type='text/javascript'>
            window.parent['%s'](%s, %s);
        </script>""" % (func, json.dumps(website_url), json.dumps(message))

    @http.route(['/website/publish'], type='json', auth="public", website=True)
    def publish(self, id, object):
        """Toggle ``website_published`` on record *id* of model *object* and
        return the new value."""
        _id = int(id)
        _object = request.registry[object]
        obj = _object.browse(request.cr, request.uid, _id)

        values = {}
        if 'website_published' in _object._fields:
            values['website_published'] = not obj.website_published
        _object.write(request.cr, request.uid, [_id],
                      values, context=request.context)

        obj = _object.browse(request.cr, request.uid, _id)
        return bool(obj.website_published)

    @http.route(['/website/seo_suggest/<keywords>'], type='http', auth="public", website=True)
    def seo_suggest(self, keywords):
        """Fetch keyword suggestions from the Google toolbar autocomplete
        service and return them as a JSON-encoded list of strings."""
        url = "http://google.com/complete/search"
        try:
            req = urllib2.Request("%s?%s" % (url, werkzeug.url_encode({
                'ie': 'utf8', 'oe': 'utf8', 'output': 'toolbar', 'q': keywords})))
            # NOTE(review): this assignment shadows openerp.http.request for
            # the rest of this method; harmless here but confusing.
            request = urllib2.urlopen(req)
        except (urllib2.HTTPError, urllib2.URLError):
            return []
        xmlroot = ET.fromstring(request.read())
        return json.dumps([sugg[0].attrib['data'] for sugg in xmlroot if len(sugg) and sugg[0].attrib['data']])

    #------------------------------------------------------
    # Helpers
    #------------------------------------------------------
    @http.route(['/website/kanban'], type='http', auth="public", methods=['POST'], website=True)
    def kanban(self, **post):
        """Delegate kanban-column rendering to the website model."""
        return request.website.kanban_col(**post)

    def placeholder(self, response):
        """Fill *response* with the generic placeholder image."""
        return request.registry['website']._image_placeholder(response)

    @http.route([
        '/website/image',
        '/website/image/<model>/<id>/<field>',
        '/website/image/<model>/<id>/<field>/<int:max_width>x<int:max_height>'
        ], auth="public", website=True)
    def website_image(self, model, id, field, max_width=None, max_height=None):
        """ Fetches the requested field and ensures it does not go above
        (max_width, max_height), resizing it if necessary.

        If the record is not found or does not have the requested field,
        returns a placeholder image via :meth:`~.placeholder`.

        Sets and checks conditional response parameters:

        * :mailheader:`ETag` is always set (and checked)
        * :mailheader:`Last-Modified` is set iff the record has a concurrency
          field (``__last_update``)

        The requested field is assumed to be base64-encoded image data in
        all cases.
        """
        try:
            # An id of the form "<id>_<sha>" enables long-lived caching.
            idsha = id.split('_')
            id = idsha[0]
            response = werkzeug.wrappers.Response()
            return request.registry['website']._image(
                request.cr, request.uid, model, id, field, response, max_width, max_height,
                cache=STATIC_CACHE if len(idsha) > 1 else None)
        except Exception:
            logger.exception("Cannot render image field %r of record %s[%s] at size(%s,%s)",
                             field, model, id, max_width, max_height)
            response = werkzeug.wrappers.Response()
            return self.placeholder(response)

    #------------------------------------------------------
    # Server actions
    #------------------------------------------------------
    @http.route([
        '/website/action/<path_or_xml_id_or_id>',
        '/website/action/<path_or_xml_id_or_id>/<path:path>',
        ], type='http', auth="public", website=True)
    def actions_server(self, path_or_xml_id_or_id, **post):
        """Run a published ir.actions.server identified by XML id, website
        path or database id; return its Response if any, else redirect to /."""
        cr, uid, context = request.cr, request.uid, request.context
        res, action_id, action = None, None, None
        ServerActions = request.registry['ir.actions.server']

        # find the action_id: either an xml_id, the path, or an ID
        if isinstance(path_or_xml_id_or_id, basestring) and '.' in path_or_xml_id_or_id:
            action_id = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, request.uid, path_or_xml_id_or_id, raise_if_not_found=False)
        if not action_id:
            action_ids = ServerActions.search(cr, uid, [('website_path', '=', path_or_xml_id_or_id), ('website_published', '=', True)], context=context)
            action_id = action_ids and action_ids[0] or None
        if not action_id:
            try:
                action_id = int(path_or_xml_id_or_id)
            except ValueError:
                pass

        # check it effectively exists
        if action_id:
            action_ids = ServerActions.exists(cr, uid, [action_id], context=context)
            action_id = action_ids and action_ids[0] or None
        # run it, return only if we got a Response object
        if action_id:
            action = ServerActions.browse(cr, uid, action_id, context=context)
            if action.state == 'code' and action.website_published:
                action_res = ServerActions.run(cr, uid, [action_id], context=context)
                if isinstance(action_res, werkzeug.wrappers.Response):
                    res = action_res
        if res:
            return res
        return request.redirect('/')
| minhphung171093/GreenERP_V8 | openerp/addons/website/controllers/main.py | Python | agpl-3.0 | 20,190 | 0.003566 |
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
from copy import deepcopy
from weboob.tools.log import getLogger, DEBUG_FILTERS
from weboob.tools.ordereddict import OrderedDict
from weboob.browser.pages import NextPage
from .filters.standard import _Filter, CleanText
from .filters.html import AttributeNotFound, XPathNotFound
__all__ = ['DataError', 'AbstractElement', 'ListElement', 'ItemElement', 'TableElement', 'SkipItem']
class DataError(Exception):
    """
    Returned data from pages are incoherent (e.g. two objects sharing the
    same ID in a :class:`ListElement`).
    """
def method(klass):
    """Wrap an element class so it can be invoked as an instance method.

    The returned function instantiates ``klass`` with the receiving object
    and immediately calls the resulting element with the remaining
    positional and keyword arguments.
    """
    def bound(self, *args, **kwargs):
        element = klass(self)
        return element(*args, **kwargs)
    return bound
class AbstractElement(object):
    """Base class for page elements.

    Binds a page, an optional parent element and an lxml node, and knows
    how to evaluate "selectors" (filters, nested elements, callables or
    constants) against that node.
    """
    # Monotonic counter giving each element a unique id for debug logging.
    _creation_counter = 0

    def __init__(self, page, parent=None, el=None):
        self.page = page
        self.parent = parent
        # Working node: explicit el, else the parent's node, else the page doc.
        if el is not None:
            self.el = el
        elif parent is not None:
            self.el = parent.el
        else:
            self.el = page.doc

        # Each element gets its own copy of the environment so mutations
        # do not leak into siblings or the page.
        if parent is not None:
            self.env = deepcopy(parent.env)
        else:
            self.env = deepcopy(page.params)

        # Used by debug
        self._random_id = AbstractElement._creation_counter
        AbstractElement._creation_counter += 1

        self.loaders = {}

    def use_selector(self, func, key=None):
        """Evaluate *func*, which may be a filter (bound to this element),
        an :class:`ItemElement` subclass, a plain callable, or a constant
        (deep-copied so callers cannot share mutable state)."""
        if isinstance(func, _Filter):
            func._obj = self
            func._key = key
            value = func(self)
        elif isinstance(func, type) and issubclass(func, ItemElement):
            value = func(self.page, self, self.el)()
        elif callable(func):
            value = func()
        else:
            value = deepcopy(func)

        return value

    def parse(self, obj):
        """Hook run before attributes are processed; default is a no-op."""
        pass

    def cssselect(self, *args, **kwargs):
        """Proxy CSS selection to the underlying node."""
        return self.el.cssselect(*args, **kwargs)

    def xpath(self, *args, **kwargs):
        """Proxy XPath evaluation to the underlying node."""
        return self.el.xpath(*args, **kwargs)

    def handle_loaders(self):
        """Evaluate every ``load_*`` attribute once and cache the result in
        ``self.loaders`` under the bare name (without the prefix)."""
        for attrname in dir(self):
            m = re.match('load_(.*)', attrname)
            if not m:
                continue
            name = m.group(1)
            if name in self.loaders:
                continue
            loader = getattr(self, attrname)
            self.loaders[name] = self.use_selector(loader, key=attrname)
class ListElement(AbstractElement):
    """Element that iterates sub-nodes (selected by ``item_xpath``) and
    yields the objects built by its nested element classes, de-duplicating
    them by ID."""
    # XPath selecting the nodes to process; None means use the current node.
    item_xpath = None
    # When True, objects are yielded only after the whole list is parsed.
    flush_at_end = False
    # When True, a duplicate object ID logs a warning instead of raising.
    ignore_duplicate = False

    def __init__(self, *args, **kwargs):
        super(ListElement, self).__init__(*args, **kwargs)
        self.logger = getLogger(self.__class__.__name__.lower())
        # One object per ID, kept in insertion order.
        self.objects = OrderedDict()

    def __call__(self, *args, **kwargs):
        """Store keyword arguments into the environment, then iterate."""
        for key, value in kwargs.iteritems():
            self.env[key] = value

        return self.__iter__()

    def find_elements(self):
        """
        Get the nodes that will have to be processed.
        This method can be overridden if xpath filters are not
        sufficient.
        """
        if self.item_xpath is not None:
            for el in self.el.xpath(self.item_xpath):
                yield el
        else:
            yield self.el

    def __iter__(self):
        self.parse(self.el)

        items = []
        for el in self.find_elements():
            for attrname in dir(self):
                attr = getattr(self, attrname)
                # Instantiate every nested AbstractElement subclass (but not
                # our own class, to avoid infinite recursion) on each node.
                if isinstance(attr, type) and issubclass(attr, AbstractElement) and attr != type(self):
                    item = attr(self.page, self, el)
                    item.handle_loaders()
                    items.append(item)

        for item in items:
            for obj in item:
                obj = self.store(obj)
                if obj and not self.flush_at_end:
                    yield obj

        if self.flush_at_end:
            for obj in self.flush():
                yield obj

        self.check_next_page()

    def flush(self):
        """Yield every stored object, in insertion order."""
        for obj in self.objects.itervalues():
            yield obj

    def check_next_page(self):
        """Evaluate the optional ``next_page`` selector and raise
        :class:`NextPage` when it produces a value, so the browser can
        follow pagination."""
        if not hasattr(self, 'next_page'):
            return

        next_page = getattr(self, 'next_page')
        try:
            value = self.use_selector(next_page)
        except (AttributeNotFound, XPathNotFound):
            return

        if value is None:
            return

        raise NextPage(value)

    def store(self, obj):
        """Register *obj* by ID, enforcing ID uniqueness (see
        ``ignore_duplicate``); objects without an ID are passed through."""
        if obj.id:
            if obj.id in self.objects:
                if self.ignore_duplicate:
                    self.logger.warning('There are two objects with the same ID! %s' % obj.id)
                    return
                else:
                    raise DataError('There are two objects with the same ID! %s' % obj.id)
            self.objects[obj.id] = obj
        return obj
class SkipItem(Exception):
    """
    Raise this exception in an :class:`ItemElement` subclass to skip an item.

    Typically raised from :meth:`ItemElement.parse` or an ``obj_*``
    selector; :meth:`ItemElement.__iter__` catches it and yields nothing.
    """
class _ItemElementMeta(type):
    """
    Private meta-class used to keep order of obj_* attributes in :class:`ItemElement`.
    """
    def __new__(mcs, name, bases, attrs):
        # Start from the attribute order inherited from base classes.
        _attrs = []
        for base in bases:
            if hasattr(base, '_attrs'):
                _attrs += base._attrs

        # Collect obj_* declarations of this class, keyed by the bare name.
        filters = [(re.sub('^obj_', '', attr_name), attrs[attr_name]) for attr_name, obj in attrs.items() if attr_name.startswith('obj_')]
        # constants first, then filters, then methods
        filters.sort(key=lambda x: x[1]._creation_counter if hasattr(x[1], '_creation_counter') else (sys.maxsize if callable(x[1]) else 0))

        new_class = super(_ItemElementMeta, mcs).__new__(mcs, name, bases, attrs)
        # Inherited names first, then this class's in declaration order.
        new_class._attrs = _attrs + [f[0] for f in filters]
        return new_class
class ItemElement(AbstractElement):
    """Element that builds a single object (of class ``klass``) by
    evaluating its ``obj_*`` selectors in declaration order."""
    __metaclass__ = _ItemElementMeta

    _attrs = None
    _loaders = None
    # Class of the object to build; None disables construction.
    klass = None
    # Optional callable deciding whether this item should be parsed at all.
    condition = None
    # Optional callable validating the finished object before yielding it.
    validate = None

    class Index(object):
        pass

    def __init__(self, *args, **kwargs):
        super(ItemElement, self).__init__(*args, **kwargs)
        self.logger = getLogger(self.__class__.__name__.lower())
        self.obj = None

    def build_object(self):
        """Instantiate the target class, or return None when unset."""
        if self.klass is None:
            return
        return self.klass()

    def __call__(self, obj=None):
        """Parse and return the single resulting object, optionally filling
        a pre-built *obj* instead of constructing a new one."""
        if obj is not None:
            self.obj = obj

        for obj in self:
            return obj

    def __iter__(self):
        # A falsy condition skips the whole item silently.
        if self.condition is not None and not self.condition():
            return

        try:
            if self.obj is None:
                self.obj = self.build_object()
            self.parse(self.el)
            self.handle_loaders()
            for attr in self._attrs:
                self.handle_attr(attr, getattr(self, 'obj_%s' % attr))
        except SkipItem:
            return

        if self.validate is not None and not self.validate(self.obj):
            return

        yield self.obj

    def handle_attr(self, key, func):
        """Evaluate one ``obj_*`` selector and set the result on the object,
        logging the assignment at the filters debug level."""
        try:
            value = self.use_selector(func, key=key)
        except Exception as e:
            # Help debugging as tracebacks do not give us the key
            self.logger.warning('Attribute %s raises %s' % (key, repr(e)))
            raise
        logger = getLogger('b2filters')
        logger.log(DEBUG_FILTERS, "%s.%s = %r" % (self._random_id, key, value))
        setattr(self.obj, key, value)
class TableElement(ListElement):
    """ListElement for HTML tables.

    Maps column titles declared in ``col_*`` class attributes to column
    numbers read from the header row, so item elements can fetch cells by
    logical name via :meth:`get_colnum`.
    """
    # XPath of the header cells; must be set by subclasses.
    head_xpath = None
    # Filter used to normalize header-cell text before matching titles.
    cleaner = CleanText

    def __init__(self, *args, **kwargs):
        super(TableElement, self).__init__(*args, **kwargs)

        self._cols = {}

        # Collect col_* declarations: name -> list of accepted titles.
        columns = {}
        for attrname in dir(self):
            m = re.match('col_(.*)', attrname)
            if m:
                cols = getattr(self, attrname)
                if not isinstance(cols, (list,tuple)):
                    cols = [cols]
                columns[m.group(1)] = [s.lower() for s in cols]

        colnum = 0
        for el in self.el.xpath(self.head_xpath):
            title = self.cleaner.clean(el).lower()
            for name, titles in columns.iteritems():
                if title in titles and not name in self._cols:
                    self._cols[name] = colnum
            try:
                # A header cell may span several columns.
                colnum += int(el.attrib.get('colspan', 1))
            except (ValueError, AttributeError):
                colnum += 1

    def get_colnum(self, name):
        """Return the 0-based column index for *name*, or None if unknown."""
        return self._cols.get(name, None)
class DictElement(ListElement):
    """ListElement variant that walks a decoded JSON document (nested
    dicts and lists) instead of an lxml tree."""

    def find_elements(self):
        """Drill down ``item_xpath`` -- a '/'-separated path, a sequence of
        keys, or None -- then yield each item of the resulting container."""
        if self.item_xpath is None:
            path = []
        elif isinstance(self.item_xpath, basestring):
            path = self.item_xpath.split('/')
        else:
            path = self.item_xpath

        for key in path:
            # JSON arrays are indexed by integers, objects by their keys.
            if isinstance(self.el, list):
                key = int(key)
            self.el = self.el[key]

        for item in self.el:
            yield item
| willprice/weboob | weboob/browser/elements.py | Python | agpl-3.0 | 9,532 | 0.001154 |
from P_14 import *
# Report the dimensions of the feature matrix exposed by P_14
# (presumably the scikit-learn iris bunch -- confirm in P_14).
print("Shape of data: {}".format(iris_dataset["data"].shape))
# Block until Enter is pressed so the console window stays open.
input()
| TNT-Samuel/Coding-Projects | Machine Learning with Python/Chapter 1/P_15.py | Python | gpl-3.0 | 90 | 0 |
# encoding: utf-8
"""Gherkin step implementations for chart data features."""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import datetime
from behave import given, then, when
from pptx.chart.data import (
BubbleChartData, Category, CategoryChartData, XyChartData
)
from pptx.enum.chart import XL_CHART_TYPE
from pptx.util import Inches
# given ===================================================
@given('a BubbleChartData object with number format {strval}')
def given_a_BubbleChartData_object_with_number_format(context, strval):
    """Expose a BubbleChartData, overriding number_format unless 'None'."""
    kwargs = {} if strval == 'None' else {'number_format': int(strval)}
    context.chart_data = BubbleChartData(**kwargs)
@given('a Categories object with number format {init_nf}')
def given_a_Categories_object_with_number_format_init_nf(context, init_nf):
    """Expose a Categories object, applying init_nf unless left as default."""
    cats = CategoryChartData().categories
    if init_nf != 'left as default':
        cats.number_format = init_nf
    context.categories = cats
@given('a Category object')
def given_a_Category_object(context):
    """Expose a bare Category (no label, no parent)."""
    category = Category(None, None)
    context.category = category
@given('a CategoryChartData object')
def given_a_CategoryChartData_object(context):
    """Expose an empty CategoryChartData instance."""
    chart_data = CategoryChartData()
    context.chart_data = chart_data
@given('a CategoryChartData object having date categories')
def given_a_CategoryChartData_object_having_date_categories(context):
    """Expose a CategoryChartData whose categories are three consecutive dates."""
    chart_data = CategoryChartData()
    first_day = datetime.date(2016, 12, 27)
    chart_data.categories = [
        first_day + datetime.timedelta(days=offset) for offset in range(3)
    ]
    context.chart_data = chart_data
@given('a CategoryChartData object with number format {strval}')
def given_a_CategoryChartData_object_with_number_format(context, strval):
    """Expose a CategoryChartData, overriding number_format unless 'None'."""
    kwargs = {} if strval == 'None' else {'number_format': int(strval)}
    context.chart_data = CategoryChartData(**kwargs)
@given('a XyChartData object with number format {strval}')
def given_a_XyChartData_object_with_number_format(context, strval):
    """Expose an XyChartData, overriding number_format unless 'None'."""
    kwargs = {} if strval == 'None' else {'number_format': int(strval)}
    context.chart_data = XyChartData(**kwargs)
@given('the categories are of type {type_}')
def given_the_categories_are_of_type(context, type_):
    """Add a single category whose label has the requested Python type."""
    labels = {
        'date': datetime.date(2016, 12, 22),
        'float': 42.24,
        'int': 42,
        'str': 'foobar',
    }
    context.categories.add_category(labels[type_])
# when ====================================================
@when('I add a bubble data point with number format {strval}')
def when_I_add_a_bubble_data_point_with_number_format(context, strval):
    """Add an (x, y, size) bubble point, with an optional number format."""
    kwargs = {'x': 1, 'y': 2, 'size': 10}
    if strval != 'None':
        kwargs['number_format'] = int(strval)
    context.data_point = context.series_data.add_data_point(**kwargs)
@when('I add a data point with number format {strval}')
def when_I_add_a_data_point_with_number_format(context, strval):
    """Add a category data point, with an optional number format."""
    kwargs = {'value': 42}
    if strval != 'None':
        kwargs['number_format'] = int(strval)
    context.data_point = context.series_data.add_data_point(**kwargs)
@when('I add an XY data point with number format {strval}')
def when_I_add_an_XY_data_point_with_number_format(context, strval):
    """Add an (x, y) data point, with an optional number format."""
    kwargs = {'x': 1, 'y': 2}
    if strval != 'None':
        kwargs['number_format'] = int(strval)
    context.data_point = context.series_data.add_data_point(**kwargs)
@when('I add an {xy_type} chart having 2 series of 3 points each')
def when_I_add_an_xy_chart_having_2_series_of_3_points(context, xy_type):
    """Insert an XY chart of the named sub-type populated with fixed data."""
    chart_type = getattr(XL_CHART_TYPE, xy_type)
    fixture = (
        ('Series 1', ((-0.1, 0.5), (16.2, 0.0), (8.0, 0.2))),
        ('Series 2', ((12.4, 0.8), (-7.5, -0.5), (-5.1, -0.2)))
    )
    chart_data = XyChartData()
    for label, points in fixture:
        series = chart_data.add_series(label)
        for x, y in points:
            series.add_data_point(x, y)
    graphic_frame = context.slide.shapes.add_chart(
        chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
    )
    context.chart = graphic_frame.chart
@when("I assign ['a', 'b', 'c'] to chart_data.categories")
def when_I_assign_a_b_c_to_chart_data_categories(context):
chart_data = context.chart_data
chart_data.categories = ['a', 'b', 'c']
# then ====================================================
@then("[c.label for c in chart_data.categories] is ['a', 'b', 'c']")
def then_c_label_for_c_in_chart_data_categories_is_a_b_c(context):
chart_data = context.chart_data
assert [c.label for c in chart_data.categories] == ['a', 'b', 'c']
@then('categories.number_format is {value}')
def then_categories_number_format_is_value(context, value):
    """categories.number_format equals the literal expected text."""
    actual = context.categories.number_format
    assert actual == value, 'got %s' % actual
@then('category.add_sub_category(name) is a Category object')
def then_category_add_sub_category_is_a_Category_object(context):
    """add_sub_category() returns a new Category instance."""
    sub_category = context.category.add_sub_category('foobar')
    context.sub_category = sub_category
    assert type(sub_category).__name__ == 'Category'
@then('category.sub_categories[-1] is the new category')
def then_category_sub_categories_minus_1_is_the_new_category(context):
    """The newly added sub-category lands at the end of sub_categories."""
    assert context.category.sub_categories[-1] is context.sub_category
@then('chart_data.add_category(name) is a Category object')
def then_chart_data_add_category_name_is_a_Category_object(context):
    """add_category() returns a new Category instance."""
    category = context.chart_data.add_category('foobar')
    context.category = category
    assert type(category).__name__ == 'Category'
@then('chart_data.add_series(name, values) is a CategorySeriesData object')
def then_chart_data_add_series_is_a_CategorySeriesData_object(context):
    """add_series() returns a CategorySeriesData instance."""
    series = context.chart_data.add_series('Series X', (1, 2, 3))
    context.series = series
    assert type(series).__name__ == 'CategorySeriesData'
@then('chart_data.categories is a Categories object')
def then_chart_data_categories_is_a_Categories_object(context):
    """The categories property exposes a Categories collection."""
    categories = context.chart_data.categories
    assert type(categories).__name__ == 'Categories'
@then('chart_data.categories[-1] is the category')
def then_chart_data_categories_minus_1_is_the_category(context):
    """The added category is the last item in the collection."""
    assert context.chart_data.categories[-1] is context.category
@then('chart_data.number_format is {value_str}')
def then_chart_data_number_format_is(context, value_str):
    """number_format is 'General' or the given integer value."""
    expected = value_str if value_str == 'General' else int(value_str)
    assert context.chart_data.number_format == expected
@then('chart_data[-1] is the new series')
def then_chart_data_minus_1_is_the_new_series(context):
    """The added series is appended at the end of the chart data."""
    assert context.chart_data[-1] is context.series
@then('series_data.number_format is {value_str}')
def then_series_data_number_format_is(context, value_str):
series_data = context.series_data
number_format = value_str if value_str == 'General' else int(value_str)
assert series_data.number_format == number_format
| biggihs/python-pptx | features/steps/chartdata.py | Python | mit | 7,361 | 0 |
# -*- coding: utf-8 -*-
import pytest
from irc3.plugins import slack
pytestmark = pytest.mark.asyncio
async def test_simple_matches(irc3_bot_factory):
bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
plugin = bot.get_plugin(slack.Slack)
setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
assert '' == await plugin.parse_text('\n')
assert '' == await plugin.parse_text('\r\n')
assert '' == await plugin.parse_text('\r')
assert '@channel' == await plugin.parse_text('<!channel>')
assert '@group' == await plugin.parse_text('<!group>')
assert '@everyone' == await plugin.parse_text('<!everyone>')
assert '<' == await plugin.parse_text('<')
assert '>' == await plugin.parse_text('>')
assert '&' == await plugin.parse_text('&')
assert 'daniel' == await plugin.parse_text('<WHATEVER|daniel>')
async def test_channel_matches(irc3_bot_factory):
bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
plugin = bot.get_plugin(slack.Slack)
setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
async def api_call(self, method, date=None):
return ({'channel': {'name': 'testchannel'}})
plugin.api_call = api_call
assert '#testchannel' == await plugin.parse_text('<#C12345>')
assert 'channel' == await plugin.parse_text('<#C12345|channel>')
async def test_user_matches(irc3_bot_factory):
bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
plugin = bot.get_plugin(slack.Slack)
setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
async def api_call(self, method, date=None):
return ({'user': {'name': 'daniel'}})
plugin.api_call = api_call
assert '@daniel' == await plugin.parse_text('<@U12345>')
assert 'user' == await plugin.parse_text('<@U12345|user>')
async def test_emoji_matches(irc3_bot_factory):
bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
plugin = bot.get_plugin(slack.Slack)
setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
assert ':-)' == await plugin.parse_text(':smiley:')
assert ':@' == await plugin.parse_text(':rage:')
| gawel/irc3 | tests/test_slack.py | Python | mit | 2,123 | 0 |
import six
try:
from logging import NullHandler
except ImportError: # Python 2.6
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
try:
from urllib import urlencode as format_query
except ImportError:
from urllib.parse import urlencode as format_query
try:
from urlparse import urlparse as parse_url
except ImportError:
from urllib.parse import urlparse as parse_url
try:
memoryview = memoryview
except NameError:
memoryview = buffer
def get_int(*args):
try:
return int(get_character(*args))
except ValueError:
return ord(get_character(*args))
def get_character(x, index):
return chr(get_byte(x, index))
def get_byte(x, index):
return six.indexbytes(x, index)
def encode_string(x):
return x.encode('utf-8')
def decode_string(x):
return x.decode('utf-8')
| feus4177/socketIO-client-2 | socketIO_client/symmetries.py | Python | mit | 897 | 0.001115 |
from random import randint
from position import Position, Size
from block import Room, Block
class Room(object):
def __init__(self, pos_row=0, pos_col=0, rows=1, cols=1, fill=Block.empty,
left=Room.left, right=Room.right,
top=Room.top, bottom=Room.bottom,
top_left=Room.top_left, top_right=Room.top_right,
bottom_left=Room.bottom_left, bottom_right=Room.bottom_right):
self.pos = Position(pos_row, pos_col)
self.center = Position(pos_row + (rows // 2), pos_col + (cols // 2))
self.size = Size(rows, cols)
self.fill = fill
# Specific the block of walls
self.left = left
self.right = right
self.top = top
self.bottom = bottom
self.top_left = top_left
self.top_right = top_right
self.bottom_left = bottom_left
self.bottom_right = bottom_right
@classmethod
def from_objects(cls, pos, size, **kwargs):
return cls(pos.row, pos.col, size.rows, size.cols, **kwargs)
def collision(self, other_room):
"""
Checks if two rooms intersect each other
The logic is clearer as a one dimension line
"""
pos_2 = Position(self.pos.row + self.size.rows,
self.pos.col + self.size.cols)
other_room_pos_2 = Position(other_room.pos.row + other_room.size.rows,
other_room.pos.col + other_room.size.cols)
return (self.pos.col <= other_room_pos_2.col and
pos_2.col >= other_room.pos.col and
self.pos.row <= other_room_pos_2.row and
pos_2.row >= other_room.pos.row)
@classmethod
def generate(cls, min_pos, max_pos, min_size, max_size):
"""
Create room from min_size to max_size between min_pos and max_pos
"""
size = Size(randint(min_size.rows, max_size.rows),
randint(min_size.cols, max_size.cols))
pos = Position(randint(min_pos.row, max_pos.row - size.rows),
randint(min_pos.col, max_pos.col - size.cols))
return cls.from_objects(pos, size)
class RoomList():
def __init__(self):
self._room_list = []
def __iter__(self):
return iter(self._room_list)
def __getitem__(self, key):
return self._room_list[key]
def __len__(self):
return len(self._room_list)
def append(self, room):
self._room_list.append(room)
def generate(self, num, min_pos, max_pos, min_size, max_size):
"""
Given a number of rooms, generate rooms that don't intersect
"""
for i in range(num):
room = Room.generate(min_pos, max_pos, min_size, max_size)
while self.is_collision(room):
room = Room.generate(min_pos, max_pos, min_size, max_size)
self.append(room)
def is_collision(self, room):
"""
Iterate through the list of rooms to test for collisions
"""
for other_room in self:
if other_room.collision(room):
return True
return False
| f0lie/RogueGame | src/room.py | Python | mit | 3,171 | 0 |
#!/usr/bin/python3
## @package domomaster
# Master daemon for D3 boxes.
#
# Developed by GreenLeaf.
import sys;
import os;
import random;
import string;
from hashlib import sha1
from subprocess import *
import socket;
sys.path.append("/usr/lib/domoleaf");
from DaemonConfigParser import *;
MASTER_CONF_FILE_BKP = '/etc/domoleaf/master.conf.save';
MASTER_CONF_FILE_TO = '/etc/domoleaf/master.conf';
SLAVE_CONF_FILE = '/etc/domoleaf/slave.conf';
## Copies the conf data from a backup file to a new one.
def master_conf_copy():
file_from = DaemonConfigParser(MASTER_CONF_FILE_BKP);
file_to = DaemonConfigParser(MASTER_CONF_FILE_TO);
#listen
var = file_from.getValueFromSection('listen', 'port_slave');
file_to.writeValueFromSection('listen', 'port_slave', var);
var = file_from.getValueFromSection('listen', 'port_cmd');
file_to.writeValueFromSection('listen', 'port_cmd', var);
#connect
var = file_from.getValueFromSection('connect', 'port');
file_to.writeValueFromSection('connect', 'port', var);
#mysql
var = file_from.getValueFromSection('mysql', 'user');
file_to.writeValueFromSection('mysql', 'user', var);
var = file_from.getValueFromSection('mysql', 'database_name');
file_to.writeValueFromSection('mysql', 'database_name', var);
#greenleaf
var = file_from.getValueFromSection('greenleaf', 'commercial');
file_to.writeValueFromSection('greenleaf', 'commercial', var);
var = file_from.getValueFromSection('greenleaf', 'admin_addr');
file_to.writeValueFromSection('greenleaf', 'admin_addr', var);
## Initializes the conf in database.
def master_conf_initdb():
file = DaemonConfigParser(MASTER_CONF_FILE_TO);
#mysql password
password = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(128))
password = sha1(password.encode('utf-8'))
file.writeValueFromSection('mysql', 'password', password.hexdigest());
os.system('sed -i "s/define(\'DB_PASSWORD\', \'domoleaf\')/define(\'DB_PASSWORD\', \''+password.hexdigest()+'\')/g" /etc/domoleaf/www/config.php')
#mysql user
query1 = 'DELETE FROM user WHERE User="domoleaf"';
query2 = 'DELETE FROM db WHERE User="domoleaf"';
query3 = 'INSERT INTO user (Host, User, Password) VALUES (\'%\', \'domoleaf\', PASSWORD(\''+password.hexdigest()+'\'));';
query4 = 'INSERT INTO db (Host, Db, User, Select_priv, Insert_priv, Update_priv, Delete_priv, Create_priv, Drop_priv, Grant_priv, References_priv, Index_priv, Alter_priv, Create_tmp_table_priv, Lock_tables_priv, Create_view_priv, Show_view_priv, Create_routine_priv, Alter_routine_priv, Execute_priv, Event_priv, Trigger_priv) VALUES ("%","domoleaf","domoleaf","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y");';
query5 = 'FLUSH PRIVILEGES';
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query1]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query2]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query3]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query4]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query5]);
## Initializes the conf in file.
def master_conf_init():
file = DaemonConfigParser(SLAVE_CONF_FILE);
personnal_key = file.getValueFromSection('personnal_key', 'aes');
hostname = socket.gethostname();
#KNX Interface
if os.path.exists('/dev/ttyAMA0'):
knx = "tpuarts"
knx_interface = 'ttyAMA0';
elif os.path.exists('/dev/ttyS0'):
knx = "tpuarts"
knx_interface = 'ttyS0';
else:
knx = "ipt"
knx_interface = '127.0.0.1';
domoslave = os.popen("dpkg-query -W -f='${Version}\n' domoslave").read().split('\n')[0];
query1 = "INSERT INTO daemon (name, serial, secretkey, validation, version) VALUES ('"+hostname+"','"+hostname+"','"+personnal_key+"',1,'"+domoslave+"')"
query2 = "INSERT INTO daemon_protocol (daemon_id, protocol_id, interface, interface_arg) VALUES (1,1,'"+knx+"','"+knx_interface+"')"
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'domoleaf',
'-e', query1]);
call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'domoleaf',
'-e', query2]);
if __name__ == "__main__":
#Upgrade
if os.path.exists(MASTER_CONF_FILE_BKP):
master_conf_copy()
os.remove(MASTER_CONF_FILE_BKP);
else:
master_conf_init()
master_conf_initdb()
| V-Paranoiaque/Domoleaf | domomaster/usr/bin/domomaster/domomaster_postinst.py | Python | gpl-3.0 | 4,613 | 0.017993 |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Create missing snapshot revisions.
Create Date: 2017-01-05 23:10:37.257161
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from ggrc.migrations.utils.snapshot_revisions import handle_objects
# revision identifiers, used by Alembic.
revision = '579239d161e1'
down_revision = '353e5f281799'
def upgrade():
"""Create missing revisions for snapshottable objects."""
# copy pasted from ggrc.snapshoter.rules.Types.all
snapshot_objects = sorted([
"AccessGroup",
"Clause",
"Control",
"DataAsset",
"Facility",
"Market",
"Objective",
"OrgGroup",
"Product",
"Section",
"Vendor",
"Policy",
"Regulation",
"Standard",
"Contract",
"System",
"Process",
"Risk",
"Threat",
])
handle_objects(snapshot_objects)
def downgrade():
"""Data correction migrations can not be downgraded."""
| VinnieJohns/ggrc-core | src/ggrc/migrations/versions/20170105231037_579239d161e1_create_missing_snapshot_revisions.py | Python | apache-2.0 | 1,087 | 0.0046 |
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
from edi.lib.edicommand import EdiCommand
from edi.lib.versionhelpers import get_edi_version
class Version(EdiCommand):
@classmethod
def advertise(cls, subparsers):
help_text = "print the program version"
description_text = "Print the program version."
subparsers.add_parser(cls._get_short_command_name(),
help=help_text,
description=description_text)
def run_cli(self, _):
version = self.run()
print(version)
@staticmethod
def run():
return get_edi_version()
| lueschem/edi | edi/commands/version.py | Python | lgpl-3.0 | 1,351 | 0 |
#!/bin/python
import sys
import vlc
import os
import re
from tempfile import *
from gtts import gTTS
from remote2text import RGBRemote2Text
parser = RGBRemote2Text(verbose=True)
while True:
ir_out = input()
response = parser.process(ir_out)
if response:
tts = gTTS(text=response, lang='pt')
tmp = NamedTemporaryFile(delete=False)
tts.write_to_fp(tmp)
path = os.path.join(gettempdir(), str(tmp.name))
vlc.MediaPlayer(path).play()
tmp.close()
| Macmod/rgb-remote-tts | remote-gtts/remote2gtts.py | Python | mit | 507 | 0 |
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import Max, F
from django.shortcuts import render, get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from ponyFiction import signals
from ponyFiction.forms.chapter import ChapterForm
from ponyFiction.models import Story, Chapter, Author
from django.views.decorators.csrf import csrf_protect
from cacheops import invalidate_obj
from .story import get_story
def chapter_view(request, story_id=False, chapter_order=False):
story = get_story(request, pk=story_id)
if chapter_order:
chapter = get_object_or_404(story.chapter_set, order=chapter_order)
page_title = "{} — {}".format(chapter.title[:80], story.title)
prev_chapter = chapter.get_prev_chapter()
next_chapter = chapter.get_next_chapter()
if request.user.is_authenticated():
signals.story_viewed.send(sender=Author, instance=request.user, story=story, chapter=chapter)
data = {
'story': story,
'chapter': chapter,
'prev_chapter': prev_chapter,
'next_chapter': next_chapter,
'page_title': page_title,
'allchapters': False
}
else:
chapters = story.chapter_set.order_by('order').cache()
page_title = "{} — все главы".format(story.title)
if request.user.is_authenticated():
signals.story_viewed.send(sender=Author, instance=request.user, story=story, chapter=None)
data = {
'story': story,
'chapters': chapters,
'page_title': page_title,
'allchapters': True
}
return render(request, 'chapter_view.html', data)
class ChapterAdd(CreateView):
model = Chapter
form_class = ChapterForm
template_name = 'chapter_work.html'
initial = {'button_submit': 'Добавить'}
story = None
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
self.story = get_object_or_404(Story, pk=kwargs['story_id'])
if self.story.editable_by(request.user):
return CreateView.dispatch(self, request, *args, **kwargs)
else:
raise PermissionDenied
def form_valid(self, form):
chapter = form.save(commit=False)
chapter.story = self.story
chapter.order = (self.story.chapter_set.aggregate(o=Max('order'))['o'] or 0) + 1
chapter.save()
return redirect('chapter_edit', chapter.id)
def get_context_data(self, **kwargs):
context = super(ChapterAdd, self).get_context_data(**kwargs)
extra_context = {'page_title': 'Добавить новую главу', 'story': self.story}
context.update(extra_context)
return context
class ChapterEdit(UpdateView):
model = Chapter
form_class = ChapterForm
template_name = 'chapter_work.html'
initial = {'button_submit': 'Сохранить изменения'}
chapter = None
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
return UpdateView.dispatch(self, request, *args, **kwargs)
def get_object(self, queryset=None):
self.chapter = UpdateView.get_object(self, queryset=queryset)
if self.chapter.story.editable_by(self.request.user):
return self.chapter
else:
raise PermissionDenied
def form_valid(self, form):
self.chapter = form.save()
return redirect('chapter_edit', self.chapter.id)
def get_context_data(self, **kwargs):
context = super(ChapterEdit, self).get_context_data(**kwargs)
extra_context = {'page_title': 'Редактирование «%s»' % self.chapter.title, 'chapter': self.chapter}
context.update(extra_context)
return context
class ChapterDelete(DeleteView):
model = Chapter
chapter = None
story = None
chapter_id = None
template_name = 'chapter_confirm_delete.html'
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
return DeleteView.dispatch(self, request, *args, **kwargs)
def get_object(self, queryset=None):
self.chapter = DeleteView.get_object(self, queryset=queryset)
self.story = self.chapter.story
self.chapter_id = self.chapter.id
if self.story.editable_by(self.request.user):
return self.chapter
else:
raise PermissionDenied
def delete(self, request, *args, **kwargs):
self.chapter = self.get_object()
self.story.chapter_set.filter(order__gt=self.chapter.order).update(order=F('order')-1)
for chapter in self.story.chapter_set.filter(order__gt=self.chapter.order):
invalidate_obj(chapter)
self.chapter.delete()
return redirect('story_edit', self.story.id)
def get_context_data(self, **kwargs):
context = super(ChapterDelete, self).get_context_data(**kwargs)
extra_context = {'page_title': 'Подтверждение удаления главы', 'story': self.story, 'chapter': self.chapter}
context.update(extra_context)
return context
| everypony/ponyFiction | ponyFiction/views/chapter.py | Python | gpl-3.0 | 5,461 | 0.001492 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2010 Brian G. Matherly
# Copyright (C) 2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Class handling language-specific displaying of names.
Specific symbols for parts of a name are defined:
====== ===============================================================
Symbol Description
====== ===============================================================
't' title
'f' given (first names)
'l' full surname (lastname)
'c' callname
'x'    nick name, call, or otherwise the first of the given names (common name)
'i' initials of the first names
'm' primary surname (main)
'0m' primary surname prefix
'1m' primary surname surname
'2m' primary surname connector
'y' pa/matronymic surname (father/mother) - assumed unique
'0y' pa/matronymic prefix
'1y' pa/matronymic surname
'2y' pa/matronymic connector
'o' surnames without pa/matronymic and primary
'r' non primary surnames (rest)
'p' list of all prefixes
'q' surnames without prefixes and connectors
's' suffix
'n' nick name
'g' family nick name
====== ===============================================================
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import re
import logging
LOG = logging.getLogger(".gramps.gen")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..const import ARABIC_COMMA, ARABIC_SEMICOLON, GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from ..lib.name import Name
from ..lib.nameorigintype import NameOriginType
try:
from ..config import config
WITH_GRAMPS_CONFIG=True
except ImportError:
WITH_GRAMPS_CONFIG=False
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# Indices into a serialized (raw) Name tuple, as used by the _raw_* helpers
# below (e.g. raw_data[_FIRSTNAME]):
_FIRSTNAME = 4
_SURNAME_LIST = 5
_SUFFIX = 6
_TITLE = 7
_TYPE = 8
_GROUP = 9
_SORT = 10
_DISPLAY = 11
_CALL = 12
_NICK = 13
_FAMNICK = 14
# Indices into each serialized (raw) Surname entry found in _SURNAME_LIST:
_SURNAME_IN_LIST = 0
_PREFIX_IN_LIST = 1
_PRIMARY_IN_LIST = 2
_TYPE_IN_LIST = 3
_CONNECTOR_IN_LIST = 4
# Surname origin types that mark a pa/matronymic surname
_ORIGINPATRO = NameOriginType.PATRONYMIC
_ORIGINMATRO = NameOriginType.MATRONYMIC
# Flags marking a name format as active/inactive in name_formats entries
_ACT = True
_INA = False
# Indices into a name_formats entry tuple (name, fmt, active, func, rawfunc):
_F_NAME = 0 # name of the format
_F_FMT = 1 # the format string
_F_ACT = 2 # if the format is active
_F_FN = 3 # name format function
_F_RAWFN = 4 # name format raw function
# Module-level setting: should a lone pa/matronymic be treated as 'the'
# surname?  Kept in sync with the 'preferences.patronimic-surname' config
# entry (see NameDisplay.__init__ / change_pa_sur).
PAT_AS_SURN = False
#-------------------------------------------------------------------------
#
# Local functions
#
#-------------------------------------------------------------------------
# Because of occurring in an exec(), this couldn't be in a lambda:
# we sort the keyword names longest first, and then by last letter, so
# that a translation of a shorter term cannot match inside a longer one;
# e.g. 'namelast' must not be mistaken for 'name', so 'namelast' has to
# be converted to %k before 'name' is converted.
##def _make_cmp(a, b): return -cmp((len(a[1]),a[1]), (len(b[1]), b[1]))
def _make_cmp_key(a):
    """Sort key (term length, term) for format keywords; sort with
    reverse=True so longer terms are substituted before the shorter
    terms they contain (e.g. 'namelast' before 'name')."""
    return (len(a[1]), a[1])
#-------------------------------------------------------------------------
#
# NameDisplayError class
#
#-------------------------------------------------------------------------
class NameDisplayError(Exception):
    """
    Error used to report that the name display format string is invalid.
    """

    def __init__(self, value):
        super().__init__()
        # The human-readable description of the invalid format string.
        self.value = value

    def __str__(self):
        return self.value
#-------------------------------------------------------------------------
#
# Functions to extract data from raw lists (unserialized objects)
#
#-------------------------------------------------------------------------
def _raw_full_surname(raw_surn_data_list):
    """Method for the 'l' symbol: every surname, each as prefix surname
    connector, collapsed to single spaces."""
    pieces = []
    for surn in raw_surn_data_list:
        pieces.append(surn[_PREFIX_IN_LIST])
        pieces.append(surn[_SURNAME_IN_LIST])
        pieces.append(surn[_CONNECTOR_IN_LIST])
    # Joining and re-splitting collapses the gaps left by empty parts.
    return ' '.join(' '.join(pieces).split())
def _raw_primary_surname(raw_surn_data_list):
    """Method for the 'm' symbol: the primary surname as
    prefix surname connector."""
    global PAT_AS_SURN
    nrsur = len(raw_surn_data_list)
    for surn in raw_surn_data_list:
        if not surn[_PRIMARY_IN_LIST]:
            continue
        # If there are multiple surnames, return the primary.  If there
        # is only one surname, then primary has little meaning, and we
        # assume a pa/matronymic should not be given as primary as it
        # normally is defined independently.
        lone_patro = (nrsur == 1 and
                      surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO))
        if lone_patro and not PAT_AS_SURN:
            return ''
        parts = (surn[_PREFIX_IN_LIST], surn[_SURNAME_IN_LIST],
                 surn[_CONNECTOR_IN_LIST])
        return ' '.join(('%s %s %s' % parts).split())
    return ''
def _raw_primary_surname_only(raw_surn_data_list):
    """Return only the surname part of the primary surname (a string)."""
    global PAT_AS_SURN
    nrsur = len(raw_surn_data_list)
    for surn in raw_surn_data_list:
        if not surn[_PRIMARY_IN_LIST]:
            continue
        lone_patro = (nrsur == 1 and
                      surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO))
        if lone_patro and not PAT_AS_SURN:
            # A lone pa/matronymic is not treated as 'the' surname.
            return ''
        return surn[_SURNAME_IN_LIST]
    return ''
def _raw_primary_prefix_only(raw_surn_data_list):
    """Return only the prefix part of the primary surname (a string)."""
    global PAT_AS_SURN
    nrsur = len(raw_surn_data_list)
    for surn in raw_surn_data_list:
        if not surn[_PRIMARY_IN_LIST]:
            continue
        lone_patro = (nrsur == 1 and
                      surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO))
        if lone_patro and not PAT_AS_SURN:
            # A lone pa/matronymic is not treated as 'the' surname.
            return ''
        return surn[_PREFIX_IN_LIST]
    return ''
def _raw_primary_conn_only(raw_surn_data_list):
    """Return only the connector part of the primary surname (a string)."""
    global PAT_AS_SURN
    nrsur = len(raw_surn_data_list)
    for surn in raw_surn_data_list:
        if not surn[_PRIMARY_IN_LIST]:
            continue
        lone_patro = (nrsur == 1 and
                      surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO))
        if lone_patro and not PAT_AS_SURN:
            # A lone pa/matronymic is not treated as 'the' surname.
            return ''
        return surn[_CONNECTOR_IN_LIST]
    return ''
def _raw_patro_surname(raw_surn_data_list):
    """Method for the 'y' symbol: the first pa/matronymic surname as
    prefix surname connector."""
    for surn in raw_surn_data_list:
        if surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO):
            parts = (surn[_PREFIX_IN_LIST], surn[_SURNAME_IN_LIST],
                     surn[_CONNECTOR_IN_LIST])
            return ' '.join(('%s %s %s' % parts).split())
    return ''
def _raw_patro_surname_only(raw_surn_data_list):
    """Method for the '1y' symbol: the pa/matronymic surname only."""
    for surn in raw_surn_data_list:
        if surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO):
            return ' '.join(('%s' % surn[_SURNAME_IN_LIST]).split())
    return ''
def _raw_patro_prefix_only(raw_surn_data_list):
    """Method for the '0y' symbol: the pa/matronymic prefix only."""
    for surn in raw_surn_data_list:
        if surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO):
            return ' '.join(('%s' % surn[_PREFIX_IN_LIST]).split())
    return ''
def _raw_patro_conn_only(raw_surn_data_list):
    """Method for the '2y' symbol: the pa/matronymic connector only."""
    for surn in raw_surn_data_list:
        if surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO):
            return ' '.join(('%s' % surn[_CONNECTOR_IN_LIST]).split())
    return ''
def _raw_nonpatro_surname(raw_surn_data_list):
    """Method for the 'o' symbol: all surnames that are neither primary
    nor pa/matronymic."""
    pieces = []
    for surn in raw_surn_data_list:
        if surn[_PRIMARY_IN_LIST]:
            continue
        if surn[_TYPE_IN_LIST][0] in (_ORIGINPATRO, _ORIGINMATRO):
            continue
        pieces.extend((surn[_PREFIX_IN_LIST], surn[_SURNAME_IN_LIST],
                       surn[_CONNECTOR_IN_LIST]))
    return ' '.join(' '.join(pieces).split())
def _raw_nonprimary_surname(raw_surn_data_list):
    """Method for the 'r' symbol: every surname except the primary one."""
    pieces = []
    for surn in raw_surn_data_list:
        if not surn[_PRIMARY_IN_LIST]:
            pieces.extend((surn[_PREFIX_IN_LIST], surn[_SURNAME_IN_LIST],
                           surn[_CONNECTOR_IN_LIST]))
    return ' '.join(' '.join(pieces).split())
def _raw_prefix_surname(raw_surn_data_list):
    """Method for the 'p' symbol: every surname prefix."""
    prefixes = (surn[_PREFIX_IN_LIST] for surn in raw_surn_data_list)
    return ' '.join(' '.join(prefixes).split())
def _raw_single_surname(raw_surn_data_list):
    """Method for the 'q' symbol: surnames without prefixes/connectors."""
    surnames = (surn[_SURNAME_IN_LIST] for surn in raw_surn_data_list)
    return ' '.join(' '.join(surnames).split())
def cleanup_name(namestring):
    """Remove too long white space due to missing name parts,
    so "a  b" becomes "a b" and "a , b" becomes "a, b"
    """
    parts = namestring.split()
    if not parts:
        return ""
    # Lone punctuation is glued to the preceding word instead of getting
    # its own separating space.
    punctuation = (',', ';', ':', ARABIC_COMMA, ARABIC_SEMICOLON)
    pieces = [parts[0]]
    for val in parts[1:]:
        if len(val) == 1 and val in punctuation:
            pieces.append(val)
        else:
            pieces.append(' ' + val)
    return ''.join(pieces)
#-------------------------------------------------------------------------
#
# NameDisplay class
#
#-------------------------------------------------------------------------
class NameDisplay:
"""
Base class for displaying of Name instances.
Property:
*default_format*
the default name format to use
*pas_as_surn*
if only one surname, see if pa/ma should be considered as 'the' surname.
"""
format_funcs = {}
raw_format_funcs = {}
    def __init__(self, xlocale=glocale):
        """
        Initialize the NameDisplay class.

        If xlocale is passed in (a GrampsLocale), then
        the translated script will be returned instead.

        :param xlocale: allow selection of the displayer script
        :type xlocale: a GrampsLocale instance
        """
        global WITH_GRAMPS_CONFIG
        global PAT_AS_SURN
        # translators: needed for Arabic, ignore otherwise
        COMMAGLYPH = xlocale.translation.gettext(',')
        # Built-in formats as (number, description, format string, active);
        # Name.DEF is a pseudo-format that delegates to the chosen default
        # (see set_default_format).
        self.STANDARD_FORMATS = [
            (Name.DEF, _("Default format (defined by Gramps preferences)"),
             '', _ACT),
            (Name.LNFN, _("Surname, Given Suffix"),
             '%l' + COMMAGLYPH + ' %f %s', _ACT),
            (Name.FN, _("Given"),
             '%f', _ACT),
            (Name.FNLN, _("Given Surname Suffix"),
             '%f %l %s', _ACT),
            # primary name primconnector other, given pa/matronymic suffix, primprefix
            # translators: long string, have a look at Preferences dialog
            (Name.LNFNP, _("Main Surnames, Given Patronymic Suffix Prefix"),
             '%1m %2m %o' + COMMAGLYPH + ' %f %1y %s %0m', _ACT),
            # DEPRECATED FORMATS
            (Name.PTFN, _("Patronymic, Given"),
             '%y' + COMMAGLYPH + ' %s %f', _INA),
        ]
        # Template for the fast 'Surname, Given Suffix' raw path (_raw_lnfn).
        self.LNFN_STR = "%s" + COMMAGLYPH + " %s %s"
        # Maps format number -> (name, fmt_str, active, func, raw_func).
        self.name_formats = {}
        if WITH_GRAMPS_CONFIG:
            self.default_format = config.get('preferences.name-format')
            if self.default_format == 0:
                self.default_format = Name.LNFN
                config.set('preferences.name-format', self.default_format)
            #if only one surname, see if pa/ma should be considered as
            # 'the' surname.
            PAT_AS_SURN = config.get('preferences.patronimic-surname')
            config.connect('preferences.patronimic-surname', self.change_pa_sur)
        else:
            # No Gramps config module available (e.g. stand-alone use):
            # fall back to fixed defaults.
            self.default_format = Name.LNFN
            PAT_AS_SURN = False
        #preinit the name formats, this should be updated with the data
        #in the database once a database is loaded
        self.set_name_format(self.STANDARD_FORMATS)
def change_pa_sur(self, *args):
""" How to handle single patronymic as surname is changed"""
global PAT_AS_SURN
PAT_AS_SURN = config.get('preferences.patronimic-surname')
def get_pat_as_surn(self):
global PAT_AS_SURN
return PAT_AS_SURN
def _format_fn(self, fmt_str):
return lambda x: self.format_str(x, fmt_str)
def _format_raw_fn(self, fmt_str):
return lambda x: self.format_str_raw(x, fmt_str)
def _raw_lnfn(self, raw_data):
result = self.LNFN_STR % (_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_FIRSTNAME],
raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fnln(self, raw_data):
result = "%s %s %s" % (raw_data[_FIRSTNAME],
_raw_full_surname(raw_data[_SURNAME_LIST]),
raw_data[_SUFFIX])
return ' '.join(result.split())
def _raw_fn(self, raw_data):
result = raw_data[_FIRSTNAME]
return ' '.join(result.split())
def set_name_format(self, formats):
raw_func_dict = {
Name.LNFN : self._raw_lnfn,
Name.FNLN : self._raw_fnln,
Name.FN : self._raw_fn,
}
for (num, name, fmt_str, act) in formats:
func = self._format_fn(fmt_str)
func_raw = raw_func_dict.get(num, self._format_raw_fn(fmt_str))
self.name_formats[num] = (name, fmt_str, act, func, func_raw)
self.set_default_format(self.get_default_format())
def add_name_format(self, name, fmt_str):
for num in self.name_formats:
if fmt_str in self.name_formats.get(num):
return num
num = -1
while num in self.name_formats:
num -= 1
self.set_name_format([(num, name, fmt_str,_ACT)])
return num
def edit_name_format(self, num, name, fmt_str):
self.set_name_format([(num, name, fmt_str,_ACT)])
if self.default_format == num:
self.set_default_format(num)
def del_name_format(self, num):
try:
del self.name_formats[num]
except:
pass
    def set_default_format(self, num):
        """Select *num* as the default name format.

        Unknown ids fall back to LNFN, and the Gramps-default slot
        (Name.DEF) keeps its own name/format/active fields but is
        rebound to the chosen format's formatting functions.
        """
        if num not in self.name_formats:
            num = Name.LNFN
        # if user sets default format to the Gramps default format,
        # then we select LNFN as format.
        if num == Name.DEF:
            num = Name.LNFN
        self.default_format = num
        self.name_formats[Name.DEF] = (self.name_formats[Name.DEF][_F_NAME],
                                       self.name_formats[Name.DEF][_F_FMT],
                                       self.name_formats[Name.DEF][_F_ACT],
                                       self.name_formats[num][_F_FN],
                                       self.name_formats[num][_F_RAWFN])
    def get_default_format(self):
        """Return the index of the currently selected default name format."""
        return self.default_format
def set_format_inactive(self, num):
try:
self.name_formats[num] = (self.name_formats[num][_F_NAME],
self.name_formats[num][_F_FMT],
_INA,
self.name_formats[num][_F_FN],
self.name_formats[num][_F_RAWFN])
except:
pass
    def get_name_format(self, also_default=False,
                        only_custom=False,
                        only_active=True):
        """
        Get a list of tuples (num, name,fmt_str,act)

        :param also_default: include the Gramps default format (num == 0)
        :param only_custom: restrict the list to user-defined formats
                            (negative num)
        :param only_active: restrict the list to formats marked active
        """
        the_list = []
        keys = sorted(self.name_formats, key=self.cmp_to_key(self._sort_name_format))
        for num in keys:
            if ((also_default or num) and
                (not only_custom or (num < 0)) and
                (not only_active or self.name_formats[num][_F_ACT])):
                the_list.append((num,) + self.name_formats[num][_F_NAME:_F_FN])
        return the_list
def cmp_to_key(self, mycmp):
"""
python 2 to 3 conversion, python recipe http://code.activestate.com/recipes/576653/
Convert a :func:`cmp` function into a :func:`key` function
We use this in Gramps as understanding the old compare function is
not trivial. This should be replaced by a proper key function
"""
class K:
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
    def _sort_name_format(self, x, y):
        """Old-style cmp ordering format indexes: standard (non-negative)
        formats ascending; custom (negative) formats handled separately.

        NOTE(review): for two negative indexes this returns x+y, which is
        negative for *every* pair, so the comparison is not antisymmetric;
        confirm the intended ordering of custom formats before reuse.
        """
        if x < 0:
            if y < 0:
                return x+y
            else:
                return -x+y
        else:
            if y < 0:
                return -x+y
            else:
                return x-y
def _is_format_valid(self, num):
try:
if not self.name_formats[num][_F_ACT]:
num = 0
except:
num = 0
return num
#-------------------------------------------------------------------------
def _gen_raw_func(self, format_str):
"""The job of building the name from a format string is rather
expensive and it is called lots and lots of times. So it is worth
going to some length to optimise it as much as possible.
This method constructs a new function that is specifically written
to format a name given a particular format string. This is worthwhile
because the format string itself rarely changes, so by caching the new
function and calling it directly when asked to format a name to the
same format string again we can be as quick as possible.
The new function is of the form::
def fn(raw_data):
return "%s %s %s" % (raw_data[_TITLE],
raw_data[_FIRSTNAME],
raw_data[_SUFFIX])
Specific symbols for parts of a name are defined (keywords given):
't' : title = title
'f' : given = given (first names)
'l' : surname = full surname (lastname)
'c' : call = callname
'x' : common = nick name, call, otherwise first first name (common name)
'i' : initials = initials of the first names
'm' : primary = primary surname (main)
'0m': primary[pre]= prefix primary surname (main)
'1m': primary[sur]= surname primary surname (main)
'2m': primary[con]= connector primary surname (main)
'y' : patronymic = pa/matronymic surname (father/mother) - assumed unique
'0y': patronymic[pre] = prefix "
'1y': patronymic[sur] = surname "
'2y': patronymic[con] = connector "
'o' : notpatronymic = surnames without pa/matronymic and primary
'r' : rest = non primary surnames
'p' : prefix = list of all prefixes
'q' : rawsurnames = surnames without prefixes and connectors
's' : suffix = suffix
'n' : nickname = nick name
'g' : familynick = family nick name
"""
# we need the names of each of the variables or methods that are
# called to fill in each format flag.
# Dictionary is "code": ("expression", "keyword", "i18n-keyword")
d = {"t": ("raw_data[_TITLE]", "title",
_("Person|title")),
"f": ("raw_data[_FIRSTNAME]", "given",
_("given")),
"l": ("_raw_full_surname(raw_data[_SURNAME_LIST])", "surname",
_("surname")),
"s": ("raw_data[_SUFFIX]", "suffix",
_("suffix")),
"c": ("raw_data[_CALL]", "call",
_("Name|call")),
"x": ("(raw_data[_NICK] or raw_data[_CALL] or raw_data[_FIRSTNAME].split(' ')[0])",
"common",
_("Name|common")),
"i": ("''.join([word[0] +'.' for word in ('. ' +" +
" raw_data[_FIRSTNAME]).split()][1:])",
"initials",
_("initials")),
"m": ("_raw_primary_surname(raw_data[_SURNAME_LIST])",
"primary",
_("Name|primary")),
"0m": ("_raw_primary_prefix_only(raw_data[_SURNAME_LIST])",
"primary[pre]",
_("primary[pre]")),
"1m": ("_raw_primary_surname_only(raw_data[_SURNAME_LIST])",
"primary[sur]",
_("primary[sur]")),
"2m": ("_raw_primary_conn_only(raw_data[_SURNAME_LIST])",
"primary[con]",
_("primary[con]")),
"y": ("_raw_patro_surname(raw_data[_SURNAME_LIST])", "patronymic",
_("patronymic")),
"0y": ("_raw_patro_prefix_only(raw_data[_SURNAME_LIST])", "patronymic[pre]",
_("patronymic[pre]")),
"1y": ("_raw_patro_surname_only(raw_data[_SURNAME_LIST])", "patronymic[sur]",
_("patronymic[sur]")),
"2y": ("_raw_patro_conn_only(raw_data[_SURNAME_LIST])", "patronymic[con]",
_("patronymic[con]")),
"o": ("_raw_nonpatro_surname(raw_data[_SURNAME_LIST])", "notpatronymic",
_("notpatronymic")),
"r": ("_raw_nonprimary_surname(raw_data[_SURNAME_LIST])",
"rest",
_("Remaining names|rest")),
"p": ("_raw_prefix_surname(raw_data[_SURNAME_LIST])",
"prefix",
_("prefix")),
"q": ("_raw_single_surname(raw_data[_SURNAME_LIST])",
"rawsurnames",
_("rawsurnames")),
"n": ("raw_data[_NICK]", "nickname",
_("nickname")),
"g": ("raw_data[_FAMNICK]", "familynick",
_("familynick")),
}
args = "raw_data"
return self._make_fn(format_str, d, args)
def _gen_cooked_func(self, format_str):
"""The job of building the name from a format string is rather
expensive and it is called lots and lots of times. So it is worth
going to some length to optimise it as much as possible.
This method constructs a new function that is specifically written
to format a name given a particular format string. This is worthwhile
because the format string itself rarely changes, so by caching the new
function and calling it directly when asked to format a name to the
same format string again we can be as quick as possible.
The new function is of the form::
def fn(first, raw_surname_list, suffix, title, call,):
return "%s %s" % (first,suffix)
Specific symbols for parts of a name are defined (keywords given):
't' : title = title
'f' : given = given (first names)
'l' : surname = full surname (lastname)
'c' : call = callname
'x' : common = nick name, call, or otherwise first first name (common name)
'i' : initials = initials of the first names
'm' : primary = primary surname (main)
'0m': primary[pre]= prefix primary surname (main)
'1m': primary[sur]= surname primary surname (main)
'2m': primary[con]= connector primary surname (main)
'y' : patronymic = pa/matronymic surname (father/mother) - assumed unique
'0y': patronymic[pre] = prefix "
'1y': patronymic[sur] = surname "
'2y': patronymic[con] = connector "
'o' : notpatronymic = surnames without pa/matronymic and primary
'r' : rest = non primary surnames
'p' : prefix = list of all prefixes
'q' : rawsurnames = surnames without prefixes and connectors
's' : suffix = suffix
'n' : nickname = nick name
'g' : familynick = family nick name
"""
# we need the names of each of the variables or methods that are
# called to fill in each format flag.
# Dictionary is "code": ("expression", "keyword", "i18n-keyword")
d = {"t": ("title", "title",
_("Person|title")),
"f": ("first", "given",
_("given")),
"l": ("_raw_full_surname(raw_surname_list)", "surname",
_("surname")),
"s": ("suffix", "suffix",
_("suffix")),
"c": ("call", "call",
_("Name|call")),
"x": ("(nick or call or first.split(' ')[0])", "common",
_("Name|common")),
"i": ("''.join([word[0] +'.' for word in ('. ' + first).split()][1:])",
"initials",
_("initials")),
"m": ("_raw_primary_surname(raw_surname_list)", "primary",
_("Name|primary")),
"0m":("_raw_primary_prefix_only(raw_surname_list)",
"primary[pre]", _("primary[pre]")),
"1m":("_raw_primary_surname_only(raw_surname_list)",
"primary[sur]",_("primary[sur]")),
"2m":("_raw_primary_conn_only(raw_surname_list)",
"primary[con]", _("primary[con]")),
"y": ("_raw_patro_surname(raw_surname_list)", "patronymic",
_("patronymic")),
"0y":("_raw_patro_prefix_only(raw_surname_list)", "patronymic[pre]",
_("patronymic[pre]")),
"1y":("_raw_patro_surname_only(raw_surname_list)", "patronymic[sur]",
_("patronymic[sur]")),
"2y":("_raw_patro_conn_only(raw_surname_list)", "patronymic[con]",
_("patronymic[con]")),
"o": ("_raw_nonpatro_surname(raw_surname_list)", "notpatronymic",
_("notpatronymic")),
"r": ("_raw_nonprimary_surname(raw_surname_list)", "rest",
_("Remaining names|rest")),
"p": ("_raw_prefix_surname(raw_surname_list)", "prefix",
_("prefix")),
"q": ("_raw_single_surname(raw_surname_list)", "rawsurnames",
_("rawsurnames")),
"n": ("nick", "nickname",
_("nickname")),
"g": ("famnick", "familynick",
_("familynick")),
}
args = "first,raw_surname_list,suffix,title,call,nick,famnick"
return self._make_fn(format_str, d, args)
    def format_str(self, name, format_str):
        """Format a Name object according to *format_str* (see
        :meth:`_format_str_base` for the supported %-codes)."""
        return self._format_str_base(name.first_name, name.surname_list,
                                     name.suffix, name.title,
                                     name.call, name.nick, name.famnick,
                                     format_str)
    def format_str_raw(self, raw_data, format_str):
        """
        Format a name from the raw name list. To make this as fast as possible
        this uses :func:`_gen_raw_func` to generate a new method for each new
        format_string.

        It does not call :meth:`_format_str_base` because it would introduce an
        extra method call and we need all the speed we can squeeze out of this.
        """
        # per-class cache: one generated function per format string, shared
        # across all instances
        func = self.__class__.raw_format_funcs.get(format_str)
        if func is None:
            func = self._gen_raw_func(format_str)
            self.__class__.raw_format_funcs[format_str] = func
        return func(raw_data)
    def _format_str_base(self, first, surname_list, suffix, title, call,
                         nick, famnick, format_str):
        """
        Generates name from a format string.

        The following substitutions are made:
        '%t' : title
        '%f' : given (first names)
        '%l' : full surname (lastname)
        '%c' : callname
        '%x' : nick name, call, or otherwise first first name (common name)
        '%i' : initials of the first names
        '%m' : primary surname (main)
        '%0m': prefix primary surname (main)
        '%1m': surname primary surname (main)
        '%2m': connector primary surname (main)
        '%y' : pa/matronymic surname (father/mother) - assumed unique
        '%0y': prefix    "
        '%1y': surname   "
        '%2y': connector "
        '%o' : surnames without patronymic
        '%r' : non-primary surnames (rest)
        '%p' : list of all prefixes
        '%q' : surnames without prefixes and connectors
        '%s' : suffix
        '%n' : nick name
        '%g' : family nick name
        The capital letters are substituted for capitalized name components.
        The %% is substituted with the single % character.
        All the other characters in the fmt_str are unaffected.
        """
        # per-class cache: one generated formatting function per format
        # string, shared across all instances
        func = self.__class__.format_funcs.get(format_str)
        if func is None:
            func = self._gen_cooked_func(format_str)
            self.__class__.format_funcs[format_str] = func
        try:
            s = func(first, [surn.serialize() for surn in surname_list],
                     suffix, title, call, nick, famnick)
        except (ValueError, TypeError,):
            raise NameDisplayError("Incomplete format string")
        return s
#-------------------------------------------------------------------------
def primary_surname(self, name):
global PAT_AS_SURN
nrsur = len(name.surname_list)
sur = name.get_primary_surname()
if not PAT_AS_SURN and nrsur <= 1 and \
(sur.get_origintype().value == _ORIGINPATRO
or sur.get_origintype().value == _ORIGINMATRO):
return ''
return sur.get_surname()
def sort_string(self, name):
return "%-25s%-30s%s" % (self.primary_surname(name),
name.first_name, name.suffix)
    def sorted(self, person):
        """
        Return a text string representing the :class:`~.person.Person`
        instance's :class:`~.name.Name` in a manner that should be used for
        displaying a sortedname.

        :param person: :class:`~.person.Person` instance that contains the
                       :class:`~.name.Name` that is to be displayed. The
                       primary name is used for the display.
        :type person: :class:`~.person.Person`
        :returns: Returns the :class:`~.person.Person` instance's name
        :rtype: str
        """
        # always sorts on the person's primary name
        name = person.get_primary_name()
        return self.sorted_name(name)
    def sorted_name(self, name):
        """
        Return a text string representing the :class:`~.name.Name` instance
        in a manner that should be used for sorting the name in a list.

        :param name: :class:`~.name.Name` instance that is to be displayed.
        :type name: :class:`~.name.Name`
        :returns: Returns the :class:`~.name.Name` string representation
        :rtype: str
        """
        # fall back to the default format if sort_as is unknown/inactive
        num = self._is_format_valid(name.sort_as)
        return self.name_formats[num][_F_FN](name)
def truncate(self, full_name, max_length=15, elipsis="..."):
name_out = ""
if len(full_name) <= max_length:
name_out = full_name
else:
last_space = full_name.rfind(" ", max_length)
if (last_space) > -1:
name_out = full_name[:last_space]
else:
name_out = full_name[:max_length]
name_out += " " + elipsis
return name_out
    def raw_sorted_name(self, raw_data):
        """
        Return a text string representing the :class:`~.name.Name` instance
        in a manner that should be used for sorting the name in a list.

        :param raw_data: raw unserialized data of name that is to be displayed.
        :type raw_data: tuple
        :returns: Returns the :class:`~.name.Name` string representation
        :rtype: str
        """
        # fall back to the default format if the sort format is invalid
        num = self._is_format_valid(raw_data[_SORT])
        return self.name_formats[num][_F_RAWFN](raw_data)
    def display(self, person):
        """
        Return a text string representing the :class:`~.person.Person`
        instance's :class:`~.name.Name` in a manner that should be used for
        normal displaying.

        :param person: :class:`~.person.Person` instance that contains the
                       :class:`~.name.Name` that is to be displayed. The
                       primary name is used for the display.
        :type person: :class:`~.person.Person`
        :returns: Returns the :class:`~.person.Person` instance's name
        :rtype: str
        """
        # always displays the person's primary name
        name = person.get_primary_name()
        return self.display_name(name)
    def display_format(self, person, num):
        """
        Return a text string representing the L{gen.lib.Person} instance's
        L{Name} using num format.

        @param person: L{gen.lib.Person} instance that contains the
        L{Name} that is to be displayed. The primary name is used for
        the display.
        @type person: L{gen.lib.Person}
        @param num: num of the format to be used, as return by
        name_displayer.add_name_format('name','format')
        @type num: int
        @returns: Returns the L{gen.lib.Person} instance's name
        @rtype: str
        """
        # NOTE: unlike display_name(), *num* is used as-is without the
        # _is_format_valid() fallback, so an unknown id raises KeyError
        name = person.get_primary_name()
        return self.name_formats[num][_F_FN](name)
    def display_formal(self, person):
        """
        Return a text string representing the :class:`~.person.Person`
        instance's :class:`~.name.Name` in a manner that should be used for
        formal displaying.

        :param person: :class:`~.person.Person` instance that contains the
                       :class:`~.name.Name` that is to be displayed. The
                       primary name is used for the display.
        :type person: :class:`~.person.Person`
        :returns: Returns the :class:`~.person.Person` instance's name
        :rtype: str
        """
        # FIXME: At this time, this is just duplicating display() method
        name = person.get_primary_name()
        return self.display_name(name)
    def display_name(self, name):
        """
        Return a text string representing the :class:`~.name.Name` instance
        in a manner that should be used for normal displaying.

        :param name: :class:`~.name.Name` instance that is to be displayed.
        :type name: :class:`~.name.Name`
        :returns: Returns the :class:`~.name.Name` string representation
        :rtype: str
        """
        # a missing name (e.g. no primary name) is rendered as empty text
        if name is None:
            return ""
        # fall back to the default format if display_as is unknown/inactive
        num = self._is_format_valid(name.display_as)
        return self.name_formats[num][_F_FN](name)
    def raw_display_name(self, raw_data):
        """
        Return a text string representing the :class:`~.name.Name` instance
        in a manner that should be used for normal displaying.

        :param raw_data: raw unserialized data of name that is to be displayed.
        :type raw_data: tuple
        :returns: Returns the :class:`~.name.Name` string representation
        :rtype: str
        """
        # fall back to the default format if the display format is invalid
        num = self._is_format_valid(raw_data[_DISPLAY])
        return self.name_formats[num][_F_RAWFN](raw_data)
    def display_given(self, person):
        """Return only the given name(s) of the person's primary name."""
        return self.format_str(person.get_primary_name(),'%f')
    def name_grouping(self, db, person):
        """
        Return the name under which to group this person. This is defined as:

        1. if group name is defined on primary name, use that
        2. if group name is defined for the primary surname of the primary
           name, use that
        3. use primary surname of primary name otherwise
        """
        # delegate to name_grouping_name() on the primary name
        return self.name_grouping_name(db, person.primary_name)
    def name_grouping_name(self, db, pn):
        """
        Return the name under which to group. This is defined as:

        1. if group name is defined, use that
        2. if group name is defined for the primary surname, use that
        3. use primary surname itself otherwise

        :param pn: :class:`~.name.Name` object
        :type pn: :class:`~.name.Name` instance
        :returns: Returns the groupname string representation
        :rtype: str
        """
        if pn.group_as:
            return pn.group_as
        # fall back to any db-wide mapping registered for the surname
        return db.get_name_group_mapping(pn.get_primary_surname().get_surname())
    def name_grouping_data(self, db, pn):
        """
        Return the name under which to group. This is defined as:

        1. if group name is defined, use that
        2. if group name is defined for the primary surname, use that
        3. use primary surname itself otherwise

        :param pn: raw unserialized data of name
        :type pn: tuple
        :returns: Returns the groupname string representation
        :rtype: str
        """
        if pn[_GROUP]:
            return pn[_GROUP]
        # fall back to any db-wide mapping registered for the surname
        return db.get_name_group_mapping(_raw_primary_surname_only(
                                            pn[_SURNAME_LIST]))
def _make_fn(self, format_str, d, args):
"""
Create the name display function and handles dependent
punctuation.
"""
# d is a dict: dict[code] = (expr, word, translated word)
# First, go through and do internationalization-based
# key-word replacement. Just replace ikeywords with
# %codes (ie, replace "irstnamefay" with "%f", and
# "IRSTNAMEFAY" for %F)
if (len(format_str) > 2 and
format_str[0] == format_str[-1] == '"'):
pass
else:
d_keys = [(code, _tuple[2]) for code, _tuple in d.items()]
d_keys.sort(key=_make_cmp_key, reverse=True) # reverse on length and by ikeyword
for (code, ikeyword) in d_keys:
exp, keyword, ikeyword = d[code]
format_str = format_str.replace(ikeyword, "%"+ code)
format_str = format_str.replace(ikeyword.title(), "%"+ code)
format_str = format_str.replace(ikeyword.upper(), "%"+ code.upper())
# Next, go through and do key-word replacement.
# Just replace keywords with
# %codes (ie, replace "firstname" with "%f", and
# "FIRSTNAME" for %F)
if (len(format_str) > 2 and
format_str[0] == format_str[-1] == '"'):
pass
else:
d_keys = [(code, _tuple[1]) for code, _tuple in d.items()]
d_keys.sort(key=_make_cmp_key, reverse=True) # reverse sort on length and by keyword
# if in double quotes, just use % codes
for (code, keyword) in d_keys:
exp, keyword, ikeyword = d[code]
format_str = format_str.replace(keyword, "%"+ code)
format_str = format_str.replace(keyword.title(), "%"+ code)
format_str = format_str.replace(keyword.upper(), "%"+ code.upper())
# Get lower and upper versions of codes:
codes = list(d.keys()) + [c.upper() for c in d]
# Next, list out the matching patterns:
# If it starts with "!" however, treat the punctuation verbatim:
if len(format_str) > 0 and format_str[0] == "!":
patterns = ["%(" + ("|".join(codes)) + ")", # %s
]
format_str = format_str[1:]
else:
patterns = [
",\W*\"%(" + ("|".join(codes)) + ")\"", # ,\W*"%s"
",\W*\(%(" + ("|".join(codes)) + ")\)", # ,\W*(%s)
",\W*%(" + ("|".join(codes)) + ")", # ,\W*%s
"\"%(" + ("|".join(codes)) + ")\"", # "%s"
"_%(" + ("|".join(codes)) + ")_", # _%s_
"\(%(" + ("|".join(codes)) + ")\)", # (%s)
"%(" + ("|".join(codes)) + ")", # %s
]
new_fmt = format_str
# replace the specific format string flags with a
# flag that works in standard python format strings.
new_fmt = re.sub("|".join(patterns), "%s", new_fmt)
# replace special meaning codes we need to have verbatim in output
if (len(new_fmt) > 2 and new_fmt[0] == new_fmt[-1] == '"'):
new_fmt = new_fmt.replace('\\', r'\\')
new_fmt = new_fmt[1:-1].replace('"', r'\"')
else:
new_fmt = new_fmt.replace('\\', r'\\')
new_fmt = new_fmt.replace('"', '\\\"')
# find each format flag in the original format string
# for each one we find the variable name that is needed to
# replace it and add this to a list. This list will be used to
# generate the replacement tuple.
# This compiled pattern should match all of the format codes.
pat = re.compile("|".join(patterns))
param = ()
mat = pat.search(format_str)
while mat:
match_pattern = mat.group(0) # the matching pattern
# prefix, code, suffix:
p, code, s = re.split("%(.)", match_pattern)
if code in '0123456789':
code = code + s[0]
s = s[1:]
field = d[code.lower()][0]
if code.isupper():
field += ".upper()"
if p == '' and s == '':
param = param + (field,)
else:
param = param + ("ifNotEmpty(%s,'%s','%s')" % (field, p, s), )
mat = pat.search(format_str, mat.end())
s = """
def fn(%s):
def ifNotEmpty(str,p,s):
if str == '':
return ''
else:
return p + str + s
return cleanup_name("%s" %% (%s))""" % (args, new_fmt, ",".join(param))
try:
exec(s) in globals(), locals()
return locals()['fn']
except:
LOG.error("\n" + 'Wrong name format string %s' % new_fmt
+"\n" + ("ERROR, Edit Name format in Preferences->Display to correct")
+"\n" + _('Wrong name format string %s') % new_fmt
+"\n" + ("ERROR, Edit Name format in Preferences->Display to correct")
)
def errfn(*arg):
return _("ERROR, Edit Name format in Preferences")
return errfn
displayer = NameDisplay()
| beernarrd/gramps | gramps/gen/display/name.py | Python | gpl-2.0 | 45,844 | 0.004319 |
from .enums import IncrementalSearchDirection
from .filters import SimpleFilter, Never
__all__ = (
'SearchState',
)
class SearchState(object):
    """
    A search 'query': the text being searched for, the search direction,
    and whether the match should ignore case.
    """
    __slots__ = ('text', 'direction', 'ignore_case')

    def __init__(self, text='', direction=IncrementalSearchDirection.FORWARD, ignore_case=Never()):
        assert isinstance(ignore_case, SimpleFilter)

        self.text = text
        self.direction = direction
        self.ignore_case = ignore_case

    def __repr__(self):
        return '{}({!r}, direction={!r}, ignore_case={!r})'.format(
            self.__class__.__name__, self.text, self.direction, self.ignore_case)

    def __invert__(self):
        """
        Create a new SearchState where backwards becomes forwards and the other
        way around.
        """
        reversed_direction = (
            IncrementalSearchDirection.FORWARD
            if self.direction == IncrementalSearchDirection.BACKWARD
            else IncrementalSearchDirection.BACKWARD)

        return SearchState(text=self.text, direction=reversed_direction,
                           ignore_case=self.ignore_case)
| ddalex/python-prompt-toolkit | prompt_toolkit/search_state.py | Python | bsd-3-clause | 1,105 | 0.002715 |
"""
This module provides loaders for local file system and over http
local and remote access
"""
import os
import hmac
import urllib
#import urllib2
import requests
import urlparse
import time
import pkg_resources
from io import open, BytesIO
try:
from boto import connect_s3
s3_avail = True
except ImportError: #pragma: no cover
s3_avail = False
#=================================================================
def is_http(filename):
    """Return True if *filename* is an http(s):// url."""
    return filename.startswith('http://') or filename.startswith('https://')
#=================================================================
def is_s3(filename):
    """Return True if *filename* is an s3:// url."""
    prefix = 's3://'
    return filename.startswith(prefix)
#=================================================================
def to_file_url(filename):
    """ Convert a filename to a file:// url
    """
    # absolute path makes the resulting url unambiguous; pathname2url
    # percent-encodes characters not valid in urls
    # NOTE(review): top-level ``urllib.pathname2url`` is Python 2 only
    url = os.path.abspath(filename)
    url = urlparse.urljoin('file:', urllib.pathname2url(url))
    return url
#=================================================================
def load_yaml_config(config_file):
    """Load and parse a YAML config from a local path, package resource,
    http(s) url, or s3 url (any source BlockLoader supports).

    NOTE(review): ``yaml.load`` without an explicit Loader can construct
    arbitrary Python objects; for untrusted config sources prefer
    ``yaml.safe_load``.
    """
    import yaml
    configdata = BlockLoader().load(config_file)
    config = yaml.load(configdata)
    return config
#=================================================================
def extract_post_query(method, mime, length, stream, buffered_stream=None):
    """
    Extract a url-encoded form POST from stream
    If not a application/x-www-form-urlencoded, or no missing
    content length, return None

    :param method: http request method; anything but POST returns None
    :param mime: content-type header value
    :param length: content-length header value
    :param stream: input stream positioned at the request body
    :param buffered_stream: optional stream that receives a copy of the
        raw body and is rewound to its start
    """
    if method.upper() != 'POST':
        return None
    if ((not mime or
         not mime.lower().startswith('application/x-www-form-urlencoded'))):
        return None
    try:
        length = int(length)
    except (ValueError, TypeError):
        return None
    if length <= 0:
        return None
    #todo: encoding issues?
    post_query = ''
    # read exactly *length* bytes, tolerating short reads; stop early on EOF
    while length > 0:
        buff = stream.read(length)
        length -= len(buff)
        if not buff:
            break
        post_query += buff
    if buffered_stream:
        buffered_stream.write(post_query)
        buffered_stream.seek(0)
    # NOTE(review): top-level ``urllib.unquote_plus`` is Python 2 only
    # (Python 3 equivalent: urllib.parse.unquote_plus)
    post_query = urllib.unquote_plus(post_query)
    return post_query
#=================================================================
def append_post_query(url, post_query):
    """Append *post_query* to the query string of *url*.

    Returns *url* unchanged when *post_query* is empty or None.
    """
    if not post_query:
        return url
    separator = '&' if '?' in url else '?'
    return url + separator + post_query
#=================================================================
def extract_client_cookie(env, cookie_name):
    """Return the value of *cookie_name* from the WSGI environ's
    HTTP_COOKIE header, or None if it cannot be found.

    Note: matches the first occurrence of the name substring anywhere in
    the header (same lookup behavior as the original implementation).
    """
    header = env.get('HTTP_COOKIE')
    if not header:
        return None

    # attempt to extract cookie_name only
    start = header.find(cookie_name)
    if start < 0:
        return None

    end = header.find(';', start)
    fragment = header[start:end] if end > 0 else header[start:]

    parts = fragment.split('=')
    if len(parts) < 2:
        return None

    return parts[1].strip()
#=================================================================
def read_last_line(fh, offset=256):
    """ Read last line from a seekable file. Start reading
    from buff before end of file, and double backwards seek
    until line break is found. If reached beginning of file
    (no lines), just return whole file

    NOTE(review): an empty file raises IndexError on the final
    ``readlines()[-1]`` -- confirm callers never pass empty files.
    """
    # seek to end to learn the file size
    fh.seek(0, 2)
    size = fh.tell()
    while offset < size:
        # read the trailing *offset* bytes; more than one line read
        # guarantees the last element is a complete line
        fh.seek(-offset, 2)
        lines = fh.readlines()
        if len(lines) > 1:
            return lines[-1]
        offset *= 2
    # window covers the whole file: return its last line
    fh.seek(0, 0)
    return fh.readlines()[-1]
#=================================================================
class BlockLoader(object):
    """
    a loader which can stream blocks of content
    given a uri, offset and optional length.
    Currently supports: http/https and file/local file system
    """
    def __init__(self, cookie_maker=None):
        # cookie_maker: a plain string, or an object with a make() method,
        # used to attach a Cookie header to http(s) requests
        self.cookie_maker = cookie_maker
        # lazily created requests.Session / boto s3 connection
        self.session = None
        self.s3conn = None
    def load(self, url, offset=0, length=-1):
        """
        Determine loading method based on uri
        """
        if is_http(url):
            return self.load_http(url, offset, length)
        elif is_s3(url):
            return self.load_s3(url, offset, length)
        else:
            return self.load_file_or_resource(url, offset, length)
    def load_file_or_resource(self, url, offset=0, length=-1):
        """
        Load a file-like reader from the local file system
        """
        # if starting with . or /, can only be a file path..
        file_only = url.startswith(('/', '.'))
        # convert to filename
        if url.startswith('file://'):
            file_only = True
            # NOTE(review): top-level ``urllib.url2pathname`` is Python 2 only
            url = urllib.url2pathname(url[len('file://'):])
        try:
            # first, try as file
            afile = open(url, 'rb')
        except IOError:
            if file_only:
                raise
            # then, try as package.path/file
            pkg_split = url.split('/', 1)
            if len(pkg_split) == 1:
                raise
            afile = pkg_resources.resource_stream(pkg_split[0],
                                                  pkg_split[1])
        if offset > 0:
            afile.seek(offset)
        if length >= 0:
            # cap reads at *length* when a block length was requested
            return LimitReader(afile, length)
        else:
            return afile
    @staticmethod
    def _make_range_header(offset, length):
        # HTTP Range header is inclusive on both ends; omit the end
        # when no explicit length was given
        if length > 0:
            range_header = 'bytes={0}-{1}'.format(offset, offset + length - 1)
        else:
            range_header = 'bytes={0}-'.format(offset)
        return range_header
    def load_http(self, url, offset, length):
        """
        Load a file-like reader over http using range requests
        and an optional cookie created via a cookie_maker
        """
        headers = {}
        # only send a Range header when a sub-range was actually requested
        if offset != 0 or length != -1:
            headers['Range'] = self._make_range_header(offset, length)
        if self.cookie_maker:
            # NOTE(review): ``basestring`` is Python 2 only
            if isinstance(self.cookie_maker, basestring):
                headers['Cookie'] = self.cookie_maker
            else:
                headers['Cookie'] = self.cookie_maker.make()
        if not self.session:
            self.session = requests.Session()
        # stream=True returns the raw, undecoded response body
        r = self.session.get(url, headers=headers, stream=True)
        return r.raw
    def load_s3(self, url, offset, length):
        # requires boto; s3_avail is set at import time
        if not s3_avail:  #pragma: no cover
            raise IOError('To load from s3 paths, ' +
                          'you must install boto: pip install boto')
        if not self.s3conn:
            try:
                self.s3conn = connect_s3()
            except Exception:  #pragma: no cover
                # fall back to anonymous access when no credentials found
                self.s3conn = connect_s3(anon=True)
        parts = urlparse.urlsplit(url)
        bucket = self.s3conn.get_bucket(parts.netloc)
        headers = {'Range': self._make_range_header(offset, length)}
        key = bucket.get_key(parts.path)
        result = key.get_contents_as_string(headers=headers)
        key.close()
        # whole range is fetched eagerly, then wrapped as a stream
        return BytesIO(result)
#=================================================================
# Signed Cookie-Maker
#=================================================================
class HMACCookieMaker(object):
    """
    Utility class to produce signed HMAC digest cookies
    to be used with each http request

    Cookie format: ``name[-extra_id]=[extra_id-]expire-hexdigest`` where
    *expire* is a unix timestamp after which the cookie is stale.
    """
    def __init__(self, key, name, duration=10):
        # shared secret for the HMAC signature
        self.key = key
        self.name = name
        # duration in seconds
        self.duration = duration
    def make(self, extra_id=''):
        # NOTE(review): ``long`` is Python 2 only; also, hmac.new with no
        # digestmod defaults to MD5 here -- consider an explicit stronger
        # digest algorithm.
        expire = str(long(time.time() + self.duration))
        if extra_id:
            msg = extra_id + '-' + expire
        else:
            msg = expire
        hmacdigest = hmac.new(self.key, msg)
        hexdigest = hmacdigest.hexdigest()
        if extra_id:
            cookie = '{0}-{1}={2}-{3}'.format(self.name, extra_id,
                                              expire, hexdigest)
        else:
            cookie = '{0}={1}-{2}'.format(self.name, expire, hexdigest)
        return cookie
#=================================================================
# Limit Reader
#=================================================================
class LimitReader(object):
    """
    Wrap a file-like stream so that no more than *limit* total
    bytes/characters can be read from it.
    """
    def __init__(self, stream, limit):
        self.stream = stream
        self.limit = limit

    def _capped(self, length):
        # never allow a single read to exceed the remaining limit
        return self.limit if length is None else min(length, self.limit)

    def read(self, length=None):
        length = self._capped(length)
        if length == 0:
            return ''
        buff = self.stream.read(length)
        self.limit -= len(buff)
        return buff

    def readline(self, length=None):
        length = self._capped(length)
        if length == 0:
            return ''
        buff = self.stream.readline(length)
        self.limit -= len(buff)
        return buff

    def close(self):
        self.stream.close()

    @staticmethod
    def wrap_stream(stream, content_length):
        """
        If given content_length is an int >= 0, wrap the stream
        in a LimitReader. Otherwise, return the stream unaltered
        """
        try:
            content_length = int(content_length)
            if content_length >= 0:
                # optimize: if already a LimitStream, set limit to
                # the smaller of the two limits
                if isinstance(stream, LimitReader):
                    stream.limit = min(stream.limit, content_length)
                else:
                    stream = LimitReader(stream, content_length)
        except (ValueError, TypeError):
            pass
        return stream
| jcushman/pywb | pywb/utils/loaders.py | Python | gpl-3.0 | 9,733 | 0.002055 |
"""
logitech-m720-config - A config script for Logitech M720 button mappings
Copyright (C) 2017 Fin Christensen <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py; used to locate package files.
here = path.abspath (path.dirname (__file__))
# Get the long description from the README.md file
with open (path.join (here, "README.md"), encoding = "utf-8") as readme:
    long_description = readme.read ()
# Package metadata and entry points for the m720-config console tool.
setup (
    name = "m720-config",
    version = "0.0.1",
    description = "A config script for Logitech M720 button mappings.",
    long_description = long_description,
    url = "",
    author = "Fin Christensen",
    author_email = "[email protected]",
    license = "GPLv3+",
    classifiers = [
        "Development Status :: 2 - Pre-Alpha",
        "Environment :: Console",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3 :: Only",
    ],
    keywords = "config logitech m720 hid++",
    packages = find_packages (),
    install_requires = ["solaar"],
    extras_require = {},
    package_data = {
        "m720_config": [],
    },
    data_files = [],
    entry_points = {
        "console_scripts": [
            "m720-config=m720_config:main"
        ],
    },
)
| fin-ger/logitech-m720-config | setup.py | Python | gpl-3.0 | 2,072 | 0.02027 |
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``layout.ternary.aaxis.title.font.family``."""

    def __init__(
        self,
        plotly_name="family",
        parent_name="layout.ternary.aaxis.title.font",
        **kwargs
    ):
        # Defaults mirror the generated validator: an edit triggers a
        # re-plot, and the font family must be a non-blank string.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("no_blank", True)
        kwargs.setdefault("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/ternary/aaxis/title/font/_family.py | Python | mit | 553 | 0 |
"""TailorDev Biblio
Bibliography management with Django.
"""
__version__ = "2.0.0"
default_app_config = "td_biblio.apps.TDBiblioConfig"
| TailorDev/django-tailordev-biblio | td_biblio/__init__.py | Python | mit | 139 | 0 |