text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
#!/usr/bin/env python
# coding=utf-8
__author__ = u'Ahmed Şeref GÜNEYSU'
import ui
| guneysus/packathon2016 | packathon2016/__init__.py | Python | bsd-3-clause | 85 | 0 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
  """Return a "mimic" dict for the text in the file `filename`.

  Maps each word in the file to the list of words that immediately
  follow it, duplicates included.  The empty string '' is the key
  whose list holds the first word of the file.
  """
  # Read the whole file in one go; the context manager closes the
  # handle promptly (the original version leaked it).
  with open(filename) as source:
    words = source.read().split()
  mimic = {}
  prev = ''
  for word in words:
    # setdefault replaces the original if/else insert-or-append dance.
    mimic.setdefault(prev, []).append(word)
    prev = word
  return mimic
def print_mimic(mimic_dict, word):
  """Given mimic dict and start word, prints 200 random words."""
  # NOTE(review): the parameter `mimic_dict` shadows the module-level
  # function of the same name inside this body.
  # +++your code here+++
  for i in range(200):
    # Trailing comma: keep the words flowing on one line (Python 2 print).
    print word,
    nexts=mimic_dict.get(word)
    if not nexts:
      # Word has no known successors: restart from the empty-string key,
      # which maps to the first word of the file.
      nexts=mimic_dict['']
    word=random.choice(nexts)
# Provided main(), calls mimic_dict() and mimic()
def main():
  # Exactly one command-line argument is required: the file to read.
  if len(sys.argv) != 2:
    print 'usage: ./mimic.py file-to-read'
    sys.exit(1)
  # NOTE(review): `dict` shadows the builtin; left unchanged here.
  dict = mimic_dict(sys.argv[1])
  print dict
  print_mimic(dict, '')
if __name__ == '__main__':
  main()
| sanaldavis/Google-Python-Exercies | basic/mimic.py | Python | apache-2.0 | 2,511 | 0.010753 |
[uTidylib]: http://utidylib.berlios.de/
[options]: http://tidy.sourceforge.net/docs/quickref.html
Copyright (c)2008 [Waylan Limberg](http://achinghead.com)
License: [BSD](http://www.opensource.org/licenses/bsd-license.php) | retrography/scancode-toolkit | tests/cluecode/data/ics/markdown-markdown-extensions/html_tidy.py | Python | apache-2.0 | 225 | 0.031111 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
class PostDiffForCommit(AbstractStep):
    """Step that posts the current diff to the bug, marked for landing."""

    def run(self, state):
        # Resolve the inputs up front, then attach the patch. The diff is
        # fetched through the step cache so repeated lookups are cheap.
        bug_id = state["bug_id"]
        diff = self.cached_lookup(state, "diff")
        self._tool.bugs.add_patch_to_bug(
            bug_id,
            diff,
            "Patch for landing",
            mark_for_review=False,
            mark_for_landing=True)
| mogoweb/webkit_for_android5.1 | webkit/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py | Python | apache-2.0 | 1,879 | 0.001597 |
from django.core.exceptions import ImproperlyConfigured
from django.template import engines
from django.test import SimpleTestCase, override_settings
class TemplateStringsTests(SimpleTestCase):
    """Engine-setup failures must be re-raised on every access (#24265)."""

    @override_settings(TEMPLATES=[{
        'BACKEND': 'raise.import.error',
    }])
    def test_backend_import_error(self):
        """
        Failing to import a backend keeps raising the original import error.
        Regression test for #24265.
        """
        # The error must surface on the second call too, not be cached away.
        for _ in range(2):
            with self.assertRaises(ImportError):
                engines.all()

    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Incorrect: APP_DIRS and loaders are mutually incompatible.
        'APP_DIRS': True,
        'OPTIONS': {'loaders': []},
    }])
    def test_backend_improperly_configured(self):
        """
        Failing to initialize a backend keeps raising the original exception.
        Regression test for #24265.
        """
        for _ in range(2):
            with self.assertRaises(ImproperlyConfigured):
                engines.all()
| willharris/django | tests/template_backends/test_utils.py | Python | bsd-3-clause | 1,198 | 0 |
# Run gnatprove flow analysis, then print every flow ('F ') line from
# the generated ALI file for the test baseline to compare against.
from test_support import *
import re, os
do_flow()
# NOTE(review): the file handle is never closed; tolerable in a short
# test script.
hand = open(os.path.join("gnatprove", "main.ali"))
for line in hand :
    line = line.rstrip()
    # Only lines starting with 'F ' are of interest here.
    if re.search('^F ', line) :
        print line
import numpy as np
from numba import jit
import OSIM.Simulation.Utils as u
from OSIM.Modeling.AbstractComponents.NonlinearComponent import NonlinearComponent
class IBC(NonlinearComponent):
    """VBIC base-collector current element between the internal base (bi)
    and internal collector (ci) nodes.  Avalanche current is not
    implemented yet (see igc())."""
    def __init__(self, nodes, name, value, superComponent, **kwargs):
        super(IBC, self).__init__(nodes, name, value, superComponent, **kwargs)
        if(self.COMPONENT_PRINT_WARNINGS):
            print (name + "VBIC Current IBC-Warning no avalanche effect implemented yet")
        # nodes[0] / nodes[1]: internal base / internal collector node ids.
        self.bi = nodes[0]
        self.ci = nodes[1]
        #self.ei = nodes[2]
        '''
        TODO: adjust the default values
        '''
        # Execute "name=expr" assignments from variableDict so the eval()
        # calls below may refer to those names.
        # NOTE(review): in Python 3, exec() inside a function cannot create
        # locals visible to later eval() calls - confirm this still works
        # as intended on the targeted interpreter.
        for v in self.variableDict:
            variableExpr = "".join((v, "=", self.variableDict[v]))
            exec(variableExpr)
        # VBIC model parameters; the stored strings are expressions and are
        # evaluated here (defaults in parentheses).
        self.UT = eval(self.paramDict.get("ut", "0.026"))
        self.IBCI = eval(self.paramDict.get("ibci", "1.5E-18"))
        self.IBCN = eval(self.paramDict.get("ibcn", "1E-15"))
        self.NCN = eval(self.paramDict.get("ncn", "1.7"))
        self.NCI = eval(self.paramDict.get("nci", "1.05"))
        self.AVC1 = eval(self.paramDict.get("avc1", "2.4"))
        self.AVC2 = eval(self.paramDict.get("avc2", "11.5"))
        self.MC = eval(self.paramDict.get("mc", "0.12"))
        self.PC = eval(self.paramDict.get("pc", "0.62"))
        self.IS = eval(self.paramDict.get("is", "1e-16"))
        self.ISSR = eval(self.paramDict.get("issr", "1"))
        self.NF = eval(self.paramDict.get("nf", "1.0"))
        self.NR = eval(self.paramDict.get("nr", "1.0"))
        # Voltage limit handed to the limited exponential u.exp() below.
        self.Udlim = 0.8
    def performCalculations(self):
        """Recompute and store the element current and conductance."""
        self.current,self.gd = self.getCharacterisitcs()
    def getCharacterisitcs(self):
        """Return (current, conductance) for the present solution vector."""
        ubi = (self.sys.getSolutionAt(self.bi).real)
        uci = (self.sys.getSolutionAt(self.ci).real)
        #uei = (self.sys.getSolutionAt(self.ei).real)
        # Non-ideal (ibcn) and ideal (ibci) diode components; u.exp is a
        # limited exponential (limit Udlim) for Newton stability.
        ibcn = self.IBCN * (u.exp((ubi - uci), 1 / (self.NCN * self.UT), self.Udlim) - 1.0)
        ibci = self.IBCI * (u.exp((ubi - uci), 1 / (self.NCI * self.UT), self.Udlim) - 1.0)
        igc = 0 #self.igc(ubi, uci,uei,ibcn + ibci) # still missing
        dig = 0 #(self.igc(ubi+0.000001, uei, uci,ibcn + ibci)-igc)/0.000001
        return ibcn + ibci - igc , ibcn / (self.NCN * self.UT) + ibci / (self.NCI * self.UT)+dig + self.sys.GMIN
    def avalm(self, V, P, M, AV1, AV2):
        # from http://www.designers-guide.org/VBIC/release1.1.5/vbic1.1.5_pseudoCode.html
        # Kloosterman/de Graaff weak avalanche model
        vl = 0.5 * (np.sqrt((P - V) ** 2 + 0.01) + (P - V))
        return AV1 * vl * np.exp(- AV2 * vl ** (M - 1.0))
    def igc(self, ubi, uci,uei,ibc):
        """Avalanche generation current (not wired into the element yet)."""
        #TODO: implement !
        Itzf = self._ITF(ubi,uei)
        Itzr = self._ITR(ubi,uci)
        return (Itzf - Itzr - ibc )*self.avalm(ubi-uci,self.PC,self.MC,self.AVC1,self.AVC2)
    def reloadParams(self):
        """Re-evaluate all parameters after variableDict/paramDict changed."""
        # NOTE(review): the loop below is executed twice - this looks like
        # accidental duplication; confirm before cleaning up.
        for v in self.variableDict:
            variableExpr = "".join((v, "=", self.variableDict[v]))
            exec (variableExpr)
        for v in self.variableDict:
            variableExpr = "".join((v, "=", self.variableDict[v]))
            exec(variableExpr)
        self.UT = eval(self.paramDict.get("ut", "0.026"))
        self.IBCI = eval(self.paramDict.get("ibci", "1.5E-18"))
        self.IBCN = eval(self.paramDict.get("ibcn", "1E-15"))
        self.NCN = eval(self.paramDict.get("ncn", "1.7"))
        self.NCI = eval(self.paramDict.get("nci", "1.05"))
        self.AVC1 = eval(self.paramDict.get("avc1", "2.4"))
        self.AVC2 = eval(self.paramDict.get("avc2", "11.5"))
        self.MC = eval(self.paramDict.get("mc", "0.12"))
        self.PC = eval(self.paramDict.get("pc", "0.62"))
        self.IS = eval(self.paramDict.get("is", "1e-16"))
        self.ISSR = eval(self.paramDict.get("issr", "1"))
        self.NF = eval(self.paramDict.get("nf", "1.0"))
        self.NR = eval(self.paramDict.get("nr", "1.0"))
    @jit
    def _ITF(self, BI, EI):
        """Forward transport current, with the exponential argument limited."""
        if (BI < 1.6):
            lim = BI
        else:
            lim = 1.6
        return self.IS * (u.exp(BI - EI, 1 / (self.NF * self.UT), lim) - 1.0)
    @jit
    def _ITR(self, BI, CI):
        """Reverse transport current, with the exponential argument limited."""
        if (BI < 1.6):
            lim = BI
        else:
            lim = 1.6
        return self.IS * self.ISSR * (u.exp(BI - CI, 1 / (self.NR * self.UT), lim) - 1.0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the MSIE typed URLs Windows Registry plugin."""
import unittest
from plaso.parsers.winreg_plugins import typedurls
from tests.parsers.winreg_plugins import test_lib
class MsieTypedURLsPluginTest(test_lib.RegistryPluginTestCase):
  """Tests for the MSIE typed URLs Windows Registry plugin."""
  def testFilters(self):
    """Tests the FILTERS class attribute."""
    plugin = typedurls.TypedURLsPlugin()
    # The plugin must match both the TypedURLs and TypedPaths keys.
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Internet Explorer\\'
        'TypedURLs')
    self._AssertFiltersOnKeyPath(plugin, key_path)
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\TypedPaths')
    self._AssertFiltersOnKeyPath(plugin, key_path)
    # An unrelated key must not match.
    self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
  def testProcess(self):
    """Tests the Process function."""
    test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Internet Explorer\\'
        'TypedURLs')
    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)
    plugin = typedurls.TypedURLsPlugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)
    # The fixture key yields exactly one event and no warnings.
    self.assertEqual(storage_writer.number_of_events, 1)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    events = list(storage_writer.GetEvents())
    expected_entries = (
        'url1: http://cnn.com/ '
        'url2: http://twitter.com/ '
        'url3: http://linkedin.com/ '
        'url4: http://tweetdeck.com/ '
        'url5: mozilla '
        'url6: http://google.com/ '
        'url7: http://controller.shieldbase.local/certsrv/ '
        'url8: http://controller.shieldbase.local/ '
        'url9: http://www.stark-research-labs.com/ '
        'url10: http://www.adobe.com/ '
        'url11: http://www.google.com/ '
        'url12: http://www.firefox.com/ '
        'url13: http://go.microsoft.com/fwlink/?LinkId=69157')
    expected_event_values = {
        'date_time': '2012-03-12 21:23:53.3077499',
        'data_type': 'windows:registry:typedurls',
        'entries': expected_entries,
        'key_path': key_path,
        # This should just be the plugin name, as we're invoking it directly,
        # and not through the parser.
        'parser': plugin.plugin_name}
    self.CheckEventValues(storage_writer, events[0], expected_event_values)
class TypedPathsPluginTest(test_lib.RegistryPluginTestCase):
  """Tests for the typed paths Windows Registry plugin."""
  def testProcess(self):
    """Tests the Process function."""
    test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
    key_path = (
        'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
        'Explorer\\TypedPaths')
    win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
    registry_key = win_registry.GetKeyByPath(key_path)
    plugin = typedurls.TypedURLsPlugin()
    storage_writer = self._ParseKeyWithPlugin(
        registry_key, plugin, file_entry=test_file_entry)
    # The fixture key yields exactly one event and no warnings.
    self.assertEqual(storage_writer.number_of_events, 1)
    self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
    self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
    events = list(storage_writer.GetEvents())
    expected_entries = (
        'url1: \\\\controller')
    expected_event_values = {
        'date_time': '2010-11-10 07:58:15.8116250',
        'data_type': 'windows:registry:typedurls',
        'entries': expected_entries,
        'key_path': key_path,
        # This should just be the plugin name, as we're invoking it directly,
        # and not through the parser.
        'parser': plugin.plugin_name}
    self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| kiddinn/plaso | tests/parsers/winreg_plugins/typedurls.py | Python | apache-2.0 | 4,083 | 0.00147 |
# Dict mapping Twitter handles to known political-party affiliation
# (note: despite the variable name below, this is a dict, not tuples).
data_tuples = {
"jeremycorbyn" : "Labour",
"ken4london" : "Labour",
"Imran_HussainMP" : "Labour",
"GloriaDePiero" : "Labour",
"tom_watson" : "Labour",
"JonAshworth" : "Labour",
"UKLabour" : "Labour",
"RupaHuq" : "Labour",
"heidi_mp" : "Labour",
"Conservatives" : "Tory",
"claire4devizes" : "Tory",
"David_Cameron" : "Tory",
"NickyMorgan01" : "Tory",
"Freeman_George" : "Tory",
"lucyallan" : "Tory",
"edvaizey" : "Tory",
"ChrisWhite_MP" : "Tory",
"BrandonLewis" : "Tory",
"NicolaSturgeon" : "SNP",
"theSNP" : "SNP",
"StewartHosieSNP" : "SNP",
"DougChapmanSNP" : "SNP",
"AngusMacNeilSNP" : "SNP",
"RobertJenrick" : "Tory",
"JulieElliottMP" : "Labour",
"IanMearnsMP" : "Labour",
"SDoughtyMP" : "Labour",
"Keith_Vaz" : "Labour",
"CWhittakerMP" : "Tory",
"Owen_PatersonMP" : "Tory",
"NigelFarage" : "UKIP",
"DouglasCarswell" : "UKIP",
"paulnuttallukip" : "UKIP",
"Steven_Woolfe" : "UKIP",
"RogerHelmerMEP" : "UKIP",
"oflynnmep" : "UKIP",
"rog_ukip" : "UKIP",
"SimonDanczuk" : "Labour",
"WalkerWorcester" : "Tory",
"NickBolesMP" : "Tory",
"tcunninghammp1" : "Labour",
"KateHoeyMP" : "Labour",
"HelenJonesMP" : "Labour",
"SarahChampionMP" : "Labour",
"JustineGreening" : "Tory",
"PeterBoneMP" : "Tory",
"Tim_Aker" : "UKIP",
"JohnBickleyUKIP" : "UKIP",
"SuzanneEvans1" : "UKIP"
}
| CarltonShepherd/political-tweet-classifier | data/usermapping.py | Python | gpl-2.0 | 1,522 | 0.032852 |
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from autopilotlib.app.logger import Logger
from autopilotlib.wrappers.wrapper import Wrapper
from autopilotlib.app.constants import TIME_TO_WAIT_FOR_DIALOG_TO_SHOW_IN_MILLISECONDS
wxMessageDialog = wx.MessageDialog
class MessageDialog(wxMessageDialog, Wrapper):
    """wx.MessageDialog replacement that logs the dialog and schedules
    autopilot exploration of it shortly after it is shown.

    Installed in place of wx.MessageDialog via wrap().
    """

    def __init__(self, *args, **kw):
        wxMessageDialog.__init__(self, *args, **kw)

    def ShowModal(self):
        """Show the dialog modally; returns the dialog's return code."""
        Logger.add_result("MessageDialog opened")
        # Give the dialog time to appear before exploring its widgets.
        wx.CallLater(TIME_TO_WAIT_FOR_DIALOG_TO_SHOW_IN_MILLISECONDS,
                     self._explore, MessageDialog.listener)
        # Bug fix: propagate the return code (e.g. wx.ID_OK) from the base
        # class; the original discarded it, so callers always saw None.
        return super(MessageDialog, self).ShowModal()

    @classmethod
    def wrap(cls, listener):
        """Install this class as wx.MessageDialog and remember `listener`."""
        wx.MessageDialog = MessageDialog
        MessageDialog.listener = listener
| ezequielpereira/Time-Line | autopilot/autopilotlib/wrappers/messagedialog.py | Python | gpl-3.0 | 1,500 | 0.003333 |
from django.views import generic
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon import tables
from billingdashboard.common import get_user_invoices
from billingdashboard.dashboards.project.cust_invoice \
import tables as invoice_table
from astutedashboard.common import get_invoices, get_invoice
class IndexView(tables.DataTableView):
    """Dashboard page listing the current user's invoices."""
    table_class = invoice_table.UserInvoiceListingTable
    template_name = 'project/cust_invoice/index.html'
    page_title = _("Invoices")
    def get_data(self):
        # Table rows: this user's invoices with full (verbose) details.
        return get_user_invoices(self.request, verbose=True)
class UserInvoiceDetailsView(generic.TemplateView):
    """Dashboard page showing one invoice in detail.

    The invoice id comes from the URL kwarg ``invoice_id``.
    """
    template_name = 'project/cust_invoice/invoice.html'

    def get_context_data(self, **kwargs):
        context = super(UserInvoiceDetailsView, self).get_context_data(**kwargs)
        # Renamed from `id`, which shadowed the builtin.
        invoice_id = self.kwargs['invoice_id']
        context['invoice'] = get_invoice(self.request, invoice_id, verbose=True)
        return context
| sreenathmenon/billing-dashboard | billingdashboard/dashboards/project/cust_invoice/views.py | Python | apache-2.0 | 1,151 | 0.004344 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_interface_ospf
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of an OSPF interface instance.
description:
- Manages configuration of an OSPF interface instance.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Default, where supported, restores params default value.
- To remove an existing authentication configuration you should use
C(message_digest_key_id=default) plus all other options matching their
existing values.
- C(state=absent) removes the whole OSPF interface configuration.
options:
interface:
description:
- Name of this cisco_interface resource. Valid value is a string.
required: true
ospf:
description:
- Name of the ospf instance.
required: true
area:
description:
- Ospf area associated with this cisco_interface_ospf instance.
Valid values are a string, formatted as an IP address
(i.e. "0.0.0.0") or as an integer.
required: true
cost:
description:
- The cost associated with this cisco_interface_ospf instance.
hello_interval:
description:
- Time between sending successive hello packets.
Valid values are an integer or the keyword 'default'.
dead_interval:
description:
- Time interval an ospf neighbor waits for a hello
packet before tearing down adjacencies. Valid values are an
integer or the keyword 'default'.
passive_interface:
description:
- Setting to true will prevent this interface from receiving
HELLO packets.
type: bool
message_digest:
description:
- Enables or disables the usage of message digest authentication.
type: bool
message_digest_key_id:
description:
- Md5 authentication key-id associated with the ospf instance.
If this is present, message_digest_encryption_type,
message_digest_algorithm_type and message_digest_password are
mandatory. Valid value is an integer and 'default'.
message_digest_algorithm_type:
description:
- Algorithm used for authentication among neighboring routers
within an area. Valid values are 'md5' and 'default'.
choices: ['md5', 'default']
message_digest_encryption_type:
description:
- Specifies the scheme used for encrypting message_digest_password.
Valid values are '3des' or 'cisco_type_7' encryption or 'default'.
choices: ['cisco_type_7','3des', 'default']
message_digest_password:
description:
- Specifies the message_digest password. Valid value is a string.
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_interface_ospf:
interface: ethernet1/32
ospf: 1
area: 1
cost: default
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface Ethernet1/32", "ip router ospf 1 area 0.0.0.1"]
'''
import re
import struct
import socket
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
# Module arguments that are on/off keywords on the CLI.
BOOL_PARAMS = [
    'passive_interface',
    'message_digest'
]
# Maps module argument names to the NX-OS interface sub-command that
# carries them.  'interface' maps to '' because it selects the config
# section rather than a command; several arguments share one command
# (multi-token 'ip router ospf' and 'ip ospf message-digest-key' lines).
PARAM_TO_COMMAND_KEYMAP = {
    'interface': '',
    'cost': 'ip ospf cost',
    'ospf': 'ip router ospf',
    'area': 'ip router ospf',
    'hello_interval': 'ip ospf hello-interval',
    'dead_interval': 'ip ospf dead-interval',
    'passive_interface': 'ip ospf passive-interface',
    'message_digest': 'ip ospf authentication message-digest',
    'message_digest_key_id': 'ip ospf message-digest-key',
    'message_digest_algorithm_type': 'ip ospf message-digest-key',
    'message_digest_encryption_type': 'ip ospf message-digest-key',
    'message_digest_password': 'ip ospf message-digest-key',
}
def get_value(arg, config, module):
    """Extracts the current value of module argument ``arg`` from the
    interface section text in ``config``."""
    command = PARAM_TO_COMMAND_KEYMAP[arg]
    has_command = re.search(r'\s+{0}\s*$'.format(command), config, re.M)
    has_command_val = re.search(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M)
    if command == 'ip router ospf':
        value = ''
        if has_command_val:
            # Line format: "ip router ospf <instance> area <area>".
            value_list = has_command_val.group('value').split()
            if arg == 'ospf':
                value = value_list[0]
            elif arg == 'area':
                value = value_list[2]
                value = normalize_area(value, module)
    elif command == 'ip ospf message-digest-key':
        value = ''
        if has_command_val:
            # Line format: "... <key-id> <algorithm> <encryption> <password>".
            value_list = has_command_val.group('value').split()
            if arg == 'message_digest_key_id':
                value = value_list[0]
            elif arg == 'message_digest_algorithm_type':
                value = value_list[1]
            elif arg == 'message_digest_encryption_type':
                value = value_list[2]
                # The device shows numeric encryption types; map them back
                # to the module's keyword values.
                if value == '3':
                    value = '3des'
                elif value == '7':
                    value = 'cisco_type_7'
            elif arg == 'message_digest_password':
                value = value_list[3]
    elif arg == 'passive_interface':
        # Distinguish an explicit "no ip ospf passive-interface" line from
        # the command simply being absent.
        has_no_command = re.search(r'\s+no\s+{0}\s*$'.format(command), config, re.M)
        value = False
        if has_command and not has_no_command:
            value = True
    elif arg in BOOL_PARAMS:
        value = bool(has_command)
    else:
        value = ''
        if has_command_val:
            value = has_command_val.group('value')
    return value
def get_existing(module, args):
    """Returns a dict of the on-device values for each requested arg,
    scoped to the interface named in the module parameters."""
    existing = {}
    netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
    # port-channel/loopback names stay lower case; everything else is
    # capitalized to match the device's section headings.
    if module.params['interface'].startswith('loopback') or module.params['interface'].startswith('port-channel'):
        parents = ['interface {0}'.format(module.params['interface'])]
    else:
        parents = ['interface {0}'.format(module.params['interface'].capitalize())]
    config = netcfg.get_section(parents)
    # Only harvest values when OSPF is configured on the interface at all.
    if 'ospf' in config:
        for arg in args:
            if arg not in ['interface']:
                existing[arg] = get_value(arg, config, module)
    existing['interface'] = module.params['interface']
    return existing
def apply_key_map(key_map, table):
    """Translate the keys of ``table`` through ``key_map``.

    Entries whose key is missing from ``key_map`` (or that map to an
    empty command string) are dropped; values pass through untouched.
    """
    return dict(
        (key_map[key], value)
        for key, value in table.items()
        if key_map.get(key)
    )
def get_default_commands(existing, proposed, existing_commands, key, module):
    """Builds the 'no ...' command(s) that reset ``key`` to its default."""
    commands = list()
    existing_value = existing_commands.get(key)
    if key.startswith('ip ospf message-digest-key'):
        # Only remove the digest key when at least one of its components
        # matches the existing configuration.
        check = False
        for param in ['message_digest_encryption_type',
                      'message_digest_algorithm_type',
                      'message_digest_password']:
            if existing[param] == proposed[param]:
                check = True
        if check:
            if existing['message_digest_encryption_type'] == '3des':
                encryption_type = '3'
            elif existing['message_digest_encryption_type'] == 'cisco_type_7':
                encryption_type = '7'
            # NOTE(review): encryption_type is unbound if the existing type
            # is neither '3des' nor 'cisco_type_7' - confirm that cannot
            # happen for a configured key.
            command = 'no {0} {1} {2} {3} {4}'.format(
                key,
                existing['message_digest_key_id'],
                existing['message_digest_algorithm_type'],
                encryption_type,
                existing['message_digest_password'])
            commands.append(command)
    else:
        commands.append('no {0} {1}'.format(key, existing_value))
    return commands
def get_custom_command(existing_cmd, proposed, key, module):
    """Builds the add-command for the multi-token keys ('ip router ospf'
    and 'ip ospf message-digest-key').

    NOTE(review): the ``module`` parameter is unused here - presumably
    kept for signature symmetry with the other helpers.
    """
    commands = list()
    if key == 'ip router ospf':
        command = '{0} {1} area {2}'.format(key, proposed['ospf'],
                                            proposed['area'])
        # Avoid emitting a duplicate of a command already queued.
        if command not in existing_cmd:
            commands.append(command)
    elif key.startswith('ip ospf message-digest-key'):
        if (proposed['message_digest_key_id'] != 'default' and
                'options' not in key):
            # Map module keywords to the numeric CLI encryption types.
            if proposed['message_digest_encryption_type'] == '3des':
                encryption_type = '3'
            elif proposed['message_digest_encryption_type'] == 'cisco_type_7':
                encryption_type = '7'
            command = '{0} {1} {2} {3} {4}'.format(
                key,
                proposed['message_digest_key_id'],
                proposed['message_digest_algorithm_type'],
                encryption_type,
                proposed['message_digest_password'])
            commands.append(command)
    return commands
def state_present(module, existing, proposed, candidate):
    """Adds the commands that move the device toward ``proposed`` to the
    ``candidate`` configuration."""
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
    for key, value in proposed_commands.items():
        if existing_commands.get(key):
            if key == 'ip router ospf':
                if proposed['area'] == existing['area']:
                    continue
            # Skip anything already configured with the desired value.
            if existing_commands[key] == proposed_commands[key]:
                continue
        if key == 'ip ospf passive-interface' and module.params.get('interface').upper().startswith('LO'):
            module.fail_json(msg='loopback interface does not support passive_interface')
        if value is True:
            commands.append(key)
        elif value is False:
            commands.append('no {0}'.format(key))
        elif value == 'default':
            # Resetting to default only makes sense if something is set.
            if existing_commands.get(key):
                commands.extend(get_default_commands(existing, proposed,
                                                     existing_commands, key,
                                                     module))
        else:
            if (key == 'ip router ospf' or
                    key.startswith('ip ospf message-digest-key')):
                commands.extend(get_custom_command(commands, proposed,
                                                   key, module))
            else:
                command = '{0} {1}'.format(key, value.lower())
                commands.append(command)
    if commands:
        parents = ['interface {0}'.format(module.params['interface'].capitalize())]
        candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
    """Adds the 'no ...' commands that strip the OSPF configuration from
    the interface to the ``candidate`` configuration."""
    commands = []
    parents = ['interface {0}'.format(module.params['interface'].capitalize())]
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
    for key, value in existing_commands.items():
        if value:
            if key.startswith('ip ospf message-digest-key'):
                if 'options' not in key:
                    # Map module keywords back to numeric CLI types.
                    if existing['message_digest_encryption_type'] == '3des':
                        encryption_type = '3'
                    elif existing['message_digest_encryption_type'] == 'cisco_type_7':
                        encryption_type = '7'
                    command = 'no {0} {1} {2} {3} {4}'.format(
                        key,
                        existing['message_digest_key_id'],
                        existing['message_digest_algorithm_type'],
                        encryption_type,
                        existing['message_digest_password'])
                    commands.append(command)
            elif key in ['ip ospf authentication message-digest',
                         'ip ospf passive-interface']:
                if value:
                    commands.append('no {0}'.format(key))
            elif key == 'ip router ospf':
                command = 'no {0} {1} area {2}'.format(key, proposed['ospf'], proposed['area'])
                # Several args share this command; only emit it once.
                if command not in commands:
                    commands.append(command)
            else:
                existing_value = existing_commands.get(key)
                commands.append('no {0} {1}'.format(key, existing_value))
    candidate.add(commands, parents=parents)
def normalize_area(area, module):
    """Return the OSPF area in dotted-decimal (IP address) notation.

    Integer input is converted (e.g. '1' -> '0.0.0.1'); dotted input is
    validated to have four octet groups and returned unchanged, with
    module.fail_json invoked on malformed values.
    """
    try:
        numeric_area = int(area)
    except ValueError:
        # Not an integer: must already be dotted decimal, a.b.c.d.
        if len(area.split('.')) != 4:
            module.fail_json(msg='Incorrect Area ID format', area=area)
        return area
    return socket.inet_ntoa(struct.pack('!L', numeric_area))
def main():
    """Ansible module entry point: compute and apply the OSPF interface
    configuration delta, then exit with the result."""
    argument_spec = dict(
        interface=dict(required=True, type='str'),
        ospf=dict(required=True, type='str'),
        area=dict(required=True, type='str'),
        cost=dict(required=False, type='str'),
        hello_interval=dict(required=False, type='str'),
        dead_interval=dict(required=False, type='str'),
        passive_interface=dict(required=False, type='bool'),
        message_digest=dict(required=False, type='bool'),
        message_digest_key_id=dict(required=False, type='str'),
        message_digest_algorithm_type=dict(required=False, type='str', choices=['md5', 'default']),
        message_digest_encryption_type=dict(required=False, type='str', choices=['cisco_type_7', '3des', 'default']),
        message_digest_password=dict(required=False, type='str', no_log=True),
        state=dict(choices=['present', 'absent'], default='present', required=False)
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           required_together=[['message_digest_key_id',
                                               'message_digest_algorithm_type',
                                               'message_digest_encryption_type',
                                               'message_digest_password']],
                           supports_check_mode=True)
    # Normalize the interface name:
    #
    # * port-channel and loopback interfaces are expected all lower case;
    # * all other interfaces expect an uppercase leading character
    #   followed by lower case characters.
    #
    if re.match(r'(port-channel|loopback)', module.params['interface'], re.I):
        module.params['interface'] = module.params['interface'].lower()
    else:
        module.params['interface'] = module.params['interface'].capitalize()
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False, 'commands': [], 'warnings': warnings}
    # Removing an auth key requires message_digest_key_id=default, not
    # defaulting the individual components.
    for param in ['message_digest_encryption_type',
                  'message_digest_algorithm_type',
                  'message_digest_password']:
        if module.params[param] == 'default' and module.params['message_digest_key_id'] != 'default':
            module.exit_json(msg='Use message_digest_key_id=default to remove an existing authentication configuration')
    state = module.params['state']
    args = PARAM_TO_COMMAND_KEYMAP.keys()
    existing = get_existing(module, args)
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)
    proposed = {}
    # Coerce string booleans/defaults supplied by the user into their
    # canonical forms before diffing against the device state.
    for key, value in proposed_args.items():
        if key != 'interface':
            if str(value).lower() == 'true':
                value = True
            elif str(value).lower() == 'false':
                value = False
            elif str(value).lower() == 'default':
                value = 'default'
            if existing.get(key) or (not existing.get(key) and value):
                proposed[key] = value
    proposed['area'] = normalize_area(proposed['area'], module)
    # 10 seconds is the device default hello interval.
    if 'hello_interval' in proposed and proposed['hello_interval'] == '10':
        proposed['hello_interval'] = 'default'
    candidate = CustomNetworkConfig(indent=3)
    if state == 'present':
        state_present(module, existing, proposed, candidate)
    elif state == 'absent' and existing.get('ospf') == proposed['ospf'] and existing.get('area') == proposed['area']:
        state_absent(module, existing, proposed, candidate)
    if candidate:
        candidate = candidate.items_text()
        load_config(module, candidate)
        result['changed'] = True
        result['commands'] = candidate
    module.exit_json(**result)
| hryamzik/ansible | lib/ansible/modules/network/nxos/nxos_interface_ospf.py | Python | gpl-3.0 | 17,076 | 0.001464 |
#! /usr/bin/python2.6
# NOTE: the fixed interpreter path in the shebang above is NOT a
# mistake.  It is intentionally NOT "/usr/bin/env python".  On many
# systems (e.g. Solaris), /usr/local/bin is not in $PATH as passed to
# CGI scripts, and /usr/local/bin is the default directory where Python
# is installed, so /usr/bin/env would be unable to find python.
# Granted, binary installations by Linux vendors often install Python
# in /usr/bin.  So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# XXX Perhaps there should be a slimmed version that doesn't contain
# all those backwards compatible and debugging classes and functions?
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from operator import attrgetter
import sys
import os
import urllib
import UserDict
import urlparse
from warnings import filterwarnings, catch_warnings, warn
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
import mimetools
if sys.py3kwarning:
filterwarnings("ignore", ".*rfc822 has been removed", DeprecationWarning)
import rfc822
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
"SvFormContentDict", "InterpFormContentDict", "FormContent",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
    """Write a log message, if there is a log file.
    Even though this function is called initlog(), you should always
    use log(); log is a variable that is set either to initlog
    (initially), to dolog (once the log file has been opened), or to
    nolog (when logging is disabled).
    The first argument is a format string; the remaining arguments (if
    any) are arguments to the % operator, so e.g.
        log("%s: %s", "a", "b")
    will write "a: b" to the log file, followed by a newline.
    If the global logfp is not None, it should be a file object to
    which log data is written.
    If the global logfp is None, the global logfile may be a string
    giving a filename to open, in append mode.  This file should be
    world writable!!!  If the file can't be opened, logging is
    silently disabled (since there is no safe place where we could
    send an error message).
    """
    # Rebinds the module-level 'log' so subsequent calls skip this setup.
    global logfp, log
    if logfile and not logfp:
        try:
            logfp = open(logfile, "a")
        except IOError:
            # Can't open the log file: fall through with logfp still None,
            # which disables logging below.
            pass
    if not logfp:
        log = nolog
    else:
        log = dolog
    # Forward this first message to whichever implementation was chosen.
    log(*allargs)
def dolog(fmt, *args):
    """Format and append one message to the open log file (see initlog())."""
    message = fmt % args
    logfp.write(message + "\n")
def nolog(*allargs):
    """No-op stub bound to log() while logging is disabled."""
    return None
log = initlog # The current logging function
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
    """Parse a query in the environment or from a file (default stdin)
    Arguments, all optional:
    fp              : file pointer; default: sys.stdin
    environ         : environment dictionary; default: os.environ
    keep_blank_values: flag indicating whether blank values in
        URL encoded forms should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.
    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.
    """
    if fp is None:
        fp = sys.stdin
    if not 'REQUEST_METHOD' in environ:
        environ['REQUEST_METHOD'] = 'GET'       # For testing stand-alone
    if environ['REQUEST_METHOD'] == 'POST':
        ctype, pdict = parse_header(environ['CONTENT_TYPE'])
        if ctype == 'multipart/form-data':
            return parse_multipart(fp, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            clength = int(environ['CONTENT_LENGTH'])
            if maxlen and clength > maxlen:
                # Parenthesized raise: equivalent in 2.6, Py3-compatible.
                raise ValueError('Maximum content length exceeded')
            qs = fp.read(clength)
        else:
            qs = ''                     # Unknown content-type
        # Merge in any query string that accompanied the POST body.
        if 'QUERY_STRING' in environ:
            if qs: qs = qs + '&'
            qs = qs + environ['QUERY_STRING']
        elif sys.argv[1:]:
            if qs: qs = qs + '&'
            qs = qs + sys.argv[1]
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    elif 'QUERY_STRING' in environ:
        qs = environ['QUERY_STRING']
    else:
        # Stand-alone testing fallback: take the query from argv.
        if sys.argv[1:]:
            qs = sys.argv[1]
        else:
            qs = ""
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    return parse_qs(qs, keep_blank_values, strict_parsing)
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument."""
    # Deprecated shim: the implementation moved to urlparse; this wrapper
    # only warns and delegates.
    warn("cgi.parse_qs is deprecated, use urlparse.parse_qs \
        instead", PendingDeprecationWarning, 2)
    return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument."""
    # Deprecated shim: delegates to urlparse.parse_qsl after warning.
    warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
         PendingDeprecationWarning, 2)
    return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing)
def parse_multipart(fp, pdict):
    """Parse multipart input.
    Arguments:
    fp   : input file
    pdict: dictionary containing other parameters of content-type header
    Returns a dictionary just like parse_qs(): keys are the field names, each
    value is a list of values for that field.  This is easy to use but not
    much good if you are expecting megabytes to be uploaded -- in that case,
    use the FieldStorage class instead which is much more flexible.  Note
    that content-type is the raw, unparsed contents of the content-type
    header.
    XXX This does not parse nested multipart parts -- use FieldStorage for
    that.
    XXX This should really be subsumed by FieldStorage altogether -- no
    point in having two implementations of the same parsing algorithm.
    Also, FieldStorage protects itself better against certain DoS attacks
    by limiting the size of the data read in one chunk.  The API here
    does not support that kind of protection.  This also affects parse()
    since it can call parse_multipart().
    """
    boundary = ""
    if 'boundary' in pdict:
        boundary = pdict['boundary']
    if not valid_boundary(boundary):
        # Parenthesized raise: equivalent in 2.6, Py3-compatible.
        raise ValueError('Invalid boundary in multipart form: %r'
                         % (boundary,))
    nextpart = "--" + boundary
    lastpart = "--" + boundary + "--"
    partdict = {}
    terminator = ""
    while terminator != lastpart:
        bytes = -1
        data = None
        if terminator:
            # At start of next part.  Read headers first.
            headers = mimetools.Message(fp)
            clength = headers.getheader('content-length')
            if clength:
                try:
                    bytes = int(clength)
                except ValueError:
                    pass
            if bytes > 0:
                if maxlen and bytes > maxlen:
                    raise ValueError('Maximum content length exceeded')
                data = fp.read(bytes)
            else:
                data = ""
        # Read lines until end of part.
        lines = []
        while 1:
            line = fp.readline()
            if not line:
                terminator = lastpart # End outer loop
                break
            if line[:2] == "--":
                terminator = line.strip()
                if terminator in (nextpart, lastpart):
                    break
            lines.append(line)
        # Done with part.
        if data is None:
            continue
        if bytes < 0:
            # No content-length header: the body is everything collected
            # above, minus the line terminator that precedes the boundary.
            if lines:
                # Strip final line terminator
                line = lines[-1]
                if line[-2:] == "\r\n":
                    line = line[:-2]
                elif line[-1:] == "\n":
                    line = line[:-1]
                lines[-1] = line
                data = "".join(lines)
        line = headers['content-disposition']
        if not line:
            continue
        key, params = parse_header(line)
        if key != 'form-data':
            continue
        if 'name' in params:
            name = params['name']
        else:
            continue
        if name in partdict:
            partdict[name].append(data)
        else:
            partdict[name] = [data]
    return partdict
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
    """Parse a Content-type like header.
    Return the main content-type and a dictionary of options.
    """
    parts = _parseparam(';' + line)
    # Use the next() builtin (available since 2.6) instead of the Py2-only
    # .next() method; behavior is identical and it is Py3-compatible.
    key = next(parts)
    pdict = {}
    for p in parts:
        i = p.find('=')
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i+1:].strip()
            # Strip surrounding quotes and undo backslash escaping.
            if len(value) >= 2 and value[0] == value[-1] == '"':
                value = value[1:-1]
                value = value.replace('\\\\', '\\').replace('\\"', '"')
            pdict[name] = value
    return key, pdict
# Classes for field storage
# =========================
class MiniFieldStorage:
    """Degenerate FieldStorage stand-in used when no file uploads occur.

    Carries only a field name and its string value; the file-related
    attributes exist but are inert so callers can treat both classes
    uniformly.
    """

    # Inert attributes mirroring the FieldStorage interface.
    filename = None
    list = None
    type = None
    file = None
    type_options = {}
    disposition = None
    disposition_options = {}
    headers = {}

    def __init__(self, name, value):
        """Store the field name and its value."""
        self.name = name
        self.value = value

    def __repr__(self):
        """Return printable representation."""
        return "MiniFieldStorage({!r}, {!r})".format(self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
The items are either a Python list (if there's multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
file: the file(-like) object from which you can read the data;
None if the data is stored a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes rfc822.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary="",
environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin
(not used when the request method is GET)
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
URL encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
fp = StringIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
self.fp = fp or sys.stdin
self.headers = headers
self.outerboundary = outerboundary
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
self.innerboundary = ""
if 'boundary' in pdict:
self.innerboundary = pdict['boundary']
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError, 'Maximum content length exceeded'
self.length = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
    def __iter__(self):
        # Iterating a form yields its distinct field names, like a dict.
        return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError, name
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError, "not indexable"
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError, key
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if type(value) is type([]):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError, "not indexable"
return list(set(item.name for item in self.list))
def has_key(self, key):
"""Dictionary style has_key() method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
    def __len__(self):
        """Dictionary style len(x) support."""
        # Counts distinct field names, not individual parts.
        return len(self.keys())
    def __nonzero__(self):
        # A form is truthy once at least one field has been parsed.
        return bool(self.list)
    def read_urlencoded(self):
        """Internal: read data in query string format."""
        qs = self.fp.read(self.length)
        if self.qs_on_post:
            # A POST may also carry a query string; merge those fields in.
            qs += '&' + self.qs_on_post
        # NOTE(review): the local name 'list' shadows the builtin here.
        self.list = list = []
        for key, value in urlparse.parse_qsl(qs, self.keep_blank_values,
                                            self.strict_parsing):
            list.append(MiniFieldStorage(key, value))
        self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
self.list = []
if self.qs_on_post:
for key, value in urlparse.parse_qsl(self.qs_on_post,
self.keep_blank_values, self.strict_parsing):
self.list.append(MiniFieldStorage(key, value))
FieldStorageClass = None
klass = self.FieldStorageClass or self.__class__
part = klass(self.fp, {}, ib,
environ, keep_blank_values, strict_parsing)
# Throw first part away
while not part.done:
headers = rfc822.Message(self.fp)
part = klass(self.fp, headers, ib,
environ, keep_blank_values, strict_parsing)
self.list.append(part)
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
    def read_binary(self):
        """Internal: read binary data."""
        self.file = self.make_file('b')
        todo = self.length
        if todo >= 0:
            # Read in bufsize chunks so a huge upload never sits in memory
            # as a single string.
            while todo > 0:
                data = self.fp.read(min(todo, self.bufsize))
                if not data:
                    # Premature EOF: mark the parse as aborted.
                    self.done = -1
                    break
                self.file.write(data)
                todo = todo - len(data)
    def read_lines(self):
        """Internal: read lines until EOF or outerboundary."""
        # self.__file buffers small bodies in memory; __write() spills to a
        # real temp file when it grows past the threshold.
        self.file = self.__file = StringIO()
        if self.outerboundary:
            self.read_lines_to_outerboundary()
        else:
            self.read_lines_to_eof()
    def __write(self, line):
        # Append a line to the body, spilling from the in-memory buffer to
        # a real temporary file once more than 1000 bytes have accumulated.
        if self.__file is not None:
            if self.__file.tell() + len(line) > 1000:
                self.file = self.make_file('')
                self.file.write(self.__file.getvalue())
                self.__file = None
        self.file.write(line)
    def read_lines_to_eof(self):
        """Internal: read lines until EOF."""
        while 1:
            # Bounded readline (64 KiB) protects against unterminated lines.
            line = self.fp.readline(1<<16)
            if not line:
                self.done = -1
                break
            self.__write(line)
    def read_lines_to_outerboundary(self):
        """Internal: read lines until outerboundary."""
        next = "--" + self.outerboundary
        last = next + "--"
        # The line terminator preceding the boundary belongs to the boundary,
        # not to the data, so each delimiter is written one iteration late
        # (odelim) and the final one is dropped.
        delim = ""
        last_line_lfend = True
        while 1:
            line = self.fp.readline(1<<16)
            if not line:
                self.done = -1
                break
            # A boundary only counts when it starts a fresh line.
            if line[:2] == "--" and last_line_lfend:
                strippedline = line.strip()
                if strippedline == next:
                    break
                if strippedline == last:
                    self.done = 1
                    break
            odelim = delim
            if line[-2:] == "\r\n":
                delim = "\r\n"
                line = line[:-2]
                last_line_lfend = True
            elif line[-1] == "\n":
                delim = "\n"
                line = line[:-1]
                last_line_lfend = True
            else:
                # Over-long line split by the bounded readline: no terminator.
                delim = ""
                last_line_lfend = False
            self.__write(odelim + line)
    def skip_lines(self):
        """Internal: skip lines until outer boundary if defined."""
        if not self.outerboundary or self.done:
            return
        next = "--" + self.outerboundary
        last = next + "--"
        last_line_lfend = True
        while 1:
            line = self.fp.readline(1<<16)
            if not line:
                self.done = -1
                break
            # A boundary only counts when it starts a fresh line.
            if line[:2] == "--" and last_line_lfend:
                strippedline = line.strip()
                if strippedline == next:
                    break
                if strippedline == last:
                    self.done = 1
                    break
            last_line_lfend = line.endswith('\n')
def make_file(self, binary=None):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The 'binary' argument is unused -- the file is always opened
in binary mode.
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
import tempfile
return tempfile.TemporaryFile("w+b")
# Backwards Compatibility Classes
# ===============================
class FormContentDict(UserDict.UserDict):
    """Form content as dictionary with a list of values per field.
    form = FormContentDict()
    form[key] -> [value, value, ...]
    key in form -> Boolean
    form.keys() -> [key, key, ...]
    form.values() -> [[val, val, ...], [val, val, ...], ...]
    form.items() ->  [(key, [val, val, ...]), (key, [val, val, ...]), ...]
    form.dict == {key: [val, val, ...], ...}
    """
    def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0):
        # Parse once at construction; .dict and .data alias the same
        # mapping of field name -> list of values.
        self.dict = self.data = parse(environ=environ,
                                      keep_blank_values=keep_blank_values,
                                      strict_parsing=strict_parsing)
        self.query_string = environ['QUERY_STRING']
class SvFormContentDict(FormContentDict):
    """Form content as dictionary expecting a single value per field.
    If you only expect a single value for each field, then form[key]
    will return that single value.  It will raise an IndexError if
    that expectation is not true.  If you expect a field to have
    possible multiple values, than you can use form.getlist(key) to
    get all of the values.  values() and items() are a compromise:
    they return single strings where there is a single value, and
    lists of strings otherwise.
    """
    def __getitem__(self, key):
        if len(self.dict[key]) > 1:
            # Parenthesized raise: equivalent in 2.6, Py3-compatible.
            raise IndexError('expecting a single value')
        return self.dict[key][0]
    def getlist(self, key):
        return self.dict[key]
    def values(self):
        # Collapse singleton lists to their lone value.
        result = []
        for value in self.dict.values():
            if len(value) == 1:
                result.append(value[0])
            else: result.append(value)
        return result
    def items(self):
        # Collapse singleton lists to their lone value.
        result = []
        for key, value in self.dict.items():
            if len(value) == 1:
                result.append((key, value[0]))
            else: result.append((key, value))
        return result
class InterpFormContentDict(SvFormContentDict):
    """This class is present for backwards compatibility only."""
    def __getitem__(self, key):
        # Best-effort coercion of the single value: int, then float,
        # otherwise a stripped string.
        v = SvFormContentDict.__getitem__(self, key)
        if v[0] in '0123456789+-.':
            try: return int(v)
            except ValueError:
                try: return float(v)
                except ValueError: pass
        return v.strip()
    def values(self):
        result = []
        for key in self.keys():
            try:
                result.append(self[key])
            except IndexError:
                # Multi-valued field: fall back to the raw list.
                result.append(self.dict[key])
        return result
    def items(self):
        result = []
        for key in self.keys():
            try:
                result.append((key, self[key]))
            except IndexError:
                # Multi-valued field: fall back to the raw list.
                result.append((key, self.dict[key]))
        return result
class FormContent(FormContentDict):
    """This class is present for backwards compatibility only."""
    # All accessors return None (never raise) for unknown keys.
    def values(self, key):
        if key in self.dict :return self.dict[key]
        else: return None
    def indexed_value(self, key, location):
        # Return the location-th value for key, or None if out of range.
        if key in self.dict:
            if len(self.dict[key]) > location:
                return self.dict[key][location]
            else: return None
        else: return None
    def value(self, key):
        if key in self.dict: return self.dict[key][0]
        else: return None
    def length(self, key):
        return len(self.dict[key])
    def stripped(self, key):
        if key in self.dict: return self.dict[key][0].strip()
        else: return None
    def pars(self):
        return self.dict
# Test/debug code
# ===============
def test(environ=os.environ):
    """Robust test CGI script, usable as main program.
    Write minimal HTTP headers and dump all information provided to
    the script in HTML form.
    """
    print "Content-type: text/html"
    print
    # Route tracebacks to the browser instead of the server error log.
    sys.stderr = sys.stdout
    try:
        form = FieldStorage()   # Replace with other classes to test those
        print_directory()
        print_arguments()
        print_form(form)
        print_environ(environ)
        print_environ_usage()
        def f():
            # Deliberately broken exec to exercise print_exception().
            exec "testing print_exception() -- <I>italics?</I>"
        def g(f=f):
            f()
        print "<H3>What follows is a test, not an actual exception:</H3>"
        g()
    except:
        print_exception()
    print "<H1>Second try with a small maxlen...</H1>"
    # Shrink the module-wide limit to force the length-check code path.
    global maxlen
    maxlen = 50
    try:
        form = FieldStorage()   # Replace with other classes to test those
        print_directory()
        print_arguments()
        print_form(form)
        print_environ(environ)
    except:
        print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
    """Dump the current (or given) exception as escaped HTML."""
    if type is None:
        type, value, tb = sys.exc_info()
    import traceback
    print
    print "<H3>Traceback (most recent call last):</H3>"
    # NOTE(review): local 'list' shadows the builtin.
    list = traceback.format_tb(tb, limit) + \
           traceback.format_exception_only(type, value)
    print "<PRE>%s<B>%s</B></PRE>" % (
        escape("".join(list[:-1])),
        escape(list[-1]),
        )
    # Break the traceback reference cycle.
    del tb
def print_environ(environ=os.environ):
    """Dump the shell environment as HTML."""
    keys = environ.keys()
    keys.sort()
    print
    print "<H3>Shell Environment:</H3>"
    print "<DL>"
    for key in keys:
        # Escape both names and values before embedding them in HTML.
        print "<DT>", escape(key), "<DD>", escape(environ[key])
    print "</DL>"
    print
def print_form(form):
    """Dump the contents of a form as HTML."""
    keys = form.keys()
    keys.sort()
    print
    print "<H3>Form Contents:</H3>"
    if not keys:
        print "<P>No form fields."
    print "<DL>"
    for key in keys:
        print "<DT>" + escape(key) + ":",
        value = form[key]
        # Show the concrete part type alongside its repr.
        print "<i>" + escape(repr(type(value))) + "</i>"
        print "<DD>" + escape(repr(value))
    print "</DL>"
    print
def print_directory():
    """Dump the current directory as HTML."""
    print
    print "<H3>Current Working Directory:</H3>"
    try:
        pwd = os.getcwd()
    except os.error, msg:
        # getcwd can fail (e.g. directory deleted); report instead of crash.
        print "os.error:", escape(str(msg))
    else:
        print escape(pwd)
    print
def print_arguments():
    """Dump the command line arguments as HTML."""
    print
    print "<H3>Command Line Arguments:</H3>"
    print
    print sys.argv
    print
def print_environ_usage():
    """Dump a list of environment variables used by CGI as HTML."""
    # Static reference text; emitted verbatim as a single HTML fragment.
    print """
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well.  Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
"""
# Utilities
# =========
def escape(s, quote=None):
    '''Replace special characters "&", "<" and ">" to HTML-safe sequences.
    If the optional flag quote is true, the quotation mark character (")
    is also translated.'''
    # "&" is listed first so ampersands introduced by the later
    # substitutions are not escaped a second time.
    substitutions = [("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")]
    if quote:
        substitutions.append(('"', "&quot;"))
    for raw, entity in substitutions:
        s = s.replace(raw, entity)
    return s
def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
    """Return a match object when *s* is an acceptable MIME boundary.

    A boundary is 1 to 201 printable ASCII characters whose last
    character is not a space; anything else yields None (falsy).
    """
    import re
    match = re.match(_vb_pattern, s)
    return match
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
    test()      # run the self-test CGI when executed as a script
| astagi/twitterene | twitterene/tweepy/cgi.py | Python | gpl-3.0 | 34,465 | 0.002234 |
# coding=utf-8
"""
a csv trace reader
Author: Jason Yang <[email protected]> 2016/06
"""
import string
from PyMimircache.const import ALLOW_C_MIMIRCACHE, INSTALL_PHASE
from PyMimircache.utils.printing import *
if ALLOW_C_MIMIRCACHE and not INSTALL_PHASE:
import PyMimircache.CMimircache.CacheReader as c_cacheReader
from PyMimircache.cacheReader.abstractReader import AbstractReader
class CsvReader(AbstractReader):
    """
    CsvReader class

    Reads requests out of a CSV trace file.  Column positions (label,
    optional real_time/size) come from init_params; the file is opened in
    binary mode and decoded line by line.
    """
    all = ["read_one_req", "read_complete_req", "lines_dict",
           "lines", "read_time_req", "reset", "copy", "get_params"]

    def __init__(self, file_loc,
                 data_type='c',
                 init_params=None,
                 block_unit_size=0,
                 disk_sector_size=0,
                 open_c_reader=True,
                 **kwargs):
        """
        :param file_loc:          location of the file
        :param data_type:         type of data, can be "l" for int/long, "c" for string
        :param init_params:       the init_params for opening csv
        :param block_unit_size:   block size for storage system, 0 when disabled
        :param disk_sector_size:  size of disk sector
        :param open_c_reader:     bool for whether open reader in C backend
        :param kwargs:            not used now
        """
        super(CsvReader, self).__init__(file_loc, data_type, block_unit_size, disk_sector_size,
                                        open_c_reader, kwargs.get("lock", None))
        assert init_params is not None, "please provide init_param for csvReader"
        assert "label" in init_params, "please provide label for csv reader"

        self.trace_file = open(file_loc, 'rb')
        self.init_params = init_params
        self.label_column = init_params['label']
        # BUGFIX: the original used init_params.get("real_time", ) /
        # get("size", ), whose default is None, so the "!= -1" checks below
        # were always true and support_real_time/support_size were set even
        # for traces without those columns.  Default to -1 (absent) instead.
        self.time_column = init_params.get("real_time", -1)
        self.size_column = init_params.get("size", -1)

        if self.time_column != -1:
            self.support_real_time = True
        if self.size_column != -1:
            self.support_size = True

        if block_unit_size != 0:
            assert "size" in init_params, "please provide size_column option to consider request size"

        self.header_bool = init_params.get('header', False)
        self.delimiter = init_params.get('delimiter', ",")
        if "delimiter" not in init_params:
            INFO("open {} using default delimiter \",\" for CsvReader".format(file_loc))

        if self.header_bool:
            # Consume the header row and remember the cleaned column names.
            self.headers = [i.strip(string.whitespace) for i in
                            self.trace_file.readline().decode().split(self.delimiter)]

        if ALLOW_C_MIMIRCACHE and open_c_reader:
            self.c_reader = c_cacheReader.setup_reader(file_loc, 'c', data_type=data_type,
                                                       block_unit_size=block_unit_size,
                                                       disk_sector_size=disk_sector_size,
                                                       init_params=init_params)

    def read_one_req(self):
        """
        read one request, return the lbn/objID
        :return: the label of the next request, or None at EOF
        """
        super().read_one_req()

        line = self.trace_file.readline().decode('utf-8', 'ignore')
        while line and len(line.strip()) == 0:
            line = self.trace_file.readline().decode()

        if not line:
            return None
        ret = line.split(self.delimiter)[self.label_column - 1].strip()
        if self.data_type == 'l':
            ret = int(ret)
            if self.block_unit_size != 0 and self.disk_sector_size != 0:
                # Convert a byte LBN to a block number.
                ret = ret * self.disk_sector_size // self.block_unit_size
        return ret

    def read_complete_req(self):
        """
        read the complete line, including request and its all related info
        :return: a list of all info of the request, or None at EOF
        """
        super().read_one_req()

        line = self.trace_file.readline().decode()
        while line and len(line.strip()) == 0:
            line = self.trace_file.readline().decode()

        if not line:
            return None
        line_split = line.strip().split(self.delimiter)
        if self.block_unit_size != 0 and self.disk_sector_size != 0:
            line_split[self.label_column - 1] = line_split[self.label_column - 1] * \
                                                self.disk_sector_size // self.block_unit_size
        return line_split

    def lines_dict(self):
        """
        a generator mapping column header -> field for each line
        note this function does not convert lbn even if disk_sector_size and block_unit_size are set
        """
        line = self.trace_file.readline().decode()
        while line and len(line.strip()) == 0:
            line = self.trace_file.readline().decode()

        while line:
            fields = line.split(self.delimiter)
            d = {}
            if self.header_bool:
                for i in range(len(self.headers)):
                    d[self.headers[i]] = fields[i].strip(string.whitespace)
            else:
                for key, value in enumerate(fields):
                    d[key] = value
            # BUGFIX: the original forgot .decode() here, so the second
            # iteration split bytes with a str delimiter and raised.
            line = self.trace_file.readline().decode()
            yield d

    def lines(self):
        """
        a generator yielding the raw fields of each line as a tuple
        """
        line = self.trace_file.readline().decode()
        while line and len(line.strip()) == 0:
            line = self.trace_file.readline().decode()

        while line:
            line_split = tuple(line.split(self.delimiter))
            # BUGFIX: the original forgot .decode() here as well.
            line = self.trace_file.readline().decode()
            yield line_split

    def read_time_req(self):
        """
        return real_time information for the request in the form of (time, request)
        :return: (time, label) tuple, or None at EOF
        """
        super().read_one_req()
        line = self.trace_file.readline().strip().decode()
        while line and len(line.strip()) == 0:
            line = self.trace_file.readline().decode()

        if not line:
            return None
        line = line.split(self.delimiter)
        try:
            time = float(line[self.time_column - 1].strip())
            lbn = line[self.label_column - 1].strip()
            if self.data_type == 'l':
                lbn = int(lbn)
                if self.block_unit_size != 0 and self.disk_sector_size != 0:
                    lbn = lbn * self.disk_sector_size // self.block_unit_size
            return time, lbn
        except Exception as e:
            # Malformed line: report and fall through (returns None).
            print("ERROR csvReader reading data: {}, current line: {}".format(e, line))

    def skip_n_req(self, n):
        """
        skip N requests from current position

        :param n: the number of requests to skip
        """
        for _ in range(n):
            self.read_one_req()

    def reset(self):
        """
        reset reader to initial state
        """
        super().reset()
        if self.header_bool:
            # Skip the header row again after seeking back to the start.
            self.trace_file.readline()

    def copy(self, open_c_reader=False):
        """
        reader a deep copy of current reader with everything reset to initial state,
        the returned reader should not interfere with current reader

        :param open_c_reader: whether open_c_reader_or_not, default not open
        :return: a copied reader
        """
        return CsvReader(self.file_loc, self.data_type, self.init_params,
                         self.block_unit_size, self.disk_sector_size, open_c_reader, lock=self.lock)

    def get_params(self):
        """
        return all the parameters for this reader instance in a dictionary

        :return: a dictionary containing all parameters
        """
        return {
            "file_loc": self.file_loc,
            "init_params": self.init_params,
            "data_type": self.data_type,
            "block_unit_size": self.block_unit_size,
            "disk_sector_size": self.disk_sector_size,
            "open_c_reader": self.open_c_reader,
            "lock": self.lock
        }

    def __next__(self):  # Python 3
        super().__next__()
        element = self.read_one_req()
        if element is not None:
            return element
        else:
            raise StopIteration

    def __repr__(self):
        return "csvReader for trace {}".format(self.file_loc)
| 1a1a11a/mimircache | PyMimircache/cacheReader/csvReader.py | Python | gpl-3.0 | 8,745 | 0.003316 |
import Aplicacion
import Probabilidades as pr
from Menu import *
from Variable import *
from validador import *
#------------------------------------------------
#--------------- TODO ---------------------------
#------------------------------------------------
# 1) List of pending tasks to implement.
# 2) If you don't like it, remove it :).
#------------------------------------------------
#------------------------------------------------
#------------------------------------------------
class Crypto(Aplicacion.Aplicacion):
    """
    Interactive menu-driven tool that encrypts/decrypts a base-36 key by
    adding/subtracting a (optionally auto-generated) base-36 seed.

    Built on the project's Aplicacion/Menu framework (Python 2).
    """
    #-----------------------
    #--- initialization ----
    #-----------------------
    def iniciar(self,**args):
        # program state: a weighted random generator over base-36 digits,
        # used to produce seeds
        self.probs = pr.Probabilidad()
        self.probs.cargarDatos("0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z")
        # user-configurable variables (seed and seed length)
        self.vars["semilla"] = Variable(0,self.modifSemilla,orden=0)
        self.vars["longitudSemilla"] = Variable(16,self.modifLongitudSemilla,orden=1)
        # menu items: encrypt / decrypt
        self.agregarMenu(0,Leaf("Encriptar","",self.encriptar))
        self.agregarMenu(1,Leaf("Desencriptar","",self.desencriptar))
        self.modifSemilla("semilla")
        self.vars["semilla"].valor = self.generarSemilla(self.vars["longitudSemilla"].valor)

    #-----------------------
    #--- functions ---------
    #-----------------------
    def base36encode(self,number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
        """Converts an integer to a base36 string."""
        if not isinstance(number, (int, long)):
            raise TypeError('number must be an integer')

        base36 = ''
        sign = ''

        if number < 0:
            sign = '-'
            number = -number

        # single-digit fast path
        if 0 <= number < len(alphabet):
            return sign + alphabet[number]

        # repeated divmod builds the digits least-significant first
        while number != 0:
            number, i = divmod(number, len(alphabet))
            base36 = alphabet[i] + base36

        return sign + base36

    def encriptar(self):
        """Prompt for a key and a seed (both base-36) and print key + seed."""
        self.espaciador()
        print "Ingrese la clave a encriptar:"
        clave = int(validador.ingresar(str),36)
        print "Ingrese la semilla a utilizar:"
        semilla = int(validador.ingresar(str),36)
        print "codigo encriptado: (ANOTATELO)"
        print self.doEncriptar(clave,semilla)
        self.espaciador()

    def doEncriptar(self,clave,semilla):
        # encryption is simple addition in base 36
        return self.base36encode(clave + semilla)

    def desencriptar(self):
        """Prompt for the encrypted code and the seed; print the original key."""
        self.espaciador()
        print "Ingrese el codigo encriptado:"
        criptado = validador.ingresar(str)
        print "Ingrese la semilla utilizada:"
        semilla = validador.ingresar(str)
        self.espaciador()
        print "el codigo descencriptado es:"
        print self.doDesencriptar(criptado,semilla)

    def doDesencriptar(self,criptado,semilla):
        # decryption reverses the addition: code - seed, back to base 36
        return self.base36encode(int(criptado,36) - int(semilla,36))

    def generarSemilla(self,longitud):
        # draw `longitud` random base-36 digits
        crypto = ""
        for i in range(0,longitud):
            crypto += self.probs.generar()
        return crypto

    #-----------------------
    #--- modifiers ---------
    #-----------------------
    # All modifiers use this structure, ALWAYS keeping the signature (self,key,*params):
    def modifSemilla(self,key,*params):
        # regenerate the seed and show it to the user
        print "Se genera una nueva Semilla:"
        self.vars["semilla"].valor = self.generarSemilla(self.vars["longitudSemilla"].valor)
        print self.vars["semilla"].valor

    def modifLongitudSemilla(self,key,*params):
        # change the seed length (validated to 5..32), then regenerate the seed
        print "Ingrese la nueva longitud (entre 5 y 32)"
        longitud = validador.ingresar(int,validador.entre,5,32)
        self.vars["longitudSemilla"].valor = longitud
        self.modifSemilla("semilla")

    #-----------------------
    #--- other -------------
    #-----------------------
    # Optional hook. To show extra information (or run a command) in the main
    # menu (above the application name), override this method.
    # This method prints usage help for the application.
    def ayuda(self):
        print "Para encriptar: \n"
        print "1) Ingresar la clave que se quiere encriptar."
        print "2) Ingresar una semilla. " + self.appNombre+ " " + self.version + " ofrece una semilla generada de forma aleatoria, de ancho configurable. Su uso es opcional."
        print "3) (RECOMENDABLE) Guardar el codigo encriptado."
        print ""
        print "Para desencriptar:\n"
        print "1) Ingresar el codigo encriptado."
        print "2) Ingresar la semilla utilizada para la encriptacion.\n"
        print "3) Se mostrara en pantalla el codigo desencriptado."

    # Custom exit message hook (currently disabled).
    def salirPrint(self):
        pass# self.doEncriptar("Hasta la vista Baby")
# Program entry point: instantiate the main application class with its name,
# version and graphical-mode flag (any extra configuration is passed as
# additional comma-separated arguments), then hand control to the main menu.
if __name__ == '__main__':
    app = Crypto("Crypto", "1.0.0", False)
    app.menuPrincipal()
| pepitogithub/PythonScripts | crypto.py | Python | gpl-2.0 | 5,608 | 0.023894 |
"""empty message
Revision ID: 177b55430b61
Revises: None
Create Date: 2015-01-29 22:29:18.963249
"""
# revision identifiers, used by Alembic.
revision = '177b55430b61'
# this is the first migration in the chain, so there is no parent revision
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``stream`` table (originally auto-generated by Alembic)."""
    table_definition = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('type', sa.String(length=50), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('stream', *table_definition)
def downgrade():
    """Drop the ``stream`` table, reversing :func:`upgrade`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('stream')
    ### end Alembic commands ###
| WatchPeopleCode/WatchPeopleCode | migrations/versions/177b55430b61_.py | Python | mit | 679 | 0.013255 |
from tests.models_tests import ModelsTestBase
from test_models.core import Language
from tests import dbhelper
from sqlalchemy.exc import IntegrityError
class TestLanguageModel(ModelsTestBase):
    """Exercises the Language model: population, uniqueness, repr and fields."""

    def test_00_init(self):
        """The Language model class must be importable and truthy."""
        assert Language

    def test_01_populate(self):
        """Insert four languages; a duplicate `lang` must raise IntegrityError."""
        dbhelper.add(Language(lang='en', name='English'), self.db)
        dbhelper.add(Language(lang='en-us', name='English US'), self.db)
        dbhelper.add(Language(lang='es', name='Espanol'), self.db)
        dbhelper.add(Language(lang='fr', name='French'), self.db)

        # Duplicate entry — must violate the unique constraint on `lang`
        try:
            dbhelper.add(Language(lang='fr', name='French'), self.db)
            assert False, 'Expected IntegrityError'
        except IntegrityError:
            pass
        assert Language.query.count() == 4

    def test_02_repr_method(self):
        """__repr__ must include the id, lang code and name."""
        languages = Language.query.all()
        for language in languages:
            assert str(language.id) in str(language)
            assert language.lang in str(language)
            assert language.name in str(language)

    def test_04_languages_lang_name_present(self):
        """Every stored language has a non-empty lang and name.

        Bug fix: the original used ``is not ''`` which compares object
        identity with a string literal (implementation-dependent and the
        wrong operator for value comparison); use ``!=`` instead.
        """
        for language in Language.query.all():
            assert (language.lang is not None and language.lang != '')
            assert (language.name is not None and language.name != '')

    def test_05_languages_unique(self):
        """`lang` values are unique and lookups return the expected row."""
        assert Language.query.filter(Language.lang == 'en').count() == 1
        assert Language.query.filter(Language.lang == 'en').first().name == 'English'
| jfillmore/hoops | tests/models_tests/test_model_language.py | Python | mit | 1,525 | 0.001311 |
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
# design:
# a threaded worker accepts connections in the main loop, accepted
# connections are are added to the thread pool as a connection job. On
# keepalive connections are put back in the loop waiting for an event.
# If no event happen after the keep alive timeout, the connectoin is
# closed.
from collections import deque
from datetime import datetime
import errno
from functools import partial
import os
import operator
import socket
import ssl
import sys
import time
from .. import http
from ..http import wsgi
from .. import util
from . import base
from .. import six
try:
import concurrent.futures as futures
except ImportError:
raise RuntimeError("""
You need 'concurrent' installed to use this worker with this python
version.
""")
try:
from asyncio import selectors
except ImportError:
try:
from trollius import selectors
except ImportError:
raise RuntimeError("""
You need 'trollius' installed to use this worker with this python
version.
""")
class TConn(object):
    """State for one accepted client connection handled by the thread pool.

    Holds the socket, its originating listener and address, the lazily
    created HTTP parser, and the keepalive deadline used by the worker's
    expiry queue.
    """

    def __init__(self, cfg, listener, sock, addr):
        self.cfg = cfg
        self.listener = listener
        self.sock = sock
        self.addr = addr

        self.timeout = None   # keepalive deadline, set by set_timeout()
        self.parser = None    # created on first init()

        # set the socket to non blocking so it can wait in the poller
        self.sock.setblocking(False)

    def init(self):
        """Prepare the connection for handling a request.

        Returns True on first use (SSL wrap + parser creation happen here),
        False when the connection is being reused over keepalive.
        """
        self.sock.setblocking(True)

        if self.parser is None:
            # wrap the socket if needed.
            # BUG FIX: the original passed the undefined name `client` to
            # ssl.wrap_socket, raising NameError whenever SSL was enabled;
            # the accepted socket is `self.sock`.
            if self.cfg.is_ssl:
                self.sock = ssl.wrap_socket(self.sock, server_side=True,
                        **self.cfg.ssl_options)

            # initialize the parser
            self.parser = http.RequestParser(self.cfg, self.sock)
            return True
        return False

    def set_timeout(self):
        # record the deadline after which a keepalive connection is killed
        self.timeout = time.time() + self.cfg.keepalive

    def __lt__(self, other):
        # order by deadline so connections can be expired oldest-first
        return self.timeout < other.timeout

    __cmp__ = __lt__
class ThreadWorker(base.Worker):
    """Threaded worker: accepts connections in the main loop and hands each
    request to a thread pool; keepalive connections are parked back in the
    poller until their next event or their timeout."""

    def __init__(self, *args, **kwargs):
        super(ThreadWorker, self).__init__(*args, **kwargs)
        self.worker_connections = self.cfg.worker_connections

        # initialise the pool (created for real in init_process, post-fork)
        self.tpool = None
        self.poller = None
        self.futures = deque()   # in-flight request futures
        self._keep = deque()     # keepalive connections ordered by deadline

    def _wrap_future(self, fs, conn):
        # attach the connection to its future and track it until done
        fs.conn = conn
        self.futures.append(fs)
        fs.add_done_callback(self.finish_request)

    def init_process(self):
        # created here (not __init__) so the pool/poller live in this process
        self.tpool = futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
        self.poller = selectors.DefaultSelector()
        super(ThreadWorker, self).init_process()

    def accept(self, listener):
        """Accept one client and register it for its first read event."""
        try:
            client, addr = listener.accept()
            conn = TConn(self.cfg, listener, client, addr)

            # wait for the read event to handle the connection
            self.poller.register(client, selectors.EVENT_READ,
                    partial(self.handle_client, conn))
        except socket.error as e:
            # EAGAIN/ECONNABORTED/EWOULDBLOCK are normal on a non-blocking
            # listener; anything else is a real error
            if e.args[0] not in (errno.EAGAIN,
                    errno.ECONNABORTED, errno.EWOULDBLOCK):
                raise

    def handle_client(self, conn, client):
        """Read event fired: move the connection from the poller to the pool."""
        # unregister the client from the poller
        self.poller.unregister(client)

        # submit the connection to a worker
        fs = self.tpool.submit(self.handle, conn)
        self._wrap_future(fs, conn)

    def murder_keepalived(self):
        """Close keepalive connections whose deadline has passed.

        _keep is ordered by deadline, so we can stop at the first
        still-valid connection.
        """
        now = time.time()
        while True:
            try:
                # remove the connection from the queue
                conn = self._keep.popleft()
            except IndexError:
                break

            delta = conn.timeout - now
            if delta > 0:
                # not expired yet: put it back and stop scanning
                self._keep.appendleft(conn)
                break
            else:
                # remove the socket from the poller
                self.poller.unregister(conn.sock)

                # close the socket
                util.close(conn.sock)

    def run(self):
        """Main loop: poll listeners and keepalive sockets, dispatch events,
        reap expired connections, then drain the pool on shutdown."""
        # init listeners, add them to the event loop
        for s in self.sockets:
            s.setblocking(False)
            self.poller.register(s, selectors.EVENT_READ, self.accept)

        timeout = self.cfg.timeout or 0.5

        while self.alive:
            # If our parent changed then we shut down.
            if self.ppid != os.getppid():
                self.log.info("Parent changed, shutting down: %s", self)
                return

            # notify the arbiter we are alive
            self.notify()

            events = self.poller.select(0.2)
            for key, mask in events:
                callback = key.data
                callback(key.fileobj)

            # handle keepalive timeouts
            self.murder_keepalived()

            # if we have more connections than the max number of connections
            # accepted on a worker, wait until some complete or exit.
            if len(self.futures) >= self.worker_connections:
                res = futures.wait(self.futures, timeout=timeout)
                # NOTE(review): futures.wait returns a named tuple which is
                # always truthy, so this branch appears unreachable — confirm
                # the intended condition (e.g. checking res.done).
                if not res:
                    self.log.info("max requests achieved")
                    break

        # shutdown the pool
        self.poller.close()
        self.tpool.shutdown(False)

        # wait for the workers
        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)

        # if futures are still pending after the grace period, clean them up
        while True:
            try:
                fs = self.futures.popleft()
            except IndexError:
                break

            sock = fs.conn.sock

            # the future is not running, cancel it
            if not fs.done() and not fs.running():
                fs.cancel()

            # make sure we close the sockets after the graceful timeout
            util.close(sock)

    def finish_request(self, fs):
        """Done-callback for request futures: recycle or close the socket."""
        try:
            (keepalive, conn) = fs.result()
            # if the connection should be kept alived add it
            # to the eventloop and record it
            if keepalive:
                # flag the socket as non blocked
                conn.sock.setblocking(False)

                # register the connection
                conn.set_timeout()
                self._keep.append(conn)

                # add the socket to the event loop
                self.poller.register(conn.sock, selectors.EVENT_READ,
                        partial(self.handle_client, conn))
            else:
                util.close(conn.sock)
        except:
            # an exception happened, make sure to close the
            # socket. (bare except is deliberately broad here: the socket
            # must be closed whatever went wrong in the future)
            util.close(fs.conn.sock)
        finally:
            # remove the future from our list
            try:
                self.futures.remove(fs)
            except ValueError:
                pass

    def handle(self, conn):
        """Thread-pool job: parse and serve one request on `conn`.

        Returns (keepalive, conn); keepalive=True means the caller should
        park the connection for reuse.
        """
        if not conn.init():
            # connection kept alive: it is being reused, so drop it from
            # the keepalive bookkeeping while we serve the new request
            try:
                self._keep.remove(conn)
            except ValueError:
                pass

        keepalive = False
        req = None
        try:
            req = six.next(conn.parser)
            if not req:
                return (False, conn)

            # handle the request
            keepalive = self.handle_request(req, conn)
            if keepalive:
                return (keepalive, conn)
        except http.errors.NoMoreData as e:
            self.log.debug("Ignored premature client disconnection. %s", e)
        except StopIteration as e:
            self.log.debug("Closing connection. %s", e)
        except ssl.SSLError as e:
            if e.args[0] == ssl.SSL_ERROR_EOF:
                self.log.debug("ssl connection closed")
                conn.sock.close()
            else:
                self.log.debug("Error processing SSL request.")
                self.handle_error(req, conn.sock, conn.addr, e)
        except socket.error as e:
            if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):
                self.log.exception("Socket error processing request.")
            else:
                if e.args[0] == errno.ECONNRESET:
                    self.log.debug("Ignoring connection reset")
                else:
                    self.log.debug("Ignoring connection epipe")
        except Exception as e:
            self.handle_error(req, conn.sock, conn.addr, e)

        return (False, conn)

    def handle_request(self, req, conn):
        """Run the WSGI app for one parsed request.

        Returns True when the connection may be kept alive, False when it
        must be closed.
        """
        environ = {}
        resp = None
        try:
            self.cfg.pre_request(self, req)
            request_start = datetime.now()
            resp, environ = wsgi.create(req, conn.sock, conn.addr,
                    conn.listener.getsockname(), self.cfg)
            environ["wsgi.multithread"] = True

            self.nr += 1
            if self.alive and self.nr >= self.max_requests:
                self.log.info("Autorestarting worker after current request.")
                resp.force_close()
                self.alive = False

            if not self.cfg.keepalive:
                resp.force_close()

            respiter = self.wsgi(environ, resp.start_response)
            try:
                if isinstance(respiter, environ['wsgi.file_wrapper']):
                    resp.write_file(respiter)
                else:
                    for item in respiter:
                        resp.write(item)

                resp.close()
                request_time = datetime.now() - request_start
                self.log.access(resp, req, environ, request_time)
            finally:
                if hasattr(respiter, "close"):
                    respiter.close()

            if resp.should_close():
                self.log.debug("Closing connection.")
                return False
        except socket.error:
            exc_info = sys.exc_info()
            # pass to next try-except level
            six.reraise(exc_info[0], exc_info[1], exc_info[2])
        except Exception:
            if resp and resp.headers_sent:
                # If the requests have already been sent, we should close the
                # connection to indicate the error.
                self.log.exception("Error handling request")
                try:
                    conn.sock.shutdown(socket.SHUT_RDWR)
                    conn.sock.close()
                except socket.error:
                    pass
                raise StopIteration()
            raise
        finally:
            try:
                self.cfg.post_request(self, req, environ, resp)
            except Exception:
                self.log.exception("Exception in post_request hook")

        return True
| naziris/HomeSecPi | venv/lib/python2.7/site-packages/gunicorn/workers/gthread.py | Python | apache-2.0 | 10,785 | 0.000649 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
import streamkinect2.version as meta
# Utility function to read the README file.
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Bug fix: the original left the file handle open (``open(...).read()``
    with no close); a ``with`` block now guarantees it is closed.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Package metadata and dependency declaration; name/version/author come from
# the project's version module (imported above as `meta`).
setup(
    name = meta.__project__,
    version = meta.__version__,
    author = meta.__author__,
    author_email = meta.__author_email__,
    description = "A simple network streamer for kinect2 data.",
    license = "BSD",
    keywords = "kinect kinect2 zeroconf bonjour",
    url = "https://github.com/rjw57/stramkinect2",
    # include all packages except the test suite
    packages=find_packages(exclude='test'),
    long_description=read('README.md'),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        "License :: OSI Approved :: BSD License",
    ],
    install_requires=[
        'blinker',
        'enum34',
        'lz4',
        'numpy',
        'pillow',
        'pyzmq',
        'tornado',
        'zeroconf',
    ],
    setup_requires=[
        'nose',
    ],
    tests_require=[
        'coverage'
    ],
    # documentation build dependencies, installed via the `docs` extra
    extras_require={
        'docs': [ 'sphinx', 'docutils', ],
    },
)
| rjw57/streamkinect2 | setup.py | Python | bsd-2-clause | 1,148 | 0.016551 |
def propertycached(fn):
    """Decorator: a read-only property whose value is computed once per
    instance and then cached on the instance as ``_cached_<name>``."""
    cache_attr = "_cached_" + fn.__name__

    @property
    def wrapper(self):
        try:
            return getattr(self, cache_attr)
        except AttributeError:
            value = fn(self)
            setattr(self, cache_attr, value)
            return value

    return wrapper
| koyadovic/Dia | predictive/systems/statistical/analysis/tools/property.py | Python | gpl-2.0 | 271 | 0.00369 |
#!/usr/bin/env python
"""
Train a NNJM (with global context and extended architecture) model.
"""
# argparse description string shown in --help output
usage = 'To train NNJM (with global context and extended architecture) using Theano'
import cPickle
import gzip
import os
import sys
import time
import re
import codecs
import argparse
import datetime
import numpy as np
import theano
import theano.tensor as T
# our libs
import model_gplus
import io_vocab
import io_read_ngram
import io_model
from train_util import *
def process_command_line():
    """
    Parse command-line arguments for NNJM training.

    :return: an ``argparse.Namespace`` with all parsed options
             (the original docstring claimed a 1-tuple, which was wrong).
    """
    parser = argparse.ArgumentParser(description=usage)  # add description
    # positional arguments
    parser.add_argument(
        'train_file', metavar='train_file', type=str, help='train file')
    parser.add_argument(
        'valid_file', metavar='valid_file', type=str, help='valid file')
    parser.add_argument(
        'test_file', metavar='test_file', type=str, help='test file')
    parser.add_argument(
        'ngram_size', metavar='ngram_size', type=int, help='ngram size')
    parser.add_argument('sentence_vector_length',
                        metavar='sentence_vector_length', type=int, help='sentence vector length')
    parser.add_argument(
        'vocab_size', metavar='vocab_size', type=int, help='vocab size')

    # plus model
    parser.add_argument(
        'num_section', metavar='num_section', type=int, help='global vector section number')

    parser.add_argument(
        'vocab_file', metavar='vocab_file', type=str, help='vocab file')

    # optional arguments
    parser.add_argument('--model_file', dest='model_file', type=str,
                        default='', help='load model from a file (default=\'\')')
    parser.add_argument('--emb_dim', dest='emb_dim', type=int,
                        default=128, help='embedding dimension (default=128)')
    parser.add_argument('--hidden_layers', dest='hidden_layers', type=str,
                        default='512', help='hidden layers, e.g. 512-512 (default=512)')
    parser.add_argument('--learning_rate', dest='learning_rate',
                        type=float, default=0.1, help='learning rate (default=0.1)')
    parser.add_argument('--chunk', dest='chunk', type=int, default=2000,
                        help='each time consider batch_size*chunk ngrams (default=2000)')
    parser.add_argument('--valid_freq', dest='valid_freq',
                        type=int, default=1000, help='valid freq (default=1000)')
    parser.add_argument('--option', dest='opt', type=int, default=0,
                        help='option: 0 -- predict last word, 1 -- predict middle word (default=0)')
    parser.add_argument('--act_func', dest='act_func', type=str, default='relu',
                        help='non-linear function: \'tanh\' or \'relu\' (default=\'relu\')')
    parser.add_argument('--finetune', dest='finetune', type=int, default=1,
                        help='after training for this number of epoches, start halving learning rate(default: 1)')
    parser.add_argument('--n_epochs', dest='n_epochs', type=int, default=5,
                        help='number of epochs, i.e. how many times to go throught the training data (default: 5)')

    # joint model
    parser.add_argument('--src_window', dest='src_window', type=int,
                        default=5, help='src window for joint model (default=5)')
    parser.add_argument('--src_lang', dest='src_lang',
                        type=str, default='', help='src lang (default=\'\')')
    parser.add_argument('--tgt_lang', dest='tgt_lang',
                        type=str, default='', help='tgt_lang (default=\'\')')

    # load pretrain model
    parser.add_argument('--load_model', dest='load_model_file', type=str, default=None, help='Load model parameters from a pre-trained model')
    parser.add_argument('--fix_emb', dest='fix_emb', action='store_true', default=False, help='Use pretrain model and fix the embedding matrix during the training process')

    # global non-linearity
    parser.add_argument('--global_nonlinear', dest='global_nonlinear', type=int, default=None, help="Add a non-linear layer after the global mean sum")

    # remove stopwords
    parser.add_argument('--rm_stopwords', dest='stopword_cutoff', type=int, default=-1, help="Remove stopwords from the global sentence vector")

    # adaptive section length splitting
    parser.add_argument('--ada_split', dest='ada_split', action='store_true', default=False, help="Use adaptive section length splitting")

    args = parser.parse_args()
    return args
class TrainGlobalPlusModel(TrainModel):
def loadModelParams(self, ngram_size, src_window, model, max_src_sent_length):
self.ngram_size = ngram_size
self.src_window = src_window
self.model = model
self.max_src_sent_length = max_src_sent_length
self.model_param_loaded = True
def loadGlobalModelParams(self, stopword_cutoff):
self.stopword_cutoff = stopword_cutoff
def loadValidSet(self, valid_data_package):
self.valid_set_x, self.valid_set_y, self.valid_set_sm = valid_data_package
self.shared_valid_set_x, self.shared_valid_set_y, self.shared_valid_set_sm = io_read_ngram.shared_dataset(valid_data_package)
self.shared_valid_set_y = T.cast(self.shared_valid_set_y, 'int32')
self.valid_set_loaded = True
def loadTestSet(self, test_data_package):
self.test_set_x, self.test_set_y, self.test_set_sm = test_data_package
self.shared_test_set_x, self.shared_test_set_y, self.shared_test_set_sm = io_read_ngram.shared_dataset(test_data_package)
self.shared_test_set_y = T.cast(self.shared_test_set_y, 'int32')
self.test_set_loaded = True
def loadBatchData(self, isInitialLoad=False):
src_lang = self.src_lang
tgt_lang = self.tgt_lang
tgt_vocab_size = self.tgt_vocab_size
ngram_size = self.ngram_size
chunk_size = self.chunk_size
src_window = self.src_window
opt = self.opt
(self.data_x, self.data_y, self.data_sm) = io_read_ngram.get_joint_ngrams_with_src_global_matrix(self.src_f, self.tgt_f, self.align_f, \
max_src_sent_length, tgt_vocab_size, ngram_size, src_window, opt, num_read_lines=chunk_size, stopword_cutoff=self.stopword_cutoff)
if isInitialLoad == False:
assert(type(self.model) == model_gplus.ModelGlobalPlus)
return self.model.updateTrainModelInput(self.data_x, self.data_y, self.data_sm)
def displayFirstNExamples(self, n):
if self.src_window < 0:
return
src_vocab, src_vocab_size = io_vocab.load_vocab(self.src_vocab_file)
tgt_vocab, tgt_vocab_size = io_vocab.load_vocab(self.tgt_vocab_file)
src_inverse_vocab = io_vocab.inverse_vocab(src_vocab)
tgt_inverse_vocab = io_vocab.inverse_vocab(tgt_vocab)
assert(n <= self.chunk_size)
for i in xrange(n):
example_x = self.data_x[i]
example_y = self.data_y[i]
sent_idx = example_x[-1]
src_sent_vector = self.data_sm[sent_idx]
src_sent_length = src_sent_vector[0]
src_sent_vector = src_sent_vector[1:src_sent_length+1]
src_window_vector = example_x[:self.src_window*2 + 1]
tgt_gram_vector = example_x[self.src_window*2 + 1:-1]
src_sent_words = io_vocab.getWordsFromIndeces(src_sent_vector, src_inverse_vocab, self.tgt_vocab_size)
src_window_words = io_vocab.getWordsFromIndeces(src_window_vector, src_inverse_vocab, self.tgt_vocab_size)
tgt_gram_words = io_vocab.getWordsFromIndeces(tgt_gram_vector, tgt_inverse_vocab, 0)
output = ""
count = 0
for w in src_window_words:
count += 1
if count == self.src_window + 1:
output += "[" + w + "] "
else:
output += w + " "
output += "|| "
output += " ".join(tgt_gram_words) + " "
output += "===> " + tgt_inverse_vocab[example_y]
output += " |||| "
output += " ".join(src_sent_words) + " "
print output
def trainOnBatch(self, train_model, i, batch_size, num_train_batches, num_train_samples, learning_rate):
ngram_start_id = i * batch_size
ngram_end_id = (i + 1) * batch_size if i < (num_train_batches - 1) else num_train_samples
sm_start_id, sm_end_id = io_read_ngram.get_sentence_matrix_range(self.data_x, ngram_start_id, ngram_end_id)
outputs = train_model(ngram_start_id, ngram_end_id, sm_start_id, sm_end_id, learning_rate)
return outputs
def buildModels(self):
assert(hasattr(self, 'model'))
print "Getting train model ..."
train_model = self.model.getTrainModel(self.data_x, self.data_y, self.data_sm)
print "Getting validation model ..."
valid_model = self.model.getValidationModel(self.shared_valid_set_x, self.shared_valid_set_y, self.shared_valid_set_sm, self.batch_size)
print "Getting test model ..."
test_model = self.model.getTestModel(self.shared_test_set_x, self.shared_test_set_y, self.shared_test_set_sm, self.batch_size)
print "Going to start training now ..."
return (train_model, valid_model, test_model)
def validate(self, model, num_ngrams, batch_size, num_batches):
"""
Return average negative log-likelihood
"""
loss = 0.0
for i in xrange(num_batches):
ngram_start_id = i * batch_size
ngram_end_id = (i + 1) * batch_size if i < (num_batches - 1) else num_ngrams
sm_start_id, sm_end_id = io_read_ngram.get_sentence_matrix_range(self.valid_set_x, ngram_start_id, ngram_end_id)
loss -= model(ngram_start_id, ngram_end_id, sm_start_id, sm_end_id) # model returns sum log likelihood
loss /= num_ngrams
perp = np.exp(loss)
return (loss, perp)
def test(self, model, num_ngrams, batch_size, num_batches):
"""
Return average negative log-likelihood
"""
loss = 0.0
for i in xrange(num_batches):
ngram_start_id = i * batch_size
ngram_end_id = (i + 1) * batch_size if i < (num_batches - 1) else num_ngrams
sm_start_id, sm_end_id = io_read_ngram.get_sentence_matrix_range(self.test_set_x, ngram_start_id, ngram_end_id)
loss -= model(ngram_start_id, ngram_end_id, sm_start_id, sm_end_id) # model returns sum log likelihood
loss /= num_ngrams
perp = np.exp(loss)
return (loss, perp)
if __name__ == '__main__':
    ####################################
    # READ IN PARAMETERS
    args = process_command_line()
    print "Process ID: %d" % (os.getpid())
    print_cml_args(args)

    batch_size = 128
    emb_dim = args.emb_dim  # 128
    hidden_sizes = [int(x) for x in re.split('-', args.hidden_layers)]
    train_file = args.train_file
    learning_rate = args.learning_rate
    ngram_size = args.ngram_size
    valid_freq = args.valid_freq
    opt = args.opt
    chunk_size = args.chunk
    act_func = args.act_func
    src_lang = args.src_lang
    tgt_lang = args.tgt_lang
    vocab_size = args.vocab_size
    finetune_epoch = args.finetune
    n_epochs = args.n_epochs

    src_window = args.src_window
    num_section = args.num_section
    load_model_file = args.load_model_file
    fix_emb = args.fix_emb
    global_nonlinear = args.global_nonlinear
    stopword_cutoff = args.stopword_cutoff
    ada_split = args.ada_split
    # a joint model needs both language codes
    assert src_lang != '' and tgt_lang != ''

    # all the global context (sentence) will be extended to this length to
    # ensure a uniform length
    max_src_sent_length = args.sentence_vector_length  # often around 100

    ####################################
    # LOAD VACAB
    # <words> is a list of words as in string
    # <vocab_map> is a dict mapping from word string to integer number of 1,2,...|Vocab|
    # <vocab_size> is the size of vocab == len(words) == len(vocab_map).
    src_vocab_file = args.vocab_file + '.' + \
        str(args.vocab_size) + '.vocab.' + src_lang
    tgt_vocab_file = args.vocab_file + '.' + \
        str(args.vocab_size) + '.vocab.' + tgt_lang
    (src_vocab_map, src_vocab_size) = io_vocab.load_vocab(
        src_vocab_file)
    (tgt_vocab_map, tgt_vocab_size) = io_vocab.load_vocab(
        tgt_vocab_file)

    #######################################
    # LOAD VALID NGRAMS, LOAD TEST NGRAMS
    # <valid_set_x> is a list of list, each of the list in valid_set_x is a n-gram of word, each word is represented by an integer
    # for e.g. [128, 11, 13, 33, 17, 22, 0, 0, 11, 3]
    # <valid_set_y> is a list of integers each represent a next-word following the list of word in valid_set_x
    src_valid_file = args.valid_file + '.' + \
        str(args.vocab_size) + '.id.' + src_lang
    tgt_valid_file = args.valid_file + '.' + \
        str(args.vocab_size) + '.id.' + tgt_lang
    # valid_set_sm is the sentence matrix
    (valid_set_x, valid_set_y, valid_set_sm) = io_read_ngram.get_all_joint_ngrams_with_src_global_matrix(src_valid_file, tgt_valid_file, args.valid_file + '.align',
                                                                                                         max_src_sent_length, tgt_vocab_size, ngram_size, src_window, opt, stopword_cutoff=stopword_cutoff)

    src_test_file = args.test_file + '.' + \
        str(args.vocab_size) + '.id.' + src_lang
    tgt_test_file = args.test_file + '.' + \
        str(args.vocab_size) + '.id.' + tgt_lang
    (test_set_x, test_set_y, test_set_sm) = io_read_ngram.get_all_joint_ngrams_with_src_global_matrix(src_test_file, tgt_test_file, args.test_file + '.align',
                                                                                                      max_src_sent_length, tgt_vocab_size, ngram_size, src_window, opt, stopword_cutoff=stopword_cutoff)

    # context size: a window either side of the aligned source word plus
    # the target-side history
    if src_window >= 0:
        local_context_size = 2 * src_window + ngram_size  # 2 * 5 + 5 = 15
    else:
        local_context_size = ngram_size - 1
    # global_context_size = max_src_sent_length
    in_vocab_size = src_vocab_size + tgt_vocab_size
    out_vocab_size = tgt_vocab_size

    # Load model (optional warm start from a pre-trained model)
    if args.load_model_file is not None:
        model_parameters = io_model.load_model(args.load_model_file)
    else:
        model_parameters = None

    #####################################
    # BUILD MODEL
    print "Start modeling part..."
    nnjm_global_model = model_gplus.ModelGlobalPlus(local_context_size, in_vocab_size, emb_dim, hidden_sizes, act_func, out_vocab_size,
                                                    num_section, model_parameters, fix_emb, global_nonlinear, ada_split)
    nnjm_global_model.buildModel()
    # model = nnlm_model.getModelSymbols()

    ####################################
    # START TRAINING
    print "Start training part... (1/2: loading)"
    train_model = TrainGlobalPlusModel()
    train_model.loadVocab(src_lang, tgt_lang, tgt_vocab_size, src_vocab_file, tgt_vocab_file)
    train_model.loadValidSet((valid_set_x, valid_set_y, valid_set_sm))
    train_model.loadTestSet((test_set_x, test_set_y, test_set_sm))
    train_model.loadModelParams(ngram_size, src_window, nnjm_global_model, max_src_sent_length)
    train_model.loadTrainParams(train_file, batch_size, learning_rate, opt, valid_freq, finetune_epoch, chunk_size, vocab_size, n_epochs)
    train_model.loadGlobalModelParams(stopword_cutoff)
    print "Start training part... (2/2: training)"
    train_model.train()
| yuhaozhang/nnjm-global | code/train_nnjm_gplus.py | Python | mit | 15,583 | 0.005134 |
# CUPS Cloudprint - Print via Google Cloud Print
# Copyright (C) 2014 Simon Cadman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import os
import logging
import sys
import grp
import base64
import fcntl
import termios
import struct
class Utils(object):
    """Shared helpers for the CUPS Cloud Print scripts: log setup, CUPS
    group discovery, file permission fixes, base64 job encoding, locale
    helpers and terminal utilities."""

    # Default path of the connector's log file.
    logpath = '/var/log/cups/cloudprint_log'

    # Countries where letter sized paper is used, according to:
    # http://en.wikipedia.org/wiki/Letter_(paper_size)
    _LETTER_COUNTRIES = set(('US', 'CA', 'MX', 'BO', 'CO', 'VE', 'PH', 'CL'))

    PROTOCOL_NAME = 'gcp'
    GUI = False
    PROTOCOL = PROTOCOL_NAME + '://'
    OLD_PROTOCOL_NAME = 'cloudprint'
    OLD_PROTOCOL = OLD_PROTOCOL_NAME + '://'

    # Job type -> MIME type used when building base64 data URLs.
    _MIMETYPES_JOBTYPES = {'pdf': 'application/pdf',
                           'other': 'application/octet-stream',
                           'jpg': 'image/jpeg',
                           'png': 'image/png'}

    @staticmethod
    def FixFilePermissions(filename):
        """Make filename mode 0660 and owned by the CUPS group.

        Args:
          filename: string, path of the file to adjust.
        Returns:
          tuple of booleans: (permissions ok, ownership ok).
        """
        filePermissions = True
        fileOwnerships = True
        currentStat = None
        if os.path.exists(filename):
            currentStat = os.stat(filename)

        if currentStat is None or currentStat.st_mode != 0o100660:
            try:
                os.chmod(filename, 0o100660)
            except Exception:
                filePermissions = False
                sys.stderr.write(
                    "DEBUG: Cannot alter " +
                    filename +
                    " file permissions\n")

        if currentStat is None or currentStat.st_gid != Utils.GetLPID():
            try:
                os.chown(filename, -1, Utils.GetLPID())
            except Exception:
                fileOwnerships = False
                sys.stderr.write(
                    "DEBUG: Cannot alter " +
                    filename +
                    " file ownership\n")

        return filePermissions, fileOwnerships

    @staticmethod
    def SetupLogging(logpath=None):
        """Configure the root logger to write to logpath.

        Falls back to stderr-only logging when the file is not writable.

        Args:
          logpath: string, log file path; defaults to Utils.logpath.
        Returns:
          boolean: False if the log file could not be used.
        """
        returnValue = True
        logformat = "%(asctime)s|%(levelname)s|%(message)s"
        dateformat = "%Y-%m-%d %H:%M:%S"
        if logpath is None:
            logpath = Utils.logpath
        try:
            logging.basicConfig(
                filename=logpath,
                level=logging.INFO,
                format=logformat,
                datefmt=dateformat)
            Utils.FixFilePermissions(logpath)
        except Exception:
            logging.basicConfig(
                level=logging.INFO,
                format=logformat,
                datefmt=dateformat)
            logging.error("Unable to write to log file " + logpath)
            returnValue = False
        return returnValue

    @staticmethod
    def fileIsPDF(filedata):
        """Check if a file is or isnt a PDF

        Args:
          filedata: string, contents of the file to check
        Returns:
          boolean: True = is a PDF, False = not a PDF.
        """
        p = subprocess.Popen(["file", '-'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        output = p.communicate(filedata)[0]
        logging.debug("File output was: " + output)
        return "PDF document" in output

    @staticmethod
    def is_exe(fpath):
        """Return True when fpath exists and is executable."""
        return os.path.exists(fpath) and os.access(fpath, os.X_OK)

    @staticmethod
    def which(program):
        """Search PATH for an executable named program.

        Returns:
          string: full path of the executable, or None when not found.
        """
        for path in os.environ["PATH"].split(os.pathsep):
            exe_file = os.path.join(path, program)
            if Utils.is_exe(exe_file):
                return exe_file
        return None

    @staticmethod
    def GetLPID(default='lp', alternative='cups', useFiles=True,
                blacklistedGroups=None,
                useFilesOnly=False):
        """Determine the group id that CUPS files should belong to.

        First inspects the group of well-known CUPS files (unless useFiles
        is False), then falls back to the 'lp' and 'cups' groups by name.

        Returns:
          int group id, or None if no suitable group was found.
        """
        if blacklistedGroups is None:
            blacklistedGroups = ['adm', 'wheel', 'root']

        blacklistedGroupIds = []
        for group in blacklistedGroups:
            try:
                blacklistedGroupIds.append(grp.getgrnam(group).gr_gid)
            except Exception:
                logging.debug("Group " + group + " not found")

        if useFiles:
            # check files in order
            for cupsConfigFile in ['/var/log/cups/access_log',
                                   '/etc/cups/ppd',
                                   '/usr/local/etc/cups/ppd']:
                if os.path.exists(cupsConfigFile):
                    configGid = os.stat(cupsConfigFile).st_gid
                    if configGid not in blacklistedGroupIds:
                        return configGid
                    else:
                        logging.debug(
                            "Group " +
                            str(configGid) +
                            " excluded as blacklisted")

        if useFilesOnly:
            return None

        # try lp first, then cups
        lpgrp = None
        try:
            lpgrp = grp.getgrnam(default)
        except Exception:
            try:
                lpgrp = grp.getgrnam(alternative)
            except Exception:
                pass
        if lpgrp is None:
            return None
        else:
            return lpgrp.gr_gid

    @staticmethod
    def ShowVersion(CCPVersion):
        """Print the version and exit when invoked as '<script> version'.

        Returns:
          False when no version argument was supplied.
        """
        if len(sys.argv) == 2 and sys.argv[1] == 'version':
            # write() instead of a print statement: identical output, and
            # the module also parses under Python 3.
            sys.stdout.write("CUPS Cloud Print Version %s\n" % CCPVersion)
            sys.exit(0)
        return False

    @staticmethod
    def ReadFile(pathname):
        """Read contents of a file and return content.

        Args:
          pathname: string, (path)name of file.
        Returns:
          string: contents of file, or None on error.
        """
        try:
            f = open(pathname, 'rb')
            try:
                # close the handle even if read() fails; the previous
                # implementation leaked it
                return f.read()
            finally:
                f.close()
        except IOError as e:
            # the old code printed the literal '%s' placeholders with the
            # arguments appended; actually interpolate them
            sys.stdout.write('ERROR: Error opening %s\n%s\n' % (pathname, e))
            return None

    @staticmethod
    def WriteFile(file_name, data):
        """Write contents of data to a file_name.

        Args:
          file_name: string, (path)name of file.
          data: string, contents to write to file.
        Returns:
          boolean: True = success, False = errors.
        """
        status = True
        try:
            f = open(file_name, 'wb')
            try:
                f.write(data)
            finally:
                f.close()
        except IOError:
            status = False
        return status

    @staticmethod
    def Base64Encode(data, jobtype):
        """Convert job data to a base64 encoded data URL.

        Args:
          data: data to base64 encode
          jobtype: job type being encoded - pdf, jpg etc
        Returns:
          string: base64 encoded data URL.

        For more info on data urls, see:
          http://en.wikipedia.org/wiki/Data_URI_scheme
        """
        # Unknown job types fall back to the generic octet-stream type.
        mimetype = Utils._MIMETYPES_JOBTYPES.get(
            jobtype, Utils._MIMETYPES_JOBTYPES['other'])
        header = 'data:%s;base64,' % mimetype
        return header + base64.b64encode(data)

    @staticmethod
    def GetLanguage(locale, cupshelper=None):
        """Determine the (language, locale) pair to use.

        The CUPS server's DefaultLanguage setting wins when available,
        otherwise the first entry of the supplied locale tuple is used.

        Returns:
          tuple of (language, locale), e.g. ('en', 'en_GB').
        """
        newlocale = None
        if cupshelper is not None:
            newlocale = cupshelper.getServerSetting('DefaultLanguage')
        if newlocale is None:
            if len(locale) < 1 or locale[0] is None:
                return ('en', 'en')
            newlocale = locale[0]
        language = newlocale
        if '_' in newlocale:
            language = newlocale.split("_")[0]
        return (language, newlocale)

    @staticmethod
    def GetDefaultPaperType(locale):
        """Return 'Letter' or 'A4' depending on the locale's country."""
        defaultpapertype = "Letter"
        if len(locale.split('_')) > 1 and \
                locale.split('_')[1].upper() not in Utils._LETTER_COUNTRIES:
            defaultpapertype = "A4"
        return defaultpapertype

    @staticmethod
    def GetWindowSize(winsize=None):
        """Gets window height and width.

        Uses the TIOCGWINSZ ioctl on stdout unless a packed 'HHHH' struct
        is supplied directly (mainly useful for testing).

        Returns:
          The tuple (height, width) of the window as integers, or None if
          the window size isn't available.
        """
        try:
            if winsize is None:
                structbytes = struct.pack('HHHH', 0, 0, 0, 0)
                winsize = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, structbytes)
            height, width = struct.unpack('HHHH', winsize)[:2]
        except Exception:
            return None

        if height > 0 and width > 0:
            return height, width
        return None

    @staticmethod
    def StdInToTempFile(jobID, userName, stdin=None):
        """Copy a print job from stdin into a temp file, return its path.

        Exits the process with status 1 when the file cannot be created.
        """
        if stdin is None:
            stdin = sys.stdin
        tmpDir = os.getenv('TMPDIR')
        if not tmpDir:
            tmpDir = "/tmp"
        tempFile = '%s/%s-%s-cupsjob-%s' % \
            (tmpDir, jobID, userName, str(os.getpid()))
        try:
            OUT = open(tempFile, 'w')
        except IOError:
            # open() raises on failure rather than returning a false
            # handle, so the error must be caught, not tested for
            logging.error("Cannot write temp file: %s", tempFile)
            sys.stdout.write("ERROR: Cannot write %s\n" % tempFile)
            sys.exit(1)
        try:
            for line in stdin:
                OUT.write(line)
        finally:
            OUT.close()
        return tempFile

    @staticmethod
    def hasGUI():
        """Return True when launching a GUI browser is allowed."""
        return Utils.GUI

    @staticmethod
    def openBrowserWithUrl(url):
        """Open url in the desktop browser; return False on failure."""
        if not Utils.hasGUI():
            return False
        try:
            if sys.platform == "darwin":
                subprocess.Popen(['open', url])
            else:
                subprocess.Popen(['xdg-open', url])
        except Exception:
            return False
        return True
| jjscarafia/CUPS-Cloud-Print | ccputils.py | Python | gpl-3.0 | 10,118 | 0.000297 |
class Permission(object):
    """The most basic permission: a delimited name, possibly with wildcards.

    A permission is fully defined by its name. Wildcard segments let one
    permission grant a whole family of permissions of the same form, which
    is useful when the number of concrete permissions is large, infinite,
    or undetermined. Note: permissions with different delimiters and
    wildcards are treated as the same, so don't use multiple delimiters or
    wildcards unless you know completely what you're doing.
    """

    def __init__(self, name, description=None, delimiter=".", wildcard="*"):
        """Create a Permission with the given name and optional description.

        :param name: String name of the permission; defines what it grants.
        :param description: Human-readable description of the abilities this
            permission grants.
        :param delimiter: Character used to separate segments. Default: "."
        :param wildcard: Character used as the wildcard. Default: "*"
        """
        self.delimiter = delimiter
        self.segments = name.split(self.delimiter)
        self.description = description
        self.wildcard = wildcard
        self.state = dict()

    @property
    def is_wildcard(self):
        """True when any segment of this permission is the wildcard.

        :rtype: True or False
        """
        return self.wildcard in self.segments

    @property
    def is_end_wildcard(self):
        """True when this permission ends with a wildcard.

        Terminating wildcards are treated differently from other wildcards,
        as they may represent any number of trailing segments rather than
        just a single one.

        :rtype: True or False
        """
        return self.segments[-1] == self.wildcard

    def grants_permission(self, other_permission):
        """Check whether this permission grants the supplied permission.

        :param other_permission: :py:class:`Permission` or string name.
        :rtype: True or False
        """
        if other_permission is None:
            return True
        if not isinstance(other_permission, Permission):
            # Accept plain permission names (both str and unicode on
            # Python 2); the former isinstance(..., basestring) check
            # raised NameError on Python 3.
            other_permission = Permission(name=other_permission)
        if len(self.segments) < len(other_permission.segments) and not self.is_end_wildcard:
            return False
        if len(self.segments) > len(other_permission.segments):
            return False
        for ours, theirs in zip(self.segments, other_permission.segments):
            if ours != theirs and ours != self.wildcard:
                return False
        return True

    def grants_any_permission(self, permission_set):
        """Check whether this permission grants any permission in the set.

        :param permission_set: iterable of Permissions to check.
        :rtype: True or False
        """
        return any(self.grants_permission(perm) for perm in permission_set)

    @property
    def name(self):
        """The full delimited name of this permission.

        :rtype: :py:class:`str`
        """
        return self.delimiter.join(self.segments)

    def __eq__(self, other):
        # Name equality is always required; the description only takes part
        # in the comparison when this permission actually has one (legacy,
        # intentionally asymmetric rule).
        if not hasattr(other, "name"):
            return False
        if not self.description:
            return self.name == other.name
        if not hasattr(other, "description"):
            return False
        return self.name == other.name and self.description == other.description

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "{cls}({name}, {desc})".format(cls=self.__class__.__name__, name=self.name, desc=self.description)

    def __hash__(self):
        # Hash on the name only: __eq__ can consider two permissions equal
        # even when their descriptions differ, so folding the description
        # into the hash (as the previous implementation did) violated the
        # requirement that equal objects have equal hashes.
        return hash(self.name)

    @staticmethod
    def meets_requirements(permission, **kwargs):
        """Basic requirement check: any truthy permission qualifies."""
        return bool(permission)
class DynamicPermission(Permission):
    """Base class for permissions whose evaluation requires state.

    Dynamic permissions need additional information before they can be
    evaluated (such as access to a database); subclasses provide the
    stateful logic in :py:meth:`_grants_permission`.
    """

    # Templates this dynamic permission uses to match other permissions.
    templates = []

    def grants_permission(self, other_permission):
        """Check whether this permission grants the supplied permission.

        :param other_permission: :py:class:`Permission` or string name.
        :rtype: True or False
        """
        candidate = getattr(other_permission, 'name', other_permission)
        if candidate == self.name:
            return True
        for template in self.templates:
            matched, components = template.matches_format(other_permission)
            if matched:
                return self._grants_permission(components, template)
        return False

    def _grants_permission(self, components, template):
        """Stateful evaluation hook implemented by subclasses.

        Only permissions matching one of :py:attr:`templates` reach this
        method, and only the wildcard portion of the template is supplied.
        The template itself is passed along so the components can be
        interpreted correctly.

        :param components: :py:class:`list` of captured segments.
        :param template: The matching :py:class:`PermissionTemplate`.
        :rtype: True or False
        """
        raise NotImplementedError()

    def create_stateful_permission(self, state):
        """Return a copy of this permission carrying the given state."""
        if self.state:
            raise Exception("You cannot create a stateful permission from a stateful permission")
        clone = self.__class__(name=self.name, description=self.description,
                               delimiter=self.delimiter, wildcard=self.wildcard)
        clone.state = state
        return clone
class PermissionSet(set):
    """A set of permissions with convenience checks across all members."""

    def grants_permission(self, other_permission):
        """True when any member of this set grants *other_permission*.

        :param other_permission: :py:class:`Permission` or string name.
        :rtype: True or False
        """
        return any(member.grants_permission(other_permission) for member in self)

    def grants_any_permission(self, permission_set):
        """True when any member grants any permission in *permission_set*.

        :param permission_set: iterable of Permissions to check.
        :rtype: True or False
        """
        # O(n^2) :( Can be done faster.
        return any(self.grants_permission(candidate) for candidate in permission_set)

    def has_any_permission(self, other_permission):
        """True when this set holds a permission of the given form.

        Strictly speaking, checks whether any member of the set would be
        granted by *other_permission*.

        :param other_permission: the permission form to look for.
        :rtype: True or False
        """
        if isinstance(other_permission, basestring):
            other_permission = Permission(name=other_permission)
        return other_permission.grants_any_permission(self)

    def statefulize(self, state=None):
        """Return a new set whose dynamic members carry *state*.

        :param state: :py:class:`dict` of evaluation state.
        :rtype: :py:class:`PermissionSet`
        """
        statefulized = PermissionSet()
        for member in self:
            if hasattr(member, 'create_stateful_permission') and not member.state:
                statefulized.add(member.create_stateful_permission(state))
            else:
                statefulized.add(member)
        return statefulized

    def __getattr__(self, item):
        # Wrap set-typed results so derived sets remain PermissionSets.
        value = getattr(super(PermissionSet, self), item)
        return PermissionSet(value) if isinstance(value, set) else value
| Acidity/PyPermissions | pypermissions/permission.py | Python | mit | 8,509 | 0.004231 |
#!/usr/bin/env python
#
# Copyright (c) 2018 The heketi Authors
#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
# cases as published by the Free Software Foundation.
#
import argparse
import json
import sys
import yaml
# Help text shown by argparse when this tool is run with --help.
DESC = """
Compare outputs of gluster and/or heketi and/or openshift/k8s.
Prints lists of volumes where sources differ.
"""

# Usage example appended to the argparse help output (PEP8: space around =).
EXAMPLE = """
Example:
   $ python3 comparison.py
       --gluster-info gluster-volume-info.txt
       --heketi-json heketi-db.json
       --pv-yaml openshift-pv-yaml.yaml
"""
def main():
    """Parse the command line, load the requested sources and print the
    comparison table to stdout."""
    parser = argparse.ArgumentParser(description=DESC, epilog=EXAMPLE)
    parser.add_argument(
        '--gluster-info', '-g',
        help='Path to a file containing gluster volume info')
    parser.add_argument(
        '--heketi-json', '-j',
        help='Path to a file containing Heketi db json export')
    parser.add_argument(
        '--pv-yaml', '-y',
        help='Path to a file containing PV yaml data')
    parser.add_argument(
        '--skip-ok', '-K', action='store_true',
        help='Exclude matching items from output')
    parser.add_argument(
        '--pending', action='store_true',
        help='Show heketi pending status (best effort)')
    parser.add_argument(
        '--no-header', '-H', action='store_true',
        help='Do not print column header')
    parser.add_argument(
        '--ignore', '-I', action='append',
        help='Exlude given volume name (multiple allowed)')
    args = parser.parse_args()

    # Load each requested source; remember which ones were given.
    gvinfo = heketi = pvdata = None
    sources = []
    if args.gluster_info:
        gvinfo = parse_gvinfo(args.gluster_info)
        sources.append('gluster')
    if args.heketi_json:
        heketi = parse_heketi(args.heketi_json)
        sources.append('heketi')
    if args.pv_yaml:
        pvdata = parse_oshift(args.pv_yaml)
        sources.append('pvs')
    if not sources:
        parser.error(
            "Must provide: --gluster-info OR --heketi-json OR --pv-yaml")

    summary = compile_summary(gvinfo, heketi, pvdata)
    for volume_name in (args.ignore or []):
        if summary.pop(volume_name, None):
            sys.stderr.write('ignoring: {}\n'.format(volume_name))
    compare(summary, sources, args.skip_ok,
            header=(not args.no_header),
            show_pending=(args.pending))
def parse_heketi(h_json):
    """Load and return a Heketi db json export from path *h_json*."""
    with open(h_json) as source:
        return json.load(source)
def parse_oshift(yf):
    """Load and return openshift/k8s PV yaml data from path *yf*."""
    with open(yf) as source:
        return yaml.safe_load(source)
def parse_gvlist(gvl):
    """Read a file listing one gluster volume name per line.

    Returns a dict mapping each name to an (empty) brick list, matching
    the shape produced by parse_gvinfo.
    """
    with open(gvl) as fh:
        return {line.strip(): [] for line in fh}
def parse_gvinfo(gvi):
    """Parse `gluster volume info` output from path *gvi*.

    Returns a dict mapping each volume name to its list of bricks.
    Raises ValueError if a Brick line appears before any volume header.
    """
    vols = {}
    current = None
    with open(gvi) as fh:
        for raw in fh:
            line = raw.strip()
            if line.startswith("Volume Name:"):
                current = line.split(":", 1)[-1].strip()
                vols[current] = []
            if line.startswith('Brick') and line != "Bricks:":
                if current is None:
                    raise ValueError("Got Brick before volume: %s" % line)
                vols[current].append(line.split(":", 1)[-1].strip())
    return vols
def compile_heketi(summary, heketi):
    """Record every heketi volume into *summary*, keyed by volume name.

    Stores the heketi volume id and flags pending volumes (best effort).
    """
    for vol_id, entry in heketi['volumeentries'].items():
        name = entry['Info']['name']
        summary[name] = {'id': vol_id, 'heketi': True}
        if entry['Pending']['Id']:
            summary[name]['heketi-pending'] = True
def compile_gvinfo(summary, gvinfo):
    """Flag every gluster volume in *summary*, adding entries as needed."""
    for vol_name in gvinfo:
        summary.setdefault(vol_name, {})['gluster'] = True
def compile_pvdata(summary, pvdata):
    """Flag every gluster-backed PV in *summary*, adding entries as needed.

    PVs without a glusterfs spec are skipped.
    """
    for item in pvdata['items']:
        gluster_spec = item.get('spec', {}).get('glusterfs', {})
        if not gluster_spec:
            continue
        summary.setdefault(gluster_spec['path'], {})['pvs'] = True
def compile_summary(gvinfo, heketi, pvdata):
    """Merge all provided sources into one {volume_name: flags} mapping.

    Any of the three sources may be None; heketi is folded in first so its
    volume ids are available to the other sources' entries.
    """
    result = {}
    if heketi:
        compile_heketi(result, heketi)
    if gvinfo:
        compile_gvinfo(result, gvinfo)
    if pvdata:
        compile_pvdata(result, pvdata)
    return result
def compare(summary, check, skip_ok=False, header=True, show_pending=False):
    """Print one line per volume showing which sources it appears in.

    Volumes present in every checked source print 'ok'; others print the
    comma separated list of sources that do contain them.
    """
    printer = Printer(['Volume-Name', 'Match', 'Volume-ID'] if header else [])
    for vol_name, flags in summary.items():
        fully_matched = all(flags.get(source) for source in check)
        if fully_matched and skip_ok:
            continue
        heketi_info = flags.get('id', '')
        if show_pending and flags.get('heketi-pending'):
            heketi_info += '/pending'
        if fully_matched:
            printer.line(vol_name, 'ok', heketi_info)
        else:
            matches = ','.join(
                sorted(source for source in check if flags.get(source)))
            printer.line(vol_name, matches, heketi_info)
class Printer(object):
    """Utility class for printing columns w/ headers."""

    def __init__(self, header):
        self.header = header or []
        self._did_header = False

    def line(self, *columns):
        """Print one row, emitting the header first if configured."""
        if self.header and not self._did_header:
            self._print_header(columns)
            self._did_header = True
        print(' '.join(columns))

    def _print_header(self, columns):
        """Print header labels padded to the width of the first data row."""
        padded = []
        for idx, label in enumerate(self.header):
            fill = max(0, len(columns[idx]) - len(label))
            padded.append('{}{}'.format(label, ' ' * fill))
        print(' '.join(padded))
# Allow running this module directly as a command line tool.
if __name__ == '__main__':
    main()
| enj/origin | vendor/github.com/heketi/heketi/extras/tools/comparison.py | Python | apache-2.0 | 5,562 | 0.001079 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import xmlrpclib
import requests
from turbogears.database import session
from bkr.inttest.server.selenium import WebDriverTestCase, XmlRpcTestCase
from bkr.inttest.server.webdriver_utils import login, delete_and_confirm
from bkr.inttest import data_setup, with_transaction, get_server_base
from bkr.server.model import Permission, User
def go_to_distro_view(browser, distro):
    """Navigate *browser* to the details page of the given distro."""
    url = get_server_base() + 'distros/view?id=%s' % distro.id
    browser.get(url)
class DistroViewTest(WebDriverTestCase):
    """WebDriver tests for the distro view page: tag management
    permissions, activity logging, and the provision links column."""
    @with_transaction
    def setUp(self):
        # One distro pre-tagged with SAD, plus an unprivileged user.
        self.distro = data_setup.create_distro()
        self.distro.tags.append(u'SAD')
        self.user = data_setup.create_user(password=u'distro')
        self.browser = self.get_browser()
    def test_can_add_tag_to_distro(self):
        """Admins can add a tag; the change shows up in the UI and in the
        distro activity log."""
        b = self.browser
        login(b, data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
        go_to_distro_view(b, self.distro)
        b.find_element_by_id('tags_tag_text').send_keys('HAPPY')
        b.find_element_by_link_text('Add').click()
        self.assertEquals(b.find_element_by_class_name('flash').text,
                'Added Tag HAPPY')
        b.find_element_by_xpath(
                '//td[normalize-space(text())="HAPPY"]')
        # Re-read the distro from the database and check the audit row.
        with session.begin():
            session.refresh(self.distro)
            activity = self.distro.activity[0]
            self.assertEquals(activity.field_name, u'Tag')
            self.assertEquals(activity.service, u'WEBUI')
            self.assertEquals(activity.action, u'Added')
            self.assertEquals(activity.old_value, None)
            self.assertEquals(activity.new_value, u'HAPPY')
    def test_can_remove_tag_from_distro(self):
        """Admins can remove a tag; the change shows up in the UI and in
        the distro activity log."""
        b = self.browser
        login(b, data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
        go_to_distro_view(b, self.distro)
        delete_and_confirm(b, '//td[normalize-space(preceding-sibling::td[1]/text())="SAD"]')
        self.assertEquals(b.find_element_by_class_name('flash').text,
                'Removed Tag SAD')
        # The tags table must no longer contain a SAD cell.
        b.find_element_by_xpath('//div[@class="tags"]//table[not('
                './/td[normalize-space(text())="SAD"])]')
        with session.begin():
            session.refresh(self.distro)
            self.assert_(u'SAD' not in self.distro.tags)
        # Audit trail: a "Removed" activity row must have been recorded.
        with session.begin():
            session.refresh(self.distro)
            activity = self.distro.activity[0]
            self.assertEquals(activity.field_name, u'Tag')
            self.assertEquals(activity.service, u'WEBUI')
            self.assertEquals(activity.action, u'Removed')
            self.assertEquals(activity.old_value, u'SAD')
            self.assertEquals(activity.new_value, None)
    def test_non_admin_user_cannot_add_tag(self):
        """Non-admins see no tag controls, and the direct URL returns 403."""
        b = self.browser
        login(b, self.user.user_name, 'distro')
        go_to_distro_view(b, self.distro)
        b.find_element_by_xpath('//div[@class="tags" and not(.//a)]')
        response = requests.get(get_server_base() +
                'distros/save_tag?id=%s&tag.text=HAPPY' % self.distro.id)
        self.assertEquals(response.status_code, 403)
    def test_non_admin_user_cannot_remove_tag(self):
        """Non-admins see no tag controls, and the direct URL returns 403."""
        b = self.browser
        login(b, self.user.user_name, 'distro')
        go_to_distro_view(b, self.distro)
        b.find_element_by_xpath('//div[@class="tags" and not(.//a)]')
        response = requests.get(get_server_base() +
                'distros/tag_remove?id=%s&tag=SAD' % self.distro.id)
        self.assertEquals(response.status_code, 403)
    # https://bugzilla.redhat.com/show_bug.cgi?id=830940
    def test_provision_links_arent_shown_for_expired_trees(self):
        """Only trees still present on a lab controller get a Provision
        link; expired trees show an empty cell."""
        with session.begin():
            not_expired_tree = data_setup.create_distro_tree(
                    distro=self.distro, variant=u'Client')
            expired_tree = data_setup.create_distro_tree(
                    distro=self.distro, variant=u'Server')
            session.flush()
            # Dropping all lab controller associations marks it expired.
            expired_tree.lab_controller_assocs[:] = []
        b = self.browser
        login(b, data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
        go_to_distro_view(b, self.distro)
        self.assertEquals(b.find_element_by_xpath(
                '//table//tr[td[1]/a/text()="%s"]/td[4]'
                % not_expired_tree.id).text,
                'Provision')
        self.assertEquals(b.find_element_by_xpath(
                '//table//tr[td[1]/a/text()="%s"]/td[4]'
                % expired_tree.id).text,
                '')
class DistroExpireXmlRpcTest(XmlRpcTestCase):
    """XML-RPC tests for distros.expire()."""
    @with_transaction
    def setUp(self):
        # A user in a group holding distro_expire permission, acting as a
        # lab controller which has one x86_64 tree of the distro.
        self.group = data_setup.create_group()
        # grant the group distro_expire permission
        self.group.permissions.append(Permission.by_name('distro_expire'))
        self.user = data_setup.create_user(password=u'password')
        self.group.add_member(self.user)
        self.lc = data_setup.create_labcontroller(user=self.user)
        self.distro = data_setup.create_distro()
        self.distro_tree = data_setup.create_distro_tree(distro=self.distro,
            arch='x86_64', lab_controllers=[self.lc])
        self.server = self.get_server()
    def test_activity_created_with_expire(self):
        """Expiring a distro records the caller-supplied service name in
        the distro tree activity log."""
        self.server.auth.login_password(self.user.user_name, u'password')
        self.server.distros.expire(self.distro.name, 'CUSTOMSERVICE')
        # Discard cached state so the activity row is re-read from the DB.
        session.expire_all()
        with session.begin():
            activity = self.distro_tree.activity[0]
            self.assertEquals(activity.service, u'CUSTOMSERVICE')
class DistroEditVersionXmlRpcTest(XmlRpcTestCase):
    """XML-RPC tests for distros.edit_version()."""

    @with_transaction
    def setUp(self):
        self.distro = data_setup.create_distro()
        self.server = self.get_server()

    # https://bugzilla.redhat.com/show_bug.cgi?id=1173368
    def test_empty_version(self):
        """Setting an empty version must be rejected with a fault."""
        self.server.auth.login_password(data_setup.ADMIN_USER,
                                        data_setup.ADMIN_PASSWORD)
        try:
            self.server.distros.edit_version(self.distro.name, '')
        except xmlrpclib.Fault as e:
            self.assertIn('OSMajor cannot be empty', e.faultString)
        else:
            self.fail('should raise')
class DistroTaggingXmlRpcTest(XmlRpcTestCase):
    """XML-RPC tests for distros.tag()/distros.untag(): admin-only
    access control and activity logging."""
    @with_transaction
    def setUp(self):
        # Distro pre-tagged with SAD, plus a non-admin user.
        self.distro = data_setup.create_distro()
        self.distro.tags.append(u'SAD')
        self.user = data_setup.create_user(password=u'distro')
        self.server = self.get_server()
    def test_can_add_tag_to_distro(self):
        """Admins can tag a distro over XML-RPC."""
        self.server.auth.login_password(
                data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
        self.server.distros.tag(self.distro.name, 'HAPPY')
        with session.begin():
            session.refresh(self.distro)
            self.assert_(u'HAPPY' in self.distro.tags)
    def test_can_remove_tag_from_distro(self):
        """Admins can untag a distro over XML-RPC."""
        self.server.auth.login_password(
                data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
        self.server.distros.untag(self.distro.name, 'SAD')
        with session.begin():
            session.refresh(self.distro)
            self.assert_(u'SAD' not in self.distro.tags)
    def test_non_admin_user_cannot_add_tag(self):
        """Tagging requires admin rights: expect an IdentityFailure fault."""
        self.server.auth.login_password(self.user.user_name, 'distro')
        try:
            self.server.distros.tag(self.distro.name, 'HAPPY')
            self.fail('should raise')
        except xmlrpclib.Fault, e:
            self.assert_('IdentityFailure' in e.faultString)
    def test_non_admin_user_cannot_remove_tag(self):
        """Untagging requires admin rights: expect an IdentityFailure fault."""
        self.server.auth.login_password(self.user.user_name, 'distro')
        try:
            self.server.distros.untag(self.distro.name, 'SAD')
            self.fail('should raise')
        except xmlrpclib.Fault, e:
            self.assert_('IdentityFailure' in e.faultString)
    def test_adding_tag_is_recorded_in_distro_activity(self):
        """Tagging over XML-RPC creates an 'Added' activity row with the
        XMLRPC service name."""
        self.server.auth.login_password(
                data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
        self.server.distros.tag(self.distro.name, 'HAPPY')
        with session.begin():
            session.refresh(self.distro)
            activity = self.distro.activity[0]
            self.assertEquals(activity.field_name, u'Tag')
            self.assertEquals(activity.service, u'XMLRPC')
            self.assertEquals(activity.action, u'Added')
            self.assertEquals(activity.old_value, None)
            self.assertEquals(activity.new_value, u'HAPPY')
    def test_removing_tag_is_recorded_in_distro_activity(self):
        """Untagging over XML-RPC creates a 'Removed' activity row with
        the XMLRPC service name."""
        self.server.auth.login_password(
                data_setup.ADMIN_USER, data_setup.ADMIN_PASSWORD)
        self.server.distros.untag(self.distro.name, 'SAD')
        with session.begin():
            session.refresh(self.distro)
            activity = self.distro.activity[0]
            self.assertEquals(activity.field_name, u'Tag')
            self.assertEquals(activity.service, u'XMLRPC')
            self.assertEquals(activity.action, u'Removed')
            self.assertEquals(activity.old_value, u'SAD')
            self.assertEquals(activity.new_value, None)
| jtoppins/beaker | IntegrationTests/src/bkr/inttest/server/selenium/test_distros.py | Python | gpl-2.0 | 9,272 | 0.001294 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 acrazing <[email protected]>. All rights reserved.
# @since 2018-12-03 00:03:40
import time
from dbapi.DoubanAPI import DoubanAPI
class GroupAPI:
    """Crawl the douban contact graph starting at the logged-in user and
    apply to join every group that any visited user is a member of."""

    def __init__(self):
        self.api = DoubanAPI(flush=False)
        self._applied = {}  # group alias -> True once joined or applied to
        self._users = {}    # user alias -> True once visited (cycle guard)

    def run(self):
        """Log in, record the groups already joined, then start crawling."""
        self.api.flush()
        groups = self.api.group.list_joined_groups()['results']
        for group in groups:
            self._applied[group['alias']] = True
        self.handle_user(self.api.user_alias)

    def handle_user(self, user_alias):
        """Join this user's groups, then recurse into their contacts."""
        # Mark the user as visited *before* recursing: contact graphs are
        # cyclic, and the previous code only marked a user after the
        # recursive call returned, so A -> B -> A recursed without bound.
        self._users[user_alias] = True
        self.join_user_groups(user_alias)
        # NOTE(review): list_contacts() takes no alias argument, so this
        # presumably always lists the authenticated user's contacts --
        # confirm whether it should receive user_alias.
        users = self.api.people.list_contacts()['results']
        for user in users:
            if self._users.get(user['alias'], None) is None:
                self.handle_user(user['alias'])
                time.sleep(30)
            else:
                print('skip user: %s' % (user['alias']))

    def join_user_groups(self, user_alias):
        """Apply to join every group of *user_alias* not yet applied to."""
        groups = self.api.group.list_joined_groups(user_alias)['results']
        for group in groups:
            if self._applied.get(group['alias'], None) is None:
                self.api.group.join_group(group['alias'], 'Hello ~')
                self._applied[group['alias']] = True
                time.sleep(30)
            else:
                print('skip group: %s' % (group['alias']))
# Entry point: crawl the contact graph and join groups when run directly.
if __name__ == '__main__':
    group = GroupAPI()
    group.run()
| acrazing/dbapi | scripts/join_group.py | Python | mit | 1,512 | 0 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timedelta
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
# Base execution date used by the DAGs below.
DEFAULT_DATE = datetime(2016, 1, 1)
# DAG tests backfill with pooled tasks
# Previously backfill would queue the task but never run it
# dag1 starts one day in the future, so the scheduler must not run it yet.
dag1 = DAG(dag_id='test_start_date_scheduling', start_date=datetime.utcnow() + timedelta(days=1))
dag1_task1 = DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
# dag2 starts at DEFAULT_DATE, but dummy1 carries its own start_date three
# days later, exercising task-level start_date scheduling.
dag2 = DAG(dag_id='test_task_start_date_scheduling', start_date=DEFAULT_DATE)
dag2_task1 = DummyOperator(
    task_id='dummy1', dag=dag2, owner='airflow', start_date=DEFAULT_DATE + timedelta(days=3)
)
dag2_task2 = DummyOperator(task_id='dummy2', dag=dag2, owner='airflow')
| mrkm4ntr/incubator-airflow | tests/dags/test_scheduler_dags.py | Python | apache-2.0 | 1,500 | 0.001333 |
#!/usr/bin/env python
# encoding: utf-8
"""Module contains common utilities used in automated code tests for Gensim modules.
Attributes:
-----------
module_path : str
Full path to this module directory.
common_texts : list of list of str
Toy dataset.
common_dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
Dictionary of toy dataset.
common_corpus : list of list of (int, int)
Corpus of toy dataset.
Examples:
---------
It's easy to keep objects in a temporary folder and reuse them if needed:
.. sourcecode:: pycon
>>> from gensim.models import word2vec
>>> from gensim.test.utils import get_tmpfile, common_texts
>>>
>>> model = word2vec.Word2Vec(common_texts, min_count=1)
>>> temp_path = get_tmpfile('toy_w2v')
>>> model.save(temp_path)
>>>
>>> new_model = word2vec.Word2Vec.load(temp_path)
>>> result = new_model.wv.most_similar("human", topn=1)
Let's print first document in toy dataset and then recreate it using its corpus and dictionary.
.. sourcecode:: pycon
>>> from gensim.test.utils import common_texts, common_dictionary, common_corpus
>>> print(common_texts[0])
['human', 'interface', 'computer']
>>> assert common_dictionary.doc2bow(common_texts[0]) == common_corpus[0]
We can find our toy set in test data directory.
.. sourcecode:: pycon
>>> from gensim.test.utils import datapath
>>>
>>> with open(datapath("testcorpus.txt")) as f:
... texts = [line.strip().split() for line in f]
>>> print(texts[0])
['computer', 'human', 'interface']
If you don't need to keep temporary objects on disk use :func:`~gensim.test.utils.temporary_file`:
.. sourcecode:: pycon
>>> from gensim.test.utils import temporary_file, common_corpus, common_dictionary
>>> from gensim.models import LdaModel
>>>
>>> with temporary_file("temp.txt") as tf:
... lda = LdaModel(common_corpus, id2word=common_dictionary, num_topics=3)
... lda.save(tf)
"""
import contextlib
import tempfile
import os
import shutil
from gensim.corpora import Dictionary
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
def datapath(fname):
    """Return the absolute path of test-data file *fname*.

    Test fixtures (usually corpora) live in the ``test_data`` directory
    next to this module; this helper simply joins *fname* onto it.

    Parameters
    ----------
    fname : str
        Name of file.

    Returns
    -------
    str
        Full path to `fname` in the test_data folder.

    Example
    -------
    .. sourcecode:: pycon

        >>> from gensim.corpora import MmCorpus
        >>> from gensim.test.utils import datapath
        >>>
        >>> corpus = MmCorpus(datapath("testcorpus.mm"))
        >>> for document in corpus:
        ...     pass

    """
    return os.path.join(module_path, 'test_data', fname)
def get_tmpfile(suffix):
    """Return the full path of file *suffix* in the temporary folder.

    This function only composes a unique-looking name; it does not create
    the file, and consecutive calls may return different paths.

    Parameters
    ----------
    suffix : str
        File name to place inside the temporary folder.

    Returns
    -------
    str
        Path to `suffix` file in the temporary folder.

    Examples
    --------
    The returned path can be used, for example, to store a temporary model.

    .. sourcecode:: pycon

        >>> from gensim.models import LsiModel
        >>> from gensim.test.utils import get_tmpfile, common_dictionary, common_corpus
        >>>
        >>> tmp_f = get_tmpfile("toy_lsi_model")
        >>>
        >>> model = LsiModel(common_corpus, id2word=common_dictionary)
        >>> model.save(tmp_f)
        >>>
        >>> loaded_model = LsiModel.load(tmp_f)

    """
    return os.path.join(tempfile.gettempdir(), suffix)
@contextlib.contextmanager
def temporary_file(name=""):
    """Yield the path of file *name* inside a fresh temporary directory.

    The directory (including anything written into it) is deleted when the
    context exits. Note that the file itself is never created here.

    Parameters
    ----------
    name : str
        Filename.

    Yields
    ------
    str
        Path to file `name` in the temporary directory.

    Examples
    --------
    The temporary directory and its contents disappear with the context:

    .. sourcecode:: pycon

        >>> import os
        >>> from gensim.test.utils import temporary_file
        >>> with temporary_file("temp.txt") as tf, open(tf, 'w') as outfile:
        ...     outfile.write("my extremely useful information")
        ...     print("Is this file exists? {}".format(os.path.exists(tf)))
        ...     print("Is this folder exists? {}".format(os.path.exists(os.path.dirname(tf))))
        Is this file exists? True
        Is this folder exists? True
        >>>
        >>> print("Is this file exists? {}".format(os.path.exists(tf)))
        Is this file exists? False
        >>> print("Is this folder exists? {}".format(os.path.exists(os.path.dirname(tf))))
        Is this folder exists? False

    """
    # note : when dropping python2.7 support, we can use tempfile.TemporaryDirectory
    scratch_dir = tempfile.mkdtemp()
    try:
        yield os.path.join(scratch_dir, name)
    finally:
        shutil.rmtree(scratch_dir, ignore_errors=True)
# set up vars used in testing ("Deerwester" from the web tutorial)
# Nine tiny pre-tokenized documents — the classic toy corpus used throughout
# the gensim tutorials (from the Deerwester et al. LSA paper).
common_texts = [
    ['human', 'interface', 'computer'],
    ['survey', 'user', 'computer', 'system', 'response', 'time'],
    ['eps', 'user', 'interface', 'system'],
    ['system', 'human', 'system', 'eps'],
    ['user', 'response', 'time'],
    ['trees'],
    ['graph', 'trees'],
    ['graph', 'minors', 'trees'],
    ['graph', 'minors', 'survey']
]

# Dictionary mapping each distinct token in common_texts to an integer id.
common_dictionary = Dictionary(common_texts)
# The same documents in bag-of-words form: one list of (token_id, count) pairs per document.
common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]
| napsternxg/gensim | gensim/test/utils.py | Python | gpl-3.0 | 5,981 | 0.002508 |
from __future__ import absolute_import

# Version string of the sinastorage SCS client package.
__version__ = "1.1.6"

from .bucket import SCSFile, SCSBucket, SCSError, KeyNotFound
# No-op tuple expression: references the imported names so linters don't
# flag them as unused imports.
SCSFile, SCSBucket, SCSError, KeyNotFound
# Public API of this package.  NOTE(review): KeyNotFound is imported above but
# not listed here — confirm whether it should also be exported.
__all__ = "SCSFile", "SCSBucket", "SCSError"
class appinfo(object):
    """Value object holding SCS credentials and the transport-security flag.

    Attributes mirror the constructor arguments: ``access_key``,
    ``secret_key`` and ``secure`` (whether HTTPS should be used).
    """

    def __init__(self, access_key, secret_key, secure):
        self.secure = secure
        self.access_key = access_key
        self.secret_key = secret_key
def getDefaultAppInfo():
    """Return the process-wide default credentials.

    Yields None until setDefaultAppInfo() rebinds this module-level name to a
    closure producing the configured ``appinfo`` instance.
    """
    return None
def setDefaultAppInfo(access_key, secret_key, secure=False):
    """Install process-wide default credentials.

    Builds an ``appinfo`` from the arguments and rebinds the module-level
    ``getDefaultAppInfo`` so later calls return this configuration.
    """
    configured = appinfo(access_key, secret_key, secure)
    global getDefaultAppInfo
    getDefaultAppInfo = lambda: configured
#!/usr/bin/env python
#Copyright 2004,2008 Sebastian Hagen
# This file is part of gonium.
#
# gonium is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# gonium is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import fcntl
class PidFile:
    """A pid-file: a small file holding this process' pid, guarded by an fcntl lock."""

    def __init__(self, filename:bytes=None):
        """Open pid-file."""
        if (filename is None):
            argv0 = sys.argv[0]
            if (isinstance(argv0, str)):
                # Get rid of silly unicode names
                argv0 = argv0.encode()
            filename = os.path.basename(argv0) + b'.pid'
        # Preserve any existing contents until we actually hold the lock.
        mode = 'r+b' if os.path.exists(filename) else 'wb'
        # The feature allowing for calling open() on bytes filenames was added
        # somewhere between CPython 3.0-rc1 and -rc3. This version is written
        # for 3.0 final, so using it should be fine.
        self.filename = filename
        self.file = open(filename, mode)

    def lock(self, else_die:bool=False):
        """Acquire lock on pid file; if successful, write our pid to it. If
           the optional argument is specified and True, any IOErrors will
           be caught and turned into SystemExits."""
        try:
            fcntl.lockf(self.file.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            if (not else_die):
                raise
            print('Our pid-file {0} is already locked, aborting.'.format(self.filename,))
            sys.exit(0)
        # We hold the lock: record our pid, discarding any previous contents.
        self.file.seek(0)
        self.file.write(ascii(os.getpid()).encode('ascii'))
        self.file.truncate()

    def unlock(self):
        """Release lock on pid file."""
        fcntl.lockf(self.file.fileno(), fcntl.LOCK_UN)
| sh01/gonium | src/pid_filing.py | Python | gpl-2.0 | 2,162 | 0.019889 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import binascii
import io
import logging
import os
import traceback
from builtins import bytes, object, open
from contextlib import contextmanager
from pants.scm.scm import Scm
from pants.util.contextutil import pushd
from pants.util.memo import memoized_method
from pants.util.process_handler import subprocess
from pants.util.strutil import ensure_binary, ensure_text
# 40 is Linux's hard-coded limit for total symlinks followed when resolving a path.
MAX_SYMLINKS_IN_REALPATH = 40
# Length in bytes of a raw (binary, not hex-encoded) git SHA-1 object id.
GIT_HASH_LENGTH = 20

# Precompute these because ensure_binary is slow and we'll need them a lot
SLASH = ensure_binary('/')
NUL = ensure_binary('\0')
SPACE = ensure_binary(' ')
NEWLINE = ensure_binary('\n')
EMPTY_STRING = ensure_binary("")

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class Git(Scm):
  """An Scm implementation backed by git."""

  @classmethod
  def detect_worktree(cls, binary='git', subdir=None):
    """Detect the git working tree above cwd and return it; else, return None.

    :param string binary: The path to the git binary to use, 'git' by default.
    :param string subdir: The path to start searching for a git repo.
    :returns: path to the directory where the git working tree is rooted.
    :rtype: string
    """
    # TODO(John Sirois): This is only used as a factory for a Git instance in
    # pants.base.build_environment.get_scm, encapsulate in a true factory method.
    cmd = [binary, 'rev-parse', '--show-toplevel']
    try:
      if subdir:
        with pushd(subdir):
          process, out = cls._invoke(cmd)
      else:
        process, out = cls._invoke(cmd)
      cls._check_result(cmd, process.returncode, raise_type=Scm.ScmException)
    except Scm.ScmException:
      return None
    return cls._cleanse(out)

  @classmethod
  def clone(cls, repo_url, dest, binary='git'):
    """Clone the repo at repo_url into dest.

    :param string binary: The path to the git binary to use, 'git' by default.
    :returns: an instance of this class representing the cloned repo.
    :rtype: Git
    """
    cmd = [binary, 'clone', repo_url, dest]
    process, out = cls._invoke(cmd)
    cls._check_result(cmd, process.returncode)
    return cls(binary=binary, worktree=dest)

  @classmethod
  def _invoke(cls, cmd):
    """Invoke the given command, and return a tuple of process and raw binary output.

    stderr flows to wherever its currently mapped for the parent process - generally to
    the terminal where the user can see the error.

    :param list cmd: The command in the form of a list of strings
    :returns: The completed process object and its standard output.
    :raises: Scm.LocalException if there was a problem exec'ing the command at all.
    """
    try:
      process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    except OSError as e:
      # Binary DNE or is not executable
      raise cls.LocalException('Failed to execute command {}: {}'.format(' '.join(cmd), e))
    out, _ = process.communicate()
    return process, out

  @classmethod
  def _cleanse(cls, output, errors='strict'):
    """Strip and decode raw git output as utf-8, with the given decode error policy."""
    return output.strip().decode('utf-8', errors=errors)

  @classmethod
  def _check_result(cls, cmd, result, failure_msg=None, raise_type=Scm.ScmException):
    """Raise `raise_type` if `result` is a non-zero exit code."""
    if result != 0:
      # Guard against callers explicitly passing raise_type=None (as _check_call and
      # _check_output do by default), which previously turned the intended Scm error
      # into a confusing "'NoneType' object is not callable" TypeError.
      raise (raise_type or Scm.ScmException)(
          failure_msg or '{} failed with exit code {}'.format(' '.join(cmd), result))

  def __init__(self, binary='git', gitdir=None, worktree=None, remote=None, branch=None):
    """Creates a git scm proxy that assumes the git repository is in the cwd by default.

    binary:    The path to the git binary to use, 'git' by default.
    gitdir:    The path to the repository's git metadata directory (typically '.git').
    worktree:  The path to the git repository working tree directory (typically '.').
    remote:    The default remote to use.
    branch:    The default remote branch to use.
    """
    # Bug fix: this previously called super(Scm, self).__init__(), which resolves to
    # the class *after* Scm in the MRO and therefore silently skipped Scm.__init__.
    super(Git, self).__init__()
    self._gitcmd = binary
    self._worktree = os.path.realpath(worktree or os.getcwd())
    self._gitdir = os.path.realpath(gitdir) if gitdir else os.path.join(self._worktree, '.git')
    self._remote = remote
    self._branch = branch

  def current_rev_identifier(self):
    """The identifier git uses for the currently checked-out revision."""
    return 'HEAD'

  @property
  def worktree(self):
    """The absolute path of the repository working tree."""
    return self._worktree

  @property
  def commit_id(self):
    """The full sha of the current HEAD commit."""
    return self._check_output(['rev-parse', 'HEAD'], raise_type=Scm.LocalException)

  @property
  def server_url(self):
    """The push URL of the 'origin' remote.

    :raises Scm.LocalException: if there is not exactly one push-enabled 'origin' remote.
    """
    git_output = self._check_output(['remote', '--verbose'], raise_type=Scm.LocalException)

    def origin_urls():
      for line in git_output.splitlines():
        name, url, action = line.split()
        if name == 'origin' and action == '(push)':
          yield url

    origins = list(origin_urls())
    if len(origins) != 1:
      raise Scm.LocalException("Unable to find remote named 'origin' that accepts pushes "
                               "amongst:\n{}".format(git_output))
    return origins[0]

  @property
  def tag_name(self):
    """The tag name describing HEAD, or None if git reports it cannot describe HEAD."""
    # Calls to git describe can have bad performance on large repos. Be aware
    # of the performance hit if you use this property.
    tag = self._check_output(['describe', '--tags', '--always'], raise_type=Scm.LocalException)
    return None if 'cannot' in tag else tag

  @property
  def branch_name(self):
    """The name of the currently checked-out branch, or None when HEAD is detached."""
    branch = self._check_output(['rev-parse', '--abbrev-ref', 'HEAD'],
                                raise_type=Scm.LocalException)
    return None if branch == 'HEAD' else branch

  def fix_git_relative_path(self, worktree_path, relative_to):
    """Convert a path git reported relative to the worktree into one relative to `relative_to`."""
    return os.path.relpath(os.path.join(self._worktree, worktree_path), relative_to)

  def changed_files(self, from_commit=None, include_untracked=False, relative_to=None):
    """Return the set of changed files (uncommitted, plus those since `from_commit` if given).

    Paths are returned relative to `relative_to` (the worktree by default).
    """
    relative_to = relative_to or self._worktree
    rel_suffix = ['--', relative_to]
    uncommitted_changes = self._check_output(['diff', '--name-only', 'HEAD'] + rel_suffix,
                                             raise_type=Scm.LocalException)

    files = set(uncommitted_changes.splitlines())
    if from_commit:
      # Grab the diff from the merge-base to HEAD using ... syntax. This ensures we have just
      # the changes that have occurred on the current branch.
      committed_cmd = ['diff', '--name-only', from_commit + '...HEAD'] + rel_suffix
      committed_changes = self._check_output(committed_cmd,
                                             raise_type=Scm.LocalException)
      files.update(committed_changes.split())
    if include_untracked:
      untracked_cmd = ['ls-files', '--other', '--exclude-standard', '--full-name'] + rel_suffix
      untracked = self._check_output(untracked_cmd,
                                     raise_type=Scm.LocalException)
      files.update(untracked.split())
    # git will report changed files relative to the worktree: re-relativize to relative_to
    return {self.fix_git_relative_path(f, relative_to) for f in files}

  def changes_in(self, diffspec, relative_to=None):
    """Return the set of files changed by the commit(s) selected by `diffspec`."""
    relative_to = relative_to or self._worktree
    cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', diffspec]
    files = self._check_output(cmd, raise_type=Scm.LocalException).split()
    return {self.fix_git_relative_path(f.strip(), relative_to) for f in files}

  def changelog(self, from_commit=None, files=None):
    """Return the git log (no merges, with stats) since `from_commit`, optionally limited to `files`."""
    # We force the log output encoding to be UTF-8 here since the user may have a git config that
    # overrides the git UTF-8 default log output encoding.
    args = ['log', '--encoding=UTF-8', '--no-merges', '--stat', '--find-renames', '--find-copies']
    if from_commit:
      args.append(from_commit + '..HEAD')
    if files:
      args.append('--')
      args.extend(files)

    # There are various circumstances that can lead to git logs that are not transcodeable to utf-8,
    # for example: http://comments.gmane.org/gmane.comp.version-control.git/262685
    # Git will not error in these cases and we do not wish to either. Here we direct byte sequences
    # that can not be utf-8 decoded to be replaced with the utf-8 replacement character.
    return self._check_output(args, raise_type=Scm.LocalException, errors='replace')

  def merge_base(self, left='master', right='HEAD'):
    """Returns the merge-base of master and HEAD in bash: `git merge-base left right`"""
    return self._check_output(['merge-base', left, right], raise_type=Scm.LocalException)

  def refresh(self, leave_clean=False):
    """Attempt to pull-with-rebase from upstream.  This is implemented as fetch-plus-rebase
    so that we can distinguish between errors in the fetch stage (likely network errors)
    and errors in the rebase stage (conflicts).  If leave_clean is true, then in the event
    of a rebase failure, the branch will be rolled back.  Otherwise, it will be left in the
    conflicted state.
    """
    remote, merge = self._get_upstream()
    self._check_call(['fetch', '--tags', remote, merge], raise_type=Scm.RemoteException)
    try:
      self._check_call(['rebase', 'FETCH_HEAD'], raise_type=Scm.LocalException)
    except Scm.LocalException as e:
      if leave_clean:
        logger.debug('Cleaning up after failed rebase')
        try:
          self._check_call(['rebase', '--abort'], raise_type=Scm.LocalException)
        except Scm.LocalException as abort_exc:
          logger.debug('Failed to clean up after failed rebase')
          # Bug fix: format_exc takes a traceback depth limit, not an exception; passing
          # the exception object raised a TypeError on Python 3. Called inside this
          # except block, format_exc() formats abort_exc's traceback.
          logger.debug(traceback.format_exc())
          # But let the original exception propagate, since that's the more interesting one
      raise e

  def tag(self, name, message=None):
    """Create annotated tag `name` with `message` and push it to the upstream remote."""
    # We use -a here instead of --annotate to maintain maximum git compatibility.
    # --annotate was only introduced in 1.7.8 via:
    #   https://github.com/git/git/commit/c97eff5a95d57a9561b7c7429e7fcc5d0e3a7f5d
    self._check_call(['tag', '-a', '--message=' + (message or ''), name],
                     raise_type=Scm.LocalException)
    self.push('refs/tags/' + name)

  def commit(self, message, verify=True):
    """Commit all changes with `message`; pass verify=False to skip pre-commit hooks."""
    cmd = ['commit', '--all', '--message=' + message]
    if not verify:
      cmd.append('--no-verify')
    self._check_call(cmd, raise_type=Scm.LocalException)

  def add(self, *paths):
    """Stage the given paths."""
    self._check_call(['add'] + list(paths), raise_type=Scm.LocalException)

  def commit_date(self, commit_reference):
    """Return the committer date of `commit_reference` in ISO format."""
    return self._check_output(['log', '-1', '--pretty=tformat:%ci', commit_reference],
                              raise_type=Scm.LocalException)

  def push(self, *refs):
    """Push the upstream merge branch plus any extra `refs` to the upstream remote."""
    remote, merge = self._get_upstream()
    self._check_call(['push', remote, merge] + list(refs), raise_type=Scm.RemoteException)

  def set_state(self, rev):
    """Check out the given revision."""
    self._check_call(['checkout', rev])

  def _get_upstream(self):
    """Return the remote and remote merge branch for the current branch"""
    if not self._remote or not self._branch:
      branch = self.branch_name
      if not branch:
        raise Scm.LocalException('Failed to determine local branch')

      def get_local_config(key):
        value = self._check_output(['config', '--local', '--get', key],
                                   raise_type=Scm.LocalException)
        return value.strip()

      self._remote = self._remote or get_local_config('branch.{}.remote'.format(branch))
      self._branch = self._branch or get_local_config('branch.{}.merge'.format(branch))
    return self._remote, self._branch

  def _check_call(self, args, failure_msg=None, raise_type=None):
    """Run a git command, raising (`raise_type` or Scm.ScmException) on a non-zero exit."""
    cmd = self._create_git_cmdline(args)
    self._log_call(cmd)
    result = subprocess.call(cmd)
    self._check_result(cmd, result, failure_msg, raise_type)

  def _check_output(self, args, failure_msg=None, raise_type=None, errors='strict'):
    """Run a git command and return its cleansed stdout, raising on a non-zero exit."""
    cmd = self._create_git_cmdline(args)
    self._log_call(cmd)

    process, out = self._invoke(cmd)

    self._check_result(cmd, process.returncode, failure_msg, raise_type)
    return self._cleanse(out, errors=errors)

  def _create_git_cmdline(self, args):
    """Prefix `args` with the git binary and this repo's --git-dir/--work-tree flags."""
    return [self._gitcmd, '--git-dir=' + self._gitdir, '--work-tree=' + self._worktree] + args

  def _log_call(self, cmd):
    """Debug-log the full command line about to be executed."""
    logger.debug('Executing: ' + ' '.join(cmd))

  def repo_reader(self, rev):
    """Return a GitRepositoryReader for reading file content at revision `rev`."""
    return GitRepositoryReader(self, rev)
class GitRepositoryReader(object):
  """
  Allows reading from files and directory information from an arbitrary git
  commit. This is useful for pants-aware git sparse checkouts.
  """

  def __init__(self, scm, rev):
    self.scm = scm
    self.rev = rev
    self._cat_file_process = None
    # Trees is a dict from path to [list of Dir, Symlink or File objects]
    self._trees = {}
    self._realpath_cache = {'.': './', '': './'}

  def _maybe_start_cat_file_process(self):
    """Lazily spawn a single long-lived `git cat-file --batch` subprocess for object reads."""
    if not self._cat_file_process:
      cmdline = self.scm._create_git_cmdline(['cat-file', '--batch'])
      self._cat_file_process = subprocess.Popen(cmdline,
                                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)

  class MissingFileException(Exception):
    """Raised when `relpath` does not exist at revision `rev`."""

    def __init__(self, rev, relpath):
      self.relpath = relpath
      self.rev = rev

    def __str__(self):
      return "MissingFileException({}, {})".format(self.relpath, self.rev)

  class IsDirException(Exception):
    """Raised when a file operation is attempted on a directory."""

    def __init__(self, rev, relpath):
      self.relpath = relpath
      self.rev = rev

    def __str__(self):
      return "IsDirException({}, {})".format(self.relpath, self.rev)

  class NotADirException(Exception):
    """Raised when a directory operation is attempted on a non-directory."""

    def __init__(self, rev, relpath):
      self.relpath = relpath
      self.rev = rev

    def __str__(self):
      return "NotADirException({}, {})".format(self.relpath, self.rev)

  class SymlinkLoopException(Exception):
    """Raised when resolving `relpath` follows more symlinks than the allowed maximum."""

    def __init__(self, rev, relpath):
      self.relpath = relpath
      self.rev = rev

    def __str__(self):
      return "SymlinkLoop({}, {})".format(self.relpath, self.rev)

  class ExternalSymlinkException(Exception):
    """Raised when a symlink points outside the repository worktree."""

    def __init__(self, rev, relpath):
      self.relpath = relpath
      self.rev = rev

    def __str__(self):
      return "ExternalSymlink({}, {})".format(self.relpath, self.rev)

  class GitDiedException(Exception):
    """Raised when the backing `git cat-file --batch` process exits unexpectedly."""

  class UnexpectedGitObjectTypeException(Exception):
    # Programmer error
    pass

  def _safe_realpath(self, relpath):
    """Like _realpath, but returns None instead of raising for missing/non-dir paths."""
    try:
      return self._realpath(relpath)
    except self.MissingFileException:
      return None
    except self.NotADirException:
      return None

  def _safe_read_object(self, relpath, max_symlinks):
    """Like _read_object, but returns (None, relpath) instead of raising for missing/non-dir paths."""
    try:
      return self._read_object(relpath, max_symlinks)
    except self.MissingFileException:
      return None, relpath
    except self.NotADirException:
      return None, relpath

  def exists(self, relpath):
    """Return True if `relpath` exists at this revision (following symlinks)."""
    path = self._safe_realpath(relpath)
    return bool(path)

  def isfile(self, relpath):
    """Return True if `relpath` resolves to a file at this revision."""
    path = self._safe_realpath(relpath)
    if path:
      # _realpath marks directories with a trailing '/'.
      return not path.endswith('/')
    return False

  def isdir(self, relpath):
    """Return True if `relpath` resolves to a directory at this revision."""
    path = self._safe_realpath(relpath)
    if path:
      return path.endswith('/')
    return False

  def lstat(self, relpath):
    """Return the File/Dir/Symlink entry at `relpath` without following a final symlink."""
    obj, _ = self._safe_read_object(relpath, max_symlinks=0)
    return obj

  def readlink(self, relpath):
    """Return the resolved target path of symlink `relpath`, or None if it is not a symlink."""
    # TODO: Relatively inefficient, but easier than changing read_object, unfortunately.
    if not isinstance(self.lstat(relpath), self.Symlink):
      return None
    obj, path_so_far = self._safe_read_object(relpath, max_symlinks=1)
    if obj is None:
      return None
    return path_so_far

  class Symlink(object):
    """A symlink entry in a git tree: its name and blob sha (hex)."""

    def __init__(self, name, sha):
      self.name = name
      self.sha = sha

  class Dir(object):
    """A directory entry in a git tree: its name and tree sha (hex)."""

    def __init__(self, name, sha):
      self.name = name
      self.sha = sha

  class File(object):
    """A regular-file entry in a git tree: its name and blob sha (hex)."""

    def __init__(self, name, sha):
      self.name = name
      self.sha = sha

  def listdir(self, relpath):
    """Like os.listdir, but reads from the git repository.

    :returns: a list of relative filenames
    """
    path = self._realpath(relpath)
    if not path.endswith('/'):
      raise self.NotADirException(self.rev, relpath)

    if path[0] == '/' or path.startswith('../'):
      # Resolved outside the repo (via an external symlink): defer to the filesystem.
      return os.listdir(path)

    tree = self._read_tree(path[:-1])
    return list(tree.keys())

  @contextmanager
  def open(self, relpath):
    """Read a file out of the repository at a certain revision.

    This is complicated because, unlike vanilla git cat-file, this follows symlinks in
    the repo.  If a symlink points outside repo, the file is read from the filesystem;
    that's because presumably whoever put that symlink there knew what they were doing.
    """
    path = self._realpath(relpath)
    if path.endswith('/'):
      raise self.IsDirException(self.rev, relpath)

    if path.startswith('../') or path[0] == '/':
      # Resolved outside the repo: read from the filesystem.  Bug fix: the original
      # `yield open(path, 'rb')` leaked the file handle; close it when the context exits.
      with open(path, 'rb') as external_file:
        yield external_file
      return

    object_type, data = self._read_object_from_repo(rev=self.rev, relpath=path)
    if object_type == b'tree':
      raise self.IsDirException(self.rev, relpath)
    assert object_type == b'blob'
    yield io.BytesIO(data)

  @memoized_method
  def _realpath(self, relpath):
    """Follow symlinks to find the real path to a file or directory in the repo.

    :returns: if the expanded path points to a file, the relative path
              to that file; if a directory, the relative path + '/'; if
              a symlink outside the repo, a path starting with / or ../.
    """
    obj, path_so_far = self._read_object(relpath, MAX_SYMLINKS_IN_REALPATH)
    if isinstance(obj, self.Symlink):
      # Ran out of the symlink budget while still sitting on a symlink.
      raise self.SymlinkLoopException(self.rev, relpath)
    return path_so_far

  def _read_object(self, relpath, max_symlinks):
    """Walk `relpath` component-by-component, following up to `max_symlinks` symlinks.

    :returns: a (File|Dir|Symlink, resolved-path) tuple.
    """
    path_so_far = ''
    components = list(relpath.split(os.path.sep))
    symlinks = 0

    # Consume components to build path_so_far
    while components:
      component = components.pop(0)
      if component == '' or component == '.':
        continue

      parent_tree = self._read_tree(path_so_far)
      parent_path = path_so_far

      if path_so_far != '':
        path_so_far += '/'
      path_so_far += component

      try:
        obj = parent_tree[component.encode('utf-8')]
      except KeyError:
        raise self.MissingFileException(self.rev, relpath)

      if isinstance(obj, self.File):
        if components:
          # We've encountered a file while searching for a directory
          raise self.NotADirException(self.rev, relpath)
        else:
          return obj, path_so_far
      elif isinstance(obj, self.Dir):
        if not components:
          return obj, path_so_far + '/'
        # A dir is OK; we just descend from here
      elif isinstance(obj, self.Symlink):
        symlinks += 1
        if symlinks > max_symlinks:
          return obj, path_so_far
        # A git symlink is stored as a blob containing the name of the target.
        # Read that blob.
        object_type, path_data = self._read_object_from_repo(sha=obj.sha)
        assert object_type == b'blob'

        # Bug fix: compare a one-byte *slice*, not an index — on Python 3 indexing
        # bytes yields an int, so `path_data[0] == b'/'` was always False and
        # absolute symlinks escaped this check.
        if path_data[:1] == b'/':
          # Is absolute, thus likely points outside the repo.
          raise self.ExternalSymlinkException(self.rev, relpath)

        link_to = os.path.normpath(os.path.join(parent_path, path_data.decode('utf-8')))
        if link_to.startswith('../') or link_to[0] == '/':
          # Points outside the repo.
          raise self.ExternalSymlinkException(self.rev, relpath)

        # Restart our search at the top with the new path.
        # Git stores symlinks in terms of Unix paths, so split on '/' instead of os.path.sep
        components = link_to.split('/') + components
        path_so_far = ''
      else:
        # Programmer error
        raise self.UnexpectedGitObjectTypeException()
    return self.Dir('./', None), './'

  def _fixup_dot_relative(self, path):
    """Git doesn't understand dot-relative paths."""
    if path.startswith('./'):
      return path[2:]
    elif path == '.':
      return ''
    return path

  def _read_tree(self, path):
    """Given a revision and path, parse the tree data out of git cat-file output.

    :returns: a dict from filename -> [list of Symlink, Dir, and File objects]
    """
    path = self._fixup_dot_relative(path)

    tree = self._trees.get(path)
    # Bug fix: test against None, not truthiness — an empty directory produces an
    # empty dict, which previously missed the cache and was re-read every call.
    if tree is not None:
      return tree
    tree = {}
    object_type, tree_data = self._read_object_from_repo(rev=self.rev, relpath=path)
    assert object_type == b'tree'
    # The tree data here is (mode ' ' filename \0 20-byte-sha)*
    # It's transformed to a list of byte chars to allow iteration.
    # See http://python-future.org/compatible_idioms.html#byte-string-literals.
    tree_data = [bytes([b]) for b in tree_data]
    i = 0
    while i < len(tree_data):
      start = i
      while tree_data[i] != b' ':
        i += 1
      mode = b''.join(tree_data[start:i])
      i += 1  # skip space
      start = i
      while tree_data[i] != NUL:
        i += 1
      name = b''.join(tree_data[start:i])
      sha = b''.join(tree_data[i + 1:i + 1 + GIT_HASH_LENGTH])
      sha_hex = binascii.hexlify(sha)
      i += 1 + GIT_HASH_LENGTH
      if mode == b'120000':
        tree[name] = self.Symlink(name, sha_hex)
      elif mode == b'40000':
        tree[name] = self.Dir(name, sha_hex)
      else:
        tree[name] = self.File(name, sha_hex)
    self._trees[path] = tree
    return tree

  def _read_object_from_repo(self, rev=None, relpath=None, sha=None):
    """Read an object from the git repo.

    This is implemented via a pipe to git cat-file --batch
    """
    if sha:
      spec = sha + b'\n'
    else:
      assert rev is not None
      assert relpath is not None
      rev = ensure_text(rev)
      relpath = ensure_text(relpath)
      relpath = self._fixup_dot_relative(relpath)
      spec = '{}:{}\n'.format(rev, relpath).encode('utf-8')

    self._maybe_start_cat_file_process()
    self._cat_file_process.stdin.write(spec)
    self._cat_file_process.stdin.flush()
    header = None
    while not header:
      header = self._cat_file_process.stdout.readline()
      if self._cat_file_process.poll() is not None:
        raise self.GitDiedException("Git cat-file died while trying to read '{}'.".format(spec))

    header = header.rstrip()
    parts = header.rsplit(SPACE, 2)
    if len(parts) == 2:
      # cat-file emits '<spec> missing' for unknown objects.
      assert parts[1] == b'missing'
      raise self.MissingFileException(rev, relpath)

    _, object_type, object_len = parts

    # Read the object data
    blob = bytes(self._cat_file_process.stdout.read(int(object_len)))

    # Read the trailing newline
    assert self._cat_file_process.stdout.read(1) == b'\n'
    assert len(blob) == int(object_len)
    return object_type, blob

  def __del__(self):
    # Drain and reap the cat-file subprocess so it doesn't linger as a zombie.
    if self._cat_file_process:
      self._cat_file_process.communicate()
| twitter/pants | src/python/pants/scm/git.py | Python | apache-2.0 | 22,734 | 0.011261 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Two schema tweaks: allow blank group membership on machine requests, and
    # rename the ProjectExternalLink database table to 'project_links'.

    dependencies = [
        ('core', '0044_cm_defaults_for_allocation_and_quota'),
    ]

    operations = [
        # Allow a MachineRequest to be saved with no group memberships attached.
        migrations.AlterField(
            model_name='machinerequest',
            name='new_version_membership',
            field=models.ManyToManyField(to='core.Group', blank=True),
        ),
        # Point the ProjectExternalLink model at the 'project_links' table.
        migrations.AlterModelTable(
            name='projectexternallink',
            table='project_links',
        ),
    ]
| CCI-MOC/GUI-Backend | core/migrations/0045_allow_blank_membership_AND_rename_project_links.py | Python | apache-2.0 | 583 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
trappist1.py |github|
---------------------
This module hosts TRAPPIST-1-specific routines.
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/planetplanet/photo/trappist1.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from ..constants import *
from .ppo import Star, Planet, System
from . import theta
import numpy as np
import matplotlib.pyplot as pl
import os
from tqdm import tqdm
__all__ = ['Trappist1']
def Trappist1(sample = True, distance = 12, seed = None, **kwargs):
    '''
    Returns an instance of :py:obj:`planetplanet.photo.System` for the full
    TRAPPIST-1 system. Star and planet parameters are drawn from their
    respective prior distributions, which are based on the observed values
    from Gillon et al. (2017), Luger et al. (2017), and
    Burgasser & Mamajek (2017). Longitudes of ascending node are
    drawn from the :math:`\\theta` distribution derived in the paper.

    :param bool sample: Draw a random sample from the full prior? \
        If :py:obj:`False`, returns the mean values for all parameters. \
        Default :py:obj:`True`
    :param float distance: Distance to the system in parsecs. \
        Default :py:obj:`12`
    :param int seed: Random number generator seed. Default :py:obj:`None`
    :param kwargs: Any other :py:obj:`kwargs` to be passed to \
        :py:func:`planetplanet.Star`, \
        :py:func:`planetplanet.Planet`, and :py:func:`planetplanet.System`.

    .. plot::
         :align: center

         from planetplanet.photo.trappist1 import Trappist1
         from planetplanet.constants import MINUTE
         import matplotlib.pyplot as pl
         import numpy as np
         system = Trappist1()
         system.compute(np.arange(0, 10, 1 * MINUTE))
         system.plot_lightcurve()
         pl.show()

    '''

    # Randomizer seed
    # NOTE(review): this seeds numpy's *global* RNG, which affects any other
    # numpy-based randomness in the process.
    if seed is not None:
        np.random.seed(seed)

    # Account for the uncertainty?
    # N(mu, sigma) either returns the mean (no sampling) or a normal draw.
    if not sample:
        N = lambda mu, sigma: mu

        # Fix the inclinations at their mean values
        inclinations = [89.65, 89.67, 89.75, 89.86, 89.680, 89.710, 89.80]
    else:
        N = lambda mu, sigma: mu + sigma * np.random.randn()

        # Draw from the joint inclination distribution
        PATH = os.path.dirname(os.path.abspath(__file__))
        samples = np.loadtxt(os.path.join(PATH, "inclination.dat"))
        inclinations = samples[np.random.randint(len(samples))]

    # Instantiate the star; radius from Burgasser & Mamajek (2017)
    mstar = N(0.0802, 0.0073)
    rstar = N(0.121, 0.003)
    # Effective temperature from the sampled L/LSUN via the Stefan-Boltzmann law.
    teff = (N(0.000524, 0.000034)
            * LSUN / (4 * np.pi * (rstar * RSUN) ** 2 * SBOLTZ)) ** 0.25
    star = Star('A', m = mstar, r = rstar, teff = teff, color = 'k', **kwargs)

    # Parameters from Gillon et al. (2017) and Luger et al. (2017)
    # Mass for `h` is currently unconstrained, so basing it loosely on
    # the mass distribution for `d`, which has a similar radius.
    planets = [None for i in range(7)]
    names = ['b', 'c', 'd', 'e', 'f', 'g', 'h']
    # Each tuple below is (mean, 1-sigma uncertainty) fed to N().
    periods = [(1.51087081, 0.60e-6),
               (2.4218233, 0.17e-5),
               (4.049610, 0.63e-4),
               (6.099615, 0.11e-4),
               (9.206690, 0.15e-4),
               (12.35294, 0.12e-3),
               (18.767, 0.004)]

    # Transit times, t0 − 2,450,000 (BJD_{TDB})
    # These were taken from the Excel source data corresponding
    # to Extended Data Figure 4 of Gillon et al. (2017), downloaded from
    # http://www.nature.com/nature/journal/v542/n7642/source_data/nature21360-sf4.xlsx
    # These are the *last* transit times measured in the discovery paper.
    # Note that photodynamical integrations will only be accurate for
    # integrations starting close to this time (7670 corresponds to
    # 12:00:00 UT October 8, 2016). We will update these ephemerides as
    # more TTV data becomes available.
    transits = [(7671.52876, 0.00033),
                (7670.29869, 0.00035),
                (7670.14198, 0.00066),
                (7672.5793, 0.0026),
                (7671.39279, 0.00072),
                (7665.35151, 0.00028),
                (7662.55463, 0.00056)]

    # Planet masses in Earth masses.
    masses = [(0.85, 0.72),
              (1.38, 0.61),
              (0.41, 0.27),
              (0.62, 0.58),
              (0.68, 0.18),
              (1.34, 0.88),
              (0.4, 0.3)]

    # Transit depths in percent (converted to Rp/Rstar below).
    depths = [(0.7266, 0.0088),
              (0.687, 0.010),
              (0.367, 0.017),
              (0.519, 0.026),
              (0.673, 0.023),
              (0.782, 0.027),
              (0.346, 0.032)]

    # These are approximated from Supplementary Figure 6 in
    # Luger et al. (2017). These can certainly be improved with better TTV
    # data and more dynamical modeling.
    eccentricities = [(0.0005, 0.0001),
                      (0.004, 0.001),
                      (0.0004, 0.0003),
                      (0.007, 0.0005),
                      (0.009, 0.001),
                      (0.004, 0.001),
                      (0.003, 0.001)]

    # These we're just going to fix for now. We have no prior
    # constraints on them. Let's assume the most optimistic albedos.
    albedos = [(0., 0), (0., 0), (0., 0), (0., 0),
               (0., 0), (0., 0), (0., 0)]
    tnights = [(40., 0), (40., 0), (40., 0), (40., 0),
               (40., 0), (40., 0), (40., 0)]

    # Colors for plotting
    colors = ['firebrick', 'coral', 'gold', 'mediumseagreen', 'turquoise',
              'cornflowerblue', 'midnightblue']

    # Compute the polar angle scatter
    sig_theta = theta.sample()

    # Instantiate the planets
    for i in range(7):

        # Period and time of transit
        per = N(*periods[i])
        t0 = N(*transits[i])

        # Positive mass
        # Rejection-sample until the normal draw is physical (> 0).
        m = 0
        while m <= 0:
            m = N(*masses[i])

        # Inclination in range [0, 90]
        inc = inclinations[i]
        if inc > 90:
            inc = 180 - inc

        # Longitude of ascending node in degrees
        # Planet b defines the reference plane (Omega = 0); the rest scatter
        # about it with the sampled polar-angle dispersion.
        if (i == 0) or (not sample):
            Omega = 0
        else:
            Omega = N(0, sig_theta)

        # Longitude of pericenter (uniform over [0-360 deg])
        if sample:
            w = 360. * np.random.rand()
        else:
            w = 0.

        # Eccentricity
        # Rejection-sample into the physical range [0, 1).
        ecc = 1
        while (ecc < 0) or (ecc >= 1):
            ecc = N(*eccentricities[i])

        # Radius from Rp / Rstar
        # Depth is quoted in percent, so Rp/Rstar = sqrt(depth / 100); the
        # uncertainty is propagated via sigma_RpRs = sigma_depth / (2 * 100 * RpRs).
        mu = np.sqrt(depths[i][0] / 100)
        sig = 0.5 * depths[i][1] / 100 / mu
        RpRs = N(mu, sig)
        r = RpRs * rstar * RSUN / REARTH

        # Albedo, night side temperature, effective temperature
        albedo = N(*albedos[i])
        tnight = N(*tnights[i])

        # Instantiate!
        planets[i] = Planet(names[i], m = m, per = per, inc = inc, r = r,
                            t0 = t0, Omega = Omega, w = w, ecc = ecc,
                            color = colors[i], tnight = tnight,
                            albedo = albedo, **kwargs)

    # Return the system
    system = System(star, distance = distance, *planets, **kwargs)
    return system
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
    """Replace NaN entries in `scores` with the smallest value of its dtype.

    Fixes Issue #1240: NaNs can't be properly compared, so they are mapped to
    the dtype's minimum instead; -inf seems to be unreliable for this purpose.
    """
    # XXX where should this function be called? fit? scoring functions
    # themselves?
    cleaned = as_float_array(scores, copy=True)
    nan_mask = np.isnan(cleaned)
    cleaned[nan_mask] = np.finfo(cleaned.dtype).min
    return cleaned
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
    """Performs a 1-way ANOVA.

    The one-way ANOVA tests the null hypothesis that 2 or more groups have
    the same population mean. The test is applied to samples from two or
    more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like, sparse matrices
        The sample measurements should be given as arguments.

    Returns
    -------
    F-value : float
        The computed F-value of the test.
    p-value : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The ANOVA test has important assumptions that must be satisfied in order
    for the associated p-value to be valid.

    1. The samples are independent
    2. Each sample is from a normally distributed population
    3. The population standard deviations of the groups are all equal. This
       property is known as homoscedasticity.

    If these assumptions are not true for a given set of data, it may still be
    possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
    with some loss of power.

    The algorithm is from Heiman[2], pp.394-7.

    See ``scipy.stats.f_oneway`` that should give the same results while
    being less efficient.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 14.
           http://faculty.vassar.edu/lowry/ch14pt1.html
    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
    """
    n_classes = len(args)
    args = [as_float_array(a) for a in args]
    n_samples_per_class = np.array([a.shape[0] for a in args])
    n_samples = np.sum(n_samples_per_class)
    # Total sum of squares of all observations, per feature.
    ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
    sums_args = [np.asarray(a.sum(axis=0)) for a in args]
    square_of_sums_alldata = sum(sums_args) ** 2
    square_of_sums_args = [s ** 2 for s in sums_args]
    sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
    # Between-group (ssbn) and within-group (sswn) sums of squares.
    ssbn = 0.
    for k, _ in enumerate(args):
        ssbn += square_of_sums_args[k] / n_samples_per_class[k]
    ssbn -= square_of_sums_alldata / float(n_samples)
    sswn = sstot - ssbn
    dfbn = n_classes - 1
    dfwn = n_samples - n_classes
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    # A zero within-group mean square means the feature is constant; warn
    # because the F statistic will be undefined (0/0) for those features.
    constant_features_idx = np.where(msw == 0.)[0]
    if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
        warnings.warn("Features %s are constant." % constant_features_idx,
                      UserWarning)
    f = msb / msw
    # flatten matrix to vector in sparse case
    f = np.asarray(f).ravel()
    # BUG FIX: ``scipy.stats.fprob`` was deprecated and then removed from
    # SciPy; compute the F-distribution survival function directly via
    # scipy.special instead (same values, no removed API).
    prob = special.fdtrc(dfbn, dfwn, f)
    return f, prob
def f_classif(X, y):
    """Compute the ANOVA F-value for each feature of the provided sample.

    Parameters
    ----------
    X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will tested sequentially.

    y : array of shape(n_samples)
        The class labels used to group the samples.

    Returns
    -------
    F : array, shape = [n_features,]
        The set of F values.

    pval : array, shape = [n_features,]
        The set of p-values.
    """
    X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
    # One group of rows per class label, then a one-way ANOVA across them.
    class_groups = []
    for label in np.unique(y):
        class_groups.append(X[safe_mask(X, y == label)])
    return f_oneway(*class_groups)
def _chisquare(f_obs, f_exp):
    """Fast replacement for scipy.stats.chisquare.

    Version from https://github.com/scipy/scipy/pull/2525 with additional
    optimizations.

    Computes ``((f_obs - f_exp) ** 2 / f_exp).sum(axis=0)`` and the matching
    chi-squared p-values.  NOTE: ``f_obs`` is deliberately reused as scratch
    space, so a float64 array passed in is modified in place.
    """
    work = np.asarray(f_obs, dtype=np.float64)
    n_rows = len(work)
    # Accumulate the statistic in place to avoid temporary arrays.
    work -= f_exp
    work **= 2
    work /= f_exp
    statistic = work.sum(axis=0)
    p_values = special.chdtrc(n_rows - 1, statistic)
    return statistic, p_values
def chi2(X, y):
    """Compute the chi-squared statistic for each class/feature combination.

    This score can be used to select the n_features features with the
    highest values for the test chi-squared statistic from X, which must
    contain booleans or frequencies (e.g., term counts in document
    classification), relative to the classes.

    Recall that the chi-square test measures dependence between stochastic
    variables, so using this function "weeds out" the features that are the
    most likely to be independent of class and therefore irrelevant for
    classification.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
        Sample vectors.

    y : array-like, shape = (n_samples,)
        Target vector (class labels).

    Returns
    -------
    chi2 : array, shape = (n_features,)
        chi2 statistics of each feature.

    pval : array, shape = (n_features,)
        p-values of each feature.

    Notes
    -----
    Complexity of this algorithm is O(n_classes * n_features).
    """
    # XXX: we might want to do some of the following in logspace instead for
    # numerical stability.
    X = check_array(X, accept_sparse='csr')
    # chi2 is only defined for non-negative frequencies / counts.
    data = X.data if issparse(X) else X
    if np.any(data < 0):
        raise ValueError("Input X must be non-negative.")
    class_ind = LabelBinarizer().fit_transform(y)
    if class_ind.shape[1] == 1:
        # Binary target: the binarizer yields one column, add its complement.
        class_ind = np.append(1 - class_ind, class_ind, axis=1)
    observed_freq = safe_sparse_dot(class_ind.T, X)  # n_classes * n_features
    # Expected counts under the independence hypothesis.
    feature_count = check_array(X.sum(axis=0))
    class_prob = check_array(class_ind.mean(axis=0))
    expected_freq = np.dot(class_prob.T, feature_count)
    return _chisquare(observed_freq, expected_freq)
def f_regression(X, y, center=True):
    """Univariate linear regression tests

    Quick linear model for testing the effect of a single regressor,
    sequentially for many regressors.

    This is done in 3 steps:

    1. the regressor of interest and the data are orthogonalized
       wrt constant regressors
    2. the cross correlation between data and regressors is computed
    3. it is converted to an F score then to a p-value

    Parameters
    ----------
    X : {array-like, sparse matrix}  shape = (n_samples, n_features)
        The set of regressors that will tested sequentially.

    y : array of shape(n_samples).
        The data matrix

    center : True, bool,
        If true, X and y will be centered.

    Returns
    -------
    F : array, shape=(n_features,)
        F values of features.

    pval : array, shape=(n_features,)
        p-values of F-scores.
    """
    if issparse(X) and center:
        raise ValueError("center=True only allowed for dense data")
    # BUG FIX: ``np.float`` was a deprecated alias for the builtin ``float``
    # and has been removed from NumPy; use the explicit float64 dtype,
    # which is what the alias always resolved to.
    X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
    if center:
        y = y - np.mean(y)
        X = X.copy('F')  # faster in fortran
        X -= X.mean(axis=0)

    # compute the correlation
    corr = safe_sparse_dot(y, X)
    # XXX could use corr /= row_norms(X.T) here, but the test doesn't pass
    corr /= np.asarray(np.sqrt(safe_sqr(X).sum(axis=0))).ravel()
    corr /= norm(y)

    # convert correlation to an F score, then to a p-value
    degrees_of_freedom = y.size - (2 if center else 1)
    F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
    pv = stats.f.sf(F, 1, degrees_of_freedom)
    return F, pv
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
    """Base class for univariate feature selectors.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).
    """

    def __init__(self, score_func):
        self.score_func = score_func

    def fit(self, X, y):
        """Run score function on (X, y) and get the appropriate features.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])

        if not callable(self.score_func):
            raise TypeError("The score function should be a callable, "
                            "%s (%s) was passed."
                            % (self.score_func, type(self.score_func)))

        # Let the subclass validate its own selection parameter before
        # spending time on scoring.
        self._check_params(X, y)
        scores, pvalues = self.score_func(X, y)
        self.scores_ = np.asarray(scores)
        self.pvalues_ = np.asarray(pvalues)
        return self

    def _check_params(self, X, y):
        # Hook for subclasses to validate their selection parameter.
        pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
    """Select features according to a percentile of the highest scores.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    percentile : int, optional, default=10
        Percent of features to keep.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.

    Notes
    -----
    Ties between features with equal scores will be broken in an unspecified
    way.
    """

    def __init__(self, score_func=f_classif, percentile=10):
        super(SelectPercentile, self).__init__(score_func)
        self.percentile = percentile

    def _check_params(self, X, y):
        if not 0 <= self.percentile <= 100:
            raise ValueError("percentile should be >=0, <=100; got %r"
                             % self.percentile)

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        # Trivial cases: keep everything or keep nothing.
        if self.percentile == 100:
            return np.ones(len(self.scores_), dtype=np.bool)
        if self.percentile == 0:
            return np.zeros(len(self.scores_), dtype=np.bool)

        scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        # Features scoring exactly at the threshold are ties; keep just
        # enough of them to reach the requested percentile.
        ties = np.where(scores == threshold)[0]
        if len(ties):
            max_feats = len(scores) * self.percentile // 100
            kept_ties = ties[:max_feats - mask.sum()]
            mask[kept_ties] = True
        return mask
class SelectKBest(_BaseFilter):
    """Select features according to the k highest scores.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    k : int or "all", optional, default=10
        Number of top features to select.
        The "all" option bypasses selection, for use in a parameter search.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.

    Notes
    -----
    Ties between features with equal scores will be broken in an unspecified
    way.
    """

    def __init__(self, score_func=f_classif, k=10):
        super(SelectKBest, self).__init__(score_func)
        self.k = k

    def _check_params(self, X, y):
        if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
            raise ValueError("k should be >=0, <= n_features; got %r."
                             "Use k='all' to return all features."
                             % self.k)

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        if self.k == 'all':
            return np.ones(self.scores_.shape, dtype=bool)
        if self.k == 0:
            return np.zeros(self.scores_.shape, dtype=bool)

        scores = _clean_nans(self.scores_)
        mask = np.zeros(scores.shape, dtype=bool)
        # Request a stable sort. Mergesort takes more memory (~40MB per
        # megafeature on x86-64) but keeps ties in a deterministic order.
        top_indices = np.argsort(scores, kind="mergesort")[-self.k:]
        mask[top_indices] = 1
        return mask
class SelectFpr(_BaseFilter):
    """Filter: Select the pvalues below alpha based on a FPR test.

    FPR test stands for False Positive Rate test. It controls the total
    amount of false detections.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    alpha : float, optional
        The highest p-value for features to be kept.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.
    """

    def __init__(self, score_func=f_classif, alpha=5e-2):
        super(SelectFpr, self).__init__(score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')
        # Keep every feature whose p-value beats the significance level.
        return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
    """Filter: Select the p-values for an estimated false discovery rate

    This uses the Benjamini-Hochberg procedure. ``alpha`` is the target false
    discovery rate.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    alpha : float, optional
        The highest uncorrected p-value for features to keep.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.
    """

    def __init__(self, score_func=f_classif, alpha=5e-2):
        super(SelectFdr, self).__init__(score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        alpha = self.alpha
        n_features = len(self.pvalues_)
        sv = np.sort(self.pvalues_)
        # Benjamini-Hochberg: compare the i-th smallest p-value (1-based
        # rank i) against alpha * i / n_features, and keep every feature
        # whose p-value is below the largest p-value that passes.
        # BUG FIX: the previous criterion ``sv < alpha * np.arange(n)``
        # started the ranks at 0 and omitted the division by n_features,
        # so it did not implement Benjamini-Hochberg and was far too
        # permissive.
        selected = sv[sv <= alpha / n_features * np.arange(1, n_features + 1)]
        if selected.size == 0:
            return np.zeros_like(self.pvalues_, dtype=bool)
        return self.pvalues_ <= selected.max()
class SelectFwe(_BaseFilter):
    """Filter: Select the p-values corresponding to Family-wise error rate

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    alpha : float, optional
        The highest uncorrected p-value for features to keep.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.
    """

    def __init__(self, score_func=f_classif, alpha=5e-2):
        super(SelectFwe, self).__init__(score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')
        # Bonferroni correction: the significance level is shared equally
        # across all the tests that were performed.
        corrected_alpha = self.alpha / len(self.pvalues_)
        return self.pvalues_ < corrected_alpha
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
    """Univariate feature selector with configurable strategy.

    Delegates the actual thresholding to one of the concrete selectors
    (SelectPercentile, SelectKBest, ...) chosen by ``mode``.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
        Feature selection mode.

    param : float or int depending on the feature selection mode
        Parameter of the corresponding mode.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.
    """
    # Map from mode name to the concrete selector class that implements it.
    _selection_modes = {'percentile': SelectPercentile,
                        'k_best': SelectKBest,
                        'fpr': SelectFpr,
                        'fdr': SelectFdr,
                        'fwe': SelectFwe}
    def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
        super(GenericUnivariateSelect, self).__init__(score_func)
        self.mode = mode
        self.param = param
    def _make_selector(self):
        # Build a throwaway selector of the configured mode.
        selector = self._selection_modes[self.mode](score_func=self.score_func)
        # Now perform some acrobatics to set the right named parameter in
        # the selector
        # NOTE: assumes each delegate class has exactly one constructor
        # parameter besides score_func (percentile/k/alpha), which holds
        # for all the classes registered in _selection_modes above.
        possible_params = selector._get_param_names()
        possible_params.remove('score_func')
        selector.set_params(**{possible_params[0]: self.param})
        return selector
    def _check_params(self, X, y):
        if self.mode not in self._selection_modes:
            raise ValueError("The mode passed should be one of %s, %r,"
                             " (type %s) was passed."
                             % (self._selection_modes.keys(), self.mode,
                                type(self.mode)))
        # Also run the delegate's own parameter validation.
        self._make_selector()._check_params(X, y)
    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')
        # Prime a fresh delegate with our fitted statistics and let it
        # compute the support mask.
        selector = self._make_selector()
        selector.pvalues_ = self.pvalues_
        selector.scores_ = self.scores_
        return selector._get_support_mask()
| loli/semisupervisedforests | sklearn/feature_selection/univariate_selection.py | Python | bsd-3-clause | 18,609 | 0 |
'''
Script to generate the Kivy API reference (.rst sources) from source code.

Code is messy, but working.
Be careful if you change anything in it!
'''
# Modules excluded from the generated API reference (internal/low-level).
ignore_list = (
    'kivy._event',
    'kivy.factory_registers',
    'kivy.graphics.buffer',
    'kivy.graphics.vbo',
    'kivy.graphics.vertex',
    'kivy.lib.osc'
)
import os
import sys
from glob import glob
import kivy
# force loading of kivy modules
import kivy.app
import kivy.metrics
import kivy.atlas
import kivy.context
import kivy.core.audio
import kivy.core.camera
import kivy.core.clipboard
import kivy.core.gl
import kivy.core.image
import kivy.core.spelling
import kivy.core.text
import kivy.core.text.markup
import kivy.core.video
import kivy.core.window
import kivy.ext
import kivy.geometry
import kivy.graphics
import kivy.graphics.shader
import kivy.animation
import kivy.modules.keybinding
import kivy.modules.monitor
import kivy.modules.touchring
import kivy.modules.inspector
import kivy.modules.recorder
import kivy.modules.screen
import kivy.storage
import kivy.storage.dictstore
import kivy.storage.jsonstore
import kivy.storage.redisstore
import kivy.network.urlrequest
import kivy.modules.webdebugger
import kivy.support
import kivy.input.recorder
import kivy.interactive
import kivy.garden
from kivy.factory import Factory
# force loading of all classes from factory
for x in list(Factory.classes.keys())[:]:
    getattr(Factory, x)
# Directory of doc
# base_dir is the directory containing this script; generated .rst files go
# into base_dir/sources and example snippets are pulled from the framework
# examples directory.
base_dir = os.path.dirname(__file__)
dest_dir = os.path.join(base_dir, 'sources')
examples_framework_dir = os.path.join(base_dir, '..', 'examples', 'framework')
def writefile(filename, data):
    """Write *data* to *filename* inside ``dest_dir``.

    The write is skipped when the file already holds exactly the same
    content, so unchanged pages keep their timestamps.
    """
    global dest_dir
    target = os.path.join(dest_dir, filename)
    print('write', filename)
    # Leave the file untouched if its content did not change.
    if os.path.exists(target):
        with open(target) as existing:
            if existing.read() == data:
                return
    with open(target, 'w') as out:
        out.write(data)
# Activate Kivy modules
'''
for k in kivy.kivy_modules.list().keys():
    kivy.kivy_modules.import_module(k)
'''
# Search all kivy module
# Build (dotted name, module object, source file basename) triples for every
# kivy module that is currently imported.
l = [(x, sys.modules[x], os.path.basename(sys.modules[x].__file__).rsplit('.', 1)[0]) for x in sys.modules if x.startswith('kivy') and sys.modules[x]]
# Extract packages from modules
packages = []
modules = {}
api_modules = []
for name, module, filename in l:
    if name in ignore_list:
        continue
    # api_modules lists everything documented; a module whose name merely
    # starts with an ignored prefix is also excluded.
    if not any([name.startswith(x) for x in ignore_list]):
        api_modules.append(name)
    if filename == '__init__':
        packages.append(name)
    else:
        # Remember the public names of each plain module.
        if hasattr(module, '__all__'):
            modules[name] = module.__all__
        else:
            modules[name] = [x for x in dir(module) if not x.startswith('__')]
packages.sort()
# Create index
api_index = \
'''API Reference
-------------

The API reference is a lexicographic list of all the different classes,
methods and features that Kivy offers.

.. toctree::
    :maxdepth: 1

'''
api_modules.sort()
# One toctree entry per documented module/package.
for package in api_modules:
    api_index += "    api-%s.rst\n" % package
writefile('api-index.rst', api_index)
# Create index for all packages
template = \
'''==========================================================================================================
$SUMMARY
==========================================================================================================
$EXAMPLES_REF
.. automodule:: $PACKAGE
:members:
:show-inheritance:
.. toctree::
$EXAMPLES
'''
template_examples = \
'''.. _example-reference%d:
Examples
--------
%s
'''
template_examples_ref = \
'''# :ref:`Jump directly to Examples <example-reference%d>`'''
def extract_summary_line(doc):
    """Return the first meaningful line of a docstring, stripped.

    Blank lines and reStructuredText reference marks (``.. _name:``) are
    skipped.  Returns ``None`` when *doc* is ``None`` or contains no
    usable line.
    """
    if doc is None:
        return
    for raw_line in doc.split('\n'):
        candidate = raw_line.strip()
        # Skip blank lines and rst reference marks.
        if not candidate or candidate.startswith('.. _'):
            continue
        return candidate
# Emit one api-<package>.rst page per package, with a toctree listing its
# direct sub-packages and modules.
for package in packages:
    summary = extract_summary_line(sys.modules[package].__doc__)
    if summary is None:
        summary = 'NO DOCUMENTATION (package %s)' % package
    t = template.replace('$SUMMARY', summary)
    t = t.replace('$PACKAGE', package)
    t = t.replace('$EXAMPLES_REF', '')
    t = t.replace('$EXAMPLES', '')
    # search packages
    for subpackage in packages:
        packagemodule = subpackage.rsplit('.', 1)[0]
        # Only direct children; top-level packages (depth <= 2) are skipped.
        if packagemodule != package or len(subpackage.split('.')) <= 2:
            continue
        t += "    api-%s.rst\n" % subpackage
    # search modules
    m = list(modules.keys())
    m.sort(key=lambda x: extract_summary_line(sys.modules[x].__doc__))
    for module in m:
        packagemodule = module.rsplit('.', 1)[0]
        if packagemodule != package:
            continue
        t += "    api-%s.rst\n" % module
    writefile('api-%s.rst' % package, t)
# Create one api-<module>.rst page per (non-package) module, embedding any
# matching example files found in the framework examples directory.
m = list(modules.keys())
m.sort()
refid = 0
for module in m:
    summary = extract_summary_line(sys.modules[module].__doc__)
    if summary is None:
        # BUG FIX: this previously interpolated the leftover ``package``
        # variable from the loop above, labelling the message with the
        # wrong name; use the current ``module`` instead.
        summary = 'NO DOCUMENTATION (module %s)' % module
    # search examples
    example_output = []
    example_prefix = module
    if module.startswith('kivy.'):
        example_prefix = module[5:]
    example_prefix = example_prefix.replace('.', '_')
    # try to find any example in the framework directory
    list_examples = glob('%s*.py' % os.path.join(examples_framework_dir, example_prefix))
    for x in list_examples:
        # extract filename without directory
        xb = os.path.basename(x)
        # add a section !
        example_output.append('File :download:`%s <%s>` ::' % (
            xb, os.path.join('..', x)))
        # put the file in, indented as an rst literal block
        with open(x, 'r') as fd:
            d = fd.read().strip()
            d = '\t' + '\n\t'.join(d.split('\n'))
        example_output.append(d)
    t = template.replace('$SUMMARY', summary)
    t = t.replace('$PACKAGE', module)
    if len(example_output):
        # Give the examples section a unique anchor and link to it from
        # the top of the page.
        refid += 1
        example_output = template_examples % (refid, '\n\n\n'.join(example_output))
        t = t.replace('$EXAMPLES_REF', template_examples_ref % refid)
        t = t.replace('$EXAMPLES', example_output)
    else:
        t = t.replace('$EXAMPLES_REF', '')
        t = t.replace('$EXAMPLES', '')
    writefile('api-%s.rst' % module, t)
# Generation finished
print('Generation finished, do make html')
| JulienMcJay/eclock | windows/kivy/doc/autobuild.py | Python | gpl-2.0 | 6,361 | 0.008018 |
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
# set_vcf_sample_id.py
#
# This script processes a single sample VCF file and replaces the
# sample ID in the header line.
#
# This could be replaced (almost) with a one-line sed script:
#
# sed -e 's/\(^#CHROM\t.*\t\)original$/\1new/' \
#
# What this script adds is a little more control, notably with error
# handling. sed will not report the number of changes, so to determine
# if a change was made, you'd need to make a second pass over the file.
#
# This script reads from stdin and writes to stdout.
#
# Usage:
# python set_vcf_sample_id.py original_id new_id
#
# If the original_id is specified, it will be verified before making the change.
# If the original_id is set to "", verification will be skipped.
import sys
def main():
    """Entry point to the script.

    Reads a single-sample VCF from stdin, replaces the sample ID on the
    ``#CHROM`` header line, and writes the result to stdout.  Statistics
    and errors go to stderr; exits non-zero unless exactly one line was
    changed.  (Python 2 script: uses ``print >>`` statements.)
    """
    if len(sys.argv) != 3:
        print >> sys.stderr, "Usage: %s original_id new_id" % sys.argv[0]
        sys.exit(1)
    original_id = sys.argv[1]
    new_id = sys.argv[2]
    lines_processed = 0
    lines_changed = 0
    for line in sys.stdin:
        lines_processed = lines_processed + 1
        # Only line we care about is the #^CHROM line
        if line.startswith('#CHROM\t'):
            fields = line.rstrip('\n').split('\t')
            # If an "original_id" was specified, verify that is what is in the file
            # (the sample ID of a single-sample VCF is the last tab field).
            if original_id:
                curr_id = fields[-1]
                if curr_id != original_id:
                    print >> sys.stderr, \
                        "ERROR: Current sample ID does not match expected: %s != %s\n" % (
                            curr_id, original_id)
                    sys.exit(1)
            # Set the new value into the fields array and recreate the line
            fields[-1] = new_id
            line = '\t'.join(fields) + '\n'
            lines_changed = lines_changed + 1
        # Emit the current line
        sys.stdout.write(line)
    # Emit some statistics to stderr
    print >> sys.stderr, "Total lines: %d" % lines_processed
    print >> sys.stderr, "Changed lines: %d" % lines_changed
    # A valid VCF has exactly one #CHROM header line; anything else is an error.
    if lines_changed != 1:
        print >> sys.stderr, "ERROR: Changed lines is not 1"
        sys.exit(1)
# Script entry point.
if __name__ == "__main__":
    main()
| googlegenomics/pipelines-api-examples | set_vcf_sample_id/set_vcf_sample_id.py | Python | bsd-3-clause | 2,263 | 0.011931 |
# (C) Datadog, Inc. 2015-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
import logging
import os
from urlparse import urljoin
# project
from util import check_yaml
from utils.checkfiles import get_conf_path
from utils.http import retrieve_json
from utils.singleton import Singleton
from utils.dockerutil import DockerUtil
import requests
# Shared collector logger used throughout this module.
log = logging.getLogger('collector')
# Name of the check whose YAML configuration provides connection settings.
KUBERNETES_CHECK_NAME = 'kubernetes'
class KubeUtil:
    """Helper for querying the kubelet, cAdvisor and the Kubernetes API
    server from the agent.

    A singleton (via the ``Singleton`` metaclass, Python 2 style): the
    connection settings are read once from the first instance of the
    kubernetes check YAML, with defaults when the file is missing or
    invalid.
    """
    __metaclass__ = Singleton
    # Connection defaults; the check instance config may override them.
    DEFAULT_METHOD = 'http'
    MACHINE_INFO_PATH = '/api/v1.3/machine/'
    METRICS_PATH = '/api/v1.3/subcontainers/'
    PODS_LIST_PATH = '/pods/'
    DEFAULT_CADVISOR_PORT = 4194
    DEFAULT_KUBELET_PORT = 10255
    DEFAULT_MASTER_PORT = 8080
    DEFAULT_MASTER_NAME = 'kubernetes'  # DNS name to reach the master from a pod.
    # Service-account credentials mounted by Kubernetes inside every pod.
    CA_CRT_PATH = '/run/secrets/kubernetes.io/serviceaccount/ca.crt'
    AUTH_TOKEN_PATH = '/run/secrets/kubernetes.io/serviceaccount/token'
    # Docker labels that Kubernetes sets on managed containers.
    POD_NAME_LABEL = "io.kubernetes.pod.name"
    NAMESPACE_LABEL = "io.kubernetes.pod.namespace"
    def __init__(self, instance=None):
        self.docker_util = DockerUtil()
        if instance is None:
            # No instance given: fall back to the kubernetes check config.
            try:
                config_file_path = get_conf_path(KUBERNETES_CHECK_NAME)
                check_config = check_yaml(config_file_path)
                instance = check_config['instances'][0]
            # kubernetes.yaml was not found
            except IOError as ex:
                log.error(ex.message)
                instance = {}
            except Exception:
                log.error('Kubernetes configuration file is invalid. '
                          'Trying connecting to kubelet with default settings anyway...')
                instance = {}
        self.method = instance.get('method', KubeUtil.DEFAULT_METHOD)
        self.host = instance.get("host") or self.docker_util.get_hostname()
        self._node_ip = self._node_name = None  # lazy evaluation
        self.host_name = os.environ.get('HOSTNAME')
        self.cadvisor_port = instance.get('port', KubeUtil.DEFAULT_CADVISOR_PORT)
        self.kubelet_port = instance.get('kubelet_port', KubeUtil.DEFAULT_KUBELET_PORT)
        # Pre-computed endpoint URLs for the kubelet, cAdvisor and API server.
        self.kubelet_api_url = '%s://%s:%d' % (self.method, self.host, self.kubelet_port)
        self.cadvisor_url = '%s://%s:%d' % (self.method, self.host, self.cadvisor_port)
        self.kubernetes_api_url = 'https://%s/api/v1' % (os.environ.get('KUBERNETES_SERVICE_HOST') or self.DEFAULT_MASTER_NAME)
        self.metrics_url = urljoin(self.cadvisor_url, KubeUtil.METRICS_PATH)
        self.machine_info_url = urljoin(self.cadvisor_url, KubeUtil.MACHINE_INFO_PATH)
        self.pods_list_url = urljoin(self.kubelet_api_url, KubeUtil.PODS_LIST_PATH)
        self.kube_health_url = urljoin(self.kubelet_api_url, 'healthz')
        # keep track of the latest k8s event we collected and posted
        # default value is 0 but TTL for k8s events is one hour anyways
        self.last_event_collection_ts = 0
    def get_kube_labels(self, excluded_keys=None):
        """Fetch the pod list and return the kube labels extracted from it."""
        pods = self.retrieve_pods_list()
        return self.extract_kube_labels(pods, excluded_keys=excluded_keys)
    def extract_kube_labels(self, pods_list, excluded_keys=None):
        """
        Extract labels from a list of pods coming from
        the kubelet API.

        Returns a dict mapping "namespace/pod_name" to a list of
        "kube_<label>:<value>" tag strings.
        """
        excluded_keys = excluded_keys or []
        kube_labels = defaultdict(list)
        pod_items = pods_list.get("items") or []
        for pod in pod_items:
            metadata = pod.get("metadata", {})
            name = metadata.get("name")
            namespace = metadata.get("namespace")
            labels = metadata.get("labels")
            if name and labels and namespace:
                key = "%s/%s" % (namespace, name)
                for k, v in labels.iteritems():
                    if k in excluded_keys:
                        continue
                    kube_labels[key].append(u"kube_%s:%s" % (k, v))
        return kube_labels
    def extract_meta(self, pods_list, field_name):
        """
        Extract fields like `uid` or `name` from the `metadata` section of a
        list of pods coming from the kubelet API.

        TODO: currently not in use, was added to support events filtering, consider to remove it.
        """
        uids = []
        pods = pods_list.get("items") or []
        for p in pods:
            value = p.get('metadata', {}).get(field_name)
            if value is not None:
                uids.append(value)
        return uids
    def retrieve_pods_list(self):
        """
        Retrieve the list of pods for this cluster querying the kubelet API.

        TODO: the list of pods could be cached with some policy to be decided.
        """
        return retrieve_json(self.pods_list_url)
    def retrieve_machine_info(self):
        """
        Retrieve machine info from Cadvisor.
        """
        return retrieve_json(self.machine_info_url)
    def retrieve_metrics(self):
        """
        Retrieve metrics from Cadvisor.
        """
        return retrieve_json(self.metrics_url)
    def filter_pods_list(self, pods_list, host_ip):
        """
        Filter out (in place) pods that are not running on the given host.

        TODO: currently not in use, was added to support events filtering, consider to remove it.
        """
        pod_items = pods_list.get('items') or []
        log.debug('Found {} pods to filter'.format(len(pod_items)))
        filtered_pods = []
        for pod in pod_items:
            status = pod.get('status', {})
            if status.get('hostIP') == host_ip:
                filtered_pods.append(pod)
        log.debug('Pods after filtering: {}'.format(len(filtered_pods)))
        pods_list['items'] = filtered_pods
        return pods_list
    def retrieve_json_auth(self, url, auth_token, timeout=10):
        """
        Kubernetes API requires authentication using a token available in
        every pod.

        We try to verify ssl certificate if available.
        """
        # Verify against the mounted service-account CA cert when present.
        verify = self.CA_CRT_PATH if os.path.exists(self.CA_CRT_PATH) else False
        log.debug('ssl validation: {}'.format(verify))
        headers = {'Authorization': 'Bearer {}'.format(auth_token)}
        r = requests.get(url, timeout=timeout, headers=headers, verify=verify)
        r.raise_for_status()
        return r.json()
    def get_node_info(self):
        """
        Return the IP address and the hostname of the node where the pod is running.

        Both values are fetched lazily on first access and then cached.
        """
        if None in (self._node_ip, self._node_name):
            self._fetch_host_data()
        return self._node_ip, self._node_name
    def _fetch_host_data(self):
        """
        Retrieve host name and IP address from the payload returned by the listing
        pods endpoints from kubelet or kubernetes API.

        The host IP address is different from the default router for the pod.
        """
        try:
            pod_items = self.retrieve_pods_list().get("items") or []
        except Exception as e:
            log.warning("Unable to retrieve pod list %s. Not fetching host data", str(e))
            return
        # Find our own pod (matched by the HOSTNAME env var, which Kubernetes
        # sets to the pod name) and read the node info from it.
        for pod in pod_items:
            metadata = pod.get("metadata", {})
            name = metadata.get("name")
            if name == self.host_name:
                status = pod.get('status', {})
                spec = pod.get('spec', {})
                # if not found, use an empty string - we use None as "not initialized"
                self._node_ip = status.get('hostIP', '')
                self._node_name = spec.get('nodeName', '')
                break
    def extract_event_tags(self, event):
        """
        Return a list of tags extracted from an event object
        """
        tags = []
        if 'reason' in event:
            tags.append('reason:%s' % event.get('reason', '').lower())
        if 'namespace' in event.get('metadata', {}):
            tags.append('namespace:%s' % event['metadata']['namespace'])
        if 'host' in event.get('source', {}):
            tags.append('node_name:%s' % event['source']['host'])
        if 'kind' in event.get('involvedObject', {}):
            tags.append('object_type:%s' % event['involvedObject'].get('kind', '').lower())
        return tags
    def are_tags_filtered(self, tags):
        """
        Because it is a pain to call it from the kubernetes check otherwise.
        """
        return self.docker_util.are_tags_filtered(tags)
    @classmethod
    def get_auth_token(cls):
        """
        Return a string containing the authorization token for the pod,
        or ``None`` when the mounted token file cannot be read.
        """
        try:
            with open(cls.AUTH_TOKEN_PATH) as f:
                return f.read()
        except IOError as e:
            log.error('Unable to read token from {}: {}'.format(cls.AUTH_TOKEN_PATH, e))
        return None
| cberry777/dd-agent | utils/kubernetes/kubeutil.py | Python | bsd-3-clause | 8,888 | 0.002025 |
from django.conf.urls import url
from . import views
# URL routes for the AP app: the root path shows the access-point list.
urlpatterns = [
    url(r'^$', views.AP_list, name='AP_list'),
]
| OpenWinCon/OpenWinNet | web-gui/AP/urls.py | Python | apache-2.0 | 119 | 0 |
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib import admin
from django.utils.translation import ugettext as _
from koalixcrm.plugin import *
from koalixcrm.crm.contact.phone_address import PhoneAddress
from koalixcrm.crm.contact.email_address import EmailAddress
from koalixcrm.crm.contact.postal_address import PostalAddress
from koalixcrm.crm.documents.invoice import Invoice
from koalixcrm.crm.documents.quote import Quote
from koalixcrm.crm.documents.purchase_order import PurchaseOrder
from koalixcrm.global_support_functions import xstr
from koalixcrm.crm.const.purpose import *
from koalixcrm.crm.documents.invoice import InlineInvoice
from koalixcrm.crm.documents.quote import InlineQuote
from koalixcrm.crm.reporting.generic_project_link import InlineGenericProjectLink
from koalixcrm.crm.exceptions import *
from koalixcrm.djangoUserExtension.models import UserExtension
import koalixcrm.crm.documents.calculations
import koalixcrm.crm.documents.pdf_export
from rest_framework import serializers
class PostalAddressForContract(PostalAddress):
    # Postal address attached to one contract, tagged with a purpose code
    # (one of PURPOSESADDRESSINCONTRACT, e.g. delivery vs. invoice address).
    purpose = models.CharField(verbose_name=_("Purpose"), max_length=1, choices=PURPOSESADDRESSINCONTRACT)
    contract = models.ForeignKey('Contract')
    class Meta:
        app_label = "crm"
        verbose_name = _('Postal Address For Contracts')
        verbose_name_plural = _('Postal Address For Contracts')
    def __str__(self):
        # NOTE(review): reads PostalAddress fields named ``prename``/``name``/
        # ``addressline1`` while the admin inline below lists ``pre_name`` /
        # ``address_line_1`` — confirm against the PostalAddress base model.
        return xstr(self.prename) + ' ' + xstr(self.name) + ' ' + xstr(self.addressline1)
class PhoneAddressForContract(PhoneAddress):
    # Phone number attached to one contract, tagged with a purpose code.
    purpose = models.CharField(verbose_name=_("Purpose"), max_length=1, choices=PURPOSESADDRESSINCONTRACT)
    contract = models.ForeignKey('Contract')
    class Meta:
        app_label = "crm"
        verbose_name = _('Phone Address For Contracts')
        verbose_name_plural = _('Phone Address For Contracts')
    def __str__(self):
        return str(self.phone)
class EmailAddressForContract(EmailAddress):
    # Email address attached to one contract, tagged with a purpose code.
    purpose = models.CharField(verbose_name=_("Purpose"), max_length=1, choices=PURPOSESADDRESSINCONTRACT)
    contract = models.ForeignKey('Contract')
    class Meta:
        app_label = "crm"
        verbose_name = _('Email Address For Contracts')
        verbose_name_plural = _('Email Address For Contracts')
    def __str__(self):
        return str(self.email)
class ContractPostalAddress(admin.StackedInline):
    """Admin inline for editing a Contract's postal addresses in place."""
    model = PostalAddressForContract
    extra = 1
    classes = ['collapse']
    fieldsets = (
        ('Basics', {
            # NOTE(review): these names (pre_name, address_line_1, ...) must
            # match the PostalAddress base model; PostalAddressForContract's
            # __str__ uses prename/addressline1 -- confirm which spelling the
            # base model actually defines.
            'fields': ('prefix',
                       'pre_name',
                       'name',
                       'address_line_1',
                       'address_line_2',
                       'address_line_3',
                       'address_line_4',
                       'zip_code',
                       'town',
                       'state',
                       'country',
                       'purpose'),
        }),
    )
    allow_add = True
class ContractPhoneAddress(admin.TabularInline):
    """Admin inline for editing a Contract's phone numbers in place."""
    model = PhoneAddressForContract
    extra = 1
    classes = ['collapse']
    fieldsets = (
        ('Basics', {
            'fields': ('phone', 'purpose',)
        }),
    )
    allow_add = True
class ContractEmailAddress(admin.TabularInline):
    """Admin inline for editing a Contract's e-mail addresses in place."""
    model = EmailAddressForContract
    extra = 1
    classes = ['collapse']
    fieldsets = (
        ('Basics', {
            'fields': ('email',
                       'purpose',)
        }),
    )
    allow_add = True
class Contract(models.Model):
    """Central CRM contract.

    Carries the defaults (customer, supplier, currency, template set) used
    when follow-up documents (quotes, invoices, purchase orders) are created
    from it, plus audit fields stamped by the admin.
    """
    staff = models.ForeignKey('auth.User',
                              limit_choices_to={'is_staff': True},
                              verbose_name=_("Staff"),
                              related_name="db_relcontractstaff",
                              blank=True,
                              null=True)
    description = models.TextField(verbose_name=_("Description"))
    default_customer = models.ForeignKey("Customer",
                                         verbose_name=_("Default Customer"),
                                         null=True,
                                         blank=True)
    default_supplier = models.ForeignKey("Supplier",
                                         verbose_name=_("Default Supplier"),
                                         null=True,
                                         blank=True)
    default_currency = models.ForeignKey("Currency",
                                         verbose_name=_("Default Currency"),
                                         blank=False,
                                         null=False)
    default_template_set = models.ForeignKey("djangoUserExtension.TemplateSet",
                                             verbose_name=_("Default Template Set"), null=True, blank=True)
    date_of_creation = models.DateTimeField(verbose_name=_("Created at"),
                                            auto_now_add=True)
    last_modification = models.DateTimeField(verbose_name=_("Last modified"),
                                             auto_now=True)
    last_modified_by = models.ForeignKey('auth.User',
                                         limit_choices_to={'is_staff': True},
                                         verbose_name=_("Last modified by"),
                                         related_name="db_contractlstmodified")
    class Meta:
        app_label = "crm"
        verbose_name = _('Contract')
        verbose_name_plural = _('Contracts')
    def get_template_set(self, calling_model):
        """Return the template set matching *calling_model*'s class name.

        Raises TemplateSetMissingInContract when no default template set
        is configured on this contract.
        """
        if self.default_template_set:
            required_template_set = str(type(calling_model).__name__)
            return self.default_template_set.get_template_set(required_template_set)
        else:
            raise TemplateSetMissingInContract("The Contract has no Default Template Set selected")
    def create_from_reference(self, calling_model, staff):
        """Initialize and save this contract from a customer and a staff user.

        *calling_model* becomes the default customer; currency and template
        set are taken from the staff member's UserExtension.
        """
        staff_user_extension = UserExtension.get_user_extension(staff.id)
        self.default_customer = calling_model
        self.default_currency = staff_user_extension.defaultCurrency
        self.default_template_set = staff_user_extension.defaultTemplateSet
        self.last_modified_by = staff
        self.staff = staff
        self.save()
        return self
    def create_invoice(self):
        # Returns a new Invoice pre-populated from this contract.
        invoice = Invoice()
        invoice.create_from_reference(self)
        return invoice
    def create_quote(self):
        # Returns a new Quote pre-populated from this contract.
        quote = Quote()
        quote.create_from_reference(self)
        return quote
    def create_purchase_order(self):
        # Returns a new PurchaseOrder pre-populated from this contract.
        purchase_order = PurchaseOrder()
        purchase_order.create_from_reference(self)
        return purchase_order
    def __str__(self):
        return _("Contract") + " " + str(self.id)
class OptionContract(admin.ModelAdmin):
    """Django admin configuration for Contract.

    Provides the list/filter/search layout plus admin actions that create
    follow-up documents (quotes, invoices, purchase orders, ...) for every
    selected contract.
    """
    list_display = ('id',
                    'description',
                    'default_customer',
                    'default_supplier',
                    'staff',
                    'default_currency',
                    'date_of_creation',
                    'last_modification',
                    'last_modified_by')
    list_display_links = ('id',)
    list_filter = ('default_customer',
                   'default_supplier',
                   'staff',
                   'default_currency')
    ordering = ('id', )
    # NOTE(review): 'contract' is not a visible field of Contract -- confirm
    # this search field is valid, otherwise admin search will raise.
    search_fields = ('id',
                     'contract')
    fieldsets = (
        (_('Basics'), {
            'fields': ('description',
                       'default_customer',
                       'staff',
                       'default_supplier',
                       'default_currency',
                       'default_template_set')
        }),
    )
    inlines = [ContractPostalAddress,
               ContractPhoneAddress,
               ContractEmailAddress,
               InlineQuote,
               InlineInvoice,
               InlineGenericProjectLink]
    pluginProcessor = PluginProcessor()
    inlines.extend(pluginProcessor.getPluginAdditions("contractInlines"))

    def _create_document(self, request, queryset, document_class):
        """Shared body of all 'create <document>' admin actions.

        Creates one document of *document_class* per selected contract and
        returns the last redirect response.  Returns None for an empty
        selection (the previous per-action loops raised UnboundLocalError
        by returning an unassigned variable in that case).
        """
        from koalixcrm.crm.views.newdocument import CreateNewDocumentView
        response = None
        for obj in queryset:
            response = CreateNewDocumentView.create_new_document(
                self, request, obj, document_class,
                ("/admin/crm/" + obj.__class__.__name__.lower() + "/"))
        return response

    def create_quote(self, request, queryset):
        return self._create_document(request, queryset,
                                     koalixcrm.crm.documents.quote.Quote)
    create_quote.short_description = _("Create Quote")

    def create_invoice(self, request, queryset):
        return self._create_document(request, queryset,
                                     koalixcrm.crm.documents.invoice.Invoice)
    create_invoice.short_description = _("Create Invoice")

    def create_purchase_confirmation(self, request, queryset):
        return self._create_document(request, queryset,
                                     koalixcrm.crm.documents.purchaseconfirmation.PurchaseConfirmation)
    create_purchase_confirmation.short_description = _("Create Purchase Confirmation")

    def create_delivery_note(self, request, queryset):
        return self._create_document(request, queryset,
                                     koalixcrm.crm.documents.deliverynote.DeliveryNote)
    create_delivery_note.short_description = _("Create Delivery note")

    def create_payment_reminder(self, request, queryset):
        return self._create_document(request, queryset,
                                     koalixcrm.crm.documents.paymentreminder.PaymentReminder)
    create_payment_reminder.short_description = _("Create Payment Reminder")

    def create_purchase_order(self, request, queryset):
        return self._create_document(request, queryset,
                                     koalixcrm.crm.documents.purchaseorder.PurchaseOrder)
    create_purchase_order.short_description = _("Create Purchase Order")

    def save_model(self, request, obj, form, change):
        """Stamp audit fields: always record the editor, and record the
        owning staff member only on first save (both branches previously
        set last_modified_by identically)."""
        obj.last_modified_by = request.user
        if not change:
            obj.staff = request.user
        obj.save()

    actions = ['create_quote', 'create_invoice', 'create_purchase_order']
    pluginProcessor = PluginProcessor()
    actions.extend(pluginProcessor.getPluginAdditions("contractActions"))
class ContractJSONSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer exposing a minimal JSON view (id, description) of a Contract."""
    class Meta:
        model = Contract
        fields = ('id',
                  'description')
| scaphilo/koalixcrm | koalixcrm/crm/documents/contract.py | Python | bsd-3-clause | 11,821 | 0.003215 |
""" -*- coding: utf-8 -*- """
from python2awscli import bin_aws
from python2awscli.error import AWSNotFound, ParseError, AWSDuplicate
from python2awscli import must
class BaseSecurityGroup(object):
    """Idempotent wrapper around an AWS EC2 Security Group.

    On construction the group is looked up (created if missing) and its
    ingress/egress rules are reconciled so AWS ends up matching exactly the
    requested ``inbound``/``outbound`` rule lists.  All AWS calls go through
    the ``bin_aws`` CLI shim.
    """
    def __init__(self, name, region, vpc, description, inbound=None, outbound=None):
        """
        :param name: String, name of SG
        :param region: String, AWS region
        :param vpc: String, IP of the VPC this SG belongs to
        :param description: String
        :param inbound: List of dicts, IP Permissions that should exist
        :param outbound: List of dicts, IP Permissions that should exist
        """
        self.id = None
        self.name = name
        self.region = region
        self.vpc = vpc
        self.description = description
        self.IpPermissions = []
        self.IpPermissionsEgress = []
        self.owner = None
        self.changed = False
        try:
            self._get()
        except AWSNotFound:
            self._create()
        self._merge_rules(must.be_list(inbound), self.IpPermissions)
        self._merge_rules(must.be_list(outbound), self.IpPermissionsEgress, egress=True)
        # Re-read from AWS so the cached rule lists reflect what we changed.
        if self.changed:
            self._get()
    def _break_out(self, existing):
        """
        Undo AWS's rule flattening so we can do simple 'if rule in existing' logic later.
        :param existing: List of SG rules as dicts.
        :return: List of SG rules as dicts.
        """
        spool = list()
        for rule in existing:
            # One output rule per IP range, with group pairs emptied...
            for ip in rule['IpRanges']:
                copy_of_rule = rule.copy()
                copy_of_rule['IpRanges'] = [ip]
                copy_of_rule['UserIdGroupPairs'] = []
                spool.append(copy_of_rule)
            # ...and one per SG group pair, with IP ranges emptied.
            for group in rule['UserIdGroupPairs']:
                copy_of_rule = rule.copy()
                copy_of_rule['IpRanges'] = []
                copy_of_rule['UserIdGroupPairs'] = [group]
                spool.append(copy_of_rule)
        return spool
    def _merge_rules(self, requested, active, egress=False):
        """
        Add every requested rule that is missing and revoke every active
        rule that was not requested, so AWS converges on *requested*.
        :param requested: List of dicts, IP Permissions that should exist
        :param active: List of dicts, IP Permissions that already exist
        :param egress: Bool, addressing outbound rules or not?
        :return: Bool
        """
        if not isinstance(requested, list):
            raise ParseError(
                'SecurityGroup {0}, need a list of dicts, instead got "{1}"'.format(self.name, requested))
        for rule in requested:
            if rule not in active:
                self._add_rule(rule, egress)
        for active_rule in active:
            if active_rule not in requested:
                self._rm_rule(active_rule, egress)
        return True
    def _add_rule(self, ip_permissions, egress):
        """
        Authorize one ingress (or egress) rule on the group.
        :param ip_permissions: Dict of IP Permissions
        :param egress: Bool
        :return: Bool
        """
        direction = 'authorize-security-group-ingress'
        if egress:
            direction = 'authorize-security-group-egress'
        # NOTE(review): repr + quote swap is a crude dict->JSON conversion;
        # it breaks if any value contains an apostrophe (json.dumps is safer).
        command = ['ec2', direction,
                   '--region', self.region,
                   '--group-id', self.id,
                   '--ip-permissions', str(ip_permissions).replace("'", '"')
                   ]
        bin_aws(command)
        print('Authorized: {0}'.format(ip_permissions))  # TODO: Log(...)
        self.changed = True
        return True
    def _rm_rule(self, ip_permissions, egress):
        """
        Revoke one ingress (or egress) rule from the group.
        :param ip_permissions: Dict of IP Permissions
        :param egress: Bool
        :return: Bool
        """
        direction = 'revoke-security-group-ingress'
        if egress:
            direction = 'revoke-security-group-egress'
        # NOTE(review): same repr->JSON hack as _add_rule; see note there.
        command = ['ec2', direction,
                   '--region', self.region,
                   '--group-id', self.id,
                   '--ip-permissions', str(ip_permissions).replace("'", '"')
                   ]
        bin_aws(command)
        print('Revoked: {0}'.format(ip_permissions))  # TODO: Log(...)
        self.changed = True
        return True
    def _create(self):
        """
        Create a Security Group
        :return: Bool (False when the group already existed)
        """
        # AWS grants all new SGs this default outbound rule "This is pro-human & anti-machine behavior."
        default_egress = {
            'Ipv6Ranges': [],
            'PrefixListIds': [],
            'IpRanges': [{'CidrIp': '0.0.0.0/0'}],
            'UserIdGroupPairs': [], 'IpProtocol': '-1'
        }
        command = [
            'ec2', 'create-security-group',
            '--region', self.region,
            '--group-name', self.name,
            '--description', self.description,
            '--vpc-id', self.vpc
        ]
        try:
            self.id = bin_aws(command, key='GroupId')
        except AWSDuplicate:
            return False  # OK if it already exists.
        print('Created {0}'.format(command))  # TODO: Log(...)
        self.IpPermissions = []
        self.IpPermissionsEgress = [default_egress]
        self.changed = True
        return True
    def _get(self):
        """
        Get information about Security Group from AWS and update self
        :return: Bool
        """
        command = ['ec2', 'describe-security-groups', '--region', self.region, '--group-names', self.name]
        result = bin_aws(command, key='SecurityGroups', max=1)  # will raise NotFound if empty
        me = result[0]
        self.id = me['GroupId']
        self.owner = me['OwnerId']
        # Un-flatten the rule lists for easy membership tests in _merge_rules.
        self.IpPermissions = self._break_out(me['IpPermissions'])
        self.IpPermissionsEgress = self._break_out(me['IpPermissionsEgress'])
        print('Got {0}'.format(command))  # TODO: Log(...)
        return True
    def _delete(self):
        """
        Delete myself by my own id.
        As of 20170114 no other methods call me. You must do `foo._delete()`
        :return: Bool
        """
        command = ['ec2', 'delete-security-group', '--region', self.region,
                   # '--dry-run',
                   '--group-id', self.id
                   ]
        bin_aws(command, decode_output=False)
        print('Deleted {0}'.format(command))  # TODO: Log(...)
        return True
| jhazelwo/python-awscli | python2awscli/model/securitygroup.py | Python | mit | 6,235 | 0.001123 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date
import tornado.escape
import tornado.ioloop
import tornado.web
import urllib
import mimetypes
import re
import threading
from urllib2 import quote
from sqlalchemy import desc
from sqlalchemy.orm import joinedload,subqueryload,aliased
from sqlalchemy.sql.expression import func, select
import json
import pprint
import mimetypes
from PIL import Image
try:
from PIL import WebPImagePlugin
except:
pass
import StringIO
import gzip
import dateutil.parser
import logging
import logging.handlers
import imghdr
import random
import signal
import sys
import socket
import webbrowser
import time
from comicapi.comicarchive import *
import csversion
import utils
from database import *
from monitor import Monitor
from config import ComicStreamerConfig
from folders import AppFolders
from options import Options
from bonjour import BonjourThread
from bookmark import Bookmark
from blacklist import Blacklist
from library import Library
# Register a WebP detector with imghdr for Python versions that lack one.
def my_test_webp(h, f):
    """imghdr test: return 'webp' when *h* holds a RIFF/WEBP header."""
    if h[:4] == b'RIFF' and h[8:12] == b'WEBP':
        return 'webp'
    return None
imghdr.tests.append(my_test_webp)
# Suffix usernames so even a blank username yields a non-empty cookie value.
def fix_username(username):
    """Return *username* with the fixed 'ComicStreamer' suffix appended."""
    return "{0}ComicStreamer".format(username)
def custom_get_current_user(handler):
    """Read the signed 'user' cookie; normalize non-empty values via fix_username."""
    cookie_value = handler.get_secure_cookie("user")
    return fix_username(cookie_value) if cookie_value else cookie_value
# you can change default root here :-)
def deviceroot(s):
    """Pick the template directory for the requesting device.

    Every recognized device class currently maps to the same "default/"
    skin; the User-Agent sniffing is kept so per-device skins can be
    re-enabled later.
    """
    agent = s.request.headers["User-Agent"]
    for pattern in ('(iPhone|iPod).*', '(Android).*', '(iPad).*'):
        if re.search(pattern, agent):
            return "default/"
    return "default/"
class BaseHandler(tornado.web.RequestHandler):
    """Common base for ComicStreamer handlers: exposes shared application
    objects as properties and wires in cookie-based authentication."""
    @property
    def webroot(self):
        # URL prefix under which the app is mounted.
        return self.application.webroot
    @property
    def library(self):
        # Shared comic Library instance owned by the application.
        return self.application.library
    @property
    def port(self):
        return self.application.port
    def get_current_user(self):
        # Delegate to the module-level helper shared with non-handler code.
        return custom_get_current_user(self)
class GenericAPIHandler(BaseHandler):
    """Base class for API endpoints; adds optional api_key enforcement."""
    def validateAPIKey(self):
        """Check the request's 'api_key' argument against the configured key.

        Returns True on a match, False when API-key security is disabled,
        and raises HTTPError(400) on a mismatch.
        """
        if self.application.config['security']['use_api_key']:
            api_key = self.get_argument(u"api_key", default="")
            if api_key == self.application.config['security']['api_key']:
                return True
            else:
                raise tornado.web.HTTPError(400)
        return False
class JSONResultAPIHandler(GenericAPIHandler):
    """Base class for API handlers that return JSON result sets.

    Provides helpers to set the JSON content type and to translate the
    standard query-string arguments (paging, filtering, ordering) into
    SQLAlchemy query modifications.
    """
    def setContentType(self):
        """Mark the response as UTF-8 JSON."""
        self.add_header("Content-type", "application/json; charset=UTF-8")

    def processPagingArgs(self, query):
        """Apply the 'per_page' and 'offset' request arguments to *query*.

        Returns (query, total_results); total_results is the distinct row
        count before paging, or None when no 'per_page' was supplied.
        Non-numeric values are silently ignored (best-effort paging).
        """
        per_page = self.get_argument(u"per_page", default=None)
        offset = self.get_argument(u"offset", default=None)
        # offset and max_results should be processed last
        total_results = None
        if per_page is not None:
            total_results = query.distinct().count()
            try:
                limit = int(per_page)  # renamed from 'max': don't shadow the builtin
                if total_results > limit:
                    query = query.limit(limit)
            except Exception:
                pass
        if offset is not None:
            try:
                query = query.offset(int(offset))
            except Exception:
                pass
        return query, total_results

    def processComicQueryArgs(self, query):
        """Apply all comic filter/order request arguments to *query*.

        Supports keyphrase search across many columns, scalar and list
        (relationship) filters, "person:role" credit filters, date-range
        filters, and an 'order' argument ('-' prefix for descending).
        """
        def hasValue(obj):
            return obj is not None and obj != ""

        keyphrase_filter = self.get_argument(u"keyphrase", default=None)
        series_filter = self.get_argument(u"series", default=None)
        path_filter = self.get_argument(u"path", default=None)
        folder_filter = self.get_argument(u"folder", default="")
        title_filter = self.get_argument(u"title", default=None)
        start_filter = self.get_argument(u"start_date", default=None)
        end_filter = self.get_argument(u"end_date", default=None)
        added_since = self.get_argument(u"added_since", default=None)
        modified_since = self.get_argument(u"modified_since", default=None)
        lastread_since = self.get_argument(u"lastread_since", default=None)
        order = self.get_argument(u"order", default=None)
        character = self.get_argument(u"character", default=None)
        team = self.get_argument(u"team", default=None)
        location = self.get_argument(u"location", default=None)
        storyarc = self.get_argument(u"storyarc", default=None)
        alternateseries = self.get_argument(u"alternateseries", default=None)
        volume = self.get_argument(u"volume", default=None)
        publisher = self.get_argument(u"publisher", default=None)
        language = self.get_argument(u"language", default=None)
        credit_filter = self.get_argument(u"credit", default=None)
        tag = self.get_argument(u"tag", default=None)
        genre = self.get_argument(u"genre", default=None)

        if folder_filter != "":
            # Normalize so the stored folder paths compare consistently.
            folder_filter = os.path.normcase(os.path.normpath(folder_filter))

        # 'credit' arrives as "person" or "person:role".
        person = None
        role = None
        if hasValue(credit_filter):
            credit_info = credit_filter.split(":")
            if len(credit_info[0]) != 0:
                person = credit_info[0]
            if len(credit_info) > 1:
                role = credit_info[1]
        if hasValue(person):
            query = query.join(Credit).filter(Person.name.ilike(person.replace("*", "%"))).filter(Credit.person_id == Person.id)
            if role is not None:
                query = query.filter(Credit.role_id == Role.id).filter(Role.name.ilike(role.replace("*", "%")))

        if hasValue(keyphrase_filter):
            # '*' is the public wildcard; translate to SQL '%' and search broadly.
            keyphrase_filter = unicode(keyphrase_filter).replace("*", "%")
            keyphrase_filter = "%" + keyphrase_filter + "%"
            query = query.filter( Comic.series.ilike(keyphrase_filter)
                                | Comic.alternateseries_raw.any(AlternateSeries.name.ilike(keyphrase_filter))
                                | Comic.title.ilike(keyphrase_filter)
                                | Comic.publisher.ilike(keyphrase_filter)
                                | Comic.language.ilike(keyphrase_filter)
                                | Comic.path.ilike(keyphrase_filter)
                                | Comic.comments.ilike(keyphrase_filter)
                                | Comic.characters_raw.any(Character.name.ilike(keyphrase_filter))
                                | Comic.teams_raw.any(Team.name.ilike(keyphrase_filter))
                                | Comic.generictags_raw.any(GenericTag.name.ilike(keyphrase_filter))
                                | Comic.locations_raw.any(Location.name.ilike(keyphrase_filter))
                                | Comic.storyarcs_raw.any(StoryArc.name.ilike(keyphrase_filter))
                                | Comic.persons_raw.any(Person.name.ilike(keyphrase_filter))
                                )

        def addQueryOnScalar(query, obj_prop, filt):
            # Case-insensitive match on a plain column, '*' wildcards allowed.
            if hasValue(filt):
                return query.filter(obj_prop.ilike(unicode(filt).replace("*", "%")))
            return query

        def addQueryOnList(query, obj_list, list_prop, filt):
            # Case-insensitive match on a related collection's name column.
            if hasValue(filt):
                return query.filter(obj_list.any(list_prop.ilike(unicode(filt).replace("*", "%"))))
            return query

        query = addQueryOnScalar(query, Comic.series, series_filter)
        query = addQueryOnScalar(query, Comic.title, title_filter)
        query = addQueryOnScalar(query, Comic.path, path_filter)
        query = addQueryOnScalar(query, Comic.folder, folder_filter)
        query = addQueryOnScalar(query, Comic.publisher, publisher)
        query = addQueryOnScalar(query, Comic.language, language)
        query = addQueryOnList(query, Comic.alternateseries_raw, AlternateSeries.name, alternateseries)
        query = addQueryOnList(query, Comic.characters_raw, Character.name, character)
        query = addQueryOnList(query, Comic.generictags_raw, GenericTag.name, tag)
        query = addQueryOnList(query, Comic.teams_raw, Team.name, team)
        query = addQueryOnList(query, Comic.locations_raw, Location.name, location)
        query = addQueryOnList(query, Comic.storyarcs_raw, StoryArc.name, storyarc)
        query = addQueryOnList(query, Comic.genres_raw, Genre.name, genre)

        if hasValue(volume):
            try:
                query = query.filter(Comic.volume == int(volume))
            except Exception:
                pass

        # Date-range filters are best-effort: unparseable dates are ignored.
        if hasValue(start_filter):
            try:
                query = query.filter(Comic.date >= dateutil.parser.parse(start_filter))
            except Exception:
                pass
        if hasValue(end_filter):
            try:
                query = query.filter(Comic.date <= dateutil.parser.parse(end_filter))
            except Exception:
                pass
        if hasValue(modified_since):
            try:
                dt = dateutil.parser.parse(modified_since)
                # BUG FIX: previously filtered an undefined 'resultset'
                # (the NameError was swallowed by the bare except), so the
                # modified_since argument never had any effect.
                query = query.filter(Comic.mod_ts >= dt)
            except Exception:
                pass
        if hasValue(added_since):
            try:
                query = query.filter(Comic.added_ts >= dateutil.parser.parse(added_since))
            except Exception:
                pass
        if hasValue(lastread_since):
            try:
                dt = dateutil.parser.parse(lastread_since)
                query = query.filter(Comic.lastread_ts >= dt, Comic.lastread_ts != "")
            except Exception:
                pass

        # ATB temp hack to cover "slicing" bug where
        # if no order specified, the child collections
        # get chopped off sometimes
        if not hasValue(order):
            order = "id"
        if hasValue(order):
            order_desc = order[0] == "-"
            if order_desc:
                order = order[1:]
            order_key = {
                "id": Comic.id,
                "series": Comic.series,
                "modified": Comic.mod_ts,
                "added": Comic.added_ts,
                "lastread": Comic.lastread_ts,
                "volume": Comic.volume,
                "issue": Comic.issue_num,
                "date": Comic.date,
                "publisher": Comic.publisher,
                "language": Comic.language,
                "title": Comic.title,
                "path": Comic.path,
            }.get(order)
            if order_key is not None:
                if order_desc:
                    order_key = order_key.desc()
                query = query.order_by(order_key)
        return query
class ZippableAPIHandler(JSONResultAPIHandler):
    """JSON API handler that can optionally gzip its response body."""
    def writeResults(self, json_data):
        """Write *json_data*; gzip-compress it when a 'gzip' argument is present."""
        self.setContentType()
        if self.get_argument(u"gzip", default=None) is not None:
            self.add_header("Content-Encoding","gzip")
            # TODO: make sure browser can handle gzip?
            zbuf = StringIO.StringIO()
            zfile = gzip.GzipFile(mode = 'wb', fileobj = zbuf, compresslevel = 9)
            zfile.write(json.dumps(json_data))
            zfile.close()
            self.write(zbuf.getvalue())
        else:
            self.write(json_data)
class ServerAPIHandler(GenericAPIHandler):
    """Administrative endpoint: restart/stop the server, rebuild the DB,
    or clear the page cache, selected by the 'cmd' argument."""
    def get(self):
        self.validateAPIKey()
        cmd = self.get_argument(u"cmd", default=None)
        if cmd == "restart":
            logging.info("Server: Restart")
            self.application.restart()
        elif cmd == "reset":
            logging.info("Server: Rebuild Database")
            self.application.rebuild()
        elif cmd == "stop":
            logging.info("Server: Stop")
            self.application.shutdown()
        elif cmd == "cache":
            logging.info("Server: Clear Pages Cache")
            self.application.library.cache_clear()
class ImageAPIHandler(GenericAPIHandler):
    """Base for endpoints that return raw image data."""
    def setContentType(self, image_data):
        # Sniff the actual format from the bytes so the Content-type header
        # matches what is served.
        imtype = imghdr.what(StringIO.StringIO(image_data))
        self.add_header("Content-type","image/{0}".format(imtype))
class VersionAPIHandler(JSONResultAPIHandler):
    """Reports the server version and build date as JSON."""
    def get(self):
        self.validateAPIKey()
        response = { 'version': self.application.version,
                    'last_build':  date.today().isoformat() }
        self.setContentType()
        self.write(response)
class DBInfoAPIHandler(JSONResultAPIHandler):
    """Reports database and page-cache statistics as JSON."""
    def get(self):
        self.validateAPIKey()
        stats = self.library.getStats()
        # mysql_active comes from the database module (star import).
        if mysql_active:
            s = "MySQL"
        else:
            s = "SQLite"
        response = {'id' : stats['uuid'],
                    'last_updated' : stats['last_updated'].isoformat(),
                    'created' : stats['created'].isoformat(),
                    'comic_count' : stats['total'],
                    'series_count' : stats['series'],
                    'artists_count' : stats['persons'],
                    'cache_active' : self.library.cache_active,
                    'cache_filled' : self.library.cache_filled / 1048576,  # bytes -> MB
                    'cache_pages' : len(self.library.cache_list),
                    'cache_miss' : self.library.cache_miss,
                    'cache_hit' : self.library.cache_hit,
                    'cache_discard' : self.library.cache_discard,
                    'cache_max' : self.library.cache_maxsize,
                    'db_engine' : s,
                    'db_scheme' : SCHEMA_VERSION
                }
        self.setContentType()
        self.write(response)
class ScanStatusAPIHandler(JSONResultAPIHandler):
    """Reports the folder-scan monitor's current status as JSON."""
    def get(self):
        self.validateAPIKey()
        status = self.application.monitor.status
        detail = self.application.monitor.statusdetail
        last_complete = self.application.monitor.scancomplete_ts
        response = { 'status': status,
                     'detail': detail,
                     'last_complete': last_complete,
                     # current UTC time in milliseconds since the epoch
                     'current_time': int(time.mktime(datetime.utcnow().timetuple()) * 1000),
                   }
        self.setContentType()
        self.write(response)
class ComicListAPIHandler(ZippableAPIHandler):
    """Returns the (optionally filtered, paged, and gzipped) comic list as JSON."""
    def get(self):
        self.validateAPIKey()
        # Every supported filter argument; absent ones stay None.
        criteria_args = [
            u"keyphrase", u"series", u"path", u"folder", u"title", u"start_date",
            u"end_date", u"added_since", u"modified_since", u"lastread_since",
            u"order", u"character", u"team", u"location", u"storyarc", u"volume",
            u"publisher", u"language", u"credit", u"tag", u"genre", u"alternateseries"
        ]
        criteria = {key: self.get_argument(key, default=None) for key in criteria_args}
        paging = {
            'per_page': self.get_argument(u"per_page", default=None),
            'offset': self.get_argument(u"offset", default=None)
        }
        # Library.list applies filtering and paging and returns the total count.
        resultset, total_results = self.library.list(criteria, paging)
        json_data = resultSetToJson(resultset, "comics", total_results)
        self.writeResults(json_data)
class DeletedAPIHandler(ZippableAPIHandler):
    """Returns comics deleted since an optional 'since' timestamp as JSON."""
    def get(self):
        self.validateAPIKey()
        since_filter = self.get_argument(u"since", default=None)
        resultset = self.library.getDeletedComics(since_filter)
        json_data = resultSetToJson(resultset, "deletedcomics")
        self.writeResults(json_data)
class ComicListBrowserHandler(BaseHandler):
    """Renders the HTML comic browser page (device-specific template)."""
    @tornado.web.authenticated
    def get(self):
        # 'entity_src' lets the caller point the browser at a filtered API URL.
        entity_src = self.get_argument(u"entity_src", default=None)
        if entity_src is not None:
            src=entity_src
        else:
            default_src=self.webroot + "/comics"
            arg_string = ""
            ##if '?' in self.request.uri:
            #    arg_string = '?'+self.request.uri.split('?',1)[1]
            src = default_src + arg_string
        self.render(deviceroot(self)+"browser.html",
                    src=src,
                    api_key=self.application.config['security']['api_key']
                    )
class FoldersBrowserHandler(BaseHandler):
    """Renders the HTML folder-browsing page (device-specific template)."""
    @tornado.web.authenticated
    def get(self,args):
        entity_src = self.get_argument(u"entity_src", default=None)
        if entity_src is not None:
            src=entity_src
        else:
            default_src=self.webroot + "/comics"
            arg_string = ""
            ##if '?' in self.request.uri:
            #    arg_string = '?'+self.request.uri.split('?',1)[1]
            src = default_src + arg_string
        if args is None:
            args = "/"
        # Collapse duplicated slashes so the template gets a clean path.
        args = utils.collapseRepeats(args, "/")
        self.render(deviceroot(self)+"folders.html",
                    args=args,src=src,
                    api_key = self.application.config['security']['api_key'])
class EntitiesBrowserHandler(BaseHandler):
    """Renders the HTML entity-browsing page (device-specific template)."""
    @tornado.web.authenticated
    def get(self,args):
        if args is None:
            args = ""
        arg_string = args
        #if '/' in args:
        #    arg_string = args.split('/',1)[1]
        #print arg_string
        #if len(arg_string) == 0:
        #    arg_string = "?api_key=" + self.application.config['security']['api_key']
        #else:
        #    arg_string = arg_string + "&api_key=" + self.application.config['security']['api_key']
        self.render(deviceroot(self)+"entities.html",
                    args=arg_string,
                    api_key = self.application.config['security']['api_key'])
class ComicAPIHandler(JSONResultAPIHandler):
    """Returns a single comic's metadata as JSON."""
    def get(self, id):
        self.validateAPIKey()
        # Wrapped in a list so the shared resultSetToJson serializer applies.
        result = [self.library.getComic(id)]
        self.setContentType()
        self.write(resultSetToJson(result, "comics"))
class ComicBookmarkAPIHandler(JSONResultAPIHandler):
    """Records the reader's bookmark (last page read) for a comic."""
    def get(self, comic_id, pagenum):
        self.validateAPIKey()
        self.application.bookmark.setBookmark(comic_id, pagenum)
        self.setContentType()
        response = { 'status': 0 }
        self.write(response)
class ComicBlacklistAPIHandler(JSONResultAPIHandler):
    """Adds a comic page to the blacklist."""
    def get(self, comic_id, pagenum):
        self.validateAPIKey()
        self.application.blacklist.setBlacklist(comic_id, pagenum)
        self.setContentType()
        response = { 'status': 0 }
        self.write(response)
class ComicFavoritesAPIHandler(JSONResultAPIHandler):
    """Favorites endpoint -- currently a stub that always reports success."""
    def get(self, comic_id, pagenum):
        self.validateAPIKey()
        # "HERE FIX: does not work need path...
        """if state == 'clear':
            self.application.library.remove(comic_id)
        else:
            self.application.library(comic_id, pagenum)
        """
        self.setContentType()
        response = { 'status': 0 }
        self.write(response)
class ComicCacheAPIHandler(JSONResultAPIHandler):
    """Per-comic cache endpoint -- currently a stub that always reports success."""
    def get(self, comic_id, pagenum):
        self.validateAPIKey()
        # "HERE FIX: does not work need path...
        """if state == 'clear':
            self.application.library.remove(comic_id)
        else:
            self.application.library(comic_id, pagenum)
        """
        self.setContentType()
        response = { 'status': 0 }
        self.write(response)
class ComicPageAPIHandler(ImageAPIHandler):
    """Serves one page image of a comic, optionally scaled to 'max_height'."""
    def get(self, comic_id, pagenum):
        self.validateAPIKey()
        max_height = self.get_argument(u"max_height", default=None)
        image_data = self.library.getComicPage(comic_id, pagenum, True, max_height)
        self.setContentType(image_data)
        self.write(image_data)
class ThumbnailAPIHandler(ImageAPIHandler):
    """Serves a comic's cover thumbnail, falling back to a generic placeholder."""
    def get(self, comic_id):
        self.validateAPIKey()
        thumbnail = self.library.getComicThumbnail(comic_id)
        if thumbnail is None:
            # No thumbnail in the library: serve a resized placeholder cover.
            with open(AppFolders.missingPath("cover.png"), 'rb') as fd:
                data = fd.read()
            thumb = StringIO.StringIO()
            utils.resize(data, (400, 400), thumb)
            thumbnail = thumb.getvalue()
        # BUG FIX: ImageAPIHandler.setContentType() expects image bytes (it
        # sniffs the format with imghdr); the old call passed the string
        # 'image/jpg', which produced a bogus "image/None" header.
        self.setContentType(thumbnail)
        self.write(thumbnail)
class FileAPIHandler(GenericAPIHandler):
    """Streams the original comic archive file to the client as a download."""
    @tornado.web.asynchronous
    def get(self, comic_id):
        self.validateAPIKey()
        obj = self.library.getComic(comic_id)
        # NOTE(review): when the comic id is unknown this falls through and
        # finishes with an empty 200 response -- confirm a 404 isn't wanted.
        if obj is not None:
            (content_type, encoding) = mimetypes.guess_type(obj.path)
            if content_type is None:
                content_type = "application/octet-stream"
            self.add_header("Content-type", content_type)
            self.add_header("Content-Disposition", "attachment; filename=" + os.path.basename(obj.path))
            # stream response in chunks, cbr/z could be over 300MB in size!
            # TODO: check it doesn't buffer the response, it should send data chunk by chunk
            # Reads 40 MB per chunk and flushes each one to the client.
            with open(obj.path, 'rb') as f:
                while True:
                    data = f.read(40960 * 1024)
                    if not data:
                        break
                    self.write(data)
                    self.flush()
        self.finish()
class FolderAPIHandler(JSONResultAPIHandler):
    """Browses the configured comic folders as JSON.

    The URL path encodes an index into the configured root folder list
    followed by subfolder names, e.g. /folders/0/Marvel/2015.  Responds
    with the current path, its subfolders, and a comic count/link.
    """
    def get(self, args):
        self.validateAPIKey()
        if args is not None:
            args = urllib.unquote(args)
            arglist = args.split('/')
            arglist = filter(None, arglist)
            argcount = len(arglist)
        else:
            arglist = list()
            argcount = 0
        folder_list = self.application.config['general']['folder_list']
        response = {
                    'current' : "",
                    'folders' : [],
                    'comics' : {
                        'url_path' : "",
                        'count' : 0
                    }
                }
        if argcount == 0:
            # No path given: just list the configured root folders.
            for idx, val in enumerate(folder_list):
                item = {
                    'name': val,
                    'url_path' : self.webroot + "/folders/" + str(idx)
                    }
                response['folders'].append(item)
        else:
            try:
                # The first component must be a valid index into folder_list.
                folder_idx = int(arglist[0])
                if folder_idx >= len(folder_list):
                    raise Exception
                # Build up a folder by combining the root folder with the rest.
                path = os.path.join(folder_list[folder_idx], *arglist[1:])
                # Validate *that* folder actually exists.
                if not os.path.exists(path):
                    raise Exception
                response['current'] = path
                # Create a list of subfolders.
                for o in os.listdir(path):
                    if os.path.isdir(os.path.join(path, o)):
                        sub_path = u""+self.webroot+"/folders" + args + u"/" + o
                        sub_path = urllib.quote(sub_path.encode("utf-8"))
                        item = {
                            'name': o,
                            'url_path' : sub_path
                            }
                        response['folders'].append(item)
                # See if there are any comics directly in this folder.
                (ignore, total_results) = self.library.list({'folder': path}, {'per_page': 0, 'offset': 0})
                response['comics']['count'] = total_results
                comic_path = self.webroot + u"/comics?folder=" + urllib.quote(u"{0}".format(path).encode('utf-8'))
                response['comics']['url_path'] = comic_path
            except Exception as e:
                # BUG FIX: this previously caught only FloatingPointError, so
                # the plain Exception/ValueError raised above for a bad index
                # or missing folder surfaced as a 500 instead of a 404.
                logging.debug(e)
                raise tornado.web.HTTPError(404, "Unknown folder")
        self.setContentType()
        self.write(response)
class EntityAPIHandler(JSONResultAPIHandler):
    """API endpoint for browsing the library by "entity".

    The URL path alternates entity names and filter values:
    /entity1/filter1/entity2/filter2/...

    With an EVEN number of segments the response lists the remaining
    (not-yet-traversed) entity types together with a count of distinct
    values each has under the current filters.  With an ODD number it
    lists the values of the final entity — or full comic records when
    the final entity is "comics".
    """
    def get(self, args):
        # Reject callers without a valid API key before touching the DB.
        self.validateAPIKey()
        session = self.application.dm.Session()
        if args is None:
            args = ""
        arglist=args.split('/')
        # Drop empty segments produced by leading/trailing/double slashes.
        arglist = filter(None, arglist)
        argcount = len(arglist)
        arglist = [utils.unquote(a) for a in arglist]
        # Maps each public entity name to the ORM column used for
        # filtering/listing; 'comics' maps to the whole Comic class
        # because it yields full records rather than scalar values.
        entities = {
            'characters' : Character.name,
            'persons' : Person.name,
            'language' : Comic.language,
            'publishers' : Comic.publisher,
            'roles' : Role.name,
            'series': Comic.series,
            'volumes' : Comic.volume,
            'teams' : Team.name,
            'storyarcs' : StoryArc.name,
            'genres' : Genre.name,
            'locations' : Location.name,
            'generictags' : GenericTag.name,
            'comics' : Comic,
            'alternateseries' : AlternateSeries.name
        }
        #logging.debug("In EntityAPIHandler {0}".format(arglist))
        #/entity1/filter1/entity2/filter2...
        # validate all entities keys in args
        # (entity names are every other item, starting at index 0)
        for e in arglist[0::2]:
            if e not in entities:
                raise tornado.web.HTTPError(404, "Unknown entity:{0}".format(e))
        # look for duplicate entity names
        if len(arglist[0::2])!=len(set(arglist[0::2])):
            raise tornado.web.HTTPError(400, "Duplicate entity")
        # 'comics' returns full records, so it may only appear as the
        # last segment of the path
        if 'comics' in arglist[0::2] and arglist[-1] != "comics":
            raise tornado.web.HTTPError(400, "\"comics\" must be final entity")
        resp = ""
        # even number means listing entities
        if argcount % 2 == 0:
            name_list = [key for key in entities]
            # (remove already-traversed entities)
            for e in arglist[0::2]:
                try:
                    name_list.remove(e)
                except:
                    pass
            # Find out how many of each entity are left, and build a list of
            # dicts with name and count
            dict_list = []
            for e in name_list:
                # Count distinct values of entity `e` under the current
                # filters by pretending `e` is the final path segment.
                tmp_arg_list = list()
                tmp_arg_list.extend(arglist)
                tmp_arg_list.append(e)
                query = self.buildQuery(session, entities, tmp_arg_list)
                e_dict = dict()
                e_dict['name'] = e
                #self.application.dm.engine.echo = True
                e_dict['count'] = query.distinct().count()
                #self.application.dm.engine.echo = False
                #print "----", e_dict, query
                dict_list.append(e_dict)
            #name_list = sorted(name_list)
            resp = {"entities" : dict_list}
            self.setContentType()
            self.write(resp)
            return
        # odd number means listing last entity VALUES
        else:
            entity = arglist[-1] # the final entity in the list
            query = self.buildQuery(session, entities, arglist)
            if entity == "comics":
                query = self.processComicQueryArgs(query)
                query, total_results = self.processPagingArgs(query)
                # Eager-load the comic's collections so serialization
                # doesn't trigger an N+1 query per comic.
                query = query.options(subqueryload('characters_raw'))
                query = query.options(subqueryload('storyarcs_raw'))
                query = query.options(subqueryload('alternateseries_raw'))
                query = query.options(subqueryload('locations_raw'))
                query = query.options(subqueryload('teams_raw'))
                #query = query.options(subqueryload('credits_raw'))
                query = query.options(subqueryload('generictags_raw'))
                query = query.all()
                resp = resultSetToJson(query, "comics", total_results)
            else:
                # Distinct, sorted scalar values of the final entity.
                resp = {entity : sorted(list(set([i[0] for i in query.all()])))}
            self.application.dm.engine.echo = False
        self.setContentType()
        self.write(resp)
    def buildQuery(self, session, entities, arglist):
        """
        Each entity-filter pair will be made into a separate query
        and they will be all intersected together

        The final arglist element is the entity whose values are being
        selected; every preceding (entity, value) pair contributes one
        filtered query, and the intersection of all of them is returned.
        """
        entity = arglist[-1]
        querylist = []
        #To build up the query, bridge every entity to a comic table
        querybase = session.query(entities[entity])
        # Only join through to Comic when there are filters to apply;
        # a single-element arglist means "list everything".
        if len(arglist) != 1:
            if entity == 'roles':
                querybase = querybase.join(Credit).join(Comic)
            if entity == 'persons':
                querybase = querybase.join(Credit).join(Comic)
            if entity == 'characters':
                querybase = querybase.join(comics_characters_table).join(Comic)
            if entity == 'teams':
                querybase = querybase.join(comics_teams_table).join(Comic)
            if entity == 'storyarcs':
                querybase = querybase.join(comics_storyarcs_table).join(Comic)
            if entity == 'alternateseries':
                querybase = querybase.join(comics_alternateseries_table).join(Comic)
            if entity == 'genres':
                querybase = querybase.join(comics_genres_table).join(Comic)
            if entity == 'locations':
                querybase = querybase.join(comics_locations_table).join(Comic)
            if entity == 'generictags':
                querybase = querybase.join(comics_generictags_table).join(Comic)
        #print "Result entity is====>", entity
        #iterate over list, 2 at a time, building query list,
        #print zip(arglist[0::2], arglist[1::2])
        for e,v in zip(arglist[0::2], arglist[1::2]):
            #print "--->",e,v
            query = querybase
            # Join from Comic out to the table of the filtering entity.
            # roles/persons share the Credit bridge table, so avoid
            # joining it twice when the result entity already did.
            if e == 'roles':
                if entity != 'persons':
                    query = query.join(Credit)
                query = query.join(Role)
            if e == 'persons':
                if entity != 'roles':
                    query = query.join(Credit)
                query = query.join(Person)
            if e == 'characters':
                query = query.join(comics_characters_table).join(Character)
            if e == 'teams':
                query = query.join(comics_teams_table).join(Team)
            if e == 'storyarcs':
                query = query.join(comics_storyarcs_table).join(StoryArc)
            if e == 'alternateseries':
                query = query.join(comics_alternateseries_table).join(AlternateSeries)
            if e == 'genres':
                query = query.join(comics_genres_table).join(Genre)
            if e == 'locations':
                query = query.join(comics_locations_table).join(Location)
            if e == 'generictags':
                query = query.join(comics_generictags_table).join(GenericTag)
            query = query.filter(entities[e]==v)
            querylist.append(query)
            #print query
        if len(querylist) == 0:
            finalquery = querybase
        else:
            finalquery = querylist[0].intersect(*querylist[1:])
        return finalquery
class ReaderHandler(BaseHandler):
    """Serves the in-browser comic reader page for a single comic."""

    @tornado.web.authenticated
    def get(self, comic_id):
        comic = self.library.getComic(comic_id)
        if comic is None:
            return
        # Prefer "Series #Issue [-- Title]" when metadata is available,
        # otherwise fall back to the archive file's base name.
        title = os.path.basename(comic.path)
        if comic.series is not None and comic.issue is not None:
            title = comic.series + u" #" + comic.issue
            if comic.title is not None:
                title = title + u" -- " + comic.title
        # Resume from the last-read page; first page if never opened.
        target_page = 0 if comic.lastread_page is None else comic.lastread_page
        self.render(deviceroot(self) + "comic.html",
                    title=title,
                    id=comic_id,
                    count=comic.page_count,
                    page=target_page,
                    api_key=self.application.config['security']['api_key'])

    def make_list(self, id, count):
        """Return a comma-terminated run of quoted 'page/N' entries,
        e.g. 'page/0','page/1', — suitable for splicing into JS."""
        return u"".join(u"'page/" + str(i) + u"'," for i in range(count))
class UnknownHandler(BaseHandler):
    """Fallback handler: renders the "missing page" template."""
    @tornado.web.authenticated
    def get(self):
        # Show the device-appropriate "missing" page with the app version.
        self.render(deviceroot(self)+"missing.html", version=self.application.version)
class MainHandler(BaseHandler):
    """Renders the landing page featuring a randomly chosen comic."""

    @tornado.web.authenticated
    def get(self):
        # NOTE: stats, recently-added/read lists and the roles list were
        # previously computed here but never passed to the template, so
        # those redundant database hits were removed.
        random_comic = self.library.randomComic()
        if random_comic is None:
            # Placeholder object so the template can always dereference
            # id/series/issue/title, even when the library is empty.
            # (The original used a bogus '' key where 'series' was meant.)
            random_comic = type('fakecomic', (object,),
                                {'id': 0, 'series': 'No Comics', 'issue': '', 'title': ''})()
        # Build a caption from issue and/or title:
        #   both -> "issue title"; only title -> "title"; only issue -> "issue".
        # (The original unconditionally overwrote the combined caption with
        # the bare title, making the concatenation dead code.)
        caption = u""
        if random_comic.issue is not None:
            caption = random_comic.issue
        if random_comic.title is not None:
            if random_comic.issue is not None:
                caption = caption + u" " + random_comic.title
            else:
                caption = random_comic.title
        self.render(deviceroot(self)+"index.html",
                random_comic=random_comic, random_caption=caption,
                api_key = self.application.config['security']['api_key']
                )
class ServerPageHandler(BaseHandler):
    """Renders the server status/administration page."""

    @tornado.web.authenticated
    def get(self):
        # Current server time in UTC milliseconds so the client can
        # display uptime / compute clock skew.
        now_ms = int(time.mktime(datetime.utcnow().timetuple()) * 1000)
        self.render(deviceroot(self) + "server.html",
                    server_time=now_ms,
                    api_key=self.application.config['security']['api_key'])
class RecentlyPageHandler(BaseHandler):
    """Shows the ten most recently added and most recently read comics."""

    @tornado.web.authenticated
    def get(self):
        added = list(self.library.recentlyAddedComics(10))
        read = list(self.library.recentlyReadComics(10))
        self.render(deviceroot(self) + "recently.html",
                    recently_added=added,
                    recently_read=read,
                    api_key=self.application.config['security']['api_key'])
class SearchPageHandler(BaseHandler):
    """Renders the search page with the list of available creator roles."""

    @tornado.web.authenticated
    def get(self):
        roles_list = [role.name for role in self.library.getRoles()]
        # (A random comic used to be fetched here but was never used by
        # the template, so that extra database hit was removed.)
        self.render(deviceroot(self)+"search.html",
                    roles = roles_list,
                    api_key = self.application.config['security']['api_key']
                    )
class GenericPageHandler(BaseHandler):
    """Renders an arbitrary named template if it exists, else the missing page."""

    @tornado.web.authenticated
    def get(self, page):
        template = deviceroot(self) + page + ".html"
        # Only serve templates that actually exist under the gui folder;
        # anything else falls through to the "missing" page.
        if os.path.isfile(AppFolders.appBase() + "/" + "gui" + "/" + template):
            self.render(template)
        else:
            self.render(deviceroot(self) + "missing.html", version=self.application.version)
class AboutPageHandler(BaseHandler):
    """Renders the about page.
    NOTE(review): the authentication decorator is commented out — confirm
    this page is intentionally public."""
    #@tornado.web.authenticated
    def get(self):
        self.render(deviceroot(self)+"about.html", version=self.application.version)
class APIPageHandler(BaseHandler):
    """Renders the API documentation page.
    NOTE(review): authentication is commented out yet the template is
    handed the server API key — confirm exposing it unauthenticated is
    intended."""
    #@tornado.web.authenticated
    def get(self):
        self.render(deviceroot(self)+"api.html", api_key=self.application.config['security']['api_key'])
class HelpPageHandler(BaseHandler):
    """Renders the help page.
    NOTE(review): authentication is commented out yet the template is
    handed the server API key — confirm exposing it unauthenticated is
    intended."""
    #@tornado.web.authenticated
    def get(self):
        self.render(deviceroot(self)+"help.html", api_key=self.application.config['security']['api_key'])
class LogPageHandler(BaseHandler):
    """Shows the application log file, newest lines first."""

    @tornado.web.authenticated
    def get(self):
        log_file = os.path.join(AppFolders.logs(), "ComicStreamer.log")
        # Read the whole log once and close the handle promptly (it was
        # previously left for the garbage collector to clean up), then
        # join in one pass instead of quadratic string concatenation.
        with open(log_file) as f:
            lines = f.readlines()
        logtxt = "".join(line.rstrip() + '\n' for line in reversed(lines))
        self.render(deviceroot(self)+"log.html",
                    logtxt=logtxt)
class ConfigPageHandler(BaseHandler):
fakepass = "T01let$tRe@meR"
def is_port_available(self,port):
host = '127.0.0.1'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
s.shutdown(2)
return False
except Exception as e:
print e
return True
def render_config(self, formdata, success="", failure=""):
#convert boolean to "checked" or ""
formdata['use_pdf'] = "checked" if formdata['use_pdf'] else ""
formdata['use_cbw'] = "checked" if formdata['use_cbw'] else ""
formdata['use_folders'] = "checked" if formdata['use_folders'] else ""
formdata['use_zd'] = "checked" if formdata['use_zd'] else ""
formdata['use_comicvine'] = "checked" if formdata['use_comicvine'] else ""
formdata['cbw_autodownload'] = "checked" if formdata['cbw_autodownload'] else ""
formdata['use_ebook'] = "checked" if formdata['use_ebook'] else ""
formdata['use_mutool'] = "checked" if formdata['use_mutool'] else ""
formdata['use_mudraw'] = "checked" if formdata['use_mudraw'] else ""
formdata['use_https'] = "checked" if formdata['use_https'] else ""
formdata['use_sqlite'] = "checked" if formdata['use_sqlite'] else ""
formdata['use_mysql'] = "checked" if formdata['use_mysql'] else ""
formdata['use_api_key'] = "checked" if formdata['use_api_key'] else ""
formdata['use_cache'] = "checked" if formdata['use_cache'] else ""
formdata['use_authentication'] = "checked" if formdata['use_authentication'] else ""
formdata['launch_client'] = "checked" if formdata['launch_client'] else ""
formdata['fingerprint'] = "checked" if formdata['fingerprint'] else ""
if ( self.application.config['security']['use_authentication'] ):
formdata['password'] = ConfigPageHandler.fakepass
formdata['password_confirm'] = ConfigPageHandler.fakepass
else:
formdata['password'] = ""
formdata['password_confirm'] = ""
self.render(deviceroot(self)+"settings.html",
formdata=formdata,
api_key = self.application.config['security']['api_key'],
success=success,
failure=failure)
@tornado.web.authenticated
def get(self):
formdata = dict()
formdata['use_cache'] = self.application.config['cache']['active']
formdata['cache_size'] = self.application.config['cache']['size']
formdata['cache_free'] = self.application.config['cache']['free']
formdata['cache_location'] = self.application.config['cache']['location']
formdata['port'] = self.application.config['web']['port']
formdata['secure_port'] = self.application.config['web.ssl']['port']
formdata['key_file'] = self.application.config['web.ssl']['key_file']
formdata['certificate_file'] = self.application.config['web.ssl']['certificate_file']
formdata['use_https'] = self.application.config['web.ssl']['active']
formdata['bind'] = self.application.config['web']['bind']
formdata['webroot'] = self.application.config['web']['webroot']
formdata['folders'] = "\n".join(self.application.config['general']['folder_list'])
formdata['use_authentication'] = self.application.config['security']['use_authentication']
formdata['username'] = self.application.config['security']['username']
formdata['password'] = ""
formdata['password_confirm'] = ""
formdata['use_api_key'] = self.application.config['security']['use_api_key']
formdata['api_key'] = self.application.config['security']['api_key']
formdata['launch_client'] = self.application.config['general']['launch_client']
formdata['fingerprint'] = self.application.config['general']['fingerprint']
formdata['use_mysql'] = self.application.config['database']['engine'] == 'mysql'
formdata['use_sqlite'] = self.application.config['database']['engine'] == 'sqlite'
formdata['sqlite_location'] = self.application.config['database.sqlite']['location']
formdata['sqlite_database'] = self.application.config['database.sqlite']['database']
formdata['mysql_database'] = self.application.config['database.mysql']['database']
formdata['mysql_username'] = self.application.config['database.mysql']['username']
formdata['mysql_password'] = utils.decode(self.application.config['general']['install_id'],self.application.config['database.mysql']['password'])
formdata['mysql_host'] = self.application.config['database.mysql']['host']
formdata['mysql_port'] = self.application.config['database.mysql']['port']
formdata['ebook_resolution'] = self.application.config['format.ebook']['resolution']
formdata['ebook_margin'] = self.application.config['format.ebook']['margin']
formdata['pdf_resolution'] = self.application.config['format.pdf']['resolution']
formdata['pdf_engine'] = self.application.config['format.pdf']['engine']
formdata['use_mudraw'] = formdata['pdf_engine'] == 'mudraw'
formdata['use_mutool'] = formdata['pdf_engine'] == 'mutool'
formdata['use_pdf2png'] = formdata['pdf_engine'] == 'pdf2png'
formdata['qpdf'] = self.application.config['format.pdf']['qpdf']
formdata['mudraw'] = self.application.config['format.pdf']['mudraw']
formdata['mutool'] = self.application.config['format.pdf']['mutool']
formdata['pdf2png'] = self.application.config['format.pdf']['pdf2png']
formdata['cbw_location'] = self.application.config['format.webcomic']['location']
formdata['use_zd'] = self.application.config['metadata.zilverendolfijn']['active']
formdata['use_comicvine'] = self.application.config['metadata.comicvine']['active']
formdata['comicvine_key'] = self.application.config['metadata.comicvine']['key']
formdata['comicvine_location'] = self.application.config['metadata.comicvine']['location']
formdata['zilverendolfijn_location'] = self.application.config['metadata.zilverendolfijn']['location']
formdata['use_folders'] = self.application.config['format.folders']['active']
formdata['use_cbw'] = self.application.config['format.webcomic']['active']
formdata['cbw_autodownload'] = self.application.config['format.webcomic']['auto_download']
formdata['use_pdf'] = self.application.config['format.pdf']['active']
formdata['use_ebook'] = self.application.config['format.ebook']['active']
formdata['calibre'] = self.application.config['format.ebook']['calibre']
formdata['ebook_cache_location'] = self.application.config['format.ebook']['location']
formdata['ebook_cache_free'] = self.application.config['format.ebook']['free']
formdata['ebook_cache_size'] = self.application.config['format.ebook']['size']
self.render_config(formdata)
@tornado.web.authenticated
def post(self):
formdata = dict()
formdata['folders'] = self.get_argument(u"folders", default="")
formdata['webroot'] = self.get_argument(u"webroot", default="")
formdata['bind'] = self.get_argument(u"bind", default="")
formdata['port'] = self.get_argument(u"port", default="")
formdata['secure_port'] = self.get_argument(u"secure_port", default="")
formdata['key_file'] = self.get_argument(u"key_file", default="")
formdata['certificate_file'] = self.get_argument(u"certificate_file", default="")
formdata['use_https'] = (len(self.get_arguments("use_https"))!=0)
formdata['cache_size'] = self.get_argument(u"cache_size", default="")
formdata['cache_free'] = self.get_argument(u"cache_free", default="")
formdata['cache_location'] = self.get_argument(u"cache_location", default="")
formdata['use_cache'] = (len(self.get_arguments("use_cache"))!=0)
formdata['use_authentication'] = (len(self.get_arguments("use_authentication"))!=0)
formdata['username'] = self.get_argument(u"username", default="")
formdata['password'] = self.get_argument(u"password", default="")
formdata['password_confirm'] = self.get_argument(u"password_confirm", default="")
formdata['use_api_key'] = (len(self.get_arguments("use_api_key"))!=0)
formdata['api_key'] = self.get_argument(u"api_key", default="")
formdata['launch_client'] = (len(self.get_arguments("launch_client"))!=0)
formdata['fingerprint'] = (len(self.get_arguments("fingerprint"))!=0)
formdata['db_engine'] = self.get_arguments("db_engine")[0]
formdata['use_mysql'] = formdata['db_engine'] == 'mysql'
formdata['use_sqlite'] = formdata['db_engine'] == 'sqlite'
formdata['mysql_username'] = self.get_argument(u"mysql_username", default="")
formdata['mysql_database'] = self.get_argument(u"mysql_database", default="")
formdata['mysql_host'] = self.get_argument(u"mysql_host", default="")
formdata['mysql_port'] = self.get_argument(u"mysql_port", default="")
formdata['mysql_password'] = self.get_argument(u"mysql_password", default="")
formdata['comicvine_key'] = self.get_argument(u"comicvine_location", default="")
formdata['comicvine_location'] = self.get_argument(u"comicvine_key", default="")
formdata['zilverendolfijn_location'] = self.get_argument(u"zilverendolfijn_location", default="")
formdata['sqlite_location'] = self.get_argument(u"sqlite_location", default="")
formdata['sqlite_database'] = self.get_argument(u"sqlite_database", default="")
formdata['pdf_resolution'] = self.get_argument(u"pdf_resolution", default="")
formdata['ebook_margin'] = self.get_argument(u"ebook_margin", default="")
formdata['ebook_resolution'] = self.get_argument(u"ebook_resolution", default="")
formdata['pdf_engine'] = self.get_argument(u"pdf_engine")
formdata['use_mutool'] = formdata['pdf_engine'] == 'mutool'
formdata['use_mudraw'] = formdata['pdf_engine'] == 'mudraw'
formdata['use_pdf2png'] = formdata['pdf_engine'] == 'pdf2png'
formdata['cbw_location'] = self.get_argument(u"cbw_location", default="")
formdata['mudraw'] = self.get_argument(u"mudraw", default="")
formdata['mutool'] = self.get_argument(u"mutool", default="")
formdata['pdf2png'] = self.get_argument(u"pdf2png", default="")
formdata['qpdf'] = self.get_argument(u"qpdf", default="")
formdata['use_pdf'] = (len(self.get_arguments("use_pdf"))!=0)
formdata['use_cbw'] = (len(self.get_arguments("use_cbw"))!=0)
formdata['use_folders'] = (len(self.get_arguments("use_folders"))!=0)
formdata['use_comicvine'] = (len(self.get_arguments("use_comicvine"))!=0)
formdata['use_zd'] = (len(self.get_arguments("use_zd"))!=0)
formdata['cbw_autodownload'] = (len(self.get_arguments("cbw_autodownload"))!=0)
formdata['use_ebook'] = (len(self.get_arguments("use_ebook"))!=0)
formdata['calibre'] = self.get_argument(u"calibre", default="")
formdata['ebook_cache_location'] = self.get_argument(u"ebook_cache_location", default="")
formdata['ebook_cache_free'] = self.get_argument(u"ebook_cache_free", default="")
formdata['ebook_cache_size'] = self.get_argument(u"ebook_cache_size", default="")
failure_str = ""
success_str = ""
failure_strs = list()
validated = False
old_folder_list = self.application.config['general']['folder_list']
new_folder_list = [os.path.normcase(os.path.abspath(os.path.normpath(unicode(a)))) for a in formdata['folders'].splitlines()]
try:
for i, f in enumerate(new_folder_list):
#validate folders exist
if not (os.path.exists(f) and os.path.isdir(f)):
failure_strs.append(u"Folder {0} doesn't exist.".format(f))
break
# check for repeat or contained
for j, f1 in enumerate(new_folder_list):
if i != j:
if f1 == f:
failure_strs.append(u"Can't have repeat folders.")
raise Exception
if f1.startswith(f + os.path.sep):
failure_strs.append(u"One folder can't contain another.")
raise Exception
except Exception:
pass
port_failed = False
old_port = self.application.config['web']['port']
#validate numeric port
if not formdata['port'].isdigit():
port_failed = True
failure_strs.append(u"Non-numeric port value: {0}".format(formdata['port']))
#validate port range
if not port_failed:
new_port = int(formdata['port'])
if new_port > 49151 or new_port < 1024:
failure_strs.append(u"Port value out of range (1024-49151): {0}".format(new_port))
port_failed = True
#validate port availability
if self.port != new_port:
if not port_failed:
if new_port != old_port and not self.is_port_available(new_port):
failure_strs.append(u"Port not available: {0}".format(new_port))
port_failed = True
#validate password and username are set
if formdata['use_authentication'] and (formdata['username']=="" or formdata['password']==""):
failure_strs.append(u"Username and password must be filled in if the 'use authentication' box is checked")
if formdata['cache_location'] != "":
if not os.path.isdir(formdata['cache_location']):
try:
os.makedirs(formdata['cache_location'])
except:
failure_strs.append(u"Cache location failure")
if formdata['sqlite_location'] != "":
if not os.path.isdir(formdata['sqlite_location']):
failure_strs.append(u"SQLite database location does not exists")
if formdata['sqlite_database'] != "":
try:
import tempfile
test = os.path.join(tempfile.gettempdir(),formdata['sqlite_database'])
open(test, "wb").close()
os.remove(test)
except:
failure_strs.append(u"SQLite database name contains strange symbols")
if int(formdata['pdf_resolution']) < 72:
failure_strs.append(u"Min PDF Resoltion is 72")
if int(formdata['pdf_resolution']) > 600:
failure_strs.append(u"Max PDF Resoltion is 600")
if int(formdata['ebook_resolution']) < 72:
failure_strs.append(u"Min PDF Resoltion is 50")
if int(formdata['ebook_resolution']) > 600:
failure_strs.append(u"Max PDF Resoltion is 600")
if int(formdata['ebook_margin']) < 0:
failure_strs.append(u"Min Ebook Margin is 0")
if int(formdata['ebook_margin']) > 72:
failure_strs.append(u"Min Ebook Margin is 0")
#validate password pair is the same
if formdata['password'] != formdata['password_confirm']:
failure_strs.append(u"Password fields don't match.")
if formdata['use_api_key'] and formdata['api_key']=="":
failure_strs.append(u"API Key must have a value if the box is checked")
# check cache input... ok?
try:
int(formdata['cache_size'])
except:
failure_strs.append(u"Cache size not a number")
try:
int(formdata['cache_free'])
except:
failure_strs.append(u"Cache free size not a number")
# need more validation here on mysql!!!! secure https! database names?
# FIX: RELEASE1
if len(failure_strs) == 0:
validated = True
if validated:
# was the password changed?
password_changed = True
if formdata['use_authentication']:
if formdata['password'] == ConfigPageHandler.fakepass:
password_changed = False
elif utils.getDigest(formdata['password']) == self.application.config['security']['password_digest']:
password_changed = False
else:
password_changed = False
if (new_port != old_port or
formdata['bind'] != self.application.config['web']['bind'] or
formdata['webroot'] != self.application.config['web']['webroot'] or
formdata['secure_port'] != self.application.config['web.ssl']['port'] or
formdata['key_file'] != self.application.config['web.ssl']['key_file'] or
formdata['certificate_file'] != self.application.config['web.ssl']['certificate_file'] or
formdata['use_https'] != self.application.config['web.ssl']['active'] or
new_folder_list != old_folder_list or
formdata['username'] != self.application.config['security']['username'] or
password_changed or
formdata['use_api_key'] != self.application.config['security']['use_api_key'] or
formdata['api_key'] != self.application.config['security']['api_key'] or
formdata['db_engine'] != self.application.config['database']['engine'] or
formdata['mysql_database'] != self.application.config['database.mysql']['database'] or
utils.encode(self.application.config['general']['install_id'],formdata['mysql_password']) != self.application.config['database.mysql']['password'] or
formdata['mysql_username'] != self.application.config['database.mysql']['username'] or
formdata['mysql_port'] != self.application.config['database.mysql']['port'] or
formdata['mysql_host'] != self.application.config['database.mysql']['host'] or
formdata['sqlite_database'] != self.application.config['database.sqlite']['database'] or
formdata['sqlite_location'] != self.application.config['database.sqlite']['location'] or
formdata['comicvine_location'] != self.application.config['metadata.comicvine']['location'] or
formdata['comicvine_key'] != self.application.config['metadata.comicvine']['key'] or
formdata['zilverendolfijn_location'] != self.application.config['metadata.zilverendolfijn']['location'] or
formdata['cbw_location'] != self.application.config['format.webcomic']['location'] or
formdata['cbw_autodownload'] != self.application.config['format.webcomic']['auto_download'] or
formdata['use_comicvine'] != self.application.config['metadata.comicvine']['active'] or
formdata['use_zd'] != self.application.config['metadata.zilverendolfijn']['active'] or
formdata['use_cbw'] != self.application.config['format.webcomic']['active'] or
formdata['use_folders'] != self.application.config['format.folders']['active'] or
formdata['use_pdf'] != self.application.config['format.pdf']['active'] or
formdata['pdf_resolution'] != self.application.config['format.pdf']['resolution'] or
formdata['ebook_margin'] != self.application.config['format.ebook']['margin'] or
formdata['ebook_resolution'] != self.application.config['format.ebook']['resolution'] or
formdata['pdf_engine'] != self.application.config['format.pdf']['engine'] or
formdata['mudraw'] != self.application.config['format.pdf']['mudraw'] or
formdata['mutool'] != self.application.config['format.pdf']['mutool'] or
formdata['pdf2png'] != self.application.config['format.pdf']['pdf2png'] or
formdata['qpdf'] != self.application.config['format.pdf']['qpdf'] or
formdata['use_ebook'] != self.application.config['format.ebook']['active'] or
formdata['calibre'] != self.application.config['format.ebook']['calibre'] or
formdata['ebook_cache_location'] != self.application.config['format.ebook']['location'] or
formdata['ebook_cache_free'] != self.application.config['format.ebook']['free'] or
formdata['ebook_cache_size'] != self.application.config['format.ebook']['size'] or
formdata['launch_client'] != self.application.config['general']['launch_client'] or
formdata['fingerprint'] != self.application.config['general']['fingerprint'] or
formdata['use_cache'] != self.application.config['cache']['active'] or
formdata['cache_size'] != self.application.config['cache']['size'] or
formdata['cache_free'] != self.application.config['cache']['free'] or
formdata['cache_location'] != self.application.config['cache']['location']
):
# apply everything from the form
self.application.config['general']['folder_list'] = new_folder_list
self.application.config['web']['port'] = new_port
self.application.config['web']['webroot'] = formdata['webroot']
self.application.config['web']['bind'] = formdata['bind']
self.application.config['security']['use_authentication'] = formdata['use_authentication']
self.application.config['security']['username'] = formdata['username']
if formdata['password'] != ConfigPageHandler.fakepass:
self.application.config['security']['password_digest'] = utils.getDigest(formdata['password'])
self.application.config['security']['use_api_key'] = formdata['use_api_key']
if self.application.config['security']['use_api_key']:
self.application.config['security']['api_key'] = formdata['api_key']
else:
self.application.config['security']['api_key'] = ""
formdata['api_key'] = ""
self.application.config['general']['launch_client'] = formdata['launch_client']
self.application.config['general']['fingerprint'] = formdata['fingerprint']
self.application.config['web.ssl']['port'] = formdata['secure_port']
self.application.config['web.ssl']['active'] = formdata['use_https']
self.application.config['web.ssl']['key_file'] = formdata['key_file']
self.application.config['web.ssl']['certificate_file'] = formdata['certificate_file']
self.application.config['cache']['active'] = formdata['use_cache']
self.application.config['cache']['size'] = formdata['cache_size']
self.application.config['cache']['free'] = formdata['cache_free']
self.application.config['cache']['location'] = formdata['cache_location']
self.application.config['format.ebook']['calibre'] = formdata['calibre']
self.application.config['format.ebook']['location'] = formdata['ebook_cache_location']
self.application.config['format.ebook']['free'] = formdata['ebook_cache_free']
self.application.config['format.ebook']['size'] = formdata['ebook_cache_size']
self.application.config['format.ebook']['active'] = formdata['use_ebook']
self.application.config['format.webcomic']['active'] = formdata['use_cbw']
self.application.config['format.folders']['active'] = formdata['use_folders']
self.application.config['format.webcomic']['location'] = formdata['cbw_location']
self.application.config['format.webcomic']['auto_download'] = formdata['cbw_autodownload']
self.application.config['format.pdf']['active'] = formdata['use_pdf']
self.application.config['format.pdf']['resolution'] = formdata['pdf_resolution']
self.application.config['format.ebook']['resolution'] = formdata['ebook_resolution']
self.application.config['format.ebook']['margin'] = formdata['ebook_margin']
self.application.config['metadata.comicvine']['location'] = formdata['comicvine_location']
self.application.config['metadata.comicvine']['key'] = formdata['comicvine_key']
self.application.config['metadata.zilverendolfijn']['location'] = formdata['zilverendolfijn_location']
self.application.config['metadata.comicvine']['active'] = formdata['use_comicvine']
self.application.config['metadata.zilverendolfijn']['active'] = formdata['use_zd']
self.application.config['format.pdf']['engine'] = formdata['pdf_engine']
self.application.config['format.pdf']['mudraw'] = formdata['mudraw']
self.application.config['format.pdf']['mutool'] = formdata['mutool']
self.application.config['format.pdf']['pdf2png'] = formdata['pdf2png']
self.application.config['format.pdf']['qpdf'] = formdata['qpdf']
self.application.config['database']['engine'] = formdata['db_engine']
self.application.config['database.sqlite']['location'] = formdata['sqlite_location']
self.application.config['database.sqlite']['database'] = formdata['sqlite_database']
# lame password hide should be better...
self.application.config['database.mysql']['password'] = utils.encode(self.application.config['general']['install_id'],formdata['mysql_password'])
self.application.config['database.mysql']['username'] = formdata['mysql_username']
self.application.config['database.mysql']['database'] = formdata['mysql_database']
self.application.config['database.mysql']['host'] = formdata['mysql_host']
self.application.config['database.mysql']['port'] = formdata['mysql_port']
success_str = "Saved. Server restart needed"
self.application.config.write()
else:
failure_str = "<br/>".join(failure_strs)
formdata['password'] = ""
formdata['password_confirm'] = ""
logging.info("Config: " + str(self.application.config))
self.render_config(formdata, success=success_str, failure=failure_str)
class LoginHandler(BaseHandler):
    """Displays and processes the login form."""
    def get(self):
        # Work out where to send the user after a successful login.
        if len(self.get_arguments("next")) != 0:
            next=self.get_argument("next")
        else:
            next=self.webroot + "/"
        #if password and user are blank, just skip to the "next"
        if ( self.application.config['security']['password_digest'] == utils.getDigest("") and
             self.application.config['security']['username'] == ""
            ):
            self.set_secure_cookie("user", fix_username(self.application.config['security']['username']))
            self.redirect(next)
        else:
            self.render(deviceroot(self)+'login.html', next=next)
    def post(self):
        # NOTE(review): a failed login (wrong credentials, or no password
        # argument) falls through without rendering or redirecting, so
        # the request finishes with an empty 200 response — confirm
        # whether re-rendering the login page was intended here.
        next = self.get_argument("next")
        if len(self.get_arguments("password")) != 0:
            #print self.application.password, self.get_argument("password") , next
            if (utils.getDigest(self.get_argument("password")) == self.application.config['security']['password_digest'] and
                self.get_argument("username") == self.application.config['security']['username']):
                #self.set_secure_cookie("auth", self.application.config['security']['password_digest'])
                self.set_secure_cookie("user", fix_username(self.application.config['security']['username']))
                self.redirect(next)
class WakeUp:
    """Background worker that pings the database once an hour so an idle
    (MySQL) connection is not dropped by the server."""

    def __init__(self, dm):
        # dm: DataManager providing the Session factory used for the ping
        self.dm = dm

    def start(self):
        """Spawn the keep-alive loop as a daemon thread."""
        self.thread = threading.Thread(target=self.mainLoop)
        self.thread.daemon = True
        self.quit = False
        self.thread.start()

    def stop(self):
        """Signal the loop to exit and wait for the thread to finish."""
        self.quit = True
        self.thread.join()

    def mainLoop(self):
        """Sleep in 1-second ticks; every 3600 ticks run a trivial query
        to keep the database connection alive. Exits on stop() or on the
        first error."""
        logging.debug("WakeUp: Started")
        tick = 0
        while not self.quit:
            try:
                time.sleep(1)
                tick += 1
                # call mysql.query to keep it alive...
                if tick == 3600:
                    logging.debug("WakeUp: Database")
                    session = self.dm.Session()
                    session.query(SchemaInfo).first()
                    tick = 0
                    session.close()
            except Exception:
                # Was a bare `except: break`. Keep the stop-on-error
                # behaviour, but log what actually went wrong instead of
                # swallowing it silently.
                logging.debug("WakeUp: keep-alive loop error", exc_info=True)
                break
        logging.debug("WakeUp: Stopped")
class APIServer(tornado.web.Application):
    """Tornado application for the ComicStreamer web UI and REST API.

    Construction wires together the data layer, page cache, URL handlers
    and background threads, and binds the HTTP/HTTPS listener; run() then
    enters the Tornado IO loop.
    """
    def __init__(self, config, opts):
        """Set up the whole server from parsed `config` and command-line `opts`.

        May call sys.exit() on fatal conditions (explicit reset, database
        schema/open errors, or the listen port being unavailable).
        """
        utils.fix_output_encoding()
        self.config = config
        logging.debug("Config: " + str(config))
        self.opts = opts
        self.port = self.config['web']['port']
        self.port_secure = self.config['web.ssl']['port']
        self.webroot = self.config['web']['webroot']
        self.bind = self.config['web']['bind']
        self.comicArchiveList = []
        #if len(self.config['general']['folder_list']) == 0:
        #    logging.error("No folders on either command-line or config file. Quitting.")
        #    sys.exit(-1)
        # make sure all application cache/working folders exist
        if not os.path.exists(AppFolders.appCachePages()):
            os.makedirs(AppFolders.appCachePages())
        if not os.path.exists(AppFolders.appCacheEbooks()):
            os.makedirs(AppFolders.appCacheEbooks())
        if not os.path.exists(AppFolders.appBlacklistPages()):
            os.makedirs(AppFolders.appBlacklistPages())
        if not os.path.exists(AppFolders.appWebComic()):
            os.makedirs(AppFolders.appWebComic())
        #self.dm = DataManager()
        self.dm = DataManager(config)
        self.library = Library(self.dm.Session)
        # "HERE FIX Move to cache.py
        cache_location = self.config['cache']['location']
        cache_active = self.config['cache']['active']
        if cache_location == "": cache_location = AppFolders.appCachePages()
        else:
            if not os.path.isdir(cache_location):
                # NOTE(review): 'active' is assigned but never read --
                # this looks like it should be 'cache_active = False'; confirm.
                active = False
        self.library.cache(cache_location,cache_active,self.config['cache']['size'],self.config['cache']['free'])
        if opts.reset or opts.reset_and_run:
            self.dm.delete()
            self.library.cache_clear()
        # quit on a standard reset
        if opts.reset:
            sys.exit(0)
        try:
            self.dm.create()
        except SchemaVersionException as e:
            # FIX "HERE frmo place error messgaes.
            msg = "Couldn't open database. Probably the schema has changed."
            logging.error("Database: " + msg)
            utils.alert("Schema change", msg)
            if mysql_active:
                logging.error("Database: Please Drop the Database in MySQL and recreate it")
                sys.exit(-1)
            else:
                # sqlite: wipe the database file and re-exec the process
                self.dm.delete()
                self.restart(True)
        except sqlalchemy.exc.OperationalError as e:
            msg = "Could not open database."
            logging.error("Database: " + msg)
            utils.alert("Database Error", msg)
            logging.error("Database: Please delete Database file or restore backup!")
            # "HERE FIX open sqlite temp db so you canfix the problem......
            sys.exit(-1)
        if opts.extract_last_page:
            self.library.lastpage_extractor_for_blacklist()
        try:
            if self.config['web.ssl']['active']:
                http_server = tornado.httpserver.HTTPServer(self, no_keep_alive = True, ssl_options={
                    "certfile": self.config['web.ssl']['certificate_file'], # "server.crt",
                    "keyfile": self.config['web.ssl']['key_file'] # "server.key",
                })
                http_server.listen(self.port_secure,address=self.bind)
                # "HERE fix not finished
                """
                redirect_handlers = [
                    (r'/(.*)', index.RedirectHandler),
                ]
                http_settings = {
                    'host': ServerEnvironment.GetHost(),
                    'redirect_port': self.port_secure,
                #    'xheaders': options.options.xheaders,
                }
                """
            else:
                self.listen(self.port, no_keep_alive = True, address=self.bind)
        except Exception as e:
            logging.error(e)
            msg = "Couldn't open socket on port {0}. (Maybe ComicStreamer is already running?) Quitting.".format(self.port)
            logging.error("Server: " + msg)
            utils.alert("Port not available", msg)
            sys.exit(-1)
        logging.info( "Server: Bind '{1}', Port {0}, Webroot: '{2}'".format(self.port,self.bind,self.webroot))
        self.version = csversion.version
        # URL routing table: human-facing pages first, then JSON/data endpoints
        handlers = [
            # Web Pages
            (self.webroot + r"/", MainHandler),
            (self.webroot + r"/(.*)\.html", GenericPageHandler),
            (self.webroot + r"/about", AboutPageHandler),
            (self.webroot + r"/api", APIPageHandler),
            (self.webroot + r"/help", HelpPageHandler),
            (self.webroot + r"/settings", ConfigPageHandler),
            (self.webroot + r"/search", SearchPageHandler),
            (self.webroot + r"/server", ServerPageHandler),
            (self.webroot + r"/recently", RecentlyPageHandler),
            (self.webroot + r"/log", LogPageHandler),
            (self.webroot + r"/comics/browse", ComicListBrowserHandler),
            (self.webroot + r"/comiclist/browse", ComicListBrowserHandler),
            (self.webroot + r"/folders/browse(/.*)*", FoldersBrowserHandler),
            (self.webroot + r"/entities/browse(/.*)*", EntitiesBrowserHandler),
            (self.webroot + r"/comic/([0-9]+)/reader", ReaderHandler),
            (self.webroot + r"/login", LoginHandler),
            # Data
            (self.webroot + r"/dbinfo", DBInfoAPIHandler),
            (self.webroot + r"/version", VersionAPIHandler),
            (self.webroot + r"/command", ServerAPIHandler),
            (self.webroot + r"/scanstatus", ScanStatusAPIHandler),
            (self.webroot + r"/deleted", DeletedAPIHandler),
            (self.webroot + r"/comic/([0-9]+)", ComicAPIHandler),
            (self.webroot + r"/comics", ComicListAPIHandler),
            (self.webroot + r"/comiclist", ComicListAPIHandler),
            (self.webroot + r"/comic/([0-9]+)/page/([0-9]+|clear)/bookmark", ComicBookmarkAPIHandler ),
            (self.webroot + r"/comic/([0-9]+)/page/([0-9]+|clear)/blacklist", ComicBlacklistAPIHandler ),
            (self.webroot + r"/comic/([0-9]+)/page/([0-9]+|clear)/like", ComicFavoritesAPIHandler ),
            (self.webroot + r"/comic/([0-9]+)/page/([0-9]+|clear)/cache", ComicBlacklistAPIHandler ),
            (self.webroot + r"/comic/([0-9]+)/page/([0-9]+)", ComicPageAPIHandler ),
            (self.webroot + r"/comic/([0-9]+)/thumbnail", ThumbnailAPIHandler),
            (self.webroot + r"/comic/([0-9]+)/file", FileAPIHandler),
            (self.webroot + r"/entities(/.*)*", EntityAPIHandler),
            (self.webroot + r"/folders(/.*)*", FolderAPIHandler),
            #(r'/favicon.ico', tornado.web.StaticFileHandler, {'path': os.path.join(AppFolders.appBase(), "static","images")}),
            (self.webroot + r'/.*', UnknownHandler),
        ]
        settings = dict(
            template_path=os.path.join(AppFolders.appBase(), "gui"),
            static_path=os.path.join(AppFolders.appBase(), "static"),
            static_url_prefix=self.webroot + "/static/",
            debug=True,
            #autoreload=False,
            login_url=self.webroot + "/login",
            cookie_secret=self.config['security']['cookie_secret'],
            xsrf_cookies=True,
        )
        tornado.web.Application.__init__(self, handlers, **settings)
        self.library.loadBlacklistFromFile(os.path.join(AppFolders.static(),'comicstreamer'))
        # start the background worker threads
        if not opts.no_monitor:
            self.monitor = Monitor(self.dm, self.config['general']['folder_list'])
            self.monitor.start()
            self.monitor.scan()
        self.bookmark = Bookmark(self.dm)
        self.bookmark.start()
        self.blacklist = Blacklist(self.dm)
        self.blacklist.start()
        if mysql_active:
            # hourly keep-alive ping so the MySQL connection is not dropped
            self.wakeup = WakeUp(self.dm)
            self.wakeup.start()
        if opts.launch_client and self.config['general']['launch_client']:
            # only try to open a browser when a display is likely available
            if ((platform.system() == "Linux" and os.environ.has_key('DISPLAY')) or
               (platform.system() == "Darwin" and not os.environ.has_key('SSH_TTY')) or
               platform.system() == "Windows"):
                webbrowser.open("http://localhost:{0}".format(self.port), new=0)
        self.bonjour = BonjourThread(self.port)
        self.bonjour.start()
    def rebuild(self):
        """Re-exec the server with a flag that purges and rebuilds the DB."""
        # after restart, purge the DB
        sys.argv.insert(1, "--_resetdb_and_run")
        self.restart()
    def restart(self,fast=False):
        """Re-exec this process with equivalent command-line options.

        fast=True skips the graceful shutdown (used during startup, e.g.
        right after deleting a stale database).
        """
        if not fast:
            self.shutdown()
        executable = sys.executable
        new_argv = ["--nobrowser"]
        new_argv.append("-p");
        new_argv.append(str(self.port))
        if self.opts.quiet:
            new_argv.append("-q")
        if self.opts.debug:
            new_argv.append("-d")
        if self.opts.no_monitor:
            new_argv.append("--nomonitor")
        # if self.opts.config_file:
        #     new_argv.append("--config-file")
        #     new_argv.append(self.opts.config_file)
        if self.opts.user_dir:
            new_argv.append("--user-dir")
            new_argv.append(self.opts.user_dir)
        if self.opts.webroot:
            new_argv.append("--webroot")
            new_argv.append(self.opts.webroot)
        if self.opts.bind:
            new_argv.append("-b")
            new_argv.append(self.opts.bind)
        if "--_resetdb_and_run" in sys.argv:
            new_argv.append("--_resetdb_and_run")
        if getattr(sys, 'frozen', None):
            # frozen (bundled) build: exec the binary itself
            # only keep selected args
            new_argv.insert(0, os.path.basename(executable) )
            os.execv(executable, new_argv)
        else:
            new_argv.insert(0, os.path.basename(sys.argv[0]) )
            os.execl(executable, executable, *new_argv)
    def shutdown(self):
        """Stop background threads and wind down the Tornado IO loop,
        waiting at most a few seconds for pending callbacks."""
        MAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 3
        logging.info('Server: Initiating shutdown...')
        if not self.opts.no_monitor:
            self.monitor.stop()
        self.bookmark.stop()
        if mysql_active:
            self.wakeup.stop()
        self.blacklist.stop()
        self.bonjour.stop()
        # logging.debug('Web: Will shutdown ComicStreamer in maximum %s seconds ...', MAX_WAIT_SECONDS_BEFORE_SHUTDOWN)
        io_loop = tornado.ioloop.IOLoop.instance()
        deadline = time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN
        def stop_loop():
            # poll once a second until the loop is idle or the deadline passes
            now = time.time()
            if now < deadline and (io_loop._callbacks or io_loop._timeouts):
                io_loop.add_timeout(now + 1, stop_loop)
            else:
                io_loop.stop()
                logging.info('Bye!')
        stop_loop()
        logging.debug('Server: Stopped')
        self.dm.stop()
    def log_request(self, handler):
        """Tornado hook: log each request at a level chosen from its HTTP status."""
        if handler.get_status() < 300:
            log_method = logging.debug
        elif handler.get_status() < 400:
            log_method = logging.debug
        elif handler.get_status() < 500:
            log_method = logging.warning
        else:
            log_method = logging.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)
    def run(self):
        """Block, running the Tornado IO loop until stopped."""
        tornado.ioloop.IOLoop.instance().start()
    def runInThread(self):
        """Run the IO loop in a separate thread and return immediately."""
        import threading
        t = threading.Thread(target=self.run)
        t.start()
| Tristan79/ComicStreamer | comicstreamerlib/server.py | Python | apache-2.0 | 80,525 | 0.009637 |
#
# (C) Copyright 2008 Jelmer Vernooij <[email protected]>
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""GSSAPI authentication mechanism for PyXMPP SASL implementation.
Normative reference:
- `RFC 4752 <http://www.ietf.org/rfc/rfc4752.txt>`__
"""
__docformat__ = "restructuredtext en"
import base64
import kerberos
import logging
from .core import ClientAuthenticator, Response, Success
from .core import sasl_mechanism
logger = logging.getLogger("pyxmpp2.sasl.gssapi")
@sasl_mechanism("GSSAPI", 75)
class GSSAPIClientAuthenticator(ClientAuthenticator):
    """Provides client-side GSSAPI SASL (Kerberos 5) authentication."""
    def __init__(self, password_manager):
        """Initialize with a `password_manager`, which supplies the server
        host name used to build the service principal."""
        ClientAuthenticator.__init__(self, password_manager)
        self.password_manager = password_manager
        self.username = None   # authenticated principal, filled in finish()
        self._gss = None       # opaque kerberos GSS-API client context
        self.step = None       # 0: token exchange, 1: security-layer negotiation
        self.authzid = None    # requested authorization id, if any
    def start(self, username, authzid):
        """Begin GSSAPI authentication and return the first client response.

        The GSS context targets `authzid` if given, otherwise the service
        principal 'xmpp@<server host>'.
        """
        self.username = username
        self.authzid = authzid
        _unused, self._gss = kerberos.authGSSClientInit(authzid or
                "{0}@{1}".format("xmpp",
                        self.password_manager.get_serv_host()))
        self.step = 0
        return self.challenge("")
    def challenge(self, challenge):
        """Process a server challenge and return the next client Response.

        Step 0 drives the GSS token exchange until the context is
        established; step 1 unwraps the server's security-layer offer and
        wraps the client's reply. Challenges/responses are base64 at the
        SASL boundary, raw bytes inside the kerberos calls.
        """
        if self.step == 0:
            ret = kerberos.authGSSClientStep(self._gss,
                                             base64.b64encode(challenge))
            if ret != kerberos.AUTH_GSS_CONTINUE:
                self.step = 1
        elif self.step == 1:
            ret = kerberos.authGSSClientUnwrap(self._gss,
                                               base64.b64encode(challenge))
            response = kerberos.authGSSClientResponse(self._gss)
            ret = kerberos.authGSSClientWrap(self._gss, response,
                                             self.username)
        response = kerberos.authGSSClientResponse(self._gss)
        if response is None:
            return Response("")
        else:
            return Response(base64.b64decode(response))
    def finish(self, data):
        """Complete authentication and report the principal that Kerberos
        actually authenticated (may differ from the requested username)."""
        self.username = kerberos.authGSSClientUserName(self._gss)
        logger.debug("Authenticated as {0!r}".format(
                kerberos.authGSSClientUserName(self._gss)))
        return Success(self.username, None, self.authzid)
# vi: sts=4 et sw=4
| pforret/python-for-android | python3-alpha/python-libs/pyxmpp2/sasl/gssapi.py | Python | apache-2.0 | 3,008 | 0.003324 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import six
# Word-token pattern: runs of 2+ capitals not followed by a lowercase
# letter (acronyms), a capitalized piece ending before the next capital
# (CamelCase splitting), or words that may contain apostrophes/hyphens.
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+",
                          re.UNICODE)
class Tokenizer(object):
    """Splits documents into word tokens.

    Each tokenizer method takes an iterable of strings and yields one
    list of tokens per input string.
    """

    def __init__(self):
        pass

    def tokenizer0(self, texts):
        """Yield TOKENIZER_RE matches (acronyms, CamelCase pieces, words)
        for each input string.

        Parameters were renamed from `iter`/`str`, which shadowed builtins.
        """
        for text in texts:
            yield TOKENIZER_RE.findall(text)

    def tokenizer1(self, texts):
        """Yield whitespace-split tokens for each input string.

        NOTE(review): the pattern begins with (?!), an always-failing
        lookahead, so re.sub never replaces anything and this is a plain
        whitespace split. The commented-out line shows what was presumably
        intended -- confirm which behaviour is wanted before changing it.
        """
        for text in texts:
            #tokens = re.sub(r"[^a-z0-9]+", " ", text).split()
            tokens = re.sub(r"(?!)[^a-z0-9]+", " ", text).split()
            yield tokens
# Sample documents for the self-test below.
# Fixed: the original list was missing a comma after the second element,
# so the second and third documents were implicitly concatenated into one
# string and the list held only two entries.
raw_doc = [
    " Abbott of Farnham E D Abbott Limited was a British coachbuilding business based in Farnham Surrey trading under that name from 1929. A major part of their output was under sub-contract to motor vehicle manufacturers. Their business closed in 1972.",
    " Schwan-STABILO is a German maker of pens for writing colouring and cosmetics as well as markers and highlighters for office use. It is the world's largest manufacturer of highlighter pens Stabilo Boss.",
    " Q-workshop is a Polish company located in Poznań that specializes in designand production of polyhedral dice and dice accessories for use in various games (role-playing gamesboard games and tabletop wargames). They also run an online retail store and maintainan active forum community.Q-workshop was established in 2001 by Patryk Strzelewicz – a student from Poznań. Initiallythe company sold its products via online auction services but in 2005 a website and online store wereestablished."
]

# test: print every token of every sample document
if __name__ == '__main__':
    tokenizer = Tokenizer()
    tokenizer_ = tokenizer.tokenizer1
    for tokens in tokenizer_(raw_doc):
        for token in tokens:
            print(token)
| sunmont/textclassifier | tokenizer.py | Python | apache-2.0 | 1,809 | 0.007756 |
from __future__ import absolute_import

from .love import main
import sys

# Forward the first command-line argument to main() when one was given;
# otherwise call main() with no argument.
extra_args = sys.argv[1:]
if extra_args:
    main(extra_args[0])
else:
    main()
| takluyver/Love | love/__main__.py | Python | mit | 147 | 0.020408 |
# -*- coding: utf-8 -*-
import LineAlpha
from LineAlpha.Gen.ttypes import *
from datetime import datetime
import time,random,sys,json,codecs,threading,glob
# Log in four LINE client sessions via QR code: cl is the primary
# account; ki, kk and kc are helper accounts used by the group
# protection/recovery commands further below.
cl = LineAlpha.LINE()
cl.login(qr=True)
cl.loginResult()
kk = LineAlpha.LINE()
kk.login(qr=True)
kk.loginResult()
ki = LineAlpha.LINE()
ki.login(qr=True)
ki.loginResult()
kc = LineAlpha.LINE()
kc.login(qr=True)
kc.loginResult()
print "login success"
# Python 2 hack to make UTF-8 the process-wide default string encoding
reload(sys)
sys.setdefaultencoding('utf-8')
# User-facing command reference returned by the "help"/"Key" commands
# (kept verbatim; it is sent to chats at runtime).
helpMessage =""" Rach Bot
[Id︎]
[Mid]
[Me︎]
[TL︎:「Text」]
[Mc 「mid」]
[K on/off]
[Join︎ on/off]
[Gcancel:︎「Number of people」]
[Group cancelalll︎]
[Leave︎ on/off]
[Add on/off]
[Share on/off]
[Message change:「text」]
[Message check]
[Confirm]
[Jam on/off]
[Change clock:「name」]
[Up]
[Cv join]
[*] Command in the groups [*]
[Curl]
[Ourl]
[url]
[url:「Group ID」]
[Invite:「mid」]
[Kick:「mid」]
[Ginfo]
[Cancel]
[Gn 「group name」]
[Nk 「name」]
[*] Command kicker only [*]
[Bye]
[Kill ban]
[Kill 「@」]
[Ban 「@」] By Tag
[Unban 「@」] By Tag
[Ban︎] Share Contact
[Unban︎] Share Contact
[Banlist︎]
[Cek ban]
[Cv mid]
[Cv ︎invite:「mid」]
[Cv ︎rename:「name」]
[Cv ︎gift]
[Respo︎n]
[Bot cancel]
[Title:]
"""
# All four logged-in clients, used e.g. to pick a random kicker
KAC=[cl,ki,kk,kc]
# mids (LINE user ids) of the bot accounts
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Bots=[mid,Amid,Bmid,Cmid]
# mid(s) of the human administrator(s)
admin=["u3b2f7586e70571fd1f35b9ba58c91c96"]
# Global feature switches and mutable state shared by the command handlers
wait = {
'contact':True,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"Thanks for add me, Bitch",
"lang":"JP",
"comment":"Thanks for add me, Bitch",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":True,
"cName":"Pepepepe ",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protectionOn":True
}
# Read-receipt ("read point") tracking state
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
# NOTE(review): the first assignment is immediately overwritten by the
# alias to wait2['setTime'] on the next line.
setTime = {}
setTime = wait2['setTime']
def sendMessage(to, text, contentMetadata={}, contentType=0):
    """Build a LINE Message addressed to `to` and bump its per-recipient
    counter.

    NOTE(review): the constructed message is never handed to any client's
    send call, and `profile` / `messageReq` are not defined anywhere in
    this file's visible scope -- this helper looks dead or unfinished;
    confirm before relying on it.
    NOTE(review): `contentMetadata={}` is a shared mutable default -- a
    caller that mutates it affects all later calls.
    """
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    # running count of messages prepared per recipient (starts at 0)
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
    """Record who has read messages in chats registered as 'read points'.

    `op.param1` is the chat/group id and `op.param2` the reader's mid.
    The reader's display name is appended to wait2['readMember'] (a
    running text list) and stored in wait2['ROM'] keyed by mid, unless
    it was already recorded.
    """
    try:
        if op.param1 in wait2['readPoint']:
            Name = cl.getContact(op.param2).displayName
            if Name in wait2['readMember'][op.param1]:
                pass
            else:
                wait2['readMember'][op.param1] += "\n・" + Name
                wait2['ROM'][op.param1][op.param2] = "・" + Name
        else:
            pass
    except:
        # best effort: e.g. wait2['ROM'] may lack op.param1 -- any error
        # is deliberately ignored
        pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.type == 13:
print op.param1
print op.param2
print op.param3
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("clientが蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == profile.mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Key","help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv1 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv1 gn ","")
ki.updateGroup(X)
else:
ki.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv2 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv2 gn ","")
kk.updateGroup(X)
else:
kk.sendText(msg.to,"It can't be used besides the group.")
elif ("Cv3 gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Cv3 gn ","")
kc.updateGroup(X)
else:
kc.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
midd = msg.text.replace("Kick ","")
cl.kickoutFromGroup(msg.to,[midd])
elif "Cv1 kick " in msg.text:
midd = msg.text.replace("Cv1 kick ","")
ki.kickoutFromGroup(msg.to,[midd])
elif "Cv2 kick " in msg.text:
midd = msg.text.replace("Cv2 kick ","")
kk.kickoutFromGroup(msg.to,[midd])
elif "Cv3 kick " in msg.text:
midd = msg.text.replace("Cv3 kick ","")
kc.kickoutFromGroup(msg.to,[midd])
elif "Invite " in msg.text:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "Cv1 invite " in msg.text:
midd = msg.text.replace("Cv1 invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
elif "Cv2 invite " in msg.text:
midd = msg.text.replace("Cv2 invite ","")
kk.findAndAddContactsByMid(midd)
kk.inviteIntoGroup(msg.to,[midd])
elif "Cv3 invite " in msg.text:
midd = msg.text.replace("Cv3 invite ","")
kc.findAndAddContactsByMid(midd)
kc.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif msg.text in ["Cv1"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
elif msg.text in ["Cv2"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Cv1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Cv2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
kk.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Cv3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
kc.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","All gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["cancel","Cancel"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv cancel","Bot cancel"]:
if msg.toType == 2:
G = k3.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
k3.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"No one is inviting")
else:
k3.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
k3.sendText(msg.to,"Can not be used outside the group")
else:
k3.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Ourl","Link on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 ourl","Cv1 link on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Chivas")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 ourl","Cv2 link on"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Chivas")
else:
kk.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 ourl","Cv3 link on"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Chivas")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Curl","Link off"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 curl","Cv1 link off"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Chivas")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 curl","Cv2 link off"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Chivas")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 curl","Cv3 link off"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Chivas")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\nmembers:" + str(len(ginfo.members)) + "members\npending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Id" == msg.text:
cl.sendText(msg.to,msg.to)
elif "All mid" == msg.text:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
elif "Mid" == msg.text:
cl.sendText(msg.to,mid)
elif "Cv1 mid" == msg.text:
ki.sendText(msg.to,Amid)
elif "Cv2 mid" == msg.text:
kk.sendText(msg.to,Bmid)
elif "Cv3 mid" == msg.text:
kc.sendText(msg.to,Cmid)
elif msg.text in ["Wkwk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galon"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL:"]:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Cn "]:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Cv1 rename "]:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Cv2 rename "]:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Mc "]:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["連絡先:オン","K on","Contact on","顯示:開"]:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オフ","K off","Contact off","顯示:關"]:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オン","Join on","Auto join:on","自動åƒåŠ :開"]:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å‚åŠ :オフ","Join off","Auto join:off","自動åƒåŠ :關"]:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒ç»ã€‚è¦æ—¶å¼€è¯·æŒ‡å®šäººæ•°å‘é€")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°ç»„用自动邀请拒ç»")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["共有:オフ","Share off","Share off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Set"]:
md = ""
if wait["contact"] == True: md+=" Contact : on\n"
else: md+=" Contact : off\n"
if wait["autoJoin"] == True: md+=" Auto join : on\n"
else: md +=" Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= " Group cancel : off\n"
if wait["leaveRoom"] == True: md+=" Auto leave : on\n"
else: md+=" Auto leave : off\n"
if wait["timeline"] == True: md+=" Share : on\n"
else:md+=" Share : off\n"
if wait["autoAdd"] == True: md+=" Auto add : on\n"
else:md+=" Auto add : off\n"
if wait["commentOn"] == True: md+=" Comment : on\n"
else:md+=" Comment : off\n"
cl.sendText(msg.to,md)
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id","群組全id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif "album remove→" in msg.text:
gid = msg.text.replace("album remove→","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•å€™èªžç¢ºèª"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é 留言:開"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["コメント:オフ","Comment on","Comment off","自動首é 留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Comment","留言確èª"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Jam on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Jam off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif msg.text in ["Change clock "]:
n = msg.text.replace("Change clock ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Jam Update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
elif msg.text == "$set":
cl.sendText(msg.to, "Check sider")
ki.sendText(msg.to, "Check sider")
kk.sendText(msg.to, "Check sider")
kc.sendText(msg.to, "Check sider")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "$read":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "People who readed %s\nthat's it\n\nPeople who have ignored reads\n%sIt is abnormal ♪\n\nReading point creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "An already read point has not been set.\n「set」you can send ♪ read point will be created ♪")
#-----------------------------------------------
#-----------------------------------------------
elif msg.text in ["All join"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki.updateGroup(G)
elif msg.text in ["Cv1 join"]:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = kk.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = kk.reissueGroupTicket(msg.to)
elif msg.text in ["Cv2 join"]:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(msg.to)
#-----------------------------------------------
#.acceptGroupInvitationByTicket(msg.to,Ticket)
elif msg.text in ["Cv3 join"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
print "kicker ok"
G.preventJoinByTicket = True
kc.updateGroup(G)
#-----------------------------------------------
elif msg.text in ["Bye all"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye 1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye 2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Cv1 @bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Cv2 @bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Cv3 @bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Kill"]:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
kk.sendText(msg.to,"Fuck You")
kc.sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
klist=[ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Cleanse" in msg.text:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
ki.sendText(msg.to,"Just some casual cleansing ô")
kk.sendText(msg.to,"Group cleansed.")
kc.sendText(msg.to,"Fuck You All")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found.")
kk.sendText(msg.to,"Not found.")
kc.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Group cleanse")
kk.sendText(msg.to,"Group cleanse")
kc.sendText(msg.to,"Group cleanse")
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
klist=[cl,ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Succes Cv")
kk.sendText(msg.to,"Fuck You")
elif "Blacklist @ " in msg.text:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
k3.sendText(msg.to,"Succes Cv")
except:
ki.sendText(msg.to,"error")
elif "Ban @" in msg.text:
if msg.toType == 2:
print "[Ban]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found Cv")
kk.sendText(msg.to,"Not found Cv")
kc.sendText(msg.to,"Not found Cv")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Cv")
ki.sendText(msg.to,"Succes Cv")
kk.sendText(msg.to,"Succes Cv")
kc.sendText(msg.to,"Succes Cv")
except:
ki.sendText(msg.to,"Error")
kk.sendText(msg.to,"Error")
kc.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found Cv")
kk.sendText(msg.to,"Not found Cv")
kc.sendText(msg.to,"Not found Cv")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Cv")
ki.sendText(msg.to,"Succes Cv")
kk.sendText(msg.to,"Succes Cv")
kc.sendText(msg.to,"Succes Cv")
except:
ki.sendText(msg.to,"Succes Cv")
kk.sendText(msg.to,"Succes Cv")
kc.sendText(msg.to,"Succes Cv")
#-----------------------------------------------
elif msg.text in ["Test"]:
ki.sendText(msg.to,"Ok Cv double thumbs up")
kk.sendText(msg.to,"Ok Cv double thumbs up")
kc.sendText(msg.to,"Ok Cv double thumbs up")
#-----------------------------------------------
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
#-----------------------------------------------
elif msg.text in ["Cv say hi"]:
ki.sendText(msg.to,"Hi buddy Har Har")
kk.sendText(msg.to,"Hi buddy Har Har")
kc.sendText(msg.to,"Hi buddy Har Har")
#-----------------------------------------------
elif msg.text in ["Cv say hinata pekok"]:
ki.sendText(msg.to,"Hinata pekok Har Har")
kk.sendText(msg.to,"Hinata pekok Har Har")
kc.sendText(msg.to,"Hinata pekok Har Har")
elif msg.text in ["Cv say didik pekok"]:
ki.sendText(msg.to,"Didik pekok Har Har")
kk.sendText(msg.to,"Didik pekok Har Har")
kc.sendText(msg.to,"Didik pekok Har Har")
elif msg.text in ["Cv say bobo ah","Bobo dulu ah"]:
ki.sendText(msg.to,"Have a nice dream Cv Har Har")
kk.sendText(msg.to,"Have a nice dream Cv Har Har")
kc.sendText(msg.to,"Have a nice dream Cv Har Har")
elif msg.text in ["Cv say chomel pekok"]:
ki.sendText(msg.to,"Chomel pekok Har Har")
kk.sendText(msg.to,"Chomel pekok Har Har")
kc.sendText(msg.to,"Chomel pekok Har Har")
elif msg.text in ["#welcome"]:
ki.sendText(msg.to,"Selamat datang di Chivas Family Room")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
elif msg.text in ["Respon","respon"]:
ki.sendText(msg.to,"Cv1")
kk.sendText(msg.to,"Cv2")
kc.sendText(msg.to,"Cv3")
#-----------------------------------------------
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
cl.sendText(msg.to, "Progress...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
#------------------------------------------------------------------
elif msg.text in ["Ban"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact")
ki.sendText(msg.to,"send contact")
kk.sendText(msg.to,"send contact")
kc.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact")
ki.sendText(msg.to,"send contact")
kk.sendText(msg.to,"send contact")
kc.sendText(msg.to,"send contact")
elif msg.text in ["Banlist"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
ki.sendText(msg.to,"nothing")
kk.sendText(msg.to,"nothing")
kc.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
ki.sendText(msg.to,mc)
kk.sendText(msg.to,mc)
kc.sendText(msg.to,mc)
elif msg.text in ["Cek ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
ki.sendText(msg.to,"There was no blacklist user")
kk.sendText(msg.to,"There was no blacklist user")
kc.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
kk.kickoutFromGroup(msg.to,[jj])
kc.kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Blacklist emang pantas tuk di usir")
ki.sendText(msg.to,"Blacklist emang pantas tuk di usir")
kk.sendText(msg.to,"Blacklist emang pantas tuk di usir")
kc.sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random:" in msg.text:
if msg.toType == 2:
strnum = msg.text.replace("random:","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "album→" in msg.text:
try:
albumtags = msg.text.replace("album→","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakec→" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakec→","")
cl.sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    """Return True unless the current minute is a multiple of ten.

    Intended as a gate for the name-clock updater: False on :00, :10,
    :20, :30, :40, :50 and True for every other minute.
    """
    now2 = datetime.now()
    nowT = datetime.strftime(now2, "%M")  # two-digit minute string, e.g. "07"
    # BUG FIX: the original tested nowT[14:], which is always "" for a
    # two-character string, so the membership check never matched and the
    # function unconditionally returned True.  Compare the full minute
    # string instead.
    if nowT in ["10", "20", "30", "40", "50", "00"]:
        return False
    else:
        return True
def nameUpdate():
    """Background worker: while wait["clock"] is on, refresh the profile
    display name with the current (HH:MM) time every 10 minutes.

    Relies on module-level globals: cl (LINE client) and wait (settings
    dict with the "clock" flag and "cName" base name).  Runs forever; meant
    to be started on a daemon thread.
    """
    while True:
        try:
            if wait["clock"] == True:
                now2 = datetime.now()
                nowT = datetime.strftime(now2, "(%H:%M)")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
        except:
            # Best-effort: ignore transient API failures and retry on the
            # next cycle.
            pass
        # BUG FIX: the original called time.sleep(600) inside the try
        # block, so any exception raised before it made the loop spin
        # without pausing.  Sleeping unconditionally keeps the 10-minute
        # cadence even after an error.
        time.sleep(600)
# Start the clock-name updater on a background daemon thread so it does
# not block interpreter shutdown.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main long-poll loop: fetch up to 5 pending operations from the LINE
# server and dispatch each one to the bot() handler.
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        # EOF from the long-poll usually means the stored revision is
        # stale or invalid for this account.
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the poll revision so already-seen operations are not
            # fetched again, then hand the operation to the dispatcher.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
| rachmansenpai/rach-devp | chivasbot.py | Python | gpl-3.0 | 83,839 | 0.010302 |
# -*- coding: cp936 -*-
"""Split a KanColle construction log into one file per rare-ship result.

Reads F:/workspace/kancolle/log.txt line by line and copies each line
whose construction-result marker matches a tracked rare ship into that
ship's dedicated output file.
"""
import sys

# Force utf-8 as the process-wide default encoding (Python 2 idiom) so the
# Japanese marker strings compare correctly against the log contents.
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)

inf = open("F:/workspace/kancolle/log.txt", 'r')
# One output file per tracked ship, paired with the marker that identifies
# it in a log line.  Ordered like the original elif chain: first match wins.
targets = [
    (u'結果:大和', open("F:/workspace/kancolle/5-13-dahe.txt", 'w')),
    (u'結果:大鳳', open("F:/workspace/kancolle/5-13-dafeng.txt", 'w')),
    (u'結果:Bismarck', open("F:/workspace/kancolle/5-13-bsm.txt", 'w')),
    (u'結果:まるゆ', open("F:/workspace/kancolle/5-13-maruyu.txt", 'w')),
]
try:
    for line in inf:
        for marker, out in targets:
            # NOTE(review): find(...) > 0 (not >= 0) preserved from the
            # original -- a marker at column 0 would be skipped; presumably
            # log lines always carry a prefix, so this never triggers.
            if line.find(marker) > 0:
                out.write(line)
                break
finally:
    # Close every handle even if a read/write fails (the original leaked
    # all five handles on error).
    inf.close()
    for _, out in targets:
        out.close()
| misaki-nyanya/MyPieces | kancolle/buildCalc/run.py | Python | gpl-3.0 | 840 | 0.018519 |
import base64
from django.conf import settings
from crum import get_current_request
from openedx.core.lib.request_utils import safe_get_host
from common.lib.mandrill_client.client import MandrillClient
def send_admin_activation_email(first_name, org_id, org_name, claimed_by_name, claimed_by_email, dest_addr, hash_key):
    """
    Send an admin activation email.

    Arguments:
        first_name -- first name of the user being activated as admin
        org_id -- primary key of the organization (sent base64-encoded)
        org_name -- display name of the organization
        claimed_by_name -- name of the user claiming admin rights
        claimed_by_email -- email of the user claiming admin rights
        dest_addr -- recipient email address
        hash_key -- activation-hash record (provides activation_hash and
            suggested_by)
    """
    request = get_current_request()
    max_retries = settings.RETRY_ACTIVATION_EMAIL_MAX_ATTEMPTS
    encoded_org_id = base64.b64encode(str(org_id))
    message_context = {
        "first_name": first_name,
        "key": hash_key.activation_hash,
        "org_id": encoded_org_id,
        "org_name": org_name,
        "referring_user": hash_key.suggested_by.username,
        "claimed_by_name": claimed_by_name,
        "claimed_by_email": claimed_by_email,
    }
    # NOTE: the old code also passed org_id= to .format(); the format
    # string has no {org_id} placeholder, so the argument was dead and
    # has been dropped.
    admin_activation_link = '{protocol}://{site}/onboarding/admin_activate/{activation_key}?admin_activation=True'.format(
        protocol='https' if request.is_secure() else 'http',
        site=safe_get_host(request),
        activation_key=hash_key.activation_hash
    )
    message_context["admin_activation_link"] = admin_activation_link
    # Retry a flaky mail send a bounded number of times.  The old loop
    # used a bare ``except`` (which also swallowed KeyboardInterrupt /
    # SystemExit) and a sentinel assignment instead of ``break``.
    while max_retries > 0:
        try:
            MandrillClient().send_mail(MandrillClient.ORG_ADMIN_ACTIVATION_TEMPLATE, dest_addr, message_context)
            break  # sent successfully; stop retrying
        except Exception:
            max_retries -= 1
def send_admin_update_email(org_id, org_name, dest_addr, org_admin_name, hash_key, claimed_by_email, claimed_by_name):
    """
    Notify the current admin that another user claims to be the admin of
    the organization, including a link to act on the claim.
    """
    current_request = get_current_request()
    scheme = 'https' if current_request.is_secure() else 'http'
    activation_link = '{protocol}://{site}/onboarding/admin_activate/{claimed_by_key}'.format(
        protocol=scheme,
        site=safe_get_host(current_request),
        claimed_by_key=hash_key.activation_hash
    )
    context = {
        "org_name": org_name,
        "first_name": org_admin_name,
        "claimed_by_name": claimed_by_name,
        "claimed_by_email": claimed_by_email,
        "admin_activation_link": activation_link,
    }
    MandrillClient().send_mail(MandrillClient.ORG_ADMIN_CHANGE_TEMPLATE, dest_addr, context)
def send_admin_update_confirmation_email(org_name, current_admin, new_admin, confirm):
    """
    Notify both admins about the outcome of an admin-change claim.

    Arguments:
        org_name -- the name of the organization
        current_admin -- the current admin of the organization
        new_admin -- the new admin of the organization
        confirm -- 1 if the current_admin has confirmed resignation else 0
    """
    if confirm == 1:
        # Resignation confirmed: tell both parties the claim succeeded.
        MandrillClient().send_mail(MandrillClient.ORG_ADMIN_CLAIM_CONFIRMATION, current_admin.email, {
            "first_name": current_admin.first_name,
            "org_name": org_name,
            "claimed_by_name": new_admin.email,
        })
        MandrillClient().send_mail(MandrillClient.NEW_ADMIN_CLAIM_CONFIRMATION, new_admin.email, {
            "first_name": new_admin.first_name,
            "org_name": org_name,
            "confirm": confirm,
        })
        return
    # Claim rejected: ask the two admins to get in touch with each other.
    full_claimant_name = "{first_name} {last_name}".format(
        first_name=new_admin.first_name, last_name=new_admin.last_name
    )
    MandrillClient().send_mail(MandrillClient.ORG_ADMIN_GET_IN_TOUCH, current_admin.email, {
        "first_name": current_admin.first_name,
        "org_name": org_name,
        "claimed_by_name": full_claimant_name,
        "claimed_by_email": new_admin.email,
    })
    MandrillClient().send_mail(MandrillClient.NEW_ADMIN_GET_IN_TOUCH, new_admin.email, {
        "first_name": new_admin.first_name,
        "org_name": org_name,
        "current_admin": current_admin.email,
    })
| philanthropy-u/edx-platform | lms/djangoapps/onboarding/email_utils.py | Python | agpl-3.0 | 3,876 | 0.003354 |
# Minimal command module: translate a clear command into a purge action.
def main(message):
    """Return a single purgeText action for the count that follows the command word."""
    parts = message.content.split(' ', 1)
    # parts[1] (everything after the first space) must parse as an int;
    # a missing argument raises IndexError, matching the original.
    return [["purgeText", int(parts[1])]]
| ubidiscordbot/ubi | src/lib/modules/clear.py | Python | mit | 108 | 0.009259 |
r"""
A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson
>>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]')
[u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> simplejson.loads('"\\"foo\\bar"')
u'"foo\x08ar'
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io)
[u'streaming API']
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> list(ComplexEncoder().iterencode(2 + 1j))
['[', '2.0', ', ', '1.0', ']']
Note that the JSON produced by this module's default settings
is a subset of YAML, so it may be used as a serializer for that as well.
"""
__version__ = '1.7.4'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
# Module-level encoder shared by dump()/dumps() whenever the caller uses
# only default options -- avoids constructing a new JSONEncoder per call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    encoding='utf-8'
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', **kw):
    """
    Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
    ``.write()``-supporting file-like object).

    Keyword arguments:

    - ``skipkeys``: skip (rather than raise ``TypeError`` on) dict keys
      that are not of a basic type (``str``, ``unicode``, ``int``,
      ``long``, ``float``, ``bool``, ``None``).
    - ``ensure_ascii``: when ``False``, some chunks written to ``fp`` may
      be ``unicode`` instances; unless ``fp.write()`` explicitly
      understands ``unicode`` (as in ``codecs.getwriter()``) this is
      likely to cause an error.
    - ``check_circular``: when ``False``, the circular-reference check is
      skipped and a cycle ends in an ``OverflowError`` (or worse).
    - ``allow_nan``: when ``False``, out-of-range floats (``nan``,
      ``inf``, ``-inf``) raise ``ValueError`` in strict compliance with
      the JSON spec instead of being written as their JavaScript
      equivalents (``NaN``, ``Infinity``, ``-Infinity``).
    - ``indent``: non-negative integer indent level for pretty-printing;
      0 inserts only newlines, ``None`` is the most compact form.
    - ``separators``: ``(item_separator, dict_separator)`` tuple instead
      of the default ``(', ', ': ')``; ``(',', ':')`` is most compact.
    - ``encoding``: character encoding for ``str`` instances (UTF-8 by
      default).
    - ``cls``: a ``JSONEncoder`` subclass (e.g. one that overrides
      ``.default()`` to serialize additional types).
    """
    all_defaults = (not kw and cls is None and indent is None
                    and separators is None and encoding == 'utf-8'
                    and skipkeys is False and ensure_ascii is True
                    and check_circular is True and allow_nan is True)
    if all_defaults:
        # All defaults: reuse the shared module-level encoder.
        encoder = _default_encoder
    else:
        encoder_cls = JSONEncoder if cls is None else cls
        encoder = encoder_cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan,
            indent=indent, separators=separators, encoding=encoding, **kw)
    # Stream chunk by chunk; writelines could be faster on some Python
    # versions but is harder to debug.
    for piece in encoder.iterencode(obj):
        fp.write(piece)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        encoding='utf-8', **kw):
    """
    Serialize ``obj`` to a JSON formatted ``str``.

    The keyword arguments have the same meaning as in ``dump()``; the
    only difference is that the result is returned as a string instead
    of being written to a file-like object.  When ``ensure_ascii`` is
    ``False`` the return value is a ``unicode`` instance (subject to
    normal Python ``str``/``unicode`` coercion rules) rather than an
    ASCII-escaped ``str``.
    """
    if (not kw and cls is None and indent is None and separators is None
            and encoding == 'utf-8' and skipkeys is False
            and ensure_ascii is True and check_circular is True
            and allow_nan is True):
        # All defaults: reuse the shared module-level encoder.
        return _default_encoder.encode(obj)
    encoder_cls = JSONEncoder if cls is None else cls
    return encoder_cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, encoding=encoding,
        **kw).encode(obj)
# Module-level decoder shared by load()/loads() when only defaults are used.
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, **kw):
    """
    Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.

    ``encoding`` names the (ASCII-based) encoding of the stream when it
    is not utf-8 (e.g. latin-1).  Encodings that are not ASCII based
    (such as UCS-2) are not allowed: wrap the stream with
    ``codecs.getreader(fp)(encoding)``, or decode it to ``unicode``
    yourself and pass the result to ``loads()``.

    ``object_hook``, if given, is called with the result of every object
    literal decode (a ``dict``) and its return value is used instead of
    that ``dict`` -- useful for custom decoders such as JSON-RPC class
    hinting.

    A custom ``JSONDecoder`` subclass may be supplied via ``cls``.
    """
    # Read the whole document and delegate; loads() holds the real logic.
    return loads(fp.read(), encoding=encoding, cls=cls,
                 object_hook=object_hook, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, **kw):
    """
    Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a
    JSON document) to a Python object.

    ``encoding`` names the (ASCII-based) encoding of ``s`` when it is a
    ``str`` not encoded in utf-8 (e.g. latin-1).  Encodings that are not
    ASCII based (such as UCS-2) are not allowed and should be decoded to
    ``unicode`` first.

    ``object_hook``, if given, is called with the result of every object
    literal decode (a ``dict``) and its return value is used instead of
    that ``dict`` -- useful for custom decoders such as JSON-RPC class
    hinting.

    A custom ``JSONDecoder`` subclass may be supplied via ``cls``.
    """
    all_defaults = (cls is None and encoding is None
                    and object_hook is None and not kw)
    if all_defaults:
        # Fast path: reuse the shared module-level decoder.
        return _default_decoder.decode(s)
    decoder_cls = JSONDecoder if cls is None else cls
    if object_hook is not None:
        kw['object_hook'] = object_hook
    return decoder_cls(encoding=encoding, **kw).decode(s)
def read(s):
    """Deprecated json-py compatibility alias; use ``loads(s)`` instead."""
    import warnings
    warnings.warn("simplejson.loads(s) should be used instead of read(s)",
                  DeprecationWarning)
    return loads(s)
def write(obj):
    """Deprecated json-py compatibility alias; use ``dumps(obj)`` instead."""
    import warnings
    warnings.warn("simplejson.dumps(s) should be used instead of write(s)",
                  DeprecationWarning)
    return dumps(obj)
| zepheira/exhibit | src/webapp/api/extensions/curate/files/admin/simplejson/__init__.py | Python | bsd-3-clause | 10,786 | 0.002781 |
# Copyright 2011 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Some of POX's core API and functionality is here, largely in the POXCore
class (an instance of which is available as pox.core.core).
This includes things like component rendezvous, logging, system status
(up and down events), etc.
"""
# Set up initial log state
import logging
import inspect
import time
import os
# Derive the directory containing this file (_path) and the parent of
# the package directory (_ext_path); getLogger() strips these prefixes
# from module filenames when building dotted logger names.
_path = inspect.stack()[0][1]
_ext_path = _path[0:_path.rindex(os.sep)]
_ext_path = os.path.dirname(_ext_path) + os.sep
_path = os.path.dirname(_path) + os.sep
# Message-squelching state: identical consecutive messages within
# SQUELCH_TIME seconds are collapsed into a "repeated N times" line.
SQUELCH_TIME = 5
_squelch = ''
_squelchTime = 0
_squelchCount = 0
def getLogger (name=None, moreFrames=0):
  """
  In general, you don't need to call this directly, and will use
  core.getLogger() instead.

  If no name is given, one is derived from the caller's source file:
  the .py/.pyc suffix and the POX base directories are stripped and
  path separators become dots.  moreFrames skips additional stack
  frames when this is called through a wrapper (see POXCore.getLogger).
  """
  if name is None:
    # Derive the logger name from the calling module's filename.
    s = inspect.stack()[1+moreFrames]
    name = s[1]
    if name.endswith('.py'):
      name = name[0:-3]
    elif name.endswith('.pyc'):
      name = name[0:-4]
    if name.startswith(_path):
      name = name[len(_path):]
    elif name.startswith(_ext_path):
      name = name[len(_ext_path):]
    name = name.replace('/', '.').replace('\\', '.') #FIXME: use os.path or whatever
    # Remove double names ("topology.topology" -> "topology")
    if name.find('.') != -1:
      n = name.split('.')
      if len(n) >= 2:
        if n[-1] == n[-2]:
          del n[-1]
          name = '.'.join(n)
    if name.endswith(".__init__"):
      name = name.rsplit(".__init__",1)[0]
  l = logging.getLogger(name)
  g=globals()
  if not hasattr(l, "print"):
    # Attach a print/msg convenience method to the logger (only once per
    # logger) that prefixes messages with [Class.method:line] and
    # squelches repeated identical messages.
    def printmsg (*args, **kw):
      #squelch = kw.get('squelch', True)
      msg = ' '.join((str(s) for s in args))
      s = inspect.stack()[1]
      o = '['
      if 'self' in s[0].f_locals:
        o += s[0].f_locals['self'].__class__.__name__ + '.'
      o += s[3] + ':' + str(s[2]) + '] '
      o += msg
      if o == _squelch:
        # Same message as last time: count it instead of logging,
        # emitting a summary once SQUELCH_TIME has elapsed.
        if time.time() >= _squelchTime:
          l.debug("[Previous message repeated %i more times]" % (g['_squelchCount']+1,))
          g['_squelchCount'] = 0
          g['_squelchTime'] = time.time() + SQUELCH_TIME
        else:
          g['_squelchCount'] += 1
      else:
        # New message: flush any pending repeat count, then log it.
        g['_squelch'] = o
        if g['_squelchCount'] > 0:
          l.debug("[Previous message repeated %i more times]" % (g['_squelchCount'],))
          g['_squelchCount'] = 0
        g['_squelchTime'] = time.time() + SQUELCH_TIME
        l.debug(o)
    setattr(l, "print", printmsg)
    setattr(l, "msg", printmsg)
  return l
# Module-level logger for core itself.
# NOTE(review): wrapped in a lambda, presumably so the stack frame used
# for name derivation is predictable -- verify before simplifying.
log = (lambda : getLogger())()
from pox.lib.revent import *
# Now use revent's exception hook to put exceptions in event handlers into
# the log...
def _revent_exception_hook (source, event, args, kw, exc_info):
  """
  revent exception hook: log (with traceback) any exception raised by an
  event handler, identifying the source component and event type by name.
  """
  # Reduce source/event to readable names.  This is best-effort only and
  # must never raise while we are already handling another exception.
  c = source
  t = event
  try:
    if hasattr(c, "__class__"): c = c.__class__.__name__
    if isinstance(t, Event): t = t.__class__.__name__
    elif issubclass(t, Event): t = t.__name__
  except Exception:
    # e.g. issubclass() raising TypeError when t isn't a class -- fall
    # back to the raw objects.  (Was a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit.)
    pass
  log.exception("Exception while handling %s!%s...\n" % (c,t))
import pox.lib.revent.revent
# Install the hook so revent routes handler exceptions into our log.
pox.lib.revent.revent.handleEventException = _revent_exception_hook
class GoingUpEvent (Event):
  """ Fired when system is going up (raised before UpEvent). """
  pass
class GoingDownEvent (Event):
  """ Fired when system is going down (raised before DownEvent). """
  pass
class UpEvent (Event):
  """ Fired when system is up. """
  pass
class DownEvent (Event):
  """ Fired when system is down. """
  pass
class ComponentRegistered (Event):
  """
  This is raised by core whenever a new component is registered.
  By watching this, a component can monitor whether other components it
  depends on are available.
  """
  def __init__ (self, name, component):
    Event.__init__(self)
    self.name = name  # name the component was registered under
    self.component = component  # the registered object itself
import pox.lib.recoco as recoco
class POXCore (EventMixin):
  """
  A nexus of the POX API.
  pox.core.core is a reference to an instance of this class. This class
  serves a number of functions.
  An important one is that it can serve as a rendezvous point for
  components. A component can register objects on core, and they can
  then be accessed on the core object (e.g., if you register foo, then
  there will then be a pox.core.core.foo). In many cases, this means you
  won't need to import a module.
  Another purpose to the central registration is that it decouples
  functionality from a specific module. If myL2Switch and yourL2Switch
  both register as "switch" and both provide the same API, then it doesn't
  matter. Doing this with imports is a pain.
  Additionally, a number of common API functions are available here.
  """
  # Lifecycle + component-arrival events raised on core itself.
  _eventMixin_events = set([
    UpEvent,
    DownEvent,
    GoingUpEvent,
    GoingDownEvent,
    ComponentRegistered
  ])
  def __init__ (self):
    self.debug = False
    self.running = True
    self.components = {}  # name -> registered component object
    self.version = (0,0,0)
    print "{0} / Copyright 2011 James McCauley".format(self.version_string)
    # Single daemon scheduler on which all co-operative tasks run.
    self.scheduler = recoco.Scheduler(daemon=True)
  @property
  def version_string (self):
    # e.g. "POX 0.0.0"
    return "POX " + '.'.join(map(str, self.version))
  def callDelayed (_self, _seconds, _func, *args, **kw):
    """
    Calls the function at a later time.
    This is just a wrapper around a recoco timer.
    """
    t = recoco.Timer(_seconds, _func, args=args, kw=kw,
                     scheduler = _self.scheduler)
    return t
  def callLater (_self, _func, *args, **kw):
    # first arg is `_self` rather than `self` in case the user wants
    # to specify self as a keyword argument
    """
    Call the given function with the given arguments within the context
    of the co-operative threading environment.
    It actually calls it sooner rather than later. ;)
    Much of POX is written without locks because it's all thread-safe
    with respect to itself, as it's written using the recoco co-operative
    threading library. If you have a real thread outside of the
    co-operative thread context, you need to be careful about calling
    things within it. This function provides a rather simple way that
    works for most situations: you give it a callable (like a method)
    and some arguments, and it will call that callable with those
    arguments from within the co-operative threader, taking care of
    synchronization for you.
    """
    _self.scheduler.callLater(_func, *args, **kw)
  def raiseLater (_self, _obj, *args, **kw):
    # first arg is `_self` rather than `self` in case the user wants
    # to specify self as a keyword argument
    """
    This is similar to callLater(), but provides an easy way to raise a
    revent event from outside the co-operative context.
    Rather than foo.raiseEvent(BarEvent, baz, spam), you just do
    core.raiseLater(foo, BarEvent, baz, spam).
    """
    _self.scheduler.callLater(_obj.raiseEvent, *args, **kw)
  def getLogger (self, *args, **kw):
    """
    Returns a logger. Pass it the name you want if you'd like to specify
    one (e.g., core.getLogger("foo")). If you don't specify a name, it
    will make one up based on the module name it is called from.
    """
    # moreFrames=1 skips this wrapper frame so the caller's module is used.
    return getLogger(moreFrames=1,*args, **kw)
  def quit (self):
    """
    Shut down POX.
    """
    if self.running:
      self.running = False
      log.info("Going down...")
      import gc
      gc.collect()
      self.raiseEvent(GoingDownEvent())
      self.callLater(self.scheduler.quit)
      # Give the scheduler up to ~5 seconds (50 * 0.1s) to wind down.
      for i in range(50):
        if self.scheduler._hasQuit: break
        gc.collect()
        time.sleep(.1)
      if not self.scheduler._allDone:
        log.warning("Scheduler didn't quit in time")
      self.raiseEvent(DownEvent())
      log.info("Down.")
  def goUp (self):
    log.debug(self.version_string + " going up...")
    import platform
    py = "{impl} ({vers}/{build})".format(
        impl=platform.python_implementation(),
        vers=platform.python_version(),
        build=platform.python_build()[1].replace("  "," "))
    log.debug("Running on " + py)
    self.raiseEvent(GoingUpEvent())
    log.info(self.version_string + " is up.")
    self.raiseEvent(UpEvent())
  def hasComponent (self, name):
    """
    Returns True if a component with the given name has been registered.
    """
    return name in self.components
  def registerNew (self, __componentClass, *args, **kw):
    """
    Give it a class (and optional __init__ arguments), and it will
    create an instance and register it using the class name. If the
    instance has a _core_name property, it will use that instead.
    It returns the new instance.
    core.registerNew(FooClass, arg) is roughly equivalent to
    core.register("FooClass", FooClass(arg)).
    """
    name = __componentClass.__name__
    obj = __componentClass(*args, **kw)
    if hasattr(obj, '_core_name'):
      # Default overridden
      name = obj._core_name
    self.register(name, obj)
    return obj
  def register (self, name, component):
    """
    Makes the object "component" available as pox.core.core.name.
    """
    #TODO: weak references?
    if name in self.components:
      log.warn("Warning: Registered '%s' multipled times" % (name,))
    self.components[name] = component
    self.raiseEventNoErrors(ComponentRegistered, name, component)
  def listenToDependencies(self, sink, components):
    """
    If a component depends on having other components
    registered with core before it can boot, it can use this method to
    check for registration, and listen to events on those dependencies.
    Note that event handlers named with the _handle* pattern in the sink must
    include the name of the desired source as a prefix. For example, if topology is a
    dependency, a handler for topology's SwitchJoin event must be labeled:
    def _handle_topology_SwitchJoin(...)
    sink - the component waiting on dependencies
    components - a list of dependent component names
    Returns whether all of the desired components are registered.
    """
    # NOTE: mutates the caller's `components` list, removing every
    # dependency that has already been satisfied.
    if components == None or len(components) == 0:
      return True
    got = set()
    for c in components:
      if self.hasComponent(c):
        setattr(sink, c, getattr(self, c))
        sink.listenTo(getattr(self, c), prefix=c)
        got.add(c)
      else:
        setattr(sink, c, None)
    for c in got:
      components.remove(c)
    if len(components) == 0:
      log.debug(sink.__class__.__name__ + " ready")
      return True
    return False
  def __getattr__ (self, name):
    # Fallback attribute lookup: resolve registered component names, so
    # core.foo works for any component registered as "foo".
    if name not in self.components:
      raise AttributeError("'%s' not registered" % (name,))
    return self.components[name]
# The singleton instance the rest of POX imports as pox.core.core.
core = POXCore()
| lewischeng-ms/pox | pox/core.py | Python | gpl-3.0 | 11,128 | 0.012491 |
# -*- coding: utf-8 -*-
#
#
# $Date: 2005/11/04 14:06:36 $, by $Author: ivan $, $Revision: 1.1 $
#
"""
Graph pattern class used by the SPARQL implementation
"""
import sys, os, time, datetime
from rdflib.term import Literal, BNode, URIRef, Variable
from types import *
from rdflib.namespace import NamespaceManager
from rdflib.graph import Graph
from rdflib.sparql import _questChar, Debug, SPARQLError
def _createResource(v) :
    """Coerce a Python value into an RDFLib term.

    Values that are already RDFLib resources (Literal, BNode, URIRef)
    are returned untouched; anything else is wrapped in a Literal, whose
    string form is Python's default representation of the value and
    which attaches the matching XML Schema datatype itself.

    @param v: Python variable
    @return: an RDFLib Literal, or ``v`` itself if it is already an
    RDFLib resource (ie, Literal, BNode, or URIRef)
    """
    if isinstance(v, (Literal, BNode, URIRef)):
        return v
    return Literal(v)  # Literal now does the datatype bits
def _isResQuest(r) :
    """Return True when 'r' is a non-empty string starting with the
    query character (ie, of the form "?XXX").

    @rtype: Boolean
    """
    return bool(r and isinstance(r, basestring) and r[0] == _questChar)
class GraphPattern :
    """
    Storage of one Graph Pattern, ie, the pattern tuples and the
    possible (functional) constraints (filters)
    """
    def __init__(self,patterns=[]) :
        """
        @param patterns: an initial list of graph pattern tuples
        """
        # NOTE(review): the mutable default [] is harmless here -- it is
        # only read (never mutated in place).
        self.patterns = []
        self.constraints = []
        self.unbounds = []
        self.bnodes = {}
        if type(patterns) == list :
            self.addPatterns(patterns)
        elif type(patterns) == tuple :
            self.addPattern(patterns)
        else :
            raise SPARQLError("illegal argument, pattern must be a tuple or a list of tuples")
    def _generatePattern(self,tupl) :
        """
        Normalize one pattern tuple (helper used by addPattern and
        insertPattern; it does not append anything itself). Possible type
        literals are converted to real literals on the fly. Each tuple
        should contain either 3 elements (for an RDF Triplet pattern) or
        four, where the fourth element is a per-pattern constraint
        (filter). (The general constraint of SPARQL can be optimized
        by assigning a constraint to a specific pattern; because it
        stops the graph expansion, its usage might be much more
        optimal than the 'global' constraint).
        @param tupl: either a three or four element tuple
        @return: a four element tuple (s,p,o,f) where f is None when no
        per-pattern constraint was given
        """
        if type(tupl) != tuple :
            raise SPARQLError("illegal argument, pattern must be a tuple, got %s" % type(tupl))
        if len(tupl) != 3 and len(tupl) != 4 :
            raise SPARQLError("illegal argument, pattern must be a tuple of 3 or 4 element, got %s" % len(tupl))
        if len(tupl) == 3 :
            (s,p,o) = tupl
            f = None
        else :
            (s,p,o,f) = tupl
        final=[]
        for c in (s,p,o) :
            if _isResQuest(c) :
                # "?var" style unbound variable: remember it (only once).
                if not c in self.unbounds :
                    self.unbounds.append(c)
                final.append(c)
            elif isinstance(c, BNode):
                #Do nothing - BNode name management is handled by SPARQL parser
                # if not c in self.bnodes :
                #     self.bnodes[c] = BNode()
                final.append(c)
            else :
                final.append(_createResource(c))
        final.append(f)
        return tuple(final)
    def addPattern(self,tupl) :
        """
        Append a tuple to the local patterns. Possible type literals
        are converted to real literals on the fly. Each tuple should
        be contain either 3 elements (for an RDF Triplet pattern) or
        four, where the fourth element is a per-pattern constraint
        (filter). (The general constraint of SPARQL can be optimized
        by assigning a constraint to a specific pattern; because it
        stops the graph expansion, its usage might be much more
        optimal than the the 'global' constraint).
        @param tupl: either a three or four element tuple
        """
        self.patterns.append(self._generatePattern(tupl))
    def insertPattern(self,tupl) :
        """
        Insert a tuple to to the start of local patterns. Possible
        type literals are converted to real literals on the fly. Each
        tuple should be contain either 3 elements (for an RDF Triplet
        pattern) or four, where the fourth element is a per-pattern
        constraint (filter). (The general constraint of SPARQL can be
        optimized by assigning a constraint to a specific pattern;
        because it stops the graph expansion, its usage might be much
        more optimal than the the 'global' constraint).
        Semantically, the behaviour induced by a graphPattern does not
        depend on the order of the patterns. However, due to the
        behaviour of the expansion algorithm, users may control the
        speed somewhat by adding patterns that would 'cut' the
        expansion tree soon (ie, patterns that reduce the available
        triplets significantly). API users may be able to do that,
        hence this additional method.
        @param tupl: either a three or four element tuple
        """
        self.patterns.insert(0,self._generatePattern(tupl))
    def addPatterns(self,lst) :
        """
        Append a list of tuples to the local patterns. Possible type
        literals are converted to real literals on the fly. Each
        tuple should be contain either three elements (for an RDF
        Triplet pattern) or four, where the fourth element is a
        per-pattern constraint. (The general constraint of SPARQL can
        be optimized by assigning a constraint to a specific pattern;
        because it stops the graph expansion, its usage might be much
        more optimal than the the 'global' constraint).
        @param lst: list consisting of either a three or four element tuples
        """
        for l in lst:
            self.addPattern(l)
    def insertPatterns(self,lst) :
        """
        Insert a list of tuples to the start of the local
        patterns. Possible type literals are converted to real
        literals on the fly. Each tuple should be contain either
        three elements (for an RDF Triplet pattern) or four, where the
        fourth element is a per-pattern constraint. (The general
        constraint of SPARQL can be optimized by assigning a
        constraint to a specific pattern; because it stops the graph
        expansion, its usage might be much more optimal than the the
        'global' constraint).
        Semantically, the behaviour induced by a graphPattern does not
        depend on the order of the patterns. However, due to the
        behaviour of the expansion algorithm, users may control the
        speed somewhat by adding patterns that would 'cut' the
        expansion tree soon (ie, patterns that reduce the available
        triplets significantly). API users may be able to do that,
        hence this additional method.
        @param lst: list consisting of either a three or four element tuples
        """
        # Insert back-to-front so the list order of lst is preserved at
        # the head of self.patterns.
        for i in xrange(len(lst)-1,-1,-1) :
            self.insertPattern(lst[i])
    def addConstraint(self,func) :
        """
        Add a global filter constraint to the graph pattern. 'func'
        must be a method with a single input parameter (a dictionary)
        returning a boolean. This method is I{added} to previously
        added methods, ie, I{all} methods must return True to accept a
        binding.
        @param func: filter function
        """
        if type(func) == FunctionType :
            self.constraints.append(func)
        else :
            raise SPARQLError("illegal argument, constraint must be a function type, got %s" % type(func))
    def addConstraints(self,lst) :
        """
        Add a list of global filter constraints to the graph
        pattern. Each function in the list must be a method with a
        single input parameter (a dictionary) returning a
        boolean. These methods are I{added} to previously added
        methods, ie, I{all} methods must return True to accept a
        binding.
        @param lst: list of functions
        """
        for l in lst:
            self.addConstraint(l)
    def construct(self,tripleStore,bindings) :
        """
        Add triples to a tripleStore based on a variable bindings of
        the patterns stored locally. The triples are patterned by the
        current Graph Pattern. The method is used to construct a graph
        after a successful querying.
        @param tripleStore: an (rdflib) Triple Store
        @param bindings: dictionary
        """
        # BNodes created while building the pattern get fresh local
        # counterparts so each construct() call mints new blank nodes.
        localBnodes = {}
        for c in self.bnodes :
            localBnodes[c] = BNode()
        def bind(st) :
            # Map one pattern position to a concrete term (or None when
            # an unbound "?var" has no binding).
            if _isResQuest(st) :
                if st in bindings :
                    return bindings[st]
                else :
                    # NOTE(review): GraphPattern2 is not defined in this
                    # module -- presumably a stale reference (perhaps to
                    # BasicGraphPattern); verify before relying on this
                    # branch.
                    if isinstance(self,GraphPattern2) :
                        return st
                    else :
                        return None
            elif isinstance(st,BNode) :
                for c in self.bnodes :
                    if self.bnodes[c] == st :
                        # this is a BNode that was created as part of building up the pattern
                        return localBnodes[c]
                # if we got here, the BNode comes from somewhere else...
                return st
            else :
                return st
        for pattern in self.patterns :
            (s,p,o,f) = pattern
            triplet = []
            valid = True
            for res in (s,p,o) :
                val = bind(res)
                if val != None :
                    triplet.append(val)
                else :
                    # An unbound position invalidates the whole triple.
                    valid = False
                    break
            if valid :
                tripleStore.add(tuple(triplet))
    def __add__(self,other) :
        """Adding means concatenating all the patterns and filters arrays"""
        retval = GraphPattern()
        retval += self
        retval += other
        return retval
    def __iadd__(self,other) :
        """Adding means concatenating all the patterns and filters arrays"""
        self.patterns += other.patterns
        self.constraints += other.constraints
        for c in other.unbounds :
            if not c in self.unbounds :
                self.unbounds.append(c)
        for c in other.bnodes :
            if not c in self.bnodes :
                self.bnodes[c] = other.bnodes[c]
        return self
    def __str__(self) :
        return self.__repr__()
    def isEmpty(self) :
        """Is the pattern empty?
        @rtype: Boolean
        """
        return len(self.patterns) == 0
class BasicGraphPattern(GraphPattern) :
    """One, justified, problem with the current definition of L{GraphPattern<GraphPattern>} is that it
    makes it difficult for users to use a literal of the type "?XXX", because any string beginning
    with "?" will be considered to be an unbound variable. The only way of doing this is that the user
    explicitly creates a Literal object and uses that as part of the pattern.
    This class is a superclass of L{GraphPattern<GraphPattern>} which does I{not} do this, but requires the
    usage of a separate variable class instance"""
    def __init__(self,patterns=None,prolog=None) :
        """
        @param patterns: an initial list of graph pattern tuples
        @param prolog: query prolog carrying the prefix bindings used
        by L{canonicalTerm} to compute qnames (may be None)
        """
        # The former signature used a mutable default argument ([]);
        # None is used as the sentinel instead so that no list object
        # can ever be shared between instances.
        if patterns is None :
            patterns = []
        GraphPattern.__init__(self,patterns)
        self.prolog = prolog
    def canonicalTerm(self,term):
        """Return a readable, canonical form of 'term': a qname for
        URIRefs whose prefix is declared in the prolog, the N3
        rendering for literals, bnodes and variables, and the term
        itself otherwise."""
        if isinstance(term,URIRef):
            if self.prolog is not None:
                namespace_manager = NamespaceManager(Graph())
                for prefix,uri in self.prolog.prefixBindings.items():
                    namespace_manager.bind(prefix, uri, override=False)
                try:
                    prefix,uri,localName=namespace_manager.compute_qname(term)
                except Exception:
                    # compute_qname can fail for URIs that cannot be
                    # split into a namespace / local-name pair; fall
                    # back to the raw term (was a bare 'except:').
                    return term
                if prefix not in self.prolog.prefixBindings:
                    return term
                else:
                    return u':'.join([prefix,localName])
            else:
                return term
        elif isinstance(term,Literal):
            return term.n3()
        elif isinstance(term,BNode):
            return term.n3()
        else:
            assert isinstance(term,Variable)
            return term.n3()
    def __repr__(self):
        # NOTE: four lines of unreachable debug formatting (and two
        # commented-out variants) that followed these returns were
        # removed; both branches return unconditionally.
        if self.constraints:
            return "Filter(.. a filter ..,BGP(%s))"%(','.join([','.join([self.canonicalTerm(pat[0]),
                                                                         self.canonicalTerm(pat[1]),
                                                                         self.canonicalTerm(pat[2])])
                                                               for pat in self.patterns]))
        else:
            return "BGP(%s)"%(','.join(['('+','.join([self.canonicalTerm(s),
                                                      self.canonicalTerm(p),
                                                      self.canonicalTerm(o)])+')'
                                        for s,p,o,f in self.patterns]))
    def _generatePattern(self,tupl) :
        """
        Append a tuple to the local patterns. Possible type literals
        are converted to real literals on the fly. Each tuple should
        be contain either 3 elements (for an RDF Triplet pattern) or
        four, where the fourth element is a per-pattern constraint
        (filter). (The general constraint of SPARQL can be optimized
        by assigning a constraint to a specific pattern; because it
        stops the graph expansion, its usage might be much more
        optimal than the the 'global' constraint).
        @param tupl: either a three or four element tuple
        """
        if type(tupl) != tuple :
            raise SPARQLError("illegal argument, pattern must be a tuple, got %s" % type(tupl))
        if len(tupl) != 3 and len(tupl) != 4 :
            raise SPARQLError("illegal argument, pattern must be a tuple of 3 or 4 element, got %s" % len(tupl))
        if len(tupl) == 3 :
            (s,p,o) = tupl
            f = None
        else :
            (s,p,o,f) = tupl
        final=[]
        for c in (s,p,o) :
            if isinstance(c,Variable) :
                # variables are registered among the unbounds once
                if not c in self.unbounds :
                    self.unbounds.append(c)
                final.append(c)
            elif isinstance(c, BNode):
                #Do nothing - BNode name management is handled by SPARQL parser
                final.append(c)
            else :
                final.append(_createResource(c))
        final.append(f)
        return tuple(final)
    def fetchTerminalExpression(self):
        """Generator yielding this pattern itself; a basic graph
        pattern is a terminal node of an expression tree."""
        yield self
if __name__ == '__main__' :
    # Ad-hoc demo: build a small pattern mixing a plain string subject,
    # "?name" shorthands and an explicit Variable instance, then print
    # its BGP representation. (Python 2 print statement; this module
    # predates Python 3.)
    v1 = Variable("a")
    g = BasicGraphPattern([("a","?b",24),("?r","?c",12345),(v1,"?c",3333)])
    print g
| alcides/rdflib | rdflib/sparql/graphPattern.py | Python | bsd-3-clause | 15,809 | 0.013349 |
# -*- coding: utf-8 -*-
"""
This file is part of SimpleFSM.
SimpleFSM is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SimpleFSM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with SimpleFSM. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014 Lucas Liendo.
"""
from abc import ABCMeta, abstractmethod
from exceptions import *
class State(object):
    """
    A single, named state of a finite state machine.
    Every state carries an id distinguishing it from the machine's
    other states, plus two flags telling whether it is the start state
    and/or a final state. Among all states exactly one must be the
    start state and at least one must be final.
    """
    def __init__(self, id, start_state=False, final_state=False):
        self._id = id
        self._start_state = start_state
        self._final_state = final_state
    @property
    def id(self):
        """The identifier of this state."""
        return self._id
    @property
    def start_state(self):
        """True when this state is the machine's start state."""
        return self._start_state
    @start_state.setter
    def start_state(self, start_state):
        self._start_state = start_state
    @property
    def final_state(self):
        """True when this state is a final (accepting) state."""
        return self._final_state
    @final_state.setter
    def final_state(self, final_state):
        self._final_state = final_state
    def transit(self, fsm):
        """
        Called by SimpleFSM to move from this state to the next one.
        Reads a symbol from the machine, looks for an outgoing
        transition accepting it and updates the machine's current
        state. Raises FSMRejectedInput when no transition matches.
        """
        symbol = fsm.read_symbol()
        candidates = [t for t in fsm.transitions
                      if t.from_state.id == self.id and t.accepts(symbol)]
        if not candidates:
            raise FSMRejectedInput([symbol])
        # Keep the original semantics: when several transitions match,
        # the last one defined wins (list.pop() popped the last item).
        fsm.current_state = candidates[-1].to_state
        return symbol
    def __eq__(self, other):
        return self.id == other.id
class Transition(object):
    """
    A transition connecting two states of a finite state machine.
    Three mandatory arguments are required to build one:
    from_state -- the state the transition leaves.
    to_state -- the state the transition enters.
    transition_function -- a one-argument predicate that decides
    whether a read symbol is accepted by this transition.
    """
    def __init__(self, from_state, to_state, transition_function):
        self._source = from_state
        self._target = to_state
        self._predicate = transition_function
    @property
    def from_state(self):
        """The state this transition leaves from."""
        return self._source
    @property
    def to_state(self):
        """The state this transition leads to."""
        return self._target
    @property
    def transition_function(self):
        """The predicate used to test symbols against this transition."""
        return self._predicate
    def accepts(self, symbol):
        """
        Return True when the transition function accepts the read symbol.
        """
        return self._predicate(symbol)
    def __eq__(self, other):
        return (self.from_state == other.from_state
                and self.to_state == other.to_state
                and self.transition_function == other.transition_function)
class SimpleFSM(object):
    """
    The SimpleFSM class models a finite state machine. To use this class
    you must create a custom class that inherits from SimpleFSM and implement
    the read_symbol() method. This method is responsible for returning a symbol
    each time it is called. This symbol is then tested to check if it's actually
    accepted by the FSM.
    Typically you would instantiate a set of States and Transitions. After
    this is done you instantiate your custom-implemented FSM and add all the
    states and transitions.
    After your custom-implemented FSM is built you should call the run()
    method. If the word is recognized a list with all the accepted symbols
    is returned otherwise a FSMRejectedInput is raised.
    """
    __metaclass__ = ABCMeta
    def __init__(self):
        self._states = []
        self._transitions = []
        self._accepted_symbols = []
        self._final_states = None
        self._current_state = None
        self._remaining_input = True
    @property
    def transitions(self):
        """Returns a list containing all the defined transitions for this FSM."""
        return self._transitions
    @property
    def current_state(self):
        """Returns the state the FSM is currently in."""
        return self._current_state
    @current_state.setter
    def current_state(self, state):
        self._current_state = state
    def add_state(self, state):
        """
        Adds a new state to the FSM. If the supplied state already exists
        a FSMDuplicatedState exception is raised.
        """
        if state in self._states:
            raise FSMDuplicatedState(state)
        self._states.append(state)
    def add_states(self, states):
        """
        Adds a set of states to the FSM. If one of the states is already
        present a FSMDuplicatedState exception is raised.
        """
        # Plain loop: the previous list comprehension was used only for
        # its side effects and allocated a throwaway list of Nones.
        for state in states:
            self.add_state(state)
    def add_transition(self, transition):
        """
        Adds a new transition to this FSM. If the supplied transition already
        exists a FSMDuplicatedTransition exception is raised.
        """
        if transition in self._transitions:
            raise FSMDuplicatedTransition(transition)
        self._transitions.append(transition)
    def add_transitions(self, transitions):
        """
        Adds a set of transitions to the FSM. If one of the transitions is
        already present a FSMDuplicatedTransition exception is raised.
        """
        # Plain loop instead of a side-effect list comprehension.
        for transition in transitions:
            self.add_transition(transition)
    def pre_transit(self):
        """
        This method is called just before a transition is performed.
        You may optionally implement this method.
        """
        pass
    @abstractmethod
    def read_symbol(self):
        """
        Abstract method that must be implemented by the user. When there
        is no more input a FSMEndOfInput exception should be raised
        to notify the FSM that no more input is available.
        """
        raise FSMNotImplementedInput()
    def post_transit(self):
        """
        This method is called after a successful transition between two
        states is performed. You may optionally implement this method.
        """
        pass
    def _set_initial_state(self):
        # Exactly one state must be flagged as the start state.
        start_state = [s for s in self._states if s.start_state]
        if len(start_state) > 1:
            raise FSMStartStatesError()
        try:
            self._current_state = start_state.pop()
        except IndexError:
            raise FSMNoStartStateError()
    def _set_final_states(self):
        # At least one state must be flagged as final.
        self._final_states = [s for s in self._states if s.final_state]
        if not self._final_states:
            raise FSMFinalStateError()
    def _set_states(self):
        # Reset per-run bookkeeping so run() can be called repeatedly.
        self._accepted_symbols = []
        self._remaining_input = True
        self._set_initial_state()
        self._set_final_states()
    def run(self):
        """
        Starts the FSM. Returns a list containing the accepted symbols
        otherwise a FSMRejectedInput exception is raised.
        """
        self._set_states()
        while self._remaining_input:
            try:
                self.pre_transit()
                self._accepted_symbols.append(self._current_state.transit(self))
                self.post_transit()
            except FSMEndOfInput:
                # The input source signalled exhaustion; stop reading.
                self._remaining_input = False
        if self.current_state not in self._final_states:
            raise FSMRejectedInput(self._accepted_symbols, type='string')
        return self._accepted_symbols
| lliendo/SimpleFSM | simplefsm/__init__.py | Python | lgpl-3.0 | 8,624 | 0.000464 |
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
from oslo_config import cfg
from solum.api.controllers.v1 import trigger
from solum.tests import base
from solum.tests import fakes
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.controllers.v1.trigger.app_handler'
            '.AppHandler')
class TestTriggerController(base.BaseTestCase):
    """Tests for the v1 trigger controller.

    The class decorators stub out pecan's request/response objects and
    the application handler. mock.patch injects mocks bottom-up, and
    method-level patches come before class-level ones, so every test
    signature is (self, <method mocks, bottom decorator first>,
    assem_mock, resp_mock, request_mock).
    """
    def test_trigger_get_workflow_with_empty_body(self, assem_mock,
                                                  resp_mock, request_mock):
        """An empty query dict yields no workflow."""
        obj = trigger.TriggerController()
        workflow = obj._get_workflow({})
        self.assertIsNone(workflow)

    def test_trigger_get_workflow_with_deploy(self, assem_mock,
                                              resp_mock, request_mock):
        """A single stage is parsed into a one-element workflow."""
        obj = trigger.TriggerController()
        query = {'workflow': 'deploy'}
        workflow = obj._get_workflow(query)
        self.assertEqual(['deploy'], list(workflow))

    def test_trigger_get_workflow_with_build_deploy(self, assem_mock,
                                                    resp_mock, request_mock):
        """'+'-separated stages are parsed in order."""
        obj = trigger.TriggerController()
        query = {'workflow': 'build+deploy'}
        workflow = obj._get_workflow(query)
        self.assertEqual(['build', 'deploy'], list(workflow))

    def test_trigger_get_workflow_with_all(self, assem_mock,
                                           resp_mock, request_mock):
        """All three known stages are accepted together."""
        obj = trigger.TriggerController()
        query = {'workflow': 'unittest+build+deploy'}
        workflow = obj._get_workflow(query)
        self.assertEqual(['unittest', 'build', 'deploy'], list(workflow))

    def test_trigger_get_workflow_with_invalid_stage(self, assem_mock,
                                                     resp_mock, request_mock):
        """Unknown stage names are silently dropped."""
        obj = trigger.TriggerController()
        query = {'workflow': 'unittest+unitunitunittest'}
        workflow = obj._get_workflow(query)
        self.assertEqual(['unittest'], list(workflow))

    def test_trigger_process_request_private_repo(self, assem_mock,
                                                  resp_mock, request_mock):
        """Private repos never yield a collaborator-check URL."""
        cfg.CONF.api.rebuild_phrase = "solum retry tests"
        status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
        collab_url = ('https://api.github.com/repos/u/r/' +
                      'collaborators{/collaborator}')
        body_dict = {'sender': {'url': 'https://api.github.com'},
                     'comment': {'commit_id': 'asdf',
                                 'body': ' SOLUM retry tests ',
                                 'user': {'login': 'u'}},
                     'repository': {'statuses_url': status_url,
                                    'collaborators_url': collab_url,
                                    'private': True}}
        obj = trigger.TriggerController()
        commit_sha, collab_url = obj._process_request(body_dict)
        self.assertIsNone(collab_url)
        self.assertEqual('asdf', commit_sha)

    def test_trigger_process_request_on_valid_pub_repo(self,
                                                       assem_mock, resp_mock,
                                                       request_mock):
        """Public repos expand the collaborators URL template."""
        cfg.CONF.api.rebuild_phrase = "solum retry tests"
        status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
        collab_url = ('https://api.github.com/repos/u/r/' +
                      'collaborators{/collaborator}')
        body_dict = {'sender': {'url': 'https://api.github.com'},
                     'comment': {'commit_id': 'asdf',
                                 'body': 'solum retry tests',
                                 'user': {'login': 'u'}},
                     'repository': {'statuses_url': status_url,
                                    'collaborators_url': collab_url,
                                    'private': False}}
        obj = trigger.TriggerController()
        commit_sha, collab_url = obj._process_request(body_dict)
        self.assertEqual('https://api.github.com/repos/u/r/collaborators/u',
                         collab_url)
        self.assertEqual('asdf', commit_sha)

    @mock.patch('solum.common.policy.check')
    def test_trigger_post_with_empty_body(self, mock_policy, assem_mock,
                                          resp_mock, request_mock):
        """POST without a body is rejected with 400 and no workflow."""
        mock_policy.return_value = True
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(400, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        assert not tw.called

    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_github_webhook(self, mock_policy, assem_mock,
                                            resp_mock, request_mock):
        """A pull-request webhook triggers the workflow for its sha."""
        mock_policy.return_value = True
        status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
        body_dict = {'sender': {'url': 'https://api.github.com'},
                     'action': 'opened',
                     'pull_request': {'head': {'sha': 'asdf'}},
                     'repository': {'statuses_url': status_url}}
        expected_st_url = 'https://api.github.com/repos/u/r/statuses/asdf'
        request_mock.body = json.dumps(body_dict)
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(202, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        tw.assert_called_once_with('test_id', 'asdf', expected_st_url, None,
                                   workflow=None)

    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_github_comment_webhook(self, mock_policy,
                                                    assem_mock, resp_mock,
                                                    request_mock):
        """A rebuild-phrase comment on a private repo triggers a build."""
        mock_policy.return_value = True
        cfg.CONF.api.rebuild_phrase = "solum retry tests"
        status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
        collab_url = ('https://api.github.com/repos/u/r/' +
                      'collaborators{/collaborator}')
        body_dict = {'sender': {'url': 'https://api.github.com'},
                     'action': 'created',
                     'comment': {'commit_id': 'asdf',
                                 'body': ' SOLUM retry tests ',
                                 'user': {'login': 'u'}},
                     'repository': {'statuses_url': status_url,
                                    'collaborators_url': collab_url,
                                    'private': True}}
        expected_st_url = 'https://api.github.com/repos/u/r/statuses/asdf'
        request_mock.body = json.dumps(body_dict)
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(202, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        tw.assert_called_once_with('test_id', 'asdf', expected_st_url, None,
                                   workflow=None)

    @mock.patch('httplib2.Http.request')
    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_mismatch_comment_pub_repo(self, mock_policy,
                                                       http_mock,
                                                       assem_mock, resp_mock,
                                                       request_mock):
        """A comment not matching the rebuild phrase yields 403.

        NOTE: mock.patch decorators inject mocks bottom-up, so the
        policy mock (innermost decorator) is the first argument; the
        previous signature had http_mock/mock_policy swapped, which
        configured the wrong mocks.
        """
        mock_policy.return_value = True
        cfg.CONF.api.rebuild_phrase = "solum retry tests"
        status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
        collab_url = ('https://api.github.com/repos/u/r/' +
                      'collaborators{/collaborator}')
        body_dict = {'sender': {'url': 'https://api.github.com'},
                     'action': 'created',
                     'comment': {'commit_id': 'asdf',
                                 'body': 'solum is awesome',
                                 'user': {'login': 'u'}},
                     'repository': {'statuses_url': status_url,
                                    'collaborators_url': collab_url,
                                    'private': False}}
        request_mock.body = json.dumps(body_dict)
        http_mock.return_value = ({'status': '204'}, '')  # a collaborator
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(403, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        assert not tw.called

    @mock.patch('httplib2.Http.request')
    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_valid_comment_pub_repo(self, mock_policy,
                                                    http_mock,
                                                    assem_mock, resp_mock,
                                                    request_mock):
        """A valid rebuild comment on a public repo triggers a build.

        NOTE: mock arguments are injected bottom-up (policy mock first);
        the previous signature had http_mock/mock_policy swapped.
        """
        mock_policy.return_value = True
        cfg.CONF.api.rebuild_phrase = "solum retry tests"
        status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
        collab_url = ('https://api.github.com/repos/u/r/' +
                      'collaborators{/collaborator}')
        body_dict = {'sender': {'url': 'https://api.github.com'},
                     'action': 'created',
                     'comment': {'commit_id': 'asdf',
                                 'body': 'solum retry tests',
                                 'user': {'login': 'u'}},
                     'repository': {'statuses_url': status_url,
                                    'collaborators_url': collab_url,
                                    'private': False}}
        expected_st_url = 'https://api.github.com/repos/u/r/statuses/asdf'
        expected_clb_url = 'https://api.github.com/repos/u/r/collaborators/u'
        request_mock.body = json.dumps(body_dict)
        http_mock.return_value = ({'status': '204'}, '')  # Valid collaborator
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(202, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        tw.assert_called_once_with('test_id', 'asdf', expected_st_url,
                                   expected_clb_url, workflow=None)

    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_comment_missing_login(self, mock_policy,
                                                   assem_mock, resp_mock,
                                                   request_mock):
        """A malformed comment payload (no user login) yields 400."""
        mock_policy.return_value = True
        cfg.CONF.api.rebuild_phrase = "solum retry tests"
        status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
        collab_url = ('https://api.github.com/repos/u/r/' +
                      'collaborators{/collaborator}')
        body_dict = {'sender': {'url': 'https://api.github.com'},
                     'comment': {'commit_id': 'asdf',
                                 'body': 'solum retry tests',
                                 'user': 'MISSING_LOGIN'},
                     'repository': {'statuses_url': status_url,
                                    'collaborators_url': collab_url,
                                    'private': False}}
        request_mock.body = json.dumps(body_dict)
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(400, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        assert not tw.called

    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_wrong_github_webhook(self, mock_policy,
                                                  assem_mock,
                                                  resp_mock, request_mock):
        """A payload missing statuses_url yields 400."""
        mock_policy.return_value = True
        status_url = 'https://api.github.com/repos/u/r/statuses/{sha}'
        body_dict = {'sender': {'url': 'https://api.github.com'},
                     'pull_request': {'head': {'sha': 'asdf'}},
                     'repository': {'HACKED_statuses_url': status_url}}
        request_mock.body = json.dumps(body_dict)
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(400, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        assert not tw.called

    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_unknown_git_webhook(self, mock_policy, assem_mock,
                                                 resp_mock, request_mock):
        """A payload without a sender is answered with 501."""
        mock_policy.return_value = True
        body_dict = {"pull_request": {"head": {"sha": "asdf"}}}
        request_mock.body = json.dumps(body_dict)
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(501, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        assert not tw.called

    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_non_github_webhook(self, mock_policy, assem_mock,
                                                resp_mock, request_mock):
        """A non-GitHub sender is answered with 501."""
        mock_policy.return_value = True
        body_dict = {"sender": {"url": "https://non-github.com"},
                     "pull_request": {"head": {"sha": "asdf"}}}
        request_mock.body = json.dumps(body_dict)
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(501, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        assert not tw.called

    @mock.patch('solum.common.policy.check')
    def test_trigger_post_on_github_ping_webhook(self, mock_policy, assem_mock,
                                                 resp_mock, request_mock):
        """GitHub's 'ping' event (zen payload) is answered with 501."""
        mock_policy.return_value = True
        body_dict = {"sender": {"url": "https://api.github.com"},
                     "zen": "Keep it logically awesome."}
        request_mock.body = json.dumps(body_dict)
        obj = trigger.TriggerController()
        obj.post('test_id')
        self.assertEqual(501, resp_mock.status)
        tw = assem_mock.return_value.trigger_workflow
        assert not tw.called
| stackforge/solum | solum/tests/api/controllers/v1/test_trigger.py | Python | apache-2.0 | 14,612 | 0 |
"""
Tests of CourseKeys and CourseLocators
"""
import ddt
from bson.objectid import ObjectId
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from opaque_keys.edx.tests import LocatorBaseTest, TestDeprecated
@ddt.ddt
class TestCourseKeys(LocatorBaseTest, TestDeprecated):
    """
    Tests of :class:`.CourseKey` and :class:`.CourseLocator`
    """
    # NOTE(review): check_course_locn_fields and assertDeprecationWarning
    # are helpers inherited from LocatorBaseTest / TestDeprecated --
    # presumably a field-by-field locator assertion and a
    # DeprecationWarning counter; confirm in opaque_keys.edx.tests.
    @ddt.data(
        "foo/bar/baz",
    )
    def test_deprecated_roundtrip(self, course_id):
        # Deprecated "org/course/run" ids must serialize back unchanged.
        self.assertEquals(
            course_id,
            unicode(CourseKey.from_string(course_id))
        )
    @ddt.data(
        "foo!/bar/baz",
    )
    def test_invalid_chars_in_ssck_string(self, course_id):
        # '!' is not a legal character in a deprecated course id.
        with self.assertRaises(InvalidKeyError):
            CourseKey.from_string(course_id)
    @ddt.data(
        "org/course/run/foo",
        "org/course",
        "org+course+run+foo",
        "org+course",
    )
    def test_invalid_format_location(self, course_id):
        # A locator string needs exactly three org/course/run components.
        with self.assertRaises(InvalidKeyError):
            CourseLocator.from_string(course_id)
    def test_make_usage_key(self):
        # make_usage_key must mirror direct BlockUsageLocator construction
        # for both deprecated and new-style course keys.
        depr_course = CourseKey.from_string('org/course/run')
        self.assertEquals(
            unicode(BlockUsageLocator(depr_course, 'category', 'name', deprecated=True)),
            unicode(depr_course.make_usage_key('category', 'name'))
        )
        course = CourseKey.from_string('course-v1:org+course+run')
        self.assertEquals(
            unicode(BlockUsageLocator(course, 'block_type', 'block_id')),
            unicode(course.make_usage_key('block_type', 'block_id'))
        )
    def test_convert_deprecation(self):
        # replace(deprecated=...) converts between the two serializations.
        depr_course = CourseKey.from_string('org/course/run')
        course = CourseKey.from_string('course-v1:org+course+run')
        self.assertEquals(unicode(depr_course.replace(deprecated=False)), unicode(course))
        self.assertEquals(unicode(course.replace(deprecated=True)), unicode(depr_course))
    def test_course_constructor_underspecified(self):
        # A locator needs at least an org/course/run triple or a version guid.
        with self.assertRaises(InvalidKeyError):
            CourseLocator()
        with self.assertRaises(InvalidKeyError):
            CourseLocator(branch='published')
    def test_course_constructor_bad_version_guid(self):
        # Version guids must be valid (or at least present) ObjectIds.
        with self.assertRaises(ValueError):
            CourseLocator(version_guid="012345")
        with self.assertRaises(InvalidKeyError):
            CourseLocator(version_guid=None)
    def test_course_constructor_version_guid(self):
        # pylint: disable=no-member,protected-access
        # generate a random location
        test_id_1 = ObjectId()
        test_id_1_loc = str(test_id_1)
        testobj_1 = CourseLocator(version_guid=test_id_1)
        self.check_course_locn_fields(testobj_1, version_guid=test_id_1)
        self.assertEqual(str(testobj_1.version_guid), test_id_1_loc)
        testobj_1_string = u'@'.join((testobj_1.VERSION_PREFIX, test_id_1_loc))
        self.assertEqual(testobj_1._to_string(), testobj_1_string)
        self.assertEqual(str(testobj_1), u'course-v1:' + testobj_1_string)
        self.assertEqual(testobj_1.html_id(), u'course-v1:' + testobj_1_string)
        self.assertEqual(testobj_1.version, test_id_1)
        # Test using a given string
        test_id_2_loc = '519665f6223ebd6980884f2b'
        test_id_2 = ObjectId(test_id_2_loc)
        testobj_2 = CourseLocator(version_guid=test_id_2)
        self.check_course_locn_fields(testobj_2, version_guid=test_id_2)
        self.assertEqual(str(testobj_2.version_guid), test_id_2_loc)
        testobj_2_string = u'@'.join((testobj_2.VERSION_PREFIX, test_id_2_loc))
        self.assertEqual(testobj_2._to_string(), testobj_2_string)
        self.assertEqual(str(testobj_2), u'course-v1:' + testobj_2_string)
        self.assertEqual(testobj_2.html_id(), u'course-v1:' + testobj_2_string)
        self.assertEqual(testobj_2.version, test_id_2)
    @ddt.data(
        ' mit.eecs',
        'mit.eecs ',
        CourseLocator.VERSION_PREFIX + '@mit.eecs',
        BlockUsageLocator.BLOCK_PREFIX + '@black+mit.eecs',
        'mit.ee cs',
        'mit.ee,cs',
        'mit.ee+cs',
        'mit.ee&cs',
        'mit.ee()cs',
        CourseLocator.BRANCH_PREFIX + '@this',
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX,
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this+' + CourseLocator.BRANCH_PREFIX + '@that',
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this+' + CourseLocator.BRANCH_PREFIX,
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this ',
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@th%is ',
        u'\ufffd',
    )
    def test_course_constructor_bad_package_id(self, bad_id):
        """
        Test all sorts of badly-formed package_ids (and urls with those package_ids)
        """
        # The malformed id must be rejected in every field position and
        # when embedded in a full serialized key.
        with self.assertRaises(InvalidKeyError):
            CourseLocator(org=bad_id, course='test', run='2014_T2')
        with self.assertRaises(InvalidKeyError):
            CourseLocator(org='test', course=bad_id, run='2014_T2')
        with self.assertRaises(InvalidKeyError):
            CourseLocator(org='test', course='test', run=bad_id)
        with self.assertRaises(InvalidKeyError):
            CourseKey.from_string(u'course-v1:test+{}+2014_T2'.format(bad_id))
    @ddt.data(
        'course-v1:',
        'course-v1:/mit.eecs',
        'http:mit.eecs',
        'course-v1:mit+course+run{}@branch'.format(CourseLocator.BRANCH_PREFIX),
        'course-v1:mit+course+run+',
    )
    def test_course_constructor_bad_url(self, bad_url):
        # Structurally broken serialized keys must be rejected.
        with self.assertRaises(InvalidKeyError):
            CourseKey.from_string(bad_url)
    def test_course_constructor_url(self):
        # Test parsing a url when it starts with a version ID and there is also a block ID.
        # This hits the parsers parse_guid method.
        test_id_loc = '519665f6223ebd6980884f2b'
        testobj = CourseKey.from_string("course-v1:{}@{}+{}@hw3".format(
            CourseLocator.VERSION_PREFIX, test_id_loc, CourseLocator.BLOCK_PREFIX
        ))
        self.check_course_locn_fields(
            testobj,
            version_guid=ObjectId(test_id_loc)
        )
    def test_course_constructor_url_package_id_and_version_guid(self):
        # Both an org+course+run triple and a version guid may be present.
        test_id_loc = '519665f6223ebd6980884f2b'
        testobj = CourseKey.from_string(
            'course-v1:mit.eecs+honors.6002x+2014_T2+{}@{}'.format(CourseLocator.VERSION_PREFIX, test_id_loc)
        )
        self.check_course_locn_fields(
            testobj,
            org='mit.eecs',
            course='honors.6002x',
            run='2014_T2',
            version_guid=ObjectId(test_id_loc)
        )
    def test_course_constructor_url_package_id_branch_and_version_guid(self):
        # Branch and version guid can be combined in one serialized key.
        test_id_loc = '519665f6223ebd6980884f2b'
        org = 'mit.eecs'
        course = '~6002x'
        run = '2014_T2'
        testobj = CourseKey.from_string('course-v1:{}+{}+{}+{}@draft-1+{}@{}'.format(
            org, course, run, CourseLocator.BRANCH_PREFIX, CourseLocator.VERSION_PREFIX, test_id_loc
        ))
        self.check_course_locn_fields(
            testobj,
            org=org,
            course=course,
            run=run,
            branch='draft-1',
            version_guid=ObjectId(test_id_loc)
        )
    def test_course_constructor_package_id_no_branch(self):
        # Direct keyword construction serializes to 'org+course+run'.
        org = 'mit.eecs'
        course = '6002x'
        run = '2014_T2'
        testurn = '{}+{}+{}'.format(org, course, run)
        testobj = CourseLocator(org=org, course=course, run=run)
        self.check_course_locn_fields(testobj, org=org, course=course, run=run)
        # Allow access to _to_string
        # pylint: disable=protected-access
        self.assertEqual(testobj._to_string(), testurn)
    def test_course_constructor_package_id_separate_branch(self):
        # A branch keyword adds a '+branch@<name>' suffix to the urn.
        org = 'mit.eecs'
        course = '6002x'
        run = '2014_T2'
        test_branch = 'published'
        expected_urn = '{}+{}+{}+{}@{}'.format(org, course, run, CourseLocator.BRANCH_PREFIX, test_branch)
        testobj = CourseLocator(org=org, course=course, run=run, branch=test_branch)
        self.check_course_locn_fields(
            testobj,
            org=org,
            course=course,
            run=run,
            branch=test_branch,
        )
        # pylint: disable=no-member,protected-access
        self.assertEqual(testobj.branch, test_branch)
        self.assertEqual(testobj._to_string(), expected_urn)
    def test_course_constructor_deprecated_offering(self):
        # The legacy 'offering' argument ("course/run") still works but
        # emits DeprecationWarnings; malformed offerings are rejected.
        org = 'mit.eecs'
        course = '6002x'
        run = '2014_T2'
        offering = '{}/{}'.format(course, run)
        test_branch = 'published'
        with self.assertDeprecationWarning(count=2):
            testobj = CourseLocator(org=org, offering=offering, branch=test_branch)
        with self.assertRaises(InvalidKeyError):
            CourseLocator(org=org, offering='', branch=test_branch)
        with self.assertRaises(InvalidKeyError):
            CourseLocator(org=org, offering=course, branch=test_branch)
        self.check_course_locn_fields(
            testobj,
            org=org,
            course=course,
            run=run,
            branch=test_branch,
        )
    @ddt.data(
        "i4x://org/course/category/name",
        "i4x://org/course/category/name@revision"
    )
    def test_make_usage_key_from_deprecated_string_roundtrip(self, url):
        # Deprecated i4x:// usage keys round-trip through the deprecated
        # conversion helpers (each direction warns once).
        course_key = CourseLocator('org', 'course', 'run')
        with self.assertDeprecationWarning(count=2):
            self.assertEquals(
                url,
                course_key.make_usage_key_from_deprecated_string(url).to_deprecated_string()
            )
    def test_empty_run(self):
        # An empty run is invalid for new-style keys but tolerated (and
        # serialized with a trailing slash) for deprecated keys.
        with self.assertRaises(InvalidKeyError):
            CourseLocator('org', 'course', '')
        self.assertEquals(
            'org/course/',
            unicode(CourseLocator('org', 'course', '', deprecated=True))
        )
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/opaque_keys/edx/tests/test_course_locators.py | Python | agpl-3.0 | 10,010 | 0.001598 |
# -*- coding: utf-8 -*-
# Copyright 2007-2022 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from operator import attrgetter
import warnings
import inspect
import copy
import types
from io import StringIO
import codecs
from collections.abc import Iterable, Mapping
import unicodedata
from contextlib import contextmanager
import importlib
import logging
import numpy as np
from hyperspy.misc.signal_tools import broadcast_signals
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG
from hyperspy.docstrings.utils import STACK_METADATA_ARG
_logger = logging.getLogger(__name__)
def attrsetter(target, attrs, value):
    """Set a (possibly nested) attribute of ``target`` to ``value``.

    A new attribute is only created if the object supports such behaviour
    (e.g. ``DictionaryTreeBrowser`` does).

    Parameters
    ----------
    target : object
        The object whose attribute is set.
    attrs : str
        Attribute path, nested levels separated by periods, e.g.
        ``'metadata.Signal.Noise_parameters.variance'``.
    value : object
        The value to assign to the innermost attribute.

    Example
    -------
    >>> s = hs.signals.Signal1D(np.arange(10))
    >>> m = s.create_model()
    >>> attrsetter(m, 'signal1D.data', np.arange(10) + 2)

    is equivalent to ``m.signal1D.data = np.arange(10) + 2``.
    """
    dot = attrs.rfind(".")
    if dot == -1:
        # No nesting: set the attribute directly on the target.
        setattr(target, attrs, value)
    else:
        # Resolve the parent object, then set the leaf attribute on it.
        parent = attrgetter(attrs[:dot])(target)
        setattr(parent, attrs[dot + 1 :], value)
@contextmanager
def stash_active_state(model):
    """Context manager that snapshots the ``active`` state of every
    component in ``model`` on entry and restores it on exit.

    For components with ``active_is_multidimensional`` the per-position
    ``_active_array`` is stashed (by reference, as in the original
    behaviour); otherwise the scalar ``active`` flag is stashed.
    """
    saved = [
        component._active_array
        if component.active_is_multidimensional
        else component.active
        for component in model
    ]
    yield
    for component, state in zip(model, saved):
        if isinstance(state, bool):
            # Scalar flag: restore directly.
            component.active = state
        else:
            # Array state: re-enable multidimensional mode if it was
            # switched off inside the context, then restore in place.
            if not component.active_is_multidimensional:
                component.active_is_multidimensional = True
            component._active_array[:] = state
@contextmanager
def dummy_context_manager(*args, **kwargs):
    """No-op context manager; accepts and ignores any arguments.

    Useful as a drop-in replacement where a real context manager is
    optional.
    """
    yield
def str2num(string, **kargs):
    """Convert a table given in string form into a numpy array.

    Parameters
    ----------
    string : str
        Text table, one row per line; extra keyword arguments are
        forwarded to :func:`numpy.loadtxt`.

    Returns
    -------
    numpy.ndarray
    """
    return np.loadtxt(StringIO(string), **kargs)
def parse_quantity(quantity, opening="(", closing=")"):
    """Parse the quantity of a signal, returning name and units separately.

    The units are taken from the last matching ``opening``/``closing``
    separator pair, so nested brackets inside the units are preserved.

    Parameters
    ----------
    quantity : str
        E.g. ``"Intensity (counts)"``.
    opening : str
        Separator used to define the beginning of the units.
    closing : str
        Separator used to define the end of the units.

    Returns
    -------
    quantity_name : str
    quantity_units : str
        Empty string when no units are found.
    """
    # Strip once up front so the index arithmetic below is performed on
    # the same string that is iterated (the original implementation
    # iterated the stripped string but sliced the raw one, mis-splitting
    # inputs with surrounding whitespace).
    quantity = quantity.strip()
    if not quantity:
        # Guard: the loop below would otherwise leave `index` unbound.
        return "", ""
    # open_bracket keeps track of the currently open brackets while
    # scanning from the end of the string.
    open_bracket = 0
    for index, c in enumerate(quantity[::-1]):
        if c == closing:
            open_bracket += 1
        if c == opening:
            open_bracket -= 1
            if open_bracket == 0:
                # Found the opening that matches the final closing bracket.
                break
    if index + 1 == len(quantity):
        # Scanned the whole string without finding a matched pair.
        return quantity, ""
    quantity_name = quantity[: -index - 1].strip()
    quantity_units = quantity[-index:-1].strip()
    return quantity_name, quantity_units
# Bytes table of every latin-1 code point that is neither alphanumeric nor
# "_" (95) / " " (32); used by ``slugify`` to delete invalid characters.
_slugify_strip_re_data = "".join(
    c for c in map(chr, np.delete(np.arange(256), [95, 32])) if not c.isalnum()
).encode()
def slugify(value, valid_variable_name=False):
    """Normalize ``value`` into a slug.

    Non-string inputs are converted to ``str`` first. The result is
    ASCII-folded (NFKD), stripped of non-alphanumeric characters (except
    underscore), and spaces are replaced by underscores. With
    ``valid_variable_name=True`` a ``"Number_"`` prefix is added when the
    slug is not a valid Python identifier.

    Adapted from Django's "django/template/defaultfilters.py".
    """
    if not isinstance(value, str):
        try:
            # Convert to text using the default encoding.
            value = str(value)
        except BaseException:
            # Fall back to latin1; if this fails too, the error propagates.
            value = str(value, "latin1")
    ascii_bytes = unicodedata.normalize("NFKD", value).encode("ascii", "ignore")
    cleaned = ascii_bytes.translate(None, _slugify_strip_re_data).decode().strip()
    slug = cleaned.replace(" ", "_")
    if valid_variable_name and not slug.isidentifier():
        slug = "Number_" + slug
    return slug
class DictionaryTreeBrowser:
    """A class to comfortably browse a dictionary using a CLI.

    In addition to accessing the values using dictionary syntax
    the class enables navigating a dictionary that contains
    nested dictionaries as attributes of nested classes.
    Also it is an iterator over the (key, value) items. The
    `__repr__` method provides pretty tree printing. Private
    keys, i.e. keys that start with an underscore, are not
    printed, counted when calling len nor iterated.

    Internally each public attribute is stored under its slugified name as
    a ``{"key": original_key, "_dtb_value_": value}`` dict; the dunder
    methods below unwrap that representation transparently.

    Methods
    -------
    export : saves the dictionary in pretty tree printing format in a text
        file.
    keys : returns a list of non-private keys.
    as_dictionary : returns a dictionary representation of the object.
    set_item : easily set items, creating any necessary nodes on the way.
    has_item: given a path, or part of a path, checks if the item exists.
    get_item given a path, or part of a path, return the value of the item.
    add_node : add all non existing nodes in a given path.
    add_dictionary: add new items from dictionary.

    Examples
    --------
    >>> tree = DictionaryTreeBrowser()
    >>> tree.set_item("Branch.Leaf1.color", "green")
    >>> tree.set_item("Branch.Leaf2.color", "brown")
    >>> tree.set_item("Branch.Leaf2.caterpillar", True)
    >>> tree.set_item("Branch.Leaf1.caterpillar", False)
    >>> tree
    └── Branch
        ├── Leaf1
        │   ├── caterpillar = False
        │   └── color = green
        └── Leaf2
            ├── caterpillar = True
            └── color = brown
    >>> tree.Branch
    ├── Leaf1
    │   ├── caterpillar = False
    │   └── color = green
    └── Leaf2
        ├── caterpillar = True
        └── color = brown
    >>> for label, leaf in tree.Branch:
    ...     print("%s is %s" % (label, leaf.color))
    Leaf1 is green
    Leaf2 is brown
    >>> tree.Branch.Leaf2.caterpillar
    True
    >>> "Leaf1" in tree.Branch
    True
    >>> "Leaf3" in tree.Branch
    False
    >>>
    """
    def __init__(self, dictionary=None, double_lines=False, lazy=True):
        """When creating a DictionaryTreeBrowser lazily, the dictionary is
        added to the `_lazy_attributes` attribute. The first time a lazy
        attribute is called or the DictionaryTreeBrowser is printed, the
        DictionaryTreeBrowser processes the lazy attributes with the
        `process_lazy_attributes` method.
        DictionaryTreeBrowser is lazy by default, using non-lazy instances
        can be useful for debugging purposes.
        """
        self._lazy_attributes = {}
        self._double_lines = double_lines
        if dictionary is None:
            dictionary = {}
        if lazy:
            # Defer attribute creation until first access/printing.
            self._lazy_attributes.update(dictionary)
        else:
            self._process_dictionary(dictionary, double_lines)
    def _process_dictionary(self, dictionary, double_lines):
        """Process the provided dictionary to set the attributes"""
        for key, value in dictionary.items():
            if key == "_double_lines":
                value = double_lines
            self.__setattr__(key, value)
    def process_lazy_attributes(self):
        """Run the DictionaryTreeBrowser machinery for the lazy attributes."""
        if len(self._lazy_attributes) > 0:
            _logger.debug("Processing lazy attributes DictionaryBrowserTree")
            self._process_dictionary(self._lazy_attributes, self._double_lines)
        self._lazy_attributes = {}
    def add_dictionary(self, dictionary, double_lines=False):
        """Add new items from dictionary."""
        if len(self._lazy_attributes) > 0:
            # To simplify merging lazy and non lazy attribute, we get self
            # as a dictionary and update the dictionary with the attributes
            d = self.as_dictionary()
            nested_dictionary_merge(d, dictionary)
            self.__init__(d, double_lines=double_lines, lazy=True)
        else:
            self._process_dictionary(dictionary, double_lines)
    def export(self, filename, encoding="utf8"):
        """Export the dictionary to a text file

        Parameters
        ----------
        filename : str
            The name of the file without the extension that is
            txt by default
        encoding : valid encoding str
        """
        self.process_lazy_attributes()
        # NOTE(review): the file handle is not closed if _get_print_items
        # raises; a `with` block would be safer.
        f = codecs.open(filename, "w", encoding=encoding)
        f.write(self._get_print_items(max_len=None))
        f.close()
    def _get_print_items(self, padding="", max_len=78):
        """Prints only the attributes that are not methods"""
        from hyperspy.defaults_parser import preferences
        string = ""
        eoi = len(self)
        j = 0
        # Double-line box-drawing characters are used for expanded
        # list/tuple sub-trees, single-line ones otherwise.
        if preferences.General.dtb_expand_structures and self._double_lines:
            s_end = "╚══ "
            s_middle = "╠══ "
            pad_middle = "║   "
        else:
            s_end = "└── "
            s_middle = "├── "
            pad_middle = "│   "
        for key_, value in iter(sorted(self.__dict__.items())):
            if key_.startswith("_"):
                continue
            if not isinstance(key_, types.MethodType):
                key = ensure_unicode(value["key"])
                value = value["_dtb_value_"]
                if j == eoi - 1:
                    symbol = s_end
                else:
                    symbol = s_middle
                if preferences.General.dtb_expand_structures:
                    if isinstance(value, list) or isinstance(value, tuple):
                        iflong, strvalue = check_long_string(value, max_len)
                        if iflong:
                            key += " <list>" if isinstance(value, list) else " <tuple>"
                            # Long sequences are rendered as a nested tree
                            # with "[i]" keys.
                            value = DictionaryTreeBrowser(
                                {"[%d]" % i: v for i, v in enumerate(value)},
                                double_lines=True,
                                lazy=False,
                            )
                        else:
                            string += "%s%s%s = %s\n" % (padding, symbol, key, strvalue)
                            j += 1
                            continue
                if isinstance(value, DictionaryTreeBrowser):
                    string += "%s%s%s\n" % (padding, symbol, key)
                    if j == eoi - 1:
                        extra_padding = "    "
                    else:
                        extra_padding = pad_middle
                    string += value._get_print_items(padding + extra_padding)
                else:
                    _, strvalue = check_long_string(value, max_len)
                    string += "%s%s%s = %s\n" % (padding, symbol, key, strvalue)
            j += 1
        return string
    def _get_html_print_items(self, padding="", max_len=78, recursive_level=0):
        """Recursive method that creates a html string for fancy display
        of metadata.
        """
        recursive_level += 1
        from hyperspy.defaults_parser import preferences
        string = ""  # Final return string
        for key_, value in iter(sorted(self.__dict__.items())):
            if key_.startswith("_"):  # Skip any private attributes
                continue
            if not isinstance(
                key_, types.MethodType
            ):  # If it isn't a method, then continue
                key = ensure_unicode(value["key"])
                value = value["_dtb_value_"]
                # dtb_expand_structures is a setting that sets whether to fully expand long strings
                if preferences.General.dtb_expand_structures:
                    if isinstance(value, list) or isinstance(value, tuple):
                        iflong, strvalue = check_long_string(value, max_len)
                        if iflong:
                            key += " <list>" if isinstance(value, list) else " <tuple>"
                            value = DictionaryTreeBrowser(
                                {"[%d]" % i: v for i, v in enumerate(value)},
                                double_lines=True,
                                lazy=False,
                            )
                        else:
                            string += add_key_value(key, strvalue)
                            continue  # skips the next if-else
                # If DTB, then add a details html tag
                if isinstance(value, DictionaryTreeBrowser):
                    string += """<ul style="margin: 0px; list-style-position: outside;">
                    <details {}>
                        <summary style="display: list-item;">
                        <li style="display: inline;">
                        {}
                        </li></summary>
                        """.format(
                        "open" if recursive_level < 2 else "closed",
                        replace_html_symbols(key),
                    )
                    string += value._get_html_print_items(
                        recursive_level=recursive_level
                    )
                    string += "</details></ul>"
                # Otherwise just add value
                else:
                    _, strvalue = check_long_string(value, max_len)
                    string += add_key_value(key, strvalue)
        return string
    def __repr__(self):
        self.process_lazy_attributes()
        return self._get_print_items()
    def _repr_html_(self):
        self.process_lazy_attributes()
        return self._get_html_print_items()
    def __getitem__(self, key):
        self.process_lazy_attributes()
        return self.__getattribute__(key)
    def __setitem__(self, key, value):
        self.__setattr__(key, value)
    def __getattr__(self, name):
        """__getattr__ is called when the default attribute access (
        __getattribute__) fails with an AttributeError.
        """
        # Skip the attribute we are not interested in. This is also necessary
        # to recursive loops.
        if name.startswith("__"):
            raise AttributeError(name)
        # Attribute name are been slugified, so we need to do the same for
        # the dictionary keys. Also check with `_sig_` prefix for signal attributes.
        keys = [slugify(k) for k in self._lazy_attributes.keys()]
        if name in keys or f"_sig_{name}" in keys:
            # It is a lazy attribute, we need to process the lazy attribute
            self.process_lazy_attributes()
            return self.__dict__[name]["_dtb_value_"]
        else:
            raise AttributeError(name)
    def __getattribute__(self, name):
        if isinstance(name, bytes):
            name = name.decode()
        name = slugify(name, valid_variable_name=True)
        item = super().__getattribute__(name)
        # Unwrap the internal {"key": ..., "_dtb_value_": ...} storage.
        if isinstance(item, dict) and "_dtb_value_" in item and "key" in item:
            return item["_dtb_value_"]
        else:
            return item
    def __setattr__(self, key, value):
        if key in ["_double_lines", "_lazy_attributes"]:
            super().__setattr__(key, value)
            return
        if key == "binned":
            warnings.warn(
                "Use of the `binned` attribute in metadata is "
                "going to be deprecated in v2.0. Set the "
                "`axis.is_binned` attribute instead. ",
                VisibleDeprecationWarning,
            )
        # Keys prefixed with "_sig_" hold serialized signals; rebuild the
        # BaseSignal from its dictionary representation.
        if key.startswith("_sig_"):
            key = key[5:]
            from hyperspy.signal import BaseSignal
            value = BaseSignal(**value)
        slugified_key = str(slugify(key, valid_variable_name=True))
        if isinstance(value, dict):
            if slugified_key in self.__dict__.keys():
                # Merge into the existing sub-tree instead of replacing it.
                self.__dict__[slugified_key]["_dtb_value_"].add_dictionary(
                    value, double_lines=self._double_lines
                )
                return
            else:
                value = DictionaryTreeBrowser(
                    value, double_lines=self._double_lines, lazy=False
                )
        super().__setattr__(slugified_key, {"key": key, "_dtb_value_": value})
    def __len__(self):
        if len(self._lazy_attributes) > 0:
            d = self._lazy_attributes
        else:
            d = self.__dict__
        return len([key for key in d.keys() if not key.startswith("_")])
    def keys(self):
        """Returns a list of non-private keys."""
        return sorted([key for key in self.__dict__.keys() if not key.startswith("_")])
    def as_dictionary(self):
        """Returns its dictionary representation."""
        if len(self._lazy_attributes) > 0:
            # Not processed yet: the stored dictionary is still authoritative.
            return copy.deepcopy(self._lazy_attributes)
        par_dict = {}
        from hyperspy.signal import BaseSignal
        for key_, item_ in self.__dict__.items():
            if not isinstance(item_, types.MethodType):
                if key_ in ["_db_index", "_double_lines", "_lazy_attributes"]:
                    continue
                key = item_["key"]
                if isinstance(item_["_dtb_value_"], DictionaryTreeBrowser):
                    item = item_["_dtb_value_"].as_dictionary()
                elif isinstance(item_["_dtb_value_"], BaseSignal):
                    item = item_["_dtb_value_"]._to_dictionary()
                    key = "_sig_" + key
                elif hasattr(item_["_dtb_value_"], "_to_dictionary"):
                    item = item_["_dtb_value_"]._to_dictionary()
                else:
                    item = item_["_dtb_value_"]
                par_dict.update({key: item})
        return par_dict
    def _nested_get_iter(self, item, wild=False):
        """Recursive function to search for an item key in a nested
        DictionaryTreeBrowser."""
        self.process_lazy_attributes()
        for key_, item_ in self.__dict__.items():
            if not isinstance(item_, types.MethodType) and not key_.startswith("_"):
                key = item_["key"]
                if isinstance(item_["_dtb_value_"], DictionaryTreeBrowser):
                    for result in item_["_dtb_value_"]._nested_get_iter(item, wild):
                        yield key + "." + result[0], result[1]
                else:
                    if key == item or (
                        wild and (str(item).lower() in str(key).lower())
                    ):
                        yield key, item_["_dtb_value_"]
    def _nested_get(self, item_path, wild=False, return_path=False):
        """Search for an item key in a nested DictionaryTreeBrowser and yield a
        list of values. If `wild` is `True`, looks for any key that contains
        the string `item` (case insensitive). If part of a path is given,
        search for matching items and then make sure that the full path is
        contained."""
        if "." in item_path:
            item = item_path.split(".").pop(-1)
        else:
            item = item_path
        result = list(self._nested_get_iter(item, wild))
        # remove item where item matches, but not additional elements of item_path
        if return_path:
            return [i for i in result if item_path in i[0]]
        else:
            return [i[1] for i in result if item_path in i[0]]
    def has_item(
        self, item_path, default=None, full_path=True, wild=False, return_path=False
    ):
        """Given a path, return True if it exists. May also perform a search
        whether an item exists and optionally returns the full path instead of
        boolean value.
        The nodes of the path are separated using periods.

        Parameters
        ----------
        item_path : Str
            A string describing the path with each item separated by
            full stops (periods).
        full_path : boolean
            If `True` (default), the full path to the item has to be given. If
            `False`, a search for the item key is performed (can include
            additional nodes preceding they key separated by full stops).
        wild : boolean
            Only applies if `full_path=False`. If `True`, searches for any items
            where `item` matches a substring of the item key (case insensitive).
            Default is `False`.
        return_path : boolean
            Only applies if `full_path=False`. If `False` (default), a boolean
            value is returned. If `True`, the full path to the item is returned,
            a list of paths for multiple matches, or default value if it does
            not exist.
        default :
            The value to return for path if the item does not exist (default is `None`).

        Examples
        --------
        >>> dict = {'To' : {'be' : True}}
        >>> dict_browser = DictionaryTreeBrowser(dict)
        >>> dict_browser.has_item('To')
        True
        >>> dict_browser.has_item('To.be')
        True
        >>> dict_browser.has_item('To.be.or')
        False
        >>> dict_browser.has_item('be', full_path=False)
        True
        >>> dict_browser.has_item('be', full_path=False, return_path=True)
        'To.be'
        """
        if full_path:
            if isinstance(item_path, str):
                item_path = item_path.split(".")
            else:
                item_path = copy.copy(item_path)
            attrib = item_path.pop(0)
            if hasattr(self, attrib):
                if len(item_path) == 0:
                    return True
                else:
                    item = self[attrib]
                    if isinstance(item, type(self)):
                        return item.has_item(item_path)
                    else:
                        return False
            else:
                return False
        else:
            if not return_path:
                return self._nested_get(item_path, wild=wild) != []
            else:
                result = self._nested_get(item_path, wild=wild, return_path=True)
                if len(result) == 0:
                    return default
                elif len(result) == 1:
                    return result[0][0]
                return [i[0] for i in result]
    def get_item(
        self, item_path, default=None, full_path=True, wild=False, return_path=False
    ):
        """Given a path, return it's value if it exists, or default value if
        missing. May also perform a search whether an item key exists and then
        returns the value or a list of values for multiple occurences of the
        key -- optionally returns the full path(s) in addition to its value(s).
        The nodes of the path are separated using periods.

        Parameters
        ----------
        item_path : Str
            A string describing the path with each item separated by
            full stops (periods)
        full_path : boolean
            If `True` (default), the full path to the item has to be given. If
            `False`, a search for the item key is performed (can include
            additional nodes preceding they key separated by full stops).
        wild : boolean
            Only applies if `full_path=False`. If `True`, searches for any items
            where `item` matches a substring of the item key (case insensitive).
            Default is `False`.
        return_path : boolean
            Only applies if `full_path=False`. Default `False`. If `True`,
            returns an additional list of paths to the item(s) that match `key`.
        default :
            The value to return if the path or item does not exist.
            (default is `None`).

        Examples
        --------
        >>> dict = {'To' : {'be' : True}}
        >>> dict_browser = DictionaryTreeBrowser(dict)
        >>> dict_browser.get_item('To')
        └── be = True
        >>> dict_browser.get_item('To.be')
        True
        >>> dict_browser.get_item('To.be.or', 'default_value')
        'default_value'
        >>> dict_browser.get_nested_item('be')
        True
        """
        if full_path:
            if isinstance(item_path, str):
                item_path = item_path.split(".")
            else:
                item_path = copy.copy(item_path)
            attrib = item_path.pop(0)
            if hasattr(self, attrib):
                if len(item_path) == 0:
                    return self[attrib]
                else:
                    item = self[attrib]
                    if isinstance(item, type(self)):
                        return item.get_item(item_path, default=default)
                    else:
                        return default
            else:
                return default
        else:
            result = self._nested_get(item_path, wild=wild, return_path=return_path)
            if len(result) == 0:
                return default
            elif len(result) == 1:
                if return_path:
                    return result[0][1], result[0][0]
                else:
                    return result[0]
            else:
                if return_path:
                    return [i[1] for i in result], [i[0] for i in result]
                else:
                    return result
    def __contains__(self, item):
        return self.has_item(item_path=item)
    def copy(self):
        return copy.copy(self)
    def deepcopy(self):
        return copy.deepcopy(self)
    def set_item(self, item_path, value):
        """Given the path and value, create the missing nodes in
        the path and assign to the last one the value

        Parameters
        ----------
        item_path : Str
            A string describing the path with each item separated by a
            full stops (periods)

        Examples
        --------
        >>> dict_browser = DictionaryTreeBrowser({})
        >>> dict_browser.set_item('First.Second.Third', 3)
        >>> dict_browser
        └── First
            └── Second
                └── Third = 3
        """
        if not self.has_item(item_path):
            self.add_node(item_path)
        if isinstance(item_path, str):
            item_path = item_path.split(".")
        if len(item_path) > 1:
            # Recurse into the first node with the remaining path.
            self.__getattribute__(item_path.pop(0)).set_item(item_path, value)
        else:
            self.__setattr__(item_path.pop(), value)
    def add_node(self, node_path):
        """Adds all the nodes in the given path if they don't exist.

        Parameters
        ----------
        node_path: str
            The nodes must be separated by full stops (periods).

        Examples
        --------
        >>> dict_browser = DictionaryTreeBrowser({})
        >>> dict_browser.add_node('First.Second')
        >>> dict_browser.First.Second = 3
        >>> dict_browser
        └── First
            └── Second = 3
        """
        keys = node_path.split(".")
        dtb = self
        for key in keys:
            if dtb.has_item(key) is False:
                dtb[key] = DictionaryTreeBrowser(lazy=False)
            dtb = dtb[key]
    def __next__(self):
        """
        Standard iterator method, updates the index and returns the
        current coordinates

        Returns
        -------
        val : tuple of ints
            Returns a tuple containing the coordiantes of the current
            iteration.
        """
        if len(self) == 0:
            raise StopIteration
        if not hasattr(self, "_db_index"):
            self._db_index = 0
        elif self._db_index >= len(self) - 1:
            # Exhausted: drop the cursor so a new iteration starts over.
            del self._db_index
            raise StopIteration
        else:
            self._db_index += 1
        self.process_lazy_attributes()
        key = list(self.keys())[self._db_index]
        return key, getattr(self, key)
    def __iter__(self):
        return self
def strlist2enumeration(lst):
    """Join the items of ``lst`` into an English enumeration, e.g.
    ``["a", "b", "c"]`` -> ``"a, b and c"``."""
    items = tuple(lst)
    count = len(items)
    if count == 0:
        return ""
    if count == 1:
        return items[0]
    if count == 2:
        return "%s and %s" % items
    head = ", ".join(items[:-2])
    return "%s, %s and %s" % (head, items[-2], items[-1])
def ensure_unicode(stuff, encoding="utf8", encoding2="latin-1"):
    """Return ``stuff`` decoded to ``str``.

    Non-bytes inputs are returned unchanged. Bytes are decoded with
    ``encoding`` first, falling back to ``encoding2`` (ignoring errors)
    if that fails.
    """
    # np.bytes_ is the NumPy 1.x alias of np.string_, which was removed
    # in NumPy 2.0; using np.bytes_ keeps both versions working.
    if not isinstance(stuff, (bytes, np.bytes_)):
        return stuff
    string = stuff
    try:
        string = string.decode(encoding)
    except BaseException:
        string = string.decode(encoding2, errors="ignore")
    return string
def check_long_string(value, max_len):
    """Check whether a string is too long for printing in html metadata.

    Returns a ``(is_long, display_string)`` tuple; when the string exceeds
    ``2 * max_len`` characters it is abbreviated to the first and last
    ``max_len`` characters joined by " ... ".
    """
    if not isinstance(value, (str, np.bytes_)):
        value = repr(value)
    value = ensure_unicode(value)
    strvalue = str(value)
    _long = False
    if max_len is not None and len(strvalue) > 2 * max_len:
        right_limit = min(max_len, len(strvalue) - max_len)
        strvalue = "%s ... %s" % (strvalue[:max_len], strvalue[-right_limit:])
        _long = True
    return _long, strvalue
def replace_html_symbols(str_value):
    """Escape &, < and > so they render literally when printing html."""
    # "&" must be replaced first so the entities below are not re-escaped.
    for raw, entity in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")):
        str_value = str_value.replace(raw, entity)
    return str_value
def add_key_value(key, value):
    """Return one ``key = value`` metadata entry as an html list item,
    with both parts html-escaped."""
    escaped_key = replace_html_symbols(key)
    escaped_value = replace_html_symbols(value)
    return """
    <ul style="margin: 0px; list-style-position: outside;">
    <li style='margin-left:1em; padding-left: 0.5em'>{} = {}</li></ul>
    """.format(escaped_key, escaped_value)
def swapelem(obj, i, j):
    """Swap the elements at indices ``i`` and ``j`` of ``obj`` IN PLACE.

    Example
    -------
    >>> L = ['a', 'b', 'c']
    >>> swapelem(L, 1, 2)
    >>> print(L)
    ['a', 'c', 'b']
    """
    if len(obj) > 1:
        obj[i], obj[j] = obj[j], obj[i]
def rollelem(a, index, to_index=0):
    """Return a copy of ``a`` with the item at ``index`` moved so that it
    lies before position ``to_index``.

    Parameters
    ----------
    a : list
        Input list.
    index : int
        The index of the item to roll backwards. The relative order of the
        other items is unchanged.
    to_index : int, optional
        The item is rolled until it lies before this position. The default,
        0, results in a "complete" roll.

    Returns
    -------
    res : list
        Output list.
    """
    rolled = copy.copy(a)
    item = rolled.pop(index)
    rolled.insert(to_index, item)
    return rolled
def fsdict(nodes, value, dictionary):
    """Populate ``dictionary`` in a file-system-like fashion.

    Creates a chain of nested dictionaries from the keys in the list
    ``nodes`` and assigns ``value`` to the innermost one, so that::

        dictionary['node1']['node2']...['nodeN'] = value

    Each node acts like a directory containing further nodes or values.
    Note that ``nodes`` is consumed (popped) in the process.
    """
    head = nodes.pop(0)
    dictionary.setdefault(head, {})
    if nodes and isinstance(dictionary[head], dict):
        # More path components remain and the current entry is a dict:
        # descend one level.
        fsdict(nodes, value, dictionary[head])
    else:
        dictionary[head] = value
def find_subclasses(mod, cls):
    """Find all the subclasses of ``cls`` defined (or exposed) in a module.

    Parameters
    ----------
    mod : module
    cls : class

    Returns
    -------
    dict
        Mapping of subclass name to subclass (``cls`` itself included).
    """
    return {
        name: member
        for name, member in inspect.getmembers(mod)
        if inspect.isclass(member) and issubclass(member, cls)
    }
def isiterable(obj):
    """Return True if ``obj`` is an iterable (``collections.abc.Iterable``)."""
    return isinstance(obj, Iterable)
def ordinal(value):
    """
    Convert zero or a *positive* integer (or its string representation)
    to an ordinal string ('1st', '2nd', '3rd', '11th', ...).

    Values that cannot be converted to ``int`` are returned unchanged.

    >>> ordinal(1)
    '1st'
    >>> ordinal('112')
    '112th'

    Notes
    -----
    Author: Serdar Tumgoren
    http://code.activestate.com/recipes/576888-format-a-number-as-an-ordinal/
    MIT license
    """
    try:
        value = int(value)
    except ValueError:
        # Not a number: pass it through untouched.
        return value
    # Numbers ending in 11, 12 or 13 always take "th".
    if value % 100 // 10 == 1:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(value % 10, "th")
    return "%d%s" % (value, suffix)
def underline(line, character="-"):
    """Return ``line`` followed by an underline row of the same length."""
    return "{}\n{}".format(line, character * len(line))
def closest_power_of_two(n):
    """Return the next power of two >= ``n``.

    Note: despite its name, this returns the next *higher* power of two,
    not the nearest one (np.ceil would have to become np.round for that).
    """
    exponent = int(np.ceil(np.log2(n)))
    return 2 ** exponent
def stack(
    signal_list,
    axis=None,
    new_axis_name="stack_element",
    lazy=None,
    stack_metadata=True,
    show_progressbar=None,
    **kwargs,
):
    """Concatenate the signals in the list over a given axis or a new axis.

    The title is set to that of the first signal in the list.

    Parameters
    ----------
    signal_list : list of BaseSignal instances
        List of signals to stack.
    axis : {None, int, str}
        If None, the signals are stacked over a new axis. The data must
        have the same dimensions. Otherwise the signals are stacked over the
        axis given by its integer index or its name. The data must have the
        same shape, except in the dimension corresponding to `axis`. If the
        stacking axis of the first signal is uniform, it is extended up to the
        new length; if it is non-uniform, the axes vectors of all signals are
        concatenated along this direction; if it is a `FunctionalDataAxis`,
        it is extended based on the expression of the first signal (and its sub
        axis `x` is handled as above depending on whether it is uniform or not).
    new_axis_name : str
        The name of the new axis when `axis` is None.
        If an axis with this name already
        exists it automatically append '-i', where `i` are integers,
        until it finds a name that is not yet in use.
    lazy : {bool, None}
        Returns a LazySignal if True. If None, only returns lazy result if at
        least one is lazy.
    %s
    %s

    Returns
    -------
    signal : BaseSignal instance

    Examples
    --------
    >>> data = np.arange(20)
    >>> s = hs.stack([hs.signals.Signal1D(data[:10]),
    ...               hs.signals.Signal1D(data[10:])])
    >>> s
    <Signal1D, title: Stack of , dimensions: (2, 10)>
    >>> s.data
    array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9],
           [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
    """
    from hyperspy.signals import BaseSignal
    from hyperspy.axes import FunctionalDataAxis, UniformDataAxis, DataAxis
    import dask.array as da
    from numbers import Number
    # Deprecated memory-mapping kwargs imply lazy evaluation.
    for k in [k for k in ["mmap", "mmap_dir"] if k in kwargs]:
        lazy = True
        warnings.warn(
            f"'{k}' argument is deprecated and will be removed in "
            "HyperSpy v2.0. Please use 'lazy=True' instead.",
            VisibleDeprecationWarning,
        )
    # Keep the user-supplied axis spec; `axis` itself is rebound to the
    # resolved axis object further below.
    axis_input = copy.deepcopy(axis)
    signal_list = list(signal_list)
    # Get the real signal with the most axes to get metadata/class/etc
    # first = sorted(filter(lambda _s: isinstance(_s, BaseSignal), signal_list),
    #                key=lambda _s: _s.data.ndim)[-1]
    first = next(filter(lambda _s: isinstance(_s, BaseSignal), signal_list))
    # Cast numbers as signals. Will broadcast later.
    for i, _s in enumerate(signal_list):
        if isinstance(_s, BaseSignal):
            pass
        elif isinstance(_s, Number):
            sig = BaseSignal(_s)
            signal_list[i] = sig
        else:
            raise ValueError(f"Objects of type {type(_s)} cannot be stacked")
    if lazy is None:
        lazy = any(_s._lazy for _s in signal_list)
    if not isinstance(lazy, bool):
        raise ValueError("'lazy' argument has to be None, True or False")
    for i, _s in enumerate(signal_list):
        # Cast all as lazy if required
        if not _s._lazy:
            signal_list[i] = _s.as_lazy()
    if len(signal_list) > 1:
        # Matching axis calibration is checked here
        broadcasted_sigs = broadcast_signals(*signal_list, ignore_axis=axis_input)
        if axis_input is not None:
            step_sizes = [s.axes_manager[axis_input].size for s in broadcasted_sigs]
            axis = broadcasted_sigs[0].axes_manager[axis_input]
            # stack axes if non-uniform (DataAxis)
            if type(axis) is DataAxis:
                for _s in signal_list[1:]:
                    _axis = _s.axes_manager[axis_input]
                    # Only allow concatenation when the axis ranges are
                    # disjoint and consistently ordered.
                    if (
                        axis.axis[0] < axis.axis[-1] and axis.axis[-1] < _axis.axis[0]
                    ) or (
                        axis.axis[-1] < axis.axis[0] and _axis.axis[-1] < axis.axis[0]
                    ):
                        axis.axis = np.concatenate((axis.axis, _axis.axis))
                    else:
                        raise ValueError(
                            "Signals can only be stacked along a "
                            "non-uniform axes if the axis values do not overlap"
                            " and have the correct order."
                        )
            # stack axes if FunctionalDataAxis and its x axis is uniform
            elif (
                type(axis) is FunctionalDataAxis
                and type(axis.axes_manager[axis_input].x) is UniformDataAxis
            ):
                axis.x.size = np.sum(step_sizes)
            # stack axes if FunctionalDataAxis and its x axis is not uniform
            elif (
                type(axis) is FunctionalDataAxis
                and type(axis.axes_manager[axis_input].x) is DataAxis
            ):
                for _s in signal_list[1:]:
                    _axis = _s.axes_manager[axis_input]
                    if (
                        axis.x.axis[0] < axis.x.axis[-1]
                        and axis.x.axis[-1] < _axis.x.axis[0]
                    ) or (
                        axis.x.axis[-1] < axis.x.axis[0]
                        and _axis.x.axis[-1] < axis.x.axis[0]
                    ):
                        axis.x.axis = np.concatenate((axis.x.axis, _axis.x.axis))
                    else:
                        raise ValueError(
                            "Signals can only be stacked along a "
                            "non-uniform axes if the axis values do not overlap"
                            " and have the correct order."
                        )
        datalist = [s.data for s in broadcasted_sigs]
        newdata = (
            da.stack(datalist, axis=0)
            if axis is None
            else da.concatenate(datalist, axis=axis.index_in_array)
        )
        if axis_input is None:
            # New leading axis: find a name that does not collide with the
            # existing axis names.
            signal = first.__class__(newdata)
            signal.axes_manager._axes[1:] = copy.deepcopy(
                broadcasted_sigs[0].axes_manager._axes
            )
            axis_name = new_axis_name
            axis_names = [axis_.name for axis_ in signal.axes_manager._axes[1:]]
            j = 1
            while axis_name in axis_names:
                axis_name = f"{new_axis_name}_{j}"
                j += 1
            eaxis = signal.axes_manager._axes[0]
            eaxis.name = axis_name
            eaxis.navigate = True  # This triggers _update_parameters
        else:
            signal = broadcasted_sigs[0]._deepcopy_with_new_data(newdata)
            signal._lazy = True
            signal._assign_subclass()
            signal.get_dimensions_from_data()
        # Set the metadata, if an stack_metadata is an integer, the metadata
        # will overwritten later
        signal.metadata = first.metadata.deepcopy()
        signal.metadata.General.title = f"Stack of {first.metadata.General.title}"
        # Stack metadata
        if isinstance(stack_metadata, bool):
            if stack_metadata:
                signal.original_metadata.add_node("stack_elements")
                for i, obj in enumerate(signal_list):
                    signal.original_metadata.stack_elements.add_node(f"element{i}")
                    node = signal.original_metadata.stack_elements[f"element{i}"]
                    node.original_metadata = obj.original_metadata.deepcopy()
                    node.metadata = obj.metadata.deepcopy()
            else:
                signal.original_metadata = DictionaryTreeBrowser({})
        elif isinstance(stack_metadata, int):
            # Take metadata from the signal at that index instead.
            obj = signal_list[stack_metadata]
            signal.metadata = obj.metadata.deepcopy()
            signal.original_metadata = obj.original_metadata.deepcopy()
        else:
            raise ValueError("`stack_metadata` must a boolean or an integer.")
        if axis_input is None:
            # NOTE(review): complex index (-1 + 1j) uses AxesManager's
            # special indexing to select the new axis — confirm semantics
            # in hyperspy.axes.
            axis_input = signal.axes_manager[-1 + 1j].index_in_axes_manager
            step_sizes = 1
        signal.metadata._HyperSpy.set_item("Stacking_history.axis", axis_input)
        signal.metadata._HyperSpy.set_item("Stacking_history.step_sizes", step_sizes)
        if np.all(
            [
                s.metadata.has_item("Signal.Noise_properties.variance")
                for s in signal_list
            ]
        ):
            variance = stack(
                [s.metadata.Signal.Noise_properties.variance for s in signal_list], axis
            )
            signal.metadata.set_item("Signal.Noise_properties.variance", variance)
    else:
        # Single signal: nothing to stack.
        signal = signal_list[0]
    # compute if not lazy
    if not lazy:
        signal.compute(False, show_progressbar=show_progressbar)
    return signal
stack.__doc__ %= (STACK_METADATA_ARG, SHOW_PROGRESSBAR_ARG)
def shorten_name(name, req_l):
    """Truncate *name* to at most *req_l* characters.

    Names longer than the requested length are cut and suffixed with
    ``".."`` so the result is exactly ``req_l`` characters long; shorter
    names are returned unchanged.
    """
    if len(name) <= req_l:
        return name
    return name[:req_l - 2] + ".."
def transpose(*args, signal_axes=None, navigation_axes=None, optimize=False):
    """Transposes all passed signals according to the specified options.

    For parameters see ``BaseSignal.transpose``.

    Examples
    --------
    >>> signal_iterable = [hs.signals.BaseSignal(np.random.random((2,)*(i+1)))
                           for i in range(3)]
    >>> signal_iterable
    [<BaseSignal, title: , dimensions: (|2)>,
     <BaseSignal, title: , dimensions: (|2, 2)>,
     <BaseSignal, title: , dimensions: (|2, 2, 2)>]
    >>> hs.transpose(*signal_iterable, signal_axes=1)
    [<BaseSignal, title: , dimensions: (|2)>,
     <BaseSignal, title: , dimensions: (2|2)>,
     <BaseSignal, title: , dimensions: (2, 2|2)>]
    >>> hs.transpose(signal1, signal2, signal3, signal_axes=["Energy"])
    """
    from hyperspy.signal import BaseSignal

    # Validate up front so a single non-signal aborts the whole call.
    # (Bug fix: the error message previously read "pased".)
    if not all(isinstance(sig, BaseSignal) for sig in args):
        raise ValueError("Not all passed objects are signals")
    return [
        sig.transpose(
            signal_axes=signal_axes, navigation_axes=navigation_axes, optimize=optimize
        )
        for sig in args
    ]
def process_function_blockwise(
    data,
    *args,
    function,
    nav_indexes=None,
    output_signal_size=None,
    block_info=None,
    arg_keys=None,
    **kwargs,
):
    """
    Convenience function for processing a function blockwise. By design, its
    output is used as an argument of the dask ``map_blocks`` so that the
    function only gets applied to the signal axes.

    Parameters
    ----------
    data : np.ndarray
        The data for one chunk
    *args : tuple
        Any signal the is iterated alongside the data in. In the form
        ((key1, value1), (key2, value2))
    function : function
        The function to applied to the signal axis
    nav_indexes : tuple
        The indexes of the navigation axes for the dataset.
    output_signal_size: tuple
        The shape of the output signal. For a ragged signal, this is equal to 1
    block_info : dict
        The block info as described by the ``dask.array.map_blocks`` function
    arg_keys : tuple
        The list of keys for the passed arguments (args). Together this makes
        a set of key:value pairs to be passed to the function.
    **kwargs : dict
        Any additional key value pairs to be used by the function
        (Note that these are the constants that are applied.)
    """
    # Both of these values need to be passed in
    # block_info[None] is dask's entry describing the *output* block.
    dtype = block_info[None]["dtype"]
    chunk_nav_shape = tuple([data.shape[i] for i in sorted(nav_indexes)])
    output_shape = chunk_nav_shape + tuple(output_signal_size)
    # Pre-allocating the output array
    output_array = np.empty(output_shape, dtype=dtype)
    if len(args) == 0:
        # There aren't any BaseSignals for iterating
        for nav_index in np.ndindex(chunk_nav_shape):
            islice = np.s_[nav_index]
            output_array[islice] = function(data[islice], **kwargs)
    else:
        # There are BaseSignals which iterate alongside the data
        for index in np.ndindex(chunk_nav_shape):
            islice = np.s_[index]
            iter_dict = {}
            # Build the per-position keyword arguments from the iterating
            # signals; arg_keys[i] names the parameter fed from args[i].
            for key, a in zip(arg_keys, args):
                arg_i = a[islice].squeeze()
                # Some functions do not handle 0-dimension NumPy arrays,
                # so unwrap a 0-d array to the contained scalar.
                if arg_i.shape == ():
                    arg_i = arg_i[()]
                iter_dict[key] = arg_i
            output_array[islice] = function(data[islice], **iter_dict, **kwargs)
    if not (chunk_nav_shape == output_array.shape):
        # Drop a trailing length-1 signal dimension when possible so the
        # block shape matches what map_blocks expects; a ValueError means
        # the last axis is not of size 1 and the shape is kept as-is.
        try:
            output_array = output_array.squeeze(-1)
        except ValueError:
            pass
    return output_array
def guess_output_signal_size(test_data, function, ragged, **kwargs):
    """Guess the output signal shape and dtype of *function*.

    The function is applied once to ``test_data`` and the resulting
    signal shape and data type are returned.

    Parameters
    ----------
    test_data : NumPy array
        Data from a test signal for the function to be applied to.
        The data must be from a signal with 0 navigation dimensions.
    function : function
        The function to be applied to the data
    ragged : bool
        If the data is ragged then the output signal size is () and the
        data type is 'object'
    **kwargs : dict
        Any other keyword arguments passed to the function.
    """
    if ragged:
        # Ragged output: every navigation position stores an arbitrary object.
        return (), object
    result = function(test_data, **kwargs)
    try:
        return result.shape, result.dtype
    except AttributeError:
        # Plain scalars or sequences: coerce to an array to read shape/dtype.
        result = np.asarray(result)
        return result.shape, result.dtype
def multiply(iterable):
    """Return product of sequence of numbers.

    Equivalent of functools.reduce(operator.mul, iterable, 1).
    Returns 1 for an empty iterable.

    >>> multiply([2**8, 2**30])
    274877906944
    >>> multiply([])
    1
    """
    # Bug fix: the doctest examples previously called a nonexistent
    # ``product`` function instead of ``multiply``.
    prod = 1
    for value in iterable:
        prod *= value
    return prod
def iterable_not_string(thing):
    """Return True when *thing* is iterable but is not a string."""
    if isinstance(thing, str):
        return False
    return isinstance(thing, Iterable)
def deprecation_warning(msg):
    """Emit *msg* as a ``VisibleDeprecationWarning``."""
    warnings.warn(msg, VisibleDeprecationWarning)
def add_scalar_axis(signal, lazy=None):
    """Replace all axes of *signal* with a single size-1 "Scalar" axis.

    The signal's class is also reset to ``BaseSignal`` (or ``LazySignal``
    when *lazy* is True, or when it is None and the signal is lazy).
    Modifies *signal* in place.
    """
    am = signal.axes_manager
    from hyperspy.signal import BaseSignal
    from hyperspy._signals.lazy import LazySignal
    if lazy is None:
        # Default to preserving the signal's current laziness.
        lazy = signal._lazy
    signal.__class__ = LazySignal if lazy else BaseSignal
    am.remove(am._axes)
    am._append_axis(size=1, scale=1, offset=0, name="Scalar", navigate=False)
def get_object_package_info(obj):
"""Get info about object package
Returns
-------
dic: dict
Dictionary containing ``package`` and ``package_version`` (if available)
"""
dic = {}
# Note that the following can be "__main__" if the component was user
# defined
dic["package"] = obj.__module__.split(".")[0]
if dic["package"] != "__main__":
try:
dic["package_version"] = importlib.import_module(dic["package"]).__version__
except AttributeError:
dic["package_version"] = ""
_logger.warning(
"The package {package} does not set its version in "
+ "{package}.__version__. Please report this issue to the "
+ "{package} developers.".format(package=dic["package"])
)
else:
dic["package_version"] = ""
return dic
def print_html(f_text, f_html):
    """Return an object that renders *f_text()* in plain consoles and
    *f_html()* when displayed in a Jupyter Notebook."""
    class _PrettyText:
        def __repr__(self):
            return f_text()

        def _repr_html_(self):
            return f_html()

    return _PrettyText()
def is_hyperspy_signal(input_object):
    """
    Check if an object is a Hyperspy Signal

    Parameters
    ----------
    input_object : object
        Object to be tested

    Returns
    -------
    bool
        If true the object is a subclass of hyperspy.signal.BaseSignal
    """
    # Imported locally to avoid a circular import at module load time.
    from hyperspy.signals import BaseSignal
    return isinstance(input_object, BaseSignal)
def nested_dictionary_merge(dict1, dict2):
    """Merge *dict2* into *dict1* in place, recursing into nested mappings.

    When both sides hold a mapping under the same key, the mappings are
    merged; otherwise the value from *dict2* wins.
    """
    for key in dict2:
        both_mappings = (
            key in dict1
            and isinstance(dict1[key], dict)
            and isinstance(dict2[key], Mapping)
        )
        if both_mappings:
            nested_dictionary_merge(dict1[key], dict2[key])
        else:
            dict1[key] = dict2[key]
def is_binned(signal, axis=-1):
    """Backwards compatibility check utility for the is_binned attribute.

    Prefers the deprecated ``Signal.binned`` metadata item when present,
    otherwise reads the per-axis attribute. Can be removed in v2.0.
    """
    metadata = signal.metadata
    if metadata.has_item("Signal.binned"):
        return metadata.Signal.binned
    return signal.axes_manager[axis].is_binned
| hyperspy/hyperspy | hyperspy/misc/utils.py | Python | gpl-3.0 | 51,574 | 0.000856 |
from __future__ import print_function
import argparse
import logging
import imp
import os
import sys
from . import logger
from . import plugin
from . import nmea # noqa
from .api import BoatdHTTPServer, BoatdRequestHandler
from .behaviour import Behaviour
from .behaviour import BehaviourManager
from .boat import Boat
from .color import color
from .config import Config
from .driver import BaseBoatdDriver # noqa
from .base_plugin import BasePlugin # noqa
# Package version, reported by the --version command line flag.
__version__ = '2.0.0'
# Root logger; handlers are configured later by logger.setup_logging().
log = logging.getLogger()
def load_conf(conf_file):
    '''
    Load and return the configuration object read from ``conf_file``.

    Files with a ``.json`` extension are parsed as JSON; anything else is
    treated as YAML. The path is recorded on the returned object as
    ``filename``.
    '''
    extension = os.path.splitext(conf_file)[1]
    if extension == '.json':
        conf = Config.from_json(conf_file)
    else:
        conf = Config.from_yaml(conf_file)
    conf.filename = conf_file
    return conf
def load_driver(conf):
    '''
    Return the driver module from the filename specified in the configuration
    file with key configuration.scripts.driver.
    '''
    expanded_path = os.path.expanduser(conf.driver.file)
    directory, name = os.path.split(expanded_path)
    sys.path.append(os.path.dirname(directory))
    # Also search next to the configuration file, when its location is known.
    if hasattr(conf, 'filename'):
        conf_directory, _ = os.path.split(conf.filename)
        search_dirs = [directory, conf_directory]
    else:
        search_dirs = [directory]
    module_name = os.path.splitext(name)[0]
    found_module = None
    try:
        found_module = imp.find_module(module_name, search_dirs)
        _, filename, _ = found_module
        log.info('Loading boat driver from {}'.format(color(filename, 37)))
        driver_module = imp.load_module('driver_module', *found_module)
        log.info('Using \'{}\' as boat driver'.format(
            color(type(driver_module.driver).__name__, 33)))
    except Exception:
        log.exception('Exception raised in boat driver module')
        raise
    finally:
        # Bug fix: found_module is unbound when imp.find_module itself
        # raises; guard so the original exception is not masked by a
        # NameError from the cleanup.
        if found_module is not None:
            found_module[0].close()
    if not isinstance(driver_module.driver, BaseBoatdDriver):
        log.error('Driver module does not instantiate BaseBoatdDriver')
        sys.exit(1)
    return driver_module.driver
def load_behaviours(conf):
    '''
    Build and return a BehaviourManager holding one Behaviour per entry in
    the configuration's ``behaviours`` list. Each entry is a single-key
    mapping of behaviour name to its settings (including ``file``).
    '''
    manager = BehaviourManager()
    for entry in conf.behaviours:
        name = list(entry.keys())[0]
        script_filename = entry.get(name).get('file')
        manager.add(Behaviour(name, script_filename))
    return manager
def parse_args():
    '''Parse and return the command line arguments for the daemon.'''
    parser = argparse.ArgumentParser(
        description='Experimental robotic sailing boat daemon.\n')
    parser.add_argument('config', metavar='CONFIG FILE',
                        nargs='?',
                        default='boatd-config.yaml',
                        help='a path to a configuration file')
    parser.add_argument('--version',
                        action='version',
                        version='boatd {}'.format(__version__))
    return parser.parse_args()
def run():
    '''Run the main server.'''
    args = parse_args()
    conf = load_conf(args.config)
    logger.setup_logging()
    # Wire everything together: hardware driver, boat model, plugins and
    # the behaviour scripts configured by the user.
    driver = load_driver(conf)
    boat = Boat(driver)
    plugins = plugin.load_plugins(conf, boat)
    behaviour_manager = load_behaviours(conf)
    httpd = BoatdHTTPServer(boat, behaviour_manager,
                            (conf.boatd.interface, conf.boatd.port),
                            BoatdRequestHandler)
    # Serve one request at a time until the server flags itself stopped.
    while httpd.running:
        try:
            httpd.handle_request()
        except (KeyboardInterrupt, SystemExit):
            # Graceful shutdown: stop behaviours and ask plugins to end.
            log.info('Quitting and requesting plugins end...')
            behaviour_manager.stop()
            for p in plugins:
                p.running = False
            sys.exit()
""" generator.py: Contains the Generator class. """
import random
import copy
import graphics
from helpers import *
# Known number of one-sided polyominoes for each order; used by
# Generator.generate as the stop condition for random generation:
# {order: number of polyominoes}
counts = {1: 1, 2: 1, 3: 2, 4: 7, 5: 18, 6: 60}
class Generator:
    """ A class for generating polyominoes. Call the generate function with the
    polyomino order wanted. Please Note: This class has not been tested for
    orders greater than 6. """
    def generate(self, order):
        """ Return a list of all the one-sided polyominoes of the given order.
        Objects in returned list are 2D square lists representing the shape of
        the polyominoes by boolean values.
        generate(int) -> list<list<list<bool>>>
        """
        self._order = order
        ominoes = []
        if order == 1:
            # Only one monomino exists; skip the growth algorithm entirely.
            ominoes = [[[True]]]
            return ominoes
        # This is the 'growth method' algorithm for generating polyominoes.
        # A order * order grid is made, then the bottom-left block filled.
        # The squares adjacent to that block are numbered, and one of them
        # is randomly picked. This continues till order blocks are filled.
        # Check to see if generated polyomino is a repeat, and continue
        # till we've generated enough.
        # NOTE(review): termination relies on `counts` holding the exact
        # number of one-sided polyominoes for this order.
        while len(ominoes) < counts[order]:
            free_squares = {}
            pick = 0
            max_number = 0
            omino = rect_list(order, order, False)
            if order > 4:
                # A different starting point for orders > 4
                # This is so crosses and similar shapes can be generated
                row, col = order - 2, 0
            else:
                row, col = order - 1, 0
            omino[row][col] = True
            # Grow the shape one square at a time until `order` are filled.
            for s in xrange(order - 1):
                free_squares, max_number = self._number_adjacent_squares(omino,
                    (row, col), free_squares, max_number)
                # Only squares numbered after the previous pick are candidates.
                possible = [n for n in free_squares.keys() if n > pick]
                pick = random.choice(possible)
                row, col = free_squares[pick]
                free_squares.pop(pick)
                omino[row][col] = True
            # Canonicalise so duplicates (same shape, other rotation or
            # position) can be detected by simple list equality.
            omino = self._normalise(omino)
            if not [n for n in ominoes if n == omino]:
                ominoes.append(omino)
        return ominoes
    def generate_colours(self, n):
        """ Generate n unique colours and return as a list of RGB triples.
        Colours are as contrasted as possible.
        generate_colours(int) -> list<(int, int, int)>
        """
        # This divides the 360 degrees of hue in the HSV colour space by n,
        # and so chooses n colours with equally spaced hues.
        colours = []
        degrees = 360 / n
        for i in xrange(n):
            hsv = (degrees * i, 1.0, 0.78)
            rgb = graphics.hsv2rgb(hsv)
            colours.append(rgb)
        return colours
    def _normalise(self, polyomino):
        """ Return a copy of the given polyomino with its rotation and position
        normalised. That is, in its left- and bottom-most position and rotation.
        _normalise(list<list<bool>>) -> list<list<bool>>
        """
        # Bottom- and left-most rotation and position is defined here as the
        # position in which the most bottom row and left column squares are
        # filled.
        adjusted = copy.deepcopy(polyomino)
        rowfractions = {} # Fraction of bottom row filled
        colfractions = {} # Fraction of left column filled
        # Score all four rotations (each moved into the bottom-left corner).
        for rotation in xrange(4):
            adjusted = self._move(adjusted)
            rowfilled = adjusted[self._order - 1].count(True)
            rowfraction = float(rowfilled) / self._order
            rowfractions.update({rotation: rowfraction})
            colfilled = [adjusted[row][0] for row in xrange(self._order)].count(True)
            colfraction = float(colfilled) / self._order
            colfractions.update({rotation: colfraction})
            adjusted = self._rotate(adjusted)
        # Pick the rotation with the largest fractions; the left-column
        # fraction is used as a tie breaker between equally good rows.
        rowpick = max(rowfractions.values())
        rowpicked_rotations = [k for k, v in rowfractions.iteritems() \
            if v == rowpick]
        if len(rowpicked_rotations) > 1:
            colpick = max([v for k, v in colfractions.iteritems() \
                if k in rowpicked_rotations])
            colpicked_rotations = [k for k, v in colfractions.iteritems() \
                if v == colpick and k in rowpicked_rotations]
            # NOTE(review): colpicked_rotations appears to never be empty
            # here (colpick is the max over the same keys), so this branch
            # looks unreachable -- kept for safety.
            if len(colpicked_rotations) == 0:
                rotations = rowpicked_rotations[0]
            else:
                rotations = colpicked_rotations[0]
        else:
            rotations = rowpicked_rotations[0]
        # Re-apply the winning number of rotations to a fresh copy.
        normalised = copy.deepcopy(polyomino)
        for rotation in xrange(rotations):
            normalised = self._rotate(normalised)
        normalised = self._move(normalised)
        return normalised
    def _move(self, polyomino):
        """ Return a copy of the given polyomino pushed into the bottom left
        corner of its grid.
        _move(list<list<bool>>) -> list<list<bool>>
        """
        moved = copy.deepcopy(polyomino)
        while moved[self._order - 1].count(True) == 0:
            # While bottom row is empty, move down
            for row in xrange(self._order - 1, 0, -1):
                for col in xrange(self._order):
                    moved[row][col] = moved[row - 1][col]
            moved[0] = [False] * self._order
        while [moved[row][0] for row in xrange(self._order)].count(True) == 0:
            # While left column is empty, move left
            for row in xrange(self._order):
                for col in xrange(self._order - 1):
                    moved[row][col] = moved[row][col + 1]
            for row in xrange(self._order):
                moved[row][self._order - 1] = False
        return moved
    def _rotate(self, polyomino):
        """ Return a copy of the given polyomino rotated clockwise 90 degrees.
        _rotate(list<list<bool>>) -> list<list<bool>>
        """
        rotated = rect_list(self._order, self._order, False)
        for row in xrange(self._order):
            for col in xrange(self._order):
                rotated[col][self._order - 1 - row] = polyomino[row][col]
        return rotated
    def _number_adjacent_squares(self, polyomino, coordinates, \
        numbered_squares, max_number):
        """ Return a pair with a dictionary of all the adjacent squares in the
        given polyomino, keyed by their number, where they are numbered
        clockwise from the top, and the highest numbered square. Numbering will
        start from max_number and any previously numbered squares in
        numbered_squares will be included.
        _number_adjacent_squares(list<list<bool>>, (int,int),
                                 dict<int:(int,int)>, int) ->
                                 (dict<int:(int, int)>, int)
        """
        row, col = coordinates
        # Neighbours clockwise from the top: up, right, down, left.
        possible_squares = [(row - 1, col), (row, col + 1),
                            (row + 1, col), (row, col - 1)]
        adjacents = copy.deepcopy(numbered_squares)
        n = max_number
        for row, col in possible_squares:
            if row in range(self._order) and col in range(self._order) \
                and not polyomino[row][col] \
                and not (row, col) in numbered_squares.values():
                # Number the square only if its in the grid, not already
                # numbered and not already filled
                n += 1
                adjacents.update({n: (row, col)})
        return adjacents, n
| nickjhughes/polyominohs | generator.py | Python | mit | 7,968 | 0.005146 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import html
import bleach
import misaka
from configparser import NoOptionError
class Sanitizer(object):
    """Whitelist-based HTML sanitizer built on bleach.

    Strips any element or attribute not on the allow list and forces
    ``rel="nofollow noopener"`` on generated links.
    """
    def __init__(self, elements, attributes):
        """Build the allow lists; *elements*/*attributes* extend the defaults."""
        # attributes found in Sundown's HTML serializer [1]
        # except for <img> tag,
        # because images are not generated anyways.
        #
        # [1] https://github.com/vmg/sundown/blob/master/html/html.c
        self.elements = ["a", "p", "hr", "br", "ol", "ul", "li",
                         "pre", "code", "blockquote",
                         "del", "ins", "strong", "em",
                         "h1", "h2", "h3", "h4", "h5", "h6",
                         "table", "thead", "tbody", "th", "td"] + elements
        # href for <a> and align for <table>
        self.attributes = ["align", "href"] + attributes
    def sanitize(self, text):
        """Return *text* cleaned to the allow lists and with URLs linkified."""
        clean_html = bleach.clean(text, tags=self.elements, attributes=self.attributes, strip=True)
        def set_links(attrs, new=False):
            # bleach linkifier callback: add rel="nofollow noopener" to
            # every non-mailto link, preserving any existing rel values.
            href_key = (None, u'href')
            if href_key not in attrs:
                return attrs
            if attrs[href_key].startswith(u'mailto:'):
                return attrs
            rel_key = (None, u'rel')
            rel_values = [val for val in attrs.get(rel_key, u'').split(u' ') if val]
            for value in [u'nofollow', u'noopener']:
                if value not in [rel_val.lower() for rel_val in rel_values]:
                    rel_values.append(value)
            attrs[rel_key] = u' '.join(rel_values)
            return attrs
        linker = bleach.linkifier.Linker(callbacks=[set_links])
        return linker.linkify(clean_html)
def Markdown(extensions=("strikethrough", "superscript", "autolink",
                         "fenced-code"), flags=[]):
    """Return a callable rendering Markdown text to HTML via misaka.

    The rendered output is wrapped in a top-level ``<p>`` element unless
    the renderer already produced one.
    """
    md = misaka.Markdown(Unofficial(flags=flags), extensions=extensions)
    def render(text):
        html_out = md(text).rstrip("\n")
        if not (html_out.startswith("<p>") or html_out.endswith("</p>")):
            html_out = "<p>" + html_out + "</p>"
        return html_out
    return render
class Unofficial(misaka.HtmlRenderer):
    """A few modifications to process "common" Markdown.

    For instance, fenced code blocks (~~~ or ```) are just wrapped in <code>
    which does not preserve line breaks. If a language is given, it is added
    to <code class="$lang">, compatible with Highlight.js.
    """
    def blockcode(self, text, lang):
        attr = ' class="{0}"'.format(html.escape(lang)) if lang else ''
        body = html.escape(text, False)
        return "<pre><code{0}>{1}</code></pre>\n".format(attr, body)
class Markup(object):
    """Render comment text: Markdown to HTML, then sanitize the result."""
    def __init__(self, conf):
        # "flags" is optional in older configuration files.
        try:
            flags = conf.getlist("flags")
        except NoOptionError:
            flags = []
        parse = Markdown(extensions=conf.getlist("options"), flags=flags)
        sanitize = Sanitizer(conf.getlist("allowed-elements"),
                             conf.getlist("allowed-attributes")).sanitize
        self._render = lambda text: sanitize(parse(text))
    def render(self, text):
        """Return sanitized HTML for the given Markdown *text*."""
        return self._render(text)
| jelmer/isso | isso/utils/html.py | Python | mit | 3,145 | 0.000954 |
import urllib
import urllib2
import xml.dom.minidom
import re
import socket
from util import hook
# Trigger pattern: any message containing "taiga" or "taigabot" as a whole
# word (case-insensitive); group 1 captures the entire line.
chatbot_re = (r'(^.*\b(taiga|taigabot)\b.*$)', re.I)
@hook.regex(*chatbot_re)
@hook.command
def chatbot(inp, reply=None, nick=None, conn=None):
    """Forward the triggering message to the Program-O chatbot API and
    return its reply, addressed to the requesting nick.
    """
    # Strip the bot's own names so the API only sees the user's message.
    message = inp.group(1).lower().replace('taigabot', '').replace('taiga', '').replace(':', '')
    args = {'bot_id': '6', 'say': message.strip(), 'convo_id': conn.nick, 'format': 'xml'}
    data = urllib.urlencode(args)
    # Bug fix: removed the dead local `resp = False`, which was never used.
    url_response = urllib2.urlopen('http://api.program-o.com/v2/chatbot/?', data)
    response = url_response.read()
    response_dom = xml.dom.minidom.parseString(response)
    text = response_dom.getElementsByTagName('response')[0].childNodes[0].data.strip()
    # Re-brand the canned bot identity strings in the reply.
    return nick + ': ' + str(text.lower().replace('programo', 'taiga').replace('program-o', 'taigabot').replace('elizabeth', 'wednesday'))
| FrozenPigs/Taigabot | plugins/_broken/chatbot.py | Python | gpl-3.0 | 874 | 0.006865 |
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test the style of toggle and radio buttons inside a palette. The buttons
contains only an icon and should be rendered similarly to the toolbar
controls. Ticket #2855.
"""
from gi.repository import Gtk
from sugar3.graphics.palette import Palette
from sugar3.graphics.icon import Icon
from sugar3.graphics import style
import common
# Build a test window whose palette holds one toggle and one radio button,
# each showing only an icon (ticket #2855: they should render like toolbar
# controls).
test = common.TestPalette()
palette = Palette('Test radio and toggle')
test.set_palette(palette)
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
# Icon-only toggle button.
toggle = Gtk.ToggleButton()
icon = Icon(icon_name='go-previous', pixel_size=style.STANDARD_ICON_SIZE)
toggle.set_image(icon)
box.pack_start(toggle, False, False, 0)
toggle.show()
# Icon-only radio button; set_mode(False) draws it as a plain button
# rather than with a radio indicator.
radio = Gtk.RadioButton()
icon = Icon(icon_name='go-next', pixel_size=style.STANDARD_ICON_SIZE)
radio.set_image(icon)
radio.set_mode(False)
box.pack_start(radio, False, False, 0)
radio.show()
palette.set_content(box)
box.show()
if __name__ == '__main__':
    common.main(test)
| quozl/sugar-toolkit-gtk3 | examples/ticket2855.py | Python | lgpl-2.1 | 1,719 | 0 |
"""This file defines the neural network class, where a network is reinitialized from configuration files.
The class also has a forward propagation method.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numpy as np
import tensorflow as tf
class NeuralNetwork(object):
    """NeuralNetwork is a class that interfaces the verification code with
    the neural net parameters (weights).
    """

    def __init__(
        self,
        net_weights,
        net_biases,
        net_layer_types,
        input_shape=None,
        cnn_params=None,
    ):
        """Function to initialize NeuralNetParams class.

        Args:
          net_weights: list of numpy matrices of weights of each layer
            [convention: x[i+1] = W[i] x[i]
          net_biases: list of numpy arrays of biases of each layer
          net_layer_types: type of each layer ['ff' or 'ff_relu' or 'ff_conv' or
            'ff_conv_relu']
            'ff': Simple feedforward layer with no activations
            'ff_relu': Simple feedforward layer with ReLU activations
            'ff_conv': Convolution layer with no activation
            'ff_conv_relu': Convolution layer with ReLU activation
          input_shape: [num_rows, num_columns, num_channels] at the input layer
          cnn_params: list of dictionaries containing stride and padding for
            each layer
        Raises:
          ValueError: the input lists of net params are not of the same length
        """
        if (len(net_weights) != len(net_biases)) or len(net_biases) != len(
            net_layer_types
        ):
            raise ValueError("Inputs of net params are not of same length ....")
        if net_layer_types[len(net_layer_types) - 1] != "ff":
            raise ValueError("Final layer is not linear")
        # The last (linear) layer is stored separately as final_weights/
        # final_bias; only the hidden layers populate the lists below.
        self.num_hidden_layers = len(net_weights) - 1
        self.weights = []
        self.biases = []
        self.layer_types = []
        self.sizes = []
        self.input_shapes = []
        self.output_shapes = []
        self.has_conv = False
        if input_shape is not None:
            current_num_rows = input_shape[0]
            current_num_columns = input_shape[1]
            current_num_channels = input_shape[2]
        self.cnn_params = cnn_params
        # Setting the sizes of the layers of the network
        # sizes[i] contains the size of x_i
        for i in range(self.num_hidden_layers):
            shape = np.shape(net_weights[i])
            self.weights.append(tf.convert_to_tensor(net_weights[i], dtype=tf.float32))
            self.layer_types.append(net_layer_types[i])
            if self.layer_types[i] in {"ff", "ff_relu"}:
                self.sizes.append(int(shape[1]))
                # For feedforward networks, no unraveling the bias terms
                small_bias = tf.convert_to_tensor(net_biases[i], dtype=tf.float32)
                self.biases.append(tf.reshape(small_bias, [-1, 1]))
                # Assumes that x^{i+1} = W_i x^i
                self.input_shapes.append([int(shape[1]), 1])
                self.output_shapes.append([int(shape[0]), 1])
            # Convolution type
            else:
                self.has_conv = True
                # Conv kernels are [rows, cols, in_channels, out_channels].
                num_filters = shape[3]
                self.input_shapes.append(
                    [1, current_num_rows, current_num_columns, current_num_channels]
                )
                self.sizes.append(
                    current_num_rows * current_num_columns * current_num_channels
                )
                current_num_channels = num_filters
                # For propagating across multiple conv layers
                if self.cnn_params[i]["padding"] == "SAME":
                    current_num_rows = int(
                        current_num_rows / self.cnn_params[i]["stride"]
                    )
                    current_num_columns = int(
                        current_num_columns / self.cnn_params[i]["stride"]
                    )
                self.output_shapes.append(
                    [1, current_num_rows, current_num_columns, current_num_channels]
                )
                # For conv networks, unraveling the bias terms
                small_bias = tf.convert_to_tensor(net_biases[i], dtype=tf.float32)
                large_bias = tf.tile(
                    tf.reshape(small_bias, [-1, 1]),
                    [current_num_rows * current_num_columns, 1],
                )
                self.biases.append(large_bias)
        # Last layer shape: always ff
        if self.has_conv:
            final_dim = int(np.shape(net_weights[self.num_hidden_layers])[1])
            self.input_shapes.append([final_dim, 1])
        else:
            final_dim = int(np.shape(net_weights[self.num_hidden_layers - 1])[0])
        self.sizes.append(final_dim)
        self.final_weights = tf.convert_to_tensor(
            net_weights[self.num_hidden_layers], dtype=tf.float32
        )
        self.final_bias = tf.convert_to_tensor(
            net_biases[self.num_hidden_layers], dtype=tf.float32
        )

    def forward_pass(self, vector, layer_index, is_transpose=False, is_abs=False):
        """Performs forward pass through the layer weights at layer_index.

        Args:
          vector: vector that has to be passed through in forward pass
          layer_index: index of the layer
          is_transpose: whether the weights of the layer have to be transposed
          is_abs: whether to take the absolute value of the weights
        Returns:
          tensor that corresponds to the forward pass through the layer
        Raises:
          ValueError: if the layer_index is negative or more than num hidden layers
        """
        if layer_index < 0 or layer_index > self.num_hidden_layers:
            raise ValueError("Invalid layer index")
        layer_type = self.layer_types[layer_index]
        weight = self.weights[layer_index]
        if is_abs:
            weight = tf.abs(weight)
        # A transposed pass maps an output-shaped vector back to input shape.
        if is_transpose:
            vector = tf.reshape(vector, self.output_shapes[layer_index])
        else:
            vector = tf.reshape(vector, self.input_shapes[layer_index])
        if layer_type in {"ff", "ff_relu"}:
            if is_transpose:
                weight = tf.transpose(weight)
            return_vector = tf.matmul(weight, vector)
        elif layer_type in {"conv", "conv_relu"}:
            # NOTE(review): __init__'s docstring names conv types
            # 'ff_conv'/'ff_conv_relu' while this branch matches
            # 'conv'/'conv_relu' -- confirm which labels callers pass.
            if is_transpose:
                return_vector = tf.nn.conv2d_transpose(
                    vector,
                    weight,
                    output_shape=self.input_shapes[layer_index],
                    strides=[
                        1,
                        self.cnn_params[layer_index]["stride"],
                        self.cnn_params[layer_index]["stride"],
                        1,
                    ],
                    padding=self.cnn_params[layer_index]["padding"],
                )
            else:
                return_vector = tf.nn.conv2d(
                    vector,
                    weight,
                    strides=[
                        1,
                        self.cnn_params[layer_index]["stride"],
                        self.cnn_params[layer_index]["stride"],
                        1,
                    ],
                    padding=self.cnn_params[layer_index]["padding"],
                )
        else:
            raise NotImplementedError("Unsupported layer type: {0}".format(layer_type))
        # Flatten back to a column vector of the appropriate layer size.
        if is_transpose:
            return tf.reshape(return_vector, (self.sizes[layer_index], 1))
        return tf.reshape(return_vector, (self.sizes[layer_index + 1], 1))
def load_network_from_checkpoint(checkpoint, model_json, input_shape=None):
    """Build a NeuralNetwork from a TF checkpoint and a JSON description.

    Args:
      checkpoint: tensorflow checkpoint with trained model to verify
      model_json: path of json file with model description of the network:
        a list of dictionary items for each layer containing 'type',
        'weight_var', 'bias_var' (and, for conv layers, 'stride' and
        'padding'). 'type' is one of {'ff', 'ff_relu', 'conv'};
        'weight_var' is the name of the tf variable for the weights of
        layer i; 'bias_var' is the name of the tf variable for the bias of
        layer i. Note that the last layer is always feedforward.
        Weights of 'ff'/'ff_relu' layers are transposed here so the stored
        matrices follow the convention x[i+1] = W[i] x[i] + b[i].
      input_shape: optional [num_rows, num_columns, num_channels] of the
        input layer, forwarded to the NeuralNetwork constructor (needed
        when the network contains conv layers).

    Returns:
      A NeuralNetwork built from the checkpoint tensors.

    Raises:
      ValueError: If layer types are invalid, variable names are not found
        in the checkpoint, or a conv layer is missing 'stride'/'padding'.
    """
    # Load checkpoint
    reader = tf.train.load_checkpoint(checkpoint)
    variable_map = reader.get_variable_to_shape_map()
    checkpoint_variable_names = variable_map.keys()
    # Parse JSON file for names
    with tf.gfile.Open(model_json) as f:
        list_model_var = json.load(f)
    net_layer_types = []
    net_weights = []
    net_biases = []
    cnn_params = []
    # Checking validity of the input and adding to list
    for layer_model_var in list_model_var:
        if layer_model_var["type"] not in {"ff", "ff_relu", "conv"}:
            raise ValueError("Invalid layer type in description")
        if (
            layer_model_var["weight_var"] not in checkpoint_variable_names
            or layer_model_var["bias_var"] not in checkpoint_variable_names
        ):
            raise ValueError("Variable names not found in checkpoint")
        net_layer_types.append(layer_model_var["type"])
        layer_weight = reader.get_tensor(layer_model_var["weight_var"])
        layer_bias = reader.get_tensor(layer_model_var["bias_var"])
        # TODO(aditirag): is there a way to automatically check when to transpose
        # We want weights W such that x^{i+1} = W^i x^i + b^i
        # Can think of a hack involving matching shapes but if shapes are equal
        # it can be ambiguous
        if layer_model_var["type"] in {"ff", "ff_relu"}:
            layer_weight = np.transpose(layer_weight)
            cnn_params.append(None)
        if layer_model_var["type"] in {"conv"}:
            if "stride" not in layer_model_var or "padding" not in layer_model_var:
                raise ValueError("Please define stride and padding for conv layers.")
            cnn_params.append(
                {
                    "stride": layer_model_var["stride"],
                    "padding": layer_model_var["padding"],
                }
            )
        net_weights.append(layer_weight)
        # Biases are stored as column vectors.
        net_biases.append(np.reshape(layer_bias, (np.size(layer_bias), 1)))
    return NeuralNetwork(
        net_weights, net_biases, net_layer_types, input_shape, cnn_params
    )
| cleverhans-lab/cleverhans | cleverhans_v3.1.0/cleverhans/experimental/certification/nn.py | Python | mit | 11,199 | 0.001429 |
#
# Copyright (c) 2008--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Kickstart-related operations
#
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnException
from spacewalk.server import rhnSQL, rhnAction, rhnLib, rhnChannel
def update_kickstart_session(server_id, action_id, action_status,
    kickstart_state, next_action_type):
    """Advance the kickstart session associated with (server_id, action_id).

    On a completed action (status 2) the session moves to *kickstart_state*
    and points at the next action of *next_action_type*; on a failed action
    (status 3) it is marked 'failed'. Returns the session id, or None when
    the action is not kickstart-related.
    """
    log_debug(3, server_id, action_id, action_status, kickstart_state, next_action_type)
    # Is this a kickstart-related action?
    ks_session_id = get_kickstart_session_id(server_id, action_id)
    if ks_session_id is None:
        # Nothing more to do
        log_debug(4, "Kickstart session not found")
        return None
    # Check the current action state
    if action_status == 2:
        # Completed
        ks_status = kickstart_state
        # Get the next action - it has to be of the right type
        next_action_id = get_next_action_id(action_id, next_action_type)
    elif action_status == 3:
        # Failed
        ks_status = 'failed'
        next_action_id = None
    else:
        # Only completed/failed statuses are expected here.
        raise rhnException("Invalid action state %s" % action_status)
    update_ks_session_table(ks_session_id, ks_status, next_action_id,
        server_id)
    return ks_session_id
_query_update_ks_session_table = rhnSQL.Statement("""
update rhnKickstartSession
set action_id = :action_id,
state_id = :ks_status_id,
new_server_id = :server_id
where id = :ks_session_id
""")
def update_ks_session_table(ks_session_id, ks_status, next_action_id,
        server_id):
    """Persist the session's new state label, next action and server;
    once the session reaches 'complete', clean up stale guest records
    for the host (see delete_guests)."""
    log_debug(4, ks_session_id, ks_status, next_action_id, server_id)
    # Translate the state label into its DB id
    ks_table = rhnSQL.Table('rhnKickstartSessionState', 'label')
    ks_status_id = ks_table[ks_status]['id']
    h = rhnSQL.prepare(_query_update_ks_session_table)
    h.execute(ks_session_id=ks_session_id, ks_status_id=ks_status_id,
        action_id=next_action_id, server_id=server_id)
    if ks_status == 'complete':
        delete_guests(server_id)
_query_lookup_guests_for_host = rhnSQL.Statement("""
select virtual_system_id from rhnVirtualInstance
where host_system_id = :server_id
""")
_query_delete_virtual_instances = rhnSQL.Statement("""
delete from rhnVirtualInstance where host_system_id = :server_id
""")
def delete_guests(server_id):
"""
Callback used after a successful kickstart to remove any guest virtual
instances, as well as their associated servers.
"""
# First delete all the guest server objects:
h = rhnSQL.prepare(_query_lookup_guests_for_host)
h.execute(server_id=server_id)
delete_server = rhnSQL.Procedure("delete_server")
log_debug(4, "Deleting guests")
while 1:
row = h.fetchone_dict()
if not row:
break
guest_id = row['virtual_system_id']
log_debug(4, 'Deleting guest server: %s'% guest_id)
try:
if guest_id != None:
delete_server(guest_id)
except rhnSQL.SQLError:
log_error("Error deleting server: %s" % guest_id)
# Finally delete all the virtual instances:
log_debug(4, "Deleting all virtual instances for host")
h = rhnSQL.prepare(_query_delete_virtual_instances)
h.execute(server_id=server_id)
# Commit all changes:
try:
rhnSQL.commit()
except rhnSQL.SQLError, e:
log_error("Error committing transaction: %s" % e)
rhnSQL.rollback()
_query_get_next_action_id = rhnSQL.Statement("""
select a.id
from rhnAction a, rhnActionType at
where a.prerequisite = :action_id
and a.action_type = at.id
and at.label = :next_action_type
""")
def get_next_action_id(action_id, next_action_type = None):
    """Return the id of the action of type `next_action_type` whose
    prerequisite is `action_id`, or None when there is no such action
    (or no type was requested)."""
    if not next_action_type:
        return None
    cursor = rhnSQL.prepare(_query_get_next_action_id)
    cursor.execute(action_id=action_id, next_action_type=next_action_type)
    result = cursor.fetchone_dict()
    if result:
        return result['id']
    return None
_query_lookup_kickstart_session_id = rhnSQL.Statement("""
select ks.id
from rhnKickstartSession ks
where (
(ks.old_server_id = :server_id and ks.new_server_id is NULL)
or ks.new_server_id = :server_id
or ks.host_server_id = :server_id
)
and ks.action_id = :action_id
""")
def get_kickstart_session_id(server_id, action_id):
    """Look up the kickstart session this (server, action) pair belongs
    to; returns the session id, or None when the action is not part of
    any kickstart."""
    cursor = rhnSQL.prepare(_query_lookup_kickstart_session_id)
    cursor.execute(server_id=server_id, action_id=action_id)
    result = cursor.fetchone_dict()
    if result:
        return result['id']
    # Not a kickstart-related action; nothing to do
    return None
_query_insert_package_delta = rhnSQL.Statement("""
insert into rhnPackageDelta (id, label)
values (:package_delta_id, 'ks-delta-' || :package_delta_id)
""")
_query_insert_action_package_delta = rhnSQL.Statement("""
insert into rhnActionPackageDelta (action_id, package_delta_id)
values (:action_id, :package_delta_id)
""")
_query_insert_package_delta_element = rhnSQL.Statement("""
insert into rhnPackageDeltaElement
(package_delta_id, transaction_package_id)
values
(:package_delta_id,
lookup_transaction_package(:operation, :n, :e, :v, :r, :a))
""")
def schedule_kickstart_delta(server_id, kickstart_session_id,
        installs, removes):
    """Schedule a packages.runTransaction action applying a package
    delta to the server being kickstarted.

    installs/removes are sequences of (name, version, release, epoch)
    rows (arch is bound as NULL via a=None).  Moves the session to
    'package_synch_scheduled' and returns the new action id.
    """
    log_debug(3, server_id, kickstart_session_id)
    row = get_kickstart_session_info(kickstart_session_id, server_id)
    org_id = row['org_id']
    scheduler = row['scheduler']
    action_id = rhnAction.schedule_server_action(
        server_id,
        action_type='packages.runTransaction', action_name="Package delta",
        delta_time=0, scheduler=scheduler, org_id=org_id,
    )
    # Allocate the delta container and link it to the action
    package_delta_id = rhnSQL.Sequence('rhn_packagedelta_id_seq').next()
    h = rhnSQL.prepare(_query_insert_package_delta)
    h.execute(package_delta_id=package_delta_id)
    h = rhnSQL.prepare(_query_insert_action_package_delta)
    h.execute(action_id=action_id, package_delta_id=package_delta_id)
    # Bulk-insert the delta elements; 'insert'/'delete' is the
    # per-row transaction operation
    h = rhnSQL.prepare(_query_insert_package_delta_element)
    col_names = [ 'n', 'v', 'r', 'e']
    __execute_many(h, installs, col_names, operation='insert', a=None,
        package_delta_id=package_delta_id)
    __execute_many(h, removes, col_names, operation='delete', a=None,
        package_delta_id=package_delta_id)
    update_ks_session_table(kickstart_session_id, 'package_synch_scheduled',
        action_id, server_id)
    return action_id
def schedule_kickstart_sync(server_id, kickstart_session_id):
    """Schedule a kickstart.schedule_sync action for the server and
    return its action id."""
    row = get_kickstart_session_info(kickstart_session_id, server_id)
    org_id = row['org_id']
    scheduler = row['scheduler']
    # Create a new action
    action_id = rhnAction.schedule_server_action(
        server_id,
        action_type='kickstart.schedule_sync',
        action_name="Schedule a package sync",
        delta_time=0, scheduler=scheduler, org_id=org_id,
    )
    return action_id
def _get_ks_virt_type(type_id):
    """Resolve a rhnKickstartVirtualizationType id to its label,
    falling back to 'auto' when the id cannot be found."""
    _query_kickstart_virt_type = rhnSQL.Statement("""
        select label
        from rhnKickstartVirtualizationType kvt
        where kvt.id = :id
    """)
    prepared_query = rhnSQL.prepare(_query_kickstart_virt_type)
    prepared_query.execute(id=type_id)
    row = prepared_query.fetchone_dict()
    # XXX: we should have better constraints on the db so this doesn't happen.
    if not row:
        kstype = 'auto'
    else:
        kstype = row['label']
    log_debug(1, "KS_TYPE: %s" % kstype)
    return kstype
def get_kickstart_session_type(server_id, action_id):
    """Return the virtualization-type label of the kickstart session
    behind this (server, action) pair."""
    session_id = get_kickstart_session_id(server_id, action_id)
    session_info = get_kickstart_session_info(session_id, server_id)
    return _get_ks_virt_type(session_info['virtualization_type'])
def subscribe_to_tools_channel(server_id, kickstart_session_id):
    """For paravirt kickstarts, schedule the action that subscribes the
    host/guest to the RHN Tools channel.

    Returns the new action id, or None when the session is not a
    para_host/para_guest one.
    """
    log_debug(3)
    session = get_kickstart_session_info(kickstart_session_id, server_id)
    virt_type = _get_ks_virt_type(session['virtualization_type'])

    # (action_type, action_name) keyed by paravirt session flavour
    dispatch = {
        'para_host': ('kickstart_host.add_tools_channel',
                      'Subscribe server to RHN Tools channel.'),
        'para_guest': ('kickstart_guest.add_tools_channel',
                       'Subscribe guest to RHN Tools channel.'),
    }
    if virt_type not in dispatch:
        return None
    action_type, action_name = dispatch[virt_type]
    return rhnAction.schedule_server_action(
        server_id,
        action_type=action_type,
        action_name=action_name,
        delta_time=0, scheduler=session['scheduler'],
        org_id=session['org_id'],
    )
def schedule_virt_pkg_install(server_id, kickstart_session_id):
    """Schedule installation of the rhn-virtualization host/guest
    package appropriate for this session's virtualization type; returns
    the action id, or None for non-virt kickstarts."""
    log_debug(3)
    row = get_kickstart_session_info(kickstart_session_id, server_id)
    org_id = row['org_id']
    scheduler = row['scheduler']
    ks_type_id = row['virtualization_type']
    log_debug(1, "VIRTUALIZATION_TYPE: %s" % str(ks_type_id))
    ks_type = _get_ks_virt_type(ks_type_id)
    log_debug(1, "VIRTUALZIATION_TYPE_LABEL: %s" % str(ks_type))
    if ks_type == 'para_host':
        log_debug(1, "SCHEDULING VIRT HOST PACKAGE INSTALL...")
        action_id = rhnAction.schedule_server_action(
            server_id,
            action_type='kickstart_host.schedule_virt_host_pkg_install',
            action_name="Schedule install of rhn-virtualization-host package.",
            delta_time=0, scheduler=scheduler, org_id=org_id,
        )
    elif ks_type == 'para_guest':
        log_debug(1, "SCHEDULING VIRT GUEST PACKAGE INSTALL...")
        action_id = rhnAction.schedule_server_action(
            server_id,
            action_type='kickstart_guest.schedule_virt_guest_pkg_install',
            action_name="Schedule install of rhn-virtualization-guest package.",
            delta_time=0, scheduler=scheduler, org_id=org_id,
        )
    else:
        log_debug(1, "NOT A VIRT KICKSTART")
        action_id = None
    return action_id
_query_ak_deploy_config = rhnSQL.Statement("""
select rt.deploy_configs
from rhnKickstartSession ks,
rhnKickstartDefaultRegToken kdrt,
rhnRegToken rt
where ks.kickstart_id = kdrt.kickstart_id
and kdrt.regtoken_id = rt.id
and ks.id = :session_id
""")
# Make sure the activation keys associated with this kickstart profile
# have enabled deploying config files. Only deploy configs if at least one
# of them has. This is replacing code that didn't work because the
# rhnFlags('registration_token') could not be set during the rhn_check call.
def ks_activation_key_deploy_config(kickstart_session_id):
    """Return True when at least one activation key attached to this
    session's kickstart profile has config deployment enabled."""
    cursor = rhnSQL.prepare(_query_ak_deploy_config)
    cursor.execute(session_id=kickstart_session_id)
    for row in (cursor.fetchall_dict() or []):
        if row['deploy_configs'] == 'Y':
            return True
    return False
_query_schedule_config_files = rhnSQL.Statement("""
insert into rhnActionConfigRevision
(id, action_id, server_id, config_revision_id)
select sequence_nextval('rhn_actioncr_id_seq'), :action_id,
server_id, config_revision_id
from (
select distinct scc.server_id,
cf.latest_config_revision_id config_revision_id
from rhnServerConfigChannel scc,
rhnConfigChannelType cct,
rhnConfigChannel cc,
rhnConfigFile cf,
rhnConfigFileState cfs
where scc.server_id = :server_id
and scc.config_channel_id = cf.config_channel_id
and cf.config_channel_id = cc.id
and cc.confchan_type_id = cct.id
and cct.label in ('normal', 'local_override')
and cf.latest_config_revision_id is not null
and cf.state_id = cfs.id
and cfs.label = 'alive'
) X
""")
def schedule_config_deploy(server_id, action_id, kickstart_session_id,
        server_profile):
    """ schedule a configfiles.deploy action dependent on the current action

    If the session (and one of its activation keys) enables config
    deployment, chain a configfiles.deploy action after `action_id`
    (optionally preceded by an rhncfg package install when
    `server_profile` is given) and return the new action id; otherwise
    mark the session 'complete' and return None.
    """
    log_debug(3, server_id, action_id, kickstart_session_id)
    row = get_kickstart_session_info(kickstart_session_id, server_id)
    org_id = row['org_id']
    scheduler = row['scheduler']
    # Both the kickstart profile and an activation key must allow it
    deploy_configs = (row['deploy_configs'] == 'Y'
        and ks_activation_key_deploy_config(kickstart_session_id))
    if not deploy_configs:
        # Nothing more to do here
        update_ks_session_table(kickstart_session_id, 'complete',
            next_action_id=None, server_id=server_id)
        return None
    if server_profile:
        # Have to schedule a package deploy action
        aid = schedule_rhncfg_install(server_id, action_id, scheduler,
            server_profile)
    else:
        aid = action_id
    next_action_id = rhnAction.schedule_server_action(
        server_id,
        action_type='configfiles.deploy',
        action_name='Deploy config files',
        delta_time=0, scheduler=scheduler, org_id=org_id,
        prerequisite=aid,
    )
    # Deploy all of the config files that are part of this server's config
    # channels
    h = rhnSQL.prepare(_query_schedule_config_files)
    h.execute(server_id=server_id, action_id=next_action_id)
    update_ks_session_table(kickstart_session_id, 'configuration_deploy',
        next_action_id, server_id)
    return next_action_id
class MissingBaseChannelError(Exception):
    """Raised when a server has no base channel subscription."""
    pass
def schedule_rhncfg_install(server_id, action_id, scheduler,
        server_profile=None):
    """Make sure the server has the packages providing rhn-config-action.

    Subscribes the server to a channel providing the capability (if
    needed), diffs the provider packages against the server's package
    profile, and schedules an install for the missing ones.  Returns the
    id of the install action, or `action_id` unchanged when nothing has
    to be installed.
    """
    capability = 'rhn-config-action'
    try:
        packages = _subscribe_server_to_capable_channels(server_id, scheduler,
            capability)
    except MissingBaseChannelError:
        log_debug(2, "No base channel", server_id)
        return action_id

    if not packages:
        # No channels offer this capability
        log_debug(3, server_id, action_id,
            "No channels to provide %s found" % capability)
        # No new action needed here
        return action_id

    if not server_profile:
        server_profile = get_server_package_profile(server_id)

    # Make the package profile a hash, for easier checking
    sphash = {}
    for p in server_profile:
        sphash[tuple(p)] = None

    packages_to_install = []
    for p in packages:
        key = (p['name'], p['version'], p['release'], p['epoch'])
        # `dict.has_key` is deprecated (removed in python 3); the `in`
        # operator is equivalent and works everywhere
        if key not in sphash:
            packages_to_install.append(p['package_id'])

    if not packages_to_install:
        # We already have these packages installed
        log_debug(4, "No packages needed to be installed")
        return action_id

    log_debug(4, "Scheduling package install action")
    new_action_id = schedule_package_install(server_id, action_id, scheduler,
        packages_to_install)
    return new_action_id
_query_lookup_subscribed_server_channels = rhnSQL.Statement("""
select sc.channel_id,
case when c.parent_channel is not null then 0 else 1 end is_base_channel
from rhnServerChannel sc, rhnChannel c
where sc.server_id = :server_id
and sc.channel_id = c.id
""")
_query_lookup_unsubscribed_server_channels = rhnSQL.Statement("""
select c.id
from
-- Get all the channels available to this org
( select cfm.channel_id
from rhnChannelFamilyMembers cfm,
rhnPrivateChannelFamily pcf
where pcf.org_id = :org_id
and pcf.channel_family_id = cfm.channel_family_id
and pcf.current_members < coalesce(pcf.max_members,
pcf.current_members + 1)
union
select cfm.channel_id
from rhnChannelFamilyMembers cfm,
rhnPublicChannelFamily pcf
where pcf.channel_family_id = cfm.channel_family_id) ac,
rhnChannel c
where c.parent_channel = :base_channel_id
and c.id = ac.channel_id
and not exists (
select 1
from rhnServerChannel
where server_id = :server_id
and channel_id = c.id)
""")
def _subscribe_server_to_capable_channels(server_id, scheduler, capability):
    """Find a channel visible to the server that provides `capability`
    and make sure the server is subscribed to it.

    Channels the server is already subscribed to are checked first.
    Returns the provider packages of the chosen channel, or None when no
    channel provides the capability.  Raises MissingBaseChannelError if
    the server has no base channel.
    """
    log_debug(4, server_id, scheduler, capability)
    # Look through the channels this server is already subscribed to
    h = rhnSQL.prepare(_query_lookup_subscribed_server_channels)
    h.execute(server_id=server_id)
    base_channel_id = None
    channels = []
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        channel_id = row['channel_id']
        if row['is_base_channel']:
            base_channel_id = channel_id
        # The 1 flags "already subscribed"
        channels.append((channel_id, 1))
    if base_channel_id is None:
        raise MissingBaseChannelError()
    org_id = rhnSQL.Table('rhnServer', 'id')[server_id]['org_id']
    # Get the child channels this system is *not* subscribed to
    h = rhnSQL.prepare(_query_lookup_unsubscribed_server_channels)
    h.execute(server_id=server_id, org_id=org_id,
        base_channel_id=base_channel_id)
    l = map(lambda x: (x['id'], 0), h.fetchall_dict() or [])
    channels.extend(l)
    # We now have a list of channels; look for one that provides the
    # capability
    for channel_id, is_subscribed in channels:
        log_debug(5, "Checking channel:", channel_id, "; subscribed:",
            is_subscribed)
        packages = _channel_provides_capability(channel_id, capability)
        if packages:
            if is_subscribed:
                log_debug(4, "Already subscribed; found packages", packages)
                return packages
            # Try to subscribe to it
            try:
                rhnChannel._subscribe_sql(server_id, channel_id, 0)
            except rhnChannel.SubscriptionCountExceeded:
                # Try another one
                continue
            log_debug(4, "Subscribed to", channel_id,
                "Found packages", packages)
            # We subscribed to this channel - we're done
            return packages
    # No channels provide this capability - we're done
    log_debug(4, "No channels to provide capability", capability)
    return None
_query_channel_provides_capability = rhnSQL.Statement("""
select distinct pp.package_id, pn.name, pe.version, pe.release, pe.epoch
from rhnChannelNewestPackage cnp,
rhnPackageProvides pp,
rhnPackageCapability pc,
rhnPackageName pn,
rhnPackageEVR pe
where cnp.channel_id = :channel_id
and cnp.package_id = pp.package_id
and pp.capability_id = pc.id
and pc.name = :capability
and cnp.name_id = pn.id
and cnp.evr_id = pe.id
""")
def _channel_provides_capability(channel_id, capability):
    """Return the newest packages in `channel_id` providing `capability`
    as a list of dicts (package_id/name/version/release/epoch); falsy
    when nothing provides it."""
    log_debug(4, channel_id, capability)
    h = rhnSQL.prepare(_query_channel_provides_capability)
    h.execute(channel_id=channel_id, capability=capability)
    # The old `if not ret: return ret` branch was dead code: both paths
    # returned the same value
    return h.fetchall_dict()
_query_insert_action_packages = rhnSQL.Statement("""
insert into rhnActionPackage
(id, action_id, name_id, evr_id, package_arch_id, parameter)
select sequence_nextval('rhn_act_p_id_seq'), :action_id, name_id, evr_id,
package_arch_id, 'upgrade'
from rhnPackage
where id = :package_id
""")
def schedule_package_install(server_id, action_id, scheduler, packages):
    """Schedule a packages.update action (chained after `action_id`)
    upgrading the given package ids; returns the new action id, or the
    original `action_id` when `packages` is empty."""
    if not packages:
        # Nothing to do
        return action_id
    new_action_id = rhnAction.schedule_server_action(
        server_id, action_type='packages.update',
        action_name="Package update to enable configuration deployment",
        delta_time=0, scheduler=scheduler, prerequisite=action_id,
    )
    # Add entries to rhnActionPackage
    action_ids = [ new_action_id ] * len(packages)
    h = rhnSQL.prepare(_query_insert_action_packages)
    h.executemany(action_id=action_ids, package_id=packages)
    return new_action_id
def __execute_many(cursor, array, col_names, **kwargs):
    """ Execute the cursor, with arguments extracted from the array
    The array is converted into a hash having col_names as keys, and adds
    whatever kwarg was specified too.

    :param cursor: prepared rhnSQL cursor supporting executemany
    :param array: sequence of rows; each row's values are bound, in
        order, to the names in col_names
    :param kwargs: extra bind variables, replicated once per row
    """
    linecount = len(array)
    if not linecount:
        return
    # Transpose the array into a hash with col_names as keys
    params = rhnLib.transpose_to_hash(array, col_names)
    for k, v in kwargs.items():
        params[k] = [ v ] * linecount
    # apply(f, (), params) is exactly f(**params); apply() is long
    # deprecated and was removed in python 3
    cursor.executemany(**params)
def _packages_from_cursor(cursor):
    """Drain `cursor` and return [(name, version, release, epoch), ...],
    skipping the gpg-pubkey pseudo-package (too weird to schedule as a
    package delta)."""
    result = []
    row = cursor.fetchone_dict()
    while row:
        name = row['name']
        if name != 'gpg-pubkey':
            result.append((name, row['version'], row['release'],
                           row['epoch']))
        row = cursor.fetchone_dict()
    return result
_query_lookup_pending_kickstart_sessions = rhnSQL.Statement("""
select ks.id, ks.action_id, NULL other_server_id
from rhnKickstartSessionState kss,
rhnKickstartSession ks
where (
(ks.old_server_id = :server_id and ks.new_server_id is null)
or ks.new_server_id = :server_id
)
and ks.state_id = kss.id
and kss.label not in ('complete', 'failed')
and (:ks_session_id is null or ks.id != :ks_session_id)
""")
_query_terminate_pending_kickstart_sessions = rhnSQL.Statement("""
update rhnKickstartSession
set action_id = NULL,
state_id = :state_id
where id = :kickstart_session_id
""")
def terminate_kickstart_sessions(server_id):
    """Mark as 'failed' every pending kickstart session for this server
    other than the one the current registration token belongs to, and
    invalidate their pending actions.

    Returns a list of (summary, details) history entries describing the
    cancellations.
    """
    log_debug(3, server_id)
    history = []
    tokens_obj = rhnFlags.get('registration_token')
    current_ks_session_id = tokens_obj.get_kickstart_session_id()
    # ks_session_id can be null
    h = rhnSQL.prepare(_query_lookup_pending_kickstart_sessions)
    h.execute(server_id=server_id, ks_session_id=current_ks_session_id)
    log_debug(4, "current_ks_session_id", current_ks_session_id)
    ks_session_ids = []
    action_ids = []
    while 1:
        row = h.fetchone_dict()
        if not row:
            break
        ks_session_ids.append(row['id'])
        action_ids.append(row['action_id'])
    if not ks_session_ids:
        # Nothing to do
        log_debug(4, "Nothing to do", server_id, current_ks_session_id)
        return []
    ks_session_table = rhnSQL.Table('rhnKickstartSessionState', 'label')
    state_id_failed = ks_session_table['failed']['id']
    state_ids = [state_id_failed] * len(ks_session_ids)
    # Add a history item
    for ks_session_id in ks_session_ids:
        log_debug(4, "Adding history entry for session id", ks_session_id)
        history.append(("Kickstart session canceled",
            "A kickstart session for this system was canceled because "
            "the system was re-registered with token <strong>%s</strong>" %
            tokens_obj.get_names()))
    h = rhnSQL.prepare(_query_terminate_pending_kickstart_sessions)
    params = {
        'kickstart_session_id' : ks_session_ids,
        'state_id'             : state_ids,
    }
    # Terminate pending actions
    log_debug(4, "Terminating sessions", params)
    h.execute_bulk(params)
    # Invalidate pending actions
    for action_id in action_ids:
        if action_id is None:
            continue
        rhnAction.invalidate_action(server_id, action_id)
    return history
def get_kickstart_profile_package_profile(kickstart_session_id):
    """ Fetches the package profile from the kickstart profile (Not the session)

    Returns a list of (name, version, release, epoch) tuples (see
    _packages_from_cursor).
    """
    h = rhnSQL.prepare("""
    select pn.name, pe.version, pe.release, pe.epoch, pa.label
      from rhnKickstartSession ks,
           rhnKickstartDefaults kd,
           rhnServerProfilePackage spp,
           rhnPackageName pn,
           rhnPackageEVR pe,
           rhnPackageArch pa
     where ks.id = :kickstart_session_id
       and kd.server_profile_id = spp.server_profile_id
       and spp.name_id = pn.id
       and spp.evr_id = pe.id
       and spp.package_arch_id = pa.id
       and kd.kickstart_id = ks.kickstart_id
    """)
    h.execute(kickstart_session_id=kickstart_session_id)
    return _packages_from_cursor(h)
def get_kisckstart_session_package_profile(kickstart_session_id):
    """ Fetches the package profile from the kickstart session

    NOTE(review): the function name misspells 'kickstart'; kept as-is
    for backward compatibility with existing callers.
    """
    h = rhnSQL.prepare("""
    select pn.name, pe.version, pe.release, pe.epoch, pa.label
      from rhnKickstartSession ks,
           rhnServerProfilePackage spp,
           rhnPackageName pn,
           rhnPackageEVR pe,
           rhnPackageArch pa
     where ks.id = :kickstart_session_id
       and ks.server_profile_id = spp.server_profile_id
       and spp.name_id = pn.id
       and spp.evr_id = pe.id
       and spp.package_arch_id = pa.id
    """)
    h.execute(kickstart_session_id=kickstart_session_id)
    return _packages_from_cursor(h)
def get_server_package_profile(server_id):
    """Return the server's installed-package profile as a list of
    (name, version, release, epoch) tuples."""
    # XXX misa 2005-05-25 May need to look at package arches too
    h = rhnSQL.prepare("""
    select pn.name, pe.version, pe.release, pe.epoch, pa.label
      from rhnServerPackage sp,
           rhnPackageName pn,
           rhnPackageEVR pe,
           rhnPackageArch pa
     where sp.server_id = :server_id
       and sp.name_id = pn.id
       and sp.evr_id = pe.id
       and sp.package_arch_id = pa.id
    """)
    h.execute(server_id=server_id)
    return _packages_from_cursor(h)
_query_get_kickstart_session_info = rhnSQL.Statement("""
select org_id, scheduler, deploy_configs, virtualization_type
from rhnKickstartSession
where id = :kickstart_session_id
""")
def get_kickstart_session_info(kickstart_session_id, server_id):
    """Fetch org_id/scheduler/deploy_configs/virtualization_type for a
    kickstart session; raises rhnException when the session is missing.
    `server_id` is only used in the error message."""
    cursor = rhnSQL.prepare(_query_get_kickstart_session_info)
    cursor.execute(kickstart_session_id=kickstart_session_id)
    info = cursor.fetchone_dict()
    if info:
        return info
    raise rhnException("Could not fetch kickstart session id %s "
        "for server %s" % (kickstart_session_id, server_id))
_query_lookup_ks_server_profile = rhnSQL.Statement("""
select kss.server_profile_id
from rhnServerProfileType spt,
rhnServerProfile sp,
rhnKickstartSession kss
where kss.id = :ks_session_id
and kss.server_profile_id = sp.id
and sp.profile_type_id = spt.id
and spt.label = :profile_type_label
""")
_query_delete_server_profile = rhnSQL.Statement("""
delete from rhnServerProfile where id = :server_profile_id
""")
def cleanup_profile(server_id, action_id, ks_session_id, action_status):
    """After a successfully completed action (status 2), delete the
    temporary 'sync_profile' server profile attached to the kickstart
    session; no-op in every other case."""
    if ks_session_id is None:
        log_debug(4, "No kickstart session")
        return
    if action_status != 2:
        log_debug(4, "Action status: %s; nothing to do" % action_status)
        return
    h = rhnSQL.prepare(_query_lookup_ks_server_profile)
    h.execute(ks_session_id=ks_session_id, profile_type_label='sync_profile')
    row = h.fetchone_dict()
    if not row:
        log_debug(4, "No server profile of the right type found; nothing to do")
        return
    server_profile_id = row['server_profile_id']
    if server_profile_id is None:
        log_debug(4, "No server profile associated with this kickstart session")
        return
    # There is an "on delete cascade" constraint on
    # rhnKickstartSession.server_profile_id and on
    # rhnServerProfilePacakge.server_profile_id
    h = rhnSQL.prepare(_query_delete_server_profile)
    h.execute(server_profile_id=server_profile_id)
| dmacvicar/spacewalk | backend/server/rhnServer/server_kickstart.py | Python | gpl-2.0 | 28,246 | 0.002868 |
"""
Custom importers for addresses
"""
# Django
from django.conf import settings
# Standard Library
import csv
import re
# Third Party
from localflavor.us.us_states import STATE_CHOICES
from smart_open.smart_open_lib import smart_open
# MuckRock
from muckrock.communication.models import Address
# columns
AGENCY_PK = 0
AGENCY_NAME = 1
ADDRESS_TYPE = 2
ADDRESS_PK = 3
ORIG_ADDRESS = 4
STREET = 5
CITY = 6
STATE = 7
ZIP = 8
LONG = 9
LAT = 10
SUITE = 11
AGENCY_OVERRIDE = 12
ATTN_OVERRIDE = 13
STATES = {s[0] for s in list(STATE_CHOICES)}
p_zip = re.compile(r"^\d{5}(?:-\d{4})?$")
# pylint: disable=broad-except
def import_addresses(file_name):
    """Import addresses from spreadsheet

    Reads a CSV (header row skipped) from S3 at `file_name` and, for
    each row, updates the existing Address record identified by the
    ADDRESS_PK column with the street/city/state/zip/point/override
    fields.  Validation problems and missing records are printed, not
    raised; invalid state/zip values are still saved as-is.
    """
    # pylint: disable=too-many-locals
    s3_path = f"s3://{settings.AWS_MEDIA_BUCKET_NAME}/{file_name}"
    # NOTE(review): assumes smart_open yields a text-mode file usable by
    # csv.reader — confirm the default mode for this smart_open version
    with smart_open(s3_path) as tmp_file:
        reader = csv.reader(tmp_file)
        # discard header row
        next(reader)
        for i, row in enumerate(reader):
            # progress indicator for large imports
            if i % 1000 == 0:
                print(i)
            if row[STATE] and row[STATE] not in STATES:
                print('Illegal State "{}"'.format(row[STATE]))
            if row[ZIP] and not p_zip.match(row[ZIP]):
                print('Illegal Zip "{}"'.format(row[ZIP]))
            try:
                address = Address.objects.get(pk=row[ADDRESS_PK])
            except Address.DoesNotExist:
                print("Address {} does not exist".format(row[ADDRESS_PK]))
            else:
                address.street = row[STREET].strip()
                address.suite = row[SUITE].strip()
                address.city = row[CITY].strip()
                address.state = row[STATE].strip()
                address.zip_code = row[ZIP].strip()
                # GeoJSON point; coordinates stay strings as read from
                # the sheet
                address.point = {
                    "type": "Point",
                    "coordinates": [row[LONG].strip(), row[LAT].strip()],
                }
                address.agency_override = row[AGENCY_OVERRIDE].strip()
                address.attn_override = row[ATTN_OVERRIDE].strip()
                try:
                    address.save()
                except Exception as exc:
                    print("Data Error", exc, row[ADDRESS_PK])
                    print(row)
print(row)
| MuckRock/muckrock | muckrock/communication/importers.py | Python | agpl-3.0 | 2,224 | 0 |
__title__ = 'fobi.contrib.plugins.form_elements.fields.ip_address'
__author__ = 'Artur Barseghyan <[email protected]>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('default_app_config', 'UID',)

# Django app config loaded when this plugin package is installed
default_app_config = 'fobi.contrib.plugins.form_elements.fields.' \
                     'ip_address.apps.Config'

# Unique identifier of this form-element plugin
UID = 'ip_address'
| mansonul/events | events/contrib/plugins/form_elements/fields/ip_address/__init__.py | Python | mit | 382 | 0 |
from sklearn2sql_heroku.tests.regression import generic as reg_gen

# Smoke test: generate and exercise SQLite SQL for a
# GradientBoostingRegressor trained on the RandomReg_500 dataset.
reg_gen.test_model("GradientBoostingRegressor" , "RandomReg_500" , "sqlite")
| antoinecarme/sklearn2sql_heroku | tests/regression/RandomReg_500/ws_RandomReg_500_GradientBoostingRegressor_sqlite_code_gen.py | Python | bsd-3-clause | 146 | 0.013699 |
def copy_files(src, target_node, parent=None, name=None):
    """Copy the files from src to the target node

    Recursively clones `src` (file or folder) onto `target_node`,
    carrying over versions and osfstorage metadata records.

    :param Folder src: The source to copy children from
    :param Node target_node: The node to copy files to
    :param Folder parent: The parent of to attach the clone of src to, if applicable
    :param str name: Optional new name for the clone; defaults to src's name
    :return: the cloned node
    """
    assert not parent or not parent.is_file, 'Parent must be a folder'
    cloned = src.clone()
    cloned.parent = parent
    cloned.target = target_node
    cloned.name = name or cloned.name
    cloned.copied_from = src
    cloned.save()

    if src.is_file and src.versions.exists():
        fileversions = src.versions.select_related('region').order_by('-created')
        most_recent_fileversion = fileversions.first()
        # When crossing storage regions, re-home the newest version in
        # the target node's region instead of sharing it
        if most_recent_fileversion.region and most_recent_fileversion.region != target_node.osfstorage_region:
            # add all original version except the most recent
            cloned.versions.add(*fileversions[1:])
            # create a new most recent version and update the region before adding
            new_fileversion = most_recent_fileversion.clone()
            new_fileversion.region = target_node.osfstorage_region
            new_fileversion.save()
            cloned.versions.add(new_fileversion)
        else:
            cloned.versions.add(*src.versions.all())

        # copy over file metadata records
        if cloned.provider == 'osfstorage':
            for record in cloned.records.all():
                record.metadata = src.records.get(schema__name=record.schema.name).metadata
                record.save()

    if not src.is_file:
        # Folders recurse into their children, attaching them to the clone
        for child in src.children:
            copy_files(child, target_node, parent=cloned)

    return cloned
| pattisdr/osf.io | website/files/utils.py | Python | apache-2.0 | 1,720 | 0.002907 |
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgwidgetprogress"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osgDB
from osgpypp import osgWidget
# Translated from file 'osgwidgetprogress.cpp'
# -*-c++-*- osgWidget - Code by: Jeremy Moles (cubicool) 2007-2008
# $Id$
#include <osgDB/ReadFile>
#include <osgWidget/Util>
#include <osgWidget/WindowManager>
#include <osgWidget/Canvas>
MASK_2D = 0xF0000000
class UpdateProgressNode (osg.NodeCallback) :
    # NOTE(review): incomplete machine translation of the original C++
    # example — the constructor and operator() below are NOT valid
    # Python and must be ported by hand before this class can run.
    start = float()
    done = float()
    UpdateProgressNode():
    start (0.0),
    done (5.0)
    virtual void operator()(osg.Node* node, osg.NodeVisitor* nv)
        fs = nv.getFrameStamp()
        t = fs.getSimulationTime()
        if start == 0.0 : start = t
        width = ((t - start) / done) * 512.0
        percent = (width / 512.0) * 100.0
        if width < 1.0 or width > 512.0 : return
        window = dynamic_cast<osgWidget.Window*>(node)
        if not window : return
        w = window.getByName("pMeter")
        l = dynamic_cast<osgWidget.Label*>(window.getByName("pLabel"))
        if not w or not l : return
        w.setWidth(width)
        w.setTexCoordRegion(0.0, 0.0, width, 64.0)
        ss = std.ostringstream()
        ss, osg.round(percent), "% Done"
        l.setLabel(ss.str())
def main(argv):
    # NOTE(review): auto-translated example; some names (osgViewer,
    # osgWidget constants) are not imported at the top of this file —
    # needs manual tuning before it runs (see the header comment).
    viewer = osgViewer.Viewer()

    # 2D window manager sized to the screen, picking on the 2D mask
    wm = osgWidget.WindowManager(
        viewer,
        1280.0,
        1024.0,
        MASK_2D,
        osgWidget.WindowManager.WM_PICK_DEBUG
    )

    canvas = osgWidget.Canvas("canvas")
    pOutline = osgWidget.Widget("pOutline", 512.0, 64.0)
    pMeter = osgWidget.Widget("pMeter", 0.0, 64.0)
    pLabel = osgWidget.Label("pLabel", "0% Done")

    # Outline sits above the fill meter; label on top
    pOutline.setImage("osgWidget/progress-outline.png", True)
    pOutline.setLayer(osgWidget.Widget.LAYER_MIDDLE, 2)

    pMeter.setImage("osgWidget/progress-meter.png")
    pMeter.setColor(0.7, 0.1, 0.1, 0.7)
    pMeter.setLayer(osgWidget.Widget.LAYER_MIDDLE, 1)

    pLabel.setFont("fonts/VeraMono.ttf")
    pLabel.setFontSize(20)
    pLabel.setFontColor(1.0, 1.0, 1.0, 1.0)
    pLabel.setSize(512.0, 64.0)
    pLabel.setLayer(osgWidget.Widget.LAYER_MIDDLE, 3)

    canvas.setOrigin(300.0, 300.0)
    canvas.addWidget(pMeter, 0.0, 0.0)
    canvas.addWidget(pOutline, 0.0, 0.0)
    canvas.addWidget(pLabel, 0.0, 0.0)
    canvas.getBackground().setColor(0.0, 0.0, 0.0, 0.0)
    # Animate the meter every frame via the update callback
    canvas.setUpdateCallback(UpdateProgressNode())

    wm.addChild(canvas)

    return osgWidget.createExample(viewer, wm, osgDB.readNodeFile("cow.osgt"))

if __name__ == "__main__":
    main(sys.argv)
| JaneliaSciComp/osgpyplusplus | examples/rough_translated1/osgwidgetprogress.py | Python | bsd-3-clause | 2,695 | 0.014842 |
# common.py: global variables
# arch-tag: global variables
# author: Alberto Griggio <[email protected]>
# license: GPL
import wx
import os, sys, locale, Image
import threading
__version__ = '0.6.1'
if os.path.expanduser('~') != '~':
bookmarks_file = os.path.expanduser('~/.cornice/bookmarks')
config_file = os.path.expanduser('~/.cornice/config')
confdir = os.path.expanduser('~/.cornice')
if not os.path.exists(confdir):
try: os.mkdir(confdir)
except (IOError, OSError): pass # this is not fatal...
else:
confdir = os.path.dirname(sys.argv[0])
bookmarks_file = os.path.join(confdir, 'bookmarks')
config_file = os.path.join(confdir, 'config')
config = None # ConfigParser instance used to load/store options
try:
interpolations = [ Image.NEAREST, Image.BILINEAR,
Image.BICUBIC, Image.ANTIALIAS ]
except AttributeError:
# probably PIL < 1.1.3
interpolations = [ Image.NEAREST, Image.BILINEAR,
Image.BICUBIC, Image.BICUBIC ]
icons_and_colors = {
'GIF': ('file_gif.xpm', (208, 232, 208)),
'ICO': ('file_ico.xpm', (249, 240, 208)),
'JPEG': ('file_jpg.xpm', (224, 232, 192)),
'PCX': ('file_pcx.xpm', (216, 231, 216)),
'PNG': ('file_png.xpm', (224, 216, 208)),
'PNM': ('file_pnm.xpm', (218, 237, 192)),
'PSD': ('file_psd.xpm', (255, 255, 223)),
'TIF': ('file_tif.xpm', (200, 200, 213)),
'XBM': ('file_xbm.xpm', (224, 224, 224)),
'XCF': ('file_xcf.xpm', (191, 239, 233)),
'XPM': ('file_xpm.xpm', (222, 217, 234)),
'BMP': ('file_bmp.xpm', (229, 213, 213)),
}
default_icon_and_color = ('file_image.xpm', (240, 240, 240))
unknown_icon_and_color = ('file_unknown.xpm', (255, 255, 255))
# sort indexes
SORT_NAME = 0
SORT_DATE = 1
SORT_SIZE = 2
SORT_TYPE = 3
sort_index = 0
reverse_sort = False
def format_size_str(number):
    """Return a byte count as a human-readable, locale-aware string,
    e.g. 2048 -> '2.0 KB'."""
    sf = ['bytes', 'KB', 'MB', 'GB']
    i = 0
    # Stop at the last suffix: the old `i < 4` bound let i run past the
    # end of sf and raise IndexError for values above ~1000 GB
    while number > 1000 and i < len(sf) - 1:
        number = number / 1024.0
        i += 1
    # locale.format was deprecated and removed (python 3.12);
    # format_string behaves identically for a simple '%.1f' spec
    return '%s %s' % (locale.format_string('%.1f', number), sf[i])
has_alpha = wx.VERSION[:3] >= (2, 5, 2) and 'gtk1' not in wx.PlatformInfo
if wx.Platform == '__WXGTK__':
_mask_table = [0]*128 + [255]*128
else:
_mask_table = [255]*128 + [0]*128
def get_mask(pil_image):
    """\
    If the image has some transparency, returns a wx.Mask object used to mask
    the transparent pixels, otherwise returns None
    The function should always be called with only one argument

    Handles two cases: RGBA images on wx builds without native alpha
    (threshold the alpha channel at 128), and paletted images with a
    'transparency' palette index.  _mask_table flips polarity per
    platform (see its definition above).
    """
    if pil_image.mode == 'RGBA' and not has_alpha:
        alpha = pil_image.split()[3]
        mask = wx.EmptyImage(*alpha.size)
        #mask.SetData(alpha.convert('1').convert('RGB').tostring())
        mask.SetData(alpha.point(_mask_table, '1').convert('RGB').tostring())
        return wx.Mask(wx.BitmapFromImage(mask, 1))
    elif pil_image.mode == 'P':
        # let's try to get the transparency value...
        transparency = pil_image.info.get('transparency')
        if transparency:
##             mode, data = pil_image.palette.getdata()
##             if 0: #mode[:3] == 'RGB':
##                 if mode == 'RGBA': n = 4
##                 else: n = 3
##                 rgb = data[transparency*n : transparency*n + n]
##                 mask = wx.EmptyImage(*pil_image.size)
##                 mask.SetData(pil_image.convert('RGB').tostring())
##                 color = wx.Colour(*[ord(c) for c in rgb[:3]])
##                 if wx.VERSION[:3] >= (2, 5, 2):
##                     return wx.Mask(mask.ConvertToBitmap(), color)
##                 else:
##                     return wx.MaskColour(mask.ConvertToBitmap(), color)
##             else:
            # Build a 1-bit image: transparent palette entry gets one
            # color, everything else the opposite (platform dependent)
            if wx.Platform != '__WXGTK__': c1, c2 = 255, 0
            else: c1, c2 = 0, 255
            palette = [c1] * 768 #[255] * 768
            palette[transparency*3 : transparency*3 + 3] = [c2, c2, c2]#[0, 0, 0]
            pil_image = pil_image.copy()
            pil_image.putpalette(palette)
            mask = wx.EmptyImage(*pil_image.size)
            mask.SetData(pil_image.convert('1').convert('RGB').tostring())
            return wx.Mask(wx.BitmapFromImage(mask, 1))
    return None
# custom event to update the menubar when the sorting of the PictureList
# changes
_SORT_CHANGED_EVENT = wx.NewEventType()
class _SortChangedEvent(wx.PyEvent):
    """wx event carrying a snapshot of the current sort settings.

    The module-level ``sort_index`` and ``reverse_sort`` globals are read
    at construction time so that handlers see a consistent pair even if
    the globals change while the event is queued.
    """
    def __init__(self):
        wx.PyEvent.__init__(self)
        self.SetEventType(_SORT_CHANGED_EVENT)
        # snapshot of the module-level sort state at post time
        self.sort_index = sort_index
        self.reverse_sort = reverse_sort
# end of class _SortChangedEvent
# Window that receives _SortChangedEvent notifications; set once by
# EVT_SORT_CHANGED and used by send_sort_changed_event below.
_win_to_post = None
def EVT_SORT_CHANGED(win, func):
    # Register *func* as the handler for sort-changed events on *win* and
    # remember the window so send_sort_changed_event() knows where to post.
    global _win_to_post; _win_to_post = win
    win.Connect(-1, -1, _SORT_CHANGED_EVENT, func)
def send_sort_changed_event():
    # Post a fresh snapshot of the sort settings to the registered window.
    wx.PostEvent(_win_to_post, _SortChangedEvent())
# Lock protecting the "application is exiting" flag, which is read and
# written from multiple threads.
_exiting_lock = threading.RLock()
_is_exiting = False
def exiting(val=None):
    """Thread-safe accessor for the global "application is exiting" flag.

    Called with no argument, returns the current value of the flag.
    Called with a non-None *val*, sets the flag to *val* and returns the
    new value.

    Improvement: the lock is now held via a ``with`` block, so it is
    released even if an exception occurs while the flag is updated (the
    old explicit acquire()/release() pair leaked the lock on error).
    """
    global _is_exiting
    with _exiting_lock:
        if val is not None:
            _is_exiting = val
        return _is_exiting
# Hooks installed by the application at startup:
#   exit_app        -- called to close the application nicely
#   really_exit_app -- unconditional/forced shutdown fallback
exit_app = None # reference to a function called to exit the app nicely
really_exit_app = None
# Cached name of the configured theme directory; resolved lazily by the
# theme helpers below (None means "not looked up yet").
_theme_dir = None
def load_from_theme(resource):
    """Load the XRC *resource* file from the active theme directory.

    The theme directory is resolved (and cached in ``_theme_dir``) from the
    'cornice'/'theme' configuration entry.  It falls back to the bundled
    ``icons`` directory when the configured theme is missing or does not
    contain ``toolbars.xrc``.

    NOTE(review): the process working directory is changed temporarily so
    relative paths inside the XRC resolve against the theme directory;
    this is not thread-safe -- confirm it is only called from the GUI thread.
    """
    global _theme_dir
    if _theme_dir is None:
        _theme_dir = config.get('cornice', 'theme', '')
    d = os.path.join(confdir, _theme_dir)
    if not os.path.isdir(d):
        d = os.path.join(os.getcwd(), 'icons', _theme_dir)
        if not os.path.isdir(d) or \
           not os.path.isfile(os.path.join(d, 'toolbars.xrc')):
            d = os.path.join(os.getcwd(), 'icons')
    old = os.getcwd()
    #resource = os.path.abspath(resource)
    os.chdir(d)
    res = wx.xrc.XmlResource_Get()
    res.Load(resource)
    os.chdir(old)
def get_theme_icon():
    """Return the application icon (a wx.Icon) for the active theme.

    Looks for 'icon.ico' (Windows) or 'icon.png' (other platforms), first
    under ``confdir/<theme>``, then ``./icons/<theme>``, finally falling
    back to the stock ``./icons`` directory.
    """
    global _theme_dir
    if _theme_dir is None:
        _theme_dir = config.get('cornice', 'theme', '')
    if wx.Platform == '__WXMSW__':
        name = 'icon.ico'
    else:
        name = 'icon.png'
    d = os.path.join(confdir, _theme_dir)
    if not os.path.isdir(d):
        d = os.path.join(os.getcwd(), 'icons', _theme_dir)
        if not os.path.isdir(d) or \
           not os.path.isfile(os.path.join(d, name)):
            d = os.path.join(os.getcwd(), 'icons')
    if wx.Platform == '__WXMSW__':
        icon = wx.Icon(os.path.join(d, name), wx.BITMAP_TYPE_ICO)
    else:
        # non-MSW ports have no native .ico support: build the icon from PNG
        icon = wx.EmptyIcon()
        bmp = wx.Bitmap(os.path.join(d, name), wx.BITMAP_TYPE_PNG)
        icon.CopyFromBitmap(bmp)
    return icon
def get_bitmap_for_theme(imagepath):
    """Search the theme and stock icon directories for *imagepath*.

    When *imagepath* carries no extension, '.png' and '.xpm' are tried in
    that order.  Returns the first wx.Bitmap that loads successfully, or
    None when no candidate file yields a usable bitmap.
    """
    global _theme_dir
    if _theme_dir is None:
        _theme_dir = config.get('cornice', 'theme', '')
    base, extension = os.path.splitext(imagepath)
    if extension:
        candidates = [extension]
    else:
        candidates = ['.png', '.xpm']
    search_dirs = [os.path.join(os.getcwd(), 'icons', _theme_dir),
                   os.path.join(os.getcwd(), 'icons')]
    log_null = wx.LogNull()  # silence wx error popups while probing files
    for directory in search_dirs:
        for suffix in candidates:
            try:
                candidate = wx.Bitmap(os.path.join(directory, base + suffix),
                                      wx.BITMAP_TYPE_ANY)
            except:
                continue
            if candidate.Ok():
                return candidate
    return None
# Workaround for old wxMSW builds (wx < 2.5), where device contexts had to be
# destroyed explicitly; on every other platform/version this is a no-op.
if wx.Platform != '__WXMSW__' or wx.VERSION[:2] >= (2, 5):
    def delete_dc(dc):
        pass
else:
    def delete_dc(dc):
        dc.Destroy()
def get_image_info(path):
    """Return display information for the image at *path*.

    The result is a list of four entries:
    [basename, 'YYYY/MM/DD HH:MM' modification time, size in bytes,
     'WxHxDEPTH FORMAT'].  The colour depth is reported as 1 (bilevel),
    256 (palette) or 16M (everything else).
    """
    import fileops, time
    pi = fileops.get_path_info(path)
    im = Image.open(fileops.open(path))
    w, h = im.size
    if im.mode == '1': sdepth = '1'
    elif im.mode == 'P': sdepth = '256'
    else: sdepth = '16M'
    info = [
        fileops.basename(path),
        time.strftime('%Y/%m/%d %H:%M',
                      time.localtime(pi.mtime)),
        pi.size,
        '%sx%sx%s %s' % (w, h, sdepth, im.format)
        ]
    return info
def create_thumbnail(pil_image, thumb_size):
    """\
    Returns a bitmap with the thumbnail.

    *pil_image* is resized in place (PIL's thumbnail() preserves aspect
    ratio) and converted to a wx bitmap.  Transparency is carried over
    either as a real alpha channel (when the wx build supports it) or as
    a 1-bit mask computed by get_mask().
    """
    # NEAREST is the cheapest resampling filter; adequate for small previews.
    pil_image.thumbnail(thumb_size, Image.NEAREST)
    mask = get_mask(pil_image)
    img = wx.EmptyImage(*pil_image.size)
    if has_alpha and pil_image.mode == 'RGBA':
        # wx supports per-pixel alpha: hand the channel over directly
        alpha = pil_image.split()[3].tostring()
        img.SetData(pil_image.convert('RGB').tostring())
        img.SetAlphaData(alpha)
    elif pil_image.mode != 'RGB':
        pil_image = pil_image.convert('RGB')
        img.SetData(pil_image.tostring())
    else:
        img.SetData(pil_image.tostring())
    bmp = wx.BitmapFromImage(img)
    if mask is not None:
        # 1-bit transparency fallback computed above
        bmp.SetMask(mask)
    return bmp
import locale
def wxstr(s):
    """Convert *s* to a unicode string suitable for wx widgets.

    Non-string values are converted with str(); byte strings are decoded
    using the preferred locale encoding, silently dropping undecodable
    bytes.  NOTE(review): Python 2 only -- relies on ``basestring`` and on
    ``decode`` being available on the value.
    """
    if not isinstance(s, basestring):
        return str(s)
    return s.decode(locale.getpreferredencoding(), 'ignore')
| labordus/cornice | common.py | Python | gpl-2.0 | 8,653 | 0.008321 |
#!/usr/bin/env python
###############################################################################
# $Id: hfa_read.py 32166 2015-12-13 19:29:52Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test basic read support for all datatypes from a HFA file.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# When imported build a list of units based on the files available.
gdaltest_list = []
# (filename, band number, expected band checksum, unused slot)
init_list = [
    ('byte.img', 1, 4672, None),
    ('int16.img', 1, 4672, None),
    ('uint16.img', 1, 4672, None),
    ('int32.img', 1, 4672, None),
    ('uint32.img', 1, 4672, None),
    ('float32.img', 1, 4672, None),
    ('float64.img', 1, 4672, None),
    ('utmsmall.img', 1, 50054, None),
    ('2bit_compressed.img', 1, 11918, None)]
for item in init_list:
    ut = gdaltest.GDALTest( 'HFA', item[0], item[1], item[2] )
    # NOTE(review): GDALTest() never returns None, so this guard looks like
    # dead boilerplate copied from a template -- confirm against other suites.
    if ut is None:
        print( 'HFA tests skipped' )
        sys.exit()
    gdaltest_list.append( (ut.testOpen, item[0]) )
if __name__ == '__main__':
    gdaltest.setup_run( 'hfa_read' )
    gdaltest.run_tests( gdaltest_list )
    gdaltest.summarize()
| nextgis-extra/tests | lib_gdal/gcore/hfa_read.py | Python | gpl-2.0 | 2,508 | 0.005183 |
#!/usr/bin/python2
'''
This script parses JPEG images of text documents to isolate and save images
of individual characters. The size of these output images in pixels is
specified by the parameters desired_height and desired_width.
The JPEG images are converted to grey scale using a parameter called
luminance_threshold to distinguish between light and dark pixels. Lines of
text are found by searching for rows that contain dark pixels, and
characters are found by searching for columns that contain dark pixels. Once
a character is found it is padded with blank rows and columns to obtain the
desired size. The images are saved using the filenames given in the XML file.
'''
# Set desired output image height and width in pixels
desired_height = 32
desired_width = 32
DEBUG = False
# NOTE(review): matplotlib, operator and re appear unused below -- presumably
# leftovers from earlier experiments; confirm before removing.
import matplotlib.pyplot as plot
import numpy as np
import operator
import sys
import re
import os
from PIL import Image
from xml.dom import minidom
# Input document scans.  Page order must match the order of the <image>
# entries in characters.xml so filenames line up with extracted glyphs.
jpg_list = [ 'characters-0.jpg', 'characters-1.jpg', 'characters-2.jpg',
             'characters-3.jpg', 'characters-4.jpg', 'characters-5.jpg',
             'characters-6.jpg', 'characters-7.jpg', 'characters-8.jpg',
             'characters-9.jpg', 'characters-10.jpg', 'characters-11.jpg',
             'characters-12.jpg', 'characters-13.jpg', 'characters-14.jpg',
             'characters-15.jpg', 'characters-16.jpg', 'characters-17.jpg',
             'characters-18.jpg', 'characters-19.jpg' ]
#jpg_list = [ 'debug_doc.jpg' ]
# Parse XML file for filenames to use when saving each character image
xmldoc = minidom.parse('characters.xml')
#xmldoc = minidom.parse('debug_doc.xml')
filelist = xmldoc.getElementsByTagName('image')
print len(filelist)
#for i in range(145):
  #print filelist[62*i].attributes['file'].value
# this counter gets used to select file names from an xml file
output_files_saved = 0
# Main extraction loop: for each scanned page, find lines of text (runs of
# rows containing dark pixels), then characters within each line (runs of
# columns containing dark pixels), pad each glyph to the desired size and
# save it under the filename given by the next <image> entry in the XML.
for jpg in jpg_list:
  print jpg
  im = Image.open(jpg)
  width, length = im.size
  if DEBUG:
    print "image size: ", im.size
    print "image mode: ", im.mode
    print im.size[1],im.size[0]
  # read pixel data from image into a numpy array
  if im.mode == 'L':
    pixels = np.array(list(im.getdata())).reshape(im.size[1],im.size[0])
  elif im.mode == 'RGB':
    pixels = np.array(list(im.convert('L').getdata())).reshape(im.size[1],
                                                               im.size[0])
  #im.show()
##############################################################################
# Removed all logic for determining the value to use to distinguish between
# light and dark pixels because this is a non-trivial challenge of its own and
# I want to get to generating a data set for OCR which I can do much faster by
# choosing the threshold manually.
##############################################################################
  luminance_threshold = 100
##############################################################################
# parse document for lines of text
##############################################################################
  row = 0
  while row < length:
    # Find the first row of pixels in next line of text by ignoring blank rows
    # of pixels which will have a non-zero product since white pixels have a
    # luminance value of 255
    #row_data = pixels[row * width : row * width + width]
    while (row < length and pixels[row,:].min() > luminance_threshold):
      row += 1
    first_row = row
    if DEBUG:
      print "the first row of pixels in the line of text is ", first_row
    # Find the last row of pixels in this line of text by counting rows with
    # dark pixels. These rows have a product of zero since the luminance value
    # of all dark pixels was set to zero
    while (row < length and pixels[row:row + 2,:].min() < luminance_threshold):
      row += 1
    last_row = row
    #if row < length:
      #last_row = row + 2 # this is a hack for Cochin font Q
      #row += 5 # this is a hack for Cochin font Q
    if DEBUG:
      print "the last row of pixels in the line of text is ", last_row
##############################################################################
# parse line of text for characters
##############################################################################
    if first_row < last_row:
      col = 0
      while col < width:
        # find first column of pixels in the next character by ignoring blank
        # cols of pixels
        while col < width and pixels[first_row:last_row,col].min() > luminance_threshold:
          col += 1
        first_col = col
        # find last column of pixels in the next character by counting columns
        # with dark pixels
        while col < width and \
              pixels[first_row:last_row,col:col + 5].min() < luminance_threshold:
          col += 1
        last_col = col
##############################################################################
# remove blank rows from the top and bottom of characters
##############################################################################
        if first_col < last_col:
          # remove blank rows from the top of the character
          r = first_row;
          while pixels[r,first_col:last_col].min() > luminance_threshold:
            r = r + 1;
          char_first_row = r;
          # remove blank rows from the bottom of the character
          r = last_row;
          while pixels[r,first_col:last_col].min() > luminance_threshold:
            r = r - 1;
          char_last_row = r + 1;
          if DEBUG:
            # isolate an image of this character
            character = im.crop([first_col, char_first_row, last_col,
                                 char_last_row])
            print "Character size after whitespace removal", character.size
            print first_col, first_row, last_col, last_row
            #character.show()
          # pad character width out to desired_width
          char_width = last_col - first_col
          if char_width > desired_width:
            print "Character is wider than ", desired_width
          else:
            # add the same number of blank columns to the left and right
            # NOTE: Python 2 integer division -- the /2 truncates
            first_col = first_col - (desired_width - char_width) / 2
            last_col = last_col + (desired_width - char_width) / 2
            # if the difference was odd we'll be short one column
            char_width = last_col - first_col
            if char_width < desired_width:
              last_col = last_col + 1
            # pad character height out to desired_height
            char_height = char_last_row - char_first_row
            if char_height > desired_height:
              print "Character is taller than ", desired_height
            else:
              # add the same number of blank rows to the left and right
              char_first_row = char_first_row - (desired_height - char_height) / 2
              char_last_row = char_last_row + (desired_height - char_height) / 2
              # if the difference was odd we'll be short one row
              char_height = char_last_row - char_first_row
              if char_height < desired_height:
                char_last_row = char_last_row + 1
              character = im.crop([first_col, char_first_row, last_col,
                                   char_last_row])
              if DEBUG:
                print "Character size after padding", character.size
                print first_col, char_first_row, last_col, char_last_row
                #character.show()
                #garbage = raw_input()
              # save image to filename specified in ground truth file
              filename = filelist[output_files_saved].attributes['file'].value
              directory = filename.split('/')[0]
              if not os.path.exists(directory):
                os.makedirs(directory)
              character.save(filename, "JPEG", quality=80)
              output_files_saved = output_files_saved + 1
print output_files_saved
| numenta/nupic.vision | src/nupic/vision/data/OCR/characters/parseJPG.py | Python | agpl-3.0 | 7,772 | 0.019429 |
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.preprocessing import sequence
from tensorflow.contrib.keras.python.keras.models import Model
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Activation
from tensorflow.contrib.keras.python.keras.layers import Input, Embedding
from tensorflow.contrib.keras.python.keras.layers import Conv1D, GlobalMaxPooling1D
from tensorflow.contrib.keras.python.keras.datasets import imdb
# set hyper parameters:
max_features = 5000   # vocabulary size: keep only the top-N frequent words
maxlen = 400          # pad/truncate every review to this many tokens
batch_size = 32
embedding_dims = 50   # dimensionality of the learned word embeddings
filters = 250         # number of 1-D convolution filters
kernel_size = 3       # convolution window width, in tokens
hidden_dims = 250     # width of the fully-connected layer
epochs = 5
def load_data():
  '''
  Load the IMDB sentiment dataset, keeping only the `max_features` most
  frequent words, and pad/truncate every review to `maxlen` tokens.
  Returns [x_train, y_train, x_test, y_test].
  '''
  print('Loading data...')
  (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
  print(len(x_train), 'train sequences')
  print(len(x_test), 'test sequences')
  print('Pad sequences (samples x time)')
  x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
  x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
  print('x_train shape:', x_train.shape)
  print('x_test shape:', x_test.shape)
  return [x_train, y_train, x_test, y_test]
def cnn_model_fn():
  ''' Build and compile the 1-D convolutional text classifier:
  embedding -> dropout -> conv1d -> global max pool -> dense -> sigmoid. '''
  print('Build model...')
  inputs = Input(shape=(maxlen,), dtype='int32') # a index sequence with lenght = maxlen
  x = Embedding( max_features,
                 embedding_dims,
                 input_length=maxlen)(inputs)
  x = Dropout(0.2)(x)
  x = Conv1D(filters,
             kernel_size,
             padding='valid',
             activation='relu',
             strides=1)(x)
  # keep only the strongest response of each filter over the sequence
  x = GlobalMaxPooling1D()(x)
  x = Dense(hidden_dims)(x)
  x = Dropout(0.2)(x)
  x = Activation('relu')(x)
  # single sigmoid unit: binary positive/negative sentiment
  x = Dense(1)(x)
  x = Activation('sigmoid')(x)
  model = Model(inputs=inputs, outputs=x)
  model.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
  return model
def train(data, model):
  ''' Fit *model* on the [x_train, y_train, x_test, y_test] list returned
  by load_data(), using the test split for validation. '''
  x_train, y_train, x_test, y_test = data[0], data[1], data[2], data[3]
  model.fit(x_train, y_train,
            batch_size=batch_size,
            epochs=epochs,
            validation_data=(x_test, y_test))
# Script entry point: load the data, build the model and train it.
if __name__ == '__main__':
  ''' '''
  data = load_data()
  model = cnn_model_fn()
  train(data, model)
| trhongbinwang/data_science_journey | deep_learning/keras/examples/imdb_cnn.py | Python | apache-2.0 | 2,519 | 0.004367 |
def highfive(ctx, serv, nick, dest, msg):
    """Reply to a high-five emote with its mirror image.

    When *msg* is one of the four recognised arm emotes, the matching
    counterpart is said back to *dest*; any other message is ignored.
    """
    replies = {
        "\o": "o/",
        "o/": "\o",
        "o'": "'o",
        "'o": "o'",
    }
    answer = replies.get(msg)
    if answer is not None:
        serv.say(dest, answer)
# Register the handler with the bot's message signal.  NOTE(review): `serv`
# must be injected into this module's namespace by the plugin loader before
# the file is executed -- confirm against the litebot plugin machinery.
serv.on_msg.connect(highfive)
| flipcoder/litebot | plugins/highfive.py | Python | mit | 268 | 0.029851 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
# NOTE(review): `_dummy_thread` was removed in Python 3.9; this line looks
# auto-converted from the Python 2 `import dummy_thread` and only supplies
# `allocate_lock` for the lazy-init locks used below -- confirm.
import _dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
# Probe for the extension-capable runtime; fall back to the plain message
# base class when it is unavailable.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
class FieldValue_Geo(ProtocolBuffer.ProtocolMessage):
  """Generated message (do not edit by hand): a lat/lng geo point.

  Both fields are required doubles encoded as group members with wire
  tags 41 (lat) and 49 (lng); tag 36 terminates the group.
  """
  has_lat_ = 0
  lat_ = 0.0
  has_lng_ = 0
  lng_ = 0.0
  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)
  def lat(self): return self.lat_
  def set_lat(self, x):
    self.has_lat_ = 1
    self.lat_ = x
  def clear_lat(self):
    if self.has_lat_:
      self.has_lat_ = 0
      self.lat_ = 0.0
  def has_lat(self): return self.has_lat_
  def lng(self): return self.lng_
  def set_lng(self, x):
    self.has_lng_ = 1
    self.lng_ = x
  def clear_lng(self):
    if self.has_lng_:
      self.has_lng_ = 0
      self.lng_ = 0.0
  def has_lng(self): return self.has_lng_
  def MergeFrom(self, x):
    assert x is not self
    if (x.has_lat()): self.set_lat(x.lat())
    if (x.has_lng()): self.set_lng(x.lng())
  def Equals(self, x):
    if x is self: return 1
    if self.has_lat_ != x.has_lat_: return 0
    if self.has_lat_ and self.lat_ != x.lat_: return 0
    if self.has_lng_ != x.has_lng_: return 0
    if self.has_lng_ and self.lng_ != x.lng_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # both fields are required by the .proto definition
    initialized = 1
    if (not self.has_lat_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: lat not set.')
    if (not self.has_lng_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: lng not set.')
    return initialized
  def ByteSize(self):
    # fixed size: two 1-byte tags + two 8-byte doubles = 18
    n = 0
    return n + 18
  def ByteSizePartial(self):
    n = 0
    if (self.has_lat_):
      n += 9
    if (self.has_lng_):
      n += 9
    return n
  def Clear(self):
    self.clear_lat()
    self.clear_lng()
  def OutputUnchecked(self, out):
    out.putVarInt32(41)
    out.putDouble(self.lat_)
    out.putVarInt32(49)
    out.putDouble(self.lng_)
  def OutputPartial(self, out):
    if (self.has_lat_):
      out.putVarInt32(41)
      out.putDouble(self.lat_)
    if (self.has_lng_):
      out.putVarInt32(49)
      out.putDouble(self.lng_)
  def TryMerge(self, d):
    while 1:
      tt = d.getVarInt32()
      if tt == 36: break
      if tt == 41:
        self.set_lat(d.getDouble())
        continue
      if tt == 49:
        self.set_lng(d.getDouble())
        continue
      # tag 0 is invalid on the wire
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_lat_: res+=prefix+("lat: %s\n" % self.DebugFormat(self.lat_))
    if self.has_lng_: res+=prefix+("lng: %s\n" % self.DebugFormat(self.lng_))
    return res
class FieldValue(ProtocolBuffer.ProtocolMessage):
  """Generated message (do not edit by hand): a typed document-field value.

  Carries an optional content type (the ContentType enum constants below),
  a language code, a string payload and/or a lazily-created FieldValue_Geo
  group for GEO values.
  """
  TEXT = 0
  HTML = 1
  ATOM = 2
  DATE = 3
  NUMBER = 4
  GEO = 5
  UNTOKENIZED_PREFIX = 6
  TOKENIZED_PREFIX = 7
  _ContentType_NAMES = {
    0: "TEXT",
    1: "HTML",
    2: "ATOM",
    3: "DATE",
    4: "NUMBER",
    5: "GEO",
    6: "UNTOKENIZED_PREFIX",
    7: "TOKENIZED_PREFIX",
  }
  def ContentType_Name(cls, x): return cls._ContentType_NAMES.get(x, "")
  ContentType_Name = classmethod(ContentType_Name)
  has_type_ = 0
  type_ = 0
  has_language_ = 0
  language_ = "en"
  has_string_value_ = 0
  string_value_ = ""
  has_geo_ = 0
  geo_ = None
  def __init__(self, contents=None):
    # lock guarding lazy construction of the geo_ submessage
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)
  def type(self): return self.type_
  def set_type(self, x):
    self.has_type_ = 1
    self.type_ = x
  def clear_type(self):
    if self.has_type_:
      self.has_type_ = 0
      self.type_ = 0
  def has_type(self): return self.has_type_
  def language(self): return self.language_
  def set_language(self, x):
    self.has_language_ = 1
    self.language_ = x
  def clear_language(self):
    if self.has_language_:
      self.has_language_ = 0
      self.language_ = "en"
  def has_language(self): return self.has_language_
  def string_value(self): return self.string_value_
  def set_string_value(self, x):
    self.has_string_value_ = 1
    self.string_value_ = x
  def clear_string_value(self):
    if self.has_string_value_:
      self.has_string_value_ = 0
      self.string_value_ = ""
  def has_string_value(self): return self.has_string_value_
  def geo(self):
    # lazily build the submessage; double-checked under lazy_init_lock_
    if self.geo_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.geo_ is None: self.geo_ = FieldValue_Geo()
      finally:
        self.lazy_init_lock_.release()
    return self.geo_
  def mutable_geo(self): self.has_geo_ = 1; return self.geo()
  def clear_geo(self):
    if self.has_geo_:
      self.has_geo_ = 0;
      if self.geo_ is not None: self.geo_.Clear()
  def has_geo(self): return self.has_geo_
  def MergeFrom(self, x):
    assert x is not self
    if (x.has_type()): self.set_type(x.type())
    if (x.has_language()): self.set_language(x.language())
    if (x.has_string_value()): self.set_string_value(x.string_value())
    if (x.has_geo()): self.mutable_geo().MergeFrom(x.geo())
  def Equals(self, x):
    if x is self: return 1
    if self.has_type_ != x.has_type_: return 0
    if self.has_type_ and self.type_ != x.type_: return 0
    if self.has_language_ != x.has_language_: return 0
    if self.has_language_ and self.language_ != x.language_: return 0
    if self.has_string_value_ != x.has_string_value_: return 0
    if self.has_string_value_ and self.string_value_ != x.string_value_: return 0
    if self.has_geo_ != x.has_geo_: return 0
    if self.has_geo_ and self.geo_ != x.geo_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # every field is optional; only the geo submessage needs checking
    initialized = 1
    if (self.has_geo_ and not self.geo_.IsInitialized(debug_strs)): initialized = 0
    return initialized
  def ByteSize(self):
    n = 0
    if (self.has_type_): n += 1 + self.lengthVarInt64(self.type_)
    if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
    if (self.has_string_value_): n += 1 + self.lengthString(len(self.string_value_))
    if (self.has_geo_): n += 2 + self.geo_.ByteSize()
    return n
  def ByteSizePartial(self):
    n = 0
    if (self.has_type_): n += 1 + self.lengthVarInt64(self.type_)
    if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
    if (self.has_string_value_): n += 1 + self.lengthString(len(self.string_value_))
    if (self.has_geo_): n += 2 + self.geo_.ByteSizePartial()
    return n
  def Clear(self):
    self.clear_type()
    self.clear_language()
    self.clear_string_value()
    self.clear_geo()
  def OutputUnchecked(self, out):
    if (self.has_type_):
      out.putVarInt32(8)
      out.putVarInt32(self.type_)
    if (self.has_language_):
      out.putVarInt32(18)
      out.putPrefixedString(self.language_)
    if (self.has_string_value_):
      out.putVarInt32(26)
      out.putPrefixedString(self.string_value_)
    if (self.has_geo_):
      out.putVarInt32(35)
      self.geo_.OutputUnchecked(out)
      out.putVarInt32(36)
  def OutputPartial(self, out):
    if (self.has_type_):
      out.putVarInt32(8)
      out.putVarInt32(self.type_)
    if (self.has_language_):
      out.putVarInt32(18)
      out.putPrefixedString(self.language_)
    if (self.has_string_value_):
      out.putVarInt32(26)
      out.putPrefixedString(self.string_value_)
    if (self.has_geo_):
      out.putVarInt32(35)
      self.geo_.OutputPartial(out)
      out.putVarInt32(36)
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_type(d.getVarInt32())
        continue
      if tt == 18:
        self.set_language(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_string_value(d.getPrefixedString())
        continue
      if tt == 35:
        self.mutable_geo().TryMerge(d)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatInt32(self.type_))
    if self.has_language_: res+=prefix+("language: %s\n" % self.DebugFormatString(self.language_))
    if self.has_string_value_: res+=prefix+("string_value: %s\n" % self.DebugFormatString(self.string_value_))
    if self.has_geo_:
      res+=prefix+"Geo {\n"
      res+=self.geo_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+"}\n"
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  ktype = 1
  klanguage = 2
  kstring_value = 3
  kGeoGroup = 4
  kGeolat = 5
  kGeolng = 6
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "type",
    2: "language",
    3: "string_value",
    4: "Geo",
    5: "lat",
    6: "lng",
  }, 6)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STARTGROUP,
    5: ProtocolBuffer.Encoder.DOUBLE,
    6: ProtocolBuffer.Encoder.DOUBLE,
  }, 6, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.FieldValue'
class Field(ProtocolBuffer.ProtocolMessage):
  """Generated message (do not edit by hand): a named document field.

  Pairs a required name (tag 1) with a required FieldValue (tag 2).
  """
  has_name_ = 0
  name_ = ""
  has_value_ = 0
  def __init__(self, contents=None):
    self.value_ = FieldValue()
    if contents is not None: self.MergeFromString(contents)
  def name(self): return self.name_
  def set_name(self, x):
    self.has_name_ = 1
    self.name_ = x
  def clear_name(self):
    if self.has_name_:
      self.has_name_ = 0
      self.name_ = ""
  def has_name(self): return self.has_name_
  def value(self): return self.value_
  def mutable_value(self): self.has_value_ = 1; return self.value_
  def clear_value(self):self.has_value_ = 0; self.value_.Clear()
  def has_value(self): return self.has_value_
  def MergeFrom(self, x):
    assert x is not self
    if (x.has_name()): self.set_name(x.name())
    if (x.has_value()): self.mutable_value().MergeFrom(x.value())
  def Equals(self, x):
    if x is self: return 1
    if self.has_name_ != x.has_name_: return 0
    if self.has_name_ and self.name_ != x.name_: return 0
    if self.has_value_ != x.has_value_: return 0
    if self.has_value_ and self.value_ != x.value_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # both name and value are required fields
    initialized = 1
    if (not self.has_name_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: name not set.')
    if (not self.has_value_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: value not set.')
    elif not self.value_.IsInitialized(debug_strs): initialized = 0
    return initialized
  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.name_))
    n += self.lengthString(self.value_.ByteSize())
    return n + 2
  def ByteSizePartial(self):
    n = 0
    if (self.has_name_):
      n += 1
      n += self.lengthString(len(self.name_))
    if (self.has_value_):
      n += 1
      n += self.lengthString(self.value_.ByteSizePartial())
    return n
  def Clear(self):
    self.clear_name()
    self.clear_value()
  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.name_)
    out.putVarInt32(18)
    out.putVarInt32(self.value_.ByteSize())
    self.value_.OutputUnchecked(out)
  def OutputPartial(self, out):
    if (self.has_name_):
      out.putVarInt32(10)
      out.putPrefixedString(self.name_)
    if (self.has_value_):
      out.putVarInt32(18)
      out.putVarInt32(self.value_.ByteSizePartial())
      self.value_.OutputPartial(out)
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_name(d.getPrefixedString())
        continue
      if tt == 18:
        # decode the embedded FieldValue from a bounded sub-decoder
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_value().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
    if self.has_value_:
      res+=prefix+"value <\n"
      res+=self.value_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  kname = 1
  kvalue = 2
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "name",
    2: "value",
  }, 2)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Field'
class FieldTypes(ProtocolBuffer.ProtocolMessage):
  """Generated message (do not edit by hand): the set of content types
  observed for a named field.

  A required name (tag 1) plus a repeated varint list of ContentType
  values (tag 2).
  """
  has_name_ = 0
  name_ = ""
  def __init__(self, contents=None):
    self.type_ = []
    if contents is not None: self.MergeFromString(contents)
  def name(self): return self.name_
  def set_name(self, x):
    self.has_name_ = 1
    self.name_ = x
  def clear_name(self):
    if self.has_name_:
      self.has_name_ = 0
      self.name_ = ""
  def has_name(self): return self.has_name_
  def type_size(self): return len(self.type_)
  def type_list(self): return self.type_
  def type(self, i):
    return self.type_[i]
  def set_type(self, i, x):
    self.type_[i] = x
  def add_type(self, x):
    self.type_.append(x)
  def clear_type(self):
    self.type_ = []
  def MergeFrom(self, x):
    assert x is not self
    if (x.has_name()): self.set_name(x.name())
    for i in range(x.type_size()): self.add_type(x.type(i))
  def Equals(self, x):
    if x is self: return 1
    if self.has_name_ != x.has_name_: return 0
    if self.has_name_ and self.name_ != x.name_: return 0
    if len(self.type_) != len(x.type_): return 0
    for e1, e2 in zip(self.type_, x.type_):
      if e1 != e2: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # name is the only required field; type entries are optional/repeated
    initialized = 1
    if (not self.has_name_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: name not set.')
    return initialized
  def ByteSize(self):
    n = 0
    n += self.lengthString(len(self.name_))
    n += 1 * len(self.type_)
    for i in range(len(self.type_)): n += self.lengthVarInt64(self.type_[i])
    return n + 1
  def ByteSizePartial(self):
    n = 0
    if (self.has_name_):
      n += 1
      n += self.lengthString(len(self.name_))
    n += 1 * len(self.type_)
    for i in range(len(self.type_)): n += self.lengthVarInt64(self.type_[i])
    return n
  def Clear(self):
    self.clear_name()
    self.clear_type()
  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.name_)
    for i in range(len(self.type_)):
      out.putVarInt32(16)
      out.putVarInt32(self.type_[i])
  def OutputPartial(self, out):
    if (self.has_name_):
      out.putVarInt32(10)
      out.putPrefixedString(self.name_)
    for i in range(len(self.type_)):
      out.putVarInt32(16)
      out.putVarInt32(self.type_[i])
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_name(d.getPrefixedString())
        continue
      if tt == 16:
        self.add_type(d.getVarInt32())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
    cnt=0
    for e in self.type_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("type%s: %s\n" % (elm, self.DebugFormatInt32(e)))
      cnt+=1
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  kname = 1
  ktype = 2
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "name",
    2: "type",
  }, 2)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.NUMERIC,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.FieldTypes'
class IndexShardSettings(ProtocolBuffer.ProtocolMessage):
  """storage_onestore_v3.IndexShardSettings -- generated message class.

  Machine-generated protocol buffer code; do not edit by hand.
  Fields:
    prev_num_shards               -- repeated int32, tag 1
    num_shards                    -- required int32, tag 2, default 1
    prev_num_shards_search_false  -- repeated int32, tag 3
    local_replica                 -- optional string, tag 4
  """
  # Presence flags and scalar defaults. These class-level defaults are
  # shared until an instance setter shadows them.
  has_num_shards_ = 0
  num_shards_ = 1
  has_local_replica_ = 0
  local_replica_ = ""

  def __init__(self, contents=None):
    # Repeated fields need fresh per-instance lists.
    self.prev_num_shards_ = []
    self.prev_num_shards_search_false_ = []
    if contents is not None: self.MergeFromString(contents)

  # --- prev_num_shards (repeated int32, tag 1) accessors ---
  def prev_num_shards_size(self): return len(self.prev_num_shards_)
  def prev_num_shards_list(self): return self.prev_num_shards_

  def prev_num_shards(self, i):
    return self.prev_num_shards_[i]

  def set_prev_num_shards(self, i, x):
    self.prev_num_shards_[i] = x

  def add_prev_num_shards(self, x):
    self.prev_num_shards_.append(x)

  def clear_prev_num_shards(self):
    self.prev_num_shards_ = []

  # --- num_shards (required int32, tag 2) accessors ---
  def num_shards(self): return self.num_shards_

  def set_num_shards(self, x):
    self.has_num_shards_ = 1
    self.num_shards_ = x

  def clear_num_shards(self):
    if self.has_num_shards_:
      self.has_num_shards_ = 0
      self.num_shards_ = 1

  def has_num_shards(self): return self.has_num_shards_

  # --- prev_num_shards_search_false (repeated int32, tag 3) accessors ---
  def prev_num_shards_search_false_size(self): return len(self.prev_num_shards_search_false_)
  def prev_num_shards_search_false_list(self): return self.prev_num_shards_search_false_

  def prev_num_shards_search_false(self, i):
    return self.prev_num_shards_search_false_[i]

  def set_prev_num_shards_search_false(self, i, x):
    self.prev_num_shards_search_false_[i] = x

  def add_prev_num_shards_search_false(self, x):
    self.prev_num_shards_search_false_.append(x)

  def clear_prev_num_shards_search_false(self):
    self.prev_num_shards_search_false_ = []

  # --- local_replica (optional string, tag 4) accessors ---
  def local_replica(self): return self.local_replica_

  def set_local_replica(self, x):
    self.has_local_replica_ = 1
    self.local_replica_ = x

  def clear_local_replica(self):
    if self.has_local_replica_:
      self.has_local_replica_ = 0
      self.local_replica_ = ""

  def has_local_replica(self): return self.has_local_replica_


  def MergeFrom(self, x):
    """Merge every set field of message x into self (x must differ from self)."""
    assert x is not self
    for i in range(x.prev_num_shards_size()): self.add_prev_num_shards(x.prev_num_shards(i))
    if (x.has_num_shards()): self.set_num_shards(x.num_shards())
    for i in range(x.prev_num_shards_search_false_size()): self.add_prev_num_shards_search_false(x.prev_num_shards_search_false(i))
    if (x.has_local_replica()): self.set_local_replica(x.local_replica())

  def Equals(self, x):
    """Field-by-field equality, including presence bits. Returns 1 or 0."""
    if x is self: return 1
    if len(self.prev_num_shards_) != len(x.prev_num_shards_): return 0
    for e1, e2 in zip(self.prev_num_shards_, x.prev_num_shards_):
      if e1 != e2: return 0
    if self.has_num_shards_ != x.has_num_shards_: return 0
    if self.has_num_shards_ and self.num_shards_ != x.num_shards_: return 0
    if len(self.prev_num_shards_search_false_) != len(x.prev_num_shards_search_false_): return 0
    for e1, e2 in zip(self.prev_num_shards_search_false_, x.prev_num_shards_search_false_):
      if e1 != e2: return 0
    if self.has_local_replica_ != x.has_local_replica_: return 0
    if self.has_local_replica_ and self.local_replica_ != x.local_replica_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all required fields (num_shards) are set."""
    initialized = 1
    if (not self.has_num_shards_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: num_shards not set.')
    return initialized

  def ByteSize(self):
    """Serialized size in bytes, assuming all required fields are set."""
    n = 0
    n += 1 * len(self.prev_num_shards_)
    for i in range(len(self.prev_num_shards_)): n += self.lengthVarInt64(self.prev_num_shards_[i])
    n += self.lengthVarInt64(self.num_shards_)
    n += 1 * len(self.prev_num_shards_search_false_)
    for i in range(len(self.prev_num_shards_search_false_)): n += self.lengthVarInt64(self.prev_num_shards_search_false_[i])
    if (self.has_local_replica_): n += 1 + self.lengthString(len(self.local_replica_))
    # The trailing +1 is the tag byte of the required num_shards field.
    return n + 1

  def ByteSizePartial(self):
    """Serialized size counting only fields that are actually set."""
    n = 0
    n += 1 * len(self.prev_num_shards_)
    for i in range(len(self.prev_num_shards_)): n += self.lengthVarInt64(self.prev_num_shards_[i])
    if (self.has_num_shards_):
      n += 1
      n += self.lengthVarInt64(self.num_shards_)
    n += 1 * len(self.prev_num_shards_search_false_)
    for i in range(len(self.prev_num_shards_search_false_)): n += self.lengthVarInt64(self.prev_num_shards_search_false_[i])
    if (self.has_local_replica_): n += 1 + self.lengthString(len(self.local_replica_))
    return n

  def Clear(self):
    """Reset every field to its default / empty state."""
    self.clear_prev_num_shards()
    self.clear_num_shards()
    self.clear_prev_num_shards_search_false()
    self.clear_local_replica()

  def OutputUnchecked(self, out):
    """Serialize to Encoder `out`; assumes required fields are present."""
    for i in range(len(self.prev_num_shards_)):
      out.putVarInt32(8)
      out.putVarInt32(self.prev_num_shards_[i])
    out.putVarInt32(16)
    out.putVarInt32(self.num_shards_)
    for i in range(len(self.prev_num_shards_search_false_)):
      out.putVarInt32(24)
      out.putVarInt32(self.prev_num_shards_search_false_[i])
    if (self.has_local_replica_):
      out.putVarInt32(34)
      out.putPrefixedString(self.local_replica_)

  def OutputPartial(self, out):
    """Serialize to Encoder `out`, emitting only fields that are set."""
    for i in range(len(self.prev_num_shards_)):
      out.putVarInt32(8)
      out.putVarInt32(self.prev_num_shards_[i])
    if (self.has_num_shards_):
      out.putVarInt32(16)
      out.putVarInt32(self.num_shards_)
    for i in range(len(self.prev_num_shards_search_false_)):
      out.putVarInt32(24)
      out.putVarInt32(self.prev_num_shards_search_false_[i])
    if (self.has_local_replica_):
      out.putVarInt32(34)
      out.putPrefixedString(self.local_replica_)

  def TryMerge(self, d):
    """Decode from Decoder `d`, merging recognized fields into self."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.add_prev_num_shards(d.getVarInt32())
        continue
      if tt == 16:
        self.set_num_shards(d.getVarInt32())
        continue
      if tt == 24:
        self.add_prev_num_shards_search_false(d.getVarInt32())
        continue
      if tt == 34:
        self.set_local_replica(d.getPrefixedString())
        continue
      # Tag 0 is never valid on the wire; treat it as decode corruption.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Text-format debug rendering; printElemNumber adds (i) markers."""
    res=""
    cnt=0
    for e in self.prev_num_shards_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("prev_num_shards%s: %s\n" % (elm, self.DebugFormatInt32(e)))
      cnt+=1
    if self.has_num_shards_: res+=prefix+("num_shards: %s\n" % self.DebugFormatInt32(self.num_shards_))
    cnt=0
    for e in self.prev_num_shards_search_false_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("prev_num_shards_search_false%s: %s\n" % (elm, self.DebugFormatInt32(e)))
      cnt+=1
    if self.has_local_replica_: res+=prefix+("local_replica: %s\n" % self.DebugFormatString(self.local_replica_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} dict into a tuple of length maxtag+1.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field numbers for this message.
  kprev_num_shards = 1
  knum_shards = 2
  kprev_num_shards_search_false = 3
  klocal_replica = 4

  # Tag-number -> field-name table used for text-format debug output.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "prev_num_shards",
    2: "num_shards",
    3: "prev_num_shards_search_false",
    4: "local_replica",
  }, 4)

  # Tag-number -> wire-type table used by the encoder/decoder.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.STRING,
  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)

  # Style metadata emitted by the protocol buffer compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.IndexShardSettings'
class IndexMetadata(ProtocolBuffer.ProtocolMessage):
  """storage_onestore_v3.IndexMetadata -- generated message class.

  Machine-generated protocol buffer code; do not edit by hand.
  Fields:
    is_over_field_number_threshold -- optional bool, tag 1
    index_shard_settings           -- optional IndexShardSettings, tag 2
    index_state                    -- optional IndexState enum, tag 3
    index_delete_time              -- optional int64, tag 4
    max_index_size_bytes           -- optional int64, tag 5
  """
  # IndexState enum values.
  ACTIVE       =    0
  SOFT_DELETED =    1
  PURGING      =    2

  _IndexState_NAMES = {
    0: "ACTIVE",
    1: "SOFT_DELETED",
    2: "PURGING",
  }

  def IndexState_Name(cls, x): return cls._IndexState_NAMES.get(x, "")
  # Old-style classmethod wrapping (pre-decorator syntax).
  IndexState_Name = classmethod(IndexState_Name)

  # Presence flags and scalar defaults (shared class-level defaults).
  has_is_over_field_number_threshold_ = 0
  is_over_field_number_threshold_ = 0
  has_index_shard_settings_ = 0
  index_shard_settings_ = None
  has_index_state_ = 0
  index_state_ = 0
  has_index_delete_time_ = 0
  index_delete_time_ = 0
  has_max_index_size_bytes_ = 0
  max_index_size_bytes_ = 0

  def __init__(self, contents=None):
    # Lock guarding lazy construction of the index_shard_settings submessage.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- is_over_field_number_threshold (optional bool, tag 1) accessors ---
  def is_over_field_number_threshold(self): return self.is_over_field_number_threshold_

  def set_is_over_field_number_threshold(self, x):
    self.has_is_over_field_number_threshold_ = 1
    self.is_over_field_number_threshold_ = x

  def clear_is_over_field_number_threshold(self):
    if self.has_is_over_field_number_threshold_:
      self.has_is_over_field_number_threshold_ = 0
      self.is_over_field_number_threshold_ = 0

  def has_is_over_field_number_threshold(self): return self.has_is_over_field_number_threshold_

  # --- index_shard_settings (optional submessage, tag 2) accessors ---
  def index_shard_settings(self):
    # Lazily instantiate the submessage (double-checked under the lock).
    if self.index_shard_settings_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.index_shard_settings_ is None: self.index_shard_settings_ = IndexShardSettings()
      finally:
        self.lazy_init_lock_.release()
    return self.index_shard_settings_

  def mutable_index_shard_settings(self): self.has_index_shard_settings_ = 1; return self.index_shard_settings()

  def clear_index_shard_settings(self):
    if self.has_index_shard_settings_:
      self.has_index_shard_settings_ = 0;
      if self.index_shard_settings_ is not None: self.index_shard_settings_.Clear()

  def has_index_shard_settings(self): return self.has_index_shard_settings_

  # --- index_state (optional enum, tag 3) accessors ---
  def index_state(self): return self.index_state_

  def set_index_state(self, x):
    self.has_index_state_ = 1
    self.index_state_ = x

  def clear_index_state(self):
    if self.has_index_state_:
      self.has_index_state_ = 0
      self.index_state_ = 0

  def has_index_state(self): return self.has_index_state_

  # --- index_delete_time (optional int64, tag 4) accessors ---
  def index_delete_time(self): return self.index_delete_time_

  def set_index_delete_time(self, x):
    self.has_index_delete_time_ = 1
    self.index_delete_time_ = x

  def clear_index_delete_time(self):
    if self.has_index_delete_time_:
      self.has_index_delete_time_ = 0
      self.index_delete_time_ = 0

  def has_index_delete_time(self): return self.has_index_delete_time_

  # --- max_index_size_bytes (optional int64, tag 5) accessors ---
  def max_index_size_bytes(self): return self.max_index_size_bytes_

  def set_max_index_size_bytes(self, x):
    self.has_max_index_size_bytes_ = 1
    self.max_index_size_bytes_ = x

  def clear_max_index_size_bytes(self):
    if self.has_max_index_size_bytes_:
      self.has_max_index_size_bytes_ = 0
      self.max_index_size_bytes_ = 0

  def has_max_index_size_bytes(self): return self.has_max_index_size_bytes_


  def MergeFrom(self, x):
    """Merge every set field of message x into self (x must differ from self)."""
    assert x is not self
    if (x.has_is_over_field_number_threshold()): self.set_is_over_field_number_threshold(x.is_over_field_number_threshold())
    if (x.has_index_shard_settings()): self.mutable_index_shard_settings().MergeFrom(x.index_shard_settings())
    if (x.has_index_state()): self.set_index_state(x.index_state())
    if (x.has_index_delete_time()): self.set_index_delete_time(x.index_delete_time())
    if (x.has_max_index_size_bytes()): self.set_max_index_size_bytes(x.max_index_size_bytes())

  def Equals(self, x):
    """Field-by-field equality, including presence bits. Returns 1 or 0."""
    if x is self: return 1
    if self.has_is_over_field_number_threshold_ != x.has_is_over_field_number_threshold_: return 0
    if self.has_is_over_field_number_threshold_ and self.is_over_field_number_threshold_ != x.is_over_field_number_threshold_: return 0
    if self.has_index_shard_settings_ != x.has_index_shard_settings_: return 0
    if self.has_index_shard_settings_ and self.index_shard_settings_ != x.index_shard_settings_: return 0
    if self.has_index_state_ != x.has_index_state_: return 0
    if self.has_index_state_ and self.index_state_ != x.index_state_: return 0
    if self.has_index_delete_time_ != x.has_index_delete_time_: return 0
    if self.has_index_delete_time_ and self.index_delete_time_ != x.index_delete_time_: return 0
    if self.has_max_index_size_bytes_ != x.has_max_index_size_bytes_: return 0
    if self.has_max_index_size_bytes_ and self.max_index_size_bytes_ != x.max_index_size_bytes_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff the submessage (when present) is itself initialized."""
    initialized = 1
    if (self.has_index_shard_settings_ and not self.index_shard_settings_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Serialized size in bytes, assuming required subfields are set."""
    n = 0
    if (self.has_is_over_field_number_threshold_): n += 2
    if (self.has_index_shard_settings_): n += 1 + self.lengthString(self.index_shard_settings_.ByteSize())
    if (self.has_index_state_): n += 1 + self.lengthVarInt64(self.index_state_)
    if (self.has_index_delete_time_): n += 1 + self.lengthVarInt64(self.index_delete_time_)
    if (self.has_max_index_size_bytes_): n += 1 + self.lengthVarInt64(self.max_index_size_bytes_)
    return n

  def ByteSizePartial(self):
    """Serialized size counting only fields that are actually set."""
    n = 0
    if (self.has_is_over_field_number_threshold_): n += 2
    if (self.has_index_shard_settings_): n += 1 + self.lengthString(self.index_shard_settings_.ByteSizePartial())
    if (self.has_index_state_): n += 1 + self.lengthVarInt64(self.index_state_)
    if (self.has_index_delete_time_): n += 1 + self.lengthVarInt64(self.index_delete_time_)
    if (self.has_max_index_size_bytes_): n += 1 + self.lengthVarInt64(self.max_index_size_bytes_)
    return n

  def Clear(self):
    """Reset every field to its default / empty state."""
    self.clear_is_over_field_number_threshold()
    self.clear_index_shard_settings()
    self.clear_index_state()
    self.clear_index_delete_time()
    self.clear_max_index_size_bytes()

  def OutputUnchecked(self, out):
    """Serialize to Encoder `out`; assumes required subfields are present."""
    if (self.has_is_over_field_number_threshold_):
      out.putVarInt32(8)
      out.putBoolean(self.is_over_field_number_threshold_)
    if (self.has_index_shard_settings_):
      out.putVarInt32(18)
      out.putVarInt32(self.index_shard_settings_.ByteSize())
      self.index_shard_settings_.OutputUnchecked(out)
    if (self.has_index_state_):
      out.putVarInt32(24)
      out.putVarInt32(self.index_state_)
    if (self.has_index_delete_time_):
      out.putVarInt32(32)
      out.putVarInt64(self.index_delete_time_)
    if (self.has_max_index_size_bytes_):
      out.putVarInt32(40)
      out.putVarInt64(self.max_index_size_bytes_)

  def OutputPartial(self, out):
    """Serialize to Encoder `out`, emitting only fields that are set."""
    if (self.has_is_over_field_number_threshold_):
      out.putVarInt32(8)
      out.putBoolean(self.is_over_field_number_threshold_)
    if (self.has_index_shard_settings_):
      out.putVarInt32(18)
      out.putVarInt32(self.index_shard_settings_.ByteSizePartial())
      self.index_shard_settings_.OutputPartial(out)
    if (self.has_index_state_):
      out.putVarInt32(24)
      out.putVarInt32(self.index_state_)
    if (self.has_index_delete_time_):
      out.putVarInt32(32)
      out.putVarInt64(self.index_delete_time_)
    if (self.has_max_index_size_bytes_):
      out.putVarInt32(40)
      out.putVarInt64(self.max_index_size_bytes_)

  def TryMerge(self, d):
    """Decode from Decoder `d`, merging recognized fields into self."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_is_over_field_number_threshold(d.getBoolean())
        continue
      if tt == 18:
        # Length-delimited submessage: decode its slice with a sub-Decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_index_shard_settings().TryMerge(tmp)
        continue
      if tt == 24:
        self.set_index_state(d.getVarInt32())
        continue
      if tt == 32:
        self.set_index_delete_time(d.getVarInt64())
        continue
      if tt == 40:
        self.set_max_index_size_bytes(d.getVarInt64())
        continue
      # Tag 0 is never valid on the wire; treat it as decode corruption.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Text-format debug rendering; printElemNumber adds (i) markers."""
    res=""
    if self.has_is_over_field_number_threshold_: res+=prefix+("is_over_field_number_threshold: %s\n" % self.DebugFormatBool(self.is_over_field_number_threshold_))
    if self.has_index_shard_settings_:
      res+=prefix+"index_shard_settings <\n"
      res+=self.index_shard_settings_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    if self.has_index_state_: res+=prefix+("index_state: %s\n" % self.DebugFormatInt32(self.index_state_))
    if self.has_index_delete_time_: res+=prefix+("index_delete_time: %s\n" % self.DebugFormatInt64(self.index_delete_time_))
    if self.has_max_index_size_bytes_: res+=prefix+("max_index_size_bytes: %s\n" % self.DebugFormatInt64(self.max_index_size_bytes_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} dict into a tuple of length maxtag+1.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field numbers for this message.
  kis_over_field_number_threshold = 1
  kindex_shard_settings = 2
  kindex_state = 3
  kindex_delete_time = 4
  kmax_index_size_bytes = 5

  # Tag-number -> field-name table used for text-format debug output.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "is_over_field_number_threshold",
    2: "index_shard_settings",
    3: "index_state",
    4: "index_delete_time",
    5: "max_index_size_bytes",
  }, 5)

  # Tag-number -> wire-type table used by the encoder/decoder.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.NUMERIC,
  }, 5, ProtocolBuffer.Encoder.MAX_TYPE)

  # Style metadata emitted by the protocol buffer compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.IndexMetadata'
class FacetValue(ProtocolBuffer.ProtocolMessage):
  """storage_onestore_v3.FacetValue -- generated message class.

  Machine-generated protocol buffer code; do not edit by hand.
  Fields:
    type         -- optional ContentType enum, tag 1, default ATOM (2)
    string_value -- optional string, tag 3
  """
  # ContentType enum values.
  ATOM         =    2
  NUMBER       =    4

  _ContentType_NAMES = {
    2: "ATOM",
    4: "NUMBER",
  }

  def ContentType_Name(cls, x): return cls._ContentType_NAMES.get(x, "")
  # Old-style classmethod wrapping (pre-decorator syntax).
  ContentType_Name = classmethod(ContentType_Name)

  # Presence flags and scalar defaults (shared class-level defaults).
  has_type_ = 0
  type_ = 2
  has_string_value_ = 0
  string_value_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  # --- type (optional enum, tag 1) accessors ---
  def type(self): return self.type_

  def set_type(self, x):
    self.has_type_ = 1
    self.type_ = x

  def clear_type(self):
    if self.has_type_:
      self.has_type_ = 0
      self.type_ = 2

  def has_type(self): return self.has_type_

  # --- string_value (optional string, tag 3) accessors ---
  def string_value(self): return self.string_value_

  def set_string_value(self, x):
    self.has_string_value_ = 1
    self.string_value_ = x

  def clear_string_value(self):
    if self.has_string_value_:
      self.has_string_value_ = 0
      self.string_value_ = ""

  def has_string_value(self): return self.has_string_value_


  def MergeFrom(self, x):
    """Merge every set field of message x into self (x must differ from self)."""
    assert x is not self
    if (x.has_type()): self.set_type(x.type())
    if (x.has_string_value()): self.set_string_value(x.string_value())

  def Equals(self, x):
    """Field-by-field equality, including presence bits. Returns 1 or 0."""
    if x is self: return 1
    if self.has_type_ != x.has_type_: return 0
    if self.has_type_ and self.type_ != x.type_: return 0
    if self.has_string_value_ != x.has_string_value_: return 0
    if self.has_string_value_ and self.string_value_ != x.string_value_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Always initialized: this message has no required fields."""
    initialized = 1
    return initialized

  def ByteSize(self):
    """Serialized size in bytes of the set fields."""
    n = 0
    if (self.has_type_): n += 1 + self.lengthVarInt64(self.type_)
    if (self.has_string_value_): n += 1 + self.lengthString(len(self.string_value_))
    return n

  def ByteSizePartial(self):
    """Same as ByteSize: no required fields to special-case."""
    n = 0
    if (self.has_type_): n += 1 + self.lengthVarInt64(self.type_)
    if (self.has_string_value_): n += 1 + self.lengthString(len(self.string_value_))
    return n

  def Clear(self):
    """Reset every field to its default / empty state."""
    self.clear_type()
    self.clear_string_value()

  def OutputUnchecked(self, out):
    """Serialize to Encoder `out`."""
    if (self.has_type_):
      out.putVarInt32(8)
      out.putVarInt32(self.type_)
    if (self.has_string_value_):
      out.putVarInt32(26)
      out.putPrefixedString(self.string_value_)

  def OutputPartial(self, out):
    """Serialize to Encoder `out`, emitting only fields that are set."""
    if (self.has_type_):
      out.putVarInt32(8)
      out.putVarInt32(self.type_)
    if (self.has_string_value_):
      out.putVarInt32(26)
      out.putPrefixedString(self.string_value_)

  def TryMerge(self, d):
    """Decode from Decoder `d`, merging recognized fields into self."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_type(d.getVarInt32())
        continue
      if tt == 26:
        self.set_string_value(d.getPrefixedString())
        continue
      # Tag 0 is never valid on the wire; treat it as decode corruption.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Text-format debug rendering; printElemNumber adds (i) markers."""
    res=""
    if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatInt32(self.type_))
    if self.has_string_value_: res+=prefix+("string_value: %s\n" % self.DebugFormatString(self.string_value_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} dict into a tuple of length maxtag+1.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field numbers for this message (field 2 is unused/reserved).
  ktype = 1
  kstring_value = 3

  # Tag-number -> field-name table used for text-format debug output.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "type",
    3: "string_value",
  }, 3)

  # Tag-number -> wire-type table used by the encoder/decoder.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.STRING,
  }, 3, ProtocolBuffer.Encoder.MAX_TYPE)

  # Style metadata emitted by the protocol buffer compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.FacetValue'
class Facet(ProtocolBuffer.ProtocolMessage):
  """storage_onestore_v3.Facet -- generated message class.

  Machine-generated protocol buffer code; do not edit by hand.
  Fields:
    name  -- required string, tag 1
    value -- required FacetValue submessage, tag 2
  """
  # Presence flags and scalar defaults (shared class-level defaults).
  has_name_ = 0
  name_ = ""
  has_value_ = 0

  def __init__(self, contents=None):
    # The required submessage is constructed eagerly, per instance.
    self.value_ = FacetValue()
    if contents is not None: self.MergeFromString(contents)

  # --- name (required string, tag 1) accessors ---
  def name(self): return self.name_

  def set_name(self, x):
    self.has_name_ = 1
    self.name_ = x

  def clear_name(self):
    if self.has_name_:
      self.has_name_ = 0
      self.name_ = ""

  def has_name(self): return self.has_name_

  # --- value (required FacetValue, tag 2) accessors ---
  def value(self): return self.value_
  def mutable_value(self): self.has_value_ = 1; return self.value_
  def clear_value(self):self.has_value_ = 0; self.value_.Clear()
  def has_value(self): return self.has_value_


  def MergeFrom(self, x):
    """Merge every set field of message x into self (x must differ from self)."""
    assert x is not self
    if (x.has_name()): self.set_name(x.name())
    if (x.has_value()): self.mutable_value().MergeFrom(x.value())

  def Equals(self, x):
    """Field-by-field equality, including presence bits. Returns 1 or 0."""
    if x is self: return 1
    if self.has_name_ != x.has_name_: return 0
    if self.has_name_ and self.name_ != x.name_: return 0
    if self.has_value_ != x.has_value_: return 0
    if self.has_value_ and self.value_ != x.value_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff required fields (name, value) are set and valid."""
    initialized = 1
    if (not self.has_name_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: name not set.')
    if (not self.has_value_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: value not set.')
    elif not self.value_.IsInitialized(debug_strs): initialized = 0
    return initialized

  def ByteSize(self):
    """Serialized size in bytes, assuming all required fields are set."""
    n = 0
    n += self.lengthString(len(self.name_))
    n += self.lengthString(self.value_.ByteSize())
    # The trailing +2 covers the tag bytes of the two required fields.
    return n + 2

  def ByteSizePartial(self):
    """Serialized size counting only fields that are actually set."""
    n = 0
    if (self.has_name_):
      n += 1
      n += self.lengthString(len(self.name_))
    if (self.has_value_):
      n += 1
      n += self.lengthString(self.value_.ByteSizePartial())
    return n

  def Clear(self):
    """Reset every field to its default / empty state."""
    self.clear_name()
    self.clear_value()

  def OutputUnchecked(self, out):
    """Serialize to Encoder `out`; assumes required fields are present."""
    out.putVarInt32(10)
    out.putPrefixedString(self.name_)
    out.putVarInt32(18)
    out.putVarInt32(self.value_.ByteSize())
    self.value_.OutputUnchecked(out)

  def OutputPartial(self, out):
    """Serialize to Encoder `out`, emitting only fields that are set."""
    if (self.has_name_):
      out.putVarInt32(10)
      out.putPrefixedString(self.name_)
    if (self.has_value_):
      out.putVarInt32(18)
      out.putVarInt32(self.value_.ByteSizePartial())
      self.value_.OutputPartial(out)

  def TryMerge(self, d):
    """Decode from Decoder `d`, merging recognized fields into self."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_name(d.getPrefixedString())
        continue
      if tt == 18:
        # Length-delimited submessage: decode its slice with a sub-Decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_value().TryMerge(tmp)
        continue
      # Tag 0 is never valid on the wire; treat it as decode corruption.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Text-format debug rendering; printElemNumber adds (i) markers."""
    res=""
    if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
    if self.has_value_:
      res+=prefix+"value <\n"
      res+=self.value_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} dict into a tuple of length maxtag+1.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field numbers for this message.
  kname = 1
  kvalue = 2

  # Tag-number -> field-name table used for text-format debug output.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "name",
    2: "value",
  }, 2)

  # Tag-number -> wire-type table used by the encoder/decoder.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  # Style metadata emitted by the protocol buffer compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Facet'
class DocumentMetadata(ProtocolBuffer.ProtocolMessage):
  """storage_onestore_v3.DocumentMetadata -- generated message class.

  Machine-generated protocol buffer code; do not edit by hand.
  Fields:
    version              -- optional int64, tag 1
    committed_st_version -- optional int64, tag 2
  """
  # Presence flags and scalar defaults (shared class-level defaults).
  has_version_ = 0
  version_ = 0
  has_committed_st_version_ = 0
  committed_st_version_ = 0

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  # --- version (optional int64, tag 1) accessors ---
  def version(self): return self.version_

  def set_version(self, x):
    self.has_version_ = 1
    self.version_ = x

  def clear_version(self):
    if self.has_version_:
      self.has_version_ = 0
      self.version_ = 0

  def has_version(self): return self.has_version_

  # --- committed_st_version (optional int64, tag 2) accessors ---
  def committed_st_version(self): return self.committed_st_version_

  def set_committed_st_version(self, x):
    self.has_committed_st_version_ = 1
    self.committed_st_version_ = x

  def clear_committed_st_version(self):
    if self.has_committed_st_version_:
      self.has_committed_st_version_ = 0
      self.committed_st_version_ = 0

  def has_committed_st_version(self): return self.has_committed_st_version_


  def MergeFrom(self, x):
    """Merge every set field of message x into self (x must differ from self)."""
    assert x is not self
    if (x.has_version()): self.set_version(x.version())
    if (x.has_committed_st_version()): self.set_committed_st_version(x.committed_st_version())

  def Equals(self, x):
    """Field-by-field equality, including presence bits. Returns 1 or 0."""
    if x is self: return 1
    if self.has_version_ != x.has_version_: return 0
    if self.has_version_ and self.version_ != x.version_: return 0
    if self.has_committed_st_version_ != x.has_committed_st_version_: return 0
    if self.has_committed_st_version_ and self.committed_st_version_ != x.committed_st_version_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Always initialized: this message has no required fields."""
    initialized = 1
    return initialized

  def ByteSize(self):
    """Serialized size in bytes of the set fields."""
    n = 0
    if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
    if (self.has_committed_st_version_): n += 1 + self.lengthVarInt64(self.committed_st_version_)
    return n

  def ByteSizePartial(self):
    """Same as ByteSize: no required fields to special-case."""
    n = 0
    if (self.has_version_): n += 1 + self.lengthVarInt64(self.version_)
    if (self.has_committed_st_version_): n += 1 + self.lengthVarInt64(self.committed_st_version_)
    return n

  def Clear(self):
    """Reset every field to its default / empty state."""
    self.clear_version()
    self.clear_committed_st_version()

  def OutputUnchecked(self, out):
    """Serialize to Encoder `out`."""
    if (self.has_version_):
      out.putVarInt32(8)
      out.putVarInt64(self.version_)
    if (self.has_committed_st_version_):
      out.putVarInt32(16)
      out.putVarInt64(self.committed_st_version_)

  def OutputPartial(self, out):
    """Serialize to Encoder `out`, emitting only fields that are set."""
    if (self.has_version_):
      out.putVarInt32(8)
      out.putVarInt64(self.version_)
    if (self.has_committed_st_version_):
      out.putVarInt32(16)
      out.putVarInt64(self.committed_st_version_)

  def TryMerge(self, d):
    """Decode from Decoder `d`, merging recognized fields into self."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_version(d.getVarInt64())
        continue
      if tt == 16:
        self.set_committed_st_version(d.getVarInt64())
        continue
      # Tag 0 is never valid on the wire; treat it as decode corruption.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Text-format debug rendering; printElemNumber adds (i) markers."""
    res=""
    if self.has_version_: res+=prefix+("version: %s\n" % self.DebugFormatInt64(self.version_))
    if self.has_committed_st_version_: res+=prefix+("committed_st_version: %s\n" % self.DebugFormatInt64(self.committed_st_version_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} dict into a tuple of length maxtag+1.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field numbers for this message.
  kversion = 1
  kcommitted_st_version = 2

  # Tag-number -> field-name table used for text-format debug output.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "version",
    2: "committed_st_version",
  }, 2)

  # Tag-number -> wire-type table used by the encoder/decoder.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.NUMERIC,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  # Style metadata emitted by the protocol buffer compiler (unused here).
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.DocumentMetadata'
class Document(ProtocolBuffer.ProtocolMessage):
DISK = 0
_Storage_NAMES = {
0: "DISK",
}
def Storage_Name(cls, x): return cls._Storage_NAMES.get(x, "")
Storage_Name = classmethod(Storage_Name)
has_id_ = 0
id_ = ""
has_language_ = 0
language_ = "en"
has_order_id_ = 0
order_id_ = 0
has_storage_ = 0
storage_ = 0
def __init__(self, contents=None):
self.field_ = []
self.facet_ = []
if contents is not None: self.MergeFromString(contents)
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = ""
def has_id(self): return self.has_id_
def language(self): return self.language_
def set_language(self, x):
self.has_language_ = 1
self.language_ = x
def clear_language(self):
if self.has_language_:
self.has_language_ = 0
self.language_ = "en"
def has_language(self): return self.has_language_
def field_size(self): return len(self.field_)
def field_list(self): return self.field_
def field(self, i):
return self.field_[i]
def mutable_field(self, i):
return self.field_[i]
def add_field(self):
x = Field()
self.field_.append(x)
return x
def clear_field(self):
self.field_ = []
def order_id(self): return self.order_id_
def set_order_id(self, x):
self.has_order_id_ = 1
self.order_id_ = x
def clear_order_id(self):
if self.has_order_id_:
self.has_order_id_ = 0
self.order_id_ = 0
def has_order_id(self): return self.has_order_id_
def storage(self): return self.storage_
def set_storage(self, x):
self.has_storage_ = 1
self.storage_ = x
def clear_storage(self):
if self.has_storage_:
self.has_storage_ = 0
self.storage_ = 0
def has_storage(self): return self.has_storage_
def facet_size(self): return len(self.facet_)
def facet_list(self): return self.facet_
def facet(self, i):
return self.facet_[i]
def mutable_facet(self, i):
return self.facet_[i]
def add_facet(self):
x = Facet()
self.facet_.append(x)
return x
def clear_facet(self):
self.facet_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_id()): self.set_id(x.id())
if (x.has_language()): self.set_language(x.language())
for i in range(x.field_size()): self.add_field().CopyFrom(x.field(i))
if (x.has_order_id()): self.set_order_id(x.order_id())
if (x.has_storage()): self.set_storage(x.storage())
for i in range(x.facet_size()): self.add_facet().CopyFrom(x.facet(i))
def Equals(self, x):
if x is self: return 1
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_language_ != x.has_language_: return 0
if self.has_language_ and self.language_ != x.language_: return 0
if len(self.field_) != len(x.field_): return 0
for e1, e2 in zip(self.field_, x.field_):
if e1 != e2: return 0
if self.has_order_id_ != x.has_order_id_: return 0
if self.has_order_id_ and self.order_id_ != x.order_id_: return 0
if self.has_storage_ != x.has_storage_: return 0
if self.has_storage_ and self.storage_ != x.storage_: return 0
if len(self.facet_) != len(x.facet_): return 0
for e1, e2 in zip(self.facet_, x.facet_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.field_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.facet_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
if (self.has_id_): n += 1 + self.lengthString(len(self.id_))
if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
n += 1 * len(self.field_)
for i in range(len(self.field_)): n += self.lengthString(self.field_[i].ByteSize())
if (self.has_order_id_): n += 1 + self.lengthVarInt64(self.order_id_)
if (self.has_storage_): n += 1 + self.lengthVarInt64(self.storage_)
n += 1 * len(self.facet_)
for i in range(len(self.facet_)): n += self.lengthString(self.facet_[i].ByteSize())
return n
def ByteSizePartial(self):
n = 0
if (self.has_id_): n += 1 + self.lengthString(len(self.id_))
if (self.has_language_): n += 1 + self.lengthString(len(self.language_))
n += 1 * len(self.field_)
for i in range(len(self.field_)): n += self.lengthString(self.field_[i].ByteSizePartial())
if (self.has_order_id_): n += 1 + self.lengthVarInt64(self.order_id_)
if (self.has_storage_): n += 1 + self.lengthVarInt64(self.storage_)
n += 1 * len(self.facet_)
for i in range(len(self.facet_)): n += self.lengthString(self.facet_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_id()
self.clear_language()
self.clear_field()
self.clear_order_id()
self.clear_storage()
self.clear_facet()
def OutputUnchecked(self, out):
if (self.has_id_):
out.putVarInt32(10)
out.putPrefixedString(self.id_)
if (self.has_language_):
out.putVarInt32(18)
out.putPrefixedString(self.language_)
for i in range(len(self.field_)):
out.putVarInt32(26)
out.putVarInt32(self.field_[i].ByteSize())
self.field_[i].OutputUnchecked(out)
if (self.has_order_id_):
out.putVarInt32(32)
out.putVarInt32(self.order_id_)
if (self.has_storage_):
out.putVarInt32(40)
out.putVarInt32(self.storage_)
for i in range(len(self.facet_)):
out.putVarInt32(66)
out.putVarInt32(self.facet_[i].ByteSize())
self.facet_[i].OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_id_):
out.putVarInt32(10)
out.putPrefixedString(self.id_)
if (self.has_language_):
out.putVarInt32(18)
out.putPrefixedString(self.language_)
for i in range(len(self.field_)):
out.putVarInt32(26)
out.putVarInt32(self.field_[i].ByteSizePartial())
self.field_[i].OutputPartial(out)
if (self.has_order_id_):
out.putVarInt32(32)
out.putVarInt32(self.order_id_)
if (self.has_storage_):
out.putVarInt32(40)
out.putVarInt32(self.storage_)
for i in range(len(self.facet_)):
out.putVarInt32(66)
out.putVarInt32(self.facet_[i].ByteSizePartial())
self.facet_[i].OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_id(d.getPrefixedString())
continue
if tt == 18:
self.set_language(d.getPrefixedString())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_field().TryMerge(tmp)
continue
if tt == 32:
self.set_order_id(d.getVarInt32())
continue
if tt == 40:
self.set_storage(d.getVarInt32())
continue
if tt == 66:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_facet().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatString(self.id_))
if self.has_language_: res+=prefix+("language: %s\n" % self.DebugFormatString(self.language_))
cnt=0
for e in self.field_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("field%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_order_id_: res+=prefix+("order_id: %s\n" % self.DebugFormatInt32(self.order_id_))
if self.has_storage_: res+=prefix+("storage: %s\n" % self.DebugFormatInt32(self.storage_))
cnt=0
for e in self.facet_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("facet%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Densify `sparse` (dict of tag number -> value) into a tuple indexed
    by tag 0..maxtag, filling missing tags with `default`."""
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  # Proto field numbers (kFOO = field number of FOO).
  kid = 1
  klanguage = 2
  kfield = 3
  korder_id = 4
  kstorage = 5
  kfacet = 8

  # Dense tag -> field-name table; index 0 is the error slot.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "id",
    2: "language",
    3: "field",
    4: "order_id",
    5: "storage",
    8: "facet",
  }, 8)

  # Dense tag -> encoder wire-type table; unused tag slots fall back to
  # MAX_TYPE.
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.NUMERIC,
    8: ProtocolBuffer.Encoder.STRING,
  }, 8, ProtocolBuffer.Encoder.MAX_TYPE)

  # Legacy styling hooks kept empty for generated-code compatibility.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Document'
if _extension_runtime:
  pass

# Public message classes exported by this generated module.
__all__ = ['FieldValue','FieldValue_Geo','Field','FieldTypes','IndexShardSettings','IndexMetadata','FacetValue','Facet','DocumentMetadata','Document']
| Suwmlee/XX-Net | gae_proxy/server/lib/google/appengine/datastore/document_pb.py | Python | bsd-2-clause | 55,713 | 0.021144 |
import cStringIO
import zlib
import wx
#----------------------------------------------------------------------
def getMailData():
    """Return the PNG bytes for the 'mail' icon.

    The image is stored zlib-compressed in the source and decompressed
    on every call.
    """
    return zlib.decompress(
"x\xda\x01M\x01\xb2\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x01\x04IDATX\x85\xed\x941\x0e\x82@\x10E\x9f\xc6`,\x88\xad\
\x8d\x8d\x89r\x02B\xc1\t\xbc\x94\x857\xf04\x9e\xc0C\x00\x95\xb1\xb1\xa52\xda\
h\xc1N\xe1\xc8f5j\x9cD^Ev\x82\x01::\xfe\x9d^\x91e\xd7\xb6\xc2d\xb9\
\x04`\xb8X\xbc\xf5\x80sY\x02p\xdcn[\xeb\xfd\xb7\xa6\x7f\x80\x81\xaf o<O\xd3f\
\xc1\x19y\x1a\xd7\xbf\xf7$\x17\xec\x19\x90\xbd?\x15\x05\x00\xd5z\r\xc0\\n\
\x08\x99p\x89\xa5o<\x9b\x010J\x12\xe0\xf1,\xd83\x10\xafV\xcd\x85K \x04M\x04\
\x92\xcb\\\xfb\x06\x84\xa7M\xa8u_r\x1fv\r\x08\xb1\xfc\x07\x14\x952\xf3\x90\
\xdc\xd3\xa71l\xe0p\x00\xe0R\xd7@8\x91N.}\x91\x9b\xc3t\xda\xdag\xd0\x80$\xdf\
\xed\x00\x88\xf2\xbcYw\tb\xf9\xfe\xd5\x19\xd0\xa7=\xf2\xcdQ\xd83\xe0K\xae\t}\
\xdf\xd2'sd\xae\xc6\x9e\x81P\xf2\x97Q&\xd8l\xee\xca\xf6\x0c\xf8\xf6\xea[\xfc\
\xdc@G\xc7\rv\x18V\xd3#+\xef\x8c\x00\x00\x00\x00IEND\xaeB`\x82\xb38\x8e\xb0"\
    )
def getMailBitmap():
    """Return the mail icon as a wx.Bitmap."""
    image = getMailImage()
    return wx.BitmapFromImage(image)
def getMailImage():
    """Return the mail icon as a wx.Image."""
    return wx.ImageFromStream(cStringIO.StringIO(getMailData()))
def getMailIcon():
    """Return the mail icon as a wx.Icon."""
    mail_icon = wx.EmptyIcon()
    mail_icon.CopyFromBitmap(getMailBitmap())
    return mail_icon
#----------------------------------------------------------------------
def getNoMailData():
    """Return the PNG bytes for the 'no mail' icon.

    The image is stored zlib-compressed in the source and decompressed
    on every call.
    """
    return zlib.decompress(
'x\xda\x01G\x04\xb8\xfb\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x03\xfeIDATX\x85\xed\x97[o\xdb8\x10F\x0fu\xa1$\xeb\x96(A\
\x92\x1a}\xe8\xcf\xdc\xdd?\xeb\xa0h\x12\'\xa9#;\xba\x8b\x12\xb5\x0f\x81\x88\
\xba\xb6w\xb37\xf4a;\x80!\x98\xb04\xe7\xf8\x1b\xce\x8c\x84\x94\xcd\x8f4\xeb\
\x87z\xff\t\xf0\x13\x00p~\xfb\xf5\x97i\x9a\xa6\xbd\xc5,\xcb\xb8\xba\xba\xe2\
\xee\xee\x8e\xc5b\x81e\xfd=\xce\x71\x1c\xa9\xeb\x9a\xaa\xaax\x7f\x7fg\xb3\
\xd9\x1c\x02$Ir\xb0\xe8\xfb\xbey^^^\x12\x86!R\x4a\x6c\xfbm-;\x8e#}\xdfS\x55\
\x15\x8f\x8f\x8fTU\x85\xe7y\xa4izH\xe0\xd8\x06u]\xd3u\x1d\xbe\xef\x73}}M\x18\
\x86\xa4iJ\x9a\xa6\xb8\xae\xfb]\x00J)v\xbb\x1d]\xd7\x51\x96%\x8f\x8f\x8f\x8c\
\xe3x\xf4\xb7{\x00R\x4a|\xdfGk\xcd\x30\x0cM\xd3\xb0^\xaf\x11B\xe0\xba.q\x1c#\
\xa5\xc4q\x8er3\x0c\x03}\xdfS\xd75_\xbf~e\xbd^\xd34\r\x8e\xe3\xe0\xfb>\xb6m\
\xd3\xb6-]\xd7\x1d\x07\x08\xc3\x90\x8b\x8b\x0b\x94R4MC\xd7u\xacV+\xba\xae\
\xc3q\x1c\x84\x10\xa4iz\x12`\x1cG\xca\xb2\xe4\xf9\xf9\x99\xdb\xdb[\xee\xef\
\xef\rx\x10\x04x\x9e\xc7f\xb39\r\x90$\t\x1f?~\xa4\xaek6\x9b\rEQ\xd0u\x1d\xbb\
\xdd\x8e\xbb\xbb;\xc6qd\x9a\xa6\x83L\xcc\x91\x17E\xc1z\xbdf\xbd^\xb3\xdb\xed\
\xd0Z\x1b\x80,\xcb\x88\xa2\x08\xa5\x14///\xc7\x01\xd24\xe5\xd3\xa7O\xbc\xbc\
\xbc\xd0\xf7=s\x77\xf4}\xcf\xed\xed-M\xd3`Y\x16B\x08\x92$\xd9\x03\x98k\xbdZ\xad\
x||\xc4\xb2,\xa2("\x0cC\x92$\xe1\xc3\x87\x0fdY\xb6\xe7\xfc\x00\xc0\xf3<\xe28\
6N]\xd7\xc5\xb2,^__)\xcb\x92\xedv\xcb\xfd\xfd=Zk\xa6ib\x18\x06\x00\xaa\xaa2\
\x91o\xb7[\xfa\xbe\'\x8a"\x13\xf9\xe5\xe5%Y\x96\x99\xcc\x9d\x04\xf8\xb6\x14R\
J\xa4\x94\x0c\xc3\x80\xd6\xdaD\xfa\xf9\xf3g\x9a\xa6A\x08\xc1\xf9\xf99\x00y\
\x9e\xb3Z\xadx~~F\x08A\x14EDQD\x9a\xa6,\x97Knnn\xf0<\x8f\xef\xf5\xe6$\x80\
\xef\xfb\xf8\xbeO\xd34\xa6\x96\x00eYR\x96%y\x9e\xf3\xf0\xf0@Q\x14f=\xcfs\xba\
\xae\xdbK{\x92$\xa4ij\xfa\xbfi\x9a\xf7\x01\xcc&\xa5$I\x12\x93\xf2\xd9\x94R|\
\xf9\xf2\x05!\x04\x00\xd34\xa1\xb5&\x0cC\xe3<MS\xe28\xfeS\xed8\n0\x9f\xf6\
\xb9\xff\x83 `\x1cG\xe3\xb0(\n\xaa\xaa\xa2\xef{\x03\x1a\x86!q\x1c\x13\xc71Q\
\x14\xe1\xfb>\xae\xeb"\x84`\x18\x06\xf3\xdfw\x01h\xad\xe9\xfb\x9e\xae\xebPJa\
Y\x16q\x1cc\xdb\xb6\xc9\x84\x10\xe2(@\x9a\xa6\x04A\x80\x10\x02\xa5\x14]\xd7\
\xd1u\xdd\xc9L\xec\x01h\xad\x19\xc7\x11\xad5u]\x1b\xe7s4\xf3SJ\x89eY\xb4m\
\x0b\xbcu\xcf\xd9\xd9\x19gggDQ\x84\x94\x12\xa5\x14\xd34\xa1\x94\xaa\xaek\x82\
0>N\x02\xccC\x64\x18\x06^__\xb1m\x9b0\x0c\xf1<\x0f\xd7u\x99\xa6\x89\xf3\xf3s\
\xf2<\x07\xde\x0e\x1f@\x14E,\x97K...L\xa4s\xf4\xf3\\\x98\xa6\t\xc7q\x0ef\xc2\
\x1e\xc0L\xab\xb5F)\x85\xeb\xba,\x16\x0b\x82 \xc0u]#<\x8e\xe3\xd0\xb6-\x9e\
\xe7\x01\x10\xc71WWWdY\x06\xbc\xb5\xabR\n\xdb\xb6)\x8a\x82\xb6mi\xdb\x16\xcb\
\xb2PJ\x9d\x06\x98ew\xb1X\x18\xfd\x0e\x82\xc0\xcc\x81\xd9\x82 `\xb9\\\x9a\
\xcd\xa4\x94&\xc5\xf0v>\x1c\xc7!\x08\x02\xa6i\xc2\xb6m\x94RF\xdaO\x02\xcc\
\x9a>\x0b\x89\xe7yx\x9ewp!\x99\xc1N\x99m\xdb\xe63\x7f\xdf\xedv\xf4}\xff\xc7%\
\xf0}\x9f4MM\xddOM\xbd\xbfb\xf3\x1eQ\x141\x8e\xa3)\xdbQ\x80yn\xcf\xa7\xfc[\
\xbd\xff\'fY\x96\xb9k|\x1f\xd4\xd130\xcf\xff\x7f\xd3\xc6q4w\x8c=\x80\xa6i\
\x8c\xb8\xe4yn.\x11\xff\x85)\xa5\xd8n\xb7\xd4um\xd6\xc4\xcfw\xc3\xff=\xc0\
\xefa\x89?u1\xd3\xf5 \x00\x00\x00\x00IEND\xaeB`\x82\xc4\x1f\x08\x9f' )
def getNoMailBitmap():
    """Return the no-mail icon as a wx.Bitmap."""
    image = getNoMailImage()
    return wx.BitmapFromImage(image)
def getNoMailImage():
    """Return the no-mail icon as a wx.Image."""
    return wx.ImageFromStream(cStringIO.StringIO(getNoMailData()))
def getNoMailIcon():
    """Return the no-mail icon as a wx.Icon."""
    no_mail_icon = wx.EmptyIcon()
    no_mail_icon.CopyFromBitmap(getNoMailBitmap())
    return no_mail_icon
#----------------------------------------------------------------------
def getErrMailData():
    """Return the PNG bytes for the 'mail error' icon.

    The image is stored zlib-compressed in the source and decompressed
    on every call.
    """
    return zlib.decompress(
'x\xda\x01W\x05\xa8\xfa\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\
\x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\
\x08d\x88\x00\x00\x05\x0eIDATX\x85\xcd\x97\xcf\x8f\xdb\xd4\x16\xc7?v\xae\x7f\
\xc5N&\x8e\xd3L\x92\xceL%T\x15\rbQQ!\xe8\x0e\xc4\x92\xff\x80%H\xac\xdeC\xf0\
\xfe\x94\x07\xdb\xf7\x96\xac\xfa\x1f TT\t\x06\x90\xa0,*UB#\x90f:i'\x95\xe3$\
\x9bX\x8e\xed\xd8\xf1\xaf\x98\xc5\x8c\x43\xb4\x44\x49\x3a\x47\xba\xf2\xd5\xf1\
\xbd\xe7\x7c\xef\xfd\x9e\x73\xae\xa4\x5c\x4a\x5e\xa4\x49\x2f\x34\x3b\x20\xb6\
\x9d\xf0\xd9\xa7\xff\xcd\x5f\x1f\x8d\x38\xf8\xfc\x73\x2a\x8f\x1f\x03\xe0\x1d\
\x1e\x72\xfa\xde\x7b\x7c\x5f\xaf\xf3\xaf\x7f\x7f\x24\xfd\xad\x00\x7b\xf7\xef\
\x73\x3d\x08\xa8\x8e\x46\x18\x57\x3e\x69\x34\xa2\x73\x74\xc4\x5e\xb9\xbc\x6d\
\x38\xa4\x6d\x35\xf0\xad\xa6\xe6\xaf\xe5\x39\x72\x1c\x23\xcf\xe7\x00\xcc\x65\
\x99\xb9\xaa\xf2\xa3\x24\xf1\x66\x14\x6f\xb5\x02\x5b\x6b\xc0\x51\x55\xc4\x6c\
V\x24\x07\x90\xe7s\xc4l\x86\xa3\xaa\xdb\x86C\xfc\xe7\x93\x8f\xf3<\xcf\x97\
\x9c\xedv\x9b\xbd\xbd=\x14EY\xf2_\xff\xee;\xea\xf7\xee\xc1\xc5\xc5\xda`\xf5\
z\x9d\x6f>\xfc\x30?{\xe3\x8d%\x7f\x92$<y\xf2\x84\xa7O\x9f\xae\x02T*\x95U\xa7\
\x10$I\x82\xae\xebX\x96U<v\xb7\x8b\xae\xeb\x1b\xbf\x46\xd7u\xf6\xf7\xf7\xb1^\
~\x19\xdf\xf7\x8b\x13\x04\x01B\x08\xaaU\xd5\xd5\xb9\x2e\x90\xe7y\x84a\xc8\
\xee\xee.\x86a`\x9a&\xedv\x1b\xab\xd1@\x3c\x67\x99UU\xa5\xd1h\xa0\xb7\xdbt\
\xbb]...\x18\x8dF\xf4\xfb}\xd24];g\t`\x91L\x92.u\x94\xe79\xc3\xe1\x10UU)\x97\
\xcb\x94\xc2\x90r\x96\xb1I\xb6Y\x96\x11\x86!\xe3\xf1\x98\xc1`\xc0p8$\xcfsvvv\
\x8ax\xd3\xe9\x940\x0c\xd7\x03T\xabU:\x9d\x0e\xa5\xd2e\x8a\xf3\xf3s\xfa\xfd>\
I\x92\x000w]\xdaq\xcc\xa65\x88\xe3\x18\xd7uyrr\xc2\xc9\xc9\t\xa3\xd1\x88k\
\xd7\xae\xd1j\xb5\n\xc0n\xb7\xfb|\x80\xfd\xfd}\xd24%\x08\x02\xe28&\x08\x02\
\x92$\xa1\xd7\xeb\xa1\xb9.N\x1coH\xff;@\xaf\xd7#I\x12L\xd3\xc44M,\xcb\xa2\\.\
#\x84\xc0\xf7}\xfa\xfd\xfef\x80\xbd\xbd=&\x93\tQ\x14aY\x16\xaa\xaa2\x1e\x8fq\
]\x97\xb2\xeb\xf2\xd2\x9f\x00p]\x17\xc7q\xa8\xd5j\xa8\xaaJ\xa9T\xa2^\xafS\
\xadV9;;[\x9a\xb3\x04\xa0\xaa*\x96e!I\x12Q\x14\x15\xfb\x15\xc71\xbe\xef#\x84\
(\xf4\xb1\xce$IB\x08\x81\xa6i\x94\xcbe*\x95J\xa1\xabj\xb5Z\xf1A\x1b\x01\x16V\
\xa9T\x50U\x95\xc1`@\xaf\xd7\xc30M:\x9d\x0e\xcdf\x13M\xd36\x02h\x9a\x46\xb3\
\xd9d\xdei\xa3i\x1aB\x08\x1c\xc7\xa1\xd1h\xa0\xaa\x2a\x7f\xac7\x1b\x01\x34M\
C\xd34\xa6\xd3)\xb2,\x17\xef\xa2\xddf\xfa\xca+\x94$\t\xbd\xd7C\x99N\x01H\x4c\
\x93Y\xab\xc5\xf4\xf00Q\x6d\x17\xe7]\x96e\x0c\xc3\x28\xde\x9f\x15\xdf\x73\
\x01\x36Y\x70p@\xef\xdd\x77\x99\xb5Z\xec\x7e\xf1E\x01\x10\x35\x1a\xb8o\xbf\
\xcd\xf9\x9d;\xcc\xae\x14\xffg\x6d+\x80\xd4\xb6\xf1m\x1b\xe5\xe2\x82\xec\xe8\
\xa8\xf0g\x96\xc5\xf4\xe6M\xfc\xc3\xc3\xad\x92\xc3?\xf5\x42\x12E\x11q\x1c\
\x93e\x19\x86a \xc4\xf20\xc5q\x88n\xdc\x60r\x75\x24\xa3\x1b\x37\x10\x8e\x83i\
\x9a\xcb\xe3\x14\x85,\xcb\xf0<oE\xfd\xcf\x05\xf0}\x1f\xd7u\x11B\xd0l6\x8b\
\xca\xb8\xb0\x18\x38\xcf\x32\x92\xab\x82\xa2\xec\xeeb\xdd\xbe\x8ds\x70\xb0\
\x34.\xcb\x32\xa6\xd3)\xbe\xef\xd3h4V\x00W\x00\x92$!\x08\x82\xe2\xa9\xd5j\
\xd8\xb6\xbd\xd2\x01\xcfE\xe1\xc2\x30\x08\xaeDh\x9a&V\xa7\x83m\xdb\x4b\xe3f\
\xb3\x19a\x18\x16\xf1$I*\xca\xfaZ\x80\xc9d\xc2\xe9\xe9)\x95J\x85V\xab\x85i\
\x9a+\xcb\x0f\x97M\xab\xd5j\x15\xc1\x14E\xc10\x8c\x95q\x8b:\xa0\xeb:\xb3\xd9\
\x8c\xd3\xd3S&\x93\xc9f\x80(\x8a\xf0<\x8fj\xb5\x8a\xe38+E\'MS\xd24E\nCjA\x80\
\xbch\x52\x8aB*\xcb\xcc\xae\x92.\xa0\x85\x10\xec\xec\xec\xa0\xeb:\xddn\x17\
\xcf\xf3\x88\xa2h3\xc0\xa2\x19\xd5j\xb5\x95}\x07\x08\x82\x80\xe1p\x88x\xfc\
\x18\xe7\xe8\x08\xa3\xdb\xbd\x04\xeft\x18\xdd\xbdKrx\x88\xe38+\x17\x8fE/\x90\
$\t\xd7u7\x03\x18\x86\x81m\xdbh\x9aV|\xed\xb36\x1d\x8d\x18\x1f\x1f\xa3}\xfd5\
;\xf7\xee!\xfd\xfc\xf3\xe5\xca\xdc\xba\x85\x37\x9fs\x53\x14tIZ\xabxM\xd3\xb0\
m{e\xab\xd6j\x60\xd3\x91)=z\x84\xf3\xe5\x97\x94\x1f>D\x1b\x0c~\x0f>\x18p\xed\
\xfe}\x82\xf1\x98\xe0\x9dw\xf0^}u\xed\xfc8\x8eW5\x10\x86a\xd1$\xfa\xfd>\xaa\
\xaa\xae\x15\x1e@\xeb\xa7\x9fx\xe9\xc1\x03v\x8e\x8f\x91\x9fi\xcb\xcaxL\xed\
\xe1C$\xcf\xe3\x17\xc7\xa1\xf7\x87\xcb\xec\xc2\xd24\xa5\xdf\xef\x13\x04A\xe1\
\xdb\xfa\xbf\xe0\xab\x0f\xde\xcfo\x9e\x9d\x61\xff\xf0\x03\xc6U\x1d\x08ww9\
\xbf\x73\x87\xe3\xeb\xd7y\xeb\x7f\xff\xff{\xff\x8c\x1e\xdd\xbe\x8dqp@\xe9\
\xd7_\xc9\xaf\x00\xbcz\x9d\xee\xdd\xbb<\xaa\xd7\xb7\r\xb7\xfd\n\xfc\xd5\xf6\
\xc2\x9b\xd1o\xd1r.\xaf\xfe\x90\x016\x00\x00\x00\x00IEND\xaeB`\x82\x8a\x1a\x9f\x99' )
def getErrMailBitmap():
    """Return the mail-error icon as a wx.Bitmap."""
    image = getErrMailImage()
    return wx.BitmapFromImage(image)
def getErrMailImage():
    """Return the mail-error icon as a wx.Image."""
    return wx.ImageFromStream(cStringIO.StringIO(getErrMailData()))
def getErrMailIcon():
    """Return the mail-error icon as a wx.Icon."""
    err_icon = wx.EmptyIcon()
    err_icon.CopyFromBitmap(getErrMailBitmap())
    return err_icon
| doudz/checkfeedmail | icon.py | Python | bsd-2-clause | 9,632 | 0.012978 |
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.utils.functional import LazyObject
class PrivateMediaFileSystemStorage(FileSystemStorage):
    """
    Storage that puts files in the private media folder that isn't
    globally available.
    """

    def __init__(self, *args, **kwargs):
        # Start from the private-media defaults; any caller-supplied
        # keyword arguments take precedence over them.
        options = {
            'location': settings.PRIVATE_MEDIA_ROOT,
            'base_url': settings.PRIVATE_MEDIA_URL,
        }
        options.update(kwargs)
        super().__init__(*args, **options)
class PrivateMediaStorage(LazyObject):
    # Lazy wrapper: the concrete storage (and therefore Django settings
    # access) is only instantiated on first attribute access.
    def _setup(self):
        self._wrapped = PrivateMediaFileSystemStorage()
# Ready-to-use module-level instance for FileField(storage=...) usage.
private_media_storage = PrivateMediaStorage()
| Clarity-89/clarityv2 | src/clarityv2/utils/storages.py | Python | mit | 685 | 0 |
"""All constants related to the ZHA component."""
import enum
import logging
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
# ATTR_*: keys used in attribute/device-info dictionaries.
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND = "command"
ATTR_COMMAND_TYPE = "command_type"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_NWK = "nwk"
ATTR_POWER_SOURCE = "power_source"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_VALUE = "value"
# Selectable serial baud rates for the radio connection.
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
# CHANNEL_*: identifiers for the cluster channel handlers.
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
# Cluster command-type / direction identifiers.
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
# Home Assistant platforms ZHA forwards entities to.
COMPONENTS = (BINARY_SENSOR, DEVICE_TRACKER, FAN, LIGHT, LOCK, SENSOR, SWITCH)
# CONF_*: configuration keys.
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONTROLLER = "controller"
# DATA_*: keys into hass.data storage.
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
# DEBUG_*: logger names and levels used for debug-log handling.
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
    DEBUG_COMP_BELLOWS: logging.DEBUG,
    DEBUG_COMP_ZHA: logging.DEBUG,
    DEBUG_COMP_ZIGPY: logging.DEBUG,
    DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
    DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
    DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
# Defaults applied when configuration omits a value.
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
# First cluster ID of the manufacturer-specific range.
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
class RadioType(enum.Enum):
    """Possible options for radio type."""

    ezsp = "ezsp"
    xbee = "xbee"
    deconz = "deconz"
    zigate = "zigate"

    @classmethod
    def list(cls):
        """Return a list of every member's string value."""
        # NOTE: `list` shadows the builtin, but the name is part of the
        # public interface and must be preserved.
        return [member.value for member in cls]
# Attribute-reporting configuration building blocks; a REPORT_CONFIG_*
# tuple below is (min interval, max interval, reportable change).
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (
    REPORT_CONFIG_MIN_INT,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
    REPORT_CONFIG_MIN_INT_ASAP,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
    REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
    REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
    REPORT_CONFIG_MIN_INT_IMMEDIATE,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
    REPORT_CONFIG_MIN_INT_OP,
    REPORT_CONFIG_MAX_INT,
    REPORT_CONFIG_RPT_CHANGE,
)
# SENSOR_*: sensor type identifiers.
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = "electrical_measurement"
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = "humidity"
SENSOR_ILLUMINANCE = "illuminance"
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = "occupancy"
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = "pressure"
SENSOR_TEMPERATURE = "temperature"
SENSOR_TYPE = "sensor_type"
# SIGNAL_*: dispatcher signal names.
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
# Placeholders for missing device metadata.
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
# ZHA_GW_*: gateway message keys/types.
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_RADIO = "radio"
ZHA_GW_RADIO_DESCRIPTION = "radio_description"
| fbradyirl/home-assistant | homeassistant/components/zha/core/const.py | Python | apache-2.0 | 5,498 | 0.000182 |
"""Support for RainMachine devices."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_BINARY_SENSORS, CONF_IP_ADDRESS, CONF_PASSWORD,
CONF_PORT, CONF_SCAN_INTERVAL, CONF_SENSORS, CONF_SSL,
CONF_MONITORED_CONDITIONS, CONF_SWITCHES)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from .config_flow import configured_instances
from .const import (
DATA_CLIENT, DEFAULT_PORT, DEFAULT_SCAN_INTERVAL, DEFAULT_SSL, DOMAIN)
REQUIREMENTS = ['regenmaschine==1.2.0']
_LOGGER = logging.getLogger(__name__)
# Key for the per-entry update-listener cancel callback in hass.data.
DATA_LISTENER = 'listener'
# Dispatcher topics used to push updates to entities.
PROGRAM_UPDATE_TOPIC = '{0}_program_update'.format(DOMAIN)
SENSOR_UPDATE_TOPIC = '{0}_data_update'.format(DOMAIN)
ZONE_UPDATE_TOPIC = '{0}_zone_update'.format(DOMAIN)
# Configuration / service-call data keys.
CONF_CONTROLLERS = 'controllers'
CONF_PROGRAM_ID = 'program_id'
CONF_SECONDS = 'seconds'
CONF_ZONE_ID = 'zone_id'
CONF_ZONE_RUN_TIME = 'zone_run_time'
DEFAULT_ATTRIBUTION = 'Data provided by Green Electronics LLC'
DEFAULT_ICON = 'mdi:water'
# Default zone run time: 10 minutes, in seconds.
DEFAULT_ZONE_RUN = 60 * 10
# Sensor/binary-sensor condition type identifiers.
TYPE_FREEZE = 'freeze'
TYPE_FREEZE_PROTECTION = 'freeze_protection'
TYPE_FREEZE_TEMP = 'freeze_protect_temp'
TYPE_HOT_DAYS = 'extra_water_on_hot_days'
TYPE_HOURLY = 'hourly'
TYPE_MONTH = 'month'
TYPE_RAINDELAY = 'raindelay'
TYPE_RAINSENSOR = 'rainsensor'
TYPE_WEEKDAY = 'weekday'
# type -> (friendly name, icon) for binary sensors.
BINARY_SENSORS = {
    TYPE_FREEZE: ('Freeze Restrictions', 'mdi:cancel'),
    TYPE_FREEZE_PROTECTION: ('Freeze Protection', 'mdi:weather-snowy'),
    TYPE_HOT_DAYS: ('Extra Water on Hot Days', 'mdi:thermometer-lines'),
    TYPE_HOURLY: ('Hourly Restrictions', 'mdi:cancel'),
    TYPE_MONTH: ('Month Restrictions', 'mdi:cancel'),
    TYPE_RAINDELAY: ('Rain Delay Restrictions', 'mdi:cancel'),
    TYPE_RAINSENSOR: ('Rain Sensor Restrictions', 'mdi:cancel'),
    TYPE_WEEKDAY: ('Weekday Restrictions', 'mdi:cancel'),
}
# type -> (friendly name, icon, unit) for sensors.
SENSORS = {
    TYPE_FREEZE_TEMP: ('Freeze Protect Temperature', 'mdi:thermometer', '°C'),
}
# Voluptuous schemas for platform configuration and service calls.
BINARY_SENSOR_SCHEMA = vol.Schema({
    vol.Optional(CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSORS)):
        vol.All(cv.ensure_list, [vol.In(BINARY_SENSORS)])
})
SENSOR_SCHEMA = vol.Schema({
    vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSORS)):
        vol.All(cv.ensure_list, [vol.In(SENSORS)])
})
SERVICE_PAUSE_WATERING = vol.Schema({
    vol.Required(CONF_SECONDS): cv.positive_int,
})
SERVICE_START_PROGRAM_SCHEMA = vol.Schema({
    vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_START_ZONE_SCHEMA = vol.Schema({
    vol.Required(CONF_ZONE_ID): cv.positive_int,
    vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN):
        cv.positive_int,
})
SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema({
    vol.Required(CONF_PROGRAM_ID): cv.positive_int,
})
SERVICE_STOP_ZONE_SCHEMA = vol.Schema({
    vol.Required(CONF_ZONE_ID): cv.positive_int,
})
SWITCH_SCHEMA = vol.Schema({vol.Optional(CONF_ZONE_RUN_TIME): cv.positive_int})
CONTROLLER_SCHEMA = vol.Schema({
    vol.Required(CONF_IP_ADDRESS): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
    vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL):
        cv.time_period,
    vol.Optional(CONF_BINARY_SENSORS, default={}): BINARY_SENSOR_SCHEMA,
    vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
    vol.Optional(CONF_SWITCHES, default={}): SWITCH_SCHEMA,
})
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_CONTROLLERS):
            vol.All(cv.ensure_list, [CONTROLLER_SCHEMA]),
    }),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the RainMachine component."""
    hass.data[DOMAIN] = {}
    hass.data[DOMAIN][DATA_CLIENT] = {}
    hass.data[DOMAIN][DATA_LISTENER] = {}
    # No YAML section: everything is handled through config entries.
    if DOMAIN not in config:
        return True
    conf = config[DOMAIN]
    for controller in conf[CONF_CONTROLLERS]:
        # Skip controllers that already have a config entry.
        if controller[CONF_IP_ADDRESS] in configured_instances(hass):
            continue
        # Import each YAML-configured controller into the config-entry flow.
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={'source': SOURCE_IMPORT},
                data=controller))
    return True
async def async_setup_entry(hass, config_entry):
    """Set up RainMachine as config entry."""
    from regenmaschine import login
    from regenmaschine.errors import RainMachineError
    websession = aiohttp_client.async_get_clientsession(hass)
    try:
        client = await login(
            config_entry.data[CONF_IP_ADDRESS],
            config_entry.data[CONF_PASSWORD],
            websession,
            port=config_entry.data[CONF_PORT],
            ssl=config_entry.data[CONF_SSL])
        rainmachine = RainMachine(
            client,
            config_entry.data.get(CONF_BINARY_SENSORS, {}).get(
                CONF_MONITORED_CONDITIONS, list(BINARY_SENSORS)),
            config_entry.data.get(CONF_SENSORS, {}).get(
                CONF_MONITORED_CONDITIONS, list(SENSORS)),
            config_entry.data.get(CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN))
        # Prime restriction data before any entities are created.
        await rainmachine.async_update()
    except RainMachineError as err:
        _LOGGER.error('An error occurred: %s', err)
        # Tell Home Assistant to retry this entry's setup later.
        raise ConfigEntryNotReady
    hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine
    for component in ('binary_sensor', 'sensor', 'switch'):
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(
                config_entry, component))
    async def refresh(event_time):
        """Refresh RainMachine sensor data."""
        _LOGGER.debug('Updating RainMachine sensor data')
        await rainmachine.async_update()
        async_dispatcher_send(hass, SENSOR_UPDATE_TOPIC)
    # Poll on the configured interval; keep the cancel callback so
    # async_unload_entry can remove the listener again.
    hass.data[DOMAIN][DATA_LISTENER][
        config_entry.entry_id] = async_track_time_interval(
            hass,
            refresh,
            timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL]))
    async def pause_watering(service):
        """Pause watering for a set number of seconds."""
        await rainmachine.client.watering.pause_all(service.data[CONF_SECONDS])
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
    async def start_program(service):
        """Start a particular program."""
        await rainmachine.client.programs.start(service.data[CONF_PROGRAM_ID])
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
    async def start_zone(service):
        """Start a particular zone for a certain amount of time."""
        await rainmachine.client.zones.start(
            service.data[CONF_ZONE_ID], service.data[CONF_ZONE_RUN_TIME])
        async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
    async def stop_all(service):
        """Stop all watering."""
        await rainmachine.client.watering.stop_all()
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
    async def stop_program(service):
        """Stop a program."""
        await rainmachine.client.programs.stop(service.data[CONF_PROGRAM_ID])
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
    async def stop_zone(service):
        """Stop a zone."""
        await rainmachine.client.zones.stop(service.data[CONF_ZONE_ID])
        async_dispatcher_send(hass, ZONE_UPDATE_TOPIC)
    async def unpause_watering(service):
        """Unpause watering."""
        await rainmachine.client.watering.unpause_all()
        async_dispatcher_send(hass, PROGRAM_UPDATE_TOPIC)
    # Register every service with its validation schema.
    for service, method, schema in [
            ('pause_watering', pause_watering, SERVICE_PAUSE_WATERING),
            ('start_program', start_program, SERVICE_START_PROGRAM_SCHEMA),
            ('start_zone', start_zone, SERVICE_START_ZONE_SCHEMA),
            ('stop_all', stop_all, {}),
            ('stop_program', stop_program, SERVICE_STOP_PROGRAM_SCHEMA),
            ('stop_zone', stop_zone, SERVICE_STOP_ZONE_SCHEMA),
            ('unpause_watering', unpause_watering, {}),
    ]:
        hass.services.async_register(DOMAIN, service, method, schema=schema)
    return True
async def async_unload_entry(hass, config_entry):
    """Unload a RainMachine config entry."""
    # (Docstring previously said "OpenUV" -- a copy/paste from another
    # component; corrected.)
    hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
    # Cancel the periodic refresh created in async_setup_entry.
    remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(
        config_entry.entry_id)
    remove_listener()
    for component in ('binary_sensor', 'sensor', 'switch'):
        await hass.config_entries.async_forward_entry_unload(
            config_entry, component)
    return True
class RainMachine:
    """Define a generic RainMachine object."""

    def __init__(
            self, client, binary_sensor_conditions, sensor_conditions,
            default_zone_runtime):
        """Initialize with a regenmaschine client and monitored conditions."""
        self.client = client
        self.binary_sensor_conditions = binary_sensor_conditions
        self.sensor_conditions = sensor_conditions
        self.default_zone_runtime = default_zone_runtime
        self.device_mac = client.mac
        self.restrictions = {}

    async def async_update(self):
        """Update sensor/binary sensor data."""
        self.restrictions['current'] = await self.client.restrictions.current()
        self.restrictions['global'] = \
            await self.client.restrictions.universal()
class RainMachineEntity(Entity):
    """Define a generic RainMachine entity."""

    def __init__(self, rainmachine):
        """Initialize."""
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        self._dispatcher_handlers = []
        self._name = None
        self.rainmachine = rainmachine

    @property
    def device_info(self):
        """Return device registry information for this entity."""
        client = self.rainmachine.client
        return {
            'identifiers': {(DOMAIN, client.mac)},
            'name': client.name,
            'manufacturer': 'RainMachine',
            'model': 'Version {0} (API: {1})'.format(
                client.hardware_version, client.api_version),
            'sw_version': client.software_version,
        }

    @property
    def device_state_attributes(self) -> dict:
        """Return the state attributes."""
        return self._attrs

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listeners when removed."""
        for remove_listener in self._dispatcher_handlers:
            remove_listener()
| HydrelioxGitHub/home-assistant | homeassistant/components/rainmachine/__init__.py | Python | apache-2.0 | 10,913 | 0 |
import logging
import six
import warnings
from ..auth import auth
from ..constants import INSECURE_REGISTRY_DEPRECATION_WARNING
from .. import utils
from .. import errors
log = logging.getLogger(__name__)
class ImageApiMixin(object):
    """Mixin supplying the image-related endpoints of the Docker API client."""
    @utils.check_resource
    def get_image(self, image):
        """Return a raw stream of the image's tarball contents."""
        res = self._get(self._url("/images/{0}/get", image), stream=True)
        self._raise_for_status(res)
        return res.raw
    @utils.check_resource
    def history(self, image):
        """Return the layer history of an image as parsed JSON."""
        res = self._get(self._url("/images/{0}/history", image))
        return self._result(res, True)
    def images(self, name=None, quiet=False, all=False, viz=False,
               filters=None):
        """List images; with `quiet` return only their IDs."""
        if viz:
            if utils.compare_version('1.7', self._version) >= 0:
                raise Exception('Viz output is not supported in API >= 1.7!')
            # NOTE(review): unlike the other endpoints this path lacks a
            # leading '/' ("images/viz" vs "/images/json") -- verify
            # against _url() handling.
            return self._result(self._get(self._url("images/viz")))
        params = {
            'filter': name,
            'only_ids': 1 if quiet else 0,
            'all': 1 if all else 0,
        }
        if filters:
            params['filters'] = utils.convert_filters(filters)
        res = self._result(self._get(self._url("/images/json"), params=params),
                           True)
        if quiet:
            return [x['Id'] for x in res]
        return res
    def import_image(self, src=None, repository=None, tag=None, image=None):
        """Import an image from a file path, URL, raw data, or another image.

        Dispatches to the more specific import_image_from_* helpers based
        on which argument is supplied.
        """
        if src:
            if isinstance(src, six.string_types):
                # A string may be a local file path or a URL; try the
                # file first and fall back to URL import on IOError.
                try:
                    result = self.import_image_from_file(
                        src, repository=repository, tag=tag)
                except IOError:
                    result = self.import_image_from_url(
                        src, repository=repository, tag=tag)
            else:
                result = self.import_image_from_data(
                    src, repository=repository, tag=tag)
        elif image:
            result = self.import_image_from_image(
                image, repository=repository, tag=tag)
        else:
            # NOTE(review): a bare Exception here makes targeted handling
            # awkward for callers; a ValueError would be more precise.
            raise Exception("Must specify a src or image")
        return result
    def import_image_from_data(self, data, repository=None, tag=None):
        """Import an image from in-memory tar data."""
        u = self._url("/images/create")
        params = {
            'fromSrc': '-',
            'repo': repository,
            'tag': tag
        }
        headers = {
            'Content-Type': 'application/tar',
        }
        return self._result(
            self._post(u, data=data, params=params, headers=headers))
    def import_image_from_file(self, filename, repository=None, tag=None):
        """Import an image from a local tar file."""
        u = self._url("/images/create")
        params = {
            'fromSrc': '-',
            'repo': repository,
            'tag': tag
        }
        headers = {
            'Content-Type': 'application/tar',
        }
        with open(filename, 'rb') as f:
            return self._result(
                self._post(u, data=f, params=params, headers=headers,
                           timeout=None))
    def import_image_from_stream(self, stream, repository=None, tag=None):
        """Import an image from a chunked stream of tar data."""
        u = self._url("/images/create")
        params = {
            'fromSrc': '-',
            'repo': repository,
            'tag': tag
        }
        headers = {
            'Content-Type': 'application/tar',
            'Transfer-Encoding': 'chunked',
        }
        return self._result(
            self._post(u, data=stream, params=params, headers=headers))
    def import_image_from_url(self, url, repository=None, tag=None):
        """Import an image from a URL the daemon can fetch."""
        u = self._url("/images/create")
        params = {
            'fromSrc': url,
            'repo': repository,
            'tag': tag
        }
        return self._result(
            self._post(u, data=None, params=params))
    def import_image_from_image(self, image, repository=None, tag=None):
        """Import (re-tag) from an image already known to the daemon."""
        u = self._url("/images/create")
        params = {
            'fromImage': image,
            'repo': repository,
            'tag': tag
        }
        return self._result(
            self._post(u, data=None, params=params))
    @utils.check_resource
    def insert(self, image, url, path):
        """Deprecated: insert a file from `url` into `image` at `path`."""
        if utils.compare_version('1.12', self._version) >= 0:
            raise errors.DeprecatedMethod(
                'insert is not available for API version >=1.12'
            )
        api_url = self._url("/images/{0}/insert", image)
        params = {
            'url': url,
            'path': path
        }
        return self._result(self._post(api_url, params=params))
    @utils.check_resource
    def inspect_image(self, image):
        """Return detailed information about an image as parsed JSON."""
        return self._result(
            self._get(self._url("/images/{0}/json", image)), True
        )
    def load_image(self, data):
        """Load a tarball of one or more images into the daemon."""
        res = self._post(self._url("/images/load"), data=data)
        self._raise_for_status(res)
    def pull(self, repository, tag=None, stream=False,
             insecure_registry=False, auth_config=None):
        """Pull an image (repository[:tag]) from a registry.

        `auth_config` overrides credentials loaded from the config file;
        `insecure_registry` is deprecated and only emits a warning.
        """
        if insecure_registry:
            warnings.warn(
                INSECURE_REGISTRY_DEPRECATION_WARNING.format('pull()'),
                DeprecationWarning
            )
        if not tag:
            repository, tag = utils.parse_repository_tag(repository)
        registry, repo_name = auth.resolve_repository_name(repository)
        params = {
            'tag': tag,
            'fromImage': repository
        }
        headers = {}
        if utils.compare_version('1.5', self._version) >= 0:
            # If we don't have any auth data so far, try reloading the config
            # file one more time in case anything showed up in there.
            if auth_config is None:
                log.debug('Looking for auth config')
                if not self._auth_configs:
                    log.debug(
                        "No auth config in memory - loading from filesystem"
                    )
                    self._auth_configs = auth.load_config()
                authcfg = auth.resolve_authconfig(self._auth_configs, registry)
                # Do not fail here if no authentication exists for this
                # specific registry as we can have a readonly pull. Just
                # put the header if we can.
                if authcfg:
                    log.debug('Found auth config')
                    # auth_config needs to be a dict in the format used by
                    # auth.py username , password, serveraddress, email
                    headers['X-Registry-Auth'] = auth.encode_header(
                        authcfg
                    )
                else:
                    log.debug('No auth config found')
            else:
                log.debug('Sending supplied auth config')
                headers['X-Registry-Auth'] = auth.encode_header(auth_config)
        response = self._post(
            self._url('/images/create'), params=params, headers=headers,
            stream=stream, timeout=None
        )
        self._raise_for_status(response)
        if stream:
            return self._stream_helper(response)
        return self._result(response)
    def push(self, repository, tag=None, stream=False,
             insecure_registry=False):
        """Push an image (repository[:tag]) to a registry.

        `insecure_registry` is deprecated and only emits a warning.
        """
        if insecure_registry:
            warnings.warn(
                INSECURE_REGISTRY_DEPRECATION_WARNING.format('push()'),
                DeprecationWarning
            )
        if not tag:
            repository, tag = utils.parse_repository_tag(repository)
        registry, repo_name = auth.resolve_repository_name(repository)
        u = self._url("/images/{0}/push", repository)
        params = {
            'tag': tag
        }
        headers = {}
        if utils.compare_version('1.5', self._version) >= 0:
            # If we don't have any auth data so far, try reloading the config
            # file one more time in case anything showed up in there.
            if not self._auth_configs:
                self._auth_configs = auth.load_config()
            authcfg = auth.resolve_authconfig(self._auth_configs, registry)
            # Do not fail here if no authentication exists for this specific
            # registry as we can have a readonly pull. Just put the header if
            # we can.
            if authcfg:
                headers['X-Registry-Auth'] = auth.encode_header(authcfg)
        response = self._post_json(
            u, None, headers=headers, stream=stream, params=params
        )
        self._raise_for_status(response)
        if stream:
            return self._stream_helper(response)
        return self._result(response)
    @utils.check_resource
    def remove_image(self, image, force=False, noprune=False):
        """Remove an image; `force` removes even if tagged in multiple repos."""
        params = {'force': force, 'noprune': noprune}
        res = self._delete(self._url("/images/{0}", image), params=params)
        self._raise_for_status(res)
    def search(self, term):
        """Search Docker Hub for images matching `term`."""
        return self._result(
            self._get(self._url("/images/search"), params={'term': term}),
            True
        )
    @utils.check_resource
    def tag(self, image, repository, tag=None, force=False):
        """Tag an image into a repository; return True on HTTP 201."""
        params = {
            'tag': tag,
            'repo': repository,
            'force': 1 if force else 0
        }
        url = self._url("/images/{0}/tag", image)
        res = self._post(url, params=params)
        self._raise_for_status(res)
        return res.status_code == 201
| AccelAI/accel.ai | flask-aws/lib/python2.7/site-packages/docker/api/image.py | Python | mit | 9,342 | 0 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Script to fix the links in the staged website.
Finds all internal links which do not have index.html at the end and appends
index.html in the appropriate place (preserving anchors, etc).
Usage:
From root directory, after running the jekyll build, execute
'python .jenkins/append_index_html_to_internal_links.py'.
Dependencies:
beautifulsoup4
Installable via pip as 'sudo pip install beautifulsoup4' or apt via
'sudo apt-get install python-beautifulsoup4'.
"""
from __future__ import print_function
import argparse
import fnmatch
import os
import re
from bs4 import BeautifulSoup
# Py2/Py3 shim: 'unicode' exists only on Python 2; alias to str on Python 3.
try:
  unicode  # pylint: disable=unicode-builtin
except NameError:
  unicode = str
# Original link match. Matches any string which starts with '/' and doesn't
# have a file extension.
linkMatch = r'^\/(.*\.(?!([^\/]+)$))?[^.]*$'
# Regex which matches strings of type /internal/link/#anchor. Breaks into two
# groups for ease of inserting 'index.html'.
anchorMatch1 = r'(.+\/)(#[^\/]+$)'
# Regex which matches strings of type /internal/link#anchor. Breaks into two
# groups for ease of inserting 'index.html'.
anchorMatch2 = r'(.+\/[a-zA-Z0-9]+)(#[^\/]+$)'
parser = argparse.ArgumentParser(description='Fix links in the staged website.')
parser.add_argument('content_dir', help='Generated content directory to fix links in')
args = parser.parse_args()
matches = []
# Recursively walk content directory and find all html files.
for root, dirnames, filenames in os.walk(args.content_dir):
  for filename in fnmatch.filter(filenames, '*.html'):
    # Javadoc does not have the index.html problem, so omit it.
    if 'javadoc' not in root:
      matches.append(os.path.join(root, filename))
print('Matches: ' + str(len(matches)))
# Iterates over each matched file looking for link matches.
for match in matches:
  print('Fixing links in: ' + match)
  mf = open(match)
  # NOTE(review): BeautifulSoup is given no explicit parser, so the
  # best available one on the host is used -- results may differ between
  # environments; confirm lxml/html.parser availability on the builders.
  soup = BeautifulSoup(mf)
  # Iterates over every <meta> which is used for aliases - redirected links
  for meta in soup.findAll('meta'):
    try:
      content = meta['content']
      # Redirect metas look like "0; url=/target"; strip the prefix to
      # inspect just the URL part.
      alias = content.replace('0; url=', '')
      if re.match(linkMatch, alias) is not None:
        if alias.endswith('/'):
          # /internal/link/
          meta['content'] = content + 'index.html'
        else:
          # /internal/link
          meta['content'] = content + '/index.html'
        # NOTE(review): the file is closed and rewritten once per fixed
        # link (close() is idempotent, so repeated calls are harmless,
        # but each hit rewrites the whole file) -- intentional? confirm.
        mf.close()
        html = unicode(soup).encode('utf-8')
        # Write back to the file.
        with open(match, "wb") as f:
          print('Replacing ' + content + ' with: ' + meta['content'])
          f.write(html)
    except KeyError as e:
      # Some <meta> tags don't have url.
      continue
  # Iterates over every <a>
  for a in soup.findAll('a'):
    try:
      hr = a['href']
      if re.match(linkMatch, hr) is not None:
        if hr.endswith('/'):
          # /internal/link/
          a['href'] = hr + 'index.html'
        elif re.match(anchorMatch1, hr) is not None:
          # /internal/link/#anchor
          mat = re.match(anchorMatch1, hr)
          a['href'] = mat.group(1) + 'index.html' + mat.group(2)
        elif re.match(anchorMatch2, hr) is not None:
          # /internal/link#anchor
          mat = re.match(anchorMatch2, hr)
          a['href'] = mat.group(1) + '/index.html' + mat.group(2)
        else:
          # /internal/link
          a['href'] = hr + '/index.html'
        mf.close()
        html = unicode(soup).encode('utf-8')
        # Write back to the file.
        with open(match, "wb") as f:
          print('Replacing ' + hr + ' with: ' + a['href'])
          f.write(html)
    except KeyError as e:
      # Some <a> tags don't have an href.
      continue
| lukecwik/incubator-beam | website/append_index_html_to_internal_links.py | Python | apache-2.0 | 4,461 | 0.008518 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-07 16:56
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    # Auto-generated initial schema for the offers app (Django 1.9):
    # Offer (belongs to a shops.Shop), OfferHistory (usage log per user)
    # and OfferReview (free-text review per user). Do not hand-edit the
    # operations; create a follow-up migration instead.

    initial = True

    dependencies = [
        ('shops', '0001_initial'),
        # Depend on whatever model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Offer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('name', models.CharField(max_length=300)),
                ('expiration', models.DateTimeField()),
                ('count', models.IntegerField(default=0)),
                ('shop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offers', to='shops.Shop')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='OfferHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('offer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offer_history', to='offers.Offer')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offers_used', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='OfferReview',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('review', models.TextField()),
                ('offer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offer_reviews', to='offers.Offer')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='offers_reviewed', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| cliffton/localsecrets | offers/migrations/0001_initial.py | Python | mit | 3,179 | 0.004404 |
from impedance_map.sphere import correlation_coefficient, form_factor, \
pair_distribution_function_PY, structure_factor_function_PY, \
cross_section_dimension, fit_correlation_coefficient_nls, \
fit_form_factor_nls
import numpy as np
import math
import unittest
class TestCode(unittest.TestCase):
    """Regression tests for impedance_map.sphere.

    Expected arrays are hard-coded fixtures captured from a known-good
    run of the library (NOTE: the numbering skips test12 in the
    correlation-coefficient group).
    """

    # --- cross_section_dimension: chord half-width sqrt(a^2 - q^2)/a -- #
    def test1_sphere_cross_section_dimension(self):
        # Slice through the centre (q=0) has the full radius.
        a = 1
        q = 0
        A = cross_section_dimension(a, q)
        A_real = 1
        self.assertTrue(A == A_real)
    def test2_sphere_cross_section_dimension(self):
        # Slice at the edge (q=a) degenerates to zero.
        a = 1
        q = 1
        A = cross_section_dimension(a, q)
        A_real = 0
        self.assertTrue(A == A_real)
    def test3_sphere_cross_section_dimension(self):
        a = 1
        q = 0.5
        A = cross_section_dimension(a, q)
        A_real = np.sqrt(1 - 0.5 ** 2)
        self.assertTrue(A == A_real)

    # --- form_factor in 1, 2 and 3 dimensions ----------------------- #
    def test1_sphere_form_factor(self):
        ndim = 1
        a = 1
        k = np.linspace(0, 6, 7)
        H = form_factor(k=k, a=a, ndim=ndim)
        Hr = np.array([1., 0.70807342, 0.20670545, 0.00221276, 0.03579688,
                       0.03678143, 0.0021687])
        self.assertTrue(np.allclose(H, Hr))
    def test2_sphere_form_factor(self):
        ndim = 2
        a = 1
        k = np.linspace(0, 6, 7)
        H = form_factor(k=k, a=a, ndim=ndim)
        Hr = np.array([1., 0.77457807, 0.3326115, 0.05109377, 0.00109043,
                       0.01716929, 0.008506])
        self.assertTrue(np.allclose(H, Hr))
    def test3_sphere_form_factor(self):
        ndim = 3
        a = 1
        k = np.linspace(0, 6, 7)
        H = form_factor(k=k, a=a, ndim=ndim)
        Hr = np.array([1., 0.81632316, 0.42653525, 0.11949293, 0.00758346,
                       0.00325512, 0.00703836])
        self.assertTrue(np.allclose(H, Hr))

    # --- correlation_coefficient: 1-D case is linear in r ------------ #
    def test1_sphere_corr_coeff(self):
        ndim = 1
        a = 1.
        r = np.linspace(0., 2., 11)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.linspace(1., 0., 11)
        self.assertTrue(np.allclose(b, b_real))
    def test2_sphere_corr_coeff(self):
        # Beyond r = 2a the coefficient must be exactly zero.
        ndim = 1
        a = 1.
        r = np.linspace(0., 3., 16)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.zeros(len(r))
        b_real[0:11] = np.linspace(1., 0., 11)
        self.assertTrue(np.allclose(b, b_real))
    def test3_sphere_corr_coeff(self):
        # Scaling a scales the support of the coefficient with it.
        ndim = 1
        a = 3.
        r = np.linspace(0., 6., 11)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.linspace(1., 0., 11)
        self.assertTrue(np.allclose(b, b_real))
    def test4_sphere_corr_coeff(self):
        # Degenerate sphere (a=0) yields an identically-zero coefficient.
        ndim = 1
        a = 0.0
        r = np.linspace(0., 6., 11)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.zeros(11)
        self.assertTrue(np.allclose(b, b_real))
    def test5_sphere_corr_coeff(self):
        ndim = 2
        a = 1.
        r = np.linspace(0., 2., 6)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
                           0.])
        self.assertTrue(np.allclose(b, b_real))
    def test6_sphere_corr_coeff(self):
        ndim = 2
        a = 1.
        r = np.linspace(0., 3.2, 9)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
                           0., 0., 0., 0.])
        self.assertTrue(np.allclose(b, b_real))
    def test7_sphere_corr_coeff(self):
        ndim = 2
        a = 3.
        r = np.linspace(0., 6., 6)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.array([1., 0.74706008, 0.50463158, 0.28475698, 0.10408804,
                           0.])
        self.assertTrue(np.allclose(b, b_real))
    def test8_sphere_corr_coeff(self):
        ndim = 2
        a = 0.0
        r = np.linspace(0., 6., 6)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.zeros(6)
        self.assertTrue(np.allclose(b, b_real))
    def test9_sphere_corr_coeff(self):
        ndim = 3
        a = 1.
        r = np.linspace(0., 2., 6)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0.])
        self.assertTrue(np.allclose(b, b_real))
    def test10_sphere_corr_coeff(self):
        ndim = 3
        a = 1.
        r = np.linspace(0., 3.2, 9)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0., 0., 0., 0.])
        self.assertTrue(np.allclose(b, b_real))
    def test11_sphere_corr_coeff(self):
        ndim = 3
        a = 3.
        r = np.linspace(0., 6., 6)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.array([1., 0.704, 0.432, 0.208, 0.056, 0.])
        self.assertTrue(np.allclose(b, b_real))
    def test13_sphere_corr_coeff(self):
        ndim = 3
        a = 0.0
        r = np.linspace(0., 6., 6)
        b = correlation_coefficient(ndim=ndim, a=a, r=r)
        b_real = np.zeros(6)
        self.assertTrue(np.allclose(b, b_real))

    # --- Percus-Yevick pair distribution / structure factor ---------- #
    def test1_pair_distribution_function_PY(self):
        f = 0.4
        a = 0.01
        rb = np.linspace(1, 5, 5)
        # Radii expressed in units of the sphere diameter 2a.
        r = rb * 2 * a
        g = pair_distribution_function_PY(r, a, f)
        g_real = np.asarray([ 1.665534 , 1.14167826, 1.04312259, 1.01389934, 1.00453527])
        self.assertTrue(np.allclose(g, g_real))
    def test1_structure_factor_function_PY(self):
        k = np.linspace(0, 10, 5)
        f = 0.15
        a = 0.01
        # NOTE(review): a and f are passed as literals rather than the
        # local variables above -- values agree, but confirm intent.
        S = structure_factor_function_PY(k/a, a=0.01, f=0.15)
        S_real = np.asarray([ 0.30887944, 1.03988757, 0.95564256, 0.98177134, 1.00532684])
        self.assertTrue(np.allclose(S, S_real))

    # --- round-trip fits: generate data, recover the radius ---------- #
    def test1_fit_correlation_coefficient_nls(self):
        a = 0.75
        r = np.linspace(0, 3., 10)
        y = correlation_coefficient(a=a, r=r)
        a_guess = fit_correlation_coefficient_nls(r, y)
        self.assertTrue(np.allclose(a, a_guess))
    def test1_fit_form_factor_nls(self):
        a = 0.75
        k = np.linspace(0.01, 3, 10)
        y = form_factor(a=a, k=k)
        a_guess = fit_form_factor_nls(k, y)
        self.assertTrue(np.allclose(a, a_guess))
if __name__ == '__main__':
    # Use print() -- valid on both Python 2 and 3 -- instead of the
    # Python-2-only 'print' statement, which is a SyntaxError on Python 3.
    print('Running unit tests for impedance_map.sphere')
    unittest.main()
| aluchies/impedance_map | impedance_map/tests/test_sphere.py | Python | bsd-3-clause | 6,422 | 0.001246 |
from setuptools import setup, find_packages

# The README (UTF-8) doubles as the long description shown on PyPI.
with open('README.md', encoding="utf-8") as readme_file:
    long_description = readme_file.read()

setup(
    name='datatoaster',
    version='0.1.0',
    description='A Python library that can convert raw data to chart data',
    long_description=long_description,
    author='Harry Yu',
    author_email='[email protected]',
    url='https://github.com/abc612008/datatoaster',
    license="MIT",
    # Ship only the library itself; tests, docs and demos stay out.
    packages=find_packages(exclude=('tests', 'docs', 'demo'))
)
| abc612008/datatoaster | setup.py | Python | mit | 474 | 0.004219 |
#!/usr/bin/env python
import click
from aeriscloud.cli.helpers import standard_options, Command
@click.command(cls=Command)
@standard_options(start_prompt=False)
def cli(box):
    """
    Destroy a box
    """
    # 'box' appears to be injected by the @standard_options decorator;
    # this irreversibly destroys the underlying VM -- TODO confirm.
    box.destroy()
if __name__ == '__main__':
    cli()
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that creates, updates, queries and deletes a Cloud Spanner instance.
This DAG relies on the following environment variables
* GCP_PROJECT_ID - Google Cloud Platform project for the Cloud Spanner instance.
* GCP_SPANNER_INSTANCE_ID - Cloud Spanner instance ID.
* GCP_SPANNER_DATABASE_ID - Cloud Spanner database ID.
* GCP_SPANNER_CONFIG_NAME - The name of the instance's configuration. Values are of the
form ``projects/<gcp_project>/instanceConfigs/<configuration>``. See also:
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs#InstanceConfig
https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs/list#google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs
* GCP_SPANNER_NODE_COUNT - Number of nodes allocated to the instance.
* GCP_SPANNER_DISPLAY_NAME - The descriptive name for this instance as it appears in UIs.
Must be unique per project and between 4 and 30 characters in length.
"""
import os
import airflow
from airflow import models
from airflow.gcp.operators.spanner import (
CloudSpannerInstanceDatabaseDeleteOperator, CloudSpannerInstanceDatabaseDeployOperator,
CloudSpannerInstanceDatabaseQueryOperator, CloudSpannerInstanceDatabaseUpdateOperator,
CloudSpannerInstanceDeleteOperator, CloudSpannerInstanceDeployOperator,
)
# [START howto_operator_spanner_arguments]
GCP_PROJECT_ID = os.environ.get('GCP_PROJECT_ID', 'example-project')
GCP_SPANNER_INSTANCE_ID = os.environ.get('GCP_SPANNER_INSTANCE_ID', 'testinstance')
GCP_SPANNER_DATABASE_ID = os.environ.get('GCP_SPANNER_DATABASE_ID', 'testdatabase')
GCP_SPANNER_CONFIG_NAME = os.environ.get('GCP_SPANNER_CONFIG_NAME',
                                         'projects/example-project/instanceConfigs/eur3')
GCP_SPANNER_NODE_COUNT = os.environ.get('GCP_SPANNER_NODE_COUNT', '1')
GCP_SPANNER_DISPLAY_NAME = os.environ.get('GCP_SPANNER_DISPLAY_NAME', 'Test Instance')
# OPERATION_ID should be unique per operation
OPERATION_ID = 'unique_operation_id'
# [END howto_operator_spanner_arguments]
# The DAG is triggered manually (schedule_interval=None below); tasks are
# eligible to run from one day ago.
default_args = {
    'start_date': airflow.utils.dates.days_ago(1)
}
# Throughout the DAG, each operator typically appears twice: once with an
# explicit project_id and once without it (the *2 / second variants), which
# presumably exercises the default project of the GCP connection -- confirm.
with models.DAG(
    'example_gcp_spanner',
    default_args=default_args,
    schedule_interval=None  # Override to match your needs
) as dag:
    # Create
    # [START howto_operator_spanner_deploy]
    spanner_instance_create_task = CloudSpannerInstanceDeployOperator(
        project_id=GCP_PROJECT_ID,
        instance_id=GCP_SPANNER_INSTANCE_ID,
        configuration_name=GCP_SPANNER_CONFIG_NAME,
        node_count=int(GCP_SPANNER_NODE_COUNT),
        display_name=GCP_SPANNER_DISPLAY_NAME,
        task_id='spanner_instance_create_task'
    )
    spanner_instance_update_task = CloudSpannerInstanceDeployOperator(
        instance_id=GCP_SPANNER_INSTANCE_ID,
        configuration_name=GCP_SPANNER_CONFIG_NAME,
        node_count=int(GCP_SPANNER_NODE_COUNT) + 1,
        display_name=GCP_SPANNER_DISPLAY_NAME + '_updated',
        task_id='spanner_instance_update_task'
    )
    # [END howto_operator_spanner_deploy]
    # [START howto_operator_spanner_database_deploy]
    spanner_database_deploy_task = CloudSpannerInstanceDatabaseDeployOperator(
        project_id=GCP_PROJECT_ID,
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        ddl_statements=[
            "CREATE TABLE my_table1 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
            "CREATE TABLE my_table2 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
        ],
        task_id='spanner_database_deploy_task'
    )
    spanner_database_deploy_task2 = CloudSpannerInstanceDatabaseDeployOperator(
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        ddl_statements=[
            "CREATE TABLE my_table1 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
            "CREATE TABLE my_table2 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
        ],
        task_id='spanner_database_deploy_task2'
    )
    # [END howto_operator_spanner_database_deploy]
    # [START howto_operator_spanner_database_update]
    spanner_database_update_task = CloudSpannerInstanceDatabaseUpdateOperator(
        project_id=GCP_PROJECT_ID,
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        ddl_statements=[
            "CREATE TABLE my_table3 (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
        ],
        task_id='spanner_database_update_task'
    )
    # [END howto_operator_spanner_database_update]
    # [START howto_operator_spanner_database_update_idempotent]
    spanner_database_update_idempotent1_task = CloudSpannerInstanceDatabaseUpdateOperator(
        project_id=GCP_PROJECT_ID,
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        operation_id=OPERATION_ID,
        ddl_statements=[
            "CREATE TABLE my_table_unique (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
        ],
        task_id='spanner_database_update_idempotent1_task'
    )
    spanner_database_update_idempotent2_task = CloudSpannerInstanceDatabaseUpdateOperator(
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        operation_id=OPERATION_ID,
        ddl_statements=[
            "CREATE TABLE my_table_unique (id INT64, name STRING(MAX)) PRIMARY KEY (id)",
        ],
        task_id='spanner_database_update_idempotent2_task'
    )
    # [END howto_operator_spanner_database_update_idempotent]
    # [START howto_operator_spanner_query]
    spanner_instance_query_task = CloudSpannerInstanceDatabaseQueryOperator(
        project_id=GCP_PROJECT_ID,
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        query=["DELETE FROM my_table2 WHERE true"],
        task_id='spanner_instance_query_task'
    )
    spanner_instance_query_task2 = CloudSpannerInstanceDatabaseQueryOperator(
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        query=["DELETE FROM my_table2 WHERE true"],
        task_id='spanner_instance_query_task2'
    )
    # [END howto_operator_spanner_query]
    # [START howto_operator_spanner_database_delete]
    spanner_database_delete_task = CloudSpannerInstanceDatabaseDeleteOperator(
        project_id=GCP_PROJECT_ID,
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        task_id='spanner_database_delete_task'
    )
    spanner_database_delete_task2 = CloudSpannerInstanceDatabaseDeleteOperator(
        instance_id=GCP_SPANNER_INSTANCE_ID,
        database_id=GCP_SPANNER_DATABASE_ID,
        task_id='spanner_database_delete_task2'
    )
    # [END howto_operator_spanner_database_delete]
    # [START howto_operator_spanner_delete]
    spanner_instance_delete_task = CloudSpannerInstanceDeleteOperator(
        project_id=GCP_PROJECT_ID,
        instance_id=GCP_SPANNER_INSTANCE_ID,
        task_id='spanner_instance_delete_task'
    )
    spanner_instance_delete_task2 = CloudSpannerInstanceDeleteOperator(
        instance_id=GCP_SPANNER_INSTANCE_ID,
        task_id='spanner_instance_delete_task2'
    )
    # [END howto_operator_spanner_delete]
    # Strictly linear pipeline: create/update the instance, deploy and
    # evolve the database, query it, then tear everything down.
    spanner_instance_create_task \
        >> spanner_instance_update_task \
        >> spanner_database_deploy_task \
        >> spanner_database_deploy_task2 \
        >> spanner_database_update_task \
        >> spanner_database_update_idempotent1_task \
        >> spanner_database_update_idempotent2_task \
        >> spanner_instance_query_task \
        >> spanner_instance_query_task2 \
        >> spanner_database_delete_task \
        >> spanner_database_delete_task2 \
        >> spanner_instance_delete_task \
        >> spanner_instance_delete_task2
| Fokko/incubator-airflow | airflow/gcp/example_dags/example_spanner.py | Python | apache-2.0 | 8,612 | 0.002206 |
#!/usr/bin/python
from __future__ import print_function, unicode_literals
__license__ = """
This file is part of GNU FreeFont.
GNU FreeFont is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
GNU FreeFont is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
GNU FreeFont. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Emmanuel Vallois"
__email__ = "[email protected]"
__copyright__ = "Copyright 2011 Emmanuel Vallois"
__date__ = "$Date$"
__version__ = "$Revision$"
__doc__ = """
Writes in the file named by the first argument an HTML page comprising a table
for testing joining cursive script characters.
Runs under normal Python, version 2.7 or above.
Typical usage:
unicode_joining.py "Unicode joining test page.html"
"""
import sys
from codecs import open
from string import Template
from collections import OrderedDict
from itertools import chain
_module_missing_msg = """Please run
generate_arabic_shaping.py
to generate
arabic_shaping.py"""
try:
from arabic_shaping import arabic_shapings, joining_type
except:
print( _module_missing_msg, file=sys.stderr)
sys.exit( 1 )
if len(sys.argv) > 1:
outfile = sys.argv[1]
else:
outfile = 'Unicode joining test page.html'
sys.stdout = open(outfile, 'w', 'utf-8')
class OrderedDefaultDict(OrderedDict):
def __missing__(self, key):
self[key] = rv = []
return rv
def move_to_end(self, key):
tmp = self[key]
del self[key]
self[key] = tmp
arabic_ranges = tuple(chain(range(0x600, 0x6FF +1), range(0x750, 0x77F +1), range(0x8A0, 0x8FF)))
unicode61_new_ranges = [0x604, 0x8A0]
unicode61_new_ranges.extend(range(0x8A2, 0x8AC + 1))
unicode61_new_ranges.extend(range(0x8E4, 0x8FE + 1))
unicode62_new_ranges = [0x605, 0x8A1]
unicode62_new_ranges.extend(range(0x8AD, 0x8B1 + 1))
unicode62_new_ranges.append(0x8FF)
shapings = filter(lambda s: s.joining_type in 'RD' and (s.joining_group != 'No_Joining_Group' or s.code_point not in arabic_ranges), arabic_shapings.values())
jg_shapings_arabic = OrderedDefaultDict()
jg_shapings_other_scripts = OrderedDefaultDict()
for s in shapings:
if s.code_point in arabic_ranges:
jg_shapings_arabic[s.joining_group].append(s)
else:
jg_shapings_other_scripts[s.joining_group].append(s)
if s.code_point == 0x62B:
jg_shapings_arabic.move_to_end('TEH MARBUTA')
jg_shapings_arabic['TEH MARBUTA GOAL']
elif s.code_point == 0x642:
jg_shapings_arabic.move_to_end('GAF')
jg_shapings_arabic['SWASH KAF']
elif s.code_point == 0x646:
jg_shapings_arabic['NYA']
elif s.code_point == 0x647:
jg_shapings_arabic['KNOTTED HEH']
jg_shapings_arabic['HEH GOAL']
elif s.code_point == 0x64A:
jg_shapings_arabic.move_to_end('FARSI YEH')
elif s.code_point in chain(range(0x627, 0x63A + 1), range(0x641, 0x64A + 1)):
jg_shapings_arabic.move_to_end(s.joining_group)
#for jg, ls in jg_shapings_arabic.items():
# for s in ls:
# print(jg, ls, file=sys.stderr)
table_head = '''
<table frame="box" rules="rows">
{}
<colgroup><col/><col/><col/></colgroup>
<colgroup id="characterCols"><col/><col/><col/><col/></colgroup>
<colgroup><col/></colgroup>'''
table_internal_title = '''<tr><td colspan="8"><h2>{}</h2></td></tr>
<tr>
<th rowspan="2">Joining Group</th>
<th rowspan="2">Code Point</th>
<th rowspan="2">Short Name</th>
<th colspan="5">Contextual Forms</th>
</tr>
<tr><th>Isolated</th><th>Final</th><th>Medial</th><th>Initial</th><th>Joined</th></tr>'''
def print_table():
contextual_form_formats = { 'isolat':'{}', 'final>':'‍{}', 'medial':'‍{}‍', 'initia':'{}‍' }
contextual_forms = 'isolat', 'final>', 'medial', 'initia'
def print_shaping(shaping, rowspan):
# print('print_shaping', shaping, file=sys.stderr)
cp = shaping.code_point
char = unichr(cp)
print('<tr{}>'.format(' class="nextVersion"' if cp in unicode61_new_ranges else ' class="furtherFuture"' if cp in unicode62_new_ranges else ''))
if rowspan: print('<td rowspan="{}">{}</td>'.format(rowspan, shaping.joining_group))
print('<td>{:04X}</td>'.format(cp))
print('<td>{}</td>'.format(shaping.short_name))
i = 0
for form in contextual_forms:
print('<td class="ch">{}</td>'.format(contextual_form_formats[form].format(char)))
i += 1
if { 'R':'final>', 'D':'' }[joining_type(cp)] == form:
break
if i < 4:
print('<td colspan="{}"></td>'.format(4 - i))
print('<td class="ch">{}</td>'.format('\u0640' * (4 - i) + char * (i - 1) + ' ' + char))
print('</tr>')
print(table_head.format(caption))
print(table_internal_title.format('Arabic'))
for shaping_list in jg_shapings_arabic.values():
rowspan = len(shaping_list)
for shaping in shaping_list:
print_shaping(shaping, rowspan)
rowspan = None
print(table_internal_title.format('Syriac, Nko and Mandaic'))
for shaping_list in jg_shapings_other_scripts.values():
rowspan = len(shaping_list)
for shaping in shaping_list:
print_shaping(shaping, rowspan)
rowspan = None
print('</table>')
html_heading = Template('''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8"/>
<title>$title</title>
<style type="text/css">
.captionSquare { float: left; width: 2em; height: 1em; margin-right: 0.5em }
caption { width: 60em; text-align: left }
table { text-align: center; font-family: FreeSerif, FreeSans }
td { padding: 10px }
small { font-size: small }
#characterCols { border-left: medium double black; border-right: medium double black }
.nextVersion { background-color: #CCFF99 }
.furtherFuture { background-color: #FFFFCC }
.name { width: 10em }
.ch { vertical-align: baseline; line-height: 75%; font-size: 250%; direction: rtl }
.empty { background:#EEEEEE }
</style>
</head>
<body>
<h1>$title</h1>
<p>Choose the font to test: <select onchange="changefont(this)"><option>FreeSerif</option><option>FreeSerif, bold</option><option>FreeSans</option><option>FreeMono</option></select></p>
<script type="text/javascript">//<![CDATA[
function changefont(select) {
var font = select.options.item(select.selectedIndex).value.split(', ');
var bold = font.length > 1 ? font[1] == 'bold' : false;
font = font[0];
var elementsToStyle = document.getElementsByClassName("ch");
for (i = 0; i < elementsToStyle.length; i++) {
elementsToStyle[i].style.fontFamily = font;
elementsToStyle[i].style.fontWeight = bold ? 'bold' : 'normal';
}
}//]]></script>''')
caption='''<caption><span class="captionSquare nextVersion"> </span> New characters in Unicode 6.1, which will be published in February 2012.
These can be relied upon and will not change or be removed. See <a href="http://www.unicode.org/Public/6.1.0/charts/blocks//U08A0.pdf">the
Unicode chart for the new block <b>Arabic Extended-A</b></a>, and for more about these characters, see <a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3734.pdf">N3734</a>
for U+0604, <a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3882.pdf">the complete
proposal</a> for most characters, <a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3791.pdf">N3791</a> for U+08F0-U+08F3.<br/>
<span class="captionSquare furtherFuture"> </span> Future new characters in Unicode 6.2. These can will probably be standardized this way,
but could in principle still change or be removed. See <a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3990.pdf">N3990, in 4.2 Orthography</a> for U+0605,
<a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n4072.pdf">N4072 proposal</a> about U+08AD-U+08B1, and
<a href="http://std.dkuug.dk/JTC1/SC2/WG2/docs/n3989.pdf">N3989 proposal</a> about U+08FF.</caption>'''
def print_arabic_test_page():
print(html_heading.substitute(title='Test of Joining Characters From Unicode Cursive Scripts'))
print_table()
print('</body>')
print('</html>')
print_arabic_test_page()
| vernnobile/Gnu_Free_Font | tools/test/ranges/Arabic/unicode_joining.py | Python | gpl-3.0 | 8,257 | 0.016108 |
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Remove `managed = False` lines for those models you wish to give write DB access
# Feel free to rename the models, but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'
# into your database.
from __future__ import unicode_literals
from django.db import models
class Attributes(models.Model):
    """Attribute lookup mapped to legacy table ``attributes``."""
    attr_id = models.AutoField(primary_key=True)
    descr = models.CharField(max_length=140)
    # *_en fields presumably hold English translations -- TODO confirm.
    descr_en = models.CharField(max_length=140)
    notes = models.CharField(max_length=250, blank=True)
    class Meta:
        db_table = 'attributes'
        verbose_name= 'Attributes'
# TODO(review): consider adding a __str__/__unicode__ method returning
# the model's name for readable admin/shell output.
class Department(models.Model):
    """Department record mapped to legacy table ``department``."""
    id = models.AutoField(primary_key=True)
    tmima_per = models.TextField()
    # NOTE(review): default='0' on an IntegerField stores the string '0';
    # intentional in this legacy schema? Verify before changing.
    proedros = models.IntegerField(default='0')
    pr_spoudwn = models.TextField()
    pr_spoudwn_en = models.TextField()
    tmima_per_en = models.TextField()
    tmima_en = models.CharField(max_length=5)
    homepage = models.TextField()
    homepage_en = models.TextField()
    lastupdate = models.DateTimeField()
    class Meta:
        db_table = 'department'
        verbose_name= 'Department'
class Instructors(models.Model):
    """Instructor profile (CV, research, subject) in table ``instructors``."""
    # NOTE(review): AutoField with default='0' is unusual; likely an
    # inspectdb artifact -- confirm before relying on it.
    instr_id = models.AutoField(primary_key=True, default='0')
    cv = models.TextField(blank=True)
    cv_en = models.TextField(blank=True)
    research = models.TextField(blank=True)
    research_en = models.TextField(blank=True)
    subject = models.CharField(max_length=255, blank=True)
    subject_en = models.CharField(max_length=255, blank=True)
    lastupdate = models.DateTimeField()
    class Meta:
        db_table = 'instructors'
        verbose_name= 'Instructors'
class Katefth(models.Model):
    """Specialization ("katefthynsi") lookup, table ``katefth``."""
    kat_id = models.AutoField(primary_key=True)
    perigrafi_kat = models.CharField(max_length=100)
    perigrafi_kat_en = models.CharField(max_length=100)
    class Meta:
        db_table = 'katefth'
        verbose_name= 'Katefthnsh'
class KatefthKykloi(models.Model):
    """Join table linking specializations to cycles (``katefth_kykloi``)."""
    # NOTE(review): Django has no composite primary keys; kat_id is marked
    # primary_key while uniqueness really spans (kat_id, kyklos_id) via
    # unique_together -- verify insert/update behavior matches the schema.
    kat_id = models.IntegerField(primary_key=True)
    kyklos_id = models.IntegerField()
    class Meta:
        db_table = 'katefth_kykloi'
        unique_together = ("kat_id", "kyklos_id")
        verbose_name= 'Katefthnsh kykloi'
class Kykloi(models.Model):
    """Course cycle definition, table ``kykloi``."""
    kyklos_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255, blank=True)
    name_en = models.CharField(max_length=255, blank=True)
    notes = models.TextField(blank=True)
    notes_en = models.TextField(blank=True)
    # Plain integer FK to Department -- not a ForeignKey field, so no joins
    # or referential integrity at the ORM level.
    dept_id = models.IntegerField(blank=True, null=True)
    examina = models.IntegerField()
    # NOTE(review): default='0000' on an IntegerField -- legacy artifact?
    indexing = models.IntegerField(default='0000')
    class Meta:
        db_table = 'kykloi'
        verbose_name= 'Kykloi'
class KykloiExamina(models.Model):
    """Semester ("examina") descriptions for cycles, with bilingual notes (legacy ``kykloi_examina`` table)."""
    id = models.AutoField(primary_key=True)
    examina = models.TextField()
    notes = models.CharField(max_length=255, blank=True)
    notes_en = models.CharField(max_length=255, blank=True)
    comments = models.TextField(blank=True)
    class Meta:
        db_table = 'kykloi_examina'
        verbose_name= 'Kykloi examina'
class ModuleKykloi(models.Model):
    """Join of modules to cycles per semester (legacy ``module_kykloi`` table).

    NOTE(review): real key is the (module_id, kyklos_id, semester) triple;
    ``module_id`` is marked PK only because Django has no composite PKs.
    """
    module_id = models.IntegerField(primary_key=True, default='0')
    kyklos_id = models.IntegerField(default='0')
    semester = models.IntegerField(default='0')
    # sort order within a cycle/semester listing; '99' pushes unsorted rows last
    indexing = models.IntegerField(default='99')
    class Meta:
        db_table = 'module_kykloi'
        unique_together = (("module_id", "kyklos_id", "semester"),)
        verbose_name= 'Modules Kyklwn'
class Modules(models.Model):
    """Course module (legacy ``modules`` table): bilingual title/description/notes."""
    id = models.AutoField(primary_key=True)
    module = models.CharField(max_length=255, default='')
    description = models.TextField()
    # NOTE(review): presumably a boolean-ish "elective" flag stored as int — confirm.
    choice = models.IntegerField(default='0')
    module_en = models.CharField(max_length=255, default='')
    description_en = models.TextField()
    notes = models.CharField(max_length=255, default='')
    # NOTE(review): unlike ``notes``, no default='' here — possibly an oversight; confirm against schema.
    notes_en = models.CharField(max_length=255)
    class Meta:
        db_table = 'modules'
        verbose_name= 'Modules'
class ModulesTutors(models.Model):
    """Join of modules to tutors (legacy ``modules_tutors`` table; composite key emulated via unique_together)."""
    module_id = models.IntegerField(primary_key=True, default='0')
    tutor_id = models.IntegerField(default='0')
    last_update = models.DateTimeField()
    class Meta:
        db_table = 'modules_tutors'
        unique_together = (("module_id", "tutor_id"),)
        verbose_name= 'Modules tutors'
class PubInstr(models.Model):
    """Join of publications to instructors (legacy ``pub_instr`` table; composite key emulated)."""
    pubid = models.IntegerField(primary_key=True, default='0')
    instrid = models.IntegerField(default='0')
    # NOTE(review): meaning of ``cduom`` is not evident from this file — document once confirmed.
    cduom = models.IntegerField(default='1')
    lastupdate = models.DateTimeField()
    class Meta:
        db_table = 'pub_instr'
        unique_together = (("pubid", "instrid"),)
        verbose_name= 'Publication instr'
class PubTypes(models.Model):
    """Lookup table of publication types (legacy ``pub_types`` table)."""
    # NOTE(review): default on an AutoField is ignored by Django — legacy artifact.
    id = models.AutoField(primary_key=True, default='0')
    type_description = models.CharField(max_length=255)
    lastupdate = models.DateTimeField()
    class Meta:
        db_table = 'pub_types'
        verbose_name= 'Publication types'
class Publications(models.Model):
    """Publication record (legacy ``publications`` table)."""
    id = models.AutoField(primary_key=True)
    description = models.TextField()
    # year kept as a 4-char string, matching the legacy column type
    year = models.CharField(max_length=4, default='')
    # plain integer reference to PubTypes.id (no ForeignKey in this legacy mapping)
    typeid = models.IntegerField(default='0')
    filelink = models.CharField(max_length=255, blank=True)
    pubdate = models.DateField(blank=True, null=True)
    lastupdate = models.DateTimeField()
    class Meta:
        db_table = 'publications'
        verbose_name= 'Publications'
class Ranks(models.Model):
    """Staff rank / capacity ("idiotita") lookup, bilingual (legacy ``ranks`` table)."""
    rank_id = models.AutoField(primary_key=True)
    idiotita_per = models.CharField(max_length=150, default='')
    idiotita_per_en = models.CharField(max_length=150, default='')
    notes = models.CharField(max_length=250, blank=True)
    class Meta:
        db_table = 'ranks'
        verbose_name= 'Ranks'
class Service(models.Model):
    """Organizational unit / service (legacy ``service`` table), bilingual."""
    service_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=150, blank=True)
    name_en = models.CharField(max_length=150, blank=True)
    notes = models.TextField(blank=True)
    notes_en = models.TextField(blank=True)
    # boolean-ish flag stored as int: non-zero marks an academic unit
    is_academic = models.IntegerField(default='0')
    class Meta:
        db_table = 'service'
        verbose_name= 'Service'
class Users(models.Model):
    """Application user (legacy ``users`` table).

    NOTE(review): ``password`` is only 4 chars with default 'ldap' — presumably
    an auth-backend marker rather than a stored credential; confirm.
    """
    username = models.CharField(primary_key=True, max_length=20, default='')
    password = models.CharField(max_length=4, default='ldap')
    status = models.IntegerField(default='1')
    class Meta:
        db_table = 'users'
        verbose_name= 'Users'
class Works(models.Model):
    """Employment record linking a person to a service with a given attribute (legacy ``works`` table).

    NOTE(review): real key is (emp_id, service_id, attribute_id); ``emp_id``
    alone is marked PK only because Django lacks composite primary keys.
    """
    emp_id = models.IntegerField(primary_key=True)
    service_id = models.IntegerField()
    attribute_id = models.IntegerField(default='44')
    phone = models.CharField(max_length=36, blank=True)
    primary_academic = models.IntegerField()
    lastupdate = models.DateTimeField()
    class Meta:
        db_table = 'works'
        unique_together = (("emp_id", "service_id", "attribute_id"),)
        verbose_name= 'Works'
| per7inac1ousQ/Directories | DirectoriesBackupFiles/models.py | Python | apache-2.0 | 7,043 | 0.015618 |
from chimera.util.votable import VOTable
from httplib import HTTPConnection
import tempfile
import os
import urllib
class VizQuery(object):
    """
    Queries a catalog in Vizier
    within a given radius or box of the zenith.

    Typical usage::

        q = VizQuery()
        q.useCat("II/246")
        q.useColumns("RAJ2000,DEJ2000,Jmag", sortBy="Jmag")
        q.useTarget(center, radius=0.5)
        rows = q.find()
    """

    def __init__(self):
        # CGI arguments accumulated by the use*/constrain* methods and
        # POSTed to the VizieR service by find().
        self.args = {}
        self.args["-mime"] = "xml"
        # Column names requested via useColumns(); used by find() to label
        # each returned row.
        self.columns = None

    def useCat(self, catName):
        """
        Select which VizieR catalog to query.

        @param catName: the catalog's name in Vizier
        @type catName: str
        """
        self.args["-source"] = catName

    def useColumns(self, columns, sortBy, reverse=False):
        """
        Define which columns will be fetched and which column will be used
        for sorting.

        @param columns: comma-separated list of catalog columns to fetch
        @type columns: str

        @param sortBy: column to sort results by
        @type sortBy: str

        @param reverse: sort in descending order
        @type reverse: bool
        """
        self.columns = columns.split(",")
        self.args["-out"] = columns
        # VizieR sorts descending when the column name is prefixed with "-".
        if reverse:
            self.args["-sort"] = "-" + sortBy
        else:
            self.args["-sort"] = sortBy

    def sortBy(self, column):
        """
        One sets here which column to sort by.

        @param column: name of column to sort by
        @type column: str

        NOTE(review): this method has never had a body (it is a no-op kept
        for interface compatibility); use the ``sortBy`` argument of
        useColumns() instead.
        """

    def constrainColumns(self, columns):
        """
        Use this to add constraints to any of the columns.

        @param columns: dictionary of {COLUMN: condition} entries
        @type columns: dict
        """
        self.args.update(columns)

    def useTarget(self, center, radius=None, box=None):
        """
        @param center: center of search in catalog
        @type center: L{Position}

        @param radius: radius of search (degrees)
        @type radius: float

        @param box: box size; a number for a square or a (ww, hh) tuple
                    for a rectangle
        @type box: int | tuple

        @raises TypeError: if neither radius nor box is given
        """
        self.args["-c"] = str(center)
        self.args["-c.eq"] = "J2000"

        if radius:
            self.args["-c.rd"] = radius
        elif box:
            if isinstance(box, tuple):
                # BUGFIX: the original formatted ``radius`` here ("=%fx%f"
                # % radius), which is always falsy in this branch, so the
                # TypeError was swallowed by a bare except and -c.bd was
                # silently set to None. Format the box dimensions instead.
                self.args["-c.bd"] = "%fx%f" % box
            else:
                self.args["-c.bd"] = box
        else:
            raise TypeError("You must specify either radius or box size")

    def find(self, limit=9999):
        """
        Run the query and return the matching rows.

        @param limit: maximum number of objects to return from Vizier
        @type limit: int

        @return: list of dicts mapping the requested column names to values
        """
        assert "-c.rd" in self.args or "-c.bd" in self.args, "No target selected, use useTarget method first."

        self.args["-out.max"] = limit

        # Spool the response to a named temporary file so VOTable can
        # parse it from disk.
        results = tempfile.NamedTemporaryFile(mode='w+',
                                              prefix="chimera.vizquery",
                                              dir=tempfile.gettempdir())

        # query the catalog in Vizier's database
        conn = HTTPConnection("webviz.u-strasbg.fr")

        s = urllib.urlencode(self.args)

        conn.request("POST", "/viz-bin/votable", s)
        resp = conn.getresponse()
        ret = resp.read()

        f = open(results.name, "w")
        f.write(ret)
        f.close()

        obj = []

        votable = VOTable(results.name)

        for linha in votable.getDataRows():
            v = [c.getContent() for c in linha.getNodeList()]
            obj.append(dict(zip(self.columns, v)))

        return obj
| ankanaan/chimera | src/chimera/util/vizquery.py | Python | gpl-2.0 | 3,452 | 0.000579 |
#!/usr/bin/env python3
# Copyright (C) 2017-2021 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"Tests for the `btclib.hashes` module."
from btclib.hashes import hash160, hash256
from tests.test_to_key import (
net_unaware_compressed_pub_keys,
net_unaware_uncompressed_pub_keys,
plain_prv_keys,
)
def test_hash160_hash256() -> None:
    "Both hash160 and hash256 must digest every prv/pub key test vector."
    vector_groups = (
        plain_prv_keys,
        net_unaware_compressed_pub_keys,
        net_unaware_uncompressed_pub_keys,
    )
    for group in vector_groups:
        for hexstring in group:
            hash160(hexstring)
            hash256(hexstring)
# def test_fingerprint() -> None:
#
# seed = "bfc4cbaad0ff131aa97fa30a48d09ae7df914bcc083af1e07793cd0a7c61a03f65d622848209ad3366a419f4718a80ec9037df107d8d12c19b83202de00a40ad"
# xprv = rootxprv_from_seed(seed)
# pf = fingerprint(xprv) # xprv is automatically converted to xpub
# child_key = derive(xprv, 0x80000000)
# pf2 = BIP32KeyData.b58decode(child_key).parent_fingerprint
# assert pf == pf2
| fametrano/BitcoinBlockchainTechnology | tests/test_hashes.py | Python | mit | 1,276 | 0.000784 |
# Copyright (c) 2011 Intel Corporation
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The FilterScheduler is for creating shares.
You can customize this scheduler by specifying your own share Filters and
Weighing Functions.
"""
import operator
from manila import exception
from manila.openstack.common import importutils
from manila.openstack.common import log as logging
from manila.scheduler import driver
from manila.scheduler import scheduler_options
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FilterScheduler(driver.Scheduler):
    """Scheduler that can be used for filtering and weighing.

    Builds a filter_properties dict from the request spec, runs all hosts
    through the host manager's filters and weighers, and dispatches the
    share-create RPC to the single best host.
    """
    def __init__(self, *args, **kwargs):
        super(FilterScheduler, self).__init__(*args, **kwargs)
        # NOTE(review): never populated in this class — presumably a leftover
        # from the Nova/Cinder filter scheduler this was ported from; confirm
        # before removing.
        self.cost_function_cache = None
        self.options = scheduler_options.SchedulerOptions()
        # Validate and cache the retry limit once at construction time.
        self.max_attempts = self._max_attempts()
    def schedule(self, context, topic, method, *args, **kwargs):
        """The schedule() contract requires we return the one
        best-suited host for this request.

        NOTE(review): ``_schedule`` is not defined in this class; it is
        presumably provided by the ``driver.Scheduler`` base class — confirm.
        Its return value is discarded here.
        """
        self._schedule(context, topic, *args, **kwargs)
    def _get_configuration_options(self):
        """Fetch options dictionary. Broken out for testing."""
        return self.options.get_configuration()
    def _post_select_populate_filter_properties(self, filter_properties,
                                                host_state):
        """Add additional information to the filter properties after a host has
        been selected by the scheduling process.
        """
        # Add a retry entry for the selected volume backend:
        self._add_retry_host(filter_properties, host_state.host)
    def _add_retry_host(self, filter_properties, host):
        """Add a retry entry for the selected volume backend. In the event that
        the request gets re-scheduled, this entry will signal that the given
        backend has already been tried.
        """
        retry = filter_properties.get('retry', None)
        if not retry:
            # Retry is disabled (scheduler_max_attempts == 1); nothing to track.
            return
        hosts = retry['hosts']
        hosts.append(host)
    def _max_attempts(self):
        """Return the configured scheduler_max_attempts, rejecting values < 1."""
        max_attempts = CONF.scheduler_max_attempts
        if max_attempts < 1:
            msg = _("Invalid value for 'scheduler_max_attempts', "
                    "must be >=1")
            raise exception.InvalidParameterValue(err=msg)
        return max_attempts
    def schedule_create_share(self, context, request_spec, filter_properties):
        """Pick the best host for the share and cast the create RPC to it.

        Raises NoValidHost when filtering/weighing leaves no candidate.
        """
        weighed_host = self._schedule_share(context,
                                            request_spec,
                                            filter_properties)
        if not weighed_host:
            # NOTE(review): reason is intentionally empty here; the generic
            # NoValidHost message is considered sufficient.
            raise exception.NoValidHost(reason="")
        host = weighed_host.obj.host
        share_id = request_spec['share_id']
        snapshot_id = request_spec['snapshot_id']
        # Persist the chosen host on the share row before casting the RPC.
        updated_share = driver.share_update_db(context, share_id, host)
        self._post_select_populate_filter_properties(filter_properties,
                                                     weighed_host.obj)
        # context is not serializable
        filter_properties.pop('context', None)
        self.share_rpcapi.create_share(context, updated_share, host,
                                       request_spec=request_spec,
                                       filter_properties=filter_properties,
                                       snapshot_id=snapshot_id)
    def _schedule_share(self, context, request_spec, filter_properties=None):
        """Filter and weigh all hosts for this request.

        Returns the single best WeighedHost (despite the historical wording
        below), or None when no host passes the filters.
        """
        elevated = context.elevated()
        share_properties = request_spec['share_properties']
        # Since Manila is using mixed filters from Oslo and it's own, which
        # takes 'resource_XX' and 'volume_XX' as input respectively, copying
        # 'volume_XX' to 'resource_XX' will make both filters happy.
        resource_properties = share_properties.copy()
        share_type = request_spec.get("share_type", {})
        # NOTE(review): resource_type duplicates the share_type lookup —
        # presumably deliberate so generic filters see a 'resource_type' key;
        # confirm.
        resource_type = request_spec.get("share_type", {})
        request_spec.update({'resource_properties': resource_properties})
        config_options = self._get_configuration_options()
        if filter_properties is None:
            filter_properties = {}
        self._populate_retry_share(filter_properties, resource_properties)
        filter_properties.update({'context': context,
                                  'request_spec': request_spec,
                                  'config_options': config_options,
                                  'share_type': share_type,
                                  'resource_type': resource_type
                                  })
        self.populate_filter_properties_share(request_spec, filter_properties)
        # Find our local list of acceptable hosts by filtering and
        # weighing our options. we virtually consume resources on
        # it so subsequent selections can adjust accordingly.
        # Note: remember, we are using an iterator here. So only
        # traverse this list once.
        hosts = self.host_manager.get_all_host_states_share(elevated)
        # Filter local hosts based on requirements ...
        hosts = self.host_manager.get_filtered_hosts(hosts,
                                                     filter_properties)
        if not hosts:
            return None
        LOG.debug(_("Filtered share %(hosts)s") % locals())
        # weighted_host = WeightedHost() ... the best
        # host for the job.
        weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
                                                            filter_properties)
        best_host = weighed_hosts[0]
        LOG.debug(_("Choosing for share: %(best_host)s") % locals())
        #NOTE(rushiagr): updating the available space parameters at same place
        best_host.obj.consume_from_volume(share_properties)
        return best_host
    def _populate_retry_share(self, filter_properties, properties):
        """Populate filter properties with history of retries for this
        request. If maximum retries is exceeded, raise NoValidHost.
        """
        max_attempts = self.max_attempts
        retry = filter_properties.pop('retry', {})
        if max_attempts == 1:
            # re-scheduling is disabled.
            return
        # retry is enabled, update attempt count:
        if retry:
            retry['num_attempts'] += 1
        else:
            retry = {
                'num_attempts': 1,
                'hosts': []  # list of share service hosts tried
            }
        filter_properties['retry'] = retry
        share_id = properties.get('share_id')
        # Surface any exception recorded by the previous attempt before
        # deciding whether to give up.
        self._log_share_error(share_id, retry)
        if retry['num_attempts'] > max_attempts:
            msg = _("Exceeded max scheduling attempts %(max_attempts)d for "
                    "share %(share_id)s") % locals()
            raise exception.NoValidHost(reason=msg)
    def _log_share_error(self, share_id, retry):
        """If the request contained an exception from a previous share
        create operation, log it to aid debugging.
        """
        exc = retry.pop('exc', None)  # string-ified exception from share
        if not exc:
            return  # no exception info from a previous attempt, skip
        hosts = retry.get('hosts', None)
        if not hosts:
            return  # no previously attempted hosts, skip
        last_host = hosts[-1]
        msg = _("Error scheduling %(share_id)s from last share-service: "
                "%(last_host)s : %(exc)s") % locals()
        LOG.error(msg)
    def populate_filter_properties_share(self, request_spec,
                                         filter_properties):
        """Stuff things into filter_properties. Can be overridden in a
        subclass to add more data.
        """
        shr = request_spec['share_properties']
        filter_properties['size'] = shr['size']
        filter_properties['availability_zone'] = shr.get('availability_zone')
        filter_properties['user_id'] = shr.get('user_id')
        filter_properties['metadata'] = shr.get('metadata')
| tucbill/manila | manila/scheduler/filter_scheduler.py | Python | apache-2.0 | 8,816 | 0.000113 |
import sys
class sharedGpsData(object):
    """GPS state shared between the GPS reader daemon and its consumers.

    Holds the latest position, altitude and nearest-place fix, a drainable
    queue of points of interest, and the pid of the sharing daemon.
    """

    def __init__(self):
        # (latitude from equator, longitude from Greenwich, UTC datetime)
        self.pos = (0.0, 0.0, None,)
        # (altitude, unit string, UTC datetime)
        self.alt = (0.0, "M", None,)
        # (place name, distance, unit, distance type, UTC datetime).
        # FIX: sys.maxsize replaces the Python-2-only sys.maxint (same
        # sentinel meaning of "no known place yet", works on py2.6+ and py3).
        self.place = ("", sys.maxsize, "M", "", None,)
        # points of interest queued for display; drained by
        # getAndResetPointOfInterest()
        self.point_to_print = []
        # process id of the gps sharing daemon; -1 while not running
        self.gpsShareDaemonId = -1

    ###
    def setGpsLogId(self, process_id):
        """Record the pid of the gps sharing daemon."""
        self.gpsShareDaemonId = process_id

    def getGpsLogId(self):
        """Return the pid of the gps sharing daemon (-1 if unset)."""
        return self.gpsShareDaemonId

    ###
    def setPosition(self, latitude, longitude, dtime):
        """Store the current fix as (latitude, longitude, dtime)."""
        self.pos = (latitude, longitude, dtime,)

    def getPosition(self):
        """Return the last stored (latitude, longitude, dtime) tuple."""
        return self.pos

    ###
    def setAltitude(self, altitude, dtime, unit="M"):
        """Store the current altitude as (altitude, unit, dtime)."""
        self.alt = (altitude, unit, dtime,)

    def getAltitude(self):
        """Return the last stored (altitude, unit, dtime) tuple."""
        return self.alt

    ###
    def setPlace(self, placename, distance, distanceType, dtime, unit="M"):
        """Store the nearest known place and its distance from the fix."""
        self.place = (placename, distance, unit, distanceType, dtime,)

    def getPlace(self):
        """Return the (name, distance, unit, distanceType, dtime) tuple."""
        return self.place

    ###
    def addPointOfInterest(self, lat, lon, key, descr=None):
        """Queue a point of interest as (lat, lon, key, descr)."""
        self.point_to_print.append((lat, lon, key, descr,))

    def getAndResetPointOfInterest(self):
        """Return all queued points of interest and clear the queue."""
        toRet = self.point_to_print
        self.point_to_print = []
        return toRet
| djo938/GPSPythonSharingWithPyro | pysharegps/gpsSharedData.py | Python | gpl-3.0 | 1,548 | 0.016796 |
# FIX: the author tag was assigned twice in a row; keep a single assignment.
__author__ = 'gpratt'
import argparse
import subprocess
import os
def wrap_wait_error(wait_result):
    """Raise NameError if a subprocess exit status is non-zero.

    Kept as NameError (odd as it is) because existing callers may depend
    on that exception type.
    """
    if wait_result == 0:
        return
    raise NameError("Failed to execute command correctly {}".format(wait_result))
def pre_process_bam(bam, bam01, bam02, bam03, bam04, bam05, bam06, bam07, bam08, bam09, no_shuffle, no_sort):
    """Shuffle ``bam`` and write nine downsampled copies (10%%..90%% of reads).

    Each ``bamNN`` argument names the output file for that fraction.  With
    ``no_shuffle`` the input is assumed pre-shuffled; with ``no_sort`` the
    final sort/index step is skipped.

    NOTE(review): despite the comment below, the function writes nine files
    but only returns the first two handles — presumably a legacy signature.
    """
    #split bam file into two, return file handle for the two bam files
    print "word counting"
    p = subprocess.Popen("samtools view {} | wc -l".format(bam), shell=True, stdout=subprocess.PIPE)  # Number of reads in the tagAlign file
    stdout, stderr = p.communicate()
    nlines = int(stdout)
    print "header counting"
    p = subprocess.Popen("samtools view -H {} | wc -l".format(bam), shell=True, stdout=subprocess.PIPE)  # Number of header lines (for when we've got a lot of chromosomes)
    stdout, stderr = p.communicate()
    n_header_lines = int(stdout)
    if no_shuffle:
        # input is already shuffled; reuse its basename (minus extension)
        shuffled_bam = os.path.splitext(bam)[0]
    else: #shuffle
        shuffled_bam = os.path.splitext(os.path.basename(bam))[0] + "_shuff"
        p = subprocess.Popen("samtools bamshuf {0} {1}".format(bam, shuffled_bam), shell=True)  # This will shuffle the lines in the file and split it into two parts
        wrap_wait_error(p.wait())
    # (output file, number of lines to take) pairs; header lines are added so
    # the requested read fraction survives the ``head`` cut.
    bam_and_percent = [(bam01, int(nlines * .1) + n_header_lines),
                       (bam02, int(nlines * .2) + n_header_lines),
                       (bam03, int(nlines * .3) + n_header_lines),
                       (bam04, int(nlines * .4) + n_header_lines),
                       (bam05, int(nlines * .5) + n_header_lines),
                       (bam06, int(nlines * .6) + n_header_lines),
                       (bam07, int(nlines * .7) + n_header_lines),
                       (bam08, int(nlines * .8) + n_header_lines),
                       (bam09, int(nlines * .9) + n_header_lines),]
    cmds = []
    for bam_file, percent in bam_and_percent:
        if percent % 2 == 1:
            # keep an even line count so mate pairs are not split
            percent -= 1
        if no_sort:  # emit the subsampled bam as-is, without sort/index
            #Make sure I select pairs of reads
            cmd = "samtools view -h {0}.bam | head -n {1} | samtools view -bS - -o {2}".format(shuffled_bam, percent, bam_file)
        else: #sort
            cmd = "samtools view -h {0}.bam | head -n {1} | samtools view -bS - | samtools sort - -o {2} && samtools index {2}".format(shuffled_bam, percent, bam_file)
        print cmd
        p1 = subprocess.Popen(cmd, shell=True)
        wrap_wait_error(p1.wait())
    if not no_shuffle:  # remove the temporary shuffled bam (only created when shuffling)
        p1 = subprocess.Popen("rm {0}.bam".format(shuffled_bam), shell=True)
        wrap_wait_error(p1.wait())
    return bam01, bam02
if __name__ == "__main__":
    # Command line entry point: one required input bam plus nine required
    # output names, one per 10%-90% downsampling fraction.
    parser = argparse.ArgumentParser(
        description='Downsamples bam to a given number of reads')
    parser.add_argument(
        '--bam', required=True, help='bam file to split')
    parser.add_argument(
        '--bam01', required=True, help='name of first output bam')
    parser.add_argument(
        '--bam02', required=True, help='name of second output bam')
    parser.add_argument(
        '--bam03', required=True, help='name of third output bam')
    parser.add_argument(
        '--bam04', required=True, help='name of fourth output bam')
    parser.add_argument(
        '--bam05', required=True, help='name of fith output bam')
    parser.add_argument(
        '--bam06', required=True, help='name of sixth output bam')
    parser.add_argument(
        '--bam07', required=True, help='name of seventh output bam')
    parser.add_argument(
        '--bam08', required=True, help='name of eighth output bam')
    parser.add_argument(
        '--bam09', required=True, help='name of ninth output bam')
    parser.add_argument("--no_shuffle", action="store_true", help="Don't shuffle input bam file, only use this if input bam is already somehow shuffled")
    parser.add_argument("--no_sort", action="store_true", help="Don't sort the resulting bam files")
    args = parser.parse_args()
    # pre_process_bam writes all nine outputs; only the first two names are returned.
    bam01, bam02 = pre_process_bam(args.bam, args.bam01, args.bam02, args.bam03,
                                   args.bam04, args.bam05, args.bam06, args.bam07,
                                   args.bam08, args.bam09, args.no_shuffle, args.no_sort)
| YeoLab/gscripts | gscripts/general/downsample_bam.py | Python | mit | 4,356 | 0.008494 |
import unittest
from bolt.discord.permissions import Permission
class TestPermission(unittest.TestCase):
    """Behavioural tests for the Permission bitfield wrapper."""

    def test_permission_from_list_to_list(self):
        """A permission built from names round-trips back to the same names."""
        names = ['MANAGE_WEBHOOKS', 'USE_EXTERNAL_EMOJIS']
        round_tripped = Permission(['MANAGE_WEBHOOKS', 'USE_EXTERNAL_EMOJIS']).to_list()
        self.assertListEqual(sorted(round_tripped), sorted(names))

    def test_permission_from_int_to_list(self):
        """A permission built from a raw bitmask expands to its flag names."""
        names = ['ADMINISTRATOR', 'SEND_MESSAGES']
        round_tripped = Permission(2056).to_list()
        self.assertListEqual(sorted(round_tripped), sorted(names))

    def test_permission_in_permission(self):
        """A single contained flag is reported via ``in``."""
        perm = Permission(2056)
        self.assertTrue("ADMINISTRATOR" in perm)

    def test_permissions_in_permission(self):
        """A list of contained flags is reported via ``in``."""
        perm = Permission(2056)
        self.assertTrue(["ADMINISTRATOR", "SEND_MESSAGES"] in perm)

    def test_permission_not_in_permission(self):
        """A missing flag is reported via ``not in``."""
        perm = Permission(2056)
        self.assertTrue("USE_VAD" not in perm)

    def test_permissions_not_in_permission(self):
        """A list of missing flags is reported via ``not in``."""
        perm = Permission(2056)
        self.assertTrue(["SPEAK", "MANAGE_EMOJIS"] not in perm)

    def test_permission_add(self):
        """add() turns a previously denied flag into an allowed one."""
        perm = Permission(2056)
        self.assertTrue(perm.allows("ADMINISTRATOR"))
        self.assertFalse(perm.allows("MENTION_EVERYONE"))
        perm.add("MENTION_EVERYONE")
        self.assertTrue(perm.allows("MENTION_EVERYONE"))

    def test_permission_remove(self):
        """remove() revokes a previously allowed flag."""
        perm = Permission(2056)
        self.assertTrue(perm.allows("ADMINISTRATOR"))
        self.assertTrue(perm.allows("SEND_MESSAGES"))
        perm.remove("SEND_MESSAGES")
        self.assertFalse(perm.allows("SEND_MESSAGES"))
| Arcbot-Org/Arcbot | tests/discord/test_permission.py | Python | gpl-3.0 | 1,719 | 0 |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Glance Registry's client.
This tests are temporary and will be removed once
the registry's driver tests will be added.
"""
import copy
import datetime
import os
import uuid
import mox
from glance.common import config
from glance.common import exception
from glance import context
from glance.db.sqlalchemy import api as db_api
from glance.openstack.common import timeutils
from glance.registry.api import v2 as rserver
import glance.registry.client.v2.api as rapi
from glance.registry.client.v2.api import client as rclient
from glance.tests.unit import base
from glance.tests import utils as test_utils
def _gen_uuid():
    """Return a fresh random UUID4 as a string.

    FIX: PEP 8 (E731) — a named ``def`` instead of a lambda assignment,
    so tracebacks show a useful function name.
    """
    return str(uuid.uuid4())


UUID1 = str(uuid.uuid4())
UUID2 = str(uuid.uuid4())

#NOTE(bcwaldon): needed to init config_dir cli opt
config.parse_args(args=[])
class TestRegistryV2Client(base.IsolatedUnitTest,
test_utils.RegistryAPIMixIn):
"""
Test proper actions made for both valid and invalid requests
against a Registry service
"""
# Registry server to user
# in the stub.
registry = rserver
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2Client, self).setUp()
db_api.get_engine()
self.context = context.RequestContext(is_admin=True)
uuid1_time = timeutils.utcnow()
uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
self.FIXTURES = [
self.get_extra_fixture(
id=UUID1, name='fake image #1', is_public=False,
disk_format='ami', container_format='ami', size=13,
virtual_size=26, properties={'type': 'kernel'},
location="swift://user:passwd@acct/container/obj.tar.0",
created_at=uuid1_time),
self.get_extra_fixture(id=UUID2, name='fake image #2',
properties={}, size=19, virtual_size=38,
location="file:///tmp/glance-tests/2",
created_at=uuid2_time)]
self.destroy_fixtures()
self.create_fixtures()
self.client = rclient.RegistryClient("0.0.0.0")
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2Client, self).tearDown()
self.destroy_fixtures()
def test_image_get_index(self):
"""Test correct set of public image returned"""
images = self.client.image_get_all()
self.assertEqual(len(images), 2)
def test_create_image_with_null_min_disk_min_ram(self):
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None,
min_ram=None)
db_api.image_create(self.context, extra_fixture)
image = self.client.image_get(image_id=UUID3)
self.assertEqual(0, image["min_ram"])
self.assertEqual(0, image["min_disk"])
def test_get_index_sort_name_asc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by name in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='name', sort_dir='asc')
self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4),
unjsonify=False)
def test_get_index_sort_status_desc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by status in
descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
status='queued')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='status', sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_index_sort_disk_format_asc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by disk_format in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='vdi')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='disk_format',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2),
unjsonify=False)
def test_get_index_sort_container_format_desc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by container_format in
descending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='iso',
container_format='bare')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='container_format',
sort_dir='desc')
self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1),
unjsonify=False)
def test_get_index_sort_size_asc(self):
"""
Tests that the registry API returns list of
public images sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami',
size=100, virtual_size=200)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='asdf',
disk_format='iso',
container_format='bare',
size=2, virtual_size=4)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='size', sort_dir='asc')
self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3),
unjsonify=False)
def test_get_index_sort_created_at_asc(self):
"""
Tests that the registry API returns list of
public images sorted by created_at in ascending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='created_at',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID2, UUID4, UUID3),
unjsonify=False)
def test_get_index_sort_updated_at_desc(self):
"""
Tests that the registry API returns list of
public images sorted by updated_at in descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=None,
updated_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=None,
updated_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='updated_at',
sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_image_get_index_marker(self):
"""Test correct set of images returned with marker param."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID3)
self.assertEqualImages(images, (UUID4, UUID2, UUID1), unjsonify=False)
def test_image_get_index_limit(self):
"""Test correct number of images returned with limit param."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=2)
self.assertEqual(len(images), 2)
def test_image_get_index_marker_limit(self):
"""Test correct set of images returned with marker/limit params."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID4, limit=1)
self.assertEqualImages(images, (UUID2,), unjsonify=False)
def test_image_get_index_limit_None(self):
"""Test correct set of images returned with limit param == None."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=None)
self.assertEqual(len(images), 4)
def test_image_get_index_by_name(self):
"""
Test correct set of public, name-filtered image returned. This
is just a sanity check, we test the details call more in-depth.
"""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(filters={'name': 'new name! #123'})
self.assertEqual(len(images), 1)
for image in images:
self.assertEqual('new name! #123', image['name'])
def test_image_get_is_public_v2(self):
"""Tests that a detailed call can be filtered by a property"""
extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving',
properties={'is_public': 'avalue'})
context = copy.copy(self.context)
db_api.image_create(context, extra_fixture)
filters = {'is_public': 'avalue'}
images = self.client.image_get_all(filters=filters)
self.assertEqual(len(images), 1)
for image in images:
self.assertEqual('avalue', image['properties'][0]['value'])
def test_image_get(self):
"""Tests that the detailed info about an image returned"""
fixture = self.get_fixture(id=UUID1, name='fake image #1',
is_public=False, size=13, virtual_size=26,
disk_format='ami', container_format='ami')
data = self.client.image_get(image_id=UUID1)
for k, v in fixture.items():
el = data[k]
self.assertEqual(v, data[k],
"Failed v != data[k] where v = %(v)s and "
"k = %(k)s and data[k] = %(el)s" %
dict(v=v, k=k, el=el))
def test_image_get_non_existing(self):
"""Tests that NotFound is raised when getting a non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_get,
image_id=_gen_uuid())
def test_image_create_basic(self):
"""Tests that we can add image metadata and returns the new id"""
fixture = self.get_fixture()
new_image = self.client.image_create(values=fixture)
# Test all other attributes set
data = self.client.image_get(image_id=new_image['id'])
for k, v in fixture.items():
self.assertEqual(v, data[k])
# Test status was updated properly
self.assertIn('status', data)
self.assertEqual('active', data['status'])
def test_image_create_with_properties(self):
"""Tests that we can add image metadata with properties"""
fixture = self.get_fixture(location="file:///tmp/glance-tests/2",
properties={'distro': 'Ubuntu 10.04 LTS'})
new_image = self.client.image_create(values=fixture)
self.assertIn('properties', new_image)
self.assertEqual(new_image['properties'][0]['value'],
fixture['properties']['distro'])
del fixture['location']
del fixture['properties']
for k, v in fixture.items():
self.assertEqual(v, new_image[k])
# Test status was updated properly
self.assertIn('status', new_image.keys())
self.assertEqual('active', new_image['status'])
def test_image_create_already_exists(self):
"""Tests proper exception is raised if image with ID already exists"""
fixture = self.get_fixture(id=UUID2,
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Duplicate,
self.client.image_create,
values=fixture)
def test_image_create_with_bad_status(self):
"""Tests proper exception is raised if a bad status is set"""
fixture = self.get_fixture(status='bad status',
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Invalid,
self.client.image_create,
values=fixture)
def test_image_update(self):
"""Tests that the registry API updates the image"""
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': 'saving'}
self.assertTrue(self.client.image_update(image_id=UUID2,
values=fixture))
# Test all other attributes set
data = self.client.image_get(image_id=UUID2)
for k, v in fixture.items():
self.assertEqual(v, data[k])
def test_image_update_conflict(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual(current, 'active')
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Conflict, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
try:
self.client.image_update(image_id=UUID2, values=fixture,
from_state=from_state)
except exception.Conflict as exc:
msg = (_('cannot transition from %(current)s to '
'%(next)s in update (wanted '
'from_state=%(from)s)') %
{'current': current, 'next': next_state,
'from': from_state})
self.assertEqual(str(exc), msg)
def _test_image_update_not_existing(self):
"""Tests non existing image update doesn't work"""
fixture = self.get_fixture(status='bad status')
self.assertRaises(exception.NotFound,
self.client.image_update,
image_id=_gen_uuid(),
values=fixture)
def test_image_destroy(self):
"""Tests that image metadata is deleted properly"""
# Grab the original number of images
orig_num_images = len(self.client.image_get_all())
# Delete image #2
image = self.FIXTURES[1]
deleted_image = self.client.image_destroy(image_id=image['id'])
self.assertTrue(deleted_image)
self.assertEqual(image['id'], deleted_image['id'])
self.assertTrue(deleted_image['deleted'])
self.assertTrue(deleted_image['deleted_at'])
# Verify one less image
filters = {'deleted': False}
new_num_images = len(self.client.image_get_all(filters=filters))
self.assertEqual(new_num_images, orig_num_images - 1)
def test_image_destroy_not_existing(self):
"""Tests cannot delete non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_destroy,
image_id=_gen_uuid())
def test_image_get_members(self):
"""Tests getting image members"""
memb_list = self.client.image_member_find(image_id=UUID2)
num_members = len(memb_list)
self.assertEqual(num_members, 0)
def test_image_get_members_not_existing(self):
"""Tests getting non-existent image members"""
self.assertRaises(exception.NotFound,
self.client.image_get_members,
image_id=_gen_uuid())
def test_image_member_find(self):
"""Tests getting member images"""
memb_list = self.client.image_member_find(member='pattieblack')
num_members = len(memb_list)
self.assertEqual(num_members, 0)
def test_add_update_members(self):
"""Tests updating image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.assertTrue(member)
values['member'] = 'pattieblack2'
self.assertTrue(self.client.image_member_update(memb_id=member['id'],
values=values))
def test_add_delete_member(self):
"""Tests deleting image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.client.image_member_delete(memb_id=member['id'])
memb_list = self.client.image_member_find(member='pattieblack')
self.assertEqual(len(memb_list), 0)
class TestRegistryV2ClientApi(base.IsolatedUnitTest):
    """
    Test proper actions made for both valid and invalid requests
    against a Registry service
    """

    def setUp(self):
        """Establish a clean test environment"""
        super(TestRegistryV2ClientApi, self).setUp()
        self.mox = mox.Mox()
        # Re-import rapi so module-level state (presumably cached creds
        # such as rapi._CLIENT_CREDS) from earlier tests is discarded.
        # NOTE: reload() is the Python 2 builtin here.
        reload(rapi)

    def tearDown(self):
        """Clear the test environment"""
        super(TestRegistryV2ClientApi, self).tearDown()
        self.mox.UnsetStubs()

    def test_configure_registry_client_not_using_use_user_token(self):
        # With use_user_token disabled, configure_registry_client() must
        # fall back to configuring admin credentials (verified via mox).
        self.config(use_user_token=False)
        self.mox.StubOutWithMock(rapi, 'configure_registry_admin_creds')
        rapi.configure_registry_admin_creds()
        self.mox.ReplayAll()

        rapi.configure_registry_client()
        self.mox.VerifyAll()

    def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'):
        # Credential dict shaped like what rapi stores in _CLIENT_CREDS.
        return {
            'user': 'user',
            'password': 'password',
            'username': 'user',
            'tenant': 'tenant',
            'auth_url': auth_url,
            'strategy': strategy,
            'region': 'region'
        }

    def test_configure_registry_admin_creds(self):
        """Admin creds come from config; auth_url stays None without env."""
        expected = self._get_fake_config_creds(auth_url=None,
                                               strategy='configured_strategy')
        self.config(admin_user=expected['user'])
        self.config(admin_password=expected['password'])
        self.config(admin_tenant_name=expected['tenant'])
        self.config(auth_strategy=expected['strategy'])
        self.config(auth_region=expected['region'])
        # Force os.getenv to return nothing so no auth URL is picked up
        # from the environment.
        self.stubs.Set(os, 'getenv', lambda x: None)

        self.assertIsNone(rapi._CLIENT_CREDS)
        rapi.configure_registry_admin_creds()
        self.assertEqual(rapi._CLIENT_CREDS, expected)

    def test_configure_registry_admin_creds_with_auth_url(self):
        """An explicit auth_url wins and the strategy becomes 'keystone'."""
        expected = self._get_fake_config_creds()
        self.config(admin_user=expected['user'])
        self.config(admin_password=expected['password'])
        self.config(admin_tenant_name=expected['tenant'])
        self.config(auth_url=expected['auth_url'])
        # Configured strategy is 'test_strategy', but the expected creds
        # carry the default 'keystone' -- setting auth_url overrides it.
        self.config(auth_strategy='test_strategy')
        self.config(auth_region=expected['region'])

        self.assertIsNone(rapi._CLIENT_CREDS)
        rapi.configure_registry_admin_creds()
        self.assertEqual(rapi._CLIENT_CREDS, expected)
| tanglei528/glance | glance/tests/unit/v2/test_registry_client.py | Python | apache-2.0 | 24,618 | 0.000081 |
#!/usr/bin/env python
from __future__ import print_function
from time import sleep
from subprocess import call
import os
from drive import get_active_torrents_count, get_vpn_ip, WAIT_CYCLE
# Consecutive failed health checks tolerated before forcing a restart.
MAX_FAIL=2
def transmission_down():
    """Return True when the transmission RPC endpoint cannot be queried."""
    try:
        get_active_torrents_count()
    except Exception as exc:
        print("Problem getting active torrent count: {}".format(exc))
        return True
    return False
def vpn_down():
    """Return True when the VPN IP address cannot be determined."""
    try:
        get_vpn_ip()
    except Exception as exc:
        print("Problem getting vpn IP: {}".format(exc))
        return True
    return False
def suicide():
    """Kill tini which will cause everything to restart properly.

    Killing the supervisor (rather than just exiting this process) tears
    down the whole container so transmission and the VPN come back fresh.
    """
    # Fix: corrected the typo "comitting" in the log message.
    print("Something went wrong, committing suicide.")
    call("pkill -f tini", shell=True)
if __name__ == "__main__":
    fail_count = 0
    # Poll forever, sleeping WAIT_CYCLE seconds between probes.
    while True:
        sleep(WAIT_CYCLE)
        print("Health checking...")
        if transmission_down() or vpn_down():
            fail_count += 1
            print("Fail count: {}".format(fail_count))
        else:
            # A single healthy probe resets the failure streak.
            fail_count = 0
        if fail_count >= MAX_FAIL:
            # suicide() kills the supervisor, so this process dies too.
            suicide()
| thomasanderson9000/ipredator_btt | transmission/healthcheck.py | Python | apache-2.0 | 1,116 | 0.005376 |
"""Family module for Meta Wiki."""
#
# (C) Pywikibot team, 2005-2020
#
# Distributed under the terms of the MIT license.
#
from pywikibot import family
# The Wikimedia Meta-Wiki family
class Family(family.WikimediaOrgFamily):

    """Family class for Meta Wiki."""

    # Family identifier (presumably resolved to meta.wikimedia.org by
    # the WikimediaOrgFamily base class -- confirm against family.py).
    name = 'meta'

    # Unprefixed interwiki links are forwarded to Wikipedia.
    interwiki_forward = 'wikipedia'

    # Site codes for which cross-wiki bot edits are permitted.
    cross_allowed = ['meta', ]

    # Templates that mark a category page as a redirect, keyed by site code.
    category_redirect_templates = {
        'meta': (
            'Category redirect',
        ),
    }

    # Subpages for documentation.
    doc_subpages = {
        '_default': (('/doc',), ['meta']),
    }
| wikimedia/pywikibot-core | pywikibot/families/meta_family.py | Python | mit | 564 | 0 |
"""
A library of useful helper classes to the saxlib classes, for the
convenience of application and driver writers.
$Id: saxutils.py,v 1.35 2004/03/20 07:46:04 fdrake Exp $
"""
import os, urlparse, urllib2, types
import handler
import xmlreader
import sys, _exceptions, saxlib
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError: # 1.5 compatibility:UnicodeType not defined
_StringTypes = [types.StringType]
def __dict_replace(s, d):
    """Replace substrings of a string using a dictionary."""
    result = s
    for old, new in d.items():
        result = result.replace(old, new)
    return result
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # Ampersand first, so the '&' in &lt;/&gt; is not double-escaped.
    result = data.replace("&", "&amp;")
    result = result.replace("<", "&lt;")
    result = result.replace(">", "&gt;")
    if entities:
        result = __dict_replace(result, entities)
    return result
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    result = data.replace("&lt;", "<").replace("&gt;", ">")
    if entities:
        result = __dict_replace(result, entities)
    # must do ampersand last
    return result.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    value = escape(data, entities)
    if '"' not in value:
        return '"%s"' % value
    if "'" not in value:
        # Contains double quotes only: single-quote the value.
        return "'%s'" % value
    # Contains both quote kinds: escape the doubles and double-quote.
    return '"%s"' % value.replace('"', "&quot;")
# --- DefaultHandler
# Convenience base combining the four SAX2 handler interfaces in one class.
class DefaultHandler(handler.EntityResolver, handler.DTDHandler,
                     handler.ContentHandler, handler.ErrorHandler):
    """Default base class for SAX2 event handlers. Implements empty
    methods for all callback methods, which can be overridden by
    application implementors. Replaces the deprecated SAX1 HandlerBase
    class."""
# --- Location
class Location:
    """Immutable snapshot of a parse position.

    Built from a SAX locator: the current column, line, public id and
    system id are read off once at construction time and kept."""

    def __init__(self, locator):
        self.__col = locator.getColumnNumber()
        self.__line = locator.getLineNumber()
        self.__pubid = locator.getPublicId()
        self.__sysid = locator.getSystemId()

    def getColumnNumber(self):
        return self.__col

    def getLineNumber(self):
        return self.__line

    def getPublicId(self):
        return self.__pubid

    def getSystemId(self):
        return self.__sysid

    def __str__(self):
        # Unknown positions render as '?'; unknown source as '<unknown>'.
        line = "?" if self.__line is None else self.__line
        col = "?" if self.__col is None else self.__col
        source = self.__sysid or self.__pubid or "<unknown>"
        return "%s:%s:%s" % (source, line, col)
# --- ErrorPrinter
class ErrorPrinter:
    "A simple class that just prints error messages to standard out."
    # NOTE(review): despite the docstring, the default outfile is
    # sys.stderr, not stdout.
    # 'level' filters severity: 0 prints everything, 1 suppresses
    # warnings, 2 suppresses warnings and errors.

    def __init__(self, level=0, outfile=sys.stderr):
        self._level = level
        self._outfile = outfile

    def warning(self, exception):
        if self._level <= 0:
            self._outfile.write("WARNING in %s: %s\n" %
                                (self.__getpos(exception),
                                 exception.getMessage()))

    def error(self, exception):
        if self._level <= 1:
            self._outfile.write("ERROR in %s: %s\n" %
                                (self.__getpos(exception),
                                 exception.getMessage()))

    def fatalError(self, exception):
        if self._level <= 2:
            self._outfile.write("FATAL ERROR in %s: %s\n" %
                                (self.__getpos(exception),
                                 exception.getMessage()))

    def __getpos(self, exception):
        # Parse exceptions carry a position; anything else is '<unknown>'.
        if isinstance(exception, _exceptions.SAXParseException):
            return "%s:%s:%s" % (exception.getSystemId(),
                                 exception.getLineNumber(),
                                 exception.getColumnNumber())
        else:
            return "<unknown>"
# --- ErrorRaiser
class ErrorRaiser:
    """Re-raises received SAX exceptions, filtered by severity level.

    level 0 raises everything; level 1 swallows warnings; level 2
    swallows warnings and errors, raising only fatal errors."""

    def __init__(self, level=0):
        self._level = level

    def warning(self, exception):
        if self._level <= 0:
            raise exception

    def error(self, exception):
        if self._level <= 1:
            raise exception

    def fatalError(self, exception):
        if self._level <= 2:
            raise exception
# --- AttributesImpl now lives in xmlreader
from xmlreader import AttributesImpl
# --- XMLGenerator is the SAX2 ContentHandler for writing back XML
import codecs
def _outputwrapper(stream, encoding):
    """Wrap *stream* in a StreamWriter that encodes with *encoding*."""
    # codecs.lookup() yields (encoder, decoder, StreamReader, StreamWriter).
    writer_factory = codecs.lookup(encoding)[3]
    return writer_factory(stream)
# Two implementations of writetext, chosen at import time: when the
# codecs module supports error-handler registration (Python 2.3+),
# rely on 'xmlcharrefreplace' to emit character references for
# unencodable characters; otherwise fall back to a per-character retry.
if hasattr(codecs, "register_error"):
    def writetext(stream, text, entities={}):
        # Temporarily switch the stream's error mode so unencodable
        # characters become numeric character references.
        stream.errors = "xmlcharrefreplace"
        stream.write(escape(text, entities))
        stream.errors = "strict"
else:
    def writetext(stream, text, entities={}):
        text = escape(text, entities)
        try:
            stream.write(text)
        except UnicodeError:
            # Retry character by character, replacing only the ones the
            # target encoding cannot represent.
            for c in text:
                try:
                    stream.write(c)
                except UnicodeError:
                    stream.write(u"&#%d;" % ord(c))
def writeattr(stream, text):
    """Write *text* to *stream* as a quoted XML attribute value.

    Picks the quote character that needs the fewest escapes: double
    quotes by default, single quotes when the text holds more double
    quotes than single quotes."""
    if '"' in text:
        if text.count("'") < text.count('"'):
            quote = "'"
            entities = {"'": "&apos;"}
        else:
            quote = '"'
            entities = {'"': "&quot;"}
    else:
        quote = '"'
        entities = {}
    stream.write(quote)
    writetext(stream, text, entities)
    stream.write(quote)
class XMLGenerator(handler.ContentHandler):
    """SAX2 ContentHandler that serializes received events back to XML."""

    # Template for prefixes invented for attributes that carry a
    # namespace URI but no declared prefix.
    GENERATED_PREFIX = "pyxml.sax.saxutils.prefix%s"

    def __init__(self, out=None, encoding="iso-8859-1"):
        if out is None:
            import sys
            out = sys.stdout
        handler.ContentHandler.__init__(self)
        # All writes go through an encoding StreamWriter.
        self._out = _outputwrapper(out,encoding)
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        # (prefix, uri) pairs declared but not yet written out.
        self._undeclared_ns_maps = []
        self._encoding = encoding
        self._generated_prefix_ctr = 0
        return

    # ContentHandler methods

    def startDocument(self):
        self._out.write('<?xml version="1.0" encoding="%s"?>\n' %
                        self._encoding)

    def startPrefixMapping(self, prefix, uri):
        # Push a copy of the mapping so endPrefixMapping can restore it.
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]

    def startElement(self, name, attrs):
        self._out.write('<' + name)
        for (name, value) in attrs.items():
            self._out.write(' %s=' % name)
            writeattr(self._out, value)
        self._out.write('>')

    def endElement(self, name):
        self._out.write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        # name is a (uri, localname) tuple; rewrite it to a qualified name.
        if name[0] is None:
            name = name[1]
        elif self._current_context[name[0]] is None:
            # default namespace
            name = name[1]
        else:
            name = self._current_context[name[0]] + ":" + name[1]
        self._out.write('<' + name)

        # Emit any xmlns declarations collected since the last element.
        for k,v in self._undeclared_ns_maps:
            if k is None:
                self._out.write(' xmlns="%s"' % (v or ''))
            else:
                self._out.write(' xmlns:%s="%s"' % (k,v))
        self._undeclared_ns_maps = []

        for (name, value) in attrs.items():
            if name[0] is None:
                name = name[1]
            elif self._current_context[name[0]] is None:
                # default namespace
                #If an attribute has a nsuri but not a prefix, we must
                #create a prefix and add a nsdecl
                prefix = self.GENERATED_PREFIX % self._generated_prefix_ctr
                self._generated_prefix_ctr = self._generated_prefix_ctr + 1
                # NOTE(review): 'name' is rebound to a string on the next
                # line, so the subsequent name[0] uses take its first
                # *character*, not the namespace URI -- looks like a
                # latent bug; confirm against upstream pyxml.
                name = prefix + ':' + name[1]
                self._out.write(' xmlns:%s=%s' % (prefix, quoteattr(name[0])))
                self._current_context[name[0]] = prefix
            else:
                name = self._current_context[name[0]] + ":" + name[1]
            self._out.write(' %s=' % name)
            writeattr(self._out, value)
        self._out.write('>')

    def endElementNS(self, name, qname):
        # XXX: if qname is not None, we better use it.
        # Python 2.0b2 requires us to use the recorded prefix for
        # name[0], though
        if name[0] is None:
            qname = name[1]
        elif self._current_context[name[0]] is None:
            qname = name[1]
        else:
            qname = self._current_context[name[0]] + ":" + name[1]
        self._out.write('</%s>' % qname)

    def characters(self, content):
        # writetext escapes &, <, > before writing.
        writetext(self._out, content)

    def ignorableWhitespace(self, content):
        self._out.write(content)

    def processingInstruction(self, target, data):
        self._out.write('<?%s %s?>' % (target, data))
class LexicalXMLGenerator(XMLGenerator, saxlib.LexicalHandler):
    """A XMLGenerator that also supports the LexicalHandler interface"""

    def __init__(self, out=None, encoding="iso-8859-1"):
        XMLGenerator.__init__(self, out, encoding)
        # Tracks whether we are between startCDATA and endCDATA.
        self._in_cdata = 0

    def characters(self, content):
        if self._in_cdata:
            # ']]>' cannot appear inside a CDATA section, so split it
            # across two adjacent sections.
            self._out.write(content.replace(']]>', ']]>]]&gt;<![CDATA['))
        else:
            self._out.write(escape(content))

    # LexicalHandler methods
    # (we only support the most important ones and inherit the rest)

    def startDTD(self, name, public_id, system_id):
        self._out.write('<!DOCTYPE %s' % name)
        if public_id:
            self._out.write(' PUBLIC %s %s' % (
                quoteattr(public_id or ""), quoteattr(system_id or "")
            ))
        elif system_id:
            self._out.write(' SYSTEM %s' % quoteattr(system_id or ""))

    def endDTD(self):
        self._out.write('>')

    def comment(self, content):
        self._out.write('<!--')
        self._out.write(content)
        self._out.write('-->')

    def startCDATA(self):
        self._in_cdata = 1
        self._out.write('<![CDATA[')

    def endCDATA(self):
        self._in_cdata = 0
        self._out.write(']]>')
# --- ContentGenerator is the SAX1 DocumentHandler for writing back XML
class ContentGenerator(XMLGenerator):
    """SAX1 DocumentHandler adapter over XMLGenerator's characters()."""

    def characters(self, str, start, end):
        # In SAX1, characters receives start and end; in SAX2, it receives
        # a string. For plain strings, we may want to use a buffer object.
        # NOTE(review): 'end' is used as a *length* here (start:start+end),
        # matching drivers that pass (offset, length).
        return XMLGenerator.characters(self, str[start:start+end])
# --- XMLFilterImpl
class XMLFilterBase(saxlib.XMLFilter):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers. By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    # ErrorHandler methods

    def error(self, exception):
        self._err_handler.error(exception)

    def fatalError(self, exception):
        self._err_handler.fatalError(exception)

    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)

    def startDocument(self):
        self._cont_handler.startDocument()

    def endDocument(self):
        self._cont_handler.endDocument()

    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)

    def endElement(self, name):
        self._cont_handler.endElement(name)

    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)

    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)

    def characters(self, content):
        self._cont_handler.characters(content)

    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)

    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods

    def resolveEntity(self, publicId, systemId):
        # Bug fix: the resolved InputSource must be propagated to the
        # caller; previously the delegate's return value was discarded,
        # which silently broke custom entity resolution through filters.
        # (CPython's xml.sax.saxutils.XMLFilterBase returns it as well.)
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods

    def parse(self, source):
        # Install this filter as all four handlers on the parent reader,
        # then delegate the actual parse.
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)

    def setLocale(self, locale):
        self._parent.setLocale(locale)

    def getFeature(self, name):
        return self._parent.getFeature(name)

    def setFeature(self, name, state):
        self._parent.setFeature(name, state)

    def getProperty(self, name):
        return self._parent.getProperty(name)

    def setProperty(self, name, value):
        self._parent.setProperty(name, value)
# FIXME: remove this backward compatibility hack when not needed anymore
XMLFilterImpl = XMLFilterBase
# --- BaseIncrementalParser
class BaseIncrementalParser(xmlreader.IncrementalParser):
    """This class implements the parse method of the XMLReader
    interface using the feed, close and reset methods of the
    IncrementalParser interface as a convenience to SAX 2.0 driver
    writers."""

    def parse(self, source):
        source = prepare_input_source(source)
        self.prepareParser(source)
        self._cont_handler.startDocument()

        # FIXME: what about char-stream?
        # Pump the byte stream through feed() in 16 KB chunks.
        inf = source.getByteStream()
        buffer = inf.read(16384)
        while buffer != "":
            self.feed(buffer)
            buffer = inf.read(16384)
        self.close()
        self.reset()
        self._cont_handler.endDocument()

    def prepareParser(self, source):
        """This method is called by the parse implementation to allow
        the SAX 2.0 driver to prepare itself for parsing."""
        raise NotImplementedError("prepareParser must be overridden!")
# --- Utility functions
def prepare_input_source(source, base = ""):
    """This function takes an InputSource and an optional base URL and
    returns a fully resolved InputSource object ready for reading."""
    # Accept a plain system-id string or any object with a read() method.
    if type(source) in _StringTypes:
        source = xmlreader.InputSource(source)
    elif hasattr(source, "read"):
        f = source
        source = xmlreader.InputSource()
        source.setByteStream(f)
        if hasattr(f, "name"):
            source.setSystemId(f.name)

    if source.getByteStream() is None:
        sysid = source.getSystemId()
        if os.path.isfile(sysid):
            basehead = os.path.split(os.path.normpath(base))[0]
            source.setSystemId(os.path.join(basehead, sysid))
            # NOTE(review): the system id is rewritten relative to *base*
            # above, but the file is opened via the original 'sysid' --
            # confirm whether the joined path was intended here.
            f = open(sysid, "rb")
        else:
            source.setSystemId(urlparse.urljoin(base, sysid))
            f = urllib2.urlopen(source.getSystemId())
        source.setByteStream(f)

    return source
# ===========================================================================
#
# DEPRECATED SAX 1.0 CLASSES
#
# ===========================================================================
# --- AttributeMap
class AttributeMap:
    """An implementation of AttributeList that takes an (attr,val) hash
    and uses it to implement the AttributeList interface."""
    # Supports both integer indexing (position in key order) and
    # key-based lookup, mirroring the SAX1 AttributeList API.

    def __init__(self, map):
        self.map=map

    def getLength(self):
        return len(self.map.keys())

    def getName(self, i):
        try:
            return self.map.keys()[i]
        except IndexError,e:
            return None

    def getType(self, i):
        # All attributes are reported as CDATA (no DTD information).
        return "CDATA"

    def getValue(self, i):
        # 'i' may be an integer position or an attribute name.
        try:
            if type(i)==types.IntType:
                return self.map[self.getName(i)]
            else:
                return self.map[i]
        except KeyError,e:
            return None

    def __len__(self):
        return len(self.map)

    def __getitem__(self, key):
        # Integer subscript yields the attribute *name*; string subscript
        # yields the attribute value.
        if type(key)==types.IntType:
            return self.map.keys()[key]
        else:
            return self.map[key]

    def items(self):
        return self.map.items()

    def keys(self):
        return self.map.keys()

    def has_key(self,key):
        return self.map.has_key(key)

    def get(self, key, alternative=None):
        return self.map.get(key, alternative)

    def copy(self):
        return AttributeMap(self.map.copy())

    def values(self):
        return self.map.values()
# --- Event broadcasting object
class EventBroadcaster:
    """Takes a list of objects and forwards any method calls received
    to all objects in the list. The attribute list holds the list and
    can freely be modified by clients."""

    class Event:
        "Helper objects that represent event methods."

        def __init__(self,list,name):
            self.list=list
            self.name=name

        def __call__(self,*rest):
            # Invoke the same-named method on every registered object
            # (apply() is the Python 2 spelling of f(*rest)).
            for obj in self.list:
                apply(getattr(obj,self.name), rest)

    def __init__(self,list):
        self.list=list

    def __getattr__(self,name):
        # Any attribute access produces a callable that fans out to all
        # objects in self.list.
        return self.Event(self.list,name)

    def __repr__(self):
        return "<EventBroadcaster instance at %d>" % id(self)
# --- ESIS document handler
import saxlib
class ESISDocHandler(saxlib.HandlerBase):
    "A SAX document handler that produces naive ESIS output."
    # ESIS line prefixes used below: '?' processing instruction,
    # '(' start tag, 'A' attribute, ')' end tag, '-' character data.

    def __init__(self,writer=sys.stdout):
        self.writer=writer

    def processingInstruction (self,target, remainder):
        """Receive an event signalling that a processing instruction
        has been found."""
        self.writer.write("?"+target+" "+remainder+"\n")

    def startElement(self,name,amap):
        "Receive an event signalling the start of an element."
        self.writer.write("("+name+"\n")
        for a_name in amap.keys():
            self.writer.write("A"+a_name+" "+amap[a_name]+"\n")

    def endElement(self,name):
        "Receive an event signalling the end of an element."
        self.writer.write(")"+name+"\n")

    def characters(self,data,start_ix,length):
        "Receive an event signalling that character data has been found."
        self.writer.write("-"+data[start_ix:start_ix+length]+"\n")
# --- XML canonizer
class Canonizer(saxlib.HandlerBase):
    "A SAX document handler that produces canonized XML output."

    def __init__(self,writer=sys.stdout):
        # Character data outside the document element is dropped, so
        # track the current element nesting depth.
        self.elem_level=0
        self.writer=writer

    def processingInstruction (self,target, remainder):
        # The XML declaration ('xml' target) is not part of canonical XML.
        if not target=="xml":
            self.writer.write("<?"+target+" "+remainder+"?>")

    def startElement(self,name,amap):
        self.writer.write("<"+name)
        # Canonical XML requires attributes in sorted order.
        a_names=amap.keys()
        a_names.sort()
        for a_name in a_names:
            self.writer.write(" "+a_name+"=\"")
            self.write_data(amap[a_name])
            self.writer.write("\"")
        self.writer.write(">")
        self.elem_level=self.elem_level+1

    def endElement(self,name):
        self.writer.write("</"+name+">")
        self.elem_level=self.elem_level-1

    def ignorableWhitespace(self,data,start_ix,length):
        self.characters(data,start_ix,length)

    def characters(self,data,start_ix,length):
        if self.elem_level>0:
            self.write_data(data[start_ix:start_ix+length])

    def write_data(self,data):
        "Writes datachars to writer."
        # Escape markup characters plus tab/newline/carriage-return as
        # numeric character references, per canonical-XML output rules.
        data=data.replace("&","&amp;")
        data=data.replace("<","&lt;")
        data=data.replace("\"","&quot;")
        data=data.replace(">","&gt;")
        data=data.replace(chr(9),"&#9;")
        data=data.replace(chr(10),"&#10;")
        data=data.replace(chr(13),"&#13;")
        self.writer.write(data)
# --- mllib
class mllib:
    """A re-implementation of the htmllib, sgmllib and xmllib interfaces as a
    SAX DocumentHandler."""

    # Unsupported:
    # - setnomoretags
    # - setliteral
    # - translate_references
    # - handle_xml
    # - handle_doctype
    # - handle_charref
    # - handle_entityref
    # - handle_comment
    # - handle_cdata
    # - tag_attributes

    def __init__(self):
        self.reset()

    def reset(self):
        # Build a fresh parser and re-wire the internal Handler to it.
        import saxexts # only used here
        self.parser=saxexts.XMLParserFactory.make_parser()
        self.handler=mllib.Handler(self.parser,self)
        self.handler.reset()

    def feed(self,data):
        self.parser.feed(data)

    def close(self):
        self.parser.close()

    def get_stack(self):
        return self.handler.get_stack()

    # --- Handler methods (to be overridden)
    # Default dispatch: start_<tag>/end_<tag> methods found on self are
    # invoked via handle_starttag/handle_endtag; everything else falls
    # through to the unknown_* hooks.

    def handle_starttag(self,name,method,atts):
        method(atts)

    def handle_endtag(self,name,method):
        method()

    def handle_data(self,data):
        pass

    def handle_proc(self,target,data):
        pass

    def unknown_starttag(self,name,atts):
        pass

    def unknown_endtag(self,name):
        pass

    def syntax_error(self,message):
        pass

    # --- The internal handler class

    class Handler(saxlib.DocumentHandler,saxlib.ErrorHandler):
        """An internal class to handle SAX events and translate them to mllib
        events."""

        def __init__(self,driver,handler):
            self.driver=driver
            self.driver.setDocumentHandler(self)
            self.driver.setErrorHandler(self)
            self.handler=handler
            self.reset()

        def get_stack(self):
            return self.stack

        def reset(self):
            # Stack of currently-open element names.
            self.stack=[]

        # --- DocumentHandler methods

        def characters(self, ch, start, length):
            self.handler.handle_data(ch[start:start+length])

        def endElement(self, name):
            # Prefer a user-supplied end_<name> method, else the
            # unknown_endtag fallback.
            if hasattr(self.handler,"end_"+name):
                self.handler.handle_endtag(name,
                                           getattr(self.handler,"end_"+name))
            else:
                self.handler.unknown_endtag(name)

            del self.stack[-1]

        def ignorableWhitespace(self, ch, start, length):
            self.handler.handle_data(ch[start:start+length])

        def processingInstruction(self, target, data):
            self.handler.handle_proc(target,data)

        def startElement(self, name, atts):
            self.stack.append(name)

            # Prefer a user-supplied start_<name> method, else the
            # unknown_starttag fallback.
            if hasattr(self.handler,"start_"+name):
                self.handler.handle_starttag(name,
                                             getattr(self.handler,
                                                     "start_"+name),
                                             atts)
            else:
                self.handler.unknown_starttag(name,atts)

        # --- ErrorHandler methods

        def error(self, exception):
            self.handler.syntax_error(str(exception))

        def fatalError(self, exception):
            raise RuntimeError(str(exception))
| selfcommit/gaedav | pyxml/sax/saxutils.py | Python | lgpl-2.1 | 24,606 | 0.005486 |
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample pipeline with ForEach context."""
from tfx.components import CsvExampleGen
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.dsl.components.common import resolver
from tfx.dsl.control_flow import for_each
from tfx.dsl.input_resolution.strategies import latest_artifact_strategy
from tfx.orchestration import pipeline
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
def create_test_pipeline():
  """Creates a sample pipeline with ForEach context."""

  example_gen = CsvExampleGen(input_base='/data/mydummy_dataset')

  # Fan out: one StatisticsGen execution per examples artifact.
  with for_each.ForEach(example_gen.outputs['examples']) as each_example:
    statistics_gen = StatisticsGen(examples=each_example)

  # Collapse back to a single artifact by resolving the latest statistics.
  latest_stats_resolver = resolver.Resolver(
      statistics=statistics_gen.outputs['statistics'],
      strategy_class=latest_artifact_strategy.LatestArtifactStrategy,
  ).with_id('latest_stats_resolver')
  schema_gen = SchemaGen(statistics=latest_stats_resolver.outputs['statistics'])

  # One Trainer execution per examples artifact, sharing the same schema.
  with for_each.ForEach(example_gen.outputs['examples']) as each_example:
    trainer = Trainer(
        module_file='/src/train.py',
        examples=each_example,
        schema=schema_gen.outputs['schema'],
        train_args=trainer_pb2.TrainArgs(num_steps=2000),
    )

  # One Pusher execution per produced model.
  with for_each.ForEach(trainer.outputs['model']) as each_model:
    pusher = Pusher(
        model=each_model,
        push_destination=pusher_pb2.PushDestination(
            filesystem=pusher_pb2.PushDestination.Filesystem(
                base_directory='/models')),
    )

  return pipeline.Pipeline(
      pipeline_name='foreach',
      pipeline_root='/tfx/pipelines/foreach',
      components=[
          example_gen,
          statistics_gen,
          latest_stats_resolver,
          schema_gen,
          trainer,
          pusher,
      ],
      enable_cache=True,
      execution_mode=pipeline.ExecutionMode.SYNC,
  )
| tensorflow/tfx | tfx/dsl/compiler/testdata/foreach_pipeline.py | Python | apache-2.0 | 2,580 | 0.003488 |
"""
Tree and graph transformations.
"""
from typing import Optional, Dict, Set, List, Tuple
import logging
from penman.types import (Variable, Target, BasicTriple, Node)
from penman.exceptions import ModelError
from penman.epigraph import (Epidatum, Epidata)
from penman.surface import (Alignment, RoleAlignment, alignments)
from penman.tree import (Tree, is_atomic)
from penman.graph import (Graph, CONCEPT_ROLE)
from penman.model import Model
from penman.layout import (
Push,
Pop,
POP,
appears_inverted,
get_pushed_variable,
)
logger = logging.getLogger(__name__)
def canonicalize_roles(t: Tree, model: Model) -> Tree:
    """
    Return a copy of *t* whose roles are canonical according to *model*.

    Role canonicalization is performed on the tree rather than on the
    pure graph because a triple's orientation is only fixed once the
    graph has been configured into a tree.

    Args:
        t: a :class:`~penman.tree.Tree` object
        model: a model defining role normalizations; ``None`` falls back
            to a default :class:`~penman.model.Model`
    Returns:
        A new :class:`~penman.tree.Tree` object with canonicalized
        roles.
    Example:
        >>> from penman.codec import PENMANCodec
        >>> from penman.models.amr import model
        >>> from penman.transform import canonicalize_roles
        >>> codec = PENMANCodec()
        >>> t = codec.parse('(c / chapter :domain-of 7)')
        >>> t = canonicalize_roles(t, model)
        >>> print(codec.format(t))
        (c / chapter
           :mod 7)
    """
    active_model = Model() if model is None else model
    result = Tree(_canonicalize_node(t.node, active_model),
                  metadata=t.metadata)
    logger.info('Canonicalized roles: %s', result)
    return result
def _canonicalize_node(node: Node, model: Model) -> Node:
    """Recursively canonicalize the roles of every edge under *node*."""
    var, edges = node

    def canonical(edge):
        role, tgt = edge
        # Alignments have not been parsed off yet, so superficially strip
        # any '~...' suffix before canonicalizing and re-attach it after.
        bare_role, tilde, alignment = role.partition('~')
        if not is_atomic(tgt):
            tgt = _canonicalize_node(tgt, model)
        return (model.canonicalize_role(bare_role) + tilde + alignment, tgt)

    return (var, [canonical(edge) for edge in edges])
def reify_edges(g: Graph, model: Model) -> Graph:
    """
    Reify all edges in *g* that have reifications in *model*.

    Args:
        g: a :class:`~penman.graph.Graph` object
        model: a model defining reifications; ``None`` falls back to a
            default :class:`~penman.model.Model`
    Returns:
        A new :class:`~penman.graph.Graph` object with reified edges.
    Example:
        >>> from penman.codec import PENMANCodec
        >>> from penman.models.amr import model
        >>> from penman.transform import reify_edges
        >>> codec = PENMANCodec(model=model)
        >>> g = codec.decode('(c / chapter :mod 7)')
        >>> g = reify_edges(g, model)
        >>> print(codec.encode(g))
        (c / chapter
           :ARG1-of (_ / have-mod-91
                       :ARG2 7))
    """
    vars = g.variables()
    if model is None:
        model = Model()
    new_epidata = dict(g.epidata)
    new_triples: List[BasicTriple] = []
    for triple in g.triples:
        if model.is_role_reifiable(triple[1]):
            # Replace (src, role, tgt) with three triples routed through a
            # fresh node: incoming edge, concept triple, outgoing edge.
            in_triple, node_triple, out_triple = model.reify(triple, vars)
            if appears_inverted(g, triple):
                # Preserve the original triple's tree orientation.
                in_triple, out_triple = out_triple, in_triple
            new_triples.extend((in_triple, node_triple, out_triple))
            var = node_triple[0]
            vars.add(var)
            # manage epigraphical markers
            new_epidata[in_triple] = [Push(var)]
            old_epis = new_epidata.pop(triple) if triple in new_epidata else []
            node_epis, out_epis = _edge_markers(old_epis)
            new_epidata[node_triple] = node_epis
            new_epidata[out_triple] = out_epis
            # we don't know where to put the final POP without configuring
            # the tree; maybe this should be a tree operation?
        else:
            # Role is not reifiable; keep the triple as-is.
            new_triples.append(triple)
    g = Graph(new_triples,
              epidata=new_epidata,
              metadata=g.metadata)
    logger.info('Reified edges: %s', g)
    return g
def dereify_edges(g: Graph, model: Model) -> Graph:
    """
    Dereify edges in *g* that have reifications in *model*.

    Args:
        g: a :class:`~penman.graph.Graph` object
        model: a model defining reifications; ``None`` falls back to a
            default :class:`~penman.model.Model`
    Returns:
        A new :class:`~penman.graph.Graph` object with dereified
        edges.
    Example:
        >>> from penman.codec import PENMANCodec
        >>> from penman.models.amr import model
        >>> from penman.transform import dereify_edges
        >>> codec = PENMANCodec(model=model)
        >>> g = codec.decode(
        ...     '(c / chapter'
        ...     '   :ARG1-of (_ / have-mod-91'
        ...     '               :ARG2 7))')
        >>> g = dereify_edges(g, model)
        >>> print(codec.encode(g))
        (c / chapter
           :mod 7)
    """
    if model is None:
        model = Model()
    # agenda maps each collapsible node's variable to (first triple of the
    # node, replacement edge triple, epidata for the replacement).
    agenda = _dereify_agenda(g, model)
    new_epidata = dict(g.epidata)
    new_triples: List[BasicTriple] = []
    for triple in g.triples:
        var = triple[0]
        if var in agenda:
            first, dereified, epidata = agenda[var]
            # only insert at the first triple so the dereification
            # appears in the correct location
            if triple == first:
                new_triples.append(dereified)
                new_epidata[dereified] = epidata
            # Every triple of the reified node is dropped, along with its
            # epigraphical data.
            if triple in new_epidata:
                del new_epidata[triple]
        else:
            new_triples.append(triple)
    g = Graph(new_triples,
              epidata=new_epidata,
              metadata=g.metadata)
    logger.info('Dereified edges: %s', g)
    return g
def reify_attributes(g: Graph) -> Graph:
    """
    Reify all attributes in *g*.

    Each attribute triple (source, role, constant) is replaced by an edge
    to a new node whose concept is the original constant value.

    Args:
        g: a :class:`~penman.graph.Graph` object
    Returns:
        A new :class:`~penman.graph.Graph` object with reified
        attributes.
    Example:
        >>> from penman.codec import PENMANCodec
        >>> from penman.models.amr import model
        >>> from penman.transform import reify_attributes
        >>> codec = PENMANCodec(model=model)
        >>> g = codec.decode('(c / chapter :mod 7)')
        >>> g = reify_attributes(g)
        >>> print(codec.encode(g))
        (c / chapter
           :mod (_ / 7))
    """
    variables = g.variables()
    new_epidata = dict(g.epidata)
    new_triples: List[BasicTriple] = []
    # Counter for generating unique variables; shared across the whole
    # graph so successive new nodes become '_', '_2', '_3', ...
    i = 2
    for triple in g.triples:
        source, role, target = triple
        if role != CONCEPT_ROLE and target not in variables:
            # get unique var for new node
            var = '_'
            while var in variables:
                var = f'_{i}'
                i += 1
            variables.add(var)
            # Interpose the new node: an edge to it plus a concept triple
            # carrying the original attribute value.
            role_triple = (source, role, var)
            node_triple = (var, CONCEPT_ROLE, target)
            new_triples.extend((role_triple, node_triple))
            # manage epigraphical markers
            old_epis = new_epidata.pop(triple) if triple in new_epidata else []
            role_epis, node_epis = _attr_markers(old_epis)
            # Push/POP keep the tree layout: the new node opens and closes
            # immediately around its concept triple.
            new_epidata[role_triple] = role_epis + [Push(var)]
            new_epidata[node_triple] = node_epis + [POP]
        else:
            new_triples.append(triple)
    g = Graph(new_triples,
              epidata=new_epidata,
              metadata=g.metadata)
    logger.info('Reified attributes: %s', g)
    return g
def indicate_branches(g: Graph, model: Model) -> Graph:
    """
    Insert TOP triples in *g* indicating the tree structure.

    Note:
        This depends on *g* containing the epigraphical layout markers
        from parsing; it will not work with programmatically
        constructed Graph objects or those whose epigraphical data
        were removed.
    Args:
        g: a :class:`~penman.graph.Graph` object
        model: a model defining the TOP role
    Returns:
        A new :class:`~penman.graph.Graph` object with TOP roles
        indicating tree branches.
    Example:
        >>> from penman.codec import PENMANCodec
        >>> from penman.models.amr import model
        >>> from penman.transform import indicate_branches
        >>> codec = PENMANCodec(model=model)
        >>> g = codec.decode('''
        ... (w / want-01
        ...    :ARG0 (b / boy)
        ...    :ARG1 (g / go-02
        ...             :ARG0 b))''')
        >>> g = indicate_branches(g, model)
        >>> print(codec.encode(g))
        (w / want-01
           :TOP b
           :ARG0 (b / boy)
           :TOP g
           :ARG1 (g / go-02
                    :ARG0 b))
    """
    new_triples: List[BasicTriple] = []
    for t in g.triples:
        # A Push marker in a triple's epidata means a new tree branch was
        # opened at that triple during parsing.
        push = next((epi for epi in g.epidata.get(t, [])
                     if isinstance(epi, Push)),
                    None)
        if push is not None:
            if push.variable == t[2]:
                # Branch descends into the target.
                new_triples.append((t[0], model.top_role, t[2]))
            elif push.variable == t[0]:
                # Inverted triple: the branch descends into the source.
                assert isinstance(t[2], str)
                new_triples.append((t[2], model.top_role, t[0]))
        # The TOP triple (if any) precedes the original triple.
        new_triples.append(t)
    g = Graph(new_triples,
              epidata=g.epidata,
              metadata=g.metadata)
    logger.info('Indicated branches: %s', g)
    return g
_SplitMarkers = Tuple[Optional[Push], List[Pop], Epidata, Epidata]


def _reified_markers(epidata: Epidata) -> _SplitMarkers:
    """
    Split *epidata* into (push, pops, role markers, other markers).

    Reifying a relation makes the original triple disappear, so its
    epigraphical data must be redistributed (and sometimes converted).
    For example, with surface alignments::

        (a :role~1 b~2)

    edge reification should yield::

        (a :ARG1-of (_ / role-label~1 :ARG2 b~2))

    while attribute reification should yield::

        (a :role~1 (_ / b~2))
    """
    push: Optional[Push] = None
    pops: List[Pop] = []
    role_markers: Epidata = []
    remainder: Epidata = []
    for datum in epidata:
        if isinstance(datum, Push):
            push = datum  # keep the last Push seen, as before
        elif isinstance(datum, Pop):
            pops.append(datum)
        else:
            # mode 1 marks role-attached data; everything else follows
            # the target.
            (role_markers if datum.mode == 1 else remainder).append(datum)
    return push, pops, role_markers, remainder
def _edge_markers(epidata: Epidata) -> Tuple[Epidata, Epidata]:
    """Redistribute *epidata* for edge reification.

    Returns (markers for the new concept triple, markers for the new
    outgoing triple).
    """
    push, pops, role_epis, other_epis = _reified_markers(epidata)
    # Role alignments on the original triple become plain alignments on
    # the reified node's concept; other role markers have no equivalent
    # after reification and are discarded.
    node_epis: List[Epidatum] = [
        Alignment(epi.indices, prefix=epi.prefix)
        for epi in role_epis
        if isinstance(epi, RoleAlignment)
    ]
    # Remaining markers, plus any layout Push/Pop, group onto the new
    # outgoing triple.
    out_epis = other_epis
    if push:
        out_epis.append(push)
    out_epis.extend(pops)
    return node_epis, out_epis
_Dereification = Dict[Variable,
                      Tuple[BasicTriple,    # inverted triple of reification
                            BasicTriple,    # dereified triple
                            List[Epidatum]]]  # computed epidata


def _dereify_agenda(g: Graph, model: Model) -> _Dereification:
    """Build a plan mapping collapsible node variables to dereified edges.

    A node is a dereification candidate when its variable never appears
    as a target elsewhere (and is not the graph's top), it has exactly
    two non-concept triples, and *model* can dereify its concept.
    """
    alns = alignments(g)
    agenda: _Dereification = {}
    # Variables that must be kept: the top plus every triple target.
    fixed: Set[Target] = set([g.top])
    inst: Dict[Variable, BasicTriple] = {}
    other: Dict[Variable, List[BasicTriple]] = {}
    # First pass: index concept triples by variable and group the
    # remaining triples by their source variable.
    for triple in g.triples:
        var, role, tgt = triple
        if role == CONCEPT_ROLE:
            inst[var] = triple
        else:
            fixed.add(tgt)
            if var not in other:
                other[var] = [triple]
            else:
                other[var].append(triple)
    for var, instance in inst.items():
        if (var not in fixed
                and len(other.get(var, [])) == 2
                and model.is_concept_dereifiable(instance[2])):
            # passed initial checks
            # now figure out which other edge is the first one
            first, second = other[var]
            if get_pushed_variable(g, second) == var:
                first, second = second, first
            try:
                dereified = model.dereify(instance, first, second)
            except ModelError:
                # Model cannot dereify this combination; leave it alone.
                pass
            else:
                # migrate epidata
                epidata: List[Epidatum] = []
                if instance in alns:
                    # The concept's alignment becomes a role alignment on
                    # the dereified edge.
                    aln = alns[instance]
                    epidata.append(
                        RoleAlignment(aln.indices, prefix=aln.prefix))
                epidata.extend(epi for epi in g.epidata[second]
                               if not isinstance(epi, RoleAlignment))
                agenda[var] = (first, dereified, epidata)
    return agenda
def _attr_markers(epidata: Epidata) -> Tuple[Epidata, Epidata]:
    """Redistribute *epidata* for attribute reification.

    Returns (markers for the new role triple, markers for the new node
    triple). Any Push is dropped; Pops follow the node triple.
    """
    _push, pops, role_epis, other_epis = _reified_markers(epidata)
    return role_epis, other_epis + pops
| goodmami/penman | penman/transform.py | Python | mit | 13,014 | 0 |
"""Support for OVO Energy."""
from __future__ import annotations
from datetime import datetime, timedelta
import logging
import aiohttp
import async_timeout
from ovoenergy import OVODailyUsage
from ovoenergy.ovoenergy import OVOEnergy
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_CLIENT, DATA_COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up OVO Energy from a config entry.

    Authenticates once up front, then registers a DataUpdateCoordinator
    that re-authenticates and fetches the current month's daily usage
    on every refresh (hourly).

    Raises:
        ConfigEntryNotReady: on a transient network error during setup,
            so Home Assistant retries later.
        ConfigEntryAuthFailed: when the stored credentials are rejected,
            which starts the re-auth flow.
    """
    client = OVOEnergy()
    try:
        authenticated = await client.authenticate(
            entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
        )
    except aiohttp.ClientError as exception:
        _LOGGER.warning(exception)
        raise ConfigEntryNotReady from exception
    if not authenticated:
        raise ConfigEntryAuthFailed
    async def async_update_data() -> OVODailyUsage:
        """Fetch data from OVO Energy."""
        # Re-authenticate on each refresh; presumably the session expires
        # between hourly polls — TODO confirm against the OVO API.
        async with async_timeout.timeout(10):
            try:
                authenticated = await client.authenticate(
                    entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
                )
            except aiohttp.ClientError as exception:
                raise UpdateFailed(exception) from exception
            if not authenticated:
                raise ConfigEntryAuthFailed("Not authenticated with OVO Energy")
            # Usage is requested per calendar month ('%Y-%m').
            return await client.get_daily_usage(datetime.utcnow().strftime("%Y-%m"))
    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        # Name of the data. For logging purposes.
        name="sensor",
        update_method=async_update_data,
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=timedelta(seconds=3600),
    )
    # Store the client and coordinator for the sensor platform to use.
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = {
        DATA_CLIENT: client,
        DATA_COORDINATOR: coordinator,
    }
    # Fetch initial data so we have data when entities subscribe
    await coordinator.async_config_entry_first_refresh()
    # Setup components
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload OVO Energy config entry.

    Args:
        hass: the Home Assistant instance.
        entry: the config entry being unloaded (was mis-annotated as
            ``ConfigType``; ``entry.entry_id`` is a ConfigEntry attribute).

    Returns:
        True if all platforms unloaded cleanly.
    """
    # Unload sensors
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    # Only drop the stored client/coordinator when the platforms actually
    # unloaded; otherwise still-loaded entities would reference freed data.
    if unload_ok:
        del hass.data[DOMAIN][entry.entry_id]
    return unload_ok
class OVOEnergyEntity(CoordinatorEntity):
    """Defines a base OVO Energy entity.

    Couples a coordinator-backed entity with the authenticated OVO
    Energy API client shared by all entities of a config entry.
    """
    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        client: OVOEnergy,
    ) -> None:
        """Initialize the OVO Energy entity.

        Args:
            coordinator: the shared data update coordinator.
            client: the authenticated OVO Energy API client.
        """
        super().__init__(coordinator)
        # Kept for subclasses (e.g. device info uses account id/username).
        self._client = client
class OVOEnergyDeviceEntity(OVOEnergyEntity):
    """Defines a OVO Energy device entity."""
    @property
    def device_info(self) -> DeviceInfo:
        """Return device information about this OVO Energy instance."""
        # Group all entities of this entry under one "service" device
        # keyed by the OVO account id.
        return DeviceInfo(
            entry_type=DeviceEntryType.SERVICE,
            identifiers={(DOMAIN, self._client.account_id)},
            manufacturer="OVO Energy",
            name=self._client.username,
        )
"""Hidden Markov Models implemented in linear memory/running time"""
from distutils.core import setup
from distutils.extension import Extension
import numpy
# Prefer building the extension from the Cython source when Cython is
# installed; otherwise fall back to the pre-generated C file.
try:
    from Cython.Distutils import build_ext
except ImportError:
    use_cython = False
else:
    use_cython = True

cmdclass = {}
score_source = "linearhmm/score.pyx" if use_cython else "linearhmm/score.c"
ext_modules = [
    Extension(
        "linearhmm.score",
        [score_source],
        include_dirs=[numpy.get_include()],
    ),
]
if use_cython:
    cmdclass["build_ext"] = build_ext

# The version string lives in the package itself.
import linearhmm

VERSION = linearhmm.__version__
# MAINTAINER = "Sergei Lebedev"
# MAINTAINER_EMAIL = "[email protected]"
install_requires = ["numpy"]
tests_require = install_requires + ["pytest"]

setup_options = dict(
    name="linearhmm",
    version=VERSION,
    # maintainer=MAINTAINER,
    # maintainer_email=MAINTAINER_EMAIL,
    url="https://github.com/hmmlearn/hmmlearn",
    packages=["linearhmm"],
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    install_requires=install_requires,
)

if __name__ == "__main__":
    setup(**setup_options)