from src.commands.Command import Command
from src.models.domain.User import User
from src.utilities.database import db_session
class AddStrandUserToTeamCommand(Command):
"""
1) Grab additional user info from Slack
2) Either create user with the team, or just attach existing user to the team
3) Update User.strand_user_id
"""
def __init__(self, slack_team_id, slack_user_id, strand_team_id, strand_api_client_wrapper, slack_client_wrapper):
super().__init__(strand_api_client_wrapper=strand_api_client_wrapper, slack_client_wrapper=slack_client_wrapper)
self.slack_team_id = slack_team_id
self.slack_user_id = slack_user_id
self.strand_team_id = strand_team_id
@db_session
def execute(self, session):
log_msg = f'Creating user {self.slack_user_id} on strand team {self.strand_team_id}'
self.logger.debug(log_msg)
slack_user = self.slack_client_wrapper.get_user_info(slack_team_id=self.slack_team_id,
slack_user_id=self.slack_user_id)
real_name_tokens = slack_user.profile.real_name.split(' ')
first_name = real_name_tokens[0]
last_name = real_name_tokens[-1] if len(real_name_tokens) > 1 else ''
strand_user = self.strand_api_client_wrapper.get_user_by_email(email=slack_user.profile.email)
if strand_user:
# Strand API has seen this user email before
self.strand_api_client_wrapper.add_user_to_team(user_id=strand_user.id,
team_id=self.strand_team_id)
else:
strand_user = self.strand_api_client_wrapper.create_user_with_team(email=slack_user.profile.email,
first_name=first_name,
last_name=last_name,
team_id=self.strand_team_id)
self._update_user(slack_user_id=self.slack_user_id, strand_user_id=strand_user.id, session=session)
@staticmethod
def _update_user(slack_user_id, strand_user_id, session):
user = session.query(User).filter(User.slack_user_id == slack_user_id).one()
user.strand_user_id = strand_user_id
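# Hedged usage sketch (not part of the original module): the wrapper objects are
# replaced with mocks so the command can be constructed in isolation; running
# execute() for real also needs the database behind the @db_session decorator.
# All IDs below are placeholders.
from unittest.mock import MagicMock
def _example_add_strand_user():
    slack_wrapper = MagicMock()
    slack_wrapper.get_user_info.return_value.profile.real_name = 'Ada Lovelace'
    slack_wrapper.get_user_info.return_value.profile.email = 'ada@example.com'
    strand_wrapper = MagicMock()
    strand_wrapper.get_user_by_email.return_value = None  # forces the create-with-team path
    command = AddStrandUserToTeamCommand(
        slack_team_id='T0000001',
        slack_user_id='U0000001',
        strand_team_id=42,
        strand_api_client_wrapper=strand_wrapper,
        slack_client_wrapper=slack_wrapper,
    )
    return command  # command.execute() would then run steps 1-3 from the docstring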
|
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
from skimage import io, color
from skimage import exposure
img = io.imread('lena_gray.jpg') # Load the image
img = color.rgb2gray(img) # Convert the image to grayscale (1 channel)
# apply sharpen filter to the original image
# sharpen_kernel = np.array([[0,-1,0],[-1,5,-1],[0,-1,0]])
# image_sharpen = scipy.signal.convolve2d(img, sharpen_kernel, 'valid')
# apply edge detection filter to the sharpen image
edge_kernel = np.array([[-1,-2,-1],[-1,-2,-1],[1,2,1]])
edges = scipy.signal.convolve2d(img, edge_kernel, 'valid')
# apply blur filter to the edge detection filtered image
# blur_kernel = np.array([[1,1,1],[1,1,1],[1,1,1]])/9.0;
# denoised = scipy.signal.convolve2d(edges, blur_kernel, 'valid')
# Adjust the contrast of the filtered image by applying Histogram Equalization
# denoised_equalized = exposure.equalize_adapthist(denoised/np.max(np.abs(denoised)), clip_limit=0.03)
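# Hedged sketch (assumes the loaded image is a 2-D grayscale array): the
# commented-out lines above describe a fuller sharpen -> edge -> blur ->
# equalize pipeline; this assembles them end to end. The sharpen/blur kernels
# and the equalization call are taken from those comments; the edge kernel here
# is the conventional Sobel kernel with a zero middle row.
def filter_pipeline(gray_img):
    sharpen_kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
    sharpened = scipy.signal.convolve2d(gray_img, sharpen_kernel, 'valid')
    sobel_kernel = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
    edge_map = scipy.signal.convolve2d(sharpened, sobel_kernel, 'valid')
    blur_kernel = np.ones((3, 3)) / 9.0
    denoised = scipy.signal.convolve2d(edge_map, blur_kernel, 'valid')
    # Scale by the maximum absolute value before adaptive histogram
    # equalization, as in the commented-out line above
    return exposure.equalize_adapthist(denoised / np.max(np.abs(denoised)),
                                       clip_limit=0.03)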
plt.imshow(edges, cmap=plt.cm.gray) # plot the edge-filtered image
plt.axis('off')
plt.show()
|
import pandas as pd
from death.unified.uni import *
if __name__ == '__main__':
# test_all_for_AUROC_0(epoch=40, test_stat_dir="40epoch")
# test_all_for_AUROC_0(epoch=20, test_stat_dir="20epoch")
test_all_for_AUROC_0(epoch=4, test_stat_dir="4epoch")
# all_working_1()
# transformer200()
|
import os
import genshinstats as gs
import pytest
import urllib3
# Unless someone knows how to inject certificates into a GitHub workflow, this is required.
try:
gs.get_langs()
gs.search("a")
except urllib3.exceptions.SSLError:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
gs.genshinstats.session.verify = False
uid = 710785423
hoyolab_uid = 8366222
@pytest.fixture(scope="module", autouse=True)
def set_cookie():
gs.set_cookie(ltuid=os.environ["GS_LTUID"], ltoken=os.environ["GS_LTOKEN"])
def test_recognize_server():
assert gs.recognize_server(uid) == "os_euro"
def test_user_stats():
    stats = gs.get_user_stats(uid)
    assert stats is not None
def test_characters():
    characters = gs.get_characters(uid)
    assert characters is not None
def test_spiral_abyss():
    abyss = [gs.get_spiral_abyss(uid), gs.get_spiral_abyss(uid, previous=True)]
    assert all(a is not None for a in abyss)
def test_activities():
    activities = gs.get_activities(uid)
    assert activities is not None
def test_is_game_uid():
assert gs.is_game_uid(710785423)
assert not gs.is_game_uid(8366222)
def test_is_chinese():
for i in ("cn_gf01", "cn_qd01", "123456789", 567890123):
assert gs.is_chinese(i)
for i in ("os_usa", "os_asia", "678901234", 890123456):
assert not gs.is_chinese(i)
def test_record_card():
card = gs.get_record_card(hoyolab_uid)
assert card is not None
def test_uid_from_community():
assert gs.get_uid_from_hoyolab_uid(hoyolab_uid) == uid
def test_recommended():
recommended = gs.get_recommended_users()
assert len(recommended) > 100
def test_hot_posts():
hot_posts = gs.get_hot_posts(size=120)
assert len(hot_posts) > 100
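# Hedged sketch (the GS_LTUID / GS_LTOKEN values below are placeholders): the
# autouse fixture above reads the hoyolab cookie from the environment, so a
# local run needs both variables set before pytest collects this module.
if __name__ == "__main__":
    os.environ.setdefault("GS_LTUID", "000000000")
    os.environ.setdefault("GS_LTOKEN", "xxxxxxxxxxxxxxxxxxxx")
    raise SystemExit(pytest.main([__file__, "-q"]))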
|
from django.apps import AppConfig
class DatasetConfig(AppConfig):
name = 'volumes'
verbose_name = 'Scaleout Dataset'
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.settings.app import Settings
class TestWallpaper(GaiaTestCase):
# default wallpaper
_default_wallpaper_settings = None
_new_wallpaper_settings = None
def test_change_wallpaper(self):
"""https://moztrap.mozilla.org/manage/case/3449/"""
settings = Settings(self.marionette)
settings.launch()
display_settings = settings.open_display()
self._default_wallpaper_settings = self.data_layer.get_setting('wallpaper.image')
# Open activities menu
activities_menu = display_settings.pick_wallpaper()
# choose the source as wallpaper app
wallpaper = activities_menu.tap_wallpaper()
wallpaper.tap_wallpaper_by_index(3)
self._new_wallpaper_settings = self.data_layer.get_setting('wallpaper.image')
self.assertNotEqual(self._default_wallpaper_settings, self._new_wallpaper_settings)
|
#!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import argparse
import os
import re
from time import sleep
from subprocess import check_call
from io import BytesIO
import pexpect
def get_argparser(ArgumentParser=argparse.ArgumentParser):
parser = ArgumentParser(description='Run powerline shell test using pexpect')
parser.add_argument('--wait-for-echo', action='store_true', help='Wait until the input is echoed back.')
parser.add_argument('--type', metavar='TYPE', help='Test type (daemon, nodaemon, …).')
parser.add_argument('--client', metavar='CLIENT', help='Type of the client used (C, shell, zpython, …).')
parser.add_argument('--shell', metavar='SHELL', help='Shell name.')
parser.add_argument('command', nargs=argparse.REMAINDER, metavar='COMMAND',
help='Command to run and its arguments.')
return parser
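# Hedged sketch (the argument values are assumptions, not from the test suite):
# exercising the parser above shows how its own options are separated from the
# REMAINDER command that gets spawned under pexpect.
def _example_parse():
    demo = get_argparser().parse_args(
        ['--shell', 'bash', '--type', 'nodaemon', 'bash', '--norc', '-i'])
    assert demo.shell == 'bash'
    assert demo.command == ['bash', '--norc', '-i']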
def main():
parser = get_argparser()
args = parser.parse_args()
shell = args.shell or args.command[0]
test_type = args.type or shell
test_client = args.client or test_type
log_file_base = '{0}.{1}.{2}'.format(shell, test_type, test_client)
full_log_file_name = os.path.join('tests', 'shell', '{0}.full.log'.format(log_file_base))
# postproc_log_file_name = os.path.join('tests', 'shell', '{0}.log'.format(log_file_base))
local_paths = [
os.path.abspath(os.path.join('tests', 'shell', 'path')),
os.path.abspath('scripts'),
]
if test_type == 'fish':
local_paths += ['/usr/bin', '/bin']
python_paths = os.environ.get('PYTHONPATH', '')
if python_paths:
python_paths = ':' + python_paths
python_paths = os.path.abspath('.') + python_paths
environ = {
'LANG': 'en_US.UTF-8',
'PATH': os.pathsep.join(local_paths),
'TERM': 'screen-256color',
'DIR1': os.environ['DIR1'],
'DIR2': os.environ['DIR2'],
'XDG_CONFIG_HOME': os.path.abspath(os.path.join('tests', 'shell', 'fish_home')),
'IPYTHONDIR': os.path.abspath(os.path.join('tests', 'shell', 'ipython_home')),
'PYTHONPATH': python_paths,
'POWERLINE_CONFIG_OVERRIDES': os.environ.get('POWERLINE_CONFIG_OVERRIDES', ''),
'POWERLINE_THEME_OVERRIDES': os.environ.get('POWERLINE_THEME_OVERRIDES', ''),
'POWERLINE_CONFIG_PATHS': os.path.abspath(os.path.join('powerline', 'config_files')),
'POWERLINE_COMMAND_ARGS': os.environ.get('POWERLINE_COMMAND_ARGS', ''),
'POWERLINE_COMMAND': os.environ.get('POWERLINE_COMMAND', ''),
'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH', ''),
}
os.environ['PATH'] = environ['PATH']
if test_type == 'daemon':
environ['POWERLINE_SHELL_CONTINUATION'] = '1'
environ['POWERLINE_SHELL_SELECT'] = '1'
if test_type != 'zpython' and shell == 'zsh':
environ['POWERLINE_NO_ZSH_ZPYTHON'] = '1'
sio = BytesIO()
child = pexpect.spawn(
args.command[0],
args.command[1:],
env=environ,
logfile=sio,
timeout=30,
)
child.expect(re.compile(b'.*'))
sleep(0.5)
child.setwinsize(1, 300)
with open(os.path.join('tests', 'test_shells', 'input.{0}'.format(shell)), 'rb') as F:
if not args.wait_for_echo:
child.send(F.read())
else:
for line in F:
child.send(line)
sleep(1)
# TODO: implement something smarter
with open(full_log_file_name, 'wb') as LF:
while True:
try:
s = child.read_nonblocking(1000)
except pexpect.TIMEOUT:
break
except pexpect.EOF:
break
else:
LF.write(s)
child.close(force=True)
check_call([
os.path.join('tests', 'shell', 'path', 'python'),
os.path.join('tests', 'test_shells', 'postproc.py'),
test_type, test_client, shell
])
pidfile = os.path.join('tests', 'shell', '3rd', 'pid')
if os.path.exists(pidfile):
os.unlink(pidfile)
if __name__ == '__main__':
main()
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup, find_packages
APP = ['moonticker.py']
DATA_FILES = []
OPTIONS = {
'argv_emulation': True,
'plist': {
'LSUIElement': True,
},
'packages': ['rumps', 'requests', 'certifi']
}
setup(
name='MoonTicker',
version='0.0.1',
author='skxu',
author_email='[email protected]',
description='MacOS StatusBar Ticker for cryptocurrencies like Ethereum',
license='MIT',
url='https://github.com/skxu/MoonTicker',
packages=find_packages(),
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app', 'rumps', 'requests', 'ConfigParser'],
)
|
# Helper file of DroidBot.
# It parses command-line arguments and sends the options to DroidBot.
import argparse
import input_manager
import input_policy
import env_manager
from droidbot import DroidBot
from droidmaster import DroidMaster
def parse_args():
"""
parse command line input
generate options including host name, port number
"""
parser = argparse.ArgumentParser(description="Start DroidBot to test an Android app.",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-d", action="store", dest="device_serial", required=False,
help="The serial number of target device (use `adb devices` to find)")
parser.add_argument("-a", action="store", dest="apk_path", required=True,
help="The file path to target APK")
parser.add_argument("-o", action="store", dest="output_dir",
help="directory of output")
# parser.add_argument("-env", action="store", dest="env_policy",
# help="policy to set up environment. Supported policies:\n"
# "none\tno environment will be set. App will run in default environment of device; \n"
# "dummy\tadd some fake contacts, SMS log, call log; \n"
# "static\tset environment based on static analysis result; \n"
# "<file>\tget environment policy from a json file.\n")
parser.add_argument("-policy", action="store", dest="input_policy", default=input_manager.DEFAULT_POLICY,
help='Policy to use for test input generation. '
'Default: %s.\nSupported policies:\n' % input_manager.DEFAULT_POLICY +
' \"%s\" -- No event will be sent, user should interact manually with device; \n'
' \"%s\" -- Use "adb shell monkey" to send events; \n'
' \"%s\" -- Explore UI using a naive depth-first strategy;\n'
' \"%s\" -- Explore UI using a greedy depth-first strategy;\n'
' \"%s\" -- Explore UI using a naive breadth-first strategy;\n'
' \"%s\" -- Explore UI using a greedy breadth-first strategy;\n'
%
(
input_policy.POLICY_NONE,
input_policy.POLICY_MONKEY,
input_policy.POLICY_NAIVE_DFS,
input_policy.POLICY_GREEDY_DFS,
input_policy.POLICY_NAIVE_BFS,
input_policy.POLICY_GREEDY_BFS,
))
# for distributed DroidBot
parser.add_argument("-distributed", action="store", dest="distributed",
choices=["master", "worker"],
help="Start DroidBot in distributed mode.")
parser.add_argument("-master", action="store", dest="master",
help="DroidMaster's RPC address")
parser.add_argument("-qemu_hda", action="store", dest="qemu_hda",
help="The QEMU's hda image")
parser.add_argument("-qemu_no_graphic", action="store_true", dest="qemu_no_graphic",
help="Run QEMU with -nograpihc parameter")
parser.add_argument("-script", action="store", dest="script_path",
help="Use a script to customize input for certain states.")
parser.add_argument("-count", action="store", dest="count", default=input_manager.DEFAULT_EVENT_COUNT,
type=int, help="Number of events to generate in total. "
"Default: %d" % input_manager.DEFAULT_EVENT_COUNT)
parser.add_argument("-interval", action="store", dest="interval", default=input_manager.DEFAULT_EVENT_INTERVAL,
type=int, help="Interval in seconds between each two events. "
"Default: %d" % input_manager.DEFAULT_EVENT_INTERVAL)
parser.add_argument("-timeout", action="store", dest="timeout", default=input_manager.DEFAULT_TIMEOUT,
type=int, help="Timeout in seconds, -1 means unlimited. "
"Default: %d" % input_manager.DEFAULT_TIMEOUT)
parser.add_argument("-cv", action="store_true", dest="cv_mode",
help="Use OpenCV (instead of UIAutomator) to identify UI components. "
"CV mode requires opencv-python installed.")
parser.add_argument("-debug", action="store_true", dest="debug_mode",
help="Run in debug mode (dump debug messages).")
parser.add_argument("-random", action="store_true", dest="random_input",
help="Add randomness to input events.")
parser.add_argument("-keep_app", action="store_true", dest="keep_app",
help="Keep the app on the device after testing.")
parser.add_argument("-keep_env", action="store_true", dest="keep_env",
help="Keep the test environment (eg. minicap and accessibility service) after testing.")
parser.add_argument("-use_method_profiling", action="store", dest="profiling_method",
help="Record method trace for each event. can be \"full\" or a sampling rate.")
parser.add_argument("-grant_perm", action="store_true", dest="grant_perm",
help="Grant all permissions while installing. Useful for Android 6.0+.")
parser.add_argument("-is_emulator", action="store_true", dest="is_emulator",
help="Declare the target device to be an emulator, which would be treated specially by DroidBot.")
parser.add_argument("-accessibility_auto", action="store_true", dest="enable_accessibility_hard",
help="Enable the accessibility service automatically even though it might require device restart\n(can be useful for Android API level < 23).")
options = parser.parse_args()
# print options
return options
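# Hedged usage sketch (the script name, APK path and output directory are
# placeholders): the parser above is meant to be driven from the shell, e.g.
#   python start.py -a /path/to/app.apk -o droidbot_out/ -count 500 -grant_perm
# Leaving -policy unset falls back to input_manager.DEFAULT_POLICY.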
def main():
"""
the main function
it starts a droidbot according to the arguments given in cmd line
"""
opts = parse_args()
import os
if not os.path.exists(opts.apk_path):
print "APK does not exist."
return
if not opts.output_dir and opts.cv_mode:
print "To run in CV mode, you need to specify an output dir (using -o option)."
if opts.distributed:
if opts.distributed == "master":
start_mode = "master"
else:
start_mode = "worker"
else:
start_mode = "normal"
if start_mode == "master":
droidmaster = DroidMaster(app_path=opts.apk_path,
device_serial=opts.device_serial,
is_emulator=opts.is_emulator,
output_dir=opts.output_dir,
# env_policy=opts.env_policy,
env_policy=env_manager.POLICY_NONE,
policy_name=opts.input_policy,
random_input=opts.random_input,
script_path=opts.script_path,
event_interval=opts.interval,
timeout=opts.timeout,
event_count=opts.count,
cv_mode=opts.cv_mode,
debug_mode=opts.debug_mode,
keep_app=opts.keep_app,
keep_env=opts.keep_env,
profiling_method=opts.profiling_method,
grant_perm=opts.grant_perm,
enable_accessibility_hard=opts.enable_accessibility_hard,
qemu_hda=opts.qemu_hda,
qemu_no_graphic=opts.qemu_no_graphic)
droidmaster.start()
else:
droidbot = DroidBot(app_path=opts.apk_path,
device_serial=opts.device_serial,
is_emulator=opts.is_emulator,
output_dir=opts.output_dir,
# env_policy=opts.env_policy,
env_policy=env_manager.POLICY_NONE,
policy_name=opts.input_policy,
random_input=opts.random_input,
script_path=opts.script_path,
event_interval=opts.interval,
timeout=opts.timeout,
event_count=opts.count,
cv_mode=opts.cv_mode,
debug_mode=opts.debug_mode,
keep_app=opts.keep_app,
keep_env=opts.keep_env,
profiling_method=opts.profiling_method,
grant_perm=opts.grant_perm,
enable_accessibility_hard=opts.enable_accessibility_hard,
master=opts.master)
droidbot.start()
return
if __name__ == "__main__":
main()
|
# python-mqlight - high-level API by which you can interact with MQ Light
#
# Copyright 2015-2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, absolute_import
import sys
import socket
import ssl
import select
import threading
import time
from json import loads
from . import cproton
from .exceptions import MQLightError, SecurityError, ReplacedError, \
NetworkError, InvalidArgumentError, NotPermittedError
from .logging import get_logger, NO_CLIENT_ID
from .definitions import QOS_AT_MOST_ONCE, QOS_AT_LEAST_ONCE
from .utils import is_text
PYTHON2 = sys.version_info < (3, 0)
PYTHON3 = sys.version_info >= (3, 0)
if PYTHON3:
from queue import Queue
else:
from Queue import Queue
from exceptions import SystemExit
LOG = get_logger(__name__)
STATUSES = ['UNKNOWN', 'PENDING', 'ACCEPTED', 'REJECTED', 'RELEASED',
'MODIFIED', 'ABORTED', 'SETTLED']
class _MQLightMessage(object):
"""
Wrapper for the Proton Message class
"""
def __init__(self, message=None):
"""
MQLight Message constructor
"""
LOG.entry('_MQLightMessage.constructor', NO_CLIENT_ID)
if message:
LOG.parms(NO_CLIENT_ID, 'message:', message)
self._msg = message
self._body = None
self._body = self._get_body()
else:
self._msg = cproton.pn_message()
self._body = None
self._tracker = None
self._link_address = None
self.connection_id = None
LOG.exit('_MQLightMessage.constructor', NO_CLIENT_ID, None)
def _set_body(self, value):
"""
Handles body data type and encoding
"""
LOG.entry('_MQLightMessage._set_body', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'value:', value)
if self._msg:
body = cproton.pn_message_body(self._msg)
if is_text(value):
LOG.data(NO_CLIENT_ID, 'setting the body format as text')
if PYTHON3:
cproton.pn_data_put_string(body, bytes(value, 'utf8'))
else:
cproton.pn_data_put_string(body, str(value))
else:
LOG.data(NO_CLIENT_ID, 'setting the body format as data')
cproton.pn_data_put_binary(body, bytes(value))
self._body = self._get_body()
LOG.data(NO_CLIENT_ID, 'body:', self._body)
LOG.exit('_MQLightMessage._set_body', NO_CLIENT_ID, None)
def _get_body(self):
"""
Handles body data type and encoding
"""
LOG.entry('_MQLightMessage._get_body', NO_CLIENT_ID)
result = None
if self._msg:
if self._body is None:
body = cproton.pn_message_body(self._msg)
# Inspect data:
# (1) Type String or Binary
# (2) Content type.
cproton.pn_data_next(body)
data_type = cproton.pn_data_type(body)
if data_type == cproton.PN_STRING:
LOG.data(NO_CLIENT_ID, 'Inbound message data type: String')
amqp_content_type = self.content_type
result = cproton.pn_data_get_string(body).decode('utf8')
if amqp_content_type == "text/plain":
LOG.data(NO_CLIENT_ID,
'Inbound message content type: text/plain')
elif amqp_content_type == "application/json":
LOG.data(NO_CLIENT_ID,
'Inbound message content type: '
'application/json')
result = loads(result)
else:
# Not any of the expected content type.
# Will treat as text/plain
LOG.data(NO_CLIENT_ID,
'Inbound message content type(Unknown):',
amqp_content_type)
elif data_type == cproton.PN_BINARY:
LOG.data(NO_CLIENT_ID, 'Inbound message data type: Binary')
if PYTHON3:
result = bytearray(cproton.pn_data_get_binary(body))
else:
result = bytearray(
ord(
str(byte)) for byte in list(
cproton.pn_data_get_binary(body)))
else:
# Not any of the expected data types
# Try loading as String & text/plain
LOG.data(NO_CLIENT_ID,
'Inbound message data type(Unknown):',
data_type)
result = cproton.pn_data_get_string(body).decode('utf8')
else:
result = self._body
LOG.exit('_MQLightMessage._get_body',
NO_CLIENT_ID, str(result) + ' ' + str(type(result)))
return result
body = property(_get_body, _set_body)
def _get_delivery_annotations(self):
"""
Gets the message delivery annotations
"""
LOG.entry('_MQLightMessage._get_delivery_annotations', NO_CLIENT_ID)
# instructions === delivery annotations
anno = cproton.pn_message_instructions(self.message)
# Count the number of delivery annotations that we are interested in
# returning
lval = cproton.pn_data_next(anno) # Move to Map
elements = 0
result = []
# Check it actually is a Map
if lval and cproton.pn_data_type(anno) == cproton.PN_MAP:
if lval:
lval = cproton.pn_data_enter(anno) # Enter the Map
if lval:
lval = cproton.pn_data_next(anno) # Position on 1st map key
if lval:
while True:
if cproton.pn_data_type(anno) == cproton.PN_SYMBOL:
if cproton.pn_data_next(anno):
if cproton.pn_data_type(anno) in (
cproton.PN_SYMBOL,
cproton.PN_STRING,
cproton.PN_INT):
elements += 1
else:
break
if not cproton.pn_data_next(anno):
break
else:
break
cproton.pn_data_rewind(anno)
# Return early if there are no (interesting) delivery annotations
if elements == 0:
LOG.exit(
'_MQLightMessage._get_delivery_annotations',
NO_CLIENT_ID,
result)
return result
cproton.pn_data_next(anno) # Move to Map
cproton.pn_data_enter(anno) # Enter the Map
cproton.pn_data_next(anno) # Position on first map key
# Build an array of objects, where each object has the following
# four properties:
# key : the key of the delivery annotation entry
# key_type : the type of the delivery annotation key (always
# 'symbol')
# value : the value of the delivery annotation entry
# value_type : the type of the delivery annotation value
# ('symbol' ,'string', or 'int32')
while True:
if cproton.pn_data_type(anno) == cproton.PN_SYMBOL:
key = cproton.pn_data_get_symbol(anno)
if cproton.pn_data_next(anno):
value = None
value_type = None
data_type = cproton.pn_data_type(anno)
add_entry = False
if data_type == cproton.PN_SYMBOL:
add_entry = True
value_type = 'symbol'
value = cproton.pn_data_get_symbol(anno)
elif data_type == cproton.PN_STRING:
add_entry = True
value_type = 'string'
value = cproton.pn_data_get_string(anno)
elif data_type == cproton.PN_INT:
add_entry = True
value_type = 'int32'
value = cproton.pn_data_get_int(anno)
if add_entry:
# e.g. { 'key': 'xopt-blah', 'key_type': 'symbol',
# 'value': 'blahblah', 'value_type': 'string' }
item = {
'key': key,
'key_type': 'symbol',
'value': value,
'value_type': value_type
}
result.append(item)
if not cproton.pn_data_next(anno):
break
else:
break
cproton.pn_data_rewind(anno)
LOG.exit(
'_MQLightMessage._get_delivery_annotations',
NO_CLIENT_ID,
result)
return result
annotations = property(_get_delivery_annotations)
def set_content_type(self, content_type):
"""
Sets the message content type
"""
cproton.pn_message_set_content_type(self.message, content_type)
def _get_content_type(self):
"""
Gets the message content type
"""
content_type = cproton.pn_message_get_content_type(self.message)
return content_type
content_type = property(_get_content_type, set_content_type)
def _set_address(self, address):
"""
Sets the address
"""
cproton.pn_message_set_address(self.message, address)
def _get_address(self):
"""
Gets the address
"""
addr = cproton.pn_message_get_address(self.message)
return addr
address = property(_get_address, _set_address)
def _set_tracker(self, tracker):
"""
Sets the tracker
"""
self._tracker = tracker
def _get_tracker(self):
"""
Returns the tracker
"""
return self._tracker
tracker = property(_get_tracker, _set_tracker)
def _set_link_address(self, link_address):
"""
Sets the link address
"""
self._link_address = link_address
def _get_link_address(self):
"""
Returns the link address
"""
return self._link_address
link_address = property(_get_link_address, _set_link_address)
def _set_time_to_live(self, ttl):
"""
Sets the ttl
"""
cproton.pn_message_set_ttl(self.message, ttl)
def _get_time_to_live(self):
"""
Returns the ttl
"""
return cproton.pn_message_get_ttl(self.message)
ttl = property(_get_time_to_live, _set_time_to_live)
def _get_message(self):
"""
Returns the Proton Message object
"""
return self._msg
message = property(_get_message)
class _MQLightMessenger(object):
"""
Wrapper for the Proton Messenger class
"""
def __init__(self, name):
"""
MQLightMessenger constructor
"""
LOG.entry('_MQLightMessenger.constructor', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'name:', name)
self.messenger = None
self.connection = None
self.sasl_outcome = cproton.PN_SASL_NONE
self._name = name
self._lock = threading.RLock()
LOG.exit('_MQLightMessenger.constructor', NO_CLIENT_ID, None)
@staticmethod
def _raise_error(text):
"""
Parses an error message from messenger and raises the corresponding
Error
"""
if 'sasl ' in text or 'SSL ' in text or ' 2035 ' in text:
raise SecurityError(text)
if '_Takeover' in text:
raise ReplacedError(text)
if '_InvalidSourceTimeout' in text:
raise NotPermittedError(text)
raise NetworkError(text)
def connect(self, service):
"""
Connects to the specified service
"""
LOG.entry('_MQLightMessenger.connect', NO_CLIENT_ID)
# If the proton messenger already exists and has been stopped then free
# it so that we can recreate a new instance. This situation can arise
# if the messenger link is closed by the remote end instead of a call
# to stop()
if self.messenger:
stopped = cproton.pn_messenger_stopped(self.messenger)
if stopped:
self.messenger = None
self.connection = None
# throw exception if already connected
if self.messenger:
raise NetworkError('Already connected')
# Create the messenger object and update the name in case messenger has
# changed it
self.messenger = cproton.pn_messenger(self._name)
self._name = cproton.pn_messenger_name(self.messenger)
cproton.pn_messenger_set_blocking(self.messenger, False)
cproton.pn_messenger_set_incoming_window(self.messenger, 2147483647)
cproton.pn_messenger_set_outgoing_window(self.messenger, 2147483647)
error = cproton.pn_messenger_route(
self.messenger, service.route_pattern, service.route_address)
LOG.data(NO_CLIENT_ID, 'pn_messenger_route:', error)
if error:
self.messenger = None
raise MQLightError('Failed to set messenger route')
# Indicate that the route should be validated
if cproton.pn_messenger_set_flags(
self.messenger, cproton.PN_FLAGS_CHECK_ROUTES |
cproton.PN_FLAGS_ALLOW_INSECURE_MECHS):
self.messenger = None
raise TypeError('Invalid set flags call')
# Indicate that an external socket is in use
if cproton.pn_messenger_set_external_socket(self.messenger):
self.messenger = None
raise TypeError('Failed to set external socket')
# Start the messenger. This will fail if the route is invalid
error = cproton.pn_messenger_start(self.messenger)
LOG.data(NO_CLIENT_ID, 'pn_messenger_start:', error)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(self.messenger))
self.messenger = None
_MQLightMessenger._raise_error(text)
# Get the proton connection by resolving the route
self.connection = cproton.pn_messenger_resolve(self.messenger,
service.route_pattern)
LOG.data(NO_CLIENT_ID, 'connection:', self.connection)
if not self.connection:
self.messenger = None
raise NetworkError('Unable to resolve connection')
LOG.exit('_MQLightMessenger.connect', NO_CLIENT_ID, None)
def stop(self, sock, attempts):
"""
Calls stop() on the proton Messenger
"""
LOG.entry('_MQLightMessenger.stop', NO_CLIENT_ID)
# if already stopped then simply return True
if self.messenger is None:
LOG.exit('_MQLightMessenger.stop', NO_CLIENT_ID, True)
return True
cproton.pn_messenger_stop(self.messenger)
try:
for tries in range(0, attempts):
self._write(sock, False)
stopped = cproton.pn_messenger_stopped(self.messenger)
if stopped:
break
time.sleep(0.05)
except socket.error as exc:
# Connection is closed
stopped = True
if stopped:
cproton.pn_messenger_free(self.messenger)
self.messenger = None
self.connection = None
LOG.data(NO_CLIENT_ID, 'MQLightMessenger.stop - messenger freed')
LOG.exit('_MQLightMessenger.stop', NO_CLIENT_ID, stopped)
return stopped
def _is_stopped(self):
"""
Returns True if the messenger is currently stopped
"""
LOG.entry('_MQLightMessenger._is_stopped', NO_CLIENT_ID)
if self.messenger is not None:
state = cproton.pn_messenger_stopped(self.messenger)
else:
state = True
LOG.exit('_MQLightMessenger._is_stopped', NO_CLIENT_ID, state)
return state
stopped = property(_is_stopped)
def started(self):
"""
Returns True if the messenger is currently started
"""
LOG.entry('_MQLightMessenger.started', NO_CLIENT_ID)
if self.messenger is not None:
if self.connection:
transport = cproton.pn_connection_transport(self.connection)
self._update_sasl_outcome(transport)
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(
self.messenger))
_MQLightMessenger._raise_error(text)
# FIXME: these should really come from pn_messenger_error
elif self.sasl_outcome == cproton.PN_SASL_AUTH:
_MQLightMessenger._raise_error('sasl authentication failed')
elif self.sasl_outcome > cproton.PN_SASL_AUTH:
_MQLightMessenger._raise_error('sasl negotiation failed')
started = cproton.pn_messenger_started(self.messenger) and \
self.sasl_outcome != cproton.PN_SASL_NONE
else:
started = False
LOG.exit('_MQLightMessenger.started', NO_CLIENT_ID, started)
return started
def set_snd_settle_mode(self, mode):
"""
Sets the settle mode for sending messages
"""
LOG.entry('_MQLightMessenger.set_snd_settle_mode', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'mode:', mode)
cproton.pn_messenger_set_snd_settle_mode(self.messenger, mode)
LOG.exit('_MQLightMessenger.set_snd_settle_mode', NO_CLIENT_ID, None)
def set_rcv_settle_mode(self, mode):
"""
Sets the settle mode for receiving messages
"""
LOG.entry('_MQLightMessenger.set_rcv_settle_mode', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'mode:', mode)
cproton.pn_messenger_set_rcv_settle_mode(self.messenger, mode)
LOG.exit('_MQLightMessenger.set_rcv_settle_mode', NO_CLIENT_ID, None)
def status_error(self, message):
"""
Finds the reason why the message has been rejected
"""
LOG.entry('_MQLightMessenger.status_error', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'message:', message)
if self.messenger is None:
raise NetworkError('Not connected')
delivery = cproton.pn_messenger_delivery(
self.messenger, message.tracker)
disposition = None
condition = None
description = None
if delivery:
disposition = cproton.pn_delivery_remote(delivery)
if disposition:
condition = cproton.pn_disposition_condition(disposition)
if condition:
description = cproton.pn_condition_get_description(
condition)
LOG.exit(
'_MQLightMessenger.status_error',
NO_CLIENT_ID,
description)
return description
def get_remote_idle_timeout(self, address):
"""
Returns the idle timeout of the Messenger
"""
LOG.entry('_MQLightMessenger.get_remote_idle_timeout', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'address:', address)
if self.messenger is None:
raise NetworkError('Not connected')
remote_idle_timeout = cproton.pn_messenger_get_remote_idle_timeout(
self.messenger,
address) / 1000
LOG.exit(
'_MQLightMessenger.get_remote_idle_timeout',
NO_CLIENT_ID,
remote_idle_timeout)
return remote_idle_timeout
def flow(self, address, credit, sock):
"""
Process messages based on the number of credit available
"""
LOG.entry('_MQLightMessenger.flow', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'address:', address)
LOG.parms(NO_CLIENT_ID, 'credit:', credit)
LOG.parms(NO_CLIENT_ID, 'sock:', sock)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
# Find link based on address, and flow link credit.
link = cproton.pn_messenger_get_link(self.messenger, address, False)
if link:
cproton.pn_link_flow(link, credit)
self._write(sock, False)
LOG.exit('_MQLightMessenger.flow', NO_CLIENT_ID, None)
def put(self, msg, qos):
"""
Puts a message on the outgoing queue
"""
LOG.entry('_MQLightMessenger.put', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'msg:', msg)
LOG.parms(NO_CLIENT_ID, 'qos:', qos)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
# Set the required QoS, by setting the sender settler mode to settled
# (QoS = AMO) or unsettled (QoS = ALO). Note that the receiver settler
# mode is always set to first, as the MQ Light listener will
# negotiate down any receiver settler mode to first.
if qos not in (QOS_AT_MOST_ONCE, QOS_AT_LEAST_ONCE):
raise InvalidArgumentError('Invalid QoS')
LOG.data(NO_CLIENT_ID, 'message:', msg.message)
LOG.data(NO_CLIENT_ID, 'body:', msg.body)
cproton.pn_messenger_put(self.messenger, msg.message)
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(
self.messenger))
_MQLightMessenger._raise_error(text)
tracker = cproton.pn_messenger_outgoing_tracker(self.messenger)
LOG.data(NO_CLIENT_ID, 'tracker:', tracker)
msg.tracker = tracker
if qos == QOS_AT_MOST_ONCE:
error = cproton.pn_messenger_settle(self.messenger, tracker, 0)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(
self.messenger))
_MQLightMessenger._raise_error(text)
LOG.exit('_MQLightMessenger.put', NO_CLIENT_ID, True)
return True
def send(self, sock):
"""
Sends the messages on the outgoing queue
"""
LOG.entry('_MQLightMessenger.send', NO_CLIENT_ID)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
cproton.pn_messenger_send(self.messenger, -1)
error = cproton.pn_messenger_errno(self.messenger)
if error:
error_text = cproton.pn_error_text(
cproton.pn_messenger_error(
self.messenger))
_MQLightMessenger._raise_error(error_text)
self._write(sock, False)
LOG.exit('_MQLightMessenger.send', NO_CLIENT_ID, True)
return True
def receive(self, sock):
"""
Retrieves messages from the incoming queue
"""
LOG.entry('_MQLightMessenger.receive', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'sock:', sock)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
cproton.pn_messenger_recv(self.messenger, -2)
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(
self.messenger))
_MQLightMessenger._raise_error(text)
messages = []
count = cproton.pn_messenger_incoming(self.messenger)
LOG.data(NO_CLIENT_ID, 'messages count: {0}'.format(count))
while cproton.pn_messenger_incoming(self.messenger) > 0:
message = cproton.pn_message()
rc = cproton.pn_messenger_get(self.messenger, message)
# try again if message not yet available on incoming queue
if rc == cproton.PN_EOS:
continue
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(
self.messenger))
_MQLightMessenger._raise_error(text)
msg = _MQLightMessage(message)
tracker = cproton.pn_messenger_incoming_tracker(self.messenger)
msg.tracker = tracker
link = cproton.pn_messenger_tracker_link(
self.messenger,
tracker)
if link:
if cproton.pn_link_state(link) & cproton.PN_LOCAL_CLOSED:
LOG.data(
NO_CLIENT_ID,
'Link closed so ignoring received message for ' +
'address: {0}'.format(
cproton.pn_message_get_address(message)))
else:
msg.link_address = cproton.pn_terminus_get_address(
cproton.pn_link_remote_target(link))
messages.append(msg)
else:
LOG.data(
NO_CLIENT_ID,
'No link associated with received message tracker ' +
'for address: {0}'.format(
cproton.pn_message_get_address(message)))
self._write(sock, False)
LOG.exit('_MQLightMessenger.receive', NO_CLIENT_ID, messages)
return messages
def settle(self, message, sock):
"""
Settles a message
"""
LOG.entry('_MQLightMessenger.settle', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'message:', message)
LOG.parms(NO_CLIENT_ID, 'sock:', sock)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
tracker = message.tracker
status = cproton.pn_messenger_settle(
self.messenger,
tracker,
0)
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(
self.messenger))
_MQLightMessenger._raise_error(text)
elif status != 0:
raise NetworkError('Failed to settle')
self._write(sock, False)
LOG.exit('_MQLightMessenger.settle', NO_CLIENT_ID, True)
return True
def settled(self, message):
"""
Checks if a message has been settled
"""
LOG.entry('_MQLightMessenger.settled', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'message:', message)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
delivery = cproton.pn_messenger_delivery(
self.messenger,
message.tracker)
# For incoming messages, if we haven't already settled it, block
# for a while until we *think* the settlement disposition has been
# communicated over the network. We detect that by querying
# pn_transport_quiesced which should return True once all pending
# output has been written to the wire.
settled = True
if (delivery and
cproton.pn_link_is_receiver(
cproton.pn_delivery_link(delivery))):
session = cproton.pn_link_session(
cproton.pn_delivery_link(delivery))
if session:
connection = cproton.pn_session_connection(session)
if connection:
transport = cproton.pn_connection_transport(connection)
if transport:
if not cproton.pn_transport_quiesced(transport):
settled = False
LOG.exit('_MQLightMessenger.settled', NO_CLIENT_ID, settled)
return settled
def accept(self, message):
"""
Accepts a message
"""
LOG.entry('_MQLightMessenger.accept', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'message:', message)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
tracker = message.tracker
status = cproton.pn_messenger_accept(self.messenger, tracker, 0)
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(self.messenger))
_MQLightMessenger._raise_error(text)
elif status != 0:
raise NetworkError('Failed to accept')
LOG.exit('_MQLightMessenger.accept', NO_CLIENT_ID, True)
return True
def status(self, message):
"""
Get the status of a message
"""
LOG.entry('_MQLightMessenger.status', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'message:', message)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
tracker = message.tracker
disp = cproton.pn_messenger_status(self.messenger, tracker)
LOG.data(NO_CLIENT_ID, 'status: ', disp)
status = STATUSES[disp]
LOG.exit('_MQLightMessenger.status', NO_CLIENT_ID, status)
return status
def subscribe(self, address, qos, ttl, credit, sock):
"""
Subscribes to a topic
"""
LOG.entry('_MQLightMessenger.subscribe', NO_CLIENT_ID)
if credit > 4294967295:
credit = 4294967295
LOG.parms(NO_CLIENT_ID, 'address:', address)
LOG.parms(NO_CLIENT_ID, 'qos:', qos)
LOG.parms(NO_CLIENT_ID, 'ttl:', ttl)
LOG.parms(NO_CLIENT_ID, 'credit:', credit)
LOG.parms(NO_CLIENT_ID, 'sock:', sock)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
# Set the required QoS, by setting the sender settler mode to
# settled (QoS = AMO) or unsettled (QoS = ALO).
# Note that our API client implementation will always specify a
# value of first - meaning "The Receiver will spontaneously settle
# all incoming transfers" - this equates to a maximum QoS of "at
# least once delivery".
if qos == 0:
self.set_snd_settle_mode(cproton.PN_SND_SETTLED)
self.set_rcv_settle_mode(cproton.PN_RCV_FIRST)
elif qos == 1:
self.set_snd_settle_mode(cproton.PN_SND_UNSETTLED)
self.set_rcv_settle_mode(cproton.PN_RCV_FIRST)
else:
raise InvalidArgumentError('Invalid QoS')
cproton.pn_messenger_subscribe_ttl(self.messenger, address, ttl)
cproton.pn_messenger_recv(self.messenger, -2)
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(self.messenger))
_MQLightMessenger._raise_error(text)
self._write(sock, False)
LOG.exit('_MQLightMessenger.subscribe', NO_CLIENT_ID, True)
return True
def subscribed(self, address):
"""
Return True if the client is subscribed to the specified topic
"""
LOG.entry('_MQLightMessenger.subscribed', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'address:', address)
# throw Error if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
link = cproton.pn_messenger_get_link(self.messenger,
address,
False)
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(self.messenger))
_MQLightMessenger._raise_error(text)
if link is None:
# throw Error if unable to find a matching Link
raise MQLightError(
'Unable to locate link for {0}'.format(address))
if not (cproton.pn_link_state(link) & cproton.PN_REMOTE_ACTIVE):
subscribed = False
else:
subscribed = True
LOG.exit('_MQLightMessenger.subscribed', NO_CLIENT_ID, subscribed)
return subscribed
def unsubscribe(self, address, ttl, sock):
"""
Unsubscribes from a topic
"""
LOG.entry('_MQLightMessenger.Unsubscribe', NO_CLIENT_ID)
if ttl is None:
ttl = -1
LOG.parms(NO_CLIENT_ID, 'address:', address)
LOG.parms(NO_CLIENT_ID, 'ttl:', ttl)
LOG.parms(NO_CLIENT_ID, 'sock:', sock)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
# find link based on address
link = cproton.pn_messenger_get_link(self.messenger,
address,
False)
if link is None:
# throw Error if unable to find a matching Link
raise MQLightError(
'unable to locate link for {0}'.format(address))
if ttl == 0:
cproton.pn_terminus_set_expiry_policy(
cproton.pn_link_target(link), cproton.PN_EXPIRE_WITH_LINK)
cproton.pn_terminus_set_expiry_policy(
cproton.pn_link_source(link), cproton.PN_EXPIRE_WITH_LINK)
LOG.parms(NO_CLIENT_ID, 'ttl:', ttl)
cproton.pn_terminus_set_timeout(cproton.pn_link_target(link),
ttl)
cproton.pn_terminus_set_timeout(cproton.pn_link_source(link),
ttl)
# Check if we are detaching with @closed=true
closing = True
expiry_policy = cproton.pn_terminus_get_expiry_policy(
cproton.pn_link_target(link))
timeout = cproton.pn_terminus_get_timeout(
cproton.pn_link_target(link))
if expiry_policy == cproton.PN_EXPIRE_NEVER or timeout > 0:
closing = False
LOG.data(NO_CLIENT_ID, 'closing:', closing)
if closing:
cproton.pn_link_close(link)
else:
cproton.pn_link_detach(link)
self._write(sock, False)
LOG.exit('_MQLightMessenger.Unsubscribe', NO_CLIENT_ID, True)
return True
def unsubscribed(self, address):
"""
Return True if the client is not subscribed to the specified topic
"""
LOG.entry('_MQLightMessenger.unsubscribed', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'address:', address)
# throw Error if not connected
if self.messenger is None:
raise NetworkError('Not connected')
with self._lock:
# find link based on address, in any state.
link = cproton.pn_messenger_get_stated_link(
self.messenger,
address,
False,
0)
if link is None:
# throw Error if unable to find a matching Link
raise MQLightError('Unable to locate link for {0}'.
format(address))
# Check if we are detaching with @closed=true
closing = True
expiry_policy = cproton.pn_terminus_get_expiry_policy(
cproton.pn_link_target(link))
timeout = cproton.pn_terminus_get_timeout(
cproton.pn_link_target(link))
if expiry_policy == cproton.PN_EXPIRE_NEVER or timeout > 0:
closing = False
LOG.data(NO_CLIENT_ID, 'closing:', closing)
# check if the remote end has acknowledged the close or detach
if closing:
if not (cproton.pn_link_state(link) &
cproton.PN_REMOTE_CLOSED):
unsubscribed = False
else:
unsubscribed = True
else:
if not cproton.pn_link_remote_detached(link):
unsubscribed = False
else:
unsubscribed = True
cproton.pn_messenger_reclaim_link(self.messenger, link)
cproton.pn_link_free(link)
LOG.exit('_MQLightMessenger.unsubscribed', NO_CLIENT_ID, unsubscribed)
return unsubscribed
def pending_outbound(self, address):
"""
Indicates if there are pending messages
"""
LOG.entry('_MQLightMessenger.pending_outbound', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'address:', address)
# throw exception if not connected
if self.messenger is None:
raise NetworkError('Not connected')
result = False
with self._lock:
pending = cproton.pn_messenger_pending_outbound(
self.messenger,
address)
if pending < 0:
raise NetworkError('Not connected')
elif pending > 0:
result = True
LOG.exit('_MQLightMessenger.pending_outbound', NO_CLIENT_ID, result)
return result
def pop(self, sock, force):
LOG.entry('_MQLightMessenger.pop', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'sock:', sock)
LOG.parms(NO_CLIENT_ID, 'force:', force)
with self._lock:
written = self._write(sock, force)
LOG.exit('_MQLightMessenger.pop', NO_CLIENT_ID, written)
return written
def push(self, chunk):
LOG.entry('_MQLightMessenger.push', NO_CLIENT_ID)
with self._lock:
if self.messenger and self.connection:
with self._lock:
pushed = cproton.pn_connection_push(
self.connection,
chunk,
len(chunk))
else:
# This connection has already been closed, so this data can
# never be pushed in, so just return saying it has so the data
# will be discarded.
LOG.data(
NO_CLIENT_ID,
'connection already closed: discarding data')
pushed = len(chunk)
LOG.exit('_MQLightMessenger.push', NO_CLIENT_ID, pushed)
return pushed
def _update_sasl_outcome(self, transport):
"""
Retrieves and stores the sasl outcome from the transport
"""
LOG.entry('_update_sasl_outcome', NO_CLIENT_ID)
if transport:
with self._lock:
self.sasl_outcome = cproton.pn_sasl_outcome(
cproton.pn_sasl(transport))
LOG.data(NO_CLIENT_ID, 'outcome:', self.sasl_outcome)
else:
LOG.data(NO_CLIENT_ID, 'connection is closed')
LOG.exit('_update_sasl_outcome', NO_CLIENT_ID, None)
def _write(self, sock, force):
LOG.entry_often('_MQLightMessenger._write', NO_CLIENT_ID)
LOG.parms_often(NO_CLIENT_ID, 'force:', force)
with self._lock:
# Checking for pending data requires a messenger connection
n = 0
if self.messenger and self.connection:
transport = cproton.pn_connection_transport(self.connection)
if transport:
pack_size = cproton.pn_transport_pending(transport)
# Force a pop, causing a heartbeat to be generated, if
# necessary
if force:
closed = cproton.pn_connection_pop(self.connection, 0)
if closed:
LOG.data(NO_CLIENT_ID, 'connection is closed')
self._update_sasl_outcome(transport)
self.connection = None
while self.connection and pack_size > 0:
# write n bytes to stream
buf = cproton.pn_transport_head(transport, pack_size)
sock.send(buf)
n += pack_size
closed = cproton.pn_connection_pop(self.connection,
pack_size)
if closed:
LOG.data(NO_CLIENT_ID, 'connection is closed')
self._update_sasl_outcome(transport)
self.connection = None
pack_size = 0
else:
# Is there any more pending?
pack_size = cproton.pn_transport_pending(transport)
else:
n = -1
else:
n = -1
LOG.exit_often('_MQLightMessenger._write', NO_CLIENT_ID, n)
return n
def heartbeat(self, sock):
LOG.entry_often('_MQLightMessenger.heartbeat', NO_CLIENT_ID)
LOG.parms_often(NO_CLIENT_ID, 'sock:', sock)
with self._lock:
self._write(sock, True)
LOG.exit_often('_MQLightMessenger.heartbeat', NO_CLIENT_ID, None)
def closed(self):
LOG.entry('_MQLightMessenger.closed', NO_CLIENT_ID)
if self.messenger and self.connection:
with self._lock:
cproton.pn_connection_was_closed(
self.messenger, self.connection)
error = cproton.pn_messenger_errno(self.messenger)
if error:
text = cproton.pn_error_text(
cproton.pn_messenger_error(self.messenger))
_MQLightMessenger._raise_error(text)
LOG.exit('_MQLightMessenger.closed', NO_CLIENT_ID, None)
class _MQLightSocket(object):
def __init__(self, service, security_options, on_read, on_close):
LOG.entry('_MQLightSocket.__init__', NO_CLIENT_ID)
LOG.parms(NO_CLIENT_ID, 'service:', service)
LOG.parms(NO_CLIENT_ID, 'security_options:', security_options)
LOG.parms(NO_CLIENT_ID, 'on_read:', on_read)
LOG.parms(NO_CLIENT_ID, 'on_close:', on_close)
def create_thread(target, name, daemon=False):
t = threading.Thread(target=target)
t.setName(t.getName() + name)
t.setDaemon(daemon)
return t
self.running = False
self.on_read = on_read
self.on_close = on_close
self._data_queue = Queue()
self._data_handler_thread = create_thread(self.queue_data,
':QueueReader')
try:
self.sock = socket.socket(
socket.AF_INET,
socket.SOCK_STREAM)
if security_options.ssl or service.ssl:
LOG.data(NO_CLIENT_ID, 'wrapping the socket in a SSL context')
ctx = security_options.context
self.sock = ctx.wrap_socket(
self.sock, server_hostname=service.host_port[0])
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect(service.host_port)
self.running = True
self.io_loop = create_thread(target=self.loop, name=':IOReader')
self.io_loop.start()
except socket.error as exc:
LOG.error('_MQLightSocket.__init__(socket)', NO_CLIENT_ID, exc)
self.running = False
if 'SSL' in str(exc) or '_ssl' in str(exc):
raise SecurityError(exc)
else:
raise NetworkError(exc)
except (ssl.SSLError, ssl.CertificateError) as exc:
LOG.error('_MQLightSocket.__init__(ssl)', NO_CLIENT_ID, exc)
self.running = False
raise SecurityError(exc)
LOG.exit('_MQLightSocket.__init__', NO_CLIENT_ID, None)
def queue_data(self):
LOG.entry('_MQLightSocket.queue_data', NO_CLIENT_ID)
while self.running:
args = self._data_queue.get()
callback = args[0]
if len(args) > 1:
callback(*args[1:])
# if we do not get arguments that means on_close was passed in
else:
callback()
self.running = False
LOG.exit('_MQLightSocket.queue_data', NO_CLIENT_ID, None)
def loop(self):
LOG.entry('_MQLightSocket.loop', NO_CLIENT_ID)
self._data_handler_thread.start()
exc = None
try:
while self.running and not exc:
read, write, exc = select.select([self.sock], [], [self.sock])
if read:
data = self.sock.recv(65536) # 64K
if data:
self._data_queue.put((self.on_read, data))
else:
LOG.data(NO_CLIENT_ID,
'Socket closed : Loop marked as closed')
self.running = False
self._data_queue.put((self.on_close,))
except socket.error as se:
LOG.data(NO_CLIENT_ID, 'Received IO error:', se)
self._data_queue.put((self.on_close,))
except Exception as exc:
raise exc
LOG.exit('_MQLightSocket.loop', NO_CLIENT_ID, None)
def send(self, msg):
LOG.entry('_MQLightSocket.send', NO_CLIENT_ID)
if self.sock and self.running:
sent = self.sock.send(msg)
else:
sent = 0
LOG.exit('_MQLightSocket.send', NO_CLIENT_ID, sent)
def close(self):
LOG.entry('_MQLightSocket.close', NO_CLIENT_ID)
self.running = False
try:
self.sock.shutdown(socket.SHUT_WR)
except:
# Ignore [Errno 107] Transport endpoint is not connected
pass
self.io_loop.join()
self.sock.close()
LOG.exit('_MQLightSocket.close', NO_CLIENT_ID, None)
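# Hedged sketch (the error strings are made-up examples): _raise_error above
# maps messenger error text onto the client-facing exception hierarchy, so
# callers can branch on the exception type instead of parsing strings.
def _example_error_mapping():
    for text, expected in (
            ('sasl authentication failed', SecurityError),
            ('_Takeover by another client', ReplacedError),
            ('_InvalidSourceTimeout: bad ttl', NotPermittedError),
            ('connection refused', NetworkError)):
        try:
            _MQLightMessenger._raise_error(text)
        except expected:
            pass  # each message raises the documented exception type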
|
import pymc3_ext as pm
import numpy as np
from numpy import random as nr
import numpy.testing as npt
import pytest
import theano.tensor as tt
import theano
from pymc3_ext.distributions.distribution import _draw_value, draw_values
from .helpers import SeededTest
def test_draw_value():
npt.assert_equal(_draw_value(np.array([5, 6])), [5, 6])
npt.assert_equal(_draw_value(np.array(5.)), 5)
npt.assert_equal(_draw_value(tt.constant([5., 6.])), [5, 6])
assert _draw_value(tt.constant(5)) == 5
npt.assert_equal(_draw_value(2 * tt.constant([5., 6.])), [10, 12])
val = theano.shared(np.array([5., 6.]))
npt.assert_equal(_draw_value(val), [5, 6])
npt.assert_equal(_draw_value(2 * val), [10, 12])
a = tt.scalar('a')
a.tag.test_value = 6
npt.assert_equal(_draw_value(2 * a, givens=[(a, 1)]), 2)
assert _draw_value(5) == 5
assert _draw_value(5.) == 5
assert isinstance(_draw_value(5.), type(5.))
assert isinstance(_draw_value(5), type(5))
with pm.Model():
mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
a = pm.Normal('a', mu=mu, sigma=5, shape=2)
val1 = _draw_value(a)
val2 = _draw_value(a)
assert np.all(val1 != val2)
with pytest.raises(ValueError) as err:
_draw_value([])
err.match('Unexpected type')
class TestDrawValues:
def test_empty(self):
assert draw_values([]) == []
def test_vals(self):
npt.assert_equal(draw_values([np.array([5, 6])])[0], [5, 6])
npt.assert_equal(draw_values([np.array(5.)])[0], 5)
npt.assert_equal(draw_values([tt.constant([5., 6.])])[0], [5, 6])
assert draw_values([tt.constant(5)])[0] == 5
npt.assert_equal(draw_values([2 * tt.constant([5., 6.])])[0], [10, 12])
val = theano.shared(np.array([5., 6.]))
npt.assert_equal(draw_values([val])[0], [5, 6])
npt.assert_equal(draw_values([2 * val])[0], [10, 12])
def test_simple_model(self):
with pm.Model():
mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
a = pm.Normal('a', mu=mu, sigma=5, shape=2)
val1 = draw_values([a])
val2 = draw_values([a])
assert np.all(val1[0] != val2[0])
point = {'a': np.array([3., 4.])}
npt.assert_equal(draw_values([a], point=point), [point['a']])
def test_dep_vars(self):
with pm.Model():
mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
sd = pm.HalfNormal('sd', shape=2)
tau = 1 / sd ** 2
a = pm.Normal('a', mu=mu, tau=tau, shape=2)
point = {'a': np.array([1., 2.])}
npt.assert_equal(draw_values([a], point=point), [point['a']])
val1 = draw_values([a])[0]
val2 = draw_values([a], point={'sd': np.array([2., 3.])})[0]
val3 = draw_values([a], point={'sd_log__': np.array([2., 3.])})[0]
val4 = draw_values([a], point={'sd_log__': np.array([2., 3.])})[0]
assert all([np.all(val1 != val2), np.all(val1 != val3),
np.all(val1 != val4), np.all(val2 != val3),
np.all(val2 != val4), np.all(val3 != val4)])
def test_gof_constant(self):
# Issue 3595 pointed out that slice(None) can introduce
# theano.gof.graph.Constant into the compute graph, which wasn't
# handled correctly by draw_values
n_d = 500
n_x = 2
n_y = 1
n_g = 10
g = np.random.randint(0, n_g, (n_d,)) # group
x = np.random.randint(0, n_x, (n_d,)) # x factor
with pm.Model():
multi_dim_rv = pm.Normal('multi_dim_rv', mu=0, sd=1, shape=(n_x, n_g, n_y))
indexed_rv = multi_dim_rv[x, g, :]
i = draw_values([indexed_rv])
assert i is not None
class TestJointDistributionDrawValues(SeededTest):
def test_joint_distribution(self):
with pm.Model() as model:
a = pm.Normal('a', mu=0, sigma=100)
b = pm.Normal('b', mu=a, sigma=1e-8)
c = pm.Normal('c', mu=a, sigma=1e-8)
d = pm.Deterministic('d', b + c)
# Expected RVs
N = 1000
norm = np.random.randn(3, N)
eA = norm[0] * 100
eB = eA + norm[1] * 1e-8
eC = eA + norm[2] * 1e-8
eD = eB + eC
# Drawn RVs
nr.seed(self.random_seed)
# A, B, C, D = list(zip(*[draw_values([a, b, c, d]) for i in range(N)]))
A, B, C, D = draw_values([a, b, c, d], size=N)
A = np.array(A).flatten()
B = np.array(B).flatten()
C = np.array(C).flatten()
D = np.array(D).flatten()
# Assert that the drawn samples match the expected values
assert np.allclose(eA, A)
assert np.allclose(eB, B)
assert np.allclose(eC, C)
assert np.allclose(eD, D)
# Assert that A, B and C have the expected difference
assert np.all(np.abs(A - B) < 1e-6)
assert np.all(np.abs(A - C) < 1e-6)
assert np.all(np.abs(B - C) < 1e-6)
# Marginal draws
mA = np.array([draw_values([a]) for i in range(N)]).flatten()
mB = np.array([draw_values([b]) for i in range(N)]).flatten()
mC = np.array([draw_values([c]) for i in range(N)]).flatten()
# Also test the with model context of draw_values
with model:
mD = np.array([draw_values([d]) for i in range(N)]).flatten()
# Assert that the marginal distributions have different sample values
assert not np.all(np.abs(B - mB) < 1e-2)
assert not np.all(np.abs(C - mC) < 1e-2)
assert not np.all(np.abs(D - mD) < 1e-2)
# Assert that the marginal distributions do not have high cross
# correlation
assert np.abs(np.corrcoef(mA, mB)[0, 1]) < 0.1
assert np.abs(np.corrcoef(mA, mC)[0, 1]) < 0.1
assert np.abs(np.corrcoef(mB, mC)[0, 1]) < 0.1
|
"""
Drop-in replacement class for unittest.TestCase.
"""
from pedal.assertions.runtime import *
class PedalTestCase:
"""
Drop-in replacement for the existing unittest.TestCase class. Emulates
the original interface, but also supports all the Pedal features, allowing
an easier transition to our toolset.
TODO: Implement the entire interface
setUp()
tearDown()
setUpClass()
tearDownClass()
skipTest()
subTest()
debug()
run()
fail()
"""
def assertEqual(self, left, right, msg=None, **kwargs):
"""
Test that first and second are equal.
If the values do not compare equal, the test will fail.
"""
return assert_equal(left, right, explanation=msg, **kwargs)
def assertNotEqual(self, left, right, msg=None, **kwargs):
"""
Test that first and second are not equal.
If the values do compare equal, the test will fail.
"""
return assert_not_equal(left, right, explanation=msg, **kwargs)
def assertLess(self, left, right, msg=None, **kwargs):
"""
Test that first is less than the second.
"""
return assert_less(left, right, explanation=msg, **kwargs)
def assertLessEqual(self, left, right, msg=None, **kwargs):
"""
Test that first is less than or equal to the second.
"""
return assert_less_equal(left, right, explanation=msg, **kwargs)
def assertGreater(self, left, right, msg=None, **kwargs):
"""
Test that first is greater than the second.
"""
return assert_greater(left, right, explanation=msg, **kwargs)
def assertGreaterEqual(self, left, right, msg=None, **kwargs):
"""
Test that first is greater than or equal to the second.
"""
return assert_greater_equal(left, right, explanation=msg, **kwargs)
def assertIn(self, needle, haystack, msg=None, **kwargs):
"""
Tests that the needle is in the haystack.
"""
return assert_in(needle, haystack, explanation=msg, **kwargs)
def assertNotIn(self, needle, haystack, msg=None, **kwargs):
"""
Tests that the needle is not in the haystack.
"""
return assert_not_in(needle, haystack, explanation=msg, **kwargs)
def assertIs(self, left, right, msg=None, **kwargs):
"""
Test that first is second.
"""
return assert_is(left, right, explanation=msg, **kwargs)
def assertIsNot(self, left, right, msg=None, **kwargs):
"""
Test that first is not second.
"""
return assert_is_not(left, right, explanation=msg, **kwargs)
def assertIsNone(self, expr, msg=None, **kwargs):
"""
Test that expr is None
"""
return assert_is_none(expr, explanation=msg, **kwargs)
def assertIsNotNone(self, expr, msg=None, **kwargs):
"""
Test that expr is not None
"""
return assert_is_not_none(expr, explanation=msg, **kwargs)
def assertTrue(self, expr, msg=None, **kwargs):
"""
Test that expr is true
"""
return assert_true(expr, explanation=msg, **kwargs)
def assertFalse(self, expr, msg=None, **kwargs):
"""
Test that expr is false
"""
return assert_false(expr, explanation=msg, **kwargs)
def assertLengthEqual(self, sequence, length, msg=None, **kwargs):
"""
Test that sequence has length.
"""
return assert_length_equal(sequence, length, explanation=msg, **kwargs)
def assertLengthNotEqual(self, sequence, length, msg=None, **kwargs):
"""
Test that sequence does not have length.
"""
return assert_length_not_equal(sequence, length, explanation=msg, **kwargs)
def assertLengthLess(self, sequence, length, msg=None, **kwargs):
"""
Test that sequence has less than length.
"""
return assert_length_less(sequence, length, explanation=msg, **kwargs)
def assertLengthLessEqual(self, sequence, length, msg=None, **kwargs):
"""
Test that sequence has less or equal to length.
"""
return assert_length_less_equal(sequence, length, explanation=msg, **kwargs)
def assertLengthGreater(self, sequence, length, msg=None, **kwargs):
"""
Test that sequence has greater than length.
"""
return assert_length_greater(sequence, length, explanation=msg, **kwargs)
def assertLengthGreaterEqual(self, sequence, length, msg=None, **kwargs):
"""
Test that sequence has greater or equal length.
"""
return assert_length_greater_equal(sequence, length, explanation=msg, **kwargs)
def assertIsInstance(self, obj, cls, msg=None, **kwargs):
"""
Test that obj is an instance of cls.
"""
return assert_is_instance(obj, cls, explanation=msg, **kwargs)
def assertNotIsInstance(self, obj, cls, msg=None, **kwargs):
"""
Test that obj is not an instance of cls.
"""
return assert_not_is_instance(obj, cls, explanation=msg, **kwargs)
    def assertAlmostEqual(self, first, second, places=7, msg=None, delta=None, **kwargs):
        """ Test that first is approximately equal to second. """
        # An explicitly supplied delta takes precedence over places.
        if delta is None:
            delta = 10 ** (-places)
        return assert_equal(first, second, delta=delta, explanation=msg, **kwargs)
    def assertNotAlmostEqual(self, first, second, places=7, msg=None, delta=None, **kwargs):
        """ Test that first is not approximately equal to second. """
        # An explicitly supplied delta takes precedence over places.
        if delta is None:
            delta = 10 ** (-places)
        return assert_not_equal(first, second, delta=delta, explanation=msg, **kwargs)
def assertRegex(self, regex, text, msg=None, **kwargs):
"""
Test that regex matches text.
"""
return assert_regex(regex, text, explanation=msg, **kwargs)
def assertNotRegex(self, regex, text, msg=None, **kwargs):
"""
Test that regex does not match text.
"""
return assert_not_regex(regex, text, explanation=msg, **kwargs)
assertMultiLineEqual = assert_equal
assertSequenceEqual = assert_equal
assertListEqual = assert_equal
assertTupleEqual = assert_equal
assertSetEqual = assert_equal
assertDictEqual = assert_equal
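# Hedged usage sketch (not part of the original module): shows how a test class
# written for unittest.TestCase could subclass PedalTestCase without changing
# its assertion calls. The class name and values below are illustrative only.
if __name__ == "__main__":
    class _ExampleSubmissionTests(PedalTestCase):
        def test_basic_arithmetic(self):
            self.assertEqual(2 + 2, 4, msg="2 + 2 should equal 4")
            self.assertIn(3, [1, 2, 3], msg="3 should appear in the list")
    _ExampleSubmissionTests().test_basic_arithmetic()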
|
class Solution:
def subsets(self, nums):
if len(nums) == 0:
return []
if len(nums) == 1:
return [[], nums]
subs = [[], nums]
for i, el in enumerate(nums):
newSubs = self.subsets(nums[:i] + nums[i+1:])
for s in newSubs:
if s not in subs:
subs = subs + [s]
return subs
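# Hedged alternative sketch (not part of the original class): the standard
# iterative power-set construction, included only for comparison with the
# recursive, de-duplicating approach above. For n distinct elements it yields
# the same 2**n subsets without repeated membership checks.
def subsets_iterative(nums):
    result = [[]]
    for el in nums:
        # every existing subset spawns a copy that also contains `el`
        result += [subset + [el] for subset in result]
    return result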
|
#!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import enum
# https://tools.ietf.org/html/rfc4513
# simple auth:
# - anonymous
# - user without password
# - username + password
#
# SASL:
# - plain
# - gssapi
# - SSPI
# - NTLM
# - KERBEROS
# Sicily:
# - NTLM
# Multiplexor
#
class LDAPAuthProtocol(enum.Enum):
SIMPLE = 'SIMPLE' #SIMPLE can be with no creds - anonymous bind
PLAIN = 'PLAIN' #actually SASL-PLAIN
SICILY = 'SICILY' #NTLM (old proprietary from MS)
NTLM_PASSWORD = 'NTLM_PASSWORD' #actually SASL-GSSAPI-SPNEGO-NTLM
NTLM_NT = 'NTLM_NT' #actually SASL-GSSAPI-SPNEGO-NTLM
KERBEROS_RC4 = 'KERBEROS_RC4' #actually SASL-GSSAPI-SPNEGO-KERBEROS
KERBEROS_NT = 'KERBEROS_NT' #actually SASL-GSSAPI-SPNEGO-KERBEROS
KERBEROS_AES = 'KERBEROS_AES' #actually SASL-GSSAPI-SPNEGO-KERBEROS
KERBEROS_PASSWORD = 'KERBEROS_PASSWORD' #actually SASL-GSSAPI-SPNEGO-KERBEROS
KERBEROS_CCACHE = 'KERBEROS_CCACHE' #actually SASL-GSSAPI-SPNEGO-KERBEROS
KERBEROS_KEYTAB = 'KERBEROS_KEYTAB' #actually SASL-GSSAPI-SPNEGO-KERBEROS
MULTIPLEXOR_KERBEROS = 'MULTIPLEXOR_KERBEROS'
MULTIPLEXOR_NTLM = 'MULTIPLEXOR_NTLM'
MULTIPLEXOR_SSL_KERBEROS = 'MULTIPLEXOR_SSL_KERBEROS'
MULTIPLEXOR_SSL_NTLM = 'MULTIPLEXOR_SSL_NTLM'
SSPI_NTLM = 'SSPI_NTLM' #actually SASL-GSSAPI-SPNEGO-NTLM but with integrated SSPI
SSPI_KERBEROS = 'SSPI_KERBEROS' #actually SASL-GSSAPI-SPNEGO-KERBEROS but with integrated SSPI
MSLDAP_GSS_METHODS = [
LDAPAuthProtocol.NTLM_PASSWORD ,
LDAPAuthProtocol.NTLM_NT ,
LDAPAuthProtocol.KERBEROS_RC4 ,
LDAPAuthProtocol.KERBEROS_NT ,
LDAPAuthProtocol.KERBEROS_AES ,
LDAPAuthProtocol.KERBEROS_PASSWORD ,
LDAPAuthProtocol.KERBEROS_CCACHE ,
LDAPAuthProtocol.KERBEROS_KEYTAB ,
LDAPAuthProtocol.SSPI_NTLM ,
LDAPAuthProtocol.SSPI_KERBEROS,
LDAPAuthProtocol.MULTIPLEXOR_KERBEROS,
LDAPAuthProtocol.MULTIPLEXOR_NTLM,
LDAPAuthProtocol.MULTIPLEXOR_SSL_KERBEROS,
LDAPAuthProtocol.MULTIPLEXOR_SSL_NTLM,
]
MSLDAP_KERBEROS_PROTOCOLS = [
LDAPAuthProtocol.KERBEROS_RC4 ,
LDAPAuthProtocol.KERBEROS_NT ,
LDAPAuthProtocol.KERBEROS_AES ,
LDAPAuthProtocol.KERBEROS_PASSWORD ,
LDAPAuthProtocol.KERBEROS_CCACHE ,
LDAPAuthProtocol.KERBEROS_KEYTAB ,
]
class MSLDAPCredential:
def __init__(self, domain=None, username= None, password = None, auth_method = None, settings = None):
self.auth_method = auth_method
self.domain = domain
self.username = username
self.password = password
self.signing_preferred = False
self.encryption_preferred = False
self.settings = settings
self.etypes = None
def get_msuser(self):
if not self.domain:
return self.username
return '%s\\%s' % (self.domain,self.username)
def __str__(self):
t = '==== MSLDAPCredential ====\r\n'
for k in self.__dict__:
t += '%s: %s\r\n' % (k, self.__dict__[k])
return t
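# Hedged usage sketch (illustrative, not part of the original file): constructing
# a credential and checking whether its auth method takes the SASL/GSSAPI code
# path. The domain, username and password below are made up.
if __name__ == '__main__':
    cred = MSLDAPCredential(
        domain='CONTOSO',
        username='alice',
        password='Passw0rd!',
        auth_method=LDAPAuthProtocol.NTLM_PASSWORD,
    )
    print(cred.get_msuser())                        # CONTOSO\alice
    print(cred.auth_method in MSLDAP_GSS_METHODS)   # True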
|
import numpy as np
from scipy.optimize import curve_fit
from scipy.special import *
import parsers
def load_slices(fname):
gout = parsers.parse_genesis_out(fname)
slices = gout['slice_data']
zsep = gout['input_parameters']['zsep']
xlamds = gout['input_parameters']['xlamds']
Nslice = len(slices)
return slices, zsep, Nslice, xlamds
def power(slices):
power = np.asarray([s['data']['power'][:] for s in slices])
return power
def espread(slices):
espread = np.asarray([s['data']['e-spread'][:] for s in slices])
return espread
def bunching(slices):
bunching = np.asarray([s['data']['bunching'][:] for s in slices])
return bunching
def current_profile(slices):
current = np.asarray([s['current'] for s in slices])
return current
def power_spectrum(slices):
Z0 = 120 * np.pi
power = np.asarray([s['data']['p_mid'][-1] for s in slices])
phi_mid = np.asarray([s['data']['phi_mid'][-1] for s in slices])
field = np.sqrt(power) * np.exp(1j*phi_mid)
power_fft = np.abs(np.fft.fftshift(np.fft.fft(field)))**2
return power_fft
def freq_domain_eV(zsep,Nslice,xlamds):
    #constants
    hbar = 6.582e-16  # reduced Planck constant in eV*s
    c = 2.997925e8    # speed of light in m/s
    #photon energy of the radiation in eV
    omega = hbar * 2.0 * np.pi / (xlamds/c)
    df = hbar * 2.0 * np.pi/Nslice/zsep/(xlamds/c)
freq = np.linspace(omega - Nslice/2 * df, omega + Nslice/2 * df,Nslice)
return freq
def gaussian(x, *p):
A, mu, sigma, bg = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2)) + bg
def FWHM(power_fft,omega):
power_fft = power_fft / np.max(power_fft)
peak_pos = np.argmax(power_fft)
p0 = [1.0, omega[peak_pos], 0.15, 1e-4]
window = 10
coeff, var_matrix = curve_fit(gaussian, omega[peak_pos-window:peak_pos+window], power_fft[peak_pos-window:peak_pos+window], p0=p0)
FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0)) * coeff[2]
print ('Fitted mean = ', coeff[1])
print ('Fitted standard deviation = ', coeff[2])
print ('Fitted FWHM = ', FWHM)
return coeff, FWHM
def calculate_JJ(K):
J_arg = K**2 / (1.0 + K**2) / 2.0
JJ = j0(J_arg) - j1(J_arg)
return JJ
def calculate_gamma_res(g, K):
lambdau = g.input_params['xlamd']
lambdas = g.input_params['xlamds']
gamma_r2 = (lambdau/(2.0 * lambdas)) * (1.0 + K**2 )
gamma_res = np.sqrt(gamma_r2)
return gamma_res
def calculate_AD(K, Ngap):
NP = np.ceil(Ngap/(1 + K**2))
AD = np.sqrt( (1 + K**2 - Ngap/NP) / (Ngap/NP) )
return AD
#slices, zsep, Nslice, xlamds = load_slices('/home/alex/Desktop/pyGENT/genesis_run_150MW_tdp/mod_620.out')
#power_fft = power_spectrum(slices)
#omega = freq_domain_eV(zsep, Nslice, xlamds)
#plot omega vs power_fft
|
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
"""Advanced calculator."""
import math
import operator
CONSTANTS = {
"pi": math.pi,
"e": math.e,
"t": math.tau,
}
BINARY_OPERATORS = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
PREFIX_OPERATORS = {
"sqrt": math.sqrt,
"sin": math.sin,
"cos": math.cos,
"tg": math.tan,
"arcsin": math.asin,
"arccos": math.acos,
"ln": math.log,
}
POSTFIX_OPERATORS = {
"!": math.factorial,
}
PRIORITIES = {
"(": 0,
")": 0,
"+": 1,
"-": 1,
"*": 2,
"/": 2,
"^": 3,
}
def is_brackets_agreed(expression: list) -> bool:
    amount_of_brackets = 0
    for el in expression:
        if el == "(":
            amount_of_brackets += 1
        elif el == ")":
            amount_of_brackets -= 1
        # a closing bracket before its matching opening bracket can never balance
        if amount_of_brackets < 0:
            return False
    return amount_of_brackets == 0
def is_known_characters(expression: list) -> bool:
    known = (list(CONSTANTS) + list(BINARY_OPERATORS) + list(PREFIX_OPERATORS)
             + list(POSTFIX_OPERATORS) + ["(", ")"])
    for el in expression:
        if el not in known and not el.isdigit():
            return False
    return True
def calculate(reverse_polish_notation: list) -> float:
stack = []
for el in reverse_polish_notation:
if el.isdigit():
stack.append(float(el))
elif el in BINARY_OPERATORS:
first_operand, second_operand = stack.pop(-2), stack.pop(-1)
stack.append(BINARY_OPERATORS[el](first_operand, second_operand))
elif el in PREFIX_OPERATORS:
stack.append(PREFIX_OPERATORS[el](stack.pop(-1)))
elif el in POSTFIX_OPERATORS:
stack.append(POSTFIX_OPERATORS[el](stack[-1]))
elif el in CONSTANTS:
stack.append(CONSTANTS[el])
return stack.pop()
def check_for_correctness(func):
def inner(expression):
if is_brackets_agreed(expression) and is_known_characters(expression):
return calculate(func(expression))
else:
            if not is_brackets_agreed(expression):
                return "Brackets are not balanced!"
            elif not is_known_characters(expression):
                return "You entered unknown characters!"
return inner
@check_for_correctness
def convert_to_rpn(expression: list) -> list:
stack = []
reverse_polish_notation = []
for el in expression:
if el.isdigit() or el in POSTFIX_OPERATORS or el in CONSTANTS:
reverse_polish_notation.append(el)
elif el in PREFIX_OPERATORS or el == "(":
stack.append(el)
elif el == ")":
while stack[-1] != "(":
reverse_polish_notation.append(stack.pop())
stack.pop()
elif el in BINARY_OPERATORS:
if len(stack) != 0:
while stack[-1] in PREFIX_OPERATORS or PRIORITIES[stack[-1]] >= PRIORITIES[el]:
reverse_polish_notation.append(stack.pop())
if not stack:
break
stack.append(el)
reverse_polish_notation.extend(reversed(stack))
return reverse_polish_notation
def parse(expression: str) -> list:
array = []
operand = ""
for index, el in enumerate(expression):
if el.isalpha() or el.isdigit():
operand += el
elif el != " ":
if operand != "":
array.append(operand)
operand = ""
elif el == "*" and array[-1] == "*":
array[-1] = "^"
continue
elif el == "-" and index == 0 or array[-1] == "(":
array.append("0")
array.append(el)
if index == len(expression) - 1 and operand != "":
array.append(operand)
return array
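def _worked_example():
    """Hedged worked example (not part of the original module).
    Shows the full pipeline on "2 + 3 * 4". Note that convert_to_rpn is wrapped
    by check_for_correctness, so it returns the evaluated result (14.0) rather
    than the RPN token list its name suggests.
    """
    tokens = parse("2 + 3 * 4")     # ['2', '+', '3', '*', '4']
    return convert_to_rpn(tokens)   # 14.0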
if __name__ == "__main__":
repeat = True
while repeat:
user_input = input("\nEnter the expression ('q' - to quit): ")
if user_input != "q":
user_input = parse(user_input)
reverse_polish_notation = convert_to_rpn(user_input)
print(reverse_polish_notation)
else:
repeat = False
|
import os
import click
from diana.apis import DcmDir
from diana.dixel import DixelView
from diana.plus.halibut import get_mobilenet
epilog = """
\b
$ diana-plus classify resources/models/view_classifier/view_classifier.h5 tests/resources/dcm IM2263
Classifying images
------------------
Predicted: negative (0.88)
"""
@click.command(short_help="Classify DICOM files", epilog=epilog)
@click.argument('model', type=click.File())
@click.argument('path', type=click.Path(exists=True))
@click.argument('images', nargs=-1)
@click.option("--positive", "-p", help="Positive class", default="positive")
@click.option("--negative", "-n", help="Negative class", default="negative")
def classify(model, path, images, positive, negative):
"""Apply a classification MODEL to PATH with IMAGES"""
click.echo(click.style('Classifying images', underline=True, bold=True))
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
_model = get_mobilenet(0, weights=None)
_model.load_weights(model.name)
D = DcmDir(path=path)
for image in images:
d = D.get(image, view=DixelView.PIXELS)
# prediction = get_prediction( model, image )
prediction = d.get_prediction(_model)
if prediction >= 0.5:
click.echo("Predicted: {} ({})".format( positive, round(prediction, 2) ))
else:
click.echo("Predicted: {} ({})".format( negative, round(1.0-prediction, 2) ))
|
import functools
import operator
import day03.data as data
def part1():
value = data.INPUT
mod = len(value[0])
tree_idx = 0
tree_count = 0
row_count = len(value)
for row_idx in range(row_count):
tree_idx = row_idx * 3
if value[row_idx][tree_idx % mod] == "#":
tree_count += 1
return tree_count
def part2():
value = data.INPUT
row_count = len(value)
mod = len(value[0])
    slopes = [
        (1, 1),    # down 1, right 1
        (1, 3),    # down 1, right 3
        (1, 5),    # down 1, right 5
        (1, 7),    # down 1, right 7
        (2, 0.5),  # down 2, right 1 (encoded as 0.5 column per row, truncated with int())
    ]
trees = []
for slope in slopes:
step = slope[0]
right = slope[1]
tree_idx = 0
tree_count = 0
for row_idx in range(0, row_count, step):
tree_idx = int(row_idx * right)
if value[row_idx][tree_idx % mod] == "#":
tree_count += 1
trees.append(tree_count)
result = functools.reduce(operator.mul, trees)
return result
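# Hedged usage sketch (illustrative, not part of the original solution): runs
# both parts against the puzzle input module. Assumes day03.data.INPUT is a
# list of equal-length strings of '.' and '#' characters.
if __name__ == "__main__":
    print("part 1:", part1())
    print("part 2:", part2())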
|
"""
MIT License
Copyright (c) 2020 Collin Brooks
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pathlib
from yaml import load, dump
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
EXT = '.xsd'
XSD = 'xsd'
ATTRIBUTES = 'attributes'
ELEMENTS = 'elements'
GROUPS = 'groups'
TYPES = 'types'
ENUMS = 'enums'
PLACEHOLDER = 'placeholder'
ANY = 'any'
BOOLS = 'bools'
SIMPLE = 'simple'
COMPLEX = 'complex'
TYPE = 'type'
ENUMS = 'enums'
class Config():
"""Class which aids in the reading and writing of class generation for
xsd-based classes.
"""
def __init__(self, path):
self._path = path
self._config = None
self._xsd = path
def set_xsd(self, xsd):
"""Set the xsd name the config is to work with.
Args:
xsd (str): The name of the source xsd file without the extension.
"""
self._xsd = xsd
self._provide(self.get_config(), xsd, {})
def get_xsd(self):
return self._xsd
def clear_xsd(self):
self.get_config()[self._xsd] = {}
def load(self):
"""Load the configuration found at the given path.
Returns:
self
"""
if self._config is None:
# Load the current configuration
if pathlib.Path(self._path).exists():
config_file = open(self._path, 'r')
self._config = load(config_file, Loader=Loader)
config_file.close()
self._config = {} if self._config is None else self._config
return self
def save(self):
"""Save the configuration data to the configuration file.
Returns:
self
"""
header = "#This file was autogenerated. See config_generator.py\n"
config_file = open(self._path, 'w+')
config_file.write(header)
dump(self._config, config_file)
config_file.close()
return self
@staticmethod
def _provide(data, key, default):
"""Provide the given data dictionary with a default value for the
given key if no data exists at the key.
Args:
data (dict): A dictionary to provision
key (str): The key where data should be provisioned.
default (mixed): Whatever value the given key should have within
the data.
Returns:
dict: The provisioned dictionary.
"""
if data.get(key) is None:
data[key] = default
return data
def set_path(self, value, *path_parts):
"""Set the given value at the path rooted at the current xsd config
level.
This method makes it easy to travel into the configuration dictionary
and provision each of the levels with preliminary data so the final
level can have the given value data.
This method returns the value given so, if the value is a dict or
list, additional data can be placed within.
Args:
value (mixed): The value to place at the given path in the
configuration dictionary.
Returns:
mixed: the given value
"""
path_parts = list(path_parts)
config = self.get_xsd_config()
cur_config = config
last_part = path_parts.pop()
for part in path_parts:
self._provide(cur_config, part, {})
cur_config = cur_config[part]
cur_config[last_part] = value
return cur_config[last_part]
def get_config(self, require_xsd=True):
"""Get the configuration for the current xsd.
Raises:
Exception: Raised if the current xsd has not been set.
Returns:
dict: The config dictionary rooted at the data within the XSD
key.
"""
self.load()
self._provide(self._config, XSD, {})
if require_xsd:
if self._xsd is None:
raise Exception('Unable to get configuration without setting xsd!')
return self._config.get(XSD)
def get_xsd_config(self):
"""Get the configuration for the current xsd.
Returns:
dict: the data for the current xsd
"""
key = self._xsd
config = self.get_config()
self._provide(config, key, {})
return config[key]
def get_xsd_files(self):
"""Get a list of xsd file names our config file knows about.
Returns:
list: The list of xsd files our config knows about.
"""
return self.get_config(False).keys()
def get_types(self):
"""Get the TYPES for the current xsd.
Returns:
dict: Types keyed by type name with their data as values.
"""
return self.get_xsd_config().get(TYPES, {})
def get_groups(self):
"""Get the GROUPS for the current xsd.
Returns:
dict: Groups keyed by group name with their data as values.
"""
return self.get_xsd_config().get(GROUPS, {})
def get_elements(self):
"""Get the ELEMENTS for the current xsd.
Returns:
dict: Elements keyed by element name with their types as values.
"""
return self.get_xsd_config().get(ELEMENTS, {})
def get_element_type(self, element_name):
"""Get the type name for the element with the given name.
Args:
element_name (str): The element's name
Returns:
str|None: The type name if the element is known, None otherwise.
"""
return self.get_elements().get(element_name)
def get_type_config(self, type_name):
"""Get the type configuration for the type with the given name.
Args:
type_name (str): The type's name
Returns:
dict|None: The type's data if the type is known, None otherwise.
"""
return self.get_types().get(type_name)
def get_group_config(self, group_name):
"""Get the group configuration for the group with the given name.
Args:
group_name (str): The group's name
Returns:
dict: The group's data.
"""
return self.get_groups().get(group_name, {})
def get_group_groups(self, group_name):
"""Get the list of groups associated with the named group.
Args:
group_name (str): The name of the group to obtain child group
names for.
Returns:
list: A list of child group names
"""
return self.get_group_config(group_name).get(GROUPS, [])
def get_type_groups(self, type_name):
"""Get the list of groups associated with the named type.
Args:
type_name (str): The name of the type to obtain child group
names for.
Returns:
list: A list of child group names
"""
return self.get_type_config(type_name).get(GROUPS, [])
def get_type_attributes(self, type_name):
"""Return attribute data for the type with the given name.
Args:
type_name (str): The type to get attribute data for.
Returns:
dict: Attribute data for the given type name.
"""
return self.get_type_config(type_name).get(ATTRIBUTES, {})
def get_type_elements(self, type_name):
"""Return element data for the type with the given name.
Args:
type_name (str): The type to get element data for.
Returns:
dict: Element data for the given type name.
"""
return self.get_type_config(type_name).get(ELEMENTS, {})
def get_group_elements(self, group_name):
"""Return element data for the group with the given name.
Args:
group_name (str): The group to get element data for.
Returns:
dict: Element data for the given group name.
"""
return self.get_group_config(group_name).get(ELEMENTS, {})
def get_element_config(self, element_name):
"""Return element configuration for the element with the given name.
Args:
element_name (str): The element to get config data for.
Returns:
dict: Element configuration for the given element name.
"""
return self.get_elements().get(element_name)
def type_has_enum_attributes(self, type_name):
"""Determine whether or not the type with the given name has
attributes with enum data.
Args:
type_name (str): The type to check.
Returns:
bool: True if the type has attributes with enum data. False
otherwise.
"""
return ENUMS in self.get_type_attributes(type_name).keys()
def get_type_enum_attributes(self, type_name):
"""Get the list of attributes of this type which have enum data.
Args:
type_name (str): The name of the type to obtain enum attributes
for.
Returns:
dict: A dictionary of attribute name keys with enum list values.
"""
return self.get_type_attributes(type_name).get(ENUMS, {})
def add_group_config(self, name, group):
"""Add group configuration to the xsd config
Args:
name (str): The name of the group
group (Group): The group to add configuration details for
"""
group_info = {}
group_info[ELEMENTS] = self.get_element_info(group.get_elements())
groups = group.get_groups()
if len(groups) > 0:
group_info[GROUPS] = list(groups.keys())
self.set_path(group_info, GROUPS, name)
def add_type_config(self, name, node_type):
"""Add type configuration to the xsd config
Args:
name (str): The name of the type
node_type (Type): The type to add configuration details for
"""
type_config = self.set_path({}, TYPES, name)
attr_info = self.get_attr_info(node_type)
elem_info = self.get_element_info(node_type.get_elements())
group_info = self.get_group_info(node_type)
if len(attr_info) > 0:
type_config[ATTRIBUTES] = attr_info
if len(elem_info) > 0:
type_config[ELEMENTS] = elem_info
if len(group_info) > 0:
type_config[GROUPS] = group_info
@staticmethod
def get_group_info(node_type):
"""Compile doxyparser-specific information about the groups in
the given node type.
Args:
node_type (Type): The Type to return information for
Raises:
Exception: If multiple groups exist in the Type, we don't support
it yet.
Returns:
list: A list of group names in this Type
"""
info = []
groups = node_type.get_groups()
if len(groups) > 1:
raise Exception(
'Multiple groups in a single node not supported yet!')
for group in groups.values():
info.append(group.get_name())
return info
def get_attr_info(self, node_type):
"""Compile doxyparser-specific information about the attributes in
the given node type.
Args:
node_type (Type): The type to obtain attribute information about
Returns:
dict: Dict of doxyparser-specific data to aid in the generation
of python classes for this type.
"""
info = {}
# Go through each attribute and see if they are special
for attr in node_type.get_attributes().values():
# Can't do anything with an "any" attribute
if attr.is_any_attribute():
continue
if attr.is_any_type():
self._provide(info, ANY, [])
info[ANY].append(attr.get_name())
continue
# Doxygen has a DoxBool type that is either 'yes' or 'no'. We can
# make it so we have methods that will return a boolean value based
# on these two options.
if attr.is_dox_bool():
self._provide(info, BOOLS, [])
info[BOOLS].append(attr.get_name())
continue
# For attributes that have to be within a list of enumerated
# values, we can make methods that search for these known values
if attr.is_enum():
self._provide(info, ENUMS, {})
info[ENUMS][attr.get_name()] = attr.get_enum_values()
continue
# catchall
self._provide(info, SIMPLE, {})
info[SIMPLE][attr.get_name()] = attr.get_type().get_local_name()
return info
def get_element_info(self, elements):
"""Compile doxyparser-specific information about the elements.
Returns:
dict: Dict of doxyparser-specific data to aid in the generation
of python classes for this type.
"""
info = {}
for elem in elements.values():
if elem.is_placeholder():
self._provide(info, PLACEHOLDER, [])
info[PLACEHOLDER].append(elem.get_name())
elif elem.is_any_type():
self._provide(info, ANY, [])
info[ANY].append(elem.get_name())
elif elem.is_simple():
self._provide(info, SIMPLE, {})
info[SIMPLE][elem.get_name()] = elem.get_type().get_local_name()
elif elem.is_complex():
self._provide(info, COMPLEX, {})
info[COMPLEX][elem.get_name()] = elem.get_type_name()
else:
info[elem.get_name()] = {
TYPE: elem.get_type_name()
}
return info
def add_element_config(self, name, type_name):
"""Add element configuration.
Args:
name (str): The name of the element
type_name (str): The type of the element
"""
self.set_path(type_name, ELEMENTS, name)
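# Hedged usage sketch (illustrative, not part of the original module): a minimal
# round trip with the Config class. The file name, xsd name and element/type
# names below are made up.
def _config_demo(path='doxyparser_config.yml'):
    cfg = Config(path)
    cfg.set_xsd('compound')
    cfg.add_element_config('compoundname', 'compoundnameType')
    cfg.save()
    return cfg.get_elements()   # {'compoundname': 'compoundnameType'}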
|
import time
import asyncio
async def fn(delay,msg):
await asyncio.sleep(delay)
print(msg,'at',str(int(time.time())))
async def main():
print('Create tasks at',int(time.time()))
    sec = 0
t1 = asyncio.create_task(fn(sec+3,'Task 1'))
t2 = asyncio.create_task(fn(sec+2,'Task 2'))
t3 = asyncio.create_task(fn(sec+1,'Task 3'))
print('Started at',int(time.time()))
print('--------')
await t1
await t2
await t3
print('--------')
print('Finished at',int(time.time()))
if __name__ == '__main__':
    asyncio.run(main())
|
"""
The strategy is inspired and derived from Turtle Trading System
Rules:
Entries:
Turtles entered positions when the price exceeded by a single tick the
high or low of the preceding 20 days. If the price exceeded the 20-day high, then the
Turtles would buy one Unit to initiate a long position in the corresponding
commodity. If the price dropped one tick below the low of the last 20-days, the Turtles
would sell one Unit to initiate a short position.
Adding units:
Turtles entered single Unit long positions at the breakouts and added to those
positions at ½ N intervals following their initial entry. This ½ N interval was based on
the actual fill price of the previous order
SL
The Turtles placed their stops based on position risk. No trade could incur more than
2% risk. Turtle stops were set at 2N (ATR) below the entry for long positions,
and 2N (ATR) above the entry for short positions.
For example:
Crude Oil
ATR = 1.20
55 day breakout = 28.30
First unit:
|Units |Entry Price|Stop |
|First Unit | 28.30| 25.90|
second unit:
|Units |Entry Price|Stop |
|First Unit | 28.30| 26.50|
    |Second Unit | 28.90| 26.50|
third unit:
|Units |Entry Price|Stop |
|First Unit | 28.30| 27.10|
    |Second Unit | 28.90| 27.10|
    |Third Unit | 29.50| 27.10|
fourth unit:
|Units |Entry Price|Stop |
|First Unit | 28.30| 27.70|
|Second Unit | 28.90| 27.70|
|Third Unit | 29.50| 27.70|
|Fourth Unit | 30.10| 27.70|
Case where fourth unit was added at a higher price because the market opened gapping up to 30.80:
|Units |Entry Price|Stop |
|First Unit | 28.30| 27.70|
|Second Unit | 28.90| 27.70|
|Third Unit | 29.50| 27.70|
|Fourth Unit | 30.80| 28.40|
Exit
The System 1 exit was a 10 day low for long positions and a 10 day high for short
positions. All the Units in the position would be exited if the price went against the
position for a 10 day breakout.
Position Size:
    2% risk
"""
import logging
import pandas as pd
from src.backtester import BackTester
from src.orders.order import Order, OrderSide, OrderStatus
from src.position_calculator import pos_size
logger = logging.getLogger(__name__)
if __name__ == '__main__':
price_df = pd.read_csv('c:/temp/gbp_usd_h1_enrich.csv')
'''
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 time 65770 non-null object
1 open 65770 non-null float64
2 high 65770 non-null float64
3 low 65770 non-null float64
4 close 65770 non-null float64
5 last_10_high 65770 non-null float64
6 last_20_high 65770 non-null float64
7 last_10_low 65770 non-null float64
8 last_20_low 65770 non-null float64
9 day_close 65770 non-null float64
10 day_atr 65770 non-null float64
11 day_rsi 65770 non-null float64
12 day_ema_55 65770 non-null float64
'''
price_df = price_df[(price_df['time'] > '2010-01-01') & (price_df['time'] < '2020-12-01')]
initial_capital = 10000
risk_pct = 0.02
max_orders = 1
orders = []
for price in price_df.to_dict('records'):
# Checking open orders
open_orders = [o for o in orders if o.is_open]
for o in open_orders:
if o.is_long:
if price['low'] <= o.sl:
o.close_with_loss(price['time'])
elif price['low'] <= price['last_10_low']:
if o.entry <= price['last_10_low']:
o.close_with_win(price['time'], price['last_10_low'])
elif o.entry > price['last_10_low']:
o.close_with_loss(price['time'], price['last_10_low'])
else:
if price['high'] >= o.sl:
o.close_with_loss(price['time'])
elif price['high'] >= price['last_10_high']:
if o.entry >= price['last_10_high']:
o.close_with_win(price['time'], price['last_10_high'])
elif o.entry < price['last_10_high']:
o.close_with_loss(price['time'], price['last_10_high'])
open_longs = [o for o in orders if o.is_open and o.is_long]
open_shorts = [o for o in orders if o.is_open and o.is_short]
if len(open_longs) < max_orders:
if len(open_longs) == 0:
if price['high'] >= price['last_20_high'] and price['close'] > price['day_ema_55'] and (price['day_rsi'] >= 70 or price['day_rsi'] <= 30):
sl = price['close'] - price['day_atr'] * 2
lots = pos_size(account_balance=initial_capital, risk_pct=risk_pct, sl_pips=price['day_atr'] * 2 * 10000, instrument='GBP_USD')
orders.append(Order(price['time'], OrderSide.LONG, 'GBP_USD', entry=price['close'], sl=sl, status=OrderStatus.FILLED, units=100000 * lots))
else:
previous_entry = open_longs[-1].entry
atr = price['day_atr']
if price['high'] >= previous_entry + atr / 2:
initial_units = open_longs[0].units
new_entry = previous_entry + atr / 2
new_sl = new_entry - atr * 2
logger.info('Adding buy units ...')
orders.append(Order(price['time'], OrderSide.LONG, 'GBP_USD', entry=new_entry, status=OrderStatus.FILLED, units=initial_units))
for o in orders:
if o.is_open and o.is_long:
o.sl = new_sl
if len(open_shorts) < max_orders:
if len(open_shorts) == 0:
if price['low'] <= price['last_20_low'] and price['close'] < price['day_ema_55'] and (price['day_rsi'] >= 70 or price['day_rsi'] <= 30):
sl = price['close'] + price['day_atr'] * 2
lots = pos_size(account_balance=initial_capital, risk_pct=risk_pct, sl_pips=price['day_atr'] * 2 * 10000, instrument='GBP_USD')
orders.append(Order(price['time'], OrderSide.SHORT, 'GBP_USD', entry=price['close'], sl=sl, status=OrderStatus.FILLED, units=100000 * lots))
else:
previous_entry = open_shorts[-1].entry
atr = price['day_atr']
if price['low'] <= previous_entry - atr / 2:
initial_units = open_shorts[0].units
new_entry = previous_entry - atr / 2
new_sl = new_entry + atr * 2
logger.info('Adding sell units ...')
orders.append(Order(price['time'], OrderSide.SHORT, 'GBP_USD', entry=new_entry, status=OrderStatus.FILLED, units=initial_units))
for o in orders:
if o.is_open and o.is_short:
o.sl = new_sl
back_tester = BackTester(strategy='turtle trading')
back_tester.lot_size = 10000
back_tester.print_stats(orders)
|
import os
import time
import numpy as np
from functions.model import Classifier
from functions.utils import Utilities
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets
import torchvision.models as models
class UnNormalize(object):
"""
Used to convert a normalized torch tensor (image) into an unnormalized state. Used for plotting classification prediction images.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Parameters:
tensor (torch.Tensor): Tensor image of size (colour, height, width) to be normalized.
Returns:
torch.Tensor: Normalized image.
"""
# Normalized state: t.sub_(m).div_(s) - simply perform opposite
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
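# Hedged usage sketch (illustrative, not part of the original module): undoing
# the mean/std normalisation applied in Tuner.set_data on a single fake image
# tensor before plotting. The tensor shape and values are made up.
def _unnormalize_demo():
    unnorm = UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    fake_img = torch.rand(3, 224, 224)      # pretend this came from the data loader
    normalized = (fake_img - 0.5) / 0.5     # what transforms.Normalize produces
    restored = unnorm(normalized)           # back to the original value range
    return torch.allclose(restored, fake_img, atol=1e-6)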
class Tuner:
"""
    Contains utility functions used within the hyperparameter tuning Notebook. Combines components from the initial Notebook so that the tuning Notebook stays condensed and focused on the tuning itself.
"""
def __init__(self):
self.utils = Utilities()
self.device = self.utils.set_device()
def set_data(self, filepath):
"""
Sets the dataset using pre-defined transformations.
Parameters:
filepath (string) - filepath to the dataset
"""
mean = (0.5, 0.5, 0.5)
std = (0.5, 0.5, 0.5)
# Set transformations for batch data
transform = transforms.Compose([
transforms.Resize(224), # Resize images to 224
transforms.CenterCrop(224), # Make images 224x224
transforms.RandomHorizontalFlip(), # Randomly flip some samples (50% chance)
transforms.RandomRotation(20), # Randomly rotate some samples
transforms.ToTensor(), # Convert image to a tensor
transforms.Normalize(mean=mean, std=std) # Normalize image values
])
# Set dataset, labels and unnormalized object
dataset = torchvision.datasets.ImageFolder(filepath, transform=transform)
self.labels = np.array(list(dataset.class_to_idx), dtype=object)
self.unnorm = UnNormalize(mean=mean, std=std)
return dataset
def set_initial_models(self, n_classes, h_layers):
"""
Used to set the three models (GoogLeNet, MobileNet v2, and ResNet-34) with new classifiers.
Parameters:
        n_classes (int) - number of output classes
h_layers (list) - integers that represent each layers node count, can be 1 list or 3 lists
"""
# Set class specific variables
self.h_layers = h_layers
self.n_classes = n_classes
# Create instances of pretrained CNN architectures
googlenet = models.googlenet(pretrained=True)
mobilenetv2 = models.mobilenet_v2(pretrained=True)
resnet34 = models.resnet34(pretrained=True)
# Initialize new classifiers
if isinstance(h_layers[0], list):
gnet_classifier = Classifier(in_features=googlenet.fc.in_features,
out_features=n_classes,
hidden_layers=h_layers[0])
mobilenet_classifier = Classifier(in_features=mobilenetv2.classifier[1].in_features,
out_features=n_classes,
hidden_layers=h_layers[1])
resnet_classifier = Classifier(in_features=resnet34.fc.in_features,
out_features=n_classes,
hidden_layers=h_layers[2])
else:
gnet_classifier = Classifier(in_features=googlenet.fc.in_features,
out_features=n_classes,
hidden_layers=h_layers)
mobilenet_classifier = Classifier(in_features=mobilenetv2.classifier[1].in_features,
out_features=n_classes,
hidden_layers=h_layers)
resnet_classifier = Classifier(in_features=resnet34.fc.in_features,
out_features=n_classes,
hidden_layers=h_layers)
cnn_models = [googlenet, mobilenetv2, resnet34]
# Freeze architecture parameters to avoid backpropagating them
# Avoiding replacing pretrained weights
for model in cnn_models:
for param in model.parameters():
param.requires_grad = False
# Replace last FC/classifier with new classifier
googlenet.fc = gnet_classifier
mobilenetv2.classifier = mobilenet_classifier
resnet34.fc = resnet_classifier
return cnn_models
def calc_params(self, model_names, n_classes, h_layers):
"""
Used to calculate the amount of trainable parameters vs total parameters for each model.
Parameters:
model_names (list) - a list of the model names
n_classes (int) - number of output classes
h_layers (list) - hidden node integers, one per layer
"""
models = self.set_initial_models(n_classes, h_layers)
# Total params for each model
for idx, model in enumerate(models):
print(f"{model_names[idx]}:")
model.total_params = sum(p.numel() for p in model.parameters())
print(f'{model.total_params:,} total parameters')
model.trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'{model.trainable_params:,} training parameters\n')
def _set_filename(self, model_name, batch_size, h_layers):
"""
Helper function used to set the saved models filename. Returns the filename.
Parameters:
model_name (string) - name of the model
batch_size (int) - batch size of data loader
h_layers (list) - hidden node integers, one per layer
Format: [model_name]_[batch_size]_[hidden_sizes]
"""
filename = f"{model_name}_{str(batch_size)}_"
for layer in h_layers:
filename += f"{str(layer)}_"
return filename[:-1]
def tune_model(self, model_names, batch_size, train_loader, valid_loader,
n_classes, h_layers, lr, epochs=1000, iterations=2, patience=5):
"""
Used to tune a model on the given training loader, evaluated against the validation loader. Iterates over a list of hidden layers, saving multiple model versions.
Parameters:
model_names (list) - a list of the model names as strings
batch_size (int) - batch size of the train and validation loader
train_loader (torch.DataLoader) - torch training dataset loader
valid_loader (torch.DataLoader) - torch validation dataset loader
n_classes (int) - number of output classes
h_layers (list) - lists of a variety of hidden node sizes
lr (float) - learning rate for training the model
epochs (int) - number of epochs for training (default: 1000)
iterations (int) - iterations per number of epochs (default: 2)
patience (int) - number of epochs to wait before early stopping (default: 5)
"""
# Iterate over hidden layers
for l in range(len(h_layers)):
# Create instances of pretrained CNN architectures
models = self.set_initial_models(n_classes, h_layers[l])
# Iterate over models
for m in range(len(models)):
filename = self._set_filename(model_names[m], batch_size, h_layers[l])
filepath = f"saved_models/{filename}.pt"
# Skip model training if already has been trained
if os.path.isfile(filepath):
print(f"{filename} already trained.")
else:
print(f"\nTraining: {filename}")
criterion = nn.NLLLoss() # Negative Log Likelihood Loss
# Set optimizer
if m == 1: # MobileNetV2 specific
optimizer = torch.optim.Adam(models[m].classifier.parameters(),
lr=lr)
else:
optimizer = torch.optim.Adam(models[m].fc.parameters(),
lr=lr)
models[m].to(self.device) # move to GPU
# Train model
self.utils.train(models[m], train_loader, valid_loader, criterion,
optimizer, filepath, epochs, iterations, patience)
def set_model(self, model_paths, model, model_name):
"""
Used to check what type of model needs to be set for testing. Returns the model and its name.
Name format: [model]_[batch_size]_[hidden_size]_[hidden_size]
Parameters:
model_paths (list) - list of filepaths of saved models
model (torchvision.models) - initial pretrained model
model_name (string) - name of the model
"""
# Set initial variables
load_name = ""
compare_parts = [model_name, self.utils.batch_size]
compare_parts.extend(self.h_layers)
# Iterate over each model
for filepath in model_paths:
            # os.path.splitext avoids rstrip('.pt'), which would also strip
            # trailing 'p'/'t' characters from the file stem itself
            compare_name = os.path.splitext(os.path.basename(filepath))[0].split('_')
valid = []
# Check components match
for item in range(len(compare_name)):
if compare_name[item] == str(compare_parts[item]):
valid.append(True)
# Load saved model
if len(valid) == len(compare_name):
                load_name = os.path.splitext(os.path.basename(filepath))[0]
self.utils.load_model(model, f'saved_models/{filepath}')
break
return model, load_name
def save_best_models(self, model_stats, model_names, n_preds):
"""
Used to save the three best performing models based on the statistics of all model variations. Returns a list of the best models.
Parameters:
model_stats (pandas.DataFrame) - table of best model statistics
model_names (list) - model names as strings
n_preds (int) - number of additional predictions to store (e.g. top-5)
"""
best_models = []
n_models = len(model_names)
count = 1
start_time = time.time()
# Iterate over each model
for idx, item in enumerate(model_stats['Name']):
name, batch = item.split('_')[:2]
h_layers = list(map(int, item.split("_")[2:]))
filepath = f'saved_models/{item}.pt'
cnn_models = self.set_initial_models(self.n_classes, h_layers)
# Check names match
for cnn_name in model_names:
if name == cnn_name:
# Load model and store it
model = cnn_models[idx]
self.utils.load_model(model, filepath)
best_models.append(model)
filename = cnn_name.replace('-', '').lower()
# Set statistics
stats = model_stats.iloc[idx, 1:].to_dict()
# Set additional model parameters
model.batch_size = int(batch)
model.h_layers = h_layers
model.stats = stats
# Save model predictions
print(f"Calculating preds and stats for {name}...", end=" ")
_, _, test_loader = self.utils.split_data(self.utils.dataset, int(batch),
self.utils.split_size,
self.utils.seed)
self._save_predictions(model, test_loader, n_preds)
print(f"Complete ({count}/{n_models}).")
count += 1
# Save as best model
print(f"Saving model...", end=" ")
self._save_model(model, filename)
print(f"Complete.")
self.utils.time_taken(time.time() - start_time)
return best_models
def _save_predictions(self, model, test_loader, n_preds):
"""
Helper function used to save the best models predictions, labels and probabilities for plotting.
Parameters:
model (torchvision.models) - models predictions to save
valid_loader (torch.DataLoader) - torch test dataset loader
n_preds (int) - number of additional predictions to store (e.g. top-5)
"""
# Calculate predictions, labels and probabilities for best models
y_pred, y_true, all_n_preds, y_probas = self.utils.predict(model, test_loader,
n_preds,
store_labels=True,
store_probas=True)
# Store data
model.y_pred = y_pred
model.y_true = y_true
model.n_preds = all_n_preds
model.y_probas = y_probas
def _save_model(self, model, filename):
"""
Helper function used to save the best models.
Parameters:
model (torchvision.models) - model to save
filename (string) - filename of model to save
"""
torch.save({'parameters': model.state_dict(),
'train_losses': model.train_losses,
'valid_losses': model.valid_losses,
'batch_size': model.batch_size,
'h_layers': model.h_layers,
'stats': model.stats,
'y_pred': model.y_pred,
'y_true': model.y_true,
'n_preds': model.n_preds,
'y_probas': model.y_probas,
}, f'saved_models/best_{filename}.pt')
def load_best_models(self, models, filenames):
"""
Used to load the three best models.
Parameters:
model (list) - torchvision.models to load
filenames (list) - filenames of saved models to load within saved_models folder
"""
# Set a checkpoint
for idx, model in enumerate(models):
checkpoint = torch.load(f"saved_models/{filenames[idx]}.pt")
# Store utility variables
model.train_losses = checkpoint['train_losses']
model.valid_losses = checkpoint['valid_losses']
model.batch_size = checkpoint['batch_size']
model.h_layers = checkpoint['h_layers']
model.stats = checkpoint['stats']
model.y_pred = checkpoint['y_pred']
model.y_true = checkpoint['y_true']
model.n_preds = checkpoint['n_preds']
model.y_probas = checkpoint['y_probas']
# load model parameters
model.load_state_dict(checkpoint['parameters'])
print("Models loaded. Utility variables available:")
print("\ttrain_losses, valid_losses, batch_size, h_layers, stats,\n")
print("\ty_pred, y_true, n_preds, y_probas.") |
# Generated by Django 3.0.5 on 2020-04-23 02:53
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BankAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.CharField(db_index=True, help_text='A bank account token provided by Pin', max_length=40, verbose_name='Pin API Bank account token')),
('bank_name', models.CharField(help_text='The name of the bank at which this account is held', max_length=100, verbose_name='Bank Name')),
('branch', models.CharField(blank=True, help_text='The name of the branch at which this account is held', max_length=100, verbose_name='Branch name')),
('name', models.CharField(help_text='The name of the bank account', max_length=100, verbose_name='Recipient Name')),
('bsb', models.IntegerField(help_text='The BSB (Bank State Branch) code of the bank account.', verbose_name='BSB')),
                ('number', models.CharField(help_text='The account number of the bank account', max_length=20, verbose_name='Account Number')),
('environment', models.CharField(blank=True, db_index=True, help_text='The name of the Pin environment to use, eg test or live.', max_length=25)),
],
),
migrations.CreateModel(
name='CustomerToken',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('environment', models.CharField(blank=True, db_index=True, help_text='The name of the Pin environment to use, eg test or live.', max_length=25)),
('token', models.CharField(help_text='Generated by Card API or Customers API', max_length=100, verbose_name='Token')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('card_type', models.CharField(blank=True, choices=[('master', 'Mastercard'), ('visa', 'Visa')], help_text='Determined automatically by Pin', max_length=20, null=True, verbose_name='Card Type')),
('card_number', models.CharField(blank=True, help_text='Cleansed by Pin API', max_length=100, null=True, verbose_name='Card Number')),
('card_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='Name on Card')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PinRecipient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('token', models.CharField(db_index=True, help_text='A recipient token provided by Pin', max_length=40)),
('email', models.EmailField(help_text='As passed to Pin.', max_length=100)),
('name', models.CharField(blank=True, help_text='Optional. The name by which the recipient is referenced', max_length=100, null=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Time created')),
('environment', models.CharField(blank=True, db_index=True, help_text='The name of the Pin environment to use, eg test or live.', max_length=25)),
('bank_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='pinpayments.BankAccount')),
],
),
migrations.CreateModel(
name='PinTransfer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('transfer_token', models.CharField(blank=True, db_index=True, help_text='Unique ID from Pin for this transfer', max_length=100, null=True, verbose_name='Pin API Transfer Token')),
('status', models.CharField(blank=True, help_text='Status of transfer at time of saving', max_length=100, null=True)),
('currency', models.CharField(help_text='currency of transfer', max_length=10)),
('description', models.CharField(blank=True, help_text='Description as shown on statement', max_length=100, null=True)),
('amount', models.IntegerField(help_text='Transfer amount, in the base unit of the currency (e.g.: cents for AUD, yen for JPY)')),
('created', models.DateTimeField(auto_now_add=True)),
('pin_response_text', models.TextField(blank=True, help_text='The full JSON response from the Pin API', null=True, verbose_name='Complete API Response')),
('recipient', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='pinpayments.PinRecipient')),
],
),
migrations.CreateModel(
name='PinTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(db_index=True, help_text='Time this transaction was put in the database. May differ from the time that PIN reports the transaction.', verbose_name='Date')),
('environment', models.CharField(blank=True, db_index=True, help_text='The name of the Pin environment to use, eg test or live.', max_length=25)),
('amount', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Amount (Dollars)')),
('fees', models.DecimalField(blank=True, decimal_places=2, default=Decimal('0.00'), help_text='Fees charged to you by Pin, for this transaction, in dollars', max_digits=10, null=True, verbose_name='Transaction Fees')),
('description', models.TextField(blank=True, help_text='As provided when you initiated the transaction', null=True, verbose_name='Description')),
('processed', models.BooleanField(default=False, help_text='Has this been sent to Pin yet?', verbose_name='Processed?')),
('succeeded', models.BooleanField(default=False, help_text='Was the transaction approved?', verbose_name='Success?')),
('currency', models.CharField(default='AUD', help_text='Currency transaction was processed in', max_length=100, verbose_name='Currency')),
('transaction_token', models.CharField(blank=True, db_index=True, help_text='Unique ID from Pin for this transaction', max_length=100, null=True, verbose_name='Pin API Transaction Token')),
('card_token', models.CharField(blank=True, help_text='Card token used for this transaction (Card API and Web Forms)', max_length=40, null=True, verbose_name='Pin API Card Token')),
('pin_response', models.CharField(blank=True, help_text='Response text, usually Success!', max_length=255, null=True, verbose_name='API Response')),
('ip_address', models.GenericIPAddressField(help_text='IP Address used for payment')),
('email_address', models.EmailField(help_text='As passed to Pin.', max_length=100, verbose_name='E-Mail Address')),
('card_address1', models.CharField(blank=True, help_text='Address entered by customer to process this transaction', max_length=100, null=True, verbose_name='Cardholder Street Address')),
('card_address2', models.CharField(blank=True, max_length=100, null=True, verbose_name='Cardholder Street Address Line 2')),
('card_city', models.CharField(blank=True, max_length=100, null=True, verbose_name='Cardholder City')),
('card_state', models.CharField(blank=True, max_length=100, null=True, verbose_name='Cardholder State')),
('card_postcode', models.CharField(blank=True, max_length=100, null=True, verbose_name='Cardholder Postal / ZIP Code')),
('card_country', models.CharField(blank=True, max_length=100, null=True, verbose_name='Cardholder Country')),
('card_number', models.CharField(blank=True, help_text='Cleansed by Pin API', max_length=100, null=True, verbose_name='Card Number')),
('card_type', models.CharField(blank=True, choices=[('master', 'Mastercard'), ('visa', 'Visa')], help_text='Determined automatically by Pin', max_length=20, null=True, verbose_name='Card Type')),
('pin_response_text', models.TextField(blank=True, help_text='The full JSON response from the Pin API', null=True, verbose_name='Complete API Response')),
('customer_token', models.ForeignKey(blank=True, help_text='Provided by Customer API', null=True, on_delete=django.db.models.deletion.SET_NULL, to='pinpayments.CustomerToken')),
],
options={
'verbose_name': 'PIN.net.au Transaction',
'verbose_name_plural': 'PIN.net.au Transactions',
'ordering': ['-date'],
},
),
]
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# Build the directory path for our database file
directory = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
# Configure the database and set it up to create the .sqlite file
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(directory, 'data.sqlite')
# Disable modification tracking for our database
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Create the database through an instance of the SQLAlchemy class, passing
# our app as the parameter
dataBase = SQLAlchemy(app)
# Migrate is used so that table updates can be pushed to
# the database without having to rebuild it
Migrate(app, dataBase)
# Create our table with the data its columns will hold
class Person(dataBase.Model):
    # The table name is set explicitly, but this is not required since
    # the class name would be used as the table name by default
__tablename__ = 'Persons'
id = dataBase.Column(dataBase.Integer, primary_key = True)
name = dataBase.Column(dataBase.Text)
age = dataBase.Column(dataBase.Integer)
country = dataBase.Column(dataBase.Text)
def __init__(self, name, age, country):
self.name = name
self.age = age
self.country = country
def __repr__(self):
text = "Person: {} is {} from {}".format(self.name, self.age, self.country)
return text
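# Hedged usage sketch (illustrative, not part of the original file): create the
# schema and insert one row, e.g. from a Flask shell. The person data is made up.
def _demo_insert():
    dataBase.create_all()
    person = Person("Ana", 30, "Mexico")
    dataBase.session.add(person)
    dataBase.session.commit()
    return Person.query.all()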
|
import snakerf as srf
import matplotlib.pyplot as plt
import numpy as np
from math import inf, pi, log2
from scipy import signal
# see https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.periodogram.html#scipy.signal.periodogram
m = 3
data = '{0:0{1:d}b}'.format(srf.gold_codes(m)[2], 2**m - 1)
print(data)
n = 1
f = 1234
f_bit = 9001
T_bit = 1/f_bit
# t_max = len(data)*T_bit/n - T_bit/100
fs = 10e3
ns = 10000
t_max = ns/fs
N = ns
amp = 2*np.sqrt(2)
freq = 1234.0
noise_power = 0.001 * fs / 2
time = np.arange(N) / fs
# x = amp*np.sin(2*np.pi*freq*time)
x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
f, Pxx_spec = signal.periodogram(x, fs, scaling='spectrum')
plt.plot(f, srf.W2dBm(Pxx_spec/50))
df = (f[1] - f[0])
mean_noise = np.mean(srf.mag(Pxx_spec/df))
print(mean_noise)
plt.axhline(srf.W2dBm(mean_noise * df/50), c = 'purple', ls = '-')
V1 = srf.Signal(ns, t_max)
# V1.update_Vt(amp*np.sin(2*np.pi*freq*time))
V1.add_noise(noise = 0.001 /(4 * srf.kB * V1.Z0))
srf.plot_power_spectrum(plt.gca(), V1.fs, V1.Pf)
mean_noise = np.mean(srf.mag(V1.Pf * V1.Z0 / V1.df))
print(mean_noise)
plt.axhline(srf.W2dBm(mean_noise * V1.df/V1.Z0), c = 'k', ls = '--')
# srf.plot_power_spectrum(plt.gca(), V1.ts, np.random.normal(0, 1, len(V1.ts)), time = True)
# f_ref = [0, 4, 5, 8.3, 12] # log frequency
# Fa_ref = [270, 150, 80, 0, 0] # Fa = 10*log10(T_noise/t0)
#
# T_noise = srf.undB(np.interp(np.log10(np.maximum(V1.fs,np.ones(len(V1.fs)))), f_ref, Fa_ref)) * srf.t0 # weird thing with ones to avoid log(0)
# plt.plot(V1.fs, srf.W2dBm(srf.kB*T_noise))
#
# plt.subplot(2,1,2)
# plt.plot(V1.ts, V1.Vt)
plt.show()
# V1.update_Vt(srf.V_psk(V1.ts, f, f_bit, data, -90, n = n))
# V2 = V1.copy()
#
# V1.add_noise(srf.NF2T_noise(6))
# V2.add_noise(srf.NF2T_noise(3))
# V2.add_noise(srf.NF2T_noise(3))
# V2.add_noise(srf.NF2T_noise(3))
# # V2.add_noise(srf.NF2T_noise(3))
#
# srf.plot_power_spectrum(plt.gca(), V1.fs, V1.Pf, c = 'blue')
# srf.plot_power_spectrum(plt.gca(), V2.fs, V2.Pf, c = 'orange')
#
# plt.axhline(srf.W2dBm(np.mean(srf.mag(V1.Pf))), ls = '--', c = 'blue')
# plt.axhline(srf.W2dBm(np.mean(srf.mag(V2.Pf))), ls = '--', c = 'orange')
#
# plt.show()
# for i in range(len(data)):
# plt.axvline(T_bit * i, ls = '--', c = 'black')
# plt.show()
# plt.subplot(2,1,1)
# # plt.plot(fs, srf.dB(srf.mag(srf.Pdiv(srf.R(R1, ws), srf.C(C1, ws)))))
# # plt.axvline(srf.w2f(1/(R1*C1)), ls = '-', c = 'black')
# # srf.plot_power_spectrum(plt.gca(), t, v1, time = True)
# # srf.plot_power_spectrum(plt.gca(), t, v2, time = True)
# # plt.xlim(f-10*f_bit,f+10*f_bit)
# plt.subplot(2,1,2)
# # plt.plot(t, srf.Pf2Vt(v3f, len(t)))
# plt.plot(t, srf.Pf2Vt(v1f, len(t)))
# plt.plot(t, srf.Pf2Vt(v2f, len(t)))
# for i in range(len(data)):
# plt.axvline(T_bit * i, ls = '--', c = 'black')
# plt.show()
|
#!/usr/bin/env python3
import sys
import rospy
import actionlib
from control_msgs.msg import (FollowJointTrajectoryAction,
FollowJointTrajectoryGoal,
GripperCommandAction,
GripperCommandGoal)
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
arm_joint_names = ["joint_1", "joint_2", "joint_3", "joint_4", "joint_5"]
arm_intermediate_positions = [0.0, 0.9575, -0.2510, 0.8364, 0.0] # picking_arm
arm_joint_positions = [0.0, -1.0, 1.0, 1.2, 0.0] # rest_arm
if __name__ == "__main__":
rospy.init_node("prepare_simulated_robot")
rospy.loginfo("Waiting for arm_controller...")
arm_client = actionlib.SimpleActionClient("arm_controller/follow_joint_trajectory", FollowJointTrajectoryAction)
arm_client.wait_for_server()
rospy.loginfo("...connected.")
rospy.loginfo("Waiting for gripper_controller...")
gripper_client = actionlib.SimpleActionClient("gripper_controller/gripper_action", GripperCommandAction)
gripper_client.wait_for_server()
rospy.loginfo("...connected.")
trajectory = JointTrajectory()
trajectory.joint_names = arm_joint_names
trajectory.points.append(JointTrajectoryPoint())
trajectory.points[0].positions = [0.0] * len(arm_joint_positions)
trajectory.points[0].velocities = [0.0] * len(arm_joint_positions)
trajectory.points[0].accelerations = [0.0] * len(arm_joint_positions)
trajectory.points[0].time_from_start = rospy.Duration(1.0)
trajectory.points.append(JointTrajectoryPoint())
trajectory.points[1].positions = arm_intermediate_positions
trajectory.points[1].velocities = [0.0] * len(arm_joint_positions)
trajectory.points[1].accelerations = [0.0] * len(arm_joint_positions)
trajectory.points[1].time_from_start = rospy.Duration(4.0)
trajectory.points.append(JointTrajectoryPoint())
trajectory.points[2].positions = arm_joint_positions
trajectory.points[2].velocities = [0.0] * len(arm_joint_positions)
trajectory.points[2].accelerations = [0.0] * len(arm_joint_positions)
trajectory.points[2].time_from_start = rospy.Duration(7.5)
arm_goal = FollowJointTrajectoryGoal()
arm_goal.trajectory = trajectory
arm_goal.goal_time_tolerance = rospy.Duration(0.0)
gripper_goal = GripperCommandGoal()
gripper_goal.command.max_effort = 10.0
gripper_goal.command.position = 0.108
rospy.loginfo("Setting positions...")
arm_client.send_goal(arm_goal)
gripper_client.send_goal(gripper_goal)
gripper_client.wait_for_result(rospy.Duration(5.0))
arm_client.wait_for_result(rospy.Duration(6.0))
rospy.loginfo("...done")
|
from . import commands
from .task import BaseTask
from .utils import echo
from .worker import Worker
|
"""
This module contains the structs necessary to represent an automata.
"""
from __future__ import annotations
import logging
from typing import Any, Dict, Iterable, List, Set, Tuple, Union
from numlab.automata.state import State
from numlab.automata.transition import Transition
_ATMT_COUNT = 0
class Automata:
"""
An automata.
Parameters
----------
name : str
The name of the automata.
Attributes
----------
name : str
The name of the automata.
states : Dict[str, State]
The states of the automata.
start_states : List[State]
The start states of the automata.
end_states : List[State]
The end states of the automata.
"""
def __init__(self, name: str = None) -> None:
if name is None:
global _ATMT_COUNT
name = f"atmt_{_ATMT_COUNT}"
_ATMT_COUNT += 1
self.name = name
self.states: Dict[str, State] = {}
self.start_states: List[State] = []
self.end_states: List[State] = []
self._pos = 0
self._input = None
self._current_state: State = None
self._processes: List[Tuple[State, int]] = []
self._processes_idx: int = 0
def __getattr__(self, item: str) -> Any:
if item in self.states:
return self.states[item]
raise AttributeError(f"No attribute {item}")
@property
def alphabet(self) -> Set[Tuple[Any, bool]]:
"""
Get the alphabet of the automata.
Returns
-------
List[Any]
The alphabet of the automata.
"""
alphabet = set()
for state in self.states.values():
for transition in state.transitions:
if transition.is_epsilon:
continue
if isinstance(transition.condition, str):
alphabet.add(transition.condition)
else:
alphabet.update(transition.condition)
return alphabet
def concatenate(self, other: Automata, set_single: bool = False) -> Automata:
"""
Concatenate the automata with another one.
Parameters
----------
other : Automata
The other automata.
set_single : bool, optional
Whether to set the automata to have a single start and end state
when needed, by default False.
Returns
-------
Automata
The concatenated automata.
Raises
------
ValueError
If the current automata has multiple end states and ``set_single`` is
False.
ValueError
If the other automata has multiple start states and ``set_single`` is
False.
"""
if len(self.end_states) != 1:
if set_single:
self.set_single_end()
else:
raise ValueError(f"Automata {self.name} has multiple end states.")
if len(other.start_states) != 1:
if set_single:
other.set_single_start()
else:
raise ValueError(f"Automata {other.name} has multiple start states.")
other = other.flat()
other_first_state = other.start_state
other_last_state = other.end_state
self.end_state.merge(other_first_state)
if other_last_state == other_first_state:
other_last_state = self.end_state
for state in other.states.values():
for trans in state.transitions:
if trans.to_state is other_first_state:
trans.to_state = self.end_state
self.end_states = [other_last_state]
return self
@property
def pos(self) -> int:
"""Position of the automata on the input"""
return self._pos
@property
def start_state(self) -> State:
"""Get the start state of the automata."""
if len(self.start_states) == 1:
return self.start_states[0]
raise ValueError("The automata has multiple start states.")
@property
def end_state(self) -> State:
"""Get the end state of the automata."""
if len(self.end_states) == 1:
return self.end_states[0]
raise ValueError("The automata has multiple end states.")
def add_state(
self,
state: Union[str, State] = None,
start: bool = False,
end: bool = False,
name: str = None,
) -> State:
"""
Add a state to the automata.
Parameters
----------
state : Union[str, State]
The name of the state to add or the state itself.
start : bool
Whether the state is a start state.
end : bool
Whether the state is an end state.
Returns
-------
State
The added state.
"""
if state is None:
state = State(f"q{len(self.states)}")
if isinstance(state, str):
if state in self.states:
raise ValueError(f"State {state} already exists.")
state = State(state)
state.automata = self
name = name if name is not None else state.name
self.states[name] = state
if start:
self.start_states.append(state)
if end:
self.end_states.append(state)
return state
def add_transition(
self,
from_state: Union[str, State],
to_state: Union[str, State],
condition: Any = None,
action: int = None,
negated: bool = False,
) -> None:
"""
Add a transition to the automata.
Parameters
----------
from_state : Union[str, State]
The state from which the transition starts.
to_state : Union[str, State]
The state to which the transition goes.
condition : Any
The condition under which the transition is taken.
action : int
The action to perform when the transition is taken.
Raises
------
ValueError
If any of the states does not exist.
"""
if isinstance(from_state, str):
from_state = self.states.get(from_state, None)
if from_state is None:
raise ValueError(f"No state {from_state} defined.")
if isinstance(to_state, str):
to_state = self.states.get(to_state, None)
if to_state is None:
raise ValueError(f"No state {to_state} defined.")
if action is None:
action = 0 if condition is None else 1
transition = Transition(from_state, to_state, condition, action, negated)
from_state.transitions.append(transition)
return transition
def set_single_start(self) -> State:
"""
Set the automata to have a single start state.
Returns
-------
State
The start state.
"""
if len(self.start_states) == 1:
return self.start_states[0]
start_st = self.add_state(f"_start_{self.name}")
for state in self.start_states:
self.add_transition(start_st, state)
self.start_states = [start_st]
return start_st
def set_single_end(self) -> State:
"""
Set the automata to have a single end state.
Returns
-------
State
The end state.
"""
if len(self.end_states) == 1:
return self.end_states[0]
end_st = self.add_state(f"_end_{self.name}")
for state in self.end_states:
self.add_transition(state, end_st)
self.end_states = [end_st]
return end_st
def set_single_start_end(self) -> Tuple[State, State]:
"""
Set the automata to have a single start and end state.
Returns
-------
Tuple[State, State]
The start and end state.
"""
start_st = self.set_single_start()
end_st = self.set_single_end()
return start_st, end_st
def flat(self) -> Automata:
"""
Flatten the automata.
Returns
-------
Automata
The flattened automata.
"""
flat = Automata(self.name)
count = 0
visited_states = []
non_visited_states = self.start_states
while non_visited_states:
new_non_visited_states = []
for state in non_visited_states:
flat.add_state(
state,
state in self.start_states,
state in self.end_states,
name=f"q{count}",
)
state.name = f"q{count}"
count += 1
visited_states.append(state)
for transition in state.transitions:
to_state = transition.to_state
if (
to_state not in visited_states
and to_state not in new_non_visited_states
and to_state not in non_visited_states
):
new_non_visited_states.append(transition.to_state)
non_visited_states = new_non_visited_states
return flat
def show(self) -> None:
"""
Show the automata.
"""
# Inverse name states dict
inv_states = {v: k for k, v in self.states.items()}
for name, state in self.states.items():
print(name, f"Final: {state in self.end_states}")
for transition in state.transitions:
neg = "^" if transition.negated else ""
print(
f" ({neg}{transition.str_cond}) "
f"-> {inv_states[transition.to_state]}"
)
def _eps_closure_single(self, state: Union[str, State]) -> Set[State]:
"""
Compute the epsilon closure of a single state.
Parameters
----------
state : Union[str, State]
The state to compute the epsilon closure of.
Returns
-------
Set[State]
The epsilon closure of the state.
Raises
------
ValueError
If the state does not exist.
"""
if isinstance(state, str):
if state not in self.states:
raise ValueError(f"No state {state} defined.")
state = self.states[state]
        visited = set()
        non_visited = [state]
        while non_visited:
            new_non_visited = []
            for current_state in non_visited:
                visited.add(current_state)
                for transition in current_state.transitions:
                    if transition.is_epsilon:
                        to_st = transition.to_state
                        if (
                            to_st not in visited
                            and to_st not in new_non_visited
                            and to_st not in non_visited
                        ):
                            new_non_visited.append(to_st)
            non_visited = new_non_visited
return visited
def eps_closure(
self, state: Union[str, State, Iterable[str], Iterable[State]]
) -> Set[State]:
"""
Compute the epsilon closure of a state or a set of states.
Parameters
----------
state : Union[str, State, Iterable[str], Iterable[State]]
The state or a list of states.
Returns
-------
Set[State]
The epsilon closure of the state or a set of states.
Raises
------
ValueError
If any of the states does not exist.
"""
if isinstance(state, (str, State)):
return self._eps_closure_single(state)
whole_closure = set()
for current_state in state:
whole_closure.update(self._eps_closure_single(current_state))
return whole_closure
def _goto_single(self, state: Union[str, State], symbol: str) -> Set[State]:
"""
Compute the goto of a single state.
Parameters
----------
state : Union[str, State]
The state to compute the goto of.
symbol : str
The symbol to compute the goto of.
Returns
-------
Set[State]
The goto of the state.
Raises
------
ValueError
If the state does not exist.
"""
if isinstance(state, str):
if state not in self.states:
raise ValueError(f"No state {state} defined.")
state = self.states[state]
answer = set()
st_esp_closure = self.eps_closure(state)
for current_state in st_esp_closure:
for transition in current_state.transitions:
if not transition.is_epsilon and transition.check_condition(symbol):
answer.add(transition.to_state)
return answer
def goto(
self, state: Union[str, State, Iterable[str], Iterable[State]], symbol: str
) -> Set[State]:
"""
Compute the goto of a state or a set of states.
Parameters
----------
state : Union[str, State, Iterable[str], Iterable[State]]
The state or a list of states.
symbol : str
The symbol to compute the goto of.
Returns
-------
Set[State]
The goto of the state or a set of states.
Raises
------
ValueError
If any of the states does not exist.
"""
if isinstance(state, (str, State)):
return self._goto_single(state, symbol)
whole_goto = set()
for current_state in state:
whole_goto.update(self._goto_single(current_state, symbol))
return whole_goto
def to_dfa(self, dfa2nfa: bool = False) -> Union[Automata, Tuple[Automata, Dict]]:
"""
Convert the automata to a DFA.
Parameters
----------
dfa2nfa : bool
If True, the return value will be a tuple of the DFA and the dfa2nfa
dictionary, otherwise only the DFA will be returned. By default, False.
Returns
-------
Union[Automata, Tuple[Automata, Dict]]
The DFA.
"""
get_name = lambda states: "".join(sorted(x.name for x in states))
alphabet = self.alphabet
dfa = Automata(self.name)
start_state = self.eps_closure(self.start_states)
start_name = get_name(start_state)
        q_0 = dfa.add_state(
            start_name, start=True, end=any(s in self.end_states for s in start_state)
        )
dfa_to_nfa = {q_0: start_state}
visited = set()
non_visited = [q_0]
while non_visited:
new_non_visited = []
for current_state in non_visited:
if current_state in visited:
continue
visited.add(current_state)
for char in alphabet:
goto_states = self.goto(dfa_to_nfa[current_state], char)
if not goto_states:
continue
next_state = self.eps_closure(goto_states)
next_name = get_name(next_state)
if next_name not in dfa.states:
dfa_state = dfa.add_state(
next_name,
end=any(s in self.end_states for s in next_state),
)
dfa_to_nfa[dfa_state] = next_state
new_non_visited.append(dfa_state)
else:
dfa_state = dfa.states[next_name]
dfa.add_transition(current_state.name, next_name, char)
                    if dfa_state not in new_non_visited and dfa_state not in visited:
new_non_visited.append(dfa_state)
non_visited = new_non_visited
return dfa if not dfa2nfa else (dfa, dfa_to_nfa)
def run(
self,
input_: Iterable,
stop_at_end: bool = False,
) -> bool:
"""
Run the automata on the given input.
Parameters
----------
input_ : Iterable
The input to run the automata on.
stop_at_end : bool
Whether to stop the automata at the first end state encountered.
Returns
-------
bool
Whether the automata succeeded.
Raises
------
ValueError
If the automata has no start state.
"""
if not self.start_states:
raise ValueError("No start states defined.")
self._pos = 0
self._processes_idx = 0
self._input = input_
self._processes = [(st, self._pos) for st in self.start_states]
while self._processes:
stop = self._step()
if self._current_state in self.end_states:
if stop_at_end:
return True
if stop:
break
else:
return False
logging.debug(f"Final {self._processes_idx} {self._processes}")
return self._current_state in self.end_states
def _step(self):
self._current_state, self._pos = self._processes[self._processes_idx]
self._current_state.visited()
if self._pos > len(self._input):
self._processes.pop(self._processes_idx)
return False
new_processes = 0
logging.debug(f"{self._processes_idx} {self._processes}")
for transition in self._current_state.transitions:
if transition.is_epsilon or (
0 <= self._pos < len(self._input)
and transition.check_condition(self._input[self._pos])
):
run_state = (transition.to_state, self._pos + transition.action)
if new_processes == 0:
self._processes[self._processes_idx] = run_state
else:
self._processes.append(run_state)
new_processes += 1
if not new_processes:
self._processes.pop(self._processes_idx)
if self._processes:
self._processes_idx = (self._processes_idx + 1) % len(self._processes)
if self._pos >= len(self._input) or self._pos < 0:
return self._current_state in self.end_states
return False
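if __name__ == "__main__":
    # Minimal usage sketch (not part of the library API): build an NFA over {a, b}
    # that accepts strings ending in "ab", convert it to a DFA, and run it.
    nfa = Automata("ends_in_ab")
    nfa.add_state("q0", start=True)
    nfa.add_state("q1")
    nfa.add_state("q2", end=True)
    nfa.add_transition("q0", "q0", "a")
    nfa.add_transition("q0", "q0", "b")
    nfa.add_transition("q0", "q1", "a")
    nfa.add_transition("q1", "q2", "b")
    dfa = nfa.to_dfa()
    dfa.show()
    print(dfa.run("aab"))  # True: the string ends in "ab"
    print(dfa.run("aba"))  # False: the string ends in "a"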
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <[email protected]>
# Transformers based on tensorflow
import os
import pkg_resources
from bob.learn.tensorflow.utils.image import to_channels_last
from sklearn.base import TransformerMixin, BaseEstimator
from bob.extension.download import get_file
from sklearn.utils import check_array
import numpy as np
import tensorflow as tf
from bob.bio.face.utils import (
dnn_default_cropping,
embedding_transformer,
cropped_positions_arcface,
)
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
from bob.bio.face.annotator import BobIpMTCNN
def sanderberg_rescaling():
# FIXED_STANDARDIZATION from https://github.com/davidsandberg/facenet
# [-0.99609375, 0.99609375]
preprocessor = tf.keras.layers.experimental.preprocessing.Rescaling(
scale=1 / 128, offset=-127.5 / 128
)
return preprocessor
class TensorflowTransformer(TransformerMixin, BaseEstimator):
"""
Base Transformer for Tensorflow architectures.
Parameters
----------
checkpoint_path: str
Path containing the checkpoint
preprocessor:
A function that will transform the data right before forward
    memory_demanding: bool
        If `True`, the `transform` method will run one sample at a time.
        This is useful when there is not enough memory available to forward big chunks of data.
"""
def __init__(
self, checkpoint_path, preprocessor=None, memory_demanding=False, **kwargs
):
super().__init__(**kwargs)
self.checkpoint_path = checkpoint_path
self.model = None
self.preprocessor = preprocessor
self.memory_demanding = memory_demanding
def load_model(self):
self.model = tf.keras.models.load_model(self.checkpoint_path, compile=False)
def transform(self, X):
def _transform(X):
X = tf.convert_to_tensor(X)
X = to_channels_last(X)
if X.shape[-3:] != self.model.input_shape[-3:]:
raise ValueError(
f"Image shape {X.shape} not supported. Expected {self.model.input_shape}"
)
return self.inference(X).numpy()
if self.model is None:
self.load_model()
X = check_array(X, allow_nd=True)
if self.memory_demanding:
features = np.array([_transform(x[None, ...]) for x in X])
            # If ndim is > 3, we should stack them all.
            # The enroll_features can come from a source where there are `N` samples,
            # each containing n x d samples
if features.ndim >= 3:
features = np.vstack(features)
return features
else:
return _transform(X)
def __getstate__(self):
# Handling unpicklable objects
d = self.__dict__.copy()
d["model"] = None
return d
def inference(self, X):
if self.preprocessor is not None:
X = self.preprocessor(tf.cast(X, "float32"))
prelogits = self.model.predict_on_batch(X)
embeddings = tf.math.l2_normalize(prelogits, axis=-1)
return embeddings
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
def __del__(self):
self.model = None
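# Usage sketch (comments only): the concrete subclasses below are scikit-learn style
# transformers; given a batch of already-cropped face images, e.g.:
#
#   embedding = InceptionResnetv2_MsCeleb_CenterLoss_2018()
#   features = embedding.transform(images)  # one l2-normalized embedding per image
#
# Instantiating a subclass downloads and caches its checkpoint via `get_file`; the
# TensorFlow model itself is loaded lazily on the first `transform` call.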
class InceptionResnetv2_MsCeleb_CenterLoss_2018(TensorflowTransformer):
"""
InceptionResnet v2 model trained in 2018 using the MSCeleb dataset in the context of the work:
Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
]
filename = get_file(
"inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
urls,
cache_subdir="data/tensorflow/inceptionresnetv2_msceleb_centerloss_2018",
file_hash="7c0aa46bba16c01768a38594a3b4c14d",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(InceptionResnetv2_MsCeleb_CenterLoss_2018, self).__init__(
checkpoint_path,
preprocessor=tf.image.per_image_standardization,
memory_demanding=memory_demanding,
**kwargs,
)
class InceptionResnetv2_Casia_CenterLoss_2018(TensorflowTransformer):
"""
InceptionResnet v2 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz",
]
filename = get_file(
"inceptionresnetv2_casia_centerloss_2018.tar.gz",
urls,
cache_subdir="data/tensorflow/inceptionresnetv2_casia_centerloss_2018",
file_hash="1e0b62e45430a8d7516d7a6101a24c40",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(InceptionResnetv2_Casia_CenterLoss_2018, self).__init__(
checkpoint_path,
preprocessor=tf.image.per_image_standardization,
memory_demanding=memory_demanding,
**kwargs,
)
class InceptionResnetv1_Casia_CenterLoss_2018(TensorflowTransformer):
"""
InceptionResnet v1 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz",
]
filename = get_file(
"inceptionresnetv1_casia_centerloss_2018.tar.gz",
urls,
cache_subdir="data/tensorflow/inceptionresnetv1_casia_centerloss_2018",
file_hash="6601e6f6840ae863c7daf31a7c6b9a27",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(InceptionResnetv1_Casia_CenterLoss_2018, self).__init__(
checkpoint_path,
preprocessor=tf.image.per_image_standardization,
memory_demanding=memory_demanding,
**kwargs,
)
class InceptionResnetv1_MsCeleb_CenterLoss_2018(TensorflowTransformer):
"""
InceptionResnet v1 model trained in 2018 using the MsCeleb dataset in the context of the work:
Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
]
filename = get_file(
"inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
urls,
cache_subdir="data/tensorflow/inceptionresnetv1_msceleb_centerloss_2018",
file_hash="1ca0149619e4e9320a927ea65b2b5521",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(InceptionResnetv1_MsCeleb_CenterLoss_2018, self).__init__(
checkpoint_path,
preprocessor=tf.image.per_image_standardization,
memory_demanding=memory_demanding,
**kwargs,
)
class FaceNetSanderberg_20170512_110547(TensorflowTransformer):
"""
Wrapper for the free FaceNet from David Sanderberg model 20170512_110547:
https://github.com/davidsandberg/facenet
And for a preprocessor you can use::
from bob.bio.face.preprocessor import FaceCrop
# This is the size of the image that this model expects
CROPPED_IMAGE_HEIGHT = 160
CROPPED_IMAGE_WIDTH = 160
# eye positions for frontal images
RIGHT_EYE_POS = (46, 53)
LEFT_EYE_POS = (46, 107)
# Crops the face using eye annotations
preprocessor = FaceCrop(
cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
cropped_positions={'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
color_channel='rgb'
)
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/facenet_sanderberg_20170512_110547.tar.gz"
]
filename = get_file(
"facenet_sanderberg_20170512_110547.tar.gz",
urls,
cache_subdir="data/tensorflow/facenet_sanderberg_20170512_110547",
file_hash="734d1c997c10acdcdffc79fb51a2e715",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(FaceNetSanderberg_20170512_110547, self).__init__(
checkpoint_path,
tf.image.per_image_standardization,
memory_demanding=memory_demanding,
**kwargs,
)
class Resnet50_MsCeleb_ArcFace_2021(TensorflowTransformer):
"""
Resnet50 Backbone trained with the MSCeleb 1M database.
The bottleneck layer (a.k.a embedding) has 512d.
The configuration file used to trained is:
.. warning::
This configuration file might change in future releases
```yaml
batch-size: 128
face-size: 112
face-output_size: 112
n-classes: 85742
## Backbone
backbone: 'resnet50'
head: 'arcface'
s: 10
bottleneck: 512
m: 0.5
# Training parameters
solver: "sgd"
lr: 0.1
dropout-rate: 0.5
epochs: 500
train-tf-record-path: "<PATH>"
validation-tf-record-path: "<PATH>"
```
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50-msceleb-arcface_2021-48ec5cb8.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50-msceleb-arcface_2021-48ec5cb8.tar.gz",
]
filename = get_file(
"resnet50-msceleb-arcface_2021-48ec5cb8.tar.gz",
urls,
cache_subdir="data/tensorflow/resnet50-msceleb-arcface_2021-48ec5cb8",
file_hash="17946f121af5ddd18c637c4620e54da6",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(Resnet50_MsCeleb_ArcFace_2021, self).__init__(
checkpoint_path,
preprocessor=lambda X: X / 255.0,
memory_demanding=memory_demanding,
**kwargs,
)
class Resnet50_MsCeleb_ArcFace_20210521(TensorflowTransformer):
"""
Resnet50 Backbone trained with the MSCeleb 1M database. The bottleneck layer (a.k.a embedding) has 512d.
The difference from this one to :any:`Resnet50_MsCeleb_ArcFace_2021` is the MSCeleb version used to train it.
This one uses 100% of the data pruned from annotators.
The configuration file used to trained is:
.. warning::
This configuration file might change in future releases
```yaml
batch-size: 128
face-size: 112
face-output_size: 112
n-classes: 83009
## Backbone
backbone: 'resnet50'
head: 'arcface'
s: 30
bottleneck: 512
m: 0.5
# Training parameters
solver: "sgd"
lr: 0.1
dropout-rate: 0.5
epochs: 300
train-tf-record-path: "<PATH>"
validation-tf-record-path: "<PATH>"
```
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50-msceleb-arcface_20210521-e9bc085c.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50-msceleb-arcface_20210521-e9bc085c.tar.gz",
]
filename = get_file(
"resnet50-msceleb-arcface_20210521-e9bc085c.tar.gz",
urls,
cache_subdir="data/tensorflow/resnet50-msceleb-arcface_20210521-801991f0",
file_hash="e33090eea4951ce80be4620a0dac680d",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(Resnet50_MsCeleb_ArcFace_20210521, self).__init__(
checkpoint_path,
preprocessor=lambda X: X / 255.0,
memory_demanding=memory_demanding,
**kwargs,
)
class Resnet101_MsCeleb_ArcFace_20210521(TensorflowTransformer):
"""
Resnet101 Backbone trained with the MSCeleb 1M database. The bottleneck layer (a.k.a embedding) has 512d.
The difference from this one to :any:`Resnet101_MsCeleb_ArcFace_2021` is the MSCeleb version used to train it.
This one uses 100% of the data pruned from annotators.
The configuration file used to trained is:
.. warning::
This configuration file might change in future releases
```yaml
batch-size: 128
face-size: 112
face-output_size: 112
n-classes: 83009
## Backbone
backbone: 'resnet50'
head: 'arcface'
s: 30
bottleneck: 512
m: 0.5
# Training parameters
solver: "sgd"
lr: 0.1
dropout-rate: 0.5
epochs: 300
train-tf-record-path: "<PATH>"
validation-tf-record-path: "<PATH>"
```
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet101-msceleb-arcface_20210521.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet101-msceleb-arcface_20210521.tar.gz",
]
filename = get_file(
"resnet101-msceleb-arcface_20210521.tar.gz",
urls,
cache_subdir="data/tensorflow/resnet101-msceleb-arcface_20210521",
file_hash="c1b2124cb69186ff965f7e818f9f8641",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(Resnet101_MsCeleb_ArcFace_20210521, self).__init__(
checkpoint_path,
preprocessor=lambda X: X / 255.0,
memory_demanding=memory_demanding,
**kwargs,
)
class IResnet50_MsCeleb_ArcFace_20210623(TensorflowTransformer):
"""
IResnet50 Backbone trained with the MSCeleb 1M database. The bottleneck layer (a.k.a embedding) has 512d.
The complete code to reproduce this model is in the (private) repository:
bob.project.hardening/-/commit/9ac25c0a17c9628b7a99e84217cd7c680f1a3e1e
but you can reproduce it using ``cnn_training/arcface_large_batch.py`` script and the following configuration::
CONFIG = {
"n-workers": 8,
"batch-size": 256,
"n-train-samples-per-epoch": 256_000 * 1,
"real-n-train-samples": 985702,
"shuffle-buffer": int(1e6),
"face-size": 126,
"face-output_size": 112,
"n-classes": 83009,
"backbone": "resnet50_large_batch",
"use-l2-regularizer": False,
"batch-norm-decay": 0.9,
"batch-norm-epsilon": 1e-5,
"head": "arcface",
"s": 30,
"bottleneck": 512,
"m": 0.5,
"dropout-rate": 0.0,
"learning-rate-schedule": "none",
"train-tf-record-path": "/face-tfrecords/126x126/msceleb_facecrop/*.tfrecords",
"validation-tf-record-path": "/face-tfrecords/126x126/lfw_sharded/*.tfrecords",
"checkpoint-path": "/temp/hardening/arcface_sgd_prelu/w8_b1000_fp16_drp0",
"pre-train": False,
"epochs": 6000,
}
strategy_fn = "multi-worker-mirrored-strategy"
mixed_precision_policy = "mixed_float16"
initial_lr = 0.1 / 512 * CONFIG["batch-size"] * CONFIG["n-workers"]
real_n_steps_per_epoch = CONFIG["real-n-train-samples"] / (CONFIG["batch-size"] * CONFIG["n-workers"])
params = {
"optimizer": {
"type": "sgdw",
"sgdw": {
"momentum": min(0.9 * initial_lr, 0.999),
"nesterov": False,
"weight_decay": 5e-4,
},
},
"learning_rate": {
"type": "stepwise",
"stepwise": {
"boundaries": [int(i * real_n_steps_per_epoch) for i in [11, 17, 22]],
"values": [initial_lr / (10 ** i) for i in range(0, 4)],
},
},
}
The tensorboard logs can be found in: https://tensorboard.dev/experiment/6bBn0ya3SeilJ2elcZZoSg
The model at epoch 90 is used.
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/arcface_iresnet50_msceleb_idiap-089640d2.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/arcface_iresnet50_msceleb_idiap-089640d2.tar.gz",
]
filename = get_file(
"arcface_iresnet50_msceleb_idiap-089640d2.tar.gz",
urls,
cache_subdir="data/tensorflow/arcface_iresnet50_msceleb_idiap-089640d2",
file_hash="089640d2",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super().__init__(
checkpoint_path,
preprocessor=lambda X: X / 255.0,
memory_demanding=memory_demanding,
**kwargs,
)
class IResnet100_MsCeleb_ArcFace_20210623(TensorflowTransformer):
"""
IResnet100 Backbone trained with the MSCeleb 1M database. The bottleneck layer (a.k.a embedding) has 512d.
The complete code to reproduce this model is in the (private) repository:
bob.project.hardening/-/commit/b162ca60d26fcf8a93f6767f5b5a026a406c1076
but you can reproduce it using ``cnn_training/arcface_large_batch.py`` script and the following configuration::
CONFIG = {
"n-workers": 8,
"batch-size": 128,
"n-train-samples-per-epoch": 256_000 * 1,
"real-n-train-samples": 985702,
"shuffle-buffer": int(1e5),
"face-size": 126,
"face-output_size": 112,
"n-classes": 83009,
"backbone": "iresnet100",
"use-l2-regularizer": False,
"batch-norm-decay": 0.9,
"batch-norm-epsilon": 1e-5,
"head": "arcface",
"s": 30,
"bottleneck": 512,
"m": 0.5,
"dropout-rate": 0.0,
"learning-rate-schedule": "none",
"train-tf-record-path": "/face-tfrecords/126x126/msceleb_facecrop/*.tfrecords",
"validation-tf-record-path": "/face-tfrecords/126x126/lfw_sharded/*.tfrecords",
"checkpoint-path": "/temp/hardening/arcface_sgd_prelu/i100_w8_b128_fp16_drp0",
"pre-train": False,
"epochs": 6000,
}
strategy_fn = "multi-worker-mirrored-strategy"
mixed_precision_policy = "mixed_float16"
initial_lr = 0.1 / 512 * CONFIG["batch-size"] * CONFIG["n-workers"]
real_n_steps_per_epoch = CONFIG["real-n-train-samples"] / (CONFIG["batch-size"] * CONFIG["n-workers"])
params = {
"optimizer": {
"type": "sgdw",
"sgdw": {
"momentum": min(0.9 * initial_lr, 0.999),
"nesterov": False,
"weight_decay": 5e-4,
},
},
"learning_rate": {
# with ReduceLROnPlateau callback
"type": "constant",
"constant": {
"learning_rate": initial_lr,
}
},
}
The tensorboard logs can be found in: https://tensorboard.dev/experiment/HYJTPiowRMa36VZHDLJqdg/
The model is saved based on best ``epoch_embeddings_embedding_accuracy``, epoch 51
"""
def __init__(self, memory_demanding=False):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/arcface_iresnet100_msceleb_idiap-1b22d544.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/arcface_iresnet100_msceleb_idiap-1b22d544.tar.gz",
]
filename = get_file(
"arcface_iresnet100_msceleb_idiap-1b22d544.tar.gz",
urls,
cache_subdir="data/tensorflow/arcface_iresnet100_msceleb_idiap-1b22d544",
file_hash="1b22d544",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super().__init__(
checkpoint_path,
preprocessor=lambda X: X / 255.0,
memory_demanding=memory_demanding,
)
class Resnet50_VGG2_ArcFace_2021(TensorflowTransformer):
"""
Resnet50 Backbone trained with the VGG2 database.
The bottleneck layer (a.k.a embedding) has 512d.
The configuration file used to trained is:
.. warning::
This configuration file might change in future releases
```yaml
batch-size: 128
face-size: 112
face-output_size: 112
n-classes: 8631
## Backbone
backbone: 'resnet50'
head: 'arcface'
s: 64
bottleneck: 512
m: 0.5
# Training parameters
solver: "sgd"
lr: 0.1
dropout-rate: 0.5
epochs: 1047
train-tf-record-path: "<PATH>"
validation-tf-record-path: "<PATH>"
```
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz",
]
filename = get_file(
"resnet50_vgg2_arcface_2021.tar.gz",
urls,
cache_subdir="data/tensorflow/resnet50_vgg2_arcface_2021",
file_hash="64f89c8cb55e7a0d9c7e13ff412b6a13",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(Resnet50_VGG2_ArcFace_2021, self).__init__(
checkpoint_path,
preprocessor=lambda X: X / 255.0,
memory_demanding=memory_demanding,
**kwargs,
)
def inference(self, X):
if self.preprocessor is not None:
X = self.preprocessor(tf.cast(X, "float32"))
prelogits = self.model.predict_on_batch(X)
embeddings = tf.math.l2_normalize(prelogits, axis=-1)
return embeddings
class MobileNetv2_MsCeleb_ArcFace_2021(TensorflowTransformer):
"""
MobileNet Backbone trained with the MSCeleb 1M database.
The bottleneck layer (a.k.a embedding) has 512d.
The configuration file used to trained is:
.. warning::
This configuration file might change in future releases
```yaml
batch-size: 128
face-size: 112
face-output_size: 112
n-classes: 85742
## Backbone
backbone: 'mobilenet-v2'
head: 'arcface'
s: 10
bottleneck: 512
m: 0.5
# Training parameters
solver: "sgd"
lr: 0.01
dropout-rate: 0.5
epochs: 500
train-tf-record-path: "<PATH>"
validation-tf-record-path: "<PATH>"
```
"""
def __init__(self, memory_demanding=False, **kwargs):
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/mobilenet-v2-msceleb-arcface-2021-e012cb66.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/mobilenet-v2-msceleb-arcface-2021-e012cb66.tar.gz",
]
filename = get_file(
"mobilenet-v2-msceleb-arcface-2021-e012cb66.tar.gz",
urls,
cache_subdir="data/tensorflow/mobilenet-v2-msceleb-arcface-2021-e012cb66",
file_hash="dd1399b86f01725c7b07b480b703e02a",
extract=True,
)
checkpoint_path = os.path.dirname(filename)
super(MobileNetv2_MsCeleb_ArcFace_2021, self).__init__(
checkpoint_path,
preprocessor=lambda X: X / 255.0,
memory_demanding=memory_demanding,
**kwargs,
)
def facenet_template(embedding, annotation_type, fixed_positions=None):
"""
Facenet baseline template.
This one will crop the face at :math:`160 \\times 160`
Parameters
----------
embedding: obj
Transformer that takes a cropped face and extract the embeddings
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
"""
# DEFINE CROPPING
cropped_image_size = (160, 160)
if annotation_type == "eyes-center" or annotation_type == "bounding-box":
# Hard coding eye positions for backward consistency
# cropped_positions = {
cropped_positions = dnn_default_cropping(
cropped_image_size, annotation_type="eyes-center"
)
if annotation_type == "bounding-box":
# This will allow us to use `BoundingBoxAnnotatorCrop`
cropped_positions.update(
{"topleft": (0, 0), "bottomright": cropped_image_size}
)
else:
cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
annotator = BobIpMTCNN(min_size=40, factor=0.709, thresholds=(0.1, 0.2, 0.2))
# ASSEMBLE TRANSFORMER
transformer = embedding_transformer(
cropped_image_size=cropped_image_size,
embedding=embedding,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
color_channel="rgb",
annotator=annotator,
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
def resnet_template(embedding, annotation_type, fixed_positions=None):
# DEFINE CROPPING
# cropped_image_size = (112, 112)
# if annotation_type == "eyes-center":
# # Hard coding eye positions for backward consistency
# cropped_positions = cropped_positions_arcface()
# else:
# cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
# DEFINE CROPPING
cropped_image_size = (112, 112)
if annotation_type == "eyes-center" or annotation_type == "bounding-box":
# Hard coding eye positions for backward consistency
# cropped_positions = {
cropped_positions = cropped_positions_arcface()
if annotation_type == "bounding-box":
# This will allow us to use `BoundingBoxAnnotatorCrop`
cropped_positions.update(
{"topleft": (0, 0), "bottomright": cropped_image_size}
)
else:
cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
annotator = BobIpMTCNN(min_size=40, factor=0.709, thresholds=(0.1, 0.2, 0.2))
transformer = embedding_transformer(
cropped_image_size=cropped_image_size,
embedding=embedding,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
color_channel="rgb",
annotator="mtcnn",
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
def resnet50_msceleb_arcface_2021(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the Resnet50 pipeline which will crop the face :math:`112 \\times 112` and
use the :py:class:`Resnet50_MsCeleb_ArcFace_2021` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return resnet_template(
embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def resnet50_msceleb_arcface_20210521(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the Resnet50 pipeline which will crop the face :math:`112 \\times 112` and
use the :py:class:`Resnet50_MsCeleb_ArcFace_20210521` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return resnet_template(
embedding=Resnet50_MsCeleb_ArcFace_20210521(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def resnet101_msceleb_arcface_20210521(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
    Get the Resnet101 pipeline which will crop the face :math:`112 \\times 112` and
    use the :py:class:`Resnet101_MsCeleb_ArcFace_20210521` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return resnet_template(
embedding=Resnet101_MsCeleb_ArcFace_20210521(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def iresnet50_msceleb_arcface_20210623(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the iresnet50 pipeline which will crop the face :math:`112 \\times 112` and
use the :py:class:`IResnet50_MsCeleb_ArcFace_20210623` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return resnet_template(
embedding=IResnet50_MsCeleb_ArcFace_20210623(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def iresnet100_msceleb_arcface_20210623(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the iresnet100 pipeline which will crop the face :math:`112 \\times 112` and
use the :py:class:`IResnet100_MsCeleb_ArcFace_20210623` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return resnet_template(
embedding=IResnet100_MsCeleb_ArcFace_20210623(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def resnet50_vgg2_arcface_2021(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the Resnet50 pipeline which will crop the face :math:`112 \\times 112` and
use the :py:class:`Resnet50_VGG2_ArcFace_2021` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return resnet_template(
embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def mobilenetv2_msceleb_arcface_2021(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the MobileNet pipeline which will crop the face :math:`112 \\times 112` and
use the :py:class:`MobileNetv2_MsCeleb_ArcFace_2021` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return resnet_template(
embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def facenet_sanderberg_20170512_110547(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the Facenet pipeline which will crop the face :math:`160 \\times 160` and
use the :py:class:`FaceNetSanderberg_20170512_110547` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return facenet_template(
embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def inception_resnet_v1_casia_centerloss_2018(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the Inception Resnet v1 pipeline which will crop the face :math:`160 \\times 160` and
use the :py:class:`InceptionResnetv1_Casia_CenterLoss_2018` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return facenet_template(
embedding=InceptionResnetv1_Casia_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def inception_resnet_v2_casia_centerloss_2018(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the Inception Resnet v2 pipeline which will crop the face :math:`160 \\times 160` and
use the :py:class:`InceptionResnetv2_Casia_CenterLoss_2018` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return facenet_template(
embedding=InceptionResnetv2_Casia_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def inception_resnet_v1_msceleb_centerloss_2018(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the Inception Resnet v1 pipeline which will crop the face :math:`160 \\times 160` and
use the :py:class:`InceptionResnetv1_MsCeleb_CenterLoss_2018` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return facenet_template(
embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
def inception_resnet_v2_msceleb_centerloss_2018(
annotation_type, fixed_positions=None, memory_demanding=False
):
"""
Get the Inception Resnet v2 pipeline which will crop the face :math:`160 \\times 160` and
use the :py:class:`InceptionResnetv2_MsCeleb_CenterLoss_2018` to extract the features
Parameters
----------
annotation_type: str
Type of the annotations (e.g. `eyes-center')
fixed_positions: dict
Set it if in your face images are registered to a fixed position in the image
memory_demanding: bool
"""
return facenet_template(
embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
|
from .average_meter import AverageMeter
from .early_stopping import EarlyStopping
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Simple multi-layer perceptron model.
"""
import logging
from neon.backends.backend import Block
from neon.models.model import Model
from neon.util.param import opt_param, req_param
logger = logging.getLogger(__name__)
class MLP(Model):
"""
Fully connected, feed-forward, multi-layer perceptron model
"""
def __init__(self, **kwargs):
self.initialized = False
self.__dict__.update(kwargs)
req_param(self, ['layers', 'batch_size'])
opt_param(self, ['step_print'], -1)
opt_param(self, ['accumulate'], False)
opt_param(self, ['reuse_deltas'], True)
opt_param(self, ['timing_plots'], False)
opt_param(self, ['serialize_schedule'])
def link(self, initlayer=None):
for ll, pl in zip(self.layers, [initlayer] + self.layers[:-1]):
ll.set_previous_layer(pl)
self.print_layers()
def initialize(self, backend, initlayer=None):
self.data_layer = self.layers[0]
self.cost_layer = self.layers[-1]
self.class_layer = self.layers[-2]
if not hasattr(self.cost_layer, 'ref_layer'):
self.cost_layer.ref_layer = self.data_layer
if self.initialized:
return
self.backend = backend
kwargs = {"backend": self.backend, "batch_size": self.batch_size,
"accumulate": self.accumulate}
for ll, pl in zip(self.layers, [initlayer] + self.layers[:-1]):
ll.initialize(kwargs)
self.nin_max = max(map(lambda x: x.nin, self.layers[1:-1]))
self.global_deltas = None
if self.reuse_deltas:
self.global_deltas = backend.zeros(
(2 * self.nin_max, self.batch_size),
dtype=self.layers[1].deltas_dtype)
self.global_deltas.name = "delta_pool"
for idx, ll in enumerate(self.layers[1:-1]):
ll.set_deltas_buf(self.global_deltas,
offset=((idx % 2) * self.nin_max))
self.initialized = True
# Make some scratch space for NervanaGPU backend:
if self.backend.__module__ == 'neon.backends.gpu':
self.backend.init_mempool((1, self.batch_size),
dtype=self.layers[1].deltas_dtype)
def fprop(self):
for ll, pl in zip(self.layers, [None] + self.layers[:-1]):
y = None if pl is None else pl.output
ll.fprop(y)
def bprop(self):
for ll, nl in zip(reversed(self.layers),
reversed(self.layers[1:] + [None])):
error = None if nl is None else nl.deltas
ll.bprop(error)
def print_layers(self, debug=False):
printfunc = logger.debug if debug else logger.info
netdesc = 'Layers:\n'
for layer in self.layers:
netdesc += '\t' + str(layer) + '\n'
printfunc("%s", netdesc)
def update(self, epoch):
for layer in self.layers:
layer.update(epoch)
def get_classifier_output(self):
return self.class_layer.output
def print_training_error(self, error, num_batches, partial=False):
rederr = self.backend.reduce_tensor(error)
if self.backend.rank() != 0:
return
if partial is True:
assert self.step_print != 0
logger.info('%d:%d training error: %0.5f', self.epochs_complete,
num_batches / self.step_print,
rederr)
else:
errorval = rederr / num_batches
logger.info('epoch: %d, training error: %0.5f',
self.epochs_complete,
errorval)
def print_test_error(self, setname, misclass, nrecs):
redmisclass = self.backend.reduce_tensor(misclass)
if self.backend.rank() != 0:
return
misclassval = redmisclass / nrecs
logging.info("%s set misclass rate: %0.5f%%",
setname, 100. * misclassval)
def fit(self, dataset):
"""
Learn model weights on the given datasets.
"""
error = self.backend.zeros((1, 1), dtype=self.cost_layer.weight_dtype)
self.data_layer.init_dataset(dataset)
self.data_layer.use_set('train')
logger.info('commencing model fitting')
while self.epochs_complete < self.num_epochs:
self.backend.begin(Block.epoch, self.epochs_complete)
error.fill(0.0)
mb_id = 1
self.data_layer.reset_counter()
while self.data_layer.has_more_data():
self.backend.begin(Block.minibatch, mb_id)
self.backend.begin(Block.fprop, mb_id)
self.fprop()
self.backend.end(Block.fprop, mb_id)
self.backend.begin(Block.bprop, mb_id)
self.bprop()
self.backend.end(Block.bprop, mb_id)
self.backend.begin(Block.update, mb_id)
self.update(self.epochs_complete)
self.backend.end(Block.update, mb_id)
if self.step_print > 0 and mb_id % self.step_print == 0:
self.print_training_error(self.cost_layer.get_cost(),
mb_id, partial=True)
self.backend.add(error, self.cost_layer.get_cost(), error)
self.backend.end(Block.minibatch, mb_id)
mb_id += 1
self.epochs_complete += 1
self.print_training_error(error, self.data_layer.num_batches)
self.print_layers(debug=True)
self.backend.end(Block.epoch, self.epochs_complete - 1)
self.save_snapshot()
self.data_layer.cleanup()
def set_train_mode(self, mode):
for ll in self.layers:
ll.set_train_mode(mode)
def predict_generator(self, dataset, setname):
"""
        Generate predictions and true labels for the given dataset, one
mini-batch at a time.
        Arguments:
dataset: A neon dataset instance
setname: Which set to compute predictions for (test, train, val)
Returns:
tuple: on each call will yield a 2-tuple of outputs and references.
The first item is the model probabilities for each class,
and the second item is either the one-hot or raw labels with
ground truth.
See Also:
predict_fullset
"""
self.data_layer.init_dataset(dataset)
assert self.data_layer.has_set(setname)
self.data_layer.use_set(setname, predict=True)
self.data_layer.reset_counter()
nrecs = self.batch_size * 1
outputs = self.backend.empty((self.class_layer.nout, nrecs))
if self.data_layer.has_labels:
reference = self.backend.empty((1, nrecs))
else:
reference = self.backend.empty(outputs.shape)
while self.data_layer.has_more_data():
self.fprop()
outputs = self.get_classifier_output()
reference = self.cost_layer.get_reference()
yield (outputs, reference)
self.data_layer.cleanup()
def predict_fullset(self, dataset, setname):
"""
        Generate predictions and true labels for the given dataset.
Note that this requires enough memory to house the predictions and
labels for the entire dataset at one time (not recommended for large
datasets, see predict_generator instead).
        Arguments:
dataset: A neon dataset instance
setname: Which set to compute predictions for (test, train, val)
Returns:
            tuple: a 2-tuple of outputs and references for the entire dataset.
The first item is the model probabilities for each class,
and the second item is either the one-hot or raw labels with
ground truth.
See Also:
predict_generator
"""
self.data_layer.init_dataset(dataset)
assert self.data_layer.has_set(setname)
self.data_layer.use_set(setname, predict=True)
nrecs = self.batch_size * self.data_layer.num_batches
outputs = self.backend.empty((self.class_layer.nout, nrecs))
if self.data_layer.has_labels:
reference = self.backend.empty((1, nrecs))
else:
reference = self.backend.empty(outputs.shape)
batch = 0
for batch_preds, batch_refs in self.predict_generator(dataset,
setname):
start = batch * self.batch_size
end = start + self.batch_size
outputs[:, start:end] = batch_preds
reference[:, start:end] = batch_refs
batch += 1
return outputs, reference
def predict_live_init(self, dataset):
self.data_layer.init_dataset(dataset)
for ll in self.layers:
ll.set_train_mode(False)
def predict_live(self):
self.fprop()
return self.get_classifier_output()
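# Rough usage sketch (comments only): in neon, an experiment configuration normally
# builds the backend, layer list, and dataset; the call sequence on this model is then:
#
#   model = MLP(layers=layers, batch_size=128, num_epochs=10, epochs_complete=0)
#   model.link()
#   model.initialize(backend)
#   model.fit(dataset)
#   outputs, reference = model.predict_fullset(dataset, 'test')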
|
import numpy as np
from src.data import Problem, Case, Matter
class TrimBackground:
"""pick first matter and trim background"""
def __init__(self):
pass
@classmethod
def array(cls, x_arr, background=0):
"""
:param x_arr: np.array(int), array to trim
:param background: int, must be one of 0-9
:return: List[np.array(np.int)]
"""
x_sum = (x_arr != background).sum(axis=1)
y_sum = (x_arr != background).sum(axis=0)
if x_sum.sum() == 0:
return x_arr.copy(), (0, 0)
min_x = min([i for i in range(x_arr.shape[0]) if x_sum[i]])
max_x = max([i for i in range(x_arr.shape[0]) if x_sum[i]])
min_y = min([i for i in range(x_arr.shape[1]) if y_sum[i]])
max_y = max([i for i in range(x_arr.shape[1]) if y_sum[i]])
new_values = x_arr[min_x:max_x + 1, min_y:max_y + 1].copy()
return new_values, (0, 0)
@classmethod
def matter(cls, m: Matter) -> Matter:
new_values, xy0 = cls.array(m.values, m.background_color)
return Matter(new_values, 0, 0, m.background_color, new=True)
@classmethod
def case(cls, c: Case) -> Case:
new_case = c.copy()
new_case.matter_list = [cls.matter(c.matter_list[0])]
new_case.shape = new_case.matter_list[0].shape
return new_case
@classmethod
def problem(cls, p: Problem) -> Problem:
q: Problem = p.copy()
q.train_x_list = [cls.case(c) for c in p.train_x_list]
q.test_x_list = [cls.case(c) for c in p.test_x_list]
return q
if __name__ == "__main__":
x = np.array([[3, 2, 0], [0, 1, 0], [0, 0, 0]])
print(TrimBackground.array(x))
|
from .sector import Sector
from github import Github
import re
import numpy as np
class Repository(Sector):
def __init__(self, currency, github_api, github_url, **kwargs):
super(Repository, self).__init__(currency, **kwargs)
self.type = 'repository'
self.github_url = self.sanitize_url(github_url)
self.github_api = github_api
self.generate_repository_data()
def sanitize_url(self, github_url_raw):
github_url = re.sub('https://github.com/', '', github_url_raw)
github_url_list = github_url.split('/')
github_string = "/".join(github_url_list[:2])
return github_string
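    # For example (hypothetical URL), sanitize_url('https://github.com/owner/repo/issues/42')
    # returns 'owner/repo', the "full name" form that PyGithub's get_repo() expects.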
def generate_repository_data(self):
github_object = Github(self.github_api)
repo = github_object.get_repo(self.github_url)
contributors = repo.get_contributors()
stats_contributors = repo.get_stats_contributors()
contributor_list = [contributor.total for contributor in stats_contributors]
self.data = np.array(contributor_list)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.core.management import call_command
fixture = 'licences'
def load_fixture(apps, schema_editor):
call_command('loaddata', fixture, app_label='editor')
def unload_fixture(apps, schema_editor):
Licence = apps.get_model("editor", "Licence")
Licence.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('editor', '0002_add_licence'),
]
operations = [
migrations.RunPython(load_fixture, reverse_code=unload_fixture),
]
|
from datetime import datetime, timedelta
import rest_framework
from rest_framework import serializers
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
from rest_framework import status
from django.contrib.auth import get_user_model
from django.conf import settings
import jwt
from .serializers import UserRegisterSerializer
User = get_user_model()
class RegisterView(APIView):
def post(self, request):
user_to_create = UserRegisterSerializer(data=request.data)
if user_to_create.is_valid():
user_to_create.save()
return Response(
{'message': 'Registration Successful'},
status=status.HTTP_201_CREATED
)
return Response(user_to_create.errors, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
class LoginView(APIView):
def post(self, request):
email = request.data.get('email')
password = request.data.get('password')
try:
user_to_login = User.objects.get(email=email)
except User.DoesNotExist:
raise PermissionDenied(detail='Unauthorized')
if not user_to_login.check_password(password):
raise PermissionDenied(detail='Unauthorized')
expiry_time = datetime.now() + timedelta(days=7)
token = jwt.encode(
            {'sub': user_to_login.id, 'exp': int(expiry_time.timestamp())},
settings.SECRET_KEY,
algorithm='HS256'
)
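        # A minimal verification sketch (not part of the original view) showing how
        # this token could later be checked, e.g. in an authentication class:
        #   payload = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])
        #   payload['sub']  # -> the user's primary key stored above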
return Response({
'token': token,
'message': f'Welcome back {user_to_login.username}'
}, status=status.HTTP_200_OK) |
# -*- coding: utf-8 -*-
import click
from aiida.utils.cli import command
from aiida.utils.cli import options
from aiida_quantumespresso.utils.cli import options as options_qe
from aiida_quantumespresso.utils.cli import validate
@command()
@options.code(callback_kwargs={'entry_point': 'quantumespresso.pw'})
@options.structure()
@options.pseudo_family()
@options.kpoint_mesh()
@options.max_num_machines()
@options.max_wallclock_seconds()
@options.daemon()
@options_qe.ecutwfc()
@options_qe.ecutrho()
@options_qe.hubbard_u()
@options_qe.hubbard_v()
@options_qe.hubbard_file()
@options_qe.starting_magnetization()
@options_qe.smearing()
@options_qe.automatic_parallelization()
@options_qe.clean_workdir()
@click.option(
'-f', '--final-scf', is_flag=True, default=False, show_default=True,
help='run a final scf calculation for the final relaxed structure'
)
@click.option(
'-g', '--group', type=click.STRING, required=False,
help='the label of a Group to add the final PwCalculation to in case of success'
)
def launch(
code, structure, pseudo_family, kpoints, max_num_machines, max_wallclock_seconds, daemon, ecutwfc, ecutrho,
hubbard_u, hubbard_v, hubbard_file_pk, starting_magnetization, smearing, automatic_parallelization, clean_workdir,
final_scf, group):
"""
Run the PwRelaxWorkChain for a given input structure
"""
from aiida.orm.data.base import Bool, Str
from aiida.orm.data.parameter import ParameterData
from aiida.orm.utils import WorkflowFactory
from aiida.work.launch import run, submit
from aiida_quantumespresso.utils.resources import get_default_options, get_automatic_parallelization_options
PwRelaxWorkChain = WorkflowFactory('quantumespresso.pw.relax')
parameters = {
'SYSTEM': {
'ecutwfc': ecutwfc,
'ecutrho': ecutrho,
},
}
try:
hubbard_file = validate.validate_hubbard_parameters(structure, parameters, hubbard_u, hubbard_v, hubbard_file_pk)
except ValueError as exception:
raise click.BadParameter(exception.message)
try:
validate.validate_starting_magnetization(structure, parameters, starting_magnetization)
except ValueError as exception:
raise click.BadParameter(exception.message)
try:
validate.validate_smearing(parameters, smearing)
except ValueError as exception:
raise click.BadParameter(exception.message)
inputs = {
'structure': structure,
'base': {
'code': code,
'pseudo_family': Str(pseudo_family),
'kpoints': kpoints,
'parameters': ParameterData(dict=parameters),
}
}
if automatic_parallelization:
automatic_parallelization = get_automatic_parallelization_options(max_num_machines, max_wallclock_seconds)
inputs['base']['automatic_parallelization'] = ParameterData(dict=automatic_parallelization)
else:
options = get_default_options(max_num_machines, max_wallclock_seconds)
inputs['base']['options'] = ParameterData(dict=options)
if clean_workdir:
inputs['clean_workdir'] = Bool(True)
if final_scf:
inputs['final_scf'] = Bool(True)
if group:
inputs['group'] = Str(group)
if daemon:
workchain = submit(PwRelaxWorkChain, **inputs)
click.echo('Submitted {}<{}> to the daemon'.format(PwRelaxWorkChain.__name__, workchain.pk))
else:
run(PwRelaxWorkChain, **inputs)
|
import os
import time
import tkinter
import sqlite3
import json
import smtplib
import locale
import requests
import threading
import html2text as h2t
from tkinter import *
from tkinter.ttk import *
from tkinter import filedialog
from tkinter import messagebox
from email import encoders
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
version = "v1.4" # DO NOT CHANGE
print('Starting Lite Mails {} \n'.format(version))
file = None
toopen = None
# Window initialization
window = Tk()
window.title('Lite Mails {}'.format(version))  # Window title
window.geometry('460x425')  # Window size
window.resizable(False, False)  # Disable resizing
window.style = Style()
window.style.theme_use('vista')
langsel = IntVar()
datesel = IntVar()
timesel = IntVar()
destination = StringVar()
subject = StringVar()
email = StringVar()
password = StringVar()
# Database initialization
db = sqlite3.connect("config.db")
c = db.cursor()
# Table account
try:
c.execute("SELECT email FROM account")
print('Row "email" and table "account" loaded!')
except:
try:
c.execute("UPDATE account SET email = ? WHERE id = ? ", (None, 0))
db.commit()
print('Row "email" and table "account" loaded!')
except:
try:
c.execute("INSERT INTO account(email) VALUES(?)", (None))
db.commit()
print('Row "email" created and table "account" loaded!')
except:
c.execute("CREATE TABLE account(email TEXT, password TEXT, id INTEGER)")
db.commit()
c.execute("INSERT INTO account(email, password, id) VALUES(?,?,?)", (None, None, 0))
db.commit()
print('\nTable "account" created!')
print('Row "email" and table "account" loaded!')
try:
c.execute("SELECT password FROM account")
print('Row "password" and table "account" loaded!')
except:
try:
c.execute("UPDATE account SET password = ? WHERE id = ? ", (None, 0))
db.commit()
print('Row "password" and table "account" loaded!')
except:
try:
c.execute("INSERT INTO account(password) VALUES(?)", (None))
db.commit()
print('Row "password" created and table "account" loaded!')
except:
c.execute("CREATE TABLE account(email TEXT, password TEXT, id INTEGER)")
db.commit()
c.execute("INSERT INTO account(email, password, id) VALUES(?,?,?)", (None, None, 0))
db.commit()
print('\nTable "account" created!')
print('Row "password" and table "account" loaded!')
try:
c.execute("SELECT id FROM account")
print('Row "id" and table "account" loaded!')
except:
try:
c.execute("UPDATE account SET id = ? WHERE id = ? ", (None, 0))
db.commit()
print('Row "id" and table "account" loaded!')
except:
try:
c.execute("INSERT INTO account(id) VALUES(?)", (None))
db.commit()
print('Row "id" created and table "account" loaded!')
except:
c.execute("CREATE TABLE account(email TEXT, password TEXT, id INTEGER)")
db.commit()
c.execute("INSERT INTO account(email, password, id) VALUES(?,?,?)", (None, None, 0))
db.commit()
print('\nTable "account" created!')
print('Row "id" and table "account" loaded!')
# Table settings
try:
c.execute("SELECT language FROM settings")
print('Row "language" and table "settings" loaded!')
except:
try:
c.execute("UPDATE account SET language = ? WHERE id = ? ", (str(locale.getdefaultlocale()), 0))
db.commit()
print('Row "language" and table "settings" loaded!')
except:
try:
c.execute("INSERT INTO settings(language) VALUES(?)", (str(locale.getdefaultlocale())))
db.commit()
print('Row "language" and table "settings" loaded!')
except:
c.execute("CREATE TABLE settings(language TEXT, date_format INTEGER, time_format INTEGER, id INTEGER)")
db.commit()
c.execute("INSERT INTO settings(language, date_format, time_format, id) VALUES(?,?,?,?)", (str(locale.getdefaultlocale()), 1, 1, 0))
db.commit()
print('\nTable "settings" created!')
print('Row "language" and table "settings" loaded!')
try:
c.execute("SELECT date_format FROM settings")
print('Row "date_format" and table "settings" loaded!')
except:
try:
c.execute("UPDATE account SET date_format = ? WHERE id = ? ", (1, 0))
db.commit()
print('Row "date_format" and table "settings" loaded!')
except:
try:
c.execute("INSERT INTO settings(date_format) VALUES(?)", (1))
db.commit()
print('Row "date_format" and table "settings" loaded!')
except:
c.execute("CREATE TABLE settings(language TEXT, date_format INTEGER, time_format INTEGER, id INTEGER)")
db.commit()
c.execute("INSERT INTO settings(language, date_format, time_format, id) VALUES(?,?,?,?)", (str(locale.getdefaultlocale()), 1, 1, 0))
db.commit()
print('\nTable "settings" created!')
print('Row "date_format" and table "settings" loaded!')
try:
c.execute("SELECT time_format FROM settings")
print('Row "time_format" and table "settings" loaded!')
except:
try:
c.execute("UPDATE account SET time_format = ? WHERE id = ? ", (1, 0))
db.commit()
print('Row "time_format" and table "settings" loaded!')
except:
try:
c.execute("INSERT INTO settings(time_format) VALUES(?)", (1))
db.commit()
print('Row "time_format" and table "settings" loaded!')
except:
c.execute("CREATE TABLE settings(language TEXT, date_format INTEGER, time_format INTEGER, id INTEGER)")
db.commit()
c.execute("INSERT INTO settings(language, date_format, time_format, id) VALUES(?,?,?,?)", (str(locale.getdefaultlocale()), 1, 1, 0))
db.commit()
print('\nTable "settings" created!')
print('Row "time_format" and table "settings" loaded!')
try:
c.execute("SELECT id FROM settings")
print('Row "id" and table "settings" loaded!')
except:
try:
c.execute("UPDATE account SET id = ? WHERE id = ? ", (0, 0))
db.commit()
print('Row "id" and table "settings" loaded!')
except:
try:
c.execute("INSERT INTO settings(id) VALUES(?)", (0))
db.commit()
print('Row "id" and table "settings" loaded!')
except:
c.execute("CREATE TABLE settings(language TEXT, date_format INTEGER, time_format INTEGER, id INTEGER)")
db.commit()
c.execute("INSERT INTO settings(language, date_format, time_format, id) VALUES(?,?,?,?)", (str(locale.getdefaultlocale()), 1, 1, 0))
db.commit()
print('\nTable "settings" created!')
print('Row "id" and table "settings" loaded!')
c.execute("SELECT email, password FROM account")
credentials = list(c.fetchall())
c.execute("SELECT language FROM settings")
language = list(c.fetchall())
c.execute("SELECT date_format, time_format FROM settings")
datetime_format = list(c.fetchall())
if not os.path.isfile('version.txt'):
print('\nCreated version file.')
with open('version.txt', 'w') as f:
f.write(version)
f.close()
if not os.path.isdir("emails"):
os.makedirs("emails")
if 'en' in language[0][0]:
with open("languages/en-EN.json", "r") as read_file:
string = json.load(read_file)
langsel.set(1)
elif 'it' in language[0][0]:
with open("languages/it-IT.json", "r") as read_file:
string = json.load(read_file)
langsel.set(2)
else:
with open("languages/en-EN.json", "r") as read_file:
string = json.load(read_file)
langsel.set(1)
datesel.set(datetime_format[0][0])
timesel.set(datetime_format[0][1])
class message_handler:  # Message handling
def auth_error_type2():
messagebox.showerror(string['error'], string['auth-error-type2'])
def auth_error_type1():
messagebox.showerror(string['error'], string['auth-error-type1'])
def mail_sent():
messagebox.showinfo(string['info'], string['mail-sent'])
def compile_error():
messagebox.showerror(string['error'], string['send-error'])
def apply_language():
messagebox.showinfo(string['info'], string['apply-language'])
def no_conn():
messagebox.showerror(string['error'], string['no-connection'])
def save_email():  # Save the email to a file
if not os.path.isdir("emails"):
os.makedirs("emails")
tosave = filedialog.asksaveasfile(defaultextension="*.litemail", initialdir="emails", title=string['save-email'], filetypes=[('E-Mail', "*.litemail")])
if tosave is None:
return
template = ("""{0}
{1}
{2}
-""").format(destination.get(), subject.get(), msg_input.get('1.0', 'end-1c'))
tosave.write(str(template))
tosave.close()
print('Email saved!')
to_save = str(tosave.name)
f_ = os.path.basename(to_save)
fn = list(f_.split('.'))
window.title('Lite Mails {0} - {1}'.format(version, fn[0]))
def open_email():  # Open a saved email
global toopen
toopen = filedialog.askopenfilename(initialdir="emails", title=string['open-email'], filetypes=[("E-Mail", "*.litemail")])
if toopen == '':
return
with open(toopen, 'r') as openedfile:
def clear():
dest_input.delete(0, 'end')
sub_input.delete(0, 'end')
msg_input.delete('1.0', 'end')
dest_input.insert(0, openedfile.readline().strip())
sub_input.insert(0, openedfile.readline(62).strip())
lines = openedfile.readlines()
msg_input.insert('1.0', (''.join(lines[0:-1])).strip())
fn = list(toopen.split('.'))
window.title('Lite Mails {0} - {1}'.format(version, os.path.basename(fn[0])))
if msg_input.get('1.0', 'end-1c') or destination.get() or subject.get():
quitquestion = messagebox.askyesnocancel(string['open-email'], string['quit-message'])
if quitquestion is True:
save_email()
clear()
elif quitquestion is False:
clear()
elif quitquestion is None:
pass
elif msg_input.get('1.0', 'end-1c') and destination.get() and subject.get() in open(toopen, 'r').read():
clear()
else:
clear()
def close_program():  # Function to close the program
if toopen:
if msg_input.get('1.0', 'end-1c') and destination.get() and subject.get() in open(toopen, 'r').read():
window.destroy()
os._exit(0)
else:
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
window.destroy()
os._exit(0)
elif quitquestion is None:
pass
elif msg_input.get('1.0', 'end-1c') or destination.get() or subject.get():
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
window.destroy()
os._exit(0)
elif quitquestion is None:
pass
else:
window.destroy()
os._exit(0)
def account():  # Account settings
c.execute("SELECT email, password FROM account")
credentials = list(c.fetchall())
    accountwin = Toplevel(window)  # Create a new window
    accountwin.title(string['account-settings'])  # Window title
    accountwin.geometry('450x155')  # Window size
    accountwin.resizable(False, False)  # Disable resizing
    accountwin.iconbitmap('litemails.ico')
    # Window elements
user_label = Label(accountwin, text=string['email'], font=('Segoe UI', 13)).grid(row=0, pady=15, padx=5, sticky='w')
user_input = Entry(accountwin, textvariable=email, font=('Segoe UI', 10), width=45)
user_input.grid(row=0, column=1, pady=15, padx=5, sticky='w')
psw_label = Label(accountwin, text=string['password'], font=('Segoe UI', 13)).grid(row=1, pady=15, padx=5, sticky='w')
psw_input = Entry(accountwin, textvariable=password, font=('Segoe UI', 10), width=45, show='*')
psw_input.grid(row=1, column=1, pady=15, padx=5, sticky='w')
try:
user_input.delete(0, 'end')
psw_input.delete(0, 'end')
user_input.insert(0, credentials[0][0])
psw_input.insert(0, credentials[0][1])
except tkinter.TclError:
pass
def close_and_save():
print('Saving account data...')
c.execute("UPDATE account SET email = ? WHERE id = ? ", (email.get(), 0))
db.commit()
c.execute("UPDATE account SET password = ? WHERE id = ? ", (password.get(), 0))
db.commit()
accountwin.destroy()
ok_button = Button(accountwin, text=string['done'], width=10, command=lambda: close_and_save())
ok_button.grid(row=2, column=1, padx=25, sticky='se')
def language(lang):  # Language handling
global settings
c.execute("SELECT language FROM settings")
language = list(c.fetchall())
c.execute("UPDATE settings SET language = ? WHERE id = ? ", (lang, 0))
db.commit()
user_choice = messagebox.askokcancel(string['info'], string['apply-language'])
if user_choice:
window.destroy()
os._exit(0)
def check_for_updates(fromwhat=None):  # Update handling
try:
global r
r = requests.get('http://alex3025.github.io/litemails.html')
version_to_install = h2t.html2text(r.text).strip()
    except:
        r = None
        version_to_install = None
class RunUpdaterScript(threading.Thread):
def __init__(self):
            threading.Thread.__init__(self)
self.start()
window.destroy()
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def run(self):
os.chdir('..')
os.system('python Updater.py')
def start_updating():
db.commit()
db.close()
thread = RunUpdaterScript()
thread.stop()
os._exit(0)
if version_to_install:
if version < version_to_install:
uf = messagebox.askyesno(string['info'], string['update-found'])
if uf:
if toopen:
if msg_input.get('1.0', 'end-1c') and destination.get() and subject.get() in open(toopen, 'r').read():
start_updating()
else:
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
start_updating()
elif quitquestion is None:
pass
elif msg_input.get('1.0', 'end-1c') or destination.get() or subject.get():
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
start_updating()
elif quitquestion is None:
pass
else:
start_updating()
elif fromwhat == 'menu':
messagebox.showinfo(string['info'], string['no-update'])
elif fromwhat == 'menu':
message_handler.no_conn()
else:
print('No updates found!')
def add_attachment():  # Function to add an attachment
global file
file = filedialog.askopenfilename(title=string['add-attachment'])
if file:
send_button.configure(text=string['send-with-attachment'])
remove_attch_button.configure(state='active')
else:
send_button.configure(text=string['send'])
remove_attch_button.configure(state='disabled')
def remove_attch():  # Remove the attachment
global file
if file:
send_button.configure(text=string['send'])
remove_attch_button.configure(state='disabled')
file = None
def add_date_time(date_or_time, format_=None):  # Insert the current date/time into the mail
global datetime_format
c.execute("SELECT date_format, time_format FROM settings")
datetime_format = list(c.fetchall())
if format_:
if format_ == string['date-format-type1']:
c.execute("UPDATE settings SET date_format = ? WHERE id = ? ", (1, 0))
db.commit()
elif format_ == string['date-format-type2']:
c.execute("UPDATE settings SET date_format = ? WHERE id = ? ", (2, 0))
db.commit()
elif format_ == string['time-format-type1']:
c.execute("UPDATE settings SET time_format = ? WHERE id = ? ", (1, 0))
db.commit()
elif format_ == string['time-format-type2']:
c.execute("UPDATE settings SET time_format = ? WHERE id = ? ", (2, 0))
db.commit()
else:
c.execute("SELECT date_format, time_format FROM settings")
datetime_format = list(c.fetchall())
if date_or_time:
if date_or_time == 'date':
if datetime_format[0][0] == 1:
msg_input.insert('insert', time.strftime("%d/%m/%Y"))
elif datetime_format[0][0] == 2:
msg_input.insert('insert', time.strftime("%d-%m-%Y"))
if date_or_time == 'time':
if datetime_format[0][1] == 1:
msg_input.insert('insert', time.strftime("%H:%M:%S"))
elif datetime_format[0][1] == 2:
msg_input.insert('insert', time.strftime("%H:%M"))
c.execute("SELECT date_format, time_format FROM settings")
datetime_format = list(c.fetchall())
def new_mail():
def clear_for_new_mail():
toopen = None
tosave = None
file = None
dest_input.delete(0, 'end')
sub_input.delete(0, 'end')
msg_input.delete('1.0', 'end')
window.title('Lite Mails {}'.format(version))
if toopen:
if msg_input.get('1.0', 'end-1c') and destination.get() and subject.get() in open(toopen, 'r').read():
clear_for_new_mail()
else:
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
clear_for_new_mail()
elif quitquestion is None:
pass
elif msg_input.get('1.0', 'end-1c') or destination.get() or subject.get():
quitquestion = messagebox.askyesnocancel(string['quit'], string['quit-message'])
if quitquestion is True:
save_email()
elif quitquestion is False:
clear_for_new_mail()
elif quitquestion is None:
pass
else:
clear_for_new_mail()
def send_email():  # Function to send the mail
c.execute("SELECT email, password FROM account")
credentials = list(c.fetchall())
if r:
try:
msg = MIMEMultipart()
msg['From'] = str(credentials[0][0])
msg['To'] = str(destination.get())
msg['Subject'] = str(subject.get())
msg.attach(MIMEText(msg_input.get('1.0', 'end-1c'), 'plain'))
if file:
attachment = open(file, "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % os.path.basename(file))
msg.attach(part)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(str(credentials[0][0]), str(credentials[0][1]))
text = msg.as_string()
server.sendmail(str(credentials[0][0]), str(destination.get()), text)
server.quit()
print('Mail sent.')
message_handler.mail_sent()
except smtplib.SMTPAuthenticationError:
            if not email.get() or not password.get():
message_handler.auth_error_type2()
else:
message_handler.auth_error_type1()
except smtplib.SMTPRecipientsRefused:
message_handler.compile_error()
else:
message_handler.no_conn()
# Widgets
main_frame = Frame(window)
main_frame.grid(row=0, column=0, pady=15, sticky='wn')
dest_label = Label(main_frame, text=string['to'], font=('Segoe UI', 13)).grid(row=0, padx=5, sticky='w')
dest_input = Entry(main_frame, textvariable=destination, font=('Segoe UI', 10), width=45)
dest_input.grid(row=0, column=1, padx=5, sticky='w')
sub_label = Label(main_frame, text=string['subject'], font=('Segoe UI', 13)).grid(row=1, pady=5, padx=5, sticky='w')
sub_input = Entry(main_frame, textvariable=subject, font=('Segoe UI', 10), width=45)
sub_input.grid(row=1, column=1, pady=5, padx=5, sticky='w')
msg_label = Label(main_frame, text=string['message'], font=('Segoe UI', 13)).grid(row=2, pady=15, padx=5, sticky='wn')
msg_input = Text(main_frame, font=('Segoe UI', 10), width=45, height=15)
msg_input.grid(row=2, column=1, pady=20, padx=5, sticky='w')
scroll = Scrollbar(main_frame, command=msg_input.yview, orient='vertical')
scroll.config(command=msg_input.yview)
msg_input.configure(yscrollcommand=scroll.set)
scroll.grid(row=2, column=2, ipady=105, sticky='e')
send_button = Button(main_frame, text=string['send'], width=20, command=lambda: send_email())
send_button.grid(row=3, column=1, padx=25, sticky='se')
remove_attch_button = Button(main_frame, text=string['remove-attachment'], state='disabled', width=20, command=lambda: remove_attch())
remove_attch_button.grid(row=3, column=1, padx=25, sticky='sw')
# Menu bars
menu_bar = Menu(window)
# Mail menu
menu_mail = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label=string['mail'], menu=menu_mail)
menu_mail.add_command(label=string['new-mail'], command=lambda: new_mail())
menu_mail.add_command(label=string['save-email'], command=lambda: save_email())
menu_mail.add_command(label=string['open-email'], command=lambda: open_email())
# Datetime format menu
menu_datetime_format = Menu(menu_bar, tearoff=0)
menu_datetime_format.add_radiobutton(label=string['date'].title() + ': ' + string['date-format-type1'], command=lambda: add_date_time(None, string['date-format-type1']), variable=datesel, value=1)
menu_datetime_format.add_radiobutton(label=string['date'].title() + ': ' + string['date-format-type2'], command=lambda: add_date_time(None, string['date-format-type2']), variable=datesel, value=2)
menu_datetime_format.add_radiobutton(label=string['time'].title() + ': ' + string['time-format-type1'], command=lambda: add_date_time(None, string['time-format-type1']), variable=timesel, value=1)
menu_datetime_format.add_radiobutton(label=string['time'].title() + ': ' + string['time-format-type2'], command=lambda: add_date_time(None, string['time-format-type2']), variable=timesel, value=2)
# Datetime submenu
menu_datetime_settings = Menu(menu_bar, tearoff=0)
menu_datetime_settings.add_command(label=string['insert'] + ' ' + string['date'], command=lambda: add_date_time('date'))
menu_datetime_settings.add_command(label=string['insert'] + ' ' + string['time'], command=lambda: add_date_time('time'))
menu_datetime_settings.add_cascade(label=string['date-time-settings'], menu=menu_datetime_format)
# Insert menu
menu_insert = Menu(menu_bar, tearoff=0)
menu_insert.add_cascade(label=string['date-time'], menu=menu_datetime_settings)
menu_insert.add_command(label=string['add-attachment'], command=lambda: add_attachment())
# Tools menu
menu_utility = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label=string['utility'], menu=menu_utility)
menu_utility.add_cascade(label=string['insert'], menu=menu_insert)
# Languages menu
menu_languages = Menu(menu_bar, tearoff=0)
menu_languages.add_radiobutton(label='English', command=lambda: language('en-EN'), variable=langsel, value=1)
menu_languages.add_radiobutton(label='Italiano', command=lambda: language('it-IT'), variable=langsel, value=2)
# Options menu
menu_options = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label=string['options'], menu=menu_options)
menu_options.add_command(label=string['account-settings'], command=lambda: account())
menu_options.add_cascade(label=string['language'], menu=menu_languages)
menu_options.add_command(label=string['check-updates'], command=lambda: check_for_updates('menu'))
menu_options.add_separator()
menu_options.add_command(label=string['close'] + ' Lite Mails', command=lambda: close_program())
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
window.config(menu=menu_bar)  # Attach the menu bar
window.iconbitmap('litemails.ico')  # Program icon
window.protocol("WM_DELETE_WINDOW", close_program)  # Run the close handler when the window is closed
check_for_updates()  # Check for updates
window.mainloop()  # Start the GUI event loop
import argparse
import hashlib
import logging
logging.basicConfig(level=logging.INFO)
from urllib.parse import urlparse
import datetime
import os
import pandas as pd
logger = logging.getLogger(__name__)
def main(filename):
logger.info('starting cleaning process')
df = _read_data(filename)
df = _extract_host(df)
df = _drop_duplicates(df)
df = _drop_rows_with_missing_data(df)
df = _generate_ids_for_headlines(df)
_save_data(df, filename)
return df
def _read_data(filename):
logger.info('reading files {}'.format(filename))
return pd.read_csv(filename)
def _extract_host(df):
logger.info('Extracting host from urls')
df['host'] = df['link'].apply(lambda link:urlparse(link).netloc)
return df
def _drop_duplicates(df):
logger.info('removing duplicate entries')
df.drop_duplicates(subset=['headline'], keep='first', inplace=True)
return df
def _drop_rows_with_missing_data(df):
    logger.info('dropping rows with missing values')
return df.dropna()
def _generate_ids_for_headlines(df):
logger.info('generating ids for each row')
ids = (df
.apply(lambda row: hashlib.md5(bytes(row['link'].encode())), axis=1)
.apply(lambda hash_object: hash_object.hexdigest())
)
df['id'] = ids
return df.set_index('id')
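# Each id is the MD5 hex digest of the article link, for example (hypothetical link):
#   hashlib.md5(b'https://news.example.com/story-1').hexdigest()
# which yields a stable 32-character hex string that becomes the DataFrame index.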
def _save_data(df, filename):
#now = datetime.datetime.now().strftime('%Y/%m/%d')
clean_filename = 'clean_headlines.csv'
logger.info('saving data at location: {}'.format(clean_filename))
df.to_csv(clean_filename, encoding='utf-8')
os.remove(filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filename',
help='the path to the data',
type=str)
args = parser.parse_args()
df = main(args.filename) |
#!/usr/bin/env python3
"""
File: test_aml
Element: AML
Title: Test Address Match List
"""
import unittest
from bind9_parser.isc_utils import assertParserResultDict, acl_name, \
key_id, key_id_list, key_id_list_series, \
key_id_keyword_and_name_pair, \
parse_me
from bind9_parser.isc_aml import aml_choices, aml_nesting
class TestAML(unittest.TestCase):
""" Element AML; Address Match List (AML) """
def test_acl_names_passing(self):
""" Type ACL Name; passing """
assertParserResultDict(acl_name, 'example', {'acl_name': 'example'}, True)
assertParserResultDict(acl_name, '1.1.1.1', {'acl_name': '1.1.1.1'},
                               True)  # Not a valid ACL name in practice, but accepted by the internal parser logic
assertParserResultDict(acl_name, 'example.com', {'acl_name': 'example.com'}, True)
assertParserResultDict(acl_name, 'example[com]', {'acl_name': 'example[com]'}, True)
assertParserResultDict(acl_name, 'example<com>', {'acl_name': 'example<com>'}, True)
assertParserResultDict(acl_name, 'example&com', {'acl_name': 'example&com'}, True)
def test_acl_names_failing(self):
""" Type ACL Name; failing """
assertParserResultDict(acl_name, 'example.com!', {}, False)
assertParserResultDict(acl_name, 'ex;mple', {},
False) # obviously cannot use semicolon in acl_name/master_id/aml
assertParserResultDict(acl_name, 'subdir/example', {}, False)
assertParserResultDict(acl_name, 'ex#mple', {}, False) # obviously cannot use hash in acl_name/master_id/aml
def test_isc_aml_key_id_passing(self):
""" Element AML; Type key_id; passing """
assertParserResultDict(key_id, 'myKeyID', {'key_id': 'myKeyID'}, True)
assertParserResultDict(key_id, 'my_key_id', {'key_id': 'my_key_id'}, True)
def test_isc_aml_key_id_failing(self):
""" Element AML; Type key_id; failing """
assertParserResultDict(key_id, 'myKey#ID', {}, False)
assertParserResultDict(key_id, 'my/key_id', {}, False)
def test_isc_aml_key_id_list_passing(self):
""" Element AML; Type key_id_list; passing """
assertParserResultDict(key_id_list('key_id'), 'myKey;', {'key_id': ['myKey']}, True)
def test_isc_aml_key_id_list_series_passing(self):
""" Element AML; Type key_id_list_series; passing """
assertParserResultDict(key_id_list_series('key_ids'), 'myKey; yourKey; ourKey;',
{'key_ids': ['myKey', 'yourKey', 'ourKey']}, True)
def test_isc_aml_key_id_keyword_and_name_element_passing(self):
""" Element AML; Type key_id; passing"""
assertParserResultDict(key_id_keyword_and_name_pair, 'key myKey2', {'key_id': 'myKey2'}, True)
def test_isc_aml_key_id_keyword_and_name_element_failing(self):
""" Element AML; Type key_id; passing"""
assertParserResultDict(key_id_keyword_and_name_pair, 'key myKey3', {'key_id_WRONG': 'myKey3'}, False)
def test_aml_choices_passing(self):
assertParserResultDict(aml_choices, 'any', {'addr': 'any'}, True)
assertParserResultDict(aml_choices, 'none', {'addr': 'none'}, True)
assertParserResultDict(aml_choices, 'localhost', {'addr': 'localhost'}, True)
assertParserResultDict(aml_choices, 'localnets', {'addr': 'localnets'}, True)
assertParserResultDict(aml_choices, '1.1.1.1', {'addr': '1.1.1.1'}, True)
assertParserResultDict(aml_choices, '2.2.2.2/2', {'addr': '2.2.2.2/2'}, True)
assertParserResultDict(aml_choices, 'fe03::3', {'addr': 'fe03::3'}, True)
assertParserResultDict(aml_choices, 'master_nameservers_acl',
{'acl_name': 'master_nameservers_acl'}, True)
assertParserResultDict(aml_choices, 'example', {'acl_name': 'example'}, True)
assertParserResultDict(aml_choices, 'key MyKeyId', {'key_id': ['MyKeyId']}, True)
test_datas = [
['key myKeyId', {'key_id': ['myKeyId']}],
['127.0.0.1', {'addr': '127.0.0.1'}],
['localnets', {'addr': 'localnets'}],
['any', {'addr': 'any'}],
['none', {'addr': 'none'}],
['localhost', {'addr': 'localhost'}],
['10.0.0.1/8', {'addr': '10.0.0.1/8'}],
['example.com', {'acl_name': 'example.com'}]
            # FQDN-style names are valid master names, but are treated like a hostname
]
for this_test_data, this_expected_result in test_datas:
assertParserResultDict(aml_choices, this_test_data, this_expected_result, True)
def test_aml_choices_failing(self):
""" Element AML; Choices AML; failing """
assertParserResultDict(aml_choices, 'master/nameservers_acl', {}, False)
assertParserResultDict(aml_choices, 'master_nameservers#acl', {}, False)
assertParserResultDict(aml_choices, 'master;nameservers_acl', {}, False)
def test_isc_aml_ip4s_prefix_passing(self):
""" Element AML; Type ip4s_prefix; passing"""
assertParserResultDict(aml_choices,
'10.10.10.10/10',
{'addr': '10.10.10.10/10'}
, True)
def test_isc_aml_ip4s_prefix_failing(self):
""" Element AML; Type ip4s_prefix; failing"""
assertParserResultDict(aml_choices, '10.10.10.10/1000', {'addr': ['10.10.10.10/1000']}, False)
def test_isc_aml_aml_nesting_failing(self):
"""Purposely failing Address Match List (AML) name"""
test_data = """ {
acl_mast!er_nameservers;
1.1,1.1;
acl_nameX&&&
{ &^%$#; }; }; """
expected_result = {}
assertParserResultDict(aml_nesting, test_data, expected_result, False)
assertParserResultDict(aml_nesting, '{ 5.5.5.5/55; }', {'aml': [{'5.5.5.5/55'}]}, False)
assertParserResultDict(aml_nesting, '{ 6.6.6.6/0;}', {'6.6.6.6/0'}, False) # w/o 'aml':
assertParserResultDict(aml_nesting, '7.7.7', {}, False)
assertParserResultDict(aml_nesting, '{ 8.8.8.8 };', {}, False)
def test_isc_aml_aml_nesting_passing(self):
""" Clause ACL; Element AML spacing; passing """
test_data = [
'{ localhost; any; none; };',
'{ localnets; localhost; none;};',
'{ !localhost; };',
'{any;};',
'{none;};',
'{localhost;};',
'{localnets;};',
'{ none; };',
'{ localhost; };',
'{ localnets; };',
'{ 11.11.11.11; };'
]
result = aml_nesting.runTests(test_data, failureTests=False)
self.assertTrue(result[0])
def test_isc_aml_aml_nesting_part2_failing(self):
""" Clause ACL; Element AML spacing; failing """
test_data = ['{ oops };']
result = aml_nesting.runTests(test_data, failureTests=True)
self.assertTrue(result[0])
test_data = [
""" {
key DDNS_UPDATER;
};
"""
]
result = aml_nesting.runTests(test_data, failureTests=False)
self.assertTrue(result[0])
test_data = """{
localhost;
127.0.0.1;
10.0.0.1/8;
{
master_nameservers;
slave_bastion_host;
};
{
any;
none;
localnets;
};
};"""
# Must be in same ordering as expected result
expected_result = {
'aml': [
{'addr': 'localhost'},
{'addr': '127.0.0.1'},
{'addr': '10.0.0.1/8'},
{'aml': [
{'acl_name_WRONG': 'master_nameservers'},
{'acl_name': 'slave_bastion_host'}
]},
{'aml': [
{'addr': 'any'},
{'addr': 'none'},
{'addr': 'localnets'}
]}
]}
assertParserResultDict(aml_nesting, test_data, expected_result, False)
def test_aml_choices_nested_passing(self):
""" Clause ACL; List AML Choices; passing """
assertParserResultDict(aml_choices, 'any', {'addr': 'any'}, True)
assertParserResultDict(aml_choices, 'none', {'addr': 'none'}, True)
assertParserResultDict(aml_choices, 'localhost', {'addr': 'localhost'}, True)
assertParserResultDict(aml_choices, 'localnets', {'addr': 'localnets'}, True)
assertParserResultDict(aml_choices, '1.1.1.1', {'addr': '1.1.1.1'}, True)
assertParserResultDict(aml_choices, '2.2.2.2/2', {'addr': '2.2.2.2/2'}, True)
assertParserResultDict(aml_choices, 'fe03::3', {'addr': 'fe03::3'}, True)
assertParserResultDict(aml_choices, 'key my_own_key_id', {'key_id': ['my_own_key_id']}, True)
assertParserResultDict(aml_choices, 'master_nameservers_acl', {'acl_name': 'master_nameservers_acl'}, True)
def test_isc_aml_aml_choices_finer(self):
parse_me(aml_choices, 'key\nA8', True)
parse_me(aml_choices, 'any', True)
parse_me(aml_choices, 'none', True)
# parse_me(aml_choices, 'oops;', False) # TODO expand AML (aml_nesting) firstly
        # aml_choices('localhost;') == [['localhost']] because there is no exclamation '!' mark
parse_me(aml_choices, 'localhost', True)
parse_me(aml_choices, 'localnets', True)
        # aml_choices('!localhost;') == [['!', 'localhost']] because of the leading exclamation '!' mark
def test_aml_choices2_failing(self):
assertParserResultDict(aml_choices, 'master/nameservers_acl;', {}, False)
assertParserResultDict(aml_choices, 'master_nameservers#acl;', {}, False)
assertParserResultDict(aml_choices, 'master;nameservers_acl;', {}, False)
def test_aml_nesting_forward_passing(self):
assertParserResultDict(aml_nesting,
'{ 1.1.1.1; { 127.0.0.1;}; };',
{'aml': [{'addr': '1.1.1.1'}, {'aml': [{'addr': '127.0.0.1'}]}]},
True)
assertParserResultDict(aml_nesting,
'{ { 8.8.8.8; }; };',
{'aml': [{'aml': [{'addr': '8.8.8.8'}]}]},
True)
assertParserResultDict(aml_nesting,
'{ { { 9.9.9.9; }; }; };',
{'aml': [{'aml': [{'aml': [{'addr': '9.9.9.9'}]}]}]},
True)
def test_aml_nesting_forward_exclamation_passing(self):
assertParserResultDict(aml_nesting,
'{ ! { 1.1.1.1; { 127.0.0.1;}; }; };',
{
'aml': [
{
'aml': [
{'addr': '1.1.1.1'},
{'aml': [
{'addr': '127.0.0.1'}
]
}
],
'not': '!'
}
]
},
True)
assertParserResultDict(aml_nesting,
'{ ! 11.11.11.11; { 192.168.1.1;}; };',
{
'aml': [
{'addr': '11.11.11.11', 'not': '!'},
{'aml': [{'addr': '192.168.1.1'}]}
]
},
True)
assertParserResultDict(aml_nesting,
'{ 3.3.3.3; ! { 127.0.0.1;}; };',
{
'aml': [
{'addr': '3.3.3.3'},
{'aml': [{'addr': '127.0.0.1'}], 'not': '!'}
]},
True)
assertParserResultDict(aml_nesting,
'{ 4.4.4.4; { ! 127.0.0.1;}; };',
{
'aml': [
{'addr': '4.4.4.4'},
{'aml': [
{
'addr': '127.0.0.1',
'not': '!'
}
]}
]},
True)
assertParserResultDict(aml_nesting,
'{ 5.5.5.5; { 127.0.0.1;}; };',
{'aml': [
{'addr': '5.5.5.5'},
{'aml': [
{'addr': '127.0.0.1'}]}]},
True)
assertParserResultDict(aml_nesting,
'{ { 6.6.6.6; }; };',
{'aml': [
{'aml': [
{'addr': '6.6.6.6'}]}]},
True)
assertParserResultDict(aml_nesting,
'{ { { 7.7.7.7; }; }; };',
{'aml': [
{'aml': [
{'aml': [
{'addr': '7.7.7.7'}]}]}]},
True)
def test_aml_nesting_first_addr(self):
assertParserResultDict(aml_nesting.setDebug(True), '{ key mykey; };', {'aml': [{'key_id': ['mykey']}]}, True)
def test_aml_nesting_first_exclamation(self):
assertParserResultDict(aml_nesting.setDebug(True), '{ ! key mykey; };',
{'aml': [{'key_id': ['mykey'], 'not': '!'}]}, True)
def test_aml_nesting_first_addr_series(self):
test_data = """{ localhost; any; none; };"""
expected_result = {'aml': [{'addr': 'localhost'}, {'addr': 'any'}, {'addr': 'none'}]}
assertParserResultDict(aml_nesting, test_data, expected_result, True)
def test_aml_nesting_first_nest(self):
test_data = """{ localhost; }; """
expected_result = {'aml': [{'addr': 'localhost'}]}
assertParserResultDict(aml_nesting, test_data, expected_result, True)
def test_aml_nesting_first_two_nests(self):
test_data = """{ { localhost; }; }; """
expected_result = {'aml': [{'aml': [{'addr': 'localhost'}]}]}
assertParserResultDict(aml_nesting, test_data, expected_result, True)
def test_aml_nesting_first_combo(self):
test_data = """ { localhost; { none; }; };"""
expected_result = {'aml': [{'addr': 'localhost'}, {'aml': [{'addr': 'none'}]}]}
assertParserResultDict(aml_nesting, test_data, expected_result, True)
def test_aml_nesting_first_deep_combo(self):
test_data = """{ { none; }; localhost; { none; { any; }; }; };"""
expected_result = {'aml': [{'aml': [{'addr': 'none'}]},
{'addr': 'localhost'},
{'aml': [{'addr': 'none'}, {'aml': [{'addr': 'any'}]}]}]}
assertParserResultDict(aml_nesting, test_data, expected_result, True)
# test_aml_nesting_flat is not a valid ISC syntax but an interim syntax checker
def test_aml_nesting_flat(self):
test_data = """{ localhost; };"""
expected_result = {'aml': [{'addr': 'localhost'}]}
assertParserResultDict(aml_nesting, test_data, expected_result, True)
def test_isc_aml_aml_nesting_passing(self):
"""Address Match List (AML) name"""
test_data = [
'{ 1.1.1.1; };',
'{ 2.2.2.2/2; };',
'{ 333::1; };',
'{ any; };',
'{ none; };',
'{ localhost; };',
'{ localnets; };',
'{ 4.4.4.4; ma1ster_nameservers; };',
'{ 4.4.4.4; master_nameservers; };',
'{ 14.14.14.14; master_nameservers; 15.15.15.15/15; };',
'{ 5.5.5.5; fe02::1; };',
'{ fe02::1; 6.6.6.6; };',
'{ 7.7.7.7; fe03::1; slave_nameservers; };',
'{ fe01::1; master_nameservers; };',
'{ master_nameservers; };',
'{ "rndc-remote5" ; };'
]
aml_nesting.runTests(test_data, failureTests=True)
def test_aml_aml_nesting_failing(self):
assertParserResultDict(aml_nesting, '{ 23.23.23.23};', {}, False) # missing inside semicolon
assertParserResultDict(aml_nesting, '{ 23.23.23.23;}', {}, False) # missing outside semicolon
if __name__ == '__main__':
unittest.main()
|
from distutils.core import setup
setup(
name = 'SWMat',
packages = ['SWMat'],
version = '0.1.4',
license='Apache License 2.0',
description = 'A package for making stunning graphs/charts using matplotlib in just few lines of code!',
author = 'Puneet Grover',
author_email = '[email protected]',
url = 'https://github.com/PuneetGrov3r/SWMat',
download_url = 'https://github.com/PuneetGrov3r/SWMat/archive/v0.1.4-alpha.tar.gz',
keywords = ['plot', 'visualization', 'data', 'big data', 'exploration', 'data exploration', 'communication', 'python', 'matplotlib', 'machine learning', 'data science'],
install_requires=[
'matplotlib',
'numpy',
'pandas'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Visualization',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only'
],
)
|
#!/usr/bin/env python
#
# setup.py
#
# John Van Note
# 2017-03-05
#
# setup.py for the Markov Chain project
#
import os
from setuptools import setup
setup(
name='markov-tweetbot',
version='0.0.1',
author='John Van Note',
author_email='[email protected]',
description='Python implementation of the Markov Chain algorithm',
license='MIT',
keywords='Markov Chain',
url='https://github.com/JohnVanNote/markov-tweetbot',
packages=[]
) |
template = """
module rom32x4 (
input [4:0] addr,
input clk,
output [4:0] data);
wire [7:0] rdata;
wire [15:0] RDATA;
wire RCLK;
wire [10:0] RADDR;
SB_RAM40_4KNR #( // negative-edge read clock so we can apply an address on the positive edge and guarantee data is available on the next posedge
.WRITE_MODE(1),
.READ_MODE(1),
.INIT_0(256'h{init[0][0]:04x}{init[0][1]:04x}{init[0][2]:04x}{init[0][3]:04x}{init[0][4]:04x}{init[0][5]:04x}{init[0][6]:04x}{init[0][7]:04x}{init[0][8]:04x}{init[0][9]:04x}{init[0][10]:04x}{init[0][11]:04x}{init[0][12]:04x}{init[0][13]:04x}{init[0][14]:04x}{init[0][15]:04x}),
.INIT_1(256'h{init[1][0]:04x}{init[1][1]:04x}{init[1][2]:04x}{init[1][3]:04x}{init[1][4]:04x}{init[1][5]:04x}{init[1][6]:04x}{init[1][7]:04x}{init[1][8]:04x}{init[1][9]:04x}{init[1][10]:04x}{init[1][11]:04x}{init[1][12]:04x}{init[1][13]:04x}{init[1][14]:04x}{init[1][15]:04x}),
.INIT_2(256'h{init[2][0]:04x}{init[2][1]:04x}{init[2][2]:04x}{init[2][3]:04x}{init[2][4]:04x}{init[2][5]:04x}{init[2][6]:04x}{init[2][7]:04x}{init[2][8]:04x}{init[2][9]:04x}{init[2][10]:04x}{init[2][11]:04x}{init[2][12]:04x}{init[2][13]:04x}{init[2][14]:04x}{init[2][15]:04x}),
.INIT_3(256'h{init[3][0]:04x}{init[3][1]:04x}{init[3][2]:04x}{init[3][3]:04x}{init[3][4]:04x}{init[3][5]:04x}{init[3][6]:04x}{init[3][7]:04x}{init[3][8]:04x}{init[3][9]:04x}{init[3][10]:04x}{init[3][11]:04x}{init[3][12]:04x}{init[3][13]:04x}{init[3][14]:04x}{init[3][15]:04x}),
.INIT_4(256'h{init[4][0]:04x}{init[4][1]:04x}{init[4][2]:04x}{init[4][3]:04x}{init[4][4]:04x}{init[4][5]:04x}{init[4][6]:04x}{init[4][7]:04x}{init[4][8]:04x}{init[4][9]:04x}{init[4][10]:04x}{init[4][11]:04x}{init[4][12]:04x}{init[4][13]:04x}{init[4][14]:04x}{init[4][15]:04x}),
.INIT_5(256'h{init[5][0]:04x}{init[5][1]:04x}{init[5][2]:04x}{init[5][3]:04x}{init[5][4]:04x}{init[5][5]:04x}{init[5][6]:04x}{init[5][7]:04x}{init[5][8]:04x}{init[5][9]:04x}{init[5][10]:04x}{init[5][11]:04x}{init[5][12]:04x}{init[5][13]:04x}{init[5][14]:04x}{init[5][15]:04x}),
.INIT_6(256'h{init[6][0]:04x}{init[6][1]:04x}{init[6][2]:04x}{init[6][3]:04x}{init[6][4]:04x}{init[6][5]:04x}{init[6][6]:04x}{init[6][7]:04x}{init[6][8]:04x}{init[6][9]:04x}{init[6][10]:04x}{init[6][11]:04x}{init[6][12]:04x}{init[6][13]:04x}{init[6][14]:04x}{init[6][15]:04x}),
.INIT_7(256'h{init[7][0]:04x}{init[7][1]:04x}{init[7][2]:04x}{init[7][3]:04x}{init[7][4]:04x}{init[7][5]:04x}{init[7][6]:04x}{init[7][7]:04x}{init[7][8]:04x}{init[7][9]:04x}{init[7][10]:04x}{init[7][11]:04x}{init[7][12]:04x}{init[7][13]:04x}{init[7][14]:04x}{init[7][15]:04x}),
.INIT_8(256'h{init[8][0]:04x}{init[8][1]:04x}{init[8][2]:04x}{init[8][3]:04x}{init[8][4]:04x}{init[8][5]:04x}{init[8][6]:04x}{init[8][7]:04x}{init[8][8]:04x}{init[8][9]:04x}{init[8][10]:04x}{init[8][11]:04x}{init[8][12]:04x}{init[8][13]:04x}{init[8][14]:04x}{init[8][15]:04x}),
.INIT_9(256'h{init[9][0]:04x}{init[9][1]:04x}{init[9][2]:04x}{init[9][3]:04x}{init[9][4]:04x}{init[9][5]:04x}{init[9][6]:04x}{init[9][7]:04x}{init[9][8]:04x}{init[9][9]:04x}{init[9][10]:04x}{init[9][11]:04x}{init[9][12]:04x}{init[9][13]:04x}{init[9][14]:04x}{init[9][15]:04x}),
.INIT_A(256'h{init[10][0]:04x}{init[10][1]:04x}{init[10][2]:04x}{init[10][3]:04x}{init[10][4]:04x}{init[10][5]:04x}{init[10][6]:04x}{init[10][7]:04x}{init[10][8]:04x}{init[10][9]:04x}{init[10][10]:04x}{init[10][11]:04x}{init[10][12]:04x}{init[10][13]:04x}{init[10][14]:04x}{init[10][15]:04x}),
.INIT_B(256'h{init[11][0]:04x}{init[11][1]:04x}{init[11][2]:04x}{init[11][3]:04x}{init[11][4]:04x}{init[11][5]:04x}{init[11][6]:04x}{init[11][7]:04x}{init[11][8]:04x}{init[11][9]:04x}{init[11][10]:04x}{init[11][11]:04x}{init[11][12]:04x}{init[11][13]:04x}{init[11][14]:04x}{init[11][15]:04x}),
.INIT_C(256'h{init[12][0]:04x}{init[12][1]:04x}{init[12][2]:04x}{init[12][3]:04x}{init[12][4]:04x}{init[12][5]:04x}{init[12][6]:04x}{init[12][7]:04x}{init[12][8]:04x}{init[12][9]:04x}{init[12][10]:04x}{init[12][11]:04x}{init[12][12]:04x}{init[12][13]:04x}{init[12][14]:04x}{init[12][15]:04x}),
.INIT_D(256'h{init[13][0]:04x}{init[13][1]:04x}{init[13][2]:04x}{init[13][3]:04x}{init[13][4]:04x}{init[13][5]:04x}{init[13][6]:04x}{init[13][7]:04x}{init[13][8]:04x}{init[13][9]:04x}{init[13][10]:04x}{init[13][11]:04x}{init[13][12]:04x}{init[13][13]:04x}{init[13][14]:04x}{init[13][15]:04x}),
.INIT_E(256'h{init[14][0]:04x}{init[14][1]:04x}{init[14][2]:04x}{init[14][3]:04x}{init[14][4]:04x}{init[14][5]:04x}{init[14][6]:04x}{init[14][7]:04x}{init[14][8]:04x}{init[14][9]:04x}{init[14][10]:04x}{init[14][11]:04x}{init[14][12]:04x}{init[14][13]:04x}{init[14][14]:04x}{init[14][15]:04x}),
.INIT_F(256'h{init[15][0]:04x}{init[15][1]:04x}{init[15][2]:04x}{init[15][3]:04x}{init[15][4]:04x}{init[15][5]:04x}{init[15][6]:04x}{init[15][7]:04x}{init[15][8]:04x}{init[15][9]:04x}{init[15][10]:04x}{init[15][11]:04x}{init[15][12]:04x}{init[15][13]:04x}{init[15][14]:04x}{init[15][15]:04x})
) rom(
.RDATA(RDATA),
.RCLKN(RCLK), // negative edge readclock has an N appended
.RCLKE(1),
.RE(1),
.RADDR(RADDR),
.WCLK(0),
.WCLKE(0),
.WE(0),
.WADDR(11'hxxxx),
.MASK(16'hxxxx),
.WDATA(8'hxx)
);
assign rdata = {{RDATA[14],RDATA[12],RDATA[10],RDATA[8],RDATA[6],RDATA[4],RDATA[2],RDATA[0]}};
assign data = rdata[4:0];
assign RADDR = {{6'b0, addr}};
assign RCLK = clk;
endmodule
"""
# https://github.com/jamesbowman/swapforth/blob/master/j1a/mkrom.py
# https://stackoverflow.com/questions/41499494/how-can-i-use-ice40-4k-block-ram-in-512x8-read-mode-with-icestorm
def fanbits(byt):
f = 0
for i in range(8):
if byt & (1 << i):
f += 1 << i*2+1
return f
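# fanbits() moves bit i of the input byte to bit (2*i + 1) of a 16-bit word; for
# example fanbits(0b00001011) == 0b10001010 == 138. genrom() below additionally
# shifts this right by one bit for addresses below 256, so the low and high halves
# of the address space land on the even and odd bit lanes of each 16-bit RAM word.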
def genrom(data):
    init = [[0] * 16 for i in range(16)]
for i,d in enumerate(data):
row = (i % 256) // 16
col = 15 - i % 16
bits= fanbits(d)
bits= (bits >> 1) if i < 256 else bits
init[row][col] |= bits
return template.format(init = init)
START = 0       # next is START unless overruled
FETCH = 1       # next state is always WAIT
DECODE = 2      # next is FETCH unless overruled
OPLOAD = 3      # next state is always DECODE
ECHO = 4        # next state is always ECHO1
ECHO1 = 5       # next is ECHO1 unless overruled
WAIT = 6        # next state is always OPLOAD
WAIT2 = 7       # next state is always OPLOAD2
OPLOAD2 = 8     # next state is always DECODE2
DECODE2 = 9     # next is FETCH unless overruled
WAIT3 = 10      # next state is always MEMLOAD
MEMLOAD = 11    # next state is always FETCH
READ = 12       # next is READ unless overruled
STACKPUSH = 13  # next state is always STACKPUSH2
STACKPUSH2 = 14 # next state is always FETCH
CALL1 = 15      # next state is always CALL2
CALL2 = 16      # next state is always CALL3
CALL3 = 17      # next state is always CALL4
CALL4 = 18      # next state is always CALL5
CALL5 = 19      # next state is always FETCH
RETURN1 = 20    # next state is always RETURN2
RETURN2 = 21    # next state is always RETURN3
RETURN3 = 22    # next state is always RETURN4
RETURN4 = 23    # next state is always RETURN5
RETURN5 = 24    # next state is always FETCH
STIDPWAIT = 25  # next state is always STIDPWAIT1
WAITBASER = 26  # next state is always WAITBASER1
WAITBASER1 = 27 # next state is always FETCH
STIDPWAIT1 = 31 # next state is always FETCH
data = {
START :START,
FETCH :WAIT,
DECODE :FETCH,
OPLOAD :DECODE,
ECHO :ECHO1,
ECHO1 :ECHO1,
WAIT :OPLOAD,
WAIT2 :OPLOAD2,
OPLOAD2 :DECODE2,
DECODE2 :FETCH,
WAIT3 :MEMLOAD,
MEMLOAD :FETCH,
READ :READ,
STACKPUSH :STACKPUSH2,
STACKPUSH2:FETCH,
CALL1 :CALL2,
CALL2 :CALL3,
CALL3 :CALL4,
CALL4 :CALL5,
CALL5 :FETCH,
RETURN1 :RETURN2,
RETURN2 :RETURN3,
RETURN3 :RETURN4,
RETURN4 :RETURN5,
RETURN5 :FETCH,
STIDPWAIT :STIDPWAIT1,
WAITBASER :WAITBASER1,
WAITBASER1:FETCH,
STIDPWAIT1:FETCH,
}
data = [data[k] for k in sorted(data)]
nbytes = len(data)
data = data + [0] * (512 - nbytes)
print(genrom(data))
|
import numpy as np
from numba import prange
from ...geometry._algorithms.bounds import total_bounds_interleaved
from ...geometry._algorithms.orientation import triangle_orientation
from ...utils import ngjit, ngpjit
@ngjit
def segment_intersects_point(ax0, ay0, ax1, ay1, bx, by):
"""
Test whether a 2-dimensional line segment intersects with a point
Args:
ax0, ay0: coordinates of start of segment
ax1, ay1: coordinates of end of segment
bx, by: coordinates of point
Returns:
True if segment intersects point, False otherwise
"""
# Check bounds
if bx < min(ax0, ax1) or bx > max(ax0, ax1):
return False
if by < min(ay0, ay1) or by > max(ay0, ay1):
return False
# Use cross product to test whether point is exactly on line
# S is vector from segment start to segment end
sx = ax1 - ax0
sy = ay1 - ay0
# P is vector from segment start to point
px = bx - ax0
py = by - ay0
    # Compute the cross product of S and P
sxp = sx * py - sy * px
return sxp == 0
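# Hedged usage sketch: a point that lies exactly on the segment registers as an
# intersection, while a point off the supporting line does not.
#   segment_intersects_point(0.0, 0.0, 2.0, 2.0, 1.0, 1.0)  # -> True
#   segment_intersects_point(0.0, 0.0, 2.0, 2.0, 1.0, 0.0)  # -> False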
@ngjit
def segments_intersect_1d(ax0, ax1, bx0, bx1):
"""
Test whether two 1-dimensional line segments overlap
Args:
ax0, ax1: coords of endpoints of first segment
bx0, bx1: coords of endpoints of second segment
Returns:
True if segments overlap, False otherwise
"""
# swap inputs so that *x1 >= *x0
if ax1 < ax0:
ax0, ax1 = ax1, ax0
if bx1 < bx0:
bx0, bx1 = bx1, bx0
return max(ax0, bx0) <= min(ax1, bx1)
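# For example, the intervals [0, 2] and [1, 3] overlap while [0, 1] and [2, 3]
# do not:
#   segments_intersect_1d(0.0, 2.0, 1.0, 3.0)  # -> True
#   segments_intersect_1d(0.0, 1.0, 2.0, 3.0)  # -> False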
@ngjit
def segments_intersect(ax0, ay0, ax1, ay1, bx0, by0, bx1, by1):
"""
Test whether two 2-dimensional line segments intersect
Args:
ax0, ay0: coordinates of start of first segment
ax1, ay1: coordinates of end of first segment
bx0, by0: coordinates of start of second segment
bx1, by1: coordinates of end of second segment
Returns:
True if segments intersect, False otherwise
"""
if not segments_intersect_1d(ax0, ax1, bx0, bx1):
# x projection of segments do not intersect, segments do not cross
return False
if not segments_intersect_1d(ay0, ay1, by0, by1):
# y projection of segments do not intersect, segments do not cross
return False
a_zero = ax0 == ax1 and ay0 == ay1
b_zero = bx0 == bx1 and by0 == by1
if a_zero and not b_zero and (
ax0 == bx0 and ay0 == by0 or ax0 == bx1 and ay0 == by1
):
# a is zero length line that is identical to an end point of b
return True
elif b_zero and not a_zero and (
bx0 == ax0 and by0 == ay0 or bx0 == ax1 and by0 == ay1
):
        # b is zero length line that is identical to an end point of a
return True
elif a_zero or b_zero:
# a or b is zero length and does not match a vertex of the other line
return False
b0_orientation = triangle_orientation(ax0, ay0, ax1, ay1, bx0, by0)
b1_orientation = triangle_orientation(ax0, ay0, ax1, ay1, bx1, by1)
if b0_orientation == 0 and b1_orientation == 0:
# b0 and b1 lie on line from a0 to a1, segments are collinear and intersect
return True
elif b0_orientation == b1_orientation:
# b0 and b1 lie on the same side of line from a0 to a1, segments do not
# intersect
return False
a0_orientation = triangle_orientation(bx0, by0, bx1, by1, ax0, ay0)
a1_orientation = triangle_orientation(bx0, by0, bx1, by1, ax1, ay1)
if a0_orientation == 0 and a1_orientation == 0:
# a0 and a1 lie on line from b0 to b1, segments are collinear and cross
return True
elif a0_orientation == a1_orientation:
# a0 and a1 are on the same side of line from b0 to b1, segments do not cross
return False
return True
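# For example, the two diagonals of the unit square cross, while two parallel
# horizontal segments do not:
#   segments_intersect(0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0)  # -> True
#   segments_intersect(0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0)  # -> False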
@ngjit
def point_intersects_polygon(x, y, values, value_offsets):
"""
Test whether a point intersects with a polygon
Args:
x, y: coordinates of test point
values: array of interleaved coordinates of the polygon and holes
value_offsets: array of offsets into values that separate the rings that
compose the polygon. The first ring is the outer shell and subsequent rings
are holes contained in this shell.
Returns:
True if the test point intersects with the polygon, False otherwise
"""
winding_number = 0
for i in range(len(value_offsets) - 1):
start = value_offsets[i]
stop = value_offsets[i + 1]
for k in range(start, stop - 2, 2):
x0 = values[k]
y0 = values[k + 1]
x1 = values[k + 2]
y1 = values[k + 3]
if y1 == y0:
# skip horizontal edges
continue
# Make sure the y1 > y0 and keep track of whether edge was
# originally ascending vertically
if y1 < y0:
ascending = -1
y0, y1 = y1, y0
x0, x1 = x1, x0
else:
ascending = 1
# Reject edges that are fully above, below, or to the left of test point
if y0 >= y or y1 < y or (x0 < x and x1 < x):
continue
if x0 >= x and x1 >= x:
# Edge is fully to the right of test point, so we know that a ray to
# the right will intersect the edge
winding_number += ascending
else:
# Check if edge is to the right of test point using cross product
# A is vector from test point to lower vertex
ax = x0 - x
ay = y0 - y
# B is vector from test point to upper vertex
bx = x1 - x
by = y1 - y
                # Compute the cross product of A and B
axb = ax * by - ay * bx
if axb > 0 or (axb == 0 and ascending):
# Edge intersects with ray
winding_number += ascending
return winding_number != 0
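# Hedged usage sketch with a closed unit-square ring (interleaved x/y coordinates
# and a single offset pair delimiting the outer shell):
#   values = np.array([0., 0., 1., 0., 1., 1., 0., 1., 0., 0.])
#   offsets = np.array([0, 10])
#   point_intersects_polygon(0.5, 0.5, values, offsets)  # -> True
#   point_intersects_polygon(1.5, 0.5, values, offsets)  # -> False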
@ngpjit
def multipoints_intersect_bounds(
x0, y0, x1, y1, flat_values, start_offsets, stop_offsets, result
):
"""
Test whether each multipoint in a collection of multipoints intersects with
the supplied bounds
Args:
x0, y0, x1, y1: Bounds coordinates
flat_values: Interleaved point coordinates
start_offsets, stop_offsets: start and stop offsets into flat_values
separating individual multipoints
result: boolean array to be provided by the caller into which intersection
results will be written; must be at least as long as start_offsets
Returns:
None
"""
# Initialize results
n = len(start_offsets)
result.fill(False)
# Orient rectangle
if x1 < x0:
x0, x1 = x1, x0
if y1 < y0:
        y0, y1 = y1, y0
for i in prange(n):
start = start_offsets[i]
stop = stop_offsets[i]
# Check for points in rect
point_in_rect = False
for j in range(start, stop, 2):
x = flat_values[j]
y = flat_values[j + 1]
if x0 <= x and x <= x1 and y0 <= y and y <= y1:
point_in_rect = True
break
if point_in_rect:
result[i] = True
continue
return result
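# Hedged usage sketch: one multipoint holding the points (0.5, 0.5) and (3, 3);
# only the first point falls inside the query bounds, so the multipoint matches.
#   flat = np.array([0.5, 0.5, 3.0, 3.0])
#   starts, stops = np.array([0]), np.array([4])
#   out = np.zeros(1, dtype=np.bool_)
#   multipoints_intersect_bounds(0.0, 0.0, 1.0, 1.0, flat, starts, stops, out)
#   out[0]  # -> True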
@ngjit
def _perform_line_intersect_bounds(
i, x0, y0, x1, y1, flat_values, start_offsets, stop_offsets, result
):
start = start_offsets[i]
stop = stop_offsets[i]
# compute bounding box for line
bounds = total_bounds_interleaved(flat_values[start:stop])
if bounds[0] > x1 or bounds[1] > y1 or bounds[2] < x0 or bounds[3] < y0:
# bounds outside of rect, does not intersect
return
if (bounds[0] >= x0 and bounds[2] <= x1 or
bounds[1] >= y0 and bounds[3] <= y1):
# bounds is fully contained in rect when both are projected onto the
# x or y axis
result[i] = True
return
# Check for vertices in rect
vert_in_rect = False
for j in range(start, stop, 2):
x = flat_values[j]
y = flat_values[j + 1]
if x0 <= x and x <= x1 and y0 <= y and y <= y1:
vert_in_rect = True
break
if vert_in_rect:
result[i] = True
return
# Check for segment that crosses rectangle edge
segment_intersects = False
for j in range(start, stop - 2, 2):
ex0 = flat_values[j]
ey0 = flat_values[j + 1]
ex1 = flat_values[j + 2]
ey1 = flat_values[j + 3]
# top
if segments_intersect(ex0, ey0, ex1, ey1, x0, y1, x1, y1):
segment_intersects = True
break
# bottom
if segments_intersect(ex0, ey0, ex1, ey1, x0, y0, x1, y0):
segment_intersects = True
break
# left
if segments_intersect(ex0, ey0, ex1, ey1, x0, y0, x0, y1):
segment_intersects = True
break
# right
if segments_intersect(ex0, ey0, ex1, ey1, x1, y0, x1, y1):
segment_intersects = True
break
if segment_intersects:
result[i] = True
@ngjit
def lines_intersect_bounds(
x0, y0, x1, y1, flat_values, start_offsets, stop_offsets, result
):
"""
Test whether each line in a collection of lines intersects with the supplied bounds
Args:
x0, y0, x1, y1: Bounds coordinates
flat_values: Interleaved line coordinates
start_offsets, stop_offsets: start and stop offsets into flat_values
separating individual lines
result: boolean array to be provided by the caller into which intersection
results will be written; must be at least as long as start_offsets
Returns:
None
"""
# Initialize results
n = len(start_offsets)
result.fill(False)
# Orient rectangle
if x1 < x0:
x0, x1 = x1, x0
if y1 < y0:
        y0, y1 = y1, y0
if x0 == x1 or y0 == y1:
# Zero width/height rect does not intersect with anything
return
for i in range(n):
_perform_line_intersect_bounds(
i, x0, y0, x1, y1, flat_values, start_offsets, stop_offsets, result
)
return result
@ngjit
def multilines_intersect_bounds(
x0, y0, x1, y1, flat_values, start_offsets0, stop_offsets0, offsets1, result
):
"""
Test whether each multiline in a collection of multilines intersects with the
supplied bounds
Args:
x0, y0, x1, y1: Bounds coordinates
flat_values: Interleaved line coordinates
start_offsets0, stop_offsets0: start and stop offsets into offsets1
separating individual multilines
offsets1: Offsets into flat_values separating individual lines
result: boolean array to be provided by the caller into which intersection
            results will be written; must be at least as long as start_offsets0
Returns:
None
"""
# Initialize results
n = len(start_offsets0)
result.fill(False)
if len(start_offsets0) < 1:
# Empty array
return
# Orient rectangle
if x1 < x0:
x0, x1 = x1, x0
if y1 < y0:
        y0, y1 = y1, y0
if x0 == x1 or y0 == y1:
# Zero width/height rect does not intersect with anything
return
# Populate results
for i in range(n):
# Numba has issues with following line when jit(parallel=True) is used:
# Invalid use of Function(<intrinsic wrap_index>) with argument(s) of type(s):
# (uint32, int64)
element_offsets = offsets1[start_offsets0[i]:stop_offsets0[i] + 1]
num_lines = len(element_offsets) - 1
element_result = np.zeros(num_lines, dtype=np.bool_)
for j in range(num_lines):
_perform_line_intersect_bounds(
j, x0, y0, x1, y1,
flat_values, element_offsets[:-1], element_offsets[1:], element_result
)
result[i] = element_result.any()
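# Illustrative sketch (not part of the original module): the nested offset
# layout for two multilines, each holding a single line.  The first line runs
# from (0, 0) to (1, 1), the second from (5, 5) to (6, 6); only the first
# overlaps the query bounds.  All values are made up for the example::
#
#     flat_values = np.array([0., 0., 1., 1., 5., 5., 6., 6.])
#     offsets1 = np.array([0, 4, 8])       # line boundaries in flat_values
#     start_offsets0 = np.array([0, 1])    # per-multiline start into offsets1
#     stop_offsets0 = np.array([1, 2])     # per-multiline stop into offsets1
#     result = np.zeros(2, dtype=np.bool_)
#     multilines_intersect_bounds(-1., -1., 2., 2., flat_values,
#                                 start_offsets0, stop_offsets0, offsets1, result)
#     # result -> array([ True, False])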
@ngjit
def _perform_polygon_intersect_bounds(i, x0, y0, x1, y1, flat_values, start_offsets0,
stop_offsets0, offsets1, result):
start0 = start_offsets0[i]
stop0 = stop_offsets0[i]
start1 = offsets1[start0]
stop1 = offsets1[stop0]
# compute bounding box for polygon.
bounds = total_bounds_interleaved(flat_values[start1:stop1])
if bounds[0] > x1 or bounds[1] > y1 or bounds[2] < x0 or bounds[3] < y0:
# bounds outside of rect, does not intersect
return
if (bounds[0] >= x0 and bounds[2] <= x1 or
bounds[1] >= y0 and bounds[3] <= y1):
# bounds is fully contained in rect when both are projected onto the
# x or y axis
result[i] = True
return
# Check for vertices in rect
vert_in_rect = False
for k in range(start1, stop1, 2):
x = flat_values[k]
y = flat_values[k + 1]
if x0 <= x and x <= x1 and y0 <= y and y <= y1:
vert_in_rect = True
break
if vert_in_rect:
result[i] = True
return
# Check for segment that crosses rectangle edge
segment_intersects = False
for j in range(start0, stop0):
for k in range(offsets1[j], offsets1[j + 1] - 2, 2):
ex0 = flat_values[k]
ey0 = flat_values[k + 1]
ex1 = flat_values[k + 2]
ey1 = flat_values[k + 3]
# top
if segments_intersect(ex0, ey0, ex1, ey1, x0, y1, x1, y1):
segment_intersects = True
break
# bottom
if segments_intersect(ex0, ey0, ex1, ey1, x0, y0, x1, y0):
segment_intersects = True
break
# left
if segments_intersect(ex0, ey0, ex1, ey1, x0, y0, x0, y1):
segment_intersects = True
break
# right
if segments_intersect(ex0, ey0, ex1, ey1, x1, y0, x1, y1):
segment_intersects = True
break
if segment_intersects:
result[i] = True
if segment_intersects:
return
    # Check whether any corner of the query rectangle lies inside the polygon
polygon_offsets = offsets1[start0:stop0 + 1]
if point_intersects_polygon(x0, y0, flat_values, polygon_offsets):
result[i] = True
return
if point_intersects_polygon(x1, y0, flat_values, polygon_offsets):
result[i] = True
return
if point_intersects_polygon(x1, y1, flat_values, polygon_offsets):
result[i] = True
return
if point_intersects_polygon(x0, y1, flat_values, polygon_offsets):
result[i] = True
return
@ngjit
def polygons_intersect_bounds(
x0, y0, x1, y1, flat_values, start_offsets0, stop_offsets0, offsets1, result
):
"""
Test whether each polygon in a collection of polygons intersects with the
supplied bounds
Args:
x0, y0, x1, y1: Bounds coordinates
flat_values: Interleaved vertex coordinates
start_offsets0, stop_offsets0: start and stop offsets into offsets1 separating
individual polygons
        offsets1: offsets into flat_values separating individual polygon rings
        result: boolean array to be provided by the caller into which intersection
            results will be written; must be at least as long as start_offsets0
Returns:
None
"""
# Initialize results
n = len(start_offsets0)
result.fill(False)
# Orient rectangle
if x1 < x0:
x0, x1 = x1, x0
if y1 < y0:
        y0, y1 = y1, y0
if x0 == x1 or y0 == y1:
# Zero width/height rect does not intersect with anything
return
for i in range(n):
_perform_polygon_intersect_bounds(
i, x0, y0, x1, y1, flat_values,
start_offsets0, stop_offsets0, offsets1, result
)
return result
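# Illustrative sketch (not part of the original module): one polygon made of a
# 4x4 counter-clockwise shell and a 2x2 hole wound the opposite way, which is
# the ring-orientation convention the winding-number test above relies on.  A
# query rectangle sitting entirely inside the hole does not intersect::
#
#     flat_values = np.array([
#         0., 0., 4., 0., 4., 4., 0., 4., 0., 0.,   # shell (counter-clockwise)
#         1., 1., 1., 3., 3., 3., 3., 1., 1., 1.,   # hole (clockwise)
#     ])
#     offsets1 = np.array([0, 10, 20])  # ring boundaries in flat_values
#     start_offsets0 = np.array([0])    # per-polygon start into offsets1
#     stop_offsets0 = np.array([2])     # per-polygon stop into offsets1
#     result = np.zeros(1, dtype=np.bool_)
#     polygons_intersect_bounds(1.5, 1.5, 2.5, 2.5, flat_values,
#                               start_offsets0, stop_offsets0, offsets1, result)
#     # result -> array([False])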
@ngjit
def multipolygons_intersect_bounds(
x0, y0, x1, y1, flat_values,
start_offsets0, stop_offsets0, offsets1, offsets2, result
):
"""
Test whether each multipolygon in a collection of multipolygons intersects with
the supplied bounds
Args:
x0, y0, x1, y1: Bounds coordinates
flat_values: Interleaved vertex coordinates
start_offsets0, stop_offsets0: start and stop offsets into offsets1 separating
individual multipolygons
offsets1: offsets into offsets2 separating individual polygons
offsets2: offsets into flat_values separating individual polygon rings
result: boolean array to be provided by the caller into which intersection
            results will be written; must be at least as long as start_offsets0
Returns:
None
"""
# Initialize results
n = len(start_offsets0)
result.fill(False)
if len(start_offsets0) < 1:
# Empty array
return
# Orient rectangle
if x1 < x0:
x0, x1 = x1, x0
if y1 < y0:
        y0, y1 = y1, y0
if x0 == x1 or y0 == y1:
# Zero width/height rect does not intersect with anything
return
# Populate results
for i in range(n):
polygon_offsets = offsets1[start_offsets0[i]:stop_offsets0[i] + 1]
num_polys = len(polygon_offsets) - 1
element_result = np.zeros(num_polys, dtype=np.bool_)
for j in range(num_polys):
_perform_polygon_intersect_bounds(
j, x0, y0, x1, y1, flat_values,
polygon_offsets[:-1], polygon_offsets[1:], offsets2, element_result
)
result[i] = element_result.any()
|
import numpy as np
def gamma(bilde, g=0.5):
"""
    Given an HDR image, returns the result of performing simple
    rendering with a gamma function.
    Parameters
    ----------
    bilde : ndarray
        An image that may have any number of dimensions/channels.
    g : <0, 1> float
        The gamma value used to render the image.
    Returns
    -------
    gbilde : ndarray
        The image after applying the gamma function.
    """
    if 0.0 < g < 1.0:
        return bilde ** g
    else:
        raise ValueError("the gamma value must be between 0 and 1")
def gamma_luminans(bilde, g=0.5):
"""
    Given an HDR image, returns the result of performing simple
    rendering with a gamma function applied to the luminance of the image.
    Parameters
    ----------
    bilde : {(X, Y, 3)} ndarray
        An image with 3 colour channels.
    g : <0, 1> float
        The gamma value used to render the image.
    Returns
    -------
    gbilde : ndarray
        The image after applying the gamma function to the luminance.
    """
    # Compute the luminance (R + G + B).
    L = bilde.sum(axis=2)
    # Apply the gamma curve to the luminance (forwarding g) and rescale each
    # channel by the ratio of gamma-mapped to original luminance.
    return gamma(L[:, :, np.newaxis], g) * (bilde / L[:, :, np.newaxis])
def gamma_kombo(bilde, v=0.5, g=0.5):
"""
    Given an HDR image, returns the result of performing simple
    rendering using a weighted sum of the gamma function applied to the
    luminance and the gamma function applied to all channels of the image.
    Parameters
    ----------
    bilde : {(X, Y, 3)} ndarray
        An image with 3 colour channels.
    v : <0, 1> float
        Determines how strongly the luminance is weighted (`v * gamma_luminans + (1 - v) *
        gamma`).
    g : <0, 1> float
        The gamma value used to render the image.
    Returns
    -------
    gbilde : ndarray
        The image after applying the gamma function.
    """
    if 0.0 <= v <= 1.0:
        return v * gamma_luminans(bilde, g) + (1 - v) * gamma(bilde, g)
    else:
        raise ValueError("the weight v must be between 0 and 1")
|
import networkx as nx
import graph_embeddings_library as gel
from Analysis.aggregate_data.data_aggregation_tools import create_graph_from_AS_relationships
graph = nx.Graph()
method = 'node2vec'
if method == 'example':
graph = nx.Graph()
graph.add_nodes_from(['a', 'b', 'c'])
method = 'example'
dict_args = {'embedding_size': 4, 'default_value': 22}
data = gel.generate_ASN_embeddings_from_graph(graph, method, **dict_args)
elif method == 'node2vec':
graph = create_graph_from_AS_relationships()
print(nx.info(graph))
dict_args = {'dimensions': 64, 'walk_length': 10, 'num_walks': 80, 'workers': 1, 'window': 5, 'min_count': 1, 'batch_words': 4}
data = gel.generate_ASN_embeddings_from_graph(graph, method, **dict_args)
else:
raise Exception('Not defined type of embeddings') |
# -*- coding: utf-8 -*-
import sdata
from sdata.workbook import Workbook
import pandas as pd
import numpy as np
def test_blob():
wb = Workbook(name="workbook",
description="A excel like workbook",
)
print(wb)
assert wb.name == "workbook"
s0 = wb.create_sheet(1)
assert s0 is None
df1 = pd.DataFrame([1, 2, 3])
s1 = wb.create_sheet("df1")
s1.df = df1
assert s1.name == "df1"
s1.describe()
data2 = sdata.Data(name="data2", df=pd.DataFrame({"a": [1, 2, 3]}))
wb.add_sheet(data2)
assert data2 in wb
assert "data2" in wb.sheetnames
s3 = wb.add_sheet(1)
assert s3 is None
print(wb.sheetnames)
assert wb.sheetnames == ['df1', 'data2']
assert len(wb.sheets)==2 |
import FWCore.ParameterSet.Config as cms
# Calorimetry Digis (Ecal + Hcal) - * unsuppressed *
# returns sequence "calDigi"
from SimCalorimetry.Configuration.ecalDigiSequence_cff import *
from SimGeneral.Configuration.SimGeneral_cff import *
doAllDigi = cms.Sequence(ecalDigiSequence)
pdigi = cms.Sequence(cms.SequencePlaceholder("randomEngineStateProducer")*cms.SequencePlaceholder("mix")*doAllDigi)
|
import matplotlib.pyplot as plt
import numpy as np
from thermo import *
from fluids.numerics import logspace
ethanol_psat = VaporPressure(Tb=351.39, Tc=514.0, Pc=6137000.0, omega=0.635, CASRN='64-17-5')
ethanol_psat.plot_T_dependent_property(Tmin=400, Tmax=500, methods=['BOILING_CRITICAL', 'SANJARI', 'LEE_KESLER_PSAT', 'VDI_TABULAR', 'DIPPR_PERRY_8E'], pts=50, order=1)
|
from builtins import object
import glob
import os.path
from . import class_loader
class AlgorithmLoader(object):
@classmethod
def load_agent(cls, algorithm_path, algorithm_name):
return cls.load_algorithm_class(algorithm_path, algorithm_name, 'agent.Agent')
@classmethod
def load_parameter_server(cls, algorithm_path, algorithm_name):
return cls.load_algorithm_class(algorithm_path, algorithm_name, 'parameter_server.ParameterServer')
@classmethod
def load_algorithm_class(cls, algorithm_path, algorithm_name, suffix):
if algorithm_path is not None:
_, algorithm_name = os.path.split(algorithm_path)
elif '.' not in algorithm_name:
algorithm_name = 'relaax.algorithms.%s' % algorithm_name
return class_loader.ClassLoader.load(algorithm_path, '%s.%s' % (algorithm_name, suffix))
@classmethod
def model_packages(cls, algorithm_path, algorithm_name):
if algorithm_path is not None:
_, algorithm_name = os.path.split(algorithm_path)
elif '.' not in algorithm_name:
algorithm_name = 'relaax.algorithms.%s' % algorithm_name
m = class_loader.ClassLoader.import_module(algorithm_path, '%s.parameter_server' % algorithm_name)
mpath, = m.__path__
for f in glob.glob(os.path.join(mpath, '*model.py')):
name, _ = os.path.splitext(os.path.basename(f))
yield algorithm_path, '%s.%s' % (algorithm_name, name)
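# Illustrative sketch (not part of the original module): how the algorithm name
# is resolved before delegating to ClassLoader.  The algorithm name 'ppo' and
# the path '/opt/algos/my_algo' are made up for the example::
#
#     AlgorithmLoader.load_agent(None, 'ppo')
#     # -> ClassLoader.load(None, 'relaax.algorithms.ppo.agent.Agent')
#     AlgorithmLoader.load_agent('/opt/algos/my_algo', 'ignored')
#     # -> the last path component becomes the package name, i.e.
#     #    ClassLoader.load('/opt/algos/my_algo', 'my_algo.agent.Agent')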
|
from experiments.gamut_games import *
from prettytable import PrettyTable
from algorithms.algorithms import global_sampling, psp
from structures.gamestructure import path_to_nfg_files
from experiments.noise import UniformNoise
from structures.bounds import HoeffdingBound
import pandas as pd
import ast
import time
def get_rg_ground_truth_game(params: Dict, game_params: Dict):
return RandomGames(title=game_params['title'],
num_players=params['num_players'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_pd_ground_truth_game(params: Dict, game_params: Dict):
return PrisonersDilemma(title=game_params['title'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_bs_ground_truth_game(params: Dict, game_params: Dict):
return BattleOfTheSexes(title=game_params['title'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_cn_ground_truth_game(params: Dict, game_params: Dict):
return CongestionGame(title=game_params['title'],
num_players=params['num_players'],
num_facilities=params['num_facilities'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_td_ground_truth_game(params: Dict, game_params: Dict):
return TravelersDilemma(title=game_params['title'],
num_players=params['num_players'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_ch_ground_truth_game(params: Dict, game_params: Dict):
return Chicken(title=game_params['title'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_me_ground_truth_game(params: Dict, game_params: Dict):
return MinimumEffort(title=game_params['title'],
num_players=params['num_players'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_gd_ground_truth_game(params: Dict, game_params: Dict):
return GrabTheDollar(title=game_params['title'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_zs_ground_truth_game(params: Dict, game_params: Dict):
return ZeroSum(title=game_params['title'],
num_strategies=params['num_strategies'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
def get_cg_ground_truth_game(params: Dict, game_params: Dict):
return CompoundGame(title=game_params['title'],
num_players=params['num_players'],
max_payoff=params['max_payoff'],
min_payoff=params['min_payoff'],
noise=game_params['noise'])
class Experiment(ABC):
# Declare the type of games we are allowed to experiment with.
game_generators_dict = {'rg': get_rg_ground_truth_game,
'pd': get_pd_ground_truth_game,
'bs': get_bs_ground_truth_game,
'cn': get_cn_ground_truth_game,
'td': get_td_ground_truth_game,
'ch': get_ch_ground_truth_game,
'me': get_me_ground_truth_game,
'gd': get_gd_ground_truth_game,
'zs': get_zs_ground_truth_game,
'cg': get_cg_ground_truth_game}
def __init__(self, params: Dict):
self.params = params
self.gt_generator = Experiment.game_generators_dict[self.params['ground_truth_game_generator']]
Experiment.generate_params_prettytable(params=self.params, meta_file_location=self.params['result_file_location'] + '.meta')
@abstractmethod
def run_experiment(self):
pass
@staticmethod
def generate_params_prettytable(params: Dict, meta_file_location: str) -> None:
"""
Generate a pretty table with the parameters of an experiment, print it and save it to a file.
        :param params: a dictionary mapping parameter names to their values
:param meta_file_location: the location of the file where the pretty table of parameters will be stored
:return:
"""
#
t = PrettyTable()
t.field_names = ["Param", "Value"]
for param, value in params.items():
t.add_row([param, str(value)])
print(t)
# Save meta info file so we know what parameters were used to run the experiment.
with open(meta_file_location, 'w+') as meta_file:
meta_file.write(str(t))
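# Illustrative sketch (not part of the original module): the helper above simply
# tabulates whatever parameter dictionary it is given.  The keys and the output
# path are made up for the example::
#
#     Experiment.generate_params_prettytable(
#         params={'num_games': 10, 'delta': 0.05},
#         meta_file_location='/tmp/expt.meta')
#     # prints a two-column Param/Value table and writes the same text
#     # to /tmp/expt.meta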
class GSExperiments(Experiment):
def run_experiment(self):
# List for results
results = []
# Draw some number of ground-truth games.
for i in range(0, self.params['num_games']):
print(f'Game #{i}')
# Test different noise models.
for j, noise in enumerate(self.params['noise_models']):
print(f'Noise #{j}', end='\t ')
game = self.gt_generator(self.params,
{'title': 'expt_gs_game_' + self.params['ground_truth_game_generator'] + '_' + self.params['experiment_name'], 'noise': noise})
c = noise.get_c(self.params['max_payoff'], self.params['min_payoff'])
                # For a fixed noise model and ground-truth game, perform multiple trials, each defined as a run of GS.
for t in range(0, self.params['num_trials']):
if t % 10 == 0:
print(t, end='\t')
df = pd.DataFrame(results, columns=['game', 'variance', 'bound', 'm', 'eps'])
df.to_csv(self.params['result_file_location'], index=False)
for m in self.params['m_test']:
# Run GS for each type of bound.
for bound in self.params['bounds']:
g = game.clone()
epsilon_gs, total_num_samples_gs = global_sampling(estimated_game=g, bound=bound, m=m, delta=self.params['delta'], c=c)
# Collect results in the form (game index, variance of the noise model, name of bound, number of samples, epsilon).
results += [[i, noise.get_variance(), str(bound)[0], m, epsilon_gs]]
print('')
# Convert results to DataFrame and save to a csv file
df = pd.DataFrame(results, columns=['game', 'variance', 'bound', 'm', 'eps'])
df.to_csv(self.params['result_file_location'], index=False)
class RegretExperiments(Experiment):
def run_experiment(self):
# List for results
results = []
# Draw some number of ground-truth games.
for i in range(self.params['num_games']):
print(f'Game #{i}')
ground_truth_game = None
list_of_nashs = []
while len(list_of_nashs) == 0:
# Create the ground truth game
ground_truth_game = self.gt_generator(self.params,
{'title': 'expt_regret_game_' + self.params['ground_truth_game_generator'] + '_' + self.params['experiment_name'],
'noise': None})
# Compute the Nash of the ground-truth game by calling gambit and read it back.
ground_truth_game.solve_nash()
with open(path_to_nfg_files + 'expt_regret_game_' + self.params['ground_truth_game_generator'] + '_' + self.params['experiment_name'] + '_sol', 'r') as sol_file:
list_of_nashs = ast.literal_eval(sol_file.read())
print(f'The game has {len(list_of_nashs)} many nashs')
for nash in list_of_nashs:
print(f'\t {nash}')
# Test different noise models.
for j, noise in enumerate(self.params['noise_models']):
print(f'Noise #{j}', end='\t ')
# Construct the game which we are going to estimate.
ground_truth_game.noise = noise
estimated_game = ground_truth_game.clone()
c = noise.get_c(self.params['max_payoff'], self.params['min_payoff'])
# Start Experiment
for t in range(0, self.params['num_trials']):
if t % 10 == 0:
print(t, end='\t')
# Convert results to DataFrame and save to a csv file
df = pd.DataFrame(results, columns=['game', 'variance', 'bound', 'm', 'eps', 'num_nash', 'max_regret'])
df.to_csv(self.params['result_file_location'], index=False)
for m in self.params['m_test']:
for bound in self.params['bounds']:
g = estimated_game.clone()
epsilon_gs, total_num_samples_gs = global_sampling(estimated_game=g, bound=bound, m=m, delta=self.params['delta'], c=c)
g.set_payoffs()
regrets = [g.regret(p, nash) for p in range(g.num_players) for nash in list_of_nashs]
max_regret = max(regrets)
# print(f'\t 2*eps = {2.0 * epsilon_gs:.4f} ', "\t".join(f'{regret_p:.4f}' for regret_p in regrets), f'max_regret = {max_regret:.4f}')
results += [[i, noise.get_variance(), str(bound)[0], m, epsilon_gs, len(list_of_nashs), max_regret]]
print('')
# Convert results to DataFrame and save to a csv file
df = pd.DataFrame(results, columns=['game', 'variance', 'bound', 'm', 'eps', 'num_nash', 'max_regret'])
df.to_csv(self.params['result_file_location'], index=False)
class PSPExperiments(Experiment):
def run_experiment(self):
# List for results
results = []
# Draw some number of ground-truth games.
for i in range(0, self.params['num_games']):
print(f'Game #{i}')
# Test different noise models.
for j, noise in enumerate(self.params['noise_models']):
print(f'Noise #{j}', end='\t ')
game = self.gt_generator(self.params, {'title': 'exp_psp_game_' + self.params['ground_truth_game_generator'] + '_' + self.params['experiment_name'],
'noise': noise})
c = noise.get_c(self.params['max_payoff'], self.params['min_payoff'])
                # For a fixed noise model and ground-truth game, perform multiple trials, each defined as a run of GS.
for t in range(0, self.params['num_trials']):
if t % 10 == 0:
print(t, end='\t')
# Convert results to DataFrame and save to a csv file
df = pd.DataFrame(results, columns=['game', 'algo', 'variance', 'bound', 'm', 'eps', 'num_pruned', 'success'])
df.to_csv(self.params['result_file_location'], index=False)
for m in self.params['m_test']:
# Run GS for each type of bound.
for bound in self.params['bounds']:
# First, run GS
g = game.clone()
epsilon_gs, total_num_samples_gs = global_sampling(estimated_game=g, bound=bound, m=m, delta=self.params['delta'], c=c)
# Collect gs results
results += [[i, 'gs', noise.get_variance(), str(bound)[0], total_num_samples_gs, epsilon_gs, -1, True]]
                            # Second, run PSP with a doubling sampling schedule for comparison against GS.
g = game.clone()
psp_success, total_num_samples, total_num_profiles_pruned, psp_epsilon = psp(estimated_game=g,
bound=bound,
# m_schedule=[int(m / 2 ** (3 - i)) for i in range(1, 6)], # Old Schedule.
m_schedule=[int((m / 4) * 2 ** i) for i in range(4)],
delta_schedule=[self.params['delta'] / 4.0] * 4,
# target_epsilon=epsilon_gs, # Old target epsilon.
target_epsilon=0.0,
c=c)
                            # Collect psp results
results += [[i, 'psp', noise.get_variance(), str(bound)[0], total_num_samples, psp_epsilon, total_num_profiles_pruned, psp_success]]
print('')
# Convert results to DataFrame and save to a csv file
df = pd.DataFrame(results, columns=['game', 'algo', 'variance', 'bound', 'm', 'eps', 'num_pruned', 'success'])
df.to_csv(self.params['result_file_location'], index=False)
class PSPExperimentsPart2(Experiment):
def run_experiment(self):
# List for results
results = []
noise = UniformNoise(low=-.5, high=.5)
game_index = 0
t0 = time.time()
for num_actions in self.params['num_actions']:
print(f'num_strategies #{num_actions}')
# self.params['num_strategies'] = num_actions
self.params['num_facilities'] = num_actions
# Draw some number of ground-truth games.
for _ in range(0, self.params['num_games']):
print(f'Game #{game_index}')
game = self.gt_generator(self.params, {'title': 'exp_psp_part2_game_' + self.params['ground_truth_game_generator'] + '_' + self.params['experiment_name'],
'noise': noise})
c = noise.get_c(self.params['max_payoff'], self.params['min_payoff'])
if game_index % 10 == 0:
print(f'Saving..., time so far = {time.time() - t0:.4f}')
# Convert results to DataFrame and save to a csv file
df = pd.DataFrame(results, columns=['game', 'num_strategies', 'algo', 'variance', 'bound', 'm', 'eps_index', 'eps', 'num_pruned'])
df.to_csv(self.params['result_file_location'], index=False)
for j, eps in enumerate(self.params['eps']):
m = int(HoeffdingBound.number_of_samples({'c': c, 'delta': self.params['delta'], 'estimated_game': game, 'eps': eps}))
for bound in self.params['bounds']:
# First, run GS
g = game.clone()
epsilon_gs, total_num_samples_gs = global_sampling(estimated_game=g, bound=bound, m=m, delta=self.params['delta'], c=c)
# Collect gs results
results += [[game_index, num_actions, 'gs', noise.get_variance(), str(bound)[0], total_num_samples_gs, j, epsilon_gs, -1]]
                        # Second, run PSP with a doubling sampling schedule for comparison against GS.
g = game.clone()
psp_success, total_num_samples, total_num_profiles_pruned, psp_epsilon = psp(estimated_game=g,
bound=bound,
m_schedule=[int((m / 4) * 2 ** i) for i in range(4)],
delta_schedule=[self.params['delta'] / 4.0] * 4,
target_epsilon=0.0,
c=c)
                        # Collect psp results
results += [[game_index, num_actions, 'psp', noise.get_variance(), str(bound)[0], total_num_samples, j, psp_epsilon, total_num_profiles_pruned]]
game_index += 1
print('')
# Convert results to DataFrame and save to a csv file
print(f'Saving..., time so far = {time.time() - t0:.4f}')
df = pd.DataFrame(results, columns=['game', 'num_strategies', 'algo', 'variance', 'bound', 'm', 'eps_index', 'eps', 'num_pruned'])
df.to_csv(self.params['result_file_location'], index=False)
|
# Generated by Django 2.2.8 on 2020-02-21 23:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('submissions', '0010_submissioncomment'),
]
operations = [
migrations.AddField(
model_name='submission',
name='status',
field=models.CharField(choices=[('proposed', 'Proposed'), ('cancelled', 'Cancelled')], default='proposed', max_length=20, verbose_name='status'),
),
]
|
import re
import collections
import logging
from dynamo.registry.registry import RegistryDatabase
from dynamo.dataformat import Configuration, Block, ObjectError, ConfigurationError
LOG = logging.getLogger(__name__)
class UnhandledCopyExists(object):
"""
Check for pending transfer requests made to Dealer.
Sets one attr:
    unhandled_copy_exists
"""
produces = ['unhandled_copy_exists']
def __init__(self, config = None):
config = Configuration(config)
self.registry = RegistryDatabase(config.get('registry', None))
def load(self, inventory):
# collect the name of items that are not yet activated or are activated but not queued
sql = 'SELECT i.`item` FROM `copy_request_items` AS i INNER JOIN `copy_requests` AS r ON r.`id` = i.`request_id`'
sql += ' WHERE r.`status` = \'new\''
items = self.registry.db.query(sql)
items += self.registry.db.query('SELECT `item` FROM `active_copies` WHERE `status` = \'new\'')
for item_name in items:
try:
dataset_name, block_name = Block.from_full_name(item_name)
except ObjectError:
dataset_name, block_name = item_name, None
try:
dataset = inventory.datasets[dataset_name]
except KeyError:
continue
if block_name is not None:
block = dataset.find_block(block_name)
if block is None:
continue
dataset.attr['unhandled_copy_exists'] = True
|
#
# ANIAnnc.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# ANI : Announceable variant
#
from .MgmtObjAnnc import *
from Types import ResourceTypes as T, JSON
from Validator import constructPolicy, addPolicy
# Attribute policies for this resource are constructed during startup of the CSE
aniAPolicies = constructPolicy([
'ant', 'ldv'
])
attributePolicies = addPolicy(mgmtObjAAttributePolicies, aniAPolicies)
# TODO resourceMappingRules, announceSyncType, owner
class ANIAnnc(MgmtObjAnnc):
def __init__(self, dct:JSON=None, pi:str=None, create:bool=False) -> None:
super().__init__(dct, pi, mgd=T.ANI, create=create, attributePolicies=attributePolicies)
|
#!/usr/bin/python
# Classification (U)
"""Program: masterrep_connect.py
Description: Unit testing of MasterRep.connect method in mysql_class.py.
Usage:
test/unit/mysql_class/masterrep_connect.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import mysql_class
import lib.machine as machine
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_silent_true
test_silent_false
test_silent_default
test_db_up
test_db_down
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.name = "Mysql_Server"
self.server_id = 10
self.sql_user = "mysql_user"
self.sql_pass = "my_japd"
self.machine = getattr(machine, "Linux")()
self.host = "host_server"
self.port = 3307
self.defaults_file = "def_cfg_file"
self.extra_def_file = "extra_cfg_file"
self.rep_user = "rep_user"
self.rep_japd = "rep_japd"
self.mysqlrep = mysql_class.MasterRep(
self.name, self.server_id, self.sql_user, self.sql_pass,
os_type=self.machine, defaults_file=self.defaults_file,
rep_user=self.rep_user, rep_japd=self.rep_japd)
@mock.patch("mysql_class.Server.connect")
def test_silent_true(self, mock_server):
"""Function: test_silent_true
Description: Test with silent true set.
Arguments:
"""
mock_server.return_value = True
self.assertFalse(self.mysqlrep.connect(silent=True))
@mock.patch("mysql_class.Server.connect")
def test_silent_false(self, mock_server):
"""Function: test_silent_false
Description: Test with silent false set.
Arguments:
"""
mock_server.return_value = True
self.assertFalse(self.mysqlrep.connect(silent=False))
@mock.patch("mysql_class.Server.connect")
def test_silent_default(self, mock_server):
"""Function: test_silent_default
Description: Test with silent default setting.
Arguments:
"""
mock_server.return_value = True
self.assertFalse(self.mysqlrep.connect())
@mock.patch("mysql_class.MasterRep.upd_mst_status")
@mock.patch("mysql_class.Server.set_srv_gtid")
@mock.patch("mysql_class.Server.connect")
def test_db_up(self, mock_conn, mock_set, mock_update):
"""Function: test_db_up
Description: Test with connection up.
Arguments:
"""
mock_conn.return_value = True
mock_set.return_value = True
mock_update.return_value = True
self.mysqlrep.conn = True
self.assertFalse(self.mysqlrep.connect())
@mock.patch("mysql_class.Server.connect")
def test_db_down(self, mock_server):
"""Function: test_db_down
Description: Test with connection down.
Arguments:
"""
mock_server.return_value = True
self.assertFalse(self.mysqlrep.connect())
if __name__ == "__main__":
unittest.main()
|
from criaenvio.cliente import APIClienteCriaEnvio
class EnvioCriaEnvioAPI(APIClienteCriaEnvio):
RECURSO = 'envios'
|
#############################################################################
#Copyright (c) 2010, Jo Bovy, David W. Hogg, Dustin Lang
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
#OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
#AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
#WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
#############################################################################
import scipy as sc
from scipy import special
import math as m
import scipy.linalg as linalg
import scipy.optimize as optimize
import scipy.stats as stats
from generate_data import read_data
import matplotlib
matplotlib.use('Agg')
from pylab import *
from matplotlib.pyplot import *
from matplotlib import rc
import bovy_plot as plot
def ex10(exclude=sc.array([1,2,3,4]),
plotfilenameA='ex10a.png',
plotfilenameB='ex10b.png',
nburn=1000,nsamples=200000,
parsigma=[5,.075,0.1],
bovyprintargs={}):
"""ex10: solve exercise 10 using MCMC sampling
Input:
exclude - ID numbers to exclude from the analysis (can be None)
plotfilename - filename for the output plot
nburn - number of burn-in samples
nsamples - number of samples to take after burn-in
parsigma - proposal distribution width (Gaussian)
Output:
plot
History:
2010-05-07 - Written - Bovy (NYU)
"""
sc.random.seed(-1) #In the interest of reproducibility (if that's a word)
#Read the data
data= read_data('data_yerr.dat')
ndata= len(data)
if not exclude == None:
nsample= ndata- len(exclude)
else:
nsample= ndata
#First find the chi-squared solution, which we will use as an
#initial guess
#Put the data in the appropriate arrays and matrices
Y= sc.zeros(nsample)
X= sc.zeros(nsample)
A= sc.ones((nsample,2))
C= sc.zeros((nsample,nsample))
yerr= sc.zeros(nsample)
jj= 0
for ii in range(ndata):
if not exclude == None and sc.any(exclude == data[ii][0]):
pass
else:
Y[jj]= data[ii][1][1]
X[jj]= data[ii][1][0]
A[jj,1]= data[ii][1][0]
C[jj,jj]= data[ii][2]**2.
yerr[jj]= data[ii][2]
jj= jj+1
#Now compute the best fit and the uncertainties
bestfit= sc.dot(linalg.inv(C),Y.T)
bestfit= sc.dot(A.T,bestfit)
bestfitvar= sc.dot(linalg.inv(C),A)
bestfitvar= sc.dot(A.T,bestfitvar)
bestfitvar= linalg.inv(bestfitvar)
bestfit= sc.dot(bestfitvar,bestfit)
    initialguess= sc.array([bestfit[0],bestfit[1],0.])#(b,m,logS)
#With this initial guess start off the sampling procedure
initialX= objective(initialguess,X,Y,yerr)
currentX= initialX
bestX= initialX
bestfit= initialguess
currentguess= initialguess
naccept= 0
samples= []
samples.append(currentguess)
for jj in range(nburn+nsamples):
#Draw a sample from the proposal distribution
newsample= sc.zeros(3)
newsample[0]= currentguess[0]+stats.norm.rvs()*parsigma[0]
newsample[1]= currentguess[1]+stats.norm.rvs()*parsigma[1]
newsample[2]= currentguess[2]+stats.norm.rvs()*parsigma[2]
#Calculate the objective function for the newsample
newX= objective(newsample,X,Y,yerr)
#Accept or reject
#Reject with the appropriate probability
u= stats.uniform.rvs()
accept=False
try:
test= m.exp(newX-currentX)
if u < test:
accept= True
except OverflowError:
accept= True
if accept:
#Accept
currentX= newX
currentguess= newsample
naccept= naccept+1
if currentX > bestX:
bestfit= currentguess
bestX= currentX
samples.append(currentguess)
if double(naccept)/(nburn+nsamples) < .5 or double(naccept)/(nburn+nsamples) > .8:
print "Acceptance ratio was "+str(double(naccept)/(nburn+nsamples))
samples= sc.array(samples).T[:,nburn:-1]
print "Best-fit, overall"
print bestfit, sc.mean(samples[2,:]), sc.median(samples[2,:])
histmb,edges= sc.histogramdd(samples.T[:,0:2],bins=round(sc.sqrt(nsamples)/2.))
indxi= sc.argmax(sc.amax(histmb,axis=1))
indxj= sc.argmax(sc.amax(histmb,axis=0))
print "Best-fit, marginalized"
print edges[0][indxi-1], edges[1][indxj-1]
print edges[0][indxi], edges[1][indxj]
print edges[0][indxi+1], edges[1][indxj+1]
print "Best-fit for S marginalized"
histS,edgesS= sc.histogram(samples.T[:,2],bins=round(sc.sqrt(nsamples)/2.))
indx= sc.argmax(histS)
#Data with MAP line and sampling
plot.bovy_print(**bovyprintargs)
bestb= bestfit[0]
bestm= bestfit[1]
xrange=[0,300]
yrange=[0,700]
plot.bovy_plot(xrange,bestm*sc.array(xrange)+bestb,'k-',
xrange=xrange,yrange=yrange,
xlabel=r'$x$',ylabel=r'$y$',zorder=2)
errorbar(X,Y,sc.exp(bestfit[2]/2.),
marker='o',color='k',linestyle='None',zorder=1)
plot.bovy_text(r'$\mathrm{MAP}\ :\ y = %4.2f \,x+ %4.0f' % (bestfit[1], bestfit[0])+r'$'+'\n'+r'$\mathrm{MAP}\ :\ \sqrt{S} = %3.1f$' % (sc.exp(bestfit[2]/2.)),
bottom_right=True)
plot.bovy_end_print(plotfilenameA)
#Data with MAP line and sampling
plot.bovy_print(**bovyprintargs)
bestb= edges[0][indxi]
bestm= edges[1][indxj]
bestS= edgesS[indx]
xrange=[0,300]
yrange=[0,700]
plot.bovy_plot(xrange,bestm*sc.array(xrange)+bestb,'k-',
xrange=xrange,yrange=yrange,
xlabel=r'$x$',ylabel=r'$y$',zorder=2)
errorbar(X,Y,sc.exp(bestS/2.),
marker='o',color='k',linestyle='None',zorder=1)
plot.bovy_text(r'$\mathrm{marginalized\ over\ S}\ :\ y = %4.2f \,x+ %4.0f' % (bestm, bestb)+r'$'+'\n'+r'$\mathrm{marginalized\ over}\ (m,b)\ :\ \sqrt{S} = %3.1f$' % (sc.exp(bestS/2.)),
bottom_right=True)
plot.bovy_end_print(plotfilenameB)
return
def objective(pars,X,Y,yerr):
"""The objective function"""
b= pars[0]
s= pars[1]
S= sc.exp(pars[2])
return -0.5*sc.sum((Y-s*X-b)**2./S+pars[2])
|
# Copyright 2019 Jake Magers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import contextlib
import random
import threading
from collections import deque
from numbers import Number
from . import _buffers as bufs
from . import transducers as xf
__all__ = ['chan', 'alt', 'b_alt', 'QueueSizeError']
MAX_QUEUE_SIZE = 1024
class QueueSizeError(Exception):
"""Maximum pending channel operations exceeded.
Raised when too many operations have been enqueued on a channel.
Consider using a windowing buffer to prevent enqueuing too many puts or
altering your design to have less asynchronous "processes" access the
channel at once.
Note:
This exception is an indication of a design error. It should NOT be
caught and discarded.
"""
class Promise:
def __init__(self):
self._lock = threading.Lock()
self._value = None
self._is_realized = False
self._realized = threading.Condition(self._lock)
def deliver(self, value):
with self._lock:
if self._is_realized:
return False
self._value = value
self._is_realized = True
self._realized.notify_all()
return True
def deref(self):
with self._lock:
self._realized.wait_for(lambda: self._is_realized)
return self._value
class FlagFuture(asyncio.Future):
def __init__(self, flag):
self.__flag = flag
self.__result = None
super().__init__(loop=asyncio.get_running_loop())
def set_result(self, result):
raise AssertionError('cannot call set_result on a future provided by '
'a channel')
def set_exception(self, exception):
raise AssertionError('cannot call set_exception on a future provided '
'by a channel')
def cancel(self):
with self.__flag['lock']:
if self.__flag['is_active']:
self.__flag['is_active'] = False
elif not super().done():
# This case is when value has been committed but
# future hasn't been set because call_soon_threadsafe()
# callback hasn't been invoked yet
super().set_result(self.__result)
return super().cancel()
def future_deliver_fn(future):
def set_result(result):
try:
asyncio.Future.set_result(future, result)
except asyncio.InvalidStateError:
assert future.result() is result
def deliver(result):
future._FlagFuture__result = result
future.get_loop().call_soon_threadsafe(set_result, result)
return deliver
def create_flag():
return {'lock': threading.Lock(), 'is_active': True}
class HandlerManagerMixin:
def __enter__(self):
return self.acquire()
def __exit__(self, e_type, e_val, traceback):
self.release()
class FnHandler(HandlerManagerMixin):
def __init__(self, cb, is_waitable=True):
self._cb = cb
self.is_waitable = is_waitable
self.lock_id = 0
self.is_active = True
def acquire(self):
return True
def release(self):
pass
def commit(self):
return self._cb
class FlagHandler(HandlerManagerMixin):
def __init__(self, flag, cb, is_waitable=True):
self._flag = flag
self._cb = cb
self.is_waitable = is_waitable
self.lock_id = id(flag)
@property
def is_active(self):
return self._flag['is_active']
def acquire(self):
return self._flag['lock'].acquire()
def release(self):
self._flag['lock'].release()
def commit(self):
self._flag['is_active'] = False
return self._cb
@contextlib.contextmanager
def acquire_handlers(*handlers):
"""Returns a context manager for acquiring `handlers` without deadlock."""
# Acquire locks in consistent order
for h in sorted(handlers, key=lambda h: h.lock_id):
is_acquired = h.acquire()
assert is_acquired
try:
yield True
finally:
for h in handlers:
h.release()
def nop_ex_handler(e):
raise e
class chan:
"""A CSP channel with optional buffer, transducer, and exception handler.
Channels support multiple producers and consumers and may be buffered or
unbuffered. Additionally, buffered channels can optionally have a
transformation applied to the values put to them through the use of a
:any:`transducer`.
Channels may be used by threads with or without a running asyncio event
loop. The :meth:`get`, :meth:`put`, and :func:`alt` functions provide
direct support for asyncio by returning awaitables. Channels additionally
can be used as asynchronous generators when used with ``async for``.
:meth:`b_get`, :meth:`b_put`, :func:`b_alt`, and :meth:`to_iter` provide
blocking alternatives for threads which do not wish to use asyncio.
Channels can even be used with callback based code via :meth:`f_put` and
:meth:`f_get`. A very valuable feature of channels is that producers and
consumers of them need not be of the same type. For example, a value placed
onto a channel with :meth:`put` (asyncio) can be taken by a call to
:meth:`b_get` (blocking) from a separate thread.
A select/alt feature is also available using the :func:`alt` and
:func:`b_alt` functions. This feature allows one to attempt many operations
on a channel at once and only have the first operation to complete actually
committed.
Once closed, future puts will be unsuccessful but any pending puts will
remain until consumed or until a :any:`reduced` value is returned from
the transformation. Once exhausted, all future gets will complete with
the value None. Because of this, None cannot be put onto a channel either
directly or indirectly through a transformation.
Args:
buf_or_n: An optional buffer that may be expressed as a positive number.
If it's a number, a fixed buffer of that capacity will be used.
If None, the channel will be unbuffered.
xform: An optional :any:`transducer <transducers>` for transforming
elements put onto the channel. `buf_or_n` must not be None if this
is provided.
ex_handler: An optional function to handle exceptions raised during
transformation. Must accept the raised exception as a parameter.
Any non-None return value will be put onto the buffer.
See Also:
:any:`buffer()`
:any:`dropping_buffer()`
:any:`sliding_buffer()`
"""
def __init__(self, buf_or_n=None, xform=None, ex_handler=None):
if buf_or_n is None:
if xform is not None:
raise TypeError('unbuffered channels cannot have an xform')
if ex_handler is not None:
raise TypeError('unbuffered channels cannot have an ex_handler')
self._buf = (bufs.FixedBuffer(buf_or_n)
if isinstance(buf_or_n, Number)
else buf_or_n)
xform = xf.identity if xform is None else xform
ex_handler = nop_ex_handler if ex_handler is None else ex_handler
self._takes = deque()
self._puts = deque()
self._is_closed = False
self._buf_rf_is_completed = False
self._lock = threading.Lock()
@xform
@xf.completing
def xrf(_, val):
if val is None:
raise AssertionError('xform cannot produce None')
self._buf.put(val)
def ex_handler_rf(*args):
try:
return xrf(*args)
except Exception as e:
val = ex_handler(e)
if val is not None:
self._buf.put(val)
self._buf_rf = ex_handler_rf
def put(self, val, *, wait=True):
"""Attempts to put `val` onto the channel.
Puts will fail in the following cases:
* the channel is already closed
* ``wait=False`` and `val` cannot be immediately put onto the channel
* a :any:`reduced` value is returned during transformation
Args:
val: A non-None value to put onto the channel.
wait: An optional bool that if False, fails the put operation when
it cannot complete immediately.
Returns:
An awaitable that will evaluate to True if `val` is accepted onto
the channel or False if it's not.
Raises:
RuntimeError: If the calling thread has no running event loop.
QueueSizeError: If the channel has too many pending put operations.
"""
flag = create_flag()
future = FlagFuture(flag)
handler = FlagHandler(flag, future_deliver_fn(future), wait)
ret = self._p_put(handler, val)
if ret is not None:
asyncio.Future.set_result(future, ret[0])
return future
def get(self, *, wait=True):
"""Attempts to take a value from the channel.
Gets will fail if the channel is exhausted or if ``wait=False`` and a
value is not immediately available.
Args:
wait: An optional bool that if False, fails the get operation when
a value is not immediately available.
Returns:
An awaitable that evaluates to a value taken from the channel or
None if the operation fails.
Raises:
RuntimeError: If the calling thread has no running event loop.
QueueSizeError: If the channel has too many pending get operations.
"""
flag = create_flag()
future = FlagFuture(flag)
handler = FlagHandler(flag, future_deliver_fn(future), wait)
ret = self._p_get(handler)
if ret is not None:
asyncio.Future.set_result(future, ret[0])
return future
def b_put(self, val, *, wait=True):
"""Same as :meth:`put` except it blocks instead of returning an awaitable.
Does not require an event loop.
"""
prom = Promise()
ret = self._p_put(FnHandler(prom.deliver, wait), val)
if ret is not None:
return ret[0]
return prom.deref()
def b_get(self, *, wait=True):
"""Same as :meth:`get` except it blocks instead of returning an awaitable.
Does not require an event loop.
"""
prom = Promise()
ret = self._p_get(FnHandler(prom.deliver, wait))
if ret is not None:
return ret[0]
return prom.deref()
def f_put(self, val, f=None):
"""Asynchronously puts `val` onto the channel and calls `f` when complete.
Does not require an event loop.
Args:
val: A non-None value to put onto the channel.
f: An optional non-blocking function accepting the completion
status of the put operation.
Returns:
False if the channel is already closed or True if it's not.
Raises:
QueueSizeError: If the channel has too many pending put operations.
"""
f = (lambda _: None) if f is None else f
ret = self._p_put(FnHandler(f), val)
if ret is None:
return True
f(ret[0])
return ret[0]
def f_get(self, f):
"""Asynchronously takes a value from the channel and calls `f` with it.
Does not require an event loop.
Args:
f: A non-blocking function accepting a single argument. Will be
passed the value taken from the channel or None if the channel
is exhausted.
Raises:
QueueSizeError: If the channel has too many pending get operations.
"""
ret = self._p_get(FnHandler(f))
if ret is None:
return
f(ret[0])
def offer(self, val):
"""Same as :meth:`b_put(val, wait=False) <b_put>`."""
return self.b_put(val, wait=False)
def poll(self):
"""Same as :meth:`b_get(wait=False) <b_get>`."""
return self.b_get(wait=False)
def close(self):
"""Closes the channel."""
with self._lock:
self._cleanup()
self._close()
async def __aiter__(self):
"""Returns an asynchronous iterator over the channel's values."""
while True:
value = await self.get()
if value is None:
break
yield value
def to_iter(self):
"""Returns an iterator over the channel's values.
Calling ``next()`` on the returned iterator may block.
Does not require an event loop.
"""
while True:
val = self.b_get()
if val is None:
break
yield val
def _p_put(self, handler, val):
"""Commits or enqueues a put operation to the channel.
If the put operation completes immediately, then the `handler` will be
committed but its callback will not be invoked. The completion status
of the operation will be wrapped in a tuple and returned. The status
will be True if `val` was accepted onto the channel or False otherwise.
If the operation is unable to complete immediately, then `handler` and
`val` will be enqueued and None will be returned. When the operation
eventually completes, the `handler` will be committed and its callback
will be invoked with the completion status.
Args:
handler: A handler that will be committed upon completion. Its
callback will only be invoked if the operation is enqueued.
val: A non-None value to put onto the channel.
Returns:
A tuple containing the completion status if the operation completes
immediately. None if the operation is enqueued.
Raises:
QueueSizeError: If the channel has too many pending put operations.
"""
if val is None:
raise TypeError('item cannot be None')
with self._lock:
self._cleanup()
if self._is_closed:
return self._fail_op(handler, False)
# Attempt to transfer val onto buf
if self._buf is not None and not self._buf.is_full():
with handler:
if not handler.is_active:
return None
handler.commit()
self._buf_put(val)
self._transfer_buf_vals_to_takers()
return True,
# Attempt to transfer val to a taker
if self._buf is None:
while len(self._takes) > 0:
taker = self._takes[0]
with acquire_handlers(handler, taker):
if not handler.is_active:
return None
self._takes.popleft()
if taker.is_active:
handler.commit()
taker.commit()(val)
return True,
if not handler.is_waitable:
return self._fail_op(handler, False)
# Attempt to enqueue the operation
if len(self._puts) >= MAX_QUEUE_SIZE:
raise QueueSizeError('channel has too many pending puts')
self._puts.append((handler, val))
def _p_get(self, handler):
"""Commits or enqueues a get operation to the channel.
If the get operation completes immediately, then the `handler` will be
committed but its callback will not be invoked. If the channel is not
already exhausted, then the value taken from the channel will be
wrapped in a tuple and returned. If the channel is already exhausted
then the tuple, ``(None,)``, will be returned.
If the operation is unable to complete immediately, then `handler` and
`val` will be enqueued and None will be returned. When the operation
eventually completes, the `handler` will be committed and its callback
will be invoked with the value taken from the channel or None if its
exhausted.
Args:
handler: A handler that will be committed upon completion. Its
callback will only be invoked if the operation is enqueued.
Returns:
A tuple containing the result of the get operation if it completes
immediately. None if the operation is enqueued.
Raises:
QueueSizeError: If the channel has too many pending get operations.
"""
with self._lock:
self._cleanup()
# Attempt to take val from buf
if self._buf is not None and len(self._buf) > 0:
with handler:
if not handler.is_active:
return None
handler.commit()
ret = self._buf.get()
# Transfer vals from putters onto buf
while len(self._puts) > 0 and not self._buf.is_full():
putter, val = self._puts.popleft()
with putter:
if putter.is_active:
putter.commit()(True)
self._buf_put(val)
self._complete_buf_rf_if_ready()
return ret,
# Attempt to take val from a putter
if self._buf is None:
while len(self._puts) > 0:
putter, val = self._puts[0]
with acquire_handlers(handler, putter):
if not handler.is_active:
return None
self._puts.popleft()
if putter.is_active:
handler.commit()
putter.commit()(True)
return val,
if self._is_closed or not handler.is_waitable:
return self._fail_op(handler, None)
# Attempt to enqueue the operation
if len(self._takes) >= MAX_QUEUE_SIZE:
raise QueueSizeError('channel has too many pending gets')
self._takes.append(handler)
def _cleanup(self):
"""Removes enqueued operations that are no longer active."""
self._takes = deque(h for h in self._takes if h.is_active)
self._puts = deque((h, v) for h, v in self._puts if h.is_active)
@staticmethod
def _fail_op(handler, val):
with handler:
if handler.is_active:
handler.commit()
return val,
return None
def _buf_put(self, val):
if xf.is_reduced(self._buf_rf(None, val)):
# If reduced value is returned then no more input is allowed onto
# buf. To ensure this, remove all pending puts and close ch.
for putter, _ in self._puts:
with putter:
if putter.is_active:
putter.commit()(False)
self._puts.clear()
self._close()
def _transfer_buf_vals_to_takers(self):
while len(self._takes) > 0 and len(self._buf) > 0:
taker = self._takes.popleft()
with taker:
if taker.is_active:
taker.commit()(self._buf.get())
def _complete_buf_rf_if_ready(self):
"""Calls buf_rf completion arity once if all input has been put to buf.
Guarantees that the buf_rf completion arity will be invoked only after
all input has been placed onto the buffer and that it will never be
called more than once. Invoking the completion arity will flush any
remaining values from the transformed reducing function onto buf.
"""
if (self._is_closed and
len(self._puts) == 0 and
not self._buf_rf_is_completed):
self._buf_rf_is_completed = True
self._buf_rf(None)
def _close(self):
self._is_closed = True
if self._buf is not None:
self._complete_buf_rf_if_ready()
self._transfer_buf_vals_to_takers()
# Remove pending takes
# No-op if there are pending puts or buffer isn't empty
for taker in self._takes:
with taker:
if taker.is_active:
taker.commit()(None)
self._takes.clear()
class _Undefined:
"""A default parameter value that a user could never pass in."""
def _alts(flag, deliver_fn, ops, priority, default):
ops = list(ops)
if len(ops) == 0:
raise ValueError('alts must have at least one channel operation')
if not priority:
random.shuffle(ops)
ch_ops = {}
# Parse ops into ch_ops
for raw_op in ops:
try:
ch, val = raw_op
op = {'type': 'put', 'value': val}
except TypeError:
ch = raw_op
op = {'type': 'get'}
if ch_ops.get(ch, op)['type'] != op['type']:
raise ValueError('cannot get and put to same channel')
ch_ops[ch] = op
def create_handler(ch):
return FlagHandler(flag, lambda val: deliver_fn((val, ch)))
# Start ops
for ch, op in ch_ops.items():
if op['type'] == 'get':
ret = ch._p_get(create_handler(ch))
elif op['type'] == 'put':
ret = ch._p_put(create_handler(ch), op['value'])
if ret is not None:
return ret[0], ch
if default is not _Undefined:
with flag['lock']:
if flag['is_active']:
flag['is_active'] = False
return default, 'default'
def alt(*ops, priority=False, default=_Undefined):
"""
alt(*ops, priority=False, default=Undefined)
Returns an awaitable representing the first and only channel operation to finish.
Accepts a variable number of operations that either get from or put to a
channel and commits only one of them. If no `default` is provided, then
only the first op to finish will be committed. If `default` is provided and
none of the `ops` finish immediately, then no operation will be committed
and `default` will instead be used to complete the returned awaitable.
Args:
ops: Operations that either get from or put to a channel.
A get operation is represented as simply a channel to get from.
A put operation is represented as an iterable of the form
``[channel, val]``, where `val` is an item to put onto `channel`.
priority: An optional bool. If True, operations will be tried in order.
If False, operations will be tried in random order.
default: An optional value to use in case no operation finishes
immediately.
Returns:
An awaitable that evaluates to a tuple of the form ``(val, ch)``.
If `default` is not provided, then `val` will be what the first
successful operation returned and `ch` will be the channel used in that
operation. If `default` is provided and none of the operations complete
immediately, then the awaitable will evaluate to
``(default, 'default')``.
Raises:
ValueError: If `ops` is empty or contains both a get and put operation
to the same channel.
RuntimeError: If the calling thread has no running event loop.
See Also:
:func:`b_alt`
"""
flag = create_flag()
future = FlagFuture(flag)
ret = _alts(flag, future_deliver_fn(future), ops, priority, default)
if ret is not None:
asyncio.Future.set_result(future, ret)
return future
def b_alt(*ops, priority=False, default=_Undefined):
"""
b_alt(*ops, priority=False, default=Undefined)
Same as :func:`alt` except it blocks instead of returning an awaitable.
Does not require an event loop.
"""
prom = Promise()
ret = _alts(create_flag(), prom.deliver, ops, priority, default)
return prom.deref() if ret is None else ret
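# Illustrative sketch (not part of the original module): minimal asyncio usage
# of chan and alt.  The coroutine name and the values are made up for the
# example::
#
#     async def demo():
#         ch = chan(1)                  # buffered channel of capacity 1
#         await ch.put('hello')         # completes immediately via the buffer
#         print(await ch.get())         # -> 'hello'
#         a, b = chan(), chan(1)
#         val, which = await alt(a, [b, 42], default='nothing ready')
#         # at most one operation is committed; the buffered put to b can
#         # finish immediately, so (val, which) == (True, b) here
#         ch.close()
#
#     asyncio.run(demo())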
|
import os
import unittest
from unittest.mock import patch, Mock
os.environ["STAGE"] = "test"
os.environ["autotest"] = "True"
from sosw.worker_assistant import WorkerAssistant
from sosw.test.variables import TEST_WORKER_ASSISTANT_CONFIG
class WorkerAssistant_UnitTestCase(unittest.TestCase):
TEST_CONFIG = TEST_WORKER_ASSISTANT_CONFIG
def setUp(self):
with patch('boto3.client'):
self.worker_assistant = WorkerAssistant(custom_config=self.TEST_CONFIG)
def test_call__unknown_action__raises(self):
event = {
'action': 'unknown_action'
}
with self.assertRaises(Exception):
self.worker_assistant(event)
def test_call__mark_task_as_closed(self):
event = {
'action': 'mark_task_as_completed',
'task_id': '123',
'stats': '{"s_key": "value"}',
'result': '{"r_key": "value"}'
}
self.worker_assistant.mark_task_as_completed = Mock(return_value=None)
self.worker_assistant(event)
self.worker_assistant.mark_task_as_completed.assert_called_once_with(task_id='123', stats={"s_key": "value"},
result={"r_key": "value"})
def test_call__mark_task_as_closed__no_task_id__raises(self):
event = {
'action': 'mark_task_as_completed'
}
with self.assertRaises(Exception):
self.worker_assistant(event)
|
# *************************************************************************** #
# HyperNet #
# --------------------------------------------------------------------------- #
# Machine Learning-Based library for modeling #
# multi-component non-equilibrium thermochemical processes #
# #
# *************************************************************************** #
# --------------------------- CONFIGURATION FILE ---------------------------- #
# HyperNet --------------------------------------------------------------------
headers = {
'main': '[HyperNet]: ',
'warning': '[HyperNet]: WARNING! ',
'val_err': 'from HyperNet\n>>> '
}
values = {
'SMALL': 1.e-15,
'BIG': 1.e+15
}
# TensorFlow ------------------------------------------------------------------
tf_setup = {
'EPSILON': 1.e-15,
'DTYPE': 'float64',
'NUM_THREADS': 16,
'TF_CPP_MIN_LOG_LEVEL': '3'
}
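# --- Hedged usage sketch (illustrative only) ---------------------------------
# One plausible way to consume the ``tf_setup`` values above at start-up.  The
# library's real initialization code is not shown in this file, so the calls
# below are an assumption about how these keys are applied; they only run when
# this configuration module is executed directly.
if __name__ == '__main__':
    import os
    # Must be set before TensorFlow is imported to take effect.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = tf_setup['TF_CPP_MIN_LOG_LEVEL']
    import tensorflow as tf

    tf.keras.backend.set_floatx(tf_setup['DTYPE'])
    tf.config.threading.set_intra_op_parallelism_threads(tf_setup['NUM_THREADS'])
    tf.config.threading.set_inter_op_parallelism_threads(tf_setup['NUM_THREADS'])
    print(headers['main'] + 'TensorFlow configured for ' + tf_setup['DTYPE'])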
|
from Tkinter import *
from hummingbird import Hummingbird
import time
'''
Hummingbird Hardware Components
Connect TriColor LED to Port #1
Connect Servo Motor to port #1
Connect Gear Motor to port #1
Connect Temperature Sensor to sensor port #1
Connect Distance Sensor to sensor port #2
Connect Rotary Sensor to sensor port #3
Connect Sound Sensor to sensor port #4
'''
#The following functions are tied to key presses
def instructions():
print """
Welcome to my hummingbird:
a. Turn on LED light
s. Start on a motor
d. Stop the motor
f. Print distances
g. Print temperature
h. Use rotary sensor
j. Sound Sensor
k. Vibration Motor
l. Set Servo
space: Turn Off
"""
def tri_color():
#Set LED in port 1 to red
humm.set_tricolor_led(1, 255, 0, 0)
time.sleep(1)
humm.set_tricolor_led(1, 0, 255, 0)
time.sleep(1)
humm.set_tricolor_led(1, 0, 0, 255)
def start_motor():
humm.set_motor(1, 255)
def stop_motor():
humm.set_motor(1, 0)
def distance_sensor():
    #Print the measured distance for the next 5 seconds
distance = humm.get_distance(2)
print "I will print the distance for the next 5 seconds"
start = time.time()
stop = start + 5
while(start < stop):
print "Distance: ", humm.get_distance(2)
start = time.time()
def temp_sensor():
temp = humm.get_temperature(1)
print "The temperature is", temp
def rotary_values():
rotary = humm.get_knob_value(3)
while(rotary !=0):
print "Rotary value:", rotary
rotary = humm.get_knob_value(3)
def get_sound():
print "I will record the sound levels for the next 5 seconds"
start = time.time()
stop = start + 5
while(start < stop):
print "Sound level: ", humm.get_sound_sensor(4)
start = time.time()
def vibrate():
#Vibrate 5 times
for i in range(0,5):
humm.set_vibration_motor(1, 255)
time.sleep(1)
humm.set_vibration_motor(1, 0)
time.sleep(1)
def set_servo():
print "I will set the servo"
for i in range(0,3):
humm.set_servo(1, 0)
time.sleep(1)
humm.set_servo(1, 90)
time.sleep(1)
def turn_off():
global keep_going
keep_going = False
print "I'm turning off the hummingbird"
humm.close()
'''
Used to tie key presses to hummingbird functions
'''
def key(event):
key_pressed = event.char
if key_pressed == "a":
print "Setting LED to red, green and blue"
tri_color()
elif key_pressed == "s":
print "Starting Gear Motor..."
start_motor()
elif key_pressed == "d":
print "Stopping Gear Motor..."
stop_motor()
elif key_pressed == "f":
distance_sensor()
elif key_pressed == "g":
temp_sensor()
elif key_pressed == "h":
rotary_values()
elif key_pressed == "j":
get_sound()
elif key_pressed == "k":
vibrate()
elif key_pressed == "l":
set_servo()
else:
turn_off()
instructions()
################################################################
# Don't modify the following code, GUI components #
################################################################
root = Tk()
text = Text(root)
text.insert(INSERT, """
Welcome to my hummingbird:
Use the following keys to activate hummingbird components
a. Turn on LED light
s. Start on a motor
d. Stop the motor
f. Print distances
g. Print temperature
h. Use rotary sensor
j. Sound Sensor
k. Vibration Motor
l. Set Servo
space: Turn Off
""")
text.insert(END, "Click the button below to start.....")
text.pack()
def button_handler():
print "Here we go!"
frame.focus_set()
start_button = Button(root, text="Click me to Start", command=button_handler, bg="yellow")
start_button.config( height = 10, width = 50 )
start_button.pack()
#Creates Hummingbird object
humm = Hummingbird()
def callback(event):
frame.focus_set()
print "clicked at", event.x, event.y
frame = Frame(root, width=300, height=150)
frame.bind("<Key>", key)
frame.bind("<Button-1>", callback)
frame.pack()
root.mainloop()
instructions()
|
from django.apps import AppConfig
class AccomplishmentConfig(AppConfig):
name = 'accomplishment'
|
import math
from typing import Callable, Dict, Optional
from c2cwsgiutils import stats
from tilecloud import Tile
class Statistics:
def __init__(self, format: str = "%f"):
self.format = format
self.n = 0
self.sum = 0.0
self.sum_of_squares = 0.0
self.minimum: Optional[float] = None
self.maximum: Optional[float] = None
def add(self, x: float) -> None:
self.n += 1
self.sum += x
self.sum_of_squares += x * x
self.minimum = x if self.minimum is None else min(self.minimum, x)
self.maximum = x if self.maximum is None else max(self.maximum, x)
def __str__(self) -> str:
result = []
if self.n:
result.append("/".join(self.format % value for value in (self.minimum, self.mean, self.maximum)))
result.append(f"(n={self.n:d})")
return " ".join(result)
@property
def mean(self) -> Optional[float]:
return self.sum / self.n if self.n else None
@property
def variance(self) -> float:
return self.sum_of_squares / self.n - (self.sum / self.n) ** 2
@property
def standard_deviation(self) -> float:
return math.sqrt(self.variance)
class Benchmark:
def __init__(self, attr: str = "benchmark"):
self.attr = attr
self.statisticss: Dict[str, Statistics] = {}
def sample(self, key: Optional[str] = None) -> Callable[[Tile], Tile]:
if key:
if key in self.statisticss:
statistics: Optional[Statistics] = self.statisticss[key]
else:
statistics = Statistics("%.3fs")
self.statisticss[key] = statistics
else:
statistics = None
def callback(tile: Tile) -> Tile:
if tile:
if hasattr(tile, self.attr):
timer = getattr(tile, self.attr)
delta_t = timer.stop()
if statistics:
statistics.add(delta_t)
else:
setattr(tile, self.attr, stats.timer([key]))
return tile
return callback
class StatsdCountTiles:
def __call__(self, tile: Tile) -> Tile:
if tile:
stats.increment_counter(["tiles"])
return tile
class StatsdCountErrors:
def __call__(self, tile: Tile) -> Tile:
if tile and tile.error:
stats.increment_counter(["errors"])
return tile
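# --- Hedged usage sketch (not part of the original module) -------------------
# Minimal demonstration of the ``Statistics`` accumulator defined above: it
# only keeps running sums, so min/mean/max and the standard deviation can be
# read back at any time without storing every sample.
if __name__ == '__main__':
    timings = Statistics('%.3fs')
    for sample in (0.120, 0.135, 0.098, 0.170):
        timings.add(sample)
    print(timings)                       # e.g. "0.098s/0.131s/0.170s (n=4)"
    print(timings.standard_deviation)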
|
import os
import six
import json
from requests_oauthlib import OAuth1Session
consumer_key = 'XJCbpn5nHHDNW48NBMx0eg'
consumer_secret = 'gcxv78Aq6kBulp663LFgug'
class Figshare(object):
def __init__(self, consumer_key, consumer_secret, access_token,
access_token_secret):
"""
Connects to the figshare API.
"""
self.client = OAuth1Session(
consumer_key, consumer_secret, access_token, access_token_secret)
self.endpoint = 'http://api.figshare.com/v1/my_data'
def article(self, article_id):
"""
Returns a single article.
"""
response = self.client.get(self.endpoint + '/articles/%s' % article_id)
return response.json()['items'][0]
def delete_article(self, article_id):
"""
Deletes article `article_id`.
"""
response = self.client.delete(
self.endpoint + '/articles/%s' % article_id)
return json.loads(response.content)
def articles(self, limit=None):
"""
Parameters
----------
limit : int or None
If not None, then limit the number of articles returned.
Returns
-------
Dict of {count: integer count of articles, items: dictionary
representing each article}
"""
# API only returns 10 results at a time, so keep asking for more pages
# until we can't get any more...
all_articles = []
count = 0
page = 1
while True:
            if limit is not None and len(all_articles) >= limit:
                break
response = self.client.get(
self.endpoint + '/articles',
params={'page': page}
)
# Keep the response around for debugging if needed; get a separate
# results dict
results = response.json()
if results['count'] == 0:
break
all_articles.extend(results['items'])
count += results['count']
page += 1
# Reconstruct the JSON dict in the same format returned by a single
# response (with keys [count, items])
assert count == len(all_articles)
return {'count': count, 'items': all_articles}
def create_article(self, title, description, defined_type='dataset'):
"""
Create an article.
        `title` and `description` are required; another valid `defined_type` value is
"fileset". There are likely others (e.g., "figure", "code", "media",
etc) but these are currently undocumented by the API, so use at your
own risk.
"""
response = self.client.post(
self.endpoint + '/articles',
data=json.dumps({'title': title,
'description': description,
'defined_type': defined_type,
}),
headers={'content-type': 'application/json'})
return response.json()
def make_private(self, article_id):
"""
Make an article private.
If an article was just created, it is still in draft form. This method
will turn it into a private article.
"""
response = self.client.post(
'%s/articles/%s/action/make_private' % (self.endpoint, article_id))
return response.json()
def update_article(self, article_id, title=None, description=None,
defined_type=None):
"""
Update title, description, and defined_type.
Any of these values can be None if you don't want to change them.
"""
data = {'title': title,
'description': description,
'defined_type': defined_type}
data = dict((k, v) for k, v in data.items() if v is not None)
response = self.client.put(
'%s/articles/%s' % (self.endpoint, article_id),
data=json.dumps(data),
headers={'content-type': 'application/json'})
return response.json()
def upload_file(self, article_id, filepath_or_buffer):
"""
Upload a file.
`filepath_or_buffer` can be a string or an open file object.
"""
if isinstance(filepath_or_buffer, six.string_types):
file = open(filepath_or_buffer, 'rb')
own_handle = True
else:
file = filepath_or_buffer
own_handle = False
try:
files = {'filedata': (os.path.basename(file.name), file)}
response = self.client.put(
'%s/articles/%s/files' % (self.endpoint, article_id),
files=files)
return response.json()
finally:
if own_handle:
file.close()
def delete_file(self, article_id, file_id):
"""
Delete a file.
"""
response = self.client.delete(
'%s/articles/%s/files/%s' % (self.endpoint, article_id, file_id)
)
return response.json()
def add_link(self, article_id, link):
"""
Add a link.
"""
response = self.client.put(
self.endpoint + '/articles/%s/links' % article_id,
data=json.dumps({'link': link}),
headers={'content-type': 'application/json'}
)
return response.json()
def delete_link(self, article_id, link_id):
"""
Delete a link.
This requires a link ID, which you can get from inspecting the article
JSON. For example::
first_link_id = article['links'][0]['id']
"""
response = self.client.delete(
self.endpoint + '/articles/%s/links/%s' % (article_id, link_id)
)
        return response.json()
def versions(self, article_id):
"""
Show the versions of this article
"""
response = self.client.get(
self.endpoint + '/articles/%s/versions' % article_id
)
return response.json()
def get_version(self, article_id, version_number):
"""
Get a particular version of this article.
"""
response = self.client.get(
self.endpoint + '/articles/%s/versions/%s' % (article_id, version_number)
)
return response.json()
def categories(self):
"""
Show the possible categories supplied by figshare.
"""
response = self.client.get(
self.endpoint.replace('/my_data', '') + '/categories'
)
return response.json()
def add_tag(self, article_id, tag):
"""
Add a tag to an article.
"""
response = self.client.put(
self.endpoint + '/articles/%s/tags' % article_id,
data=json.dumps({'tag_name': tag}),
headers={'content-type': 'application/json'})
return response.json()
def delete_tag(self, article_id, tag_id):
"""
Delete a tag from an article.
This requires a tag ID, which you can get from inspecting the article JSON. For example::
first_tag_id = article['tags'][0]['id']
"""
response = self.client.delete(
            self.endpoint + '/articles/%s/tags/%s' % (article_id, tag_id)
)
return response.json()
def add_category(self, article_id, category_id):
"""
Add a category to an article.
See the categories() method to see the options and to select
a `category_id`.
"""
response = self.client.put(
self.endpoint + '/articles/%s/categories' % article_id,
data=json.dumps({'category_id': category_id}),
headers={'content-type': 'application/json'})
return response.json()
def delete_category(self, article_id, category_id):
"""
Delete a category from an article.
"""
response = self.client.delete(
self.endpoint + '/articles/%s/categories/%s' %
(article_id, category_id)
)
return response.json()
def add_author(self, article_id, author_id):
"""
Add an author to an article.
"""
response = self.client.put(
            self.endpoint + '/articles/%s/authors' % article_id,
data=json.dumps({'author_id': author_id}),
headers={'content-type': 'application/json'})
return response.json()
def delete_author(self, article_id, author_id):
"""
Delete an author from an article
"""
response = self.client.delete(
            self.endpoint + '/articles/%s/authors/%s' %
(article_id, author_id)
)
return response.json()
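# --- Hedged usage sketch (not part of the original module) -------------------
# Shows the intended call sequence of the client above.  The access token and
# secret are placeholders for real figshare credentials, the file path is
# hypothetical, and the response keys (e.g. 'article_id') are assumptions
# based on the v1 API; every call performs a live HTTP request.
if __name__ == '__main__':
    fig = Figshare(consumer_key, consumer_secret,
                   access_token='YOUR-ACCESS-TOKEN',
                   access_token_secret='YOUR-ACCESS-TOKEN-SECRET')
    created = fig.create_article('demo article', 'created from the usage sketch')
    article_id = created['article_id']    # key assumed from the v1 API response
    fig.add_tag(article_id, 'example')
    fig.upload_file(article_id, '/path/to/data.csv')
    print(fig.article(article_id))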
|
def make_room(Room, title, description, floor):
return Room(
title = title,
description = description,
floor = floor,
items = None,
NPCs = None,
mobs = None,
north = None,
east = None,
south = None,
west = None
)
def make_floor(Room):
return {
#room_key: make_room(Room, title, description, floor)
"3-a1": make_room(Room, "3-a1", "A stone throne stands on a foot-high circular dais in the center of this cold chamber. The throne and dais bear the simple adornments of patterns of crossed lines -- a pattern also employed around each door to the room.", "Floor 3"),
"3-a2": make_room(Room, "3-a2", "A wall that holds a seat with a hole in it is in this chamber. It's a privy! The noisome stench from the hole leads you to believe that the privy sees regular use.", "Floor 3"),
"3-a3": make_room(Room, "3-a3", "The burble of water reaches your ears after you open the door to this room. You see the source of the noise in the far wall: a large fountain artfully carved to look like a seashell with the figure of a seacat spewing clear water into its basin.", "Floor 3"),
"3-a4": make_room(Room, "3-a4", "This room holds six dry circular basins large enough to hold a man and a dry fountain at its center. All possess chipped carvings of merfolk and other sea creatures. It looks like this room once served some group of people as a bath.", "Floor 3"),
"3-a5": make_room(Room, "3-a5", "Fifth times the charm", "Floor 3"),
"3-b1": make_room(Room, "3-b1", "A glow escapes this room through its open doorways. The masonry between every stone emanates an unnatural orange radiance. Glancing quickly about the room, you note that each stone bears the carving of someone's name.", "Floor 3"),
"3-b2": make_room(Room, "3-b2", "A huge stewpot hangs from a thick iron tripod over a crackling fire in the center of this chamber. A hole in the ceiling allows some of the smoke from the fire to escape, but much of it expands across the ceiling and rolls down to fill the room in a dark fog. Other details are difficult to make out, but some creature must be nearby, because it smells like a good soup is cooking.", "Floor 3"),
"3-b3": make_room(Room, "3-b3", "You've opened the door to a torture chamber. Several devices of degradation, pain, and death stand about the room, all of them showing signs of regular use. The wood of the rack is worn smooth by struggling bodies, and the iron maiden appears to be occupied by a corpse.", "Floor 3"),
"3-b4": make_room(Room, "3-b4", "This short hall leads to another door. On either side of the hall, niches are set into the wall within which stand clay urns. One of the urns has been shattered, and its contents have spilled onto its shelf and the floor. Amid the ash it held, you see blackened chunks of something that might be bone.", "Floor 3"),
"3-b5": make_room(Room, "3-b5", "Corpses and pieces of corpses hang from hooks that dangle from chains attached to thick iron rings. Most appear humanoid but a few of the body parts appear more monstrous. You don't see any heads, hands, or feet -- all seem to have been chopped or torn off. Neither do you see any guts in the horrible array, but several thick leather sacks hang from hooks in the walls, and they are suspiciously wet and the leather looks extremely taut -- as if it' under great strain.", "Floor 3"),
"3-c1": make_room(Room, "3-c1", "This small chamber seems divided into three parts. The first has several hooks on the walls from which hang dusty robes. An open curtain separates that space from the next, which has a dry basin set in the floor. Beyond that lies another parted curtain behind which you can see several straw mats in a semicircle pointing toward a statue of a dog-headed man.", "Floor 3"),
"3-c2": make_room(Room, "3-c2", "When looking into this chamber, you're confronted by a thousand reflections of yourself looking back. Mirrored walls set at different angles fill the room. A path seems to wind through the mirrors, although you can't tell where it leads.", "Floor 3"),
"3-c3": make_room(Room, "3-c3", "A large forge squats against the far wall of this room, and coals glow dimly inside. Before the forge stands a wide block of iron with a heavy-looking hammer lying atop it, no doubt for use in pounding out shapes in hot metal. Other forge tools hang in racks nearby, and a barrel of water and bellows rest on the floor nearby.", "Floor 3"),
"3-c4": make_room(Room, "3-c4", "This chamber is clearly a prison. Small barred cells line the walls, leaving a 15-foot-wide pathway for a guard to walk. Channels run down either side of the path next to the cages, probably to allow the prisoners' waste to flow through the grates on the other side of the room. The cells appear empty but your vantage point doesn't allow you to see the full extent of them all.", "Floor 3"),
"3-c5": make_room(Room, "3-c5", "You push open the stone door to this room and note that the only other exit is a door made of wood. It and the table shoved against it are warped and swollen. Indeed, the table only barely deserves that description. Its surface is rippled into waves and one leg doesn't even touch the floor. The door shows signs of someone trying to chop through from the other side, but it looks like they gave up.", "Floor 3"),
"3-d1": make_room(Room, "3-d1", "This otherwise bare room has one distinguishing feature. The stone around one of the other doors has been pulled over its edges, as though the rock were as soft as clay and could be moved with fingers. The stone of the door and wall seems hastily molded together.", "Floor 3"),
"3-d2": make_room(Room, "3-d2", "You enter a small room and your steps echo. Looking about, you're uncertain why, but then a wall vanishes and reveals an enormous chamber. The wall was an illusion and whoever cast it must be nearby!", "Floor 3"),
"3-d3": make_room(Room, "3-d3", "You feel a sense of foreboding upon peering into this cavernous chamber. At its center lies a low heap of refuse, rubble, and bones atop which sit several huge broken eggshells. Judging by their shattered remains, the eggs were big enough to hold a crouching man, making you wonder how large -- and where -- the mother is.", "Floor 3"),
"3-d4": make_room(Room, "3-d4", "This small chamber is a bloody mess. The corpse of a minotaur lies on the floor, its belly carved out. The creature's innards are largely missing, and yet you detect no other wounds. Bloody, froglike footprints track away from the corpse and out an open door.", "Floor 3"),
"3-d5": make_room(Room, "3-d5", "A chill crawls up your spine and out over your skin as you look upon this room. The carvings on the wall are magnificent, a symphony in stonework -- but given the themes represented, it might be better described as a requiem. Scenes of death, both violent and peaceful, appear on every wall framed by grinning skeletons and ghoulish forms in ragged cloaks.", "Floor 3"),
"3-e1": make_room(Room, "3-e1", "A pungent, earthy odor greets you as you pull open the door and peer into this room. Mushrooms grow in clusters of hundreds all over the floor. Looking into the room is like looking down on a forest. Tall tangles of fungus resemble forested hills, the barren floor looks like a plain between the woods, and even a trickle of water and a puddle of water that pools in a low spot bears a resemblance to a river and lake, respectively.", "Floor 3"),
"3-e2": make_room(Room, "3-e2", "You pull open the door and hear the scrape of its opening echo throughout what must be a massive room. Peering inside, you see a vast cavern. Stalactites drip down from the ceiling in sharp points while flowstone makes strange shapes on the floor.", "Floor 3"),
"3-e3": make_room(Room, "3-e3", "You find this chamber lit dimly by guttering candles that squat in small hills of melted wax. The smell of their smoke hits your nose along with an odor that is reminiscent of the sea. Someone has taken a large amount of salt and drawn a broad circular symbol on the floor with the candles situated equidistantly around it. Atop the salt, someone traced the symbol with a black powder that glints a dull silver in the candlelight.", "Floor 3"),
"3-e4": make_room(Room, "3-e4", "This chamber holds one occupant: the statue of a male figure with elven features but the broad, muscular body of a hale human. It kneels on the floor as though fallen to that posture. Both its arms reach upward in supplication, and its face is a mask of grief. Two great feathered wings droop from its back, both sculpted to look broken. The statue is skillfully crafted.", "Floor 3"),
"3-e5": make_room(Room, "3-e5", "The door to this room swings open easily on well-oiled hinges. Beyond it you see that the chamber walls have been disguised by wood paneling, and the stone ceiling and floor are hidden by bright marble tiles. Several large and well-stuffed chairs are arranged about the room along with some small reading tables.", "Floor 3"),
}
def link_rooms(rooms):
# from left to right then top to bottom direction -- rooms["3-"].east = rooms["3-"].title
rooms["3-a1"].east = rooms["3-a2"].title
rooms["3-a1"].south = rooms["3-b1"].title
rooms["3-a2"].east = rooms["3-a3"].title
rooms["3-a3"].south = rooms["3-b3"].title
rooms["3-a4"].east = rooms["3-a5"].title
rooms["3-a5"].south = rooms["3-b5"].title
rooms["3-b1"].east = rooms["3-b2"].title
rooms["3-b3"].east = rooms["3-b4"].title
rooms["3-b4"].south = rooms["3-c4"].title
rooms["3-b5"].south = rooms["3-c5"].title
rooms["3-c1"].east = rooms["3-c2"].title
rooms["3-c1"].south = rooms["3-d1"].title
rooms["3-c2"].east = rooms["3-c3"].title
rooms["3-c3"].south = rooms["3-d3"].title
rooms["3-c4"].east = rooms["3-c5"].title
rooms["3-c5"].south = rooms["3-d5"].title
rooms["3-d1"].east = rooms["3-d2"].title
rooms["3-d1"].south = rooms["3-e1"].title
rooms["3-d2"].south = rooms["3-e2"].title
rooms["3-d3"].east = rooms["3-d4"].title
rooms["3-d4"].south = rooms["3-e4"].title
rooms["3-d5"].south = rooms["3-e5"].title
rooms["3-e1"].west = rooms["4-e5"].title
rooms["3-e2"].east = rooms["3-e3"].title
rooms["3-e4"].east = rooms["3-e5"].title
|
from __future__ import absolute_import
from .celeryapp import app as celery_app
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from experiments import default_reader
def load_and_process_data(path):
"""Loads breast cancer data from `path`"""
df = pd.read_csv(path, header=None)
#Remove the first column of the data (the ID)
df.drop([0], axis=1, inplace=True)
#replace labels 'B' with 0 and 'M' with 1
df[1] = df[1].replace({'B': 0, 'M': 1})
data_matrix = df.values
#Split the data into 70% training and 30% test set
data_labels = data_matrix[:, :1].ravel()
data_matrix = data_matrix[:, 1:]
train_data, test_data, train_labels, test_labels = train_test_split(data_matrix, data_labels, test_size=0.3, shuffle=True, stratify=data_labels)
#Normalize the features of the data
scaler = preprocessing.StandardScaler().fit(train_data)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
assert train_labels.size == train_data.shape[0]
assert test_labels.size == test_data.shape[0]
data = {}
val_data, weak_supervision_data, val_labels, weak_supervision_labels = train_test_split(train_data, train_labels, test_size=0.4285, shuffle=True, stratify=train_labels)
data['training_data'] = weak_supervision_data, weak_supervision_labels
data['validation_data'] = val_data, val_labels
data['test_data'] = test_data, test_labels
return data
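# --- Hedged usage sketch (not part of the original module) -------------------
# Shows the shape of the dictionary returned by ``load_and_process_data``.
# The path below mirrors the one used in ``run_experiment`` and is assumed to
# point at the UCI WDBC csv file on disk.
if __name__ == '__main__':
    data = load_and_process_data('datasets/breast-cancer/wdbc.data')
    train_x, train_y = data['training_data']
    val_x, val_y = data['validation_data']
    test_x, test_y = data['test_data']
    print(train_x.shape, val_x.shape, test_x.shape)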
def run_experiment(run, save):
"""
:param run: method that runs real experiment given data
:type: function
:param save: method that saves experiment results to JSON file
:type: function
:return: none
"""
#for breast cancer classification dataset, select the mean radius, radius se and worst radius as weak signals
views = {0:0, 1:10, 2:20}
datapath = 'datasets/breast-cancer/wdbc.data'
savepath = 'results/json/breast_cancer.json'
default_reader.run_experiment(run, save, views, datapath, load_and_process_data, savepath)
def run_bounds_experiment(run):
"""
:param run: method that runs real experiment given data
:type: function
:return: none
"""
#for breast cancer classification dataset, select the mean radius, radius se and worst radius as weak signals
views = {0:0, 1:10, 2:20}
path = 'results/json/bc_bounds.json'
data_and_weak_signal_data = default_reader.create_weak_signal_view('datasets/breast-cancer/wdbc.data', views, load_and_process_data)
default_reader.run_bounds_experiment(run, data_and_weak_signal_data, path) |
from .lunch import Lunchbreak
|
#!/usr/bin/env python3
import build_file
import repository
import label
import argparse
import json
import os
import logging
from typing import List
class StoreKeyValuePair(argparse.Action):
"""
Parser action that populates a dictionary with '=' separated key-value
pairs.
"""
def __call__(self, parser, namespace, values, option_string=None):
key, value = values.split('=')
dest_dict = {}
if hasattr(namespace, self.dest) and getattr(namespace, self.dest):
dest_dict = getattr(namespace, self.dest)
dest_dict[key] = value
setattr(namespace, self.dest, dest_dict)
def dump_exported_symbols(args):
"""Print all symbols exported using include_defs in a build file."""
logging.debug('Dumping exported symbols for ' + args.build_file)
bf = build_file.from_path(args.build_file)
repo = repository.Repository(args.repository, args.cell_roots)
symbols = bf.get_exported_symbols_transitive_closure(repo)
if args.json:
print(json.dumps(symbols))
else:
print(os.linesep.join(symbols))
def dump_export_map(args):
"""
Prints export map that includes all included definitions and symbols they
export.
"""
logging.debug('Dumping export map for ' + args.build_file)
bf = build_file.from_path(args.build_file)
repo = repository.Repository(args.repository, args.cell_roots)
export_map = bf.get_export_map(repo)
def to_load_import_string(import_label: label):
pkg = import_label.package
# include_defs package includes a file name, so we have to split it
# into file name
file_name = pkg.split('/')[-1]
        # and its prefix - which is the new package
pkg = '/'.join(pkg.split('/')[:-1])
load_fn_cell = args.cell_prefix + import_label.cell \
if import_label.cell else ''
return load_fn_cell + '//' + pkg + ':' + file_name
if args.use_load_function_import_string_format:
new_export_map = {}
for import_string, exported_symbols in export_map.items():
new_export_map[to_load_import_string(
label.from_string(import_string))] = exported_symbols
export_map = new_export_map
if args.print_as_load_functions:
def to_load_function(import_label: label, symbols: List[str]):
import_string = to_load_import_string(import_label)
function_args = map(lambda s: '"%s"' % s, symbols)
return 'load("%s", %s)' % (import_string, ','.join(function_args))
load_functions = []
for import_string, exported_symbols in export_map.items():
load_functions.append(
to_load_function(
label.from_string(import_string), exported_symbols))
if args.json:
print(json.dumps(load_functions))
else:
print(os.linesep.join(load_functions))
elif args.json:
print(json.dumps(export_map))
else:
for import_string, exported_symbols in export_map.items():
print(import_string + ':')
for exported_symbol in exported_symbols:
print(' ' * 2 + exported_symbol)
def main():
parser = argparse.ArgumentParser(
description='Dumps requested build file information.')
subparsers = parser.add_subparsers()
exported_symbols_parser = subparsers.add_parser('exported_symbols')
exported_symbols_parser.set_defaults(func=dump_exported_symbols)
export_map_parser = subparsers.add_parser('export_map')
export_map_parser.add_argument(
'--print_as_load_functions',
action='store_true',
help='Print export map as a series of load functions which import all '
'symbols exported by respective imported files.')
export_map_parser.add_argument(
'--cell_prefix',
default='',
help='The prefix to use for cells in import strings.')
export_map_parser.add_argument(
'--use_load_function_import_string_format',
action='store_true',
help='Use load function import string syntax instead of include_defs.')
export_map_parser.set_defaults(func=dump_export_map)
parser.add_argument('build_file', metavar='FILE')
parser.add_argument('--json', action='store_true')
parser.add_argument(
'--cell_root',
action=StoreKeyValuePair,
metavar='CELL=PATH',
dest='cell_roots')
parser.add_argument(
'--repository',
metavar='PATH',
help='Path to the repository managed by Buck.')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
        help='Enable verbose diagnostics.')
args = parser.parse_args()
logging_level = logging.DEBUG if args.verbose else logging.INFO
logging.basicConfig(
level=logging_level,
format=(
'%(asctime)s [%(levelname)s][%(filename)s:%(lineno)d] %(message)s'
))
args.func(args)
if __name__ == '__main__':
main()
|
import time
from queue import Queue
import pytest
from polog.core.engine.real_engines.multithreaded.worker import Worker
from polog.core.stores.settings.settings_store import SettingsStore
from polog.core.log_item import LogItem
queue = Queue()
worker = Worker(queue, 1, SettingsStore())
def test_do(handler):
"""
    Check that the handler is called.
"""
handler.clean()
log = LogItem()
log.set_handlers([handler])
log.set_data({'lol': 'kek'})
worker.do_anything(log)
assert handler.last is not None
worker.set_stop_flag()
worker.stop()
|
import numpy as np
from tqdm import tqdm
class UserBased:
def __init__(self, M, N, neighbors=20, min_common_items=5, min_values=1.0, max_value=5.0):
self.M = M
self.N = N
self.neighbors = neighbors
self.min_common_items = min_common_items
self.min_value = min_values
self.max_value = max_value
self.neighbors_weights = np.empty((M,), dtype=list)
self.user_deviations = np.empty((M,), dtype=dict)
def fit(self, train_ratings, user_to_items):
"""Fit user-base collaborative filtering model"""
# set user_mean with default value of global mean
self.user_means = np.full(
(self.M,),
fill_value=sum(train_ratings.values()) / len(train_ratings.values())
)
neighbours_correlation = np.empty((self.M,))
for i in range(self.M):
if i not in user_to_items:
self.neighbors_weights[i] = []
continue
items_i = set(user_to_items[i])
ratings_i = {
item: train_ratings[(i, item)] for item in items_i
}
mean_i = np.mean(list(ratings_i.values()))
deviation_i = {
item: (rating - mean_i) for item, rating in ratings_i.items()
}
self.user_means[i] = mean_i
self.user_deviations[i] = deviation_i
for j in range(self.M):
if i == j or j not in user_to_items: # can't include itself as neighbor
neighbours_correlation[j] = 0
continue
items_j = set(user_to_items[j])
common = list(items_i.intersection(items_j))
                if len(common) < self.min_common_items: # don't include users that have too few items in common
neighbours_correlation[j] = 0
continue
ratings_j = {
item: train_ratings[(j, item)] for item in items_j
}
mean_j = np.mean(list(ratings_j.values()))
deviation_j = {
item: (rating - mean_j) for item, rating in ratings_j.items()
}
# correlation between user i and j
common_dev_i = np.array([deviation_i[k] for k in common])
common_dev_j = np.array([deviation_j[k] for k in common])
neighbours_correlation[j] = \
np.dot(common_dev_i, common_dev_j) / np.linalg.norm(common_dev_i) / np.linalg.norm(common_dev_j)
top_k_idx = np.argpartition(-np.abs(neighbours_correlation), self.neighbors)[:self.neighbors]
top_k_idx = [k for k in top_k_idx if neighbours_correlation[k] != 0]
self.neighbors_weights[i] = [
(j, neighbours_correlation[j]) for j in top_k_idx if neighbours_correlation[j] != -np.inf
]
return self
def predict(self, i, k):
"""Predict score(i, k)"""
neighbours = self.neighbors_weights[i]
weighted_deviations, weights = 0, 0
for j, c_ij in neighbours:
if k in self.user_deviations[j]:
weighted_deviations += c_ij * self.user_deviations[j][k]
weights += np.abs(c_ij)
        # If no neighbors are found, predict the mean for that user.
        # Likewise, if none of the neighbors rated item k, predict the mean.
if weights != 0:
score = self.user_means[i] + weighted_deviations / weights
else:
score = self.user_means[i]
if score < self.min_value:
return self.min_value
return min(score, self.max_value)
def score(self, test_ratings):
"""Return RMSE for given test set"""
rmse = 0
for (i, k), y_true in test_ratings.items():
y_pred = self.predict(i, k)
rmse += (y_pred - y_true) ** 2
return np.sqrt(rmse / len(test_ratings))
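# --- Hedged usage sketch (not part of the original module) -------------------
# A toy end-to-end run of the user-based CF model above.  Ratings are keyed by
# (user_id, item_id); ``neighbors`` and ``min_common_items`` are lowered so
# that this tiny example still produces usable neighbourhoods.
if __name__ == '__main__':
    train_ratings = {
        (0, 0): 5.0, (0, 1): 3.0, (0, 2): 4.0,
        (1, 0): 4.0, (1, 1): 2.0, (1, 3): 5.0,
        (2, 1): 5.0, (2, 2): 5.0, (2, 3): 1.0,
    }
    user_to_items = {
        0: [0, 1, 2],
        1: [0, 1, 3],
        2: [1, 2, 3],
    }
    model = UserBased(M=3, N=4, neighbors=1, min_common_items=1)
    model.fit(train_ratings, user_to_items)
    print(model.predict(0, 3))           # score for an item user 0 has not rated
    print(model.score({(2, 0): 4.0}))    # RMSE on a one-example test set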
|
#!/usr/bin/env python2
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class ImportMultiTest (BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(2, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
self.is_network_split=False
self.sync_all()
def run_test (self):
import time
begintime = int(time.time())
print "Mining blocks..."
self.nodes[0].generate(1)
# sync
self.sync_all()
# keyword definition
PRIV_KEY = 'privkey'
PUB_KEY = 'pubkey'
ADDRESS_KEY = 'address'
SCRIPT_KEY = 'script'
# address
address1 = self.nodes[0].getnewaddress()
# pubkey
address2 = self.nodes[0].getnewaddress()
address2_pubkey = self.nodes[0].validateaddress(address2)['pubkey'] # Using pubkey
# privkey
address3 = self.nodes[0].getnewaddress()
address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey
# scriptPubKey
address4 = self.nodes[0].getnewaddress()
address4_scriptpubkey = self.nodes[0].validateaddress(address4)['scriptPubKey'] # Using scriptpubkey
#Check only one address
address_info = self.nodes[0].validateaddress(address1)
assert_equal(address_info['ismine'], True)
self.sync_all()
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].validateaddress(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address2)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# import multi
result1 = self.nodes[1].importmulti( [
{ "type": ADDRESS_KEY, "value": address1 , "label":"new account 1" , "timestamp": begintime } ,
{ "type": PUB_KEY , "value": address2_pubkey , "label":"new account 1", "timestamp": begintime},
{ "type": PRIV_KEY , "value": address3_privkey , "timestamp": begintime},
{ "type": SCRIPT_KEY , "value": address4_scriptpubkey , "timestamp": begintime},
])
#Addresses Test - after import
address_info = self.nodes[1].validateaddress(address1)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address2)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].validateaddress(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
address_info = self.nodes[1].validateaddress(address4)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
assert_equal(result1[0]['result'], True)
assert_equal(result1[1]['result'], True)
assert_equal(result1[2]['result'], True)
assert_equal(result1[3]['result'], True)
#importmulti without rescan
result2 = self.nodes[1].importmulti( [
{ "type": ADDRESS_KEY, "value": self.nodes[0].getnewaddress() } ,
{ "type": ADDRESS_KEY, "value": self.nodes[0].getnewaddress() } ,
{ "type": ADDRESS_KEY, "value": self.nodes[0].getnewaddress() , "label":"random account" } ,
{ "type": PUB_KEY, "value": self.nodes[0].validateaddress(self.nodes[0].getnewaddress())['pubkey'] } ,
{ "type": SCRIPT_KEY, "value": self.nodes[0].validateaddress(self.nodes[0].getnewaddress())['scriptPubKey'] },
], { "rescan":False } )
# all succeed
assert_equal(result2[0]['result'], True)
assert_equal(result2[1]['result'], True)
assert_equal(result2[2]['result'], True)
assert_equal(result2[3]['result'], True)
assert_equal(result2[4]['result'], True)
        # empty json case: importmulti with no arguments must fail
        failed = False
        try:
            self.nodes[1].importmulti()
        except Exception:
            failed = True
        assert_equal(failed, True)
        # partial success case
result3 = self.nodes[1].importmulti( [
{ "type": ADDRESS_KEY, "value": self.nodes[0].getnewaddress() } ,
{ "type": PUB_KEY} ] )
assert_equal(result3[0]['result'], True)
assert_equal(result3[1]['result'], False)
if __name__ == '__main__':
ImportMultiTest ().main ()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
CS224N 2019-20: Homework 5
"""
import torch
import torch.nn as nn
class CharDecoder(nn.Module):
def __init__(self, hidden_size, char_embedding_size=50, target_vocab=None):
""" Init Character Decoder.
@param hidden_size (int): Hidden size of the decoder LSTM
@param char_embedding_size (int): dimensionality of character embeddings
@param target_vocab (VocabEntry): vocabulary for the target language. See vocab.py for documentation.
"""
super(CharDecoder, self).__init__()
self.target_vocab = target_vocab
self.charDecoder = nn.LSTM(char_embedding_size, hidden_size)
self.char_output_projection = nn.Linear(hidden_size, len(self.target_vocab.char2id))
self.decoderCharEmb = nn.Embedding(len(self.target_vocab.char2id), char_embedding_size,
padding_idx=self.target_vocab.char_pad)
self.softmax = nn.Softmax(dim=-1)
self.criterion = nn.CrossEntropyLoss(ignore_index=self.target_vocab.char_pad , reduction='sum')
def forward(self, input, dec_hidden=None):
""" Forward pass of character decoder.
@param input (Tensor): tensor of integers, shape (length, batch_size)
@param dec_hidden (tuple(Tensor, Tensor)): internal state of the LSTM before reading the input characters. A tuple of two tensors of shape (1, batch, hidden_size)
@returns scores (Tensor): called s_t in the PDF, shape (length, batch_size, self.vocab_size)
@returns dec_hidden (tuple(Tensor, Tensor)): internal state of the LSTM after reading the input characters. A tuple of two tensors of shape (1, batch, hidden_size)
"""
### YOUR CODE HERE for part 2a
### TODO - Implement the forward pass of the character decoder.
input_embedded = self.decoderCharEmb(input) # (length, batch_size, char_embedding_size)
output, dec_hidden = self.charDecoder(input_embedded, dec_hidden)
scores = self.char_output_projection(output)
return scores, dec_hidden
### END YOUR CODE
def train_forward(self, char_sequence, dec_hidden=None):
""" Forward computation during training.
@param char_sequence (Tensor): tensor of integers, shape (length, batch_size). Note that "length" here and in forward() need not be the same.
@param dec_hidden (tuple(Tensor, Tensor)): initial internal state of the LSTM, obtained from the output of the word-level decoder. A tuple of two tensors of shape (1, batch_size, hidden_size)
@returns The cross-entropy loss (Tensor), computed as the *sum* of cross-entropy losses of all the words in the batch.
"""
### YOUR CODE HERE for part 2b
### TODO - Implement training forward pass.
###
### Hint: - Make sure padding characters do not contribute to the cross-entropy loss. Check vocab.py to find the padding token's index.
### - char_sequence corresponds to the sequence x_1 ... x_{n+1} (e.g., <START>,m,u,s,i,c,<END>). Read the handout about how to construct input and target sequence of CharDecoderLSTM.
### - Carefully read the documentation for nn.CrossEntropyLoss and our handout to see what this criterion have already included:
### https://pytorch.org/docs/stable/nn.html#crossentropyloss
input = char_sequence[:-1] # (length-1, batch_size)
output = char_sequence[1:] # (length-1, batch_size)
scores, dec_hidden = self.forward(input, dec_hidden) # scores: (length-1, batch_size, self.vocab_size)
loss_cross_entropy = self.criterion(scores.reshape([-1, scores.shape[-1]]), output.reshape(-1))
return loss_cross_entropy
### END YOUR CODE
def decode_greedy(self, initialStates, device, max_length=21):
""" Greedy decoding
@param initialStates (tuple(Tensor, Tensor)): initial internal state of the LSTM, a tuple of two tensors of size (1, batch_size, hidden_size)
@param device: torch.device (indicates whether the model is on CPU or GPU)
@param max_length (int): maximum length of words to decode
@returns decodedWords (List[str]): a list (of length batch_size) of strings, each of which has length <= max_length.
The decoded strings should NOT contain the start-of-word and end-of-word characters.
"""
### YOUR CODE HERE for part 2c
### TODO - Implement greedy decoding.
### Hints:
### - Use initialStates to get batch_size.
### - Use target_vocab.char2id and target_vocab.id2char to convert between integers and characters
### - Use torch.tensor(..., device=device) to turn a list of character indices into a tensor.
        ### - You may find torch.argmax or torch.max useful
### - We use curly brackets as start-of-word and end-of-word characters. That is, use the character '{' for <START> and '}' for <END>.
### Their indices are self.target_vocab.start_of_word and self.target_vocab.end_of_word, respectively.
# START_TOKEN = '{'
# END_TOKEN = '}'
START_TOKEN_INDEX = self.target_vocab.start_of_word
END_TOKEN_INDEX = self.target_vocab.end_of_word
batch_size = initialStates[0].shape[1]
output_words = torch.zeros([max_length, batch_size], device=device) # (max_length, batch_size)
current_char = torch.tensor([[START_TOKEN_INDEX] for i in range(batch_size)], device=device).t() # (length, batch_size), length = 1
dec_hidden = initialStates
for j in range(max_length):
score, dec_hidden = self.forward(current_char, dec_hidden) # score: (length, batch_size, self.vocab_size)
prob = self.softmax(score) # (length, batch_size, self.vocab_size)
current_char = torch.argmax(prob, dim=-1) # (length, batch_size)
output_words[j, :] = current_char
decodedWords = []
for b in range(batch_size):
decodedWord = ''
c_indices = output_words[:, b] # (max_length, )
for c in c_indices:
if c == END_TOKEN_INDEX:
break
decodedWord += self.target_vocab.id2char[c.item()]
decodedWords.append(decodedWord)
return decodedWords
### END YOUR CODE
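# --- Hedged smoke-test sketch (not part of the assignment code) --------------
# ``CharDecoder`` only needs a vocabulary object exposing ``char2id``,
# ``id2char``, ``char_pad``, ``start_of_word`` and ``end_of_word``; the stub
# below is a hypothetical stand-in for the real ``VocabEntry`` from vocab.py,
# used here only to check tensor shapes of the forward and training passes.
if __name__ == '__main__':
    class _StubVocab:
        def __init__(self):
            chars = ['<pad>', '{', '}', 'a', 'b', 'c']
            self.char2id = {c: i for i, c in enumerate(chars)}
            self.id2char = {i: c for i, c in enumerate(chars)}
            self.char_pad = self.char2id['<pad>']
            self.start_of_word = self.char2id['{']
            self.end_of_word = self.char2id['}']

    vocab = _StubVocab()
    decoder = CharDecoder(hidden_size=8, char_embedding_size=4, target_vocab=vocab)

    # A batch of two character sequences: "{ab}" and "{bc}".
    seq = torch.tensor([[1, 1], [3, 4], [4, 5], [2, 2]])   # (length, batch_size)
    scores, _ = decoder(seq)
    print(scores.shape)                    # torch.Size([4, 2, 6])
    print(decoder.train_forward(seq))      # summed cross-entropy loss (scalar)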
|
import os
import re
from learning_objective.hidden_function import true_evaluate, get_settings
lim_domain = get_settings(lim_domain_only=True)
scribe = open("./data/regret_analysis/gp_hm.csv", 'w')
for f in os.listdir("./data/regret_analysis"):
if f.startswith("gp_hm"):
print f
f = "data/regret_analysis/" + f
for line in open(f, 'r'):
r = re.compile('Tasks done:(.*?). New')
m = r.search(line)
if m:
print line,
r = re.compile('\[ (.*?)\]')
n = r.search(line)
print n.group(1).split()
val = n.group(1).split()
val[0] = val[0].replace("[", "")
print val
query = [float(elem) for elem in val[0:4]]
print query
tasknum = int(m.group(1))
y_val = true_evaluate(query, lim_domain)[0, -1]
scribe.write(str(tasknum) + "," + str(y_val) + "\n")
scribe.close()
|
import os
import platform
home_dir = os.path.expanduser("~")
os_name = platform.system()
def env(name: str):
return os.getenv(name)
def macos(name: str):
"""
    Return the macOS user cache directory for the given application name.
"""
return os.path.join(home_dir, "Library", "Caches", name)
def windows(name: str):
appData = env("APPDATA") or os.path.join(home_dir, "AppData", "Roaming")
localAppData = env("LOCALAPPDATA") or os.path.join(home_dir, "AppData", "Local")
return os.path.join(localAppData, name, "Cache")
def linux(name: str):
username = os.path.basename(home_dir)
return os.path.join(env("XDG_CACHE_HOME") or os.path.join(home_dir, ".cache"), name)
def cache_dir(name: str):
"""
    Return the platform-specific user cache directory for the given name.
"""
if os_name == "Darwin":
return macos(name)
elif os_name == "Windows":
return windows(name)
def env_paths(name: str):
    """
    Return platform-specific environment paths for the given name (not implemented yet).
    """
pass |
from transformers import BertForMaskedLM, BertModel, DistilBertForMaskedLM, DistilBertModel
def get_kobert_model():
""" Return BertModel for Kobert """
model = BertModel.from_pretrained("monologg/kobert")
return model
def get_kobert_lm():
""" Return BertForMaskedLM for Kobert """
model = BertForMaskedLM.from_pretrained("monologg/kobert-lm")
return model
def get_distilkobert_model():
""" Return DistilBertModel for DistilKobert """
model = DistilBertModel.from_pretrained("monologg/distilkobert")
return model
def get_distilkobert_lm():
""" Return DistilBertForMaskedLM for DistilKobert """
model = DistilBertForMaskedLM.from_pretrained("monologg/distilkobert")
return model
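# --- Hedged usage sketch (not part of the original module) -------------------
# Each helper downloads pretrained weights from the Hugging Face hub on first
# use, so network access is required.  Real inputs need the matching KoBERT
# tokenizer; the dummy token ids below are only meant to show output shapes.
if __name__ == '__main__':
    import torch

    model = get_distilkobert_model()
    input_ids = torch.tensor([[2, 100, 200, 300, 3]])   # arbitrary small ids
    attention_mask = torch.ones_like(input_ids)
    outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    print(outputs[0].shape)   # (1, 5, hidden_size) -- last hidden states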
|
class StringBuff(object):
def __init__(self, init_string: str = ''):
self._string_buff = [init_string]
def __add__(self, other: str):
self._string_buff.append(other)
return self
def __str__(self):
return ''.join(self._string_buff)
__repr__ = __str__
def to_string(self, connector: str = ''):
return connector.join(self._string_buff)
class classonlymethod(classmethod):
"""
Convert a function to be a class only method.
This has the same usage as classmethod, except that it can only be used in class.
"""
def __get__(self, instance, owner):
if instance is not None:
raise AttributeError("Method %s() is only allowed in class." % self.__func__.__name__)
return super().__get__(instance, owner)
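# --- Hedged usage sketch (not part of the original module) -------------------
# Demonstrates both helpers above: StringBuff accumulates fragments and joins
# them lazily, and classonlymethod refuses to be accessed through an instance.
if __name__ == '__main__':
    buff = StringBuff('Hello')
    buff += ', '
    buff += 'world'
    print(buff)                      # Hello, world
    print(buff.to_string(' / '))     # Hello / , / world

    class Registry:
        @classonlymethod
        def create(cls):
            return cls()

    print(Registry.create())         # fine: called on the class
    try:
        Registry().create()
    except AttributeError as exc:
        print(exc)                   # Method create() is only allowed in class.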
|
from amaranth_boards.sk_xc6slx9 import *
from amaranth_boards.sk_xc6slx9 import __all__
import warnings
warnings.warn("instead of nmigen_boards.sk_xc6slx9, use amaranth_boards.sk_xc6slx9",
DeprecationWarning, stacklevel=2)
|
import figurefirst as fifi
layout = fifi.svg_to_axes.FigureLayout('example_negative_labels.svg')
layout.make_mplfigures()
layout.fig.set_facecolor('None')
ex = layout.axes['ex']
ex.plot([1, 2], [3, 4])
fifi.mpl_functions.adjust_spines(ex, spines='left', yticks=[-1, -2])
layout.insert_figures('panels', cleartarget=True)
layout.write_svg('negative_labels_output.svg') |
# pylint: disable=preferred-module # FIXME: remove once migrated per GH-725
import os
import unittest
from pathlib import Path
import pytest
from ansiblelint.testing import run_ansible_lint
from ansiblelint.text import strip_ansi_escape
class TestCliRolePaths(unittest.TestCase):
def setUp(self):
self.local_test_dir = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "examples")
)
def test_run_single_role_path_no_trailing_slash_module(self):
cwd = self.local_test_dir
role_path = 'roles/test-role'
result = run_ansible_lint(role_path, cwd=cwd)
self.assertIn(
'Use shell only when shell functionality is required', result.stdout
)
def test_run_single_role_path_no_trailing_slash_script(self):
cwd = self.local_test_dir
role_path = 'roles/test-role'
result = run_ansible_lint(role_path, cwd=cwd, executable="ansible-lint")
self.assertIn(
'Use shell only when shell functionality is required', result.stdout
)
def test_run_single_role_path_with_trailing_slash(self):
cwd = self.local_test_dir
role_path = 'roles/test-role/'
result = run_ansible_lint(role_path, cwd=cwd)
self.assertIn(
'Use shell only when shell functionality is required', result.stdout
)
def test_run_multiple_role_path_no_trailing_slash(self):
cwd = self.local_test_dir
role_path = 'roles/test-role'
result = run_ansible_lint(role_path, cwd=cwd)
self.assertIn(
'Use shell only when shell functionality is required', result.stdout
)
def test_run_multiple_role_path_with_trailing_slash(self):
cwd = self.local_test_dir
role_path = 'roles/test-role/'
result = run_ansible_lint(role_path, cwd=cwd)
self.assertIn(
'Use shell only when shell functionality is required', result.stdout
)
def test_run_inside_role_dir(self):
cwd = os.path.join(self.local_test_dir, 'roles/test-role/')
role_path = '.'
result = run_ansible_lint(role_path, cwd=cwd)
self.assertIn(
'Use shell only when shell functionality is required', result.stdout
)
def test_run_role_three_dir_deep(self):
cwd = self.local_test_dir
role_path = 'testproject/roles/test-role'
result = run_ansible_lint(role_path, cwd=cwd)
self.assertIn(
'Use shell only when shell functionality is required', result.stdout
)
def test_run_playbook(self):
"""Call ansible-lint the way molecule does."""
cwd = os.path.abspath(os.path.join(self.local_test_dir, 'roles/test-role'))
lintable = 'molecule/default/include-import-role.yml'
role_path = str(Path(cwd).parent.resolve())
env = os.environ.copy()
env['ANSIBLE_ROLES_PATH'] = role_path
result = run_ansible_lint(lintable, cwd=cwd, env=env)
self.assertIn(
'Use shell only when shell functionality is required', result.stdout
)
def test_run_role_name_invalid(self):
cwd = self.local_test_dir
role_path = 'roles/invalid-name'
result = run_ansible_lint(role_path, cwd=cwd)
assert 'role-name: Role name invalid-name does not match' in strip_ansi_escape(
result.stdout
)
def test_run_role_name_with_prefix(self):
cwd = self.local_test_dir
role_path = 'roles/ansible-role-foo'
result = run_ansible_lint(role_path, cwd=cwd)
assert len(result.stdout) == 0
assert (
"Added ANSIBLE_ROLES_PATH=~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:roles"
in result.stderr
)
assert result.returncode == 0
def test_run_role_name_from_meta(self):
cwd = self.local_test_dir
role_path = 'roles/valid-due-to-meta'
result = run_ansible_lint(role_path, cwd=cwd)
assert len(result.stdout) == 0
assert (
"Added ANSIBLE_ROLES_PATH=~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:roles"
in result.stderr
)
assert result.returncode == 0
def test_run_invalid_role_name_from_meta(self):
cwd = self.local_test_dir
role_path = 'roles/invalid_due_to_meta'
result = run_ansible_lint(role_path, cwd=cwd)
assert (
'role-name: Role name invalid-due-to-meta does not match'
in strip_ansi_escape(result.stdout)
)
def test_run_single_role_path_with_roles_path_env(self):
"""Test for role name collision with ANSIBLE_ROLES_PATH.
Test if ansible-lint chooses the role in the current directory when the role
specified as parameter exists in the current directory and the ANSIBLE_ROLES_PATH.
"""
cwd = self.local_test_dir
role_path = 'roles/test-role'
env = os.environ.copy()
env['ANSIBLE_ROLES_PATH'] = os.path.realpath(
os.path.join(cwd, "../examples/roles")
)
result = run_ansible_lint(role_path, cwd=cwd, env=env)
assert 'Use shell only when shell functionality is required' in result.stdout
@pytest.mark.parametrize(
('result', 'env'),
((True, {"GITHUB_ACTIONS": "true", "GITHUB_WORKFLOW": "foo"}), (False, None)),
ids=("on", "off"),
)
def test_run_playbook_github(result, env):
"""Call ansible-lint simulating GitHub Actions environment."""
cwd = str(Path(__file__).parent.parent.resolve())
role_path = 'examples/playbooks/example.yml'
if env is None:
env = {}
env['PATH'] = os.environ['PATH']
result_gh = run_ansible_lint(role_path, cwd=cwd, env=env)
expected = (
'::warning file=examples/playbooks/example.yml,line=44,severity=VERY_LOW::package-latest '
'Package installs should not use latest'
)
assert (expected in result_gh.stdout) is result
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from unittest import TestCase
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
from peyps.run_utils import str2date, makedate
class TestMakeDate(TestCase):
def test_makedate(self):
self.assertEqual(
date(2017, 1, 1),
makedate('2017-01-01'),
)
class TestStr2Date(TestCase):
def test_iso(self):
self.assertEqual(
date(2017, 1, 1),
str2date('2017-01-01'),
)
def test_rel_month(self):
d = date.today() - relativedelta(months=1)
self.assertEqual(d, str2date('1m'))
self.assertEqual(d, str2date('1month'))
d = date.today() - relativedelta(months=3)
self.assertEqual(d, str2date('3months'))
def test_rel_week(self):
d = date.today() - timedelta(days=7)
self.assertEqual(d, str2date('1w'))
self.assertEqual(d, str2date('1week'))
d = date.today() - timedelta(days=21)
self.assertEqual(d, str2date('3weeks'))
def test_rel_day(self):
d = date.today() - timedelta(days=6)
self.assertEqual(d, str2date('6d'))
self.assertEqual(d, str2date('6days'))
d = date.today() - timedelta(days=2)
self.assertEqual(d, str2date('2day'))
        self.assertEqual(d, str2date('2days'))
def test_rel_multi(self):
d = date.today() - relativedelta(months=1, weeks=2, days=1)
self.assertEqual(d, str2date('1m2w1'))
self.assertEqual(d, str2date('1m2w1d'))
self.assertEqual(d, str2date('1month2weeks1day'))
|
from __future__ import absolute_import
from hls4ml.report.vivado_report import read_vivado_report
from hls4ml.report.vivado_report import parse_vivado_report
from hls4ml.report.quartus_report import read_quartus_report
from hls4ml.report.quartus_report import parse_quartus_report |
import bs4 as bs
import requests
class Access_Tickers:
def __init__(self):
self.tickers = []
def save_sp500_tickers(self):
resp = requests.get('http://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
soup = bs.BeautifulSoup(resp.text, 'lxml')
table = soup.find('table', {'class': 'wikitable sortable'})
tickers = []
for row in table.findAll('tr')[1:]:
ticker = row.findAll('td')[0].text
tickers.append(ticker.strip())
with open('S&P500.txt', 'w') as filehandle:
filehandle.writelines("%s\n" % ticker for ticker in tickers)
def save_top_etfs(self):
resp = requests.get('https://etfdb.com/compare/volume/')
soup = bs.BeautifulSoup(resp.text, 'lxml')
tickers = []
for n in soup.find_all(attrs={"data-th": "Symbol"}):
tickers.append(n.find('a').contents[0])
with open('TopETFs.txt', 'w') as filehandle:
filehandle.writelines("%s\n" % ticker for ticker in tickers)
def update_txt(self):
self.save_top_etfs()
self.save_sp500_tickers()
def get_stocks(self):
self.update_txt()
self.tickers = []
f = open("S&P500.txt", "r")
for x in f:
x=x.strip()
if x.isalpha():
self.tickers.append(x)
return self.tickers
|
# Hack Computer
from ._x__components import * |
import classad
import collections
import concurrent
import datetime
import htcondor
import logging
import os
import sys
import time
from configparser import NoSectionError, NoOptionError
from . import Executor
logger = logging.getLogger(__name__)
# context in strategy pattern
class HTCondor(Executor):
def __init__(self, config):
super().__init__(config)
self.ids = []
try:
self.refresh_rate = int(self.submitf['darwin']['refresh_rate'])
        except (KeyError, NoSectionError, NoOptionError):
            self.refresh_rate = 60
            logging.warning('refresh_rate not found, falling back to default: 60s')
def _coreExecution(self, handler, particles):
schedd = htcondor.Schedd()
conf = self.submitf
executable = conf['darwin']['executable']
executable_path = os.path.join(handler.optdir, executable)
conf['htcondor']['executable'] = executable_path
if not os.path.exists(executable_path):
logger.error('executable "{}" not found'.format(executable_path))
sys.exit(1)
# secure the job id from condor
self.ids = []
for p in particles:
arguments = p.coordinate.format()
formatted_args = ['-{} {}'.format(k, v) for k,v in arguments.items()]
conf['htcondor']['arguments'] = ' '.join(formatted_args)
conf['htcondor']['initialdir'] = handler.particlepath(p.name)
# get redirect of htcondor submit file to a dict
sub = htcondor.Submit(dict(conf.items('htcondor')))
with schedd.transaction() as txn:
ads = []
clusterid = sub.queue(txn, ad_results=ads)
self.ids.append(clusterid)
if 'should_transfer_files' in conf['htcondor'] and \
conf['htcondor']['should_transfer_files'] in ('YES',):
schedd.spool(ads)
req = ' || '.join('(ClusterId == {})'.format(id) for id in self.ids)
proj = ['ClusterId', 'JobStatus']
finished = False
while not finished:
count = 0
for data in schedd.xquery(requirements=req, projection=proj):
count += 1
if count == 0:
finished = True
else:
time.sleep(self.refresh_rate)
if 'should_transfer_files' in conf['htcondor'] and \
conf['htcondor']['should_transfer_files'] in ('YES',):
for clusterid in self.ids:
self._schedd.retrieve("ClusterId == %d".format(clusterid))
def _interruptHandler(self):
self._cleanUp()
def _cleanUp(self):
schedd = htcondor.Schedd()
req = ' || '.join('(ClusterId == {})'.format(id) for id in self.ids)
schedd.act(htcondor.JobAction.Remove, req)
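# Illustrative layout of the submit configuration consumed above (section and
# option names are inferred from the lookups in this class; the concrete
# values are placeholders, not taken from the original code base):
#
#     [darwin]
#     executable = run_particle.sh
#     refresh_rate = 60
#
#     [htcondor]
#     universe = vanilla
#     output = out.txt
#     error = err.txt
#     should_transfer_files = YES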
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import os.path
import sys
import itertools
import warnings
import functools
import posixpath
import ntpath
import errno
FS_ENCODING = sys.getfilesystemencoding()
PY_LEGACY = sys.version_info < (3, )
TRUE_VALUES = frozenset(('true', 'yes', '1', 'enable', 'enabled', True, 1))
try:
from os import scandir, walk
except ImportError:
from scandir import scandir, walk # noqa
try:
from shutil import get_terminal_size
except ImportError:
from backports.shutil_get_terminal_size import get_terminal_size # noqa
def isexec(path):
'''
Check if given path points to an executable file.
:param path: file path
:type path: str
:return: True if executable, False otherwise
:rtype: bool
'''
return os.path.isfile(path) and os.access(path, os.X_OK)
def fsdecode(path, os_name=os.name, fs_encoding=FS_ENCODING, errors=None):
'''
Decode given path.
:param path: path will be decoded if using bytes
:type path: bytes or str
:param os_name: operative system name, defaults to os.name
:type os_name: str
:param fs_encoding: current filesystem encoding, defaults to autodetected
:type fs_encoding: str
:return: decoded path
:rtype: str
'''
if not isinstance(path, bytes):
return path
if not errors:
use_strict = PY_LEGACY or os_name == 'nt'
errors = 'strict' if use_strict else 'surrogateescape'
return path.decode(fs_encoding, errors=errors)
def fsencode(path, os_name=os.name, fs_encoding=FS_ENCODING, errors=None):
'''
Encode given path.
:param path: path will be encoded if not using bytes
:type path: bytes or str
:param os_name: operative system name, defaults to os.name
:type os_name: str
:param fs_encoding: current filesystem encoding, defaults to autodetected
:type fs_encoding: str
:return: encoded path
:rtype: bytes
'''
if isinstance(path, bytes):
return path
if not errors:
use_strict = PY_LEGACY or os_name == 'nt'
errors = 'strict' if use_strict else 'surrogateescape'
return path.encode(fs_encoding, errors=errors)
def getcwd(fs_encoding=FS_ENCODING, cwd_fnc=os.getcwd):
'''
    Get the current working directory's absolute path.
    Like os.getcwd but guaranteed to return a unicode-str object.
:param fs_encoding: filesystem encoding, defaults to autodetected
:type fs_encoding: str
:param cwd_fnc: callable used to get the path, defaults to os.getcwd
:type cwd_fnc: Callable
:return: path
:rtype: str
'''
path = fsdecode(cwd_fnc(), fs_encoding=fs_encoding)
return os.path.abspath(path)
def getdebug(environ=os.environ, true_values=TRUE_VALUES):
'''
    Determine whether the app is expected to be run in debug mode by looking
    at environment variables.
:param environ: environment dict-like object
:type environ: collections.abc.Mapping
:returns: True if debug contains a true-like string, False otherwise
:rtype: bool
'''
return environ.get('DEBUG', '').lower() in true_values
def deprecated(func_or_text, environ=os.environ):
'''
Decorator used to mark functions as deprecated. It will result in a
    warning being emitted when the function is called.
Usage:
>>> @deprecated
... def fnc():
... pass
Usage (custom message):
>>> @deprecated('This is deprecated')
... def fnc():
... pass
:param func_or_text: message or callable to decorate
:type func_or_text: callable
:param environ: optional environment mapping
:type environ: collections.abc.Mapping
:returns: nested decorator or new decorated function (depending on params)
:rtype: callable
'''
def inner(func):
message = (
'Deprecated function {}.'.format(func.__name__)
if callable(func_or_text) else
func_or_text
)
@functools.wraps(func)
def new_func(*args, **kwargs):
with warnings.catch_warnings():
if getdebug(environ):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(message, category=DeprecationWarning,
stacklevel=3)
return func(*args, **kwargs)
return new_func
return inner(func_or_text) if callable(func_or_text) else inner
def usedoc(other):
'''
Decorator which copies __doc__ of given object into decorated one.
Usage:
>>> def fnc1():
... """docstring"""
... pass
>>> @usedoc(fnc1)
... def fnc2():
... pass
>>> fnc2.__doc__
    'docstring'
:param other: anything with a __doc__ attribute
:type other: any
:returns: decorator function
:rtype: callable
'''
def inner(fnc):
fnc.__doc__ = fnc.__doc__ or getattr(other, '__doc__')
return fnc
return inner
def pathsplit(value, sep=os.pathsep):
'''
    Get environment PATH elements as a list.
    This function only cares about splitting across OSes.
:param value: path string, as given by os.environ['PATH']
:type value: str
:param sep: PATH separator, defaults to os.pathsep
:type sep: str
:yields: every path
:ytype: str
'''
for part in value.split(sep):
if part[:1] == part[-1:] == '"' or part[:1] == part[-1:] == '\'':
part = part[1:-1]
yield part
def pathparse(value, sep=os.pathsep, os_sep=os.sep):
'''
    Get environment PATH directories as a list.
    This function cares about splitting, escapes and normalization of paths
across OSes.
:param value: path string, as given by os.environ['PATH']
:type value: str
:param sep: PATH separator, defaults to os.pathsep
:type sep: str
:param os_sep: OS filesystem path separator, defaults to os.sep
:type os_sep: str
:yields: every path
:ytype: str
'''
escapes = []
normpath = ntpath.normpath if os_sep == '\\' else posixpath.normpath
if '\\' not in (os_sep, sep):
escapes.extend((
('\\\\', '<ESCAPE-ESCAPE>', '\\'),
('\\"', '<ESCAPE-DQUOTE>', '"'),
('\\\'', '<ESCAPE-SQUOTE>', '\''),
('\\%s' % sep, '<ESCAPE-PATHSEP>', sep),
))
for original, escape, unescape in escapes:
value = value.replace(original, escape)
for part in pathsplit(value, sep=sep):
if part[-1:] == os_sep and part != os_sep:
part = part[:-1]
for original, escape, unescape in escapes:
part = part.replace(escape, unescape)
yield normpath(fsdecode(part))
def pathconf(path,
os_name=os.name,
isdir_fnc=os.path.isdir,
pathconf_fnc=getattr(os, 'pathconf', None),
pathconf_names=getattr(os, 'pathconf_names', ())):
'''
Get all pathconf variables for given path.
:param path: absolute fs path
:type path: str
:returns: dictionary containing pathconf keys and their values (both str)
:rtype: dict
'''
if pathconf_fnc and pathconf_names:
pathconf_output = {}
for key in pathconf_names:
try:
pathconf_output[key] = pathconf_fnc(path, key)
except OSError as exc:
if exc.errno != errno.EINVAL:
raise
return pathconf_output
if os_name == 'nt':
maxpath = 246 if isdir_fnc(path) else 259 # 260 minus <END>
else:
maxpath = 255 # conservative sane default
return {
'PC_PATH_MAX': maxpath,
'PC_NAME_MAX': maxpath - len(path),
}
ENV_PATH = tuple(pathparse(os.getenv('PATH', '')))
ENV_PATHEXT = tuple(pathsplit(os.getenv('PATHEXT', '')))
def which(name,
env_path=ENV_PATH,
env_path_ext=ENV_PATHEXT,
is_executable_fnc=isexec,
path_join_fnc=os.path.join,
os_name=os.name):
'''
Get command absolute path.
:param name: name of executable command
:type name: str
:param env_path: OS environment executable paths, defaults to autodetected
:type env_path: list of str
:param is_executable_fnc: callable will be used to detect if path is
executable, defaults to `isexec`
:type is_executable_fnc: Callable
:param path_join_fnc: callable will be used to join path components
:type path_join_fnc: Callable
:param os_name: os name, defaults to os.name
:type os_name: str
:return: absolute path
:rtype: str or None
'''
for path in env_path:
for suffix in env_path_ext:
exe_file = path_join_fnc(path, name) + suffix
if is_executable_fnc(exe_file):
return exe_file
return None
def re_escape(pattern, chars=frozenset("()[]{}?*+|^$\\.-#")):
'''
Escape all special regex characters in pattern.
Logic taken from regex module.
:param pattern: regex pattern to escape
    :type pattern: str
:returns: escaped pattern
:rtype: str
'''
escape = '\\{}'.format
return ''.join(
escape(c) if c in chars or c.isspace() else
'\\000' if c == '\x00' else c
for c in pattern
)
if PY_LEGACY:
FileNotFoundError = OSError # noqa
range = xrange # noqa
filter = itertools.ifilter
basestring = basestring # noqa
unicode = unicode # noqa
chr = unichr # noqa
bytes = str # noqa
else:
FileNotFoundError = FileNotFoundError
range = range
filter = filter
basestring = str
unicode = str
chr = chr
bytes = bytes
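# Illustrative behaviour of the PATH helpers above (values assume a POSIX
# layout; actual results depend on the running system):
#
#     >>> list(pathsplit('/usr/local/bin:/usr/bin'))
#     ['/usr/local/bin', '/usr/bin']
#     >>> which('ls')          # probes every ENV_PATH entry with isexec()
#     '/bin/ls'
#     >>> re_escape('a.b*c')
#     'a\\.b\\*c'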
|
#!/usr/bin/python
# Copyright (c) 2011 Tuenti Technologies
# See LICENSE for details
import base64
import os
import os.path
import socket
from keyring.backend import KeyringBackend
#TODO: Better exceptions
class HousekeeperClientException(Exception): pass
if 'HOUSEKEEPER_SOCKET' in os.environ:
default_socket = os.environ['HOUSEKEEPER_SOCKET']
else:
default_socket = os.path.expanduser('~/.housekeeper/socket')
class HousekeeperClient:
def __init__(self, socketfile=default_socket):
self.socketfile = socketfile
def __request(self, cmd):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(self.socketfile)
f = s.makefile('wr')
f.write(cmd + '\r\n')
f.flush()
ret = f.read().strip().split('\r\n')
if len(ret) == 1:
if ret[0] == 'OK':
return
else:
raise HousekeeperClientException
else:
response, result_code = ret
s.close()
if result_code == 'OK':
return response
else:
raise HousekeeperClientException(response)
def set(self, service, password, timeout=-1):
cmd = "SET %s %s" % (service, base64.b64encode(password))
if timeout >= 0:
cmd += " %d" % timeout
self.__request(cmd)
def get(self, service):
password = self.__request("GET %s" % service)
try:
return base64.b64decode(password)
except TypeError:
# Old running instances of housekeeper can have unencoded passwords
# TODO: Improve protocol for 2.0, and remove this
return password
class HousekeeperKeyringBackend(KeyringBackend):
def __init__(self, socketfile=default_socket, timeout=600):
self.socketfile = socketfile
self.client = HousekeeperClient(socketfile)
self.timeout = timeout
def supported(self):
if os.path.exists(self.socketfile):
return 1
else:
return -1
def get_password(self, service, username):
try:
return self.client.get("%s@%s" % (username, service))
except HousekeeperClientException:
return ''
def set_password(self, service, username, password):
self.client.set("%s@%s" % (username, service), password, self.timeout)
return 0
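# Illustrative usage (assumes a housekeeper daemon is listening on the default
# socket; the service, user and password values are placeholders):
#
#     client = HousekeeperClient()
#     client.set('example-service', 's3cret', timeout=300)
#     client.get('example-service')        # -> 's3cret'
#
#     backend = HousekeeperKeyringBackend()
#     backend.set_password('example-service', 'alice', 's3cret')
#     backend.get_password('example-service', 'alice')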
|
######## Useful Tools ########
#This is meant to be imported by modules_and_pip.py
import random
feet_in_mile = 5280
meters_in_kilometer = 1000
beatles = ["John Lennon", "Paul McCartney", "George Harrison", "Ringo Starr"]
def get_file_ext(filename):
return filename[filename.index(".") + 1:]
def roll_dice(num):
return random.randint(1, num) |
from rest_framework import serializers
from . import models
class UserSerilizer(serializers.Serializer):
"Serialize the name of user"
name=serializers.CharField(max_length=10)
class UserProfileSerilizer(serializers.ModelSerializer):
class Meta:
model=models.UserProfile
fields =('name','email','id','password')
extra_kwargs={
'password':{
'write_only':True,
'style':{'input_type':'password'}
}
}
def create(self,validated_data):
"""Create new user and return it"""
user=models.UserProfile.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password']
)
return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
class Meta:
model=models.ProfileFeedItem
fields=('id','user_profile','created_on','status_text')
extra_kwargs={'user_profile':{'read_only':True}}
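# Illustrative use of UserProfileSerilizer outside a view (request plumbing
# omitted; the field values are placeholders):
#
#     serializer = UserProfileSerilizer(data={
#         'name': 'Ada', 'email': 'ada@example.com', 'password': 'secret123'})
#     if serializer.is_valid():
#         user = serializer.save()   # calls create() above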
|
# Functions
def greet():
print('Hello')
print('Good morning')
greet()
def add(x,y):
c = x + y
print(c)
add(5,3)
def add_sub(x,y):
c = x + y
d = x - y
print(c,d)
add_sub(5,6)
def add_mult(x,y):
c = x + y
d = x * y
return c,d
addition,multiplication = add_mult(5,6)
print(addition,multiplication)
def person(name,age):
print('my name is ', name ,', and I am ', age , 'years old.')
person(name='Rawlings', age=23)
def baby(name, month=10):
print(name, ' is a baby because he/she is ', month, 'months old')
baby('elijah')
baby('elijah', 6)
def sum(a,*b):
c = a
for i in b:
c = c + i
print(c)
sum(2,2,2,2,4)
def person(name, **data):
print(name)
print(data)
person('rollex', age = 23, city = 'PH', mob = 90345)
def person(name, **data):
print(name)
for i,j in data.items():
print(i,j)
person('rollex', age = 23, city = 'PH', mob = 90345)
|
# BSD 3-Clause License
#
# Copyright (c) 2020, princeton-vl
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class FlowHead(nn.Module):
def __init__(self, input_dim: int = 128, hidden_dim: int = 256) -> None:
super(FlowHead, self).__init__()
self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x: Tensor) -> Tensor:
return self.conv2(self.relu(self.conv1(x)))
class ConvGRU(nn.Module):
def __init__(self, hidden_dim: int = 128, input_dim: int = (192 + 128)) -> None:
super(ConvGRU, self).__init__()
self.convz = nn.Conv2d(hidden_dim + input_dim, hidden_dim, 3, padding=1)
self.convr = nn.Conv2d(hidden_dim + input_dim, hidden_dim, 3, padding=1)
self.convq = nn.Conv2d(hidden_dim + input_dim, hidden_dim, 3, padding=1)
def forward(self, h: Tensor, x: Tensor) -> Tensor:
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx))
r = torch.sigmoid(self.convr(hx))
q = torch.tanh(self.convq(torch.cat([r * h, x], dim=1)))
h = (1 - z) * h + z * q
return h
class SepConvGRU(nn.Module):
def __init__(self, hidden_dim: int = 128, input_dim: int = (192 + 128)) -> None:
super(SepConvGRU, self).__init__()
self.convz1 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = nn.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
def forward(self, h: Tensor, x: Tensor) -> Tensor:
# horizontal
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz1(hx))
r = torch.sigmoid(self.convr1(hx))
q = torch.tanh(self.convq1(torch.cat([r * h, x], dim=1)))
h = (1 - z) * h + z * q
# vertical
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz2(hx))
r = torch.sigmoid(self.convr2(hx))
q = torch.tanh(self.convq2(torch.cat([r * h, x], dim=1)))
h = (1 - z) * h + z * q
return h
class BasicMotionEncoder(nn.Module):
def __init__(self, corr_levels: int, corr_radius: int) -> None:
super(BasicMotionEncoder, self).__init__()
corr_planes = corr_levels * (2 * corr_radius + 1) ** 2
self.convc1 = nn.Conv2d(corr_planes, 256, 1, padding=0)
self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
self.conv = nn.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow: Tensor, corr: Tensor) -> Tensor:
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = torch.cat([cor, flo], dim=1)
out = F.relu(self.conv(cor_flo))
return torch.cat([out, flow], dim=1)
class BasicUpdateBlock(nn.Module):
def __init__(
self, corr_levels: int, corr_radius: int, hidden_dim: int = 128
) -> None:
super(BasicUpdateBlock, self).__init__()
self.encoder = BasicMotionEncoder(corr_levels, corr_radius)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=(128 + hidden_dim))
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = nn.Sequential(
nn.Conv2d(128, 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 64 * 9, 1, padding=0),
)
def forward(
self,
net: Tensor,
inp: Tensor,
corr: Tensor,
flow: Tensor,
) -> Tuple[Tensor, Tensor, Tensor]:
motion_features = self.encoder(flow, corr)
inp = torch.cat([inp, motion_features], dim=1)
net = self.gru(net, inp)
delta_flow = self.flow_head(net)
        # scale mask to balance gradients
mask = 0.25 * self.mask(net)
return net, mask, delta_flow
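# Minimal smoke-test sketch for the update block above (corr_levels=4 and
# corr_radius=4 are the usual RAFT settings; the batch and spatial sizes here
# are arbitrary assumptions):
#
#     block = BasicUpdateBlock(corr_levels=4, corr_radius=4, hidden_dim=128)
#     net = torch.randn(1, 128, 46, 62)
#     inp = torch.randn(1, 128, 46, 62)
#     corr = torch.randn(1, 4 * 9 ** 2, 46, 62)   # corr_levels * (2r+1)**2 channels
#     flow = torch.randn(1, 2, 46, 62)
#     net, mask, delta_flow = block(net, inp, corr, flow)
#     # net: (1, 128, 46, 62), mask: (1, 576, 46, 62), delta_flow: (1, 2, 46, 62)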
|
# -*- coding: utf-8 -*-
from datetime import datetime
from billiard.exceptions import SoftTimeLimitExceeded
from rpc.client.result import ResultClient
from webs.api.models.db_proxy import crawl_task_model_proxy, result_model_proxy
from worker import celery_app
from worker.library.playwright import PlayWrightHandler
@celery_app.task(
name='fetch_tasks', queue='priority_fetch', acks_late=True, soft_time_limit=1000, max_retries=1,
default_retry_delay=30, autoretry_for=(Exception,))
def fetch_tasks(crawl_task_id):
"""
通过优先级队列取得任务进行抓取
"""
crawl_task_obj = crawl_task_model_proxy.find_one_with_condition(
crawl_task_model_proxy.model.id == crawl_task_id,
crawl_task_model_proxy.model.process_state != 'finished'
)
if not crawl_task_obj:
return
    # Mark the crawl task as started
if crawl_task_obj.process_state == 'readying':
crawl_task_model_proxy.set_attr(crawl_task_obj, 'process_state', 'running')
url_nested_list = crawl_task_obj.url_nested_list
    # This happens when a worker process exits abnormally: RabbitMQ never acked the
    # message, so the task is redelivered once the worker restarts
else: # crawl_task_obj.process_state == 'running'
already_url_ids = result_model_proxy.query_already_crawl_url_ids(subtask_id=crawl_task_obj.subtask_id)
url_nested_list = [
url_info for url_info in crawl_task_obj.url_nested_list
if url_info['url_id'] not in already_url_ids
]
undone_url_ids = []
if url_nested_list:
        # Run the crawl
playwright_handler = PlayWrightHandler(
subtask_id=crawl_task_obj.subtask_id,
url_nested_list=url_nested_list,
options=crawl_task_obj.options)
undone_url_ids = playwright_handler.run()
    # Record the crawl state, finish time and the URLs that failed
crawl_task_model_proxy.set_many_attr(
obj=crawl_task_obj,
fields_v={
'process_state': 'finished',
'finished_at': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'failure_url_ids': undone_url_ids
}
)
    ####### Call the engine-side RPC service to update the subtask crawl status
    # Connect to the gRPC service
    grpc_result_client = ResultClient(crawl_task_obj.options.get('rpc_server'))
    # Mark the subtask crawl as finished
grpc_result_client.set_subtask_status(
subtask_id=crawl_task_obj.subtask_id, status=True, finished_at=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
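# Illustrative enqueue call (the task name and queue come from the decorator
# above; the crawl_task_id value is a placeholder):
#
#     fetch_tasks.apply_async(args=[42], queue='priority_fetch')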
|
import threading
import numpy as np
import os
import io
from flask import Flask, request, render_template, redirect, Response
from flask_classful import FlaskView, route
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.offsetbox import AnchoredText
from mpl_toolkits.mplot3d import Axes3D
import scipy.interpolate as interp
import traffic_simulator
CWD = os.path.dirname(os.path.realpath(__file__))
debug = True
app = Flask(__name__)
app.config.from_object(__name__)
class HomeView(FlaskView):
route_base='/home'
def index(self):
return render_template("index.html")
class GetTimingView(FlaskView):
route_base='/get-timings'
def index(self):
return render_template("env_setup.html")
class SimulateView(FlaskView):
route_base='/simulate'
@route('/submit', methods=['GET', 'POST'])
def submit_simulation(self):
self.distanceLtoL = float(request.form["distanceLtoL"])
self.speedLimitLtoL = float(request.form["speedLimitLtoL"])
self.timeUpQueue = float(request.form["timeUpQueue"])
if "lightsDetectMovement" in request.form:
self.lightsDetectMovement = True
else:
self.lightsDetectMovement = False
self.simTime = int(request.form["simTime"])
self.lightGreenTimesRange = [int(request.form["lightGreenTimeStart"]), int(request.form["lightGreenTimeEnd"])]
self.roadUsagesRange = [float(request.form["roadUsageStart"]), float(request.form["roadUsageEnd"])]
self.simulationThread = SimulationThread(self.distanceLtoL, self.speedLimitLtoL, self.timeUpQueue, self.lightsDetectMovement, self.simTime, self.lightGreenTimesRange, self.roadUsagesRange)
self.simulationThread.name = "simThread"
self.simulationThread.start()
return redirect("/simulate/simulation-running")
@route('/simulation-running')
def running_simulation(self):
response = Response(redirect('/simulate/simulation-results'))
activeThreadNames = list(thread.name for thread in threading.enumerate())
if "simThread" in activeThreadNames:
simThread = threading.enumerate()[activeThreadNames.index("simThread")]
return render_template('simulation_running.html', currentIteration=simThread.currentIteration, totalIterations=simThread.numberOfIterations, percentageDone=round((simThread.currentIteration/simThread.numberOfIterations) * 100), distanceLtoL=simThread.distanceLtoL, speedLimitLtoL=simThread.speedLimitLtoL, timeUpQueue=simThread.timeUpQueue, simTime=simThread.simTime, lightsDetectMovement=simThread.lightsDetectMovement)
else:
return redirect('/simulate/simulation-results')
@route('/simulation-results')
def finished_simulation(self):
return render_template('simulation_results.html')
class SimulationThread(threading.Thread):
def __init__(self, distanceLtoL, speedLimitLtoL, timeUpQueue, lightsDetectMovement, simTime, lightGreenTimesRange, roadUsagesRange):
threading.Thread.__init__(self)
self.distanceLtoL = distanceLtoL
self.speedLimitLtoL = speedLimitLtoL
self.timeUpQueue = timeUpQueue
self.lightsDetectMovement = lightsDetectMovement
self.simTime = simTime
self.lightGreenTimesRange = lightGreenTimesRange
self.roadUsageRange = roadUsagesRange
self.timeGreenStep = 2
self.roadUsageStep = 0.02
self.numberOfIterations = round((((self.roadUsageRange[1]-self.roadUsageRange[0])/self.roadUsageStep)+2)*(((self.lightGreenTimesRange[1]-self.lightGreenTimesRange[0])/self.timeGreenStep)+2))
def run(self):
self.currentIteration = 0
x = []
y = []
z = []
for timeGreen in range(self.lightGreenTimesRange[0], self.lightGreenTimesRange[1]+self.timeGreenStep, self.timeGreenStep):
for roadUsage in np.arange(self.roadUsageRange[0], self.roadUsageRange[1]+self.roadUsageStep, self.roadUsageStep):
self.currentIteration += 1
tenv = traffic_simulator.TrafficEnvironment()
tenv.set_env_variables(self.distanceLtoL, self.speedLimitLtoL, self.timeUpQueue, self.lightsDetectMovement, self.simTime, timeGreen, roadUsage)
tenv.start_simulation()
if tenv.averageTimeStopped <= 60:
z.append(tenv.averageTimeStopped)
else:
z.append(60)
x.append(roadUsage)
y.append(timeGreen)
minimasx = []
minimasy = []
minimasz = []
for roadUsage in np.arange(self.roadUsageRange[0], self.roadUsageRange[1]+self.roadUsageStep, self.roadUsageStep):
indexesAtCurrentRoadUsage = []
for index1 in range(len(x)):
if x[index1] == roadUsage:
indexesAtCurrentRoadUsage.append(index1)
tempZList = []
tempYList = []
for index2 in indexesAtCurrentRoadUsage:
tempZList.append(z[index2])
tempYList.append(y[index2])
minIndex = np.argmin(tempZList)
minimasx.append(roadUsage)
minimasy.append(tempYList[minIndex])
minimasz.append(tempZList[minIndex])
plotx,ploty = np.meshgrid(np.linspace(np.min(x),np.max(x),50),\
np.linspace(np.min(y),np.max(y),50))
plotz = interp.griddata((x,y),z,(plotx,ploty), method='linear')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(minimasx, minimasy, minimasz, '--k', label="Most Efficient")
ax.legend()
surf = ax.plot_surface(plotx,ploty,plotz,cstride=1,rstride=1,cmap=cm.jet)
cbar = fig.colorbar(surf, shrink=0.5, aspect=10)
ax.set_title("lightGreenTime vs. roadUsage")
ax.set_xlabel('roadUsage')
ax.set_ylabel('lightGreenTime')
ax.set_zlabel('averageTimeStopped')
ax.view_init(elev=30, azim=-150)
        # Render the figure into an in-memory buffer once, then persist it to disk
        buf = io.BytesIO()
        plt.savefig(buf, format='svg')
        plt.close()
        plt_bytes = buf.getvalue()
        buf.close()
        with open(os.path.join(CWD, "static/images/tempGraph.svg"), "wb") as tempGraphFile:
            tempGraphFile.write(plt_bytes)
self.graphImageBytes = plt_bytes
HomeView.register(app)
GetTimingView.register(app)
SimulateView.register(app)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=debug) |
"""
This is a test applet deployed on a BDC to test fetching from HDFS from within
app-deploy.
"""
import re
import subprocess
import json
PRAXXIS_ROOT = "http://nmnode-0-svc:50070/webhdfs/v1/praxxis/"
def test(loc):
"""
collect the sequences for all users and all scenes.
should we record user/scene/time info for training?
"""
import requests
from datetime import date, timedelta
date_path = (date.today() - timedelta(days=int(loc))).strftime('%Y/%m/%d/ipynb')
global PRAXXIS_ROOT
basepath = PRAXXIS_ROOT + date_path
params = (
('op', 'LISTSTATUS'),
)
response = requests.get(basepath, params=params)
users = (response.json())['FileStatuses']['FileStatus']
sequences = []
for user in users:
user_basepath = basepath + "/" + user['pathSuffix']
response = requests.get(user_basepath, params=params)
scenes = (response.json())['FileStatuses']['FileStatus']
for scene in scenes:
scene_basepath = user_basepath + "/" + scene['pathSuffix']
response = requests.get(scene_basepath, params=params)
files = (response.json())['FileStatuses']['FileStatus']
sequence = []
for f in files:
fullname = f['pathSuffix']
# removes datetime info at start of string, and library name
filename = ('-'.join(fullname.split('-')[3:])) # .rstrip('.ipynb')
filename = filename[0:len(filename) - 6] # removes .ipynb
sequence.append(filename)
sequences.append(sequence)
return sequences
"""http://nmnode-0-svc:50070/webhdfs/v1/praxxis?op=LISTSTATUS"""
"""
1. find model file
2. load_model() on filename
3. fit_on_batch or train_on_batch of sequences? OHE first tho
- make sure we're training on full names, not just prefixes!
4. save model to current date!
"""
|
import sys
from typing import Union
def prefsorted(seq: list,
preferred: Union[str, list, None] = None,
reverse: bool = False) -> list:
if isinstance(preferred, str):
preferred = preferred.split()
elif preferred is None:
preferred = []
taken = []
rest = list(seq)[:]
for p in preferred:
try:
while True:
rest.remove(p)
taken.append(p)
except ValueError:
pass
if reverse:
return rest + taken
else:
return taken + rest |
""" Contains the cli interface for calculator """
import calculator.expression as expression
import calculator.expression_tree as expression_tree
ENTRY = "Welcome to Calculator!"
TAKE_INPUT = "Enter the expression:"
OUTPUT = "Answer is:"
def run():
print(ENTRY)
inp = input(TAKE_INPUT)
exp = expression.ExpressionObjectList(inp)
exp.objectify()
stack = expression_tree.Stack(exp.objects)
stack.make()
stack.tree.solve()
print (OUTPUT + str(stack.tree.result)) |
# region imports
from fxpt.side_utils import pyperclip
from fxpt.qt.pyside import shiboken2, QtCore, QtGui, QtWidgets, isPySide2
import maya.OpenMayaUI as omui
import pymel.core as pm
import maya.cmds as m
from fxpt.fx_prefsaver import prefsaver, serializers
import searchers
from com import *
if isPySide2():
from fxpt.fx_search.main_window_ui2 import Ui_MainWindow
else:
from fxpt.fx_search.main_window_ui import Ui_MainWindow
# endregion imports
mainWin = None
# TODO: if string in table is too long, popup full string in some widget
OPT_VAR_NAME = 'fx_search_prefs'
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class SearchUI(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
# noinspection PyArgumentList
ptr = omui.MQtUtil.mainWindow()
mainWinQObject = None
if ptr is not None:
mainWinQObject = shiboken2.wrapInstance(long(ptr), QtWidgets.QWidget) # or you can use QMainWindow
else:
m.error('cannot find main Maya window.')
super(SearchUI, self).__init__(mainWinQObject)
self.setupUi(self)
self.ctxMenu = None
self.generateCtxMenu()
self.currentSearcherLink = None
self.searchers = []
self.initSearchersAndControls()
self.ui_setState(SEARCH_STATE_WELCOME)
self.prefSaver = prefsaver.PrefSaver(serializers.SerializerOptVar(OPT_VAR_NAME))
self.ui_initSettings()
self.ui_loadSettings()
# noinspection PyMethodMayBeStatic
def ui_initSettings(self):
self.prefSaver.addControl(self, prefsaver.UIType.PYSIDEWindow, (100, 100, 900, 600))
self.prefSaver.addControl(self.ui_LED_search, prefsaver.UIType.PYSIDELineEdit)
self.prefSaver.addControl(self.ui_BTN_soCaseSensitive, prefsaver.UIType.PYSIDECheckButton, False)
self.prefSaver.addControl(self.ui_BTN_soRegex, prefsaver.UIType.PYSIDECheckButton, False)
self.prefSaver.addControl(self.ui_BTN_soSelectFound, prefsaver.UIType.PYSIDECheckButton, False)
self.prefSaver.addControl(self.ui_BTN_soIncludeShapes, prefsaver.UIType.PYSIDECheckButton, False)
self.prefSaver.addControl(self.ui_BTN_soSearchSelected, prefsaver.UIType.PYSIDECheckButton, False)
self.prefSaver.addControl(self.ui_ACT_useAllTabs, prefsaver.UIType.PYSIDECheckAction, False)
for btn in self.getCatButtons():
self.prefSaver.addControl(btn, prefsaver.UIType.PYSIDECheckButton, True)
def ui_loadSettings(self):
self.prefSaver.loadPrefs()
def ui_saveSettings(self):
self.prefSaver.savePrefs()
def ui_resetSettings(self):
self.prefSaver.resetPrefs()
def initSearchersAndControls(self):
self.searchers = [
SearcherLink(searchers.SearcherNodes('All Nodes'), QtWidgets.QPushButton('All Nodes'),
QtWidgets.QTableView(), QtWidgets.QWidget()),
SearcherLink(searchers.SearcherDagNodes('DAG Nodes'), QtWidgets.QPushButton('DAG Nodes'),
QtWidgets.QTableView(), QtWidgets.QWidget()),
SearcherLink(searchers.SearcherFxRefs('FX References'), QtWidgets.QPushButton('FX References'),
QtWidgets.QTableView(), QtWidgets.QWidget()),
SearcherLink(searchers.SearcherTexturedBy('Textured By'), QtWidgets.QPushButton('Textured By'),
QtWidgets.QTableView(), QtWidgets.QWidget()),
SearcherLink(searchers.SearcherTextures('Textures'), QtWidgets.QPushButton('Textures'),
QtWidgets.QTableView(), QtWidgets.QWidget()),
SearcherLink(searchers.SearcherTransforms('Transforms'), QtWidgets.QPushButton('Transforms'),
QtWidgets.QTableView(), QtWidgets.QWidget()),
SearcherLink(searchers.SearcherType('Type'), QtWidgets.QPushButton('Type'),
QtWidgets.QTableView(), QtWidgets.QWidget())
]
for sl in self.searchers:
layout = QtWidgets.QHBoxLayout()
sl.tabWidget.setLayout(layout)
layout.addWidget(sl.table)
layout.setContentsMargins(4, 4, 4, 4)
sl.table.setModel(sl.searcher.getModel())
self.setTableProps(sl.table)
sl.button.setCheckable(True)
buttonLabel = sl.button.text()
sl.button.setObjectName('uiBTN_' + buttonLabel[0].lower() + buttonLabel[1:].replace(' ', ''))
# sl.button.setStyleSheet(CHECKED_BUTTON_STYLE)
self.ui_LAY_catButtons.addWidget(sl.button)
sl.table.connect(
sl.table.selectionModel(),
QtCore.SIGNAL('selectionChanged(QItemSelection, QItemSelection)'),
self.ui_onTableSelectionChanged
)
sl.table.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
sl.table.connect(sl.table, QtCore.SIGNAL('customContextMenuRequested(QPoint)'),
self.ui_onCtxMenuPopupRequest)
# noinspection PyArgumentList
self.ui_LAY_catButtons.addItem(QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding))
self.resetResultTablesAndTabs()
def setTableProps(self, table):
table.setShowGrid(False)
table.verticalHeader().setVisible(False)
table.verticalHeader().setDefaultSectionSize(15)
table.setSelectionBehavior(QtWidgets.QTableView.SelectRows)
table.setSelectionMode(QtWidgets.QTableView.ExtendedSelection)
table.setFont(FONT_MONOSPACE_QFONT)
table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
table.setSortingEnabled(True)
table.sortByColumn(0, QtCore.Qt.AscendingOrder)
table.setAlternatingRowColors(True)
table.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerPixel)
table.horizontalHeader().setStretchLastSection(True)
table.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignLeft)
# custom "resize to fit" section, cause standard table.resizeColumnsToContents() may be very slow on huge data
model = table.model()
columnCount = model.columnCount()
for col in range(columnCount):
stringLengths = [len(model.index(row, col).data(QtCore.Qt.DisplayRole)) for row in range(model.rowCount())]
stringLengths.append(len(str(model.headerData(col, QtCore.Qt.Horizontal, QtCore.Qt.DisplayRole))) + TABLE_HEADER_TITLE_OFFSET)
columnMaxLength = min(max(stringLengths), TABLE_MAX_COLUMN_SIZE)
table.horizontalHeader().resizeSection(col, columnMaxLength * FONT_MONOSPACE_LETTER_SIZE + TABLE_COLUMN_RIGHT_OFFSET)
def generateCtxMenu(self):
self.ctxMenu = QtWidgets.QMenu()
self.ctxMenu.addAction(self.ui_ACT_selectAll)
self.ctxMenu.addAction(self.ui_ACT_deselectAll)
self.ctxMenu.addSeparator()
self.ctxMenu.addAction(self.ui_ACT_copyFullNamesToClipboard)
self.ctxMenu.addAction(self.ui_ACT_copyShortNamesToClipboard)
self.ctxMenu.addSeparator()
self.ctxMenu.addAction(self.ui_ACT_useAllTabs)
# noinspection PyArgumentList
def ui_onCtxMenuPopupRequest(self, *arg, **kwarg):
self.ctxMenu.popup(QtGui.QCursor.pos())
def ui_onCatOnOffClicked(self):
checkState = True if self.sender() == self.ui_BTN_catAllOn else False
for btn in self.getCatButtons():
btn.setChecked(checkState)
def getCurrentOrAllTables(self):
if self.ui_isUseAllTabs():
tables = self.getTables()
else:
currentSearcherLink = self.getCurrentSearcherLink()
if currentSearcherLink:
tables = [self.getCurrentSearcherLink().table]
else:
return None
return tables
def ui_onTableSelectionChanged(self):
selectionList = []
getSelectedNodeInfos = self.getSelectedNodeInfos()
if not getSelectedNodeInfos:
return
for ni in getSelectedNodeInfos:
if ni.selectionString:
selectionList.extend(ni.selectionString)
if selectionList:
m.select(selectionList)
else:
m.select(clear=True)
def ui_onSearchClicked(self):
self.ui_tryToSaveTabOptVar()
self.ui_setState(SEARCH_STATE_SEARCHING)
self.repaint()
self.resetResultTablesAndTabs()
searchDesc = self.getSearchDesc()
self.setActiveSearchers()
somethingFound = False
for sl in self.searchers:
sl.searcher.search(searchDesc)
if sl.searcher.hasResult():
somethingFound = True
self.ui_TAB_results.addTab(sl.tabWidget, sl.searcher.getName())
self.setTableProps(sl.table)
sl.attachedToTabWidget = True
if somethingFound:
self.ui_setState(SEARCH_STATE_RESULTS)
if searchDesc.selectFound:
for table in self.getTables():
table.selectAll()
self.ui_tryShowSavedTab()
else:
self.ui_setState(SEARCH_STATE_NOTHING_FOUND)
def ui_onResultTabChanged(self):
self.ui_onTableSelectionChanged()
# noinspection PyTypeChecker
def ui_onSelectAllClicked(self):
tables = self.getCurrentOrAllTables()
if not tables:
return
for table in tables:
table.selectAll()
# noinspection PyTypeChecker
def ui_onDeselectAllClicked(self):
tables = self.getCurrentOrAllTables()
if not tables:
return
for table in tables:
table.clearSelection()
# noinspection PyTypeChecker
def getSelectedNodeInfos(self):
nodeInfoList = []
tables = self.getCurrentOrAllTables()
if not tables:
return nodeInfoList
for table in tables:
selectedIndexes = table.selectionModel().selectedRows() # column = 0 by default
for index in selectedIndexes:
nodeInfo = index.data(role=QtCore.Qt.UserRole)
nodeInfoList.append(nodeInfo)
return nodeInfoList
def ui_onCopyFullNameClicked(self):
nameList = [ni.fullPathName for ni in self.getSelectedNodeInfos() if ni.fullPathName]
nameString = ''
for name in nameList:
nameString += name + '\r\n'
pyperclip.copy(nameString)
def ui_onCopyShortNameClicked(self):
nameList = [ni.shortName for ni in self.getSelectedNodeInfos() if ni.shortName]
nameString = ''
for name in nameList:
nameString += name + '\r\n'
pyperclip.copy(nameString)
def resetResultTablesAndTabs(self):
for sl in self.searchers:
sl.searcher.reset()
sl.attachedToTabWidget = False
self.ui_TAB_results.clear()
def ui_setState(self, state):
stack = self.ui_STK_results
if state == SEARCH_STATE_WELCOME:
stack.setCurrentWidget(self.ui_STKPG_status)
self.ui_TXT_status.setText('type something to search\n and press "enter"')
elif state == SEARCH_STATE_SEARCHING:
stack.setCurrentWidget(self.ui_STKPG_status)
self.ui_TXT_status.setText('searching...')
elif state == SEARCH_STATE_NOTHING_FOUND:
stack.setCurrentWidget(self.ui_STKPG_status)
self.ui_TXT_status.setText('nothing found')
elif state == SEARCH_STATE_RESULTS:
stack.setCurrentWidget(self.ui_STKPG_results)
def setActiveSearchers(self):
for sl in self.searchers:
sl.searcher.setActive(sl.button.isChecked())
def getSearchers(self):
return [sl.searcher for sl in self.searchers]
def getCatButtons(self):
return [sl.button for sl in self.searchers]
def getTables(self):
return [sl.table for sl in self.searchers]
def ui_isUseAllTabs(self):
return self.ui_ACT_useAllTabs.isChecked()
def getSearchDesc(self):
sd = searchers.SearchDesc()
sd.searchString = str(self.ui_LED_search.text()).strip()
sd.caseSensitive = self.ui_BTN_soCaseSensitive.isChecked()
sd.regex = self.ui_BTN_soRegex.isChecked()
sd.selectFound = self.ui_BTN_soSelectFound.isChecked()
sd.includeShapes = self.ui_BTN_soIncludeShapes.isChecked()
sd.searchSelected = self.ui_BTN_soSearchSelected.isChecked()
return sd
def ui_tryToSaveTabOptVar(self):
optVars = pm.env.optionVars
currentWidget = self.ui_TAB_results.currentWidget()
currentSL = None
for sl in self.searchers:
if currentWidget is sl.tabWidget:
currentSL = sl
break
if currentSL:
optVars[OPT_VAR_CURRENT_TAB] = currentSL.searcher.getName()
def ui_tryShowSavedTab(self):
optVars = pm.env.optionVars
savedTabName = optVars[OPT_VAR_CURRENT_TAB]
if not savedTabName:
return
for sl in self.searchers:
if sl.attachedToTabWidget and sl.searcher.getName() == savedTabName:
self.ui_TAB_results.setCurrentWidget(sl.tabWidget)
return
def getCurrentSearcherLink(self):
currentWidget = self.ui_TAB_results.currentWidget()
for sl in self.searchers:
if sl.tabWidget is currentWidget:
return sl
def ui_onShowHelpClicked(self):
# return
# noinspection PyArgumentList,PyCallByClass
import webbrowser
webbrowser.open('http://davydenko.info/searcher/', new=0, autoraise=True)
def ui_onCloseClicked(self):
self.close()
# noinspection PyMethodOverriding
def closeEvent(self, event):
self.ui_saveSettings()
self.ui_tryToSaveTabOptVar()
global mainWin
mainWin = None
event.accept()
class OptionVarLink(object):
def __init__(self, ovName, defaultValue, getFromControlFunc, setToControlFunc):
self.ovName = ovName
self.defaultValue = defaultValue
self.getFromControlFunc = getFromControlFunc
self.setToControlFunc = setToControlFunc
def init(self):
optVars = pm.env.optionVars
if self.ovName not in optVars:
optVars[self.ovName] = self.defaultValue
def applyToControl(self):
optVars = pm.env.optionVars
self.setToControlFunc(optVars[self.ovName])
def getFromControl(self):
optVars = pm.env.optionVars
optVars[self.ovName] = self.getFromControlFunc()
def reset(self):
optVars = pm.env.optionVars
optVars.pop(self.ovName)
self.init()
self.applyToControl()
class SearcherLink(object):
def __init__(self, searcher, button, table, tabWidget):
self.searcher = searcher
self.button = button
self.table = table
self.tabWidget = tabWidget
self.attachedToTabWidget = False
def run():
# from pydev import pydevd
# pydevd.settrace('localhost', port=62882, stdoutToServer=True, stderrToServer=True)
global mainWin
if not mainWin:
mainWin = SearchUI()
mainWin.show()
mainWin.raise_()
|
#!/usr/bin/env python3
import sys
if len(sys.argv) != 2:
print("Help: {} <filename>".format(sys.argv[0]))
sys.exit(0)
with open(sys.argv[1]) as file:
segments = file.readline().rstrip().split(", ")
target = ([int(x) for x in segments[0][15:].split("..")], [int(x) for x in segments[1][2:].split("..")])
def eval(vel):
max_y = 0
pos = [0,0]
while pos[1]>=target[1][0]:
pos[0] += vel[0]
pos[1] += vel[1]
if vel[0]>0:
vel[0]-=1
elif vel[0]<0:
vel[0]+=1
vel[1]-=1
if pos[1] > max_y:
max_y = pos[1]
if pos[0]>=target[0][0] and pos[0]<=target[0][1] and pos[1]>=target[1][0] and pos[1]<=target[1][1]:
return True,max_y
return False,None
best_max_y = target[1][0]-1
pos_for_max_y = [None,None]
within_target = 0
for x in range(0, target[0][1]+1):
for y in range(target[1][0], 400):# not really a great way to set the maximum y-velocity
in_target,max_y = eval([x,y])
if in_target:
within_target+=1
if max_y > best_max_y:
best_max_y = max_y
pos_for_max_y = [x,y]
print("Max y={} at {}".format(best_max_y,pos_for_max_y))
print("{} valid initial velocities".format(within_target))
|
from __future__ import annotations
import logging
from abc import abstractmethod
from typing import Dict, Generic, Type, TypeVar, final
from dramatiq import Message
from common.runtimes import Runtime, RuntimeConfig
from deployments.deployment_actor import DeploymentActor, DeploymentDescription
from deployments.entities.deployment_info import DeploymentInfo
from deployments.entities.deployment_task import DeploymentTask
from deployments.entities.pending_deployment import PendingDeployment
from models import ModelService
logger = logging.getLogger("uvicorn")
ConfigType = TypeVar('ConfigType', bound=RuntimeConfig)
class DeploymentRuntime(Runtime, Generic[ConfigType]):
_known_environments: Dict[str, Type[DeploymentRuntime]] = {}
def __init__(self, config: ConfigType):
super().__init__(config)
@final
def deploy(self, deployment_task: DeploymentTask) -> PendingDeployment:
from deployments.deployment_service import DeploymentService
model = ModelService().get_deployable_model(deployment_task.run_id)
deployment_id = DeploymentService.create_deployment_id()
actor: Type[DeploymentActor] = self.deployment_actor()
deployment_description = DeploymentDescription(
deployment_id=deployment_id,
model=model,
runtime_config=self.config.dict(),
parameters=deployment_task.parameters.dict(),
env_vars=self.environment())
logger.info(f"Deploying in actor {actor.__name__}")
message: Message = actor.send(deployment_description.json())
return PendingDeployment(deployment_id=deployment_id, runtime=self.config.name, message=message.encode())
@abstractmethod
def deployment_actor(self) -> Type[DeploymentActor]:
raise NotImplementedError()
def undeploy(self, deployment_id: str) -> None:
raise NotImplementedError()
def get_deployment(self, deployment_id: str) -> DeploymentInfo:
raise NotImplementedError()
@classmethod
def type(cls, name: str):
"""
Indicates for which deployment runtime env type the class is responsible.
:param name: the dataset type
"""
        def decorator(clazz):
            if name not in DeploymentRuntime._known_environments:
                DeploymentRuntime._known_environments[name] = clazz
            return clazz
        return decorator
@classmethod
def get_known_environments(cls) -> Dict[str, Type[DeploymentRuntime]]:
"""
Returns a dictionary of the known environments.
:return: Dictionary of the known environments.
"""
return cls._known_environments
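# Illustrative registration sketch (DockerDeploymentRuntime, DockerRuntimeConfig
# and DockerDeploymentActor are hypothetical names; the concrete runtimes live
# elsewhere in the code base):
#
#     @DeploymentRuntime.type('docker')
#     class DockerDeploymentRuntime(DeploymentRuntime[DockerRuntimeConfig]):
#         def deployment_actor(self) -> Type[DeploymentActor]:
#             return DockerDeploymentActor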
|
description = 'Laser Safety Shutter'
prefix = '14IDB:B1Bi0'
target = 0.0
command_value = 1.0
auto_open = 0.0
EPICS_enabled = True |
import numpy as np
import matplotlib.pyplot as plt
import cv2
class GridWorld:
_direction_deltas = [
(-1,0),
(1,0),
(0,1),
(0,-1)
]
_num_actions = len(_direction_deltas)
def __init__(self, reward_grid, terminal_mask, obstacle_mask, action_probabilities, no_action_probability):
self.reward_grid = reward_grid
self.terminal_mask = terminal_mask
self.obstacle_mask = obstacle_mask
self._T = self._create_transition_matrix(
action_probabilities,
no_action_probability,
obstacle_mask
)
@property
def shape(self):
return self.reward_grid.shape
@property
def size(self):
return self.reward_grid.size
def run_value_iterations(self, discount = 1.0, iterations = 10):
utility_grids, policy_grids = self._init_utility_policy_storage(iterations)
utility_grid = np.zeros_like(self.reward_grid)
for i in range(iterations):
            utility_grid = self._value_iteration(utility_grid=utility_grid, discount=discount)
policy_grids[:,:,i] = self.best_policy(utility_grid)
utility_grids[:,:,i] = utility_grid
return policy_grids, utility_grids
def run_policy_iterations(self, discount = 1.0, iterations = 10):
utility_grids, policy_grids = self._init_utility_policy_storage(iterations)
policy_grid = np.random.randint(0, self._num_actions, self.shape)
utility_grid = self.reward_grid.copy()
for i in range(iterations):
            policy_grid, utility_grid = self._policy_iteration(
                policy_grid=policy_grid, utility_grid=utility_grid, discount=discount)
policy_grids[:,:,i]=policy_grid
utility_grids[:,:,i]=utility_grid
return policy_grids,utility_grids
def generate_experience(self, current_state_idx, action_index):
sr, sc = self.grid_indices_to_coordinates(current_state_idx)
next_state_probs = self._T[sr, sc, action_index, :, :].flatten()
next_state_idx = np.random.choice(np.arange(next_state_probs.size),p=next_state_probs)
return (next_state_idx, self.reward_grid.flatten()[next_state_idx], self.terminal_mask.flatten()[next_state_idx])
def grid_indices_to_coordinates(self, indices=None):
if indices is None:
indices = np.arange(self.size)
return np.unravel_index(indices, self.shape)
def grid_coordinates_to_indices(self, coordinates = None):
if coordinates is None:
return np.arange(self.size)
return np.ravel_multi_index(coordinates, self.shape)
def best_policy(self, utility_grid):
M, N = self.shape
return np.argmax((utility_grid.reshape((1, 1, 1, M, N)) * self._T)
.sum(axis=-1).sum(axis=-1), axis=2)
def _init_utility_policy_storage(self, depth):
M, N = self.shape
utility_grids = np.zeros((M, N, depth))
policy_grids = np.zeros_like(utility_grids)
return utility_grids, policy_grids
def _create_transition_matrix(self,
action_probabilities,
no_action_probability,
obstacle_mask):
M, N = self.shape
T = np.zeros((M, N, self._num_actions, M, N))
r0, c0 = self.grid_indices_to_coordinates()
T[r0, c0, :, r0, c0] += no_action_probability
for action in range(self._num_actions):
for offset, P in action_probabilities:
direction = (action + offset) % self._num_actions
dr, dc = self._direction_deltas[direction]
r1 = np.clip(r0 + dr, 0, M - 1)
c1 = np.clip(c0 + dc, 0, N - 1)
temp_mask = obstacle_mask[r1, c1].flatten()
r1[temp_mask] = r0[temp_mask]
c1[temp_mask] = c0[temp_mask]
T[r0, c0, action, r1, c1] += P
terminal_locs = np.where(self.terminal_mask.flatten())[0]
T[r0[terminal_locs], c0[terminal_locs], :, :, :] = 0
return T
def _value_iteration(self, utility_grid, discount=1.0):
out = np.zeros_like(utility_grid)
M, N = self.shape
for i in range(M):
for j in range(N):
out[i, j] = self._calculate_utility((i, j), discount, utility_grid)
return out
def _policy_iteration(self, *, utility_grid,
policy_grid, discount=1.0):
r, c = self.grid_indices_to_coordinates()
M, N = self.shape
utility_grid = (
self.reward_grid +
discount * ((utility_grid.reshape((1, 1, 1, M, N)) * self._T)
.sum(axis=-1).sum(axis=-1))[r, c, policy_grid.flatten()]
.reshape(self.shape)
)
utility_grid[self.terminal_mask] = self.reward_grid[self.terminal_mask]
return self.best_policy(utility_grid), utility_grid
def _calculate_utility(self, loc, discount, utility_grid):
if self.terminal_mask[loc]:
return self.reward_grid[loc]
row, col = loc
return np.max(
discount * np.sum(
np.sum(self._T[row, col, :, :, :] * utility_grid,
axis=-1),
axis=-1)
) + self.reward_grid[loc]
def plot_policy(self, utility_grid, policy_grid=None):
if policy_grid is None:
policy_grid = self.best_policy(utility_grid)
markers = "^>v<"
marker_size = 200
marker_edge_width = marker_size // 10
marker_fill_color = 'w'
no_action_mask = self.terminal_mask | self.obstacle_mask
utility_normalized = (utility_grid - utility_grid.min()) / \
(utility_grid.max() - utility_grid.min())
utility_normalized = (255*utility_normalized).astype(np.uint8)
utility_rgb = cv2.applyColorMap(utility_normalized, cv2.COLORMAP_JET)
for i in range(3):
channel = utility_rgb[:, :, i]
channel[self.obstacle_mask] = 0
plt.imshow(utility_rgb[:, :, ::-1], interpolation='none')
for i, marker in enumerate(markers):
y, x = np.where((policy_grid == i) & np.logical_not(no_action_mask))
plt.plot(x, y, marker, ms=marker_size, mew=marker_edge_width,
color=marker_fill_color)
y, x = np.where(self.terminal_mask)
plt.plot(x, y, 'o', ms=marker_size, mew=marker_edge_width,
color=marker_fill_color)
tick_step_options = np.array([1, 2, 5, 10, 20, 50, 100])
tick_step = np.max(policy_grid.shape)/8
best_option = np.argmin(np.abs(np.log(tick_step) - np.log(tick_step_options)))
tick_step = tick_step_options[best_option]
plt.xticks(np.arange(0, policy_grid.shape[1] - 0.5, tick_step))
plt.yticks(np.arange(0, policy_grid.shape[0] - 0.5, tick_step))
        plt.ylim([policy_grid.shape[0] - 0.5, -0.5])
        plt.xlim([-0.5, policy_grid.shape[1] - 0.5]) |
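# Illustrative usage sketch for GridWorld (grid shape, rewards and the 80/10/10
# action noise are assumptions chosen for the example, not values from the
# original module):
#
#     reward = np.full((3, 4), -0.04)
#     reward[0, 3], reward[1, 3] = 1.0, -1.0            # goal and pit
#     terminal = np.zeros((3, 4), dtype=bool)
#     terminal[0, 3] = terminal[1, 3] = True
#     obstacles = np.zeros((3, 4), dtype=bool)
#     obstacles[1, 1] = True
#     # (offset, probability) pairs: offset 0 is the intended action, other
#     # offsets index relative to it in _direction_deltas order
#     action_probs = [(0, 0.8), (1, 0.1), (-1, 0.1)]
#     world = GridWorld(reward, terminal, obstacles, action_probs,
#                       no_action_probability=0.0)
#     policies, utilities = world.run_value_iterations(discount=0.9, iterations=25)
#     world.plot_policy(utilities[:, :, -1])
#     plt.show()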