Dataset schema (one row per source file; ranges are min to max, "nullable" columns may be empty):
hexsha: string (length 40)
size: int64 (7 to 1.04M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4 to 247)
max_stars_repo_name: string (length 4 to 125)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: sequence (length 1 to 10)
max_stars_count: int64 (1 to 368k, nullable)
max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
max_issues_repo_path: string (length 4 to 247)
max_issues_repo_name: string (length 4 to 125)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: sequence (length 1 to 10)
max_issues_count: int64 (1 to 116k, nullable)
max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
max_forks_repo_path: string (length 4 to 247)
max_forks_repo_name: string (length 4 to 125)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: sequence (length 1 to 10)
max_forks_count: int64 (1 to 105k, nullable)
max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
content: string (length 5 to 1.04M)
avg_line_length: float64 (1.77 to 618k)
max_line_length: int64 (1 to 970k)
alphanum_fraction: float64 (0 to 1)
original_content: string (length 7 to 1.04M)
filtered:remove_non_ascii: int64 (0 to 514k)
filtered:remove_delete_markers: int64 (0 to 0)

Each row below lists these fields in order, pipe-separated, with the file content inline after the fork columns.
e7e9653d546ade6c8ce9b53c49b25b1b21568a5c | 5,267 | py | Python | VisualGimp/Markup.py | duangsuse/VisualGimp | 79776fded12595ab3c56855b5ae56e2242780b2e | ["MIT"] | 2 | 2019-05-07T12:09:11.000Z | 2019-05-08T09:31:44.000Z | VisualGimp/Markup.py | duangsuse-valid-projects/VisualGimp | 79776fded12595ab3c56855b5ae56e2242780b2e | ["MIT"] | null | null | null | VisualGimp/Markup.py | duangsuse-valid-projects/VisualGimp | 79776fded12595ab3c56855b5ae56e2242780b2e | ["MIT"] | null | null | null |
#!/usr/bin/env python2
# -*- encoding: utf-8 -*-
# Gimp Markup Builder
# author: duangsuse
# date: Thu May 02 2019 CST
from os import linesep
from Util import stream_join
class MarkupBuilder:
''' Gimp Markup SGML builder '''
def __init__(self, indent = -1, nl = linesep, buffer = str):
self.marks = buffer()
self.tag_stack = list()
self.nl = nl
self.indent = indent
self.last_spaces = 0
self.revert_last_indent_size = 0
self.last_is_text = False
'''
Indent rules:
when starting new tag, write last spaces, last spaces += indent
if new tag is not text tag start (inner is just text), write newline
when leaving tag, last spaces -= indent
'''
def useindent(self): return self.indent != -1
indented = property(useindent)
def wnewline(self):
''' see use_indent'''
self.marks += self.nl
def windent(self):
''' see use_indent'''
wrote = 0
for _ in range(0, self.last_spaces):
self.marks += ' '
wrote += 1 # dummy?
return wrote
def cancel_indent(self):
''' cancel last indent '''
if self.indented: self.marks = self.marks[:-self.revert_last_indent_size]
def do_indent(self, entering = True):
''' Write indent, increase last_spaces, saving wrote spaces and newline to revert_last_indent_size '''
def do():
self.wnewline()
if (entering):
self.last_spaces += self.indent
else: self.last_spaces -= self.indent
self.revert_last_indent_size = self.windent() +1
if self.indented: do()
def do_last_indent(self, *args, **kwargs):
''' write indenting for last block '''
self.last_spaces -= self.indent
self.do_indent(*args, **kwargs)
self.last_spaces += self.indent
def begin(self, tag, attrs = {}):
'''
Make a tag with name and attributes
Attribute names, values and the tag name are escaped
'''
self.last_is_text = False
attrst = str()
tagscape = self.escape(tag)
ary = list(stream_join(attrs.keys(), attrs.values())) if attrs.__class__ is dict else list(attrs)
if len(attrs) != 0:
for n in range(0, len(ary), 2):
attrst += self.escape(str(ary[n]))
attrst += '='
#print(ary)
#print(n)
attrst += "\"%s\"" % self.escape(str(ary[n+1]))
self.marks += '<' + tagscape
if len(attrs) != 0: self.marks += ' '
self.marks += attrst + '>'
# always write indents for next line
# makes it possible to drop the last indent (text tag)
self.do_indent()
self.tag_stack.append(tagscape)
return self
def make(self): return self.marks
def tag(self, *args, **kwargs):
r'''
EDSL using __enter__/__exit__ (with-statement) syntax
create nodes like:
with xml.tag('span', {color: '#66ccff'}):
xml % 'Q \w\ Q'
'''
self.last_is_text = False
class TagBuilder:
def __init__(self, xml):
self.xml = xml
def __enter__(self):
self.xml.begin(*args, attrs = kwargs)
def __exit__(self, *lveinfo):
self.xml.end()
return TagBuilder(self)
def text(self, content):
''' append text content '''
self.last_is_text = True
if self.indented: self.cancel_indent()
self.marks += self.escape(content)
return self
#@staticmethod
#def test():
# m = MarkupBuilder()
# m > 'html'
# m > 'head'
# m > 'title'
# m < 'Hello World'
# m <= 2
# m > 'body'
# m > 'text'
# with m.tag("b"):
# m < 'String'
# m >= ['a', {'id': 'str'}]
# m < '|sg.'
# m <= 4
# return m
def end(self):
''' delimits (closes) the last tag '''
if not self.last_is_text: # cancel indentation
#print(self.indent, self.tag_stack)
self.cancel_indent()
self.do_indent(False)
self.marks += '</' + self.tag_stack.pop() + '>'
self.do_indent(False)
self.last_is_text = False
# Not cared by Markup indent emitter
def raw(self, raw):
''' write raw text (unescaped) '''
self.marks += raw
return self
def rawtag(self, rawtext):
''' append unescaped raw <> text '''
self.marks += '<'
self.marks += rawtext
self.marks += '>'
def _escape(self, xml):
'''
Escape XML string
' is replaced with '
" is replaced with "
& is replaced with &
< is replaced with <
> is replaced with >
'''
escapes = frozenset("'\"&<>")
replacement = { '\'': 'apos', '"': 'quot', '&': 'amp', '<': 'lt', '>': 'gt' }
if len(xml) < 1: return
output = str()
for i in range(0, len(xml)):
char = xml[i]
if (char in escapes):
output += '&'
output += replacement[char]
output += ';'
else: output += char
return output
escape = classmethod(_escape)
def __str__(self):
''' M(marks)..[tag stack] '''
return 'M(' + self.marks + ')..' + str(self.tag_stack)
__lt__ = text # chain
__gt__ = begin # chain
__add__ = raw # chain
def __contains__(self, tag):
''' is tag inside enclosing tags ? '''
return tag in self.tag_stack
def __ge__(self, tag_attr):
''' xml >= ['markup', {'name': 'abcs'}] '''
mark = tag_attr[0]
attr = tag_attr[1]
self.begin(mark, attr)
def __le__(self, n = 1):
''' Leave (close) N tags '''
while n > 0:
self.end()
n -= 1
| 24.962085 | 106 | 0.584204 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
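Illustrative aside (not part of the dataset row above): a minimal usage sketch of the MarkupBuilder class from VisualGimp/Markup.py. The import name is an assumption (it presumes the file is importable as "Markup" with its Util dependency on the path); only methods and operators defined in the class are used.

# Hypothetical usage of MarkupBuilder; the import path is an assumption.
from Markup import MarkupBuilder

m = MarkupBuilder(indent=2)           # indent=-1 disables pretty-printing
with m.tag('span', color='#66ccff'):  # tag() forwards kwargs as attributes to begin()
    m.text('Hello')                   # text() escapes and appends character data
print(m.make())                       # e.g. <span color="#66ccff">Hello</span>

# The operator aliases exercised by the commented-out test() above:
m2 = MarkupBuilder()
m2 > 'html'   # __gt__ -> begin('html')
m2 < 'Hi'     # __lt__ -> text('Hi')
m2 <= 1       # __le__ -> end() one tag
print(m2)     # __str__ -> M(<html>Hi</html>)..[]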
8796a12ade2e6974f6dfc98adc77e755604d7da8 | 895 | py | Python | sqlalchemy_redshift/__init__.py | Hivestack/sqlalchemy-redshift | 6226ffe4c6f3583606016492641e1bd5d351933a | ["MIT"] | null | null | null | sqlalchemy_redshift/__init__.py | Hivestack/sqlalchemy-redshift | 6226ffe4c6f3583606016492641e1bd5d351933a | ["MIT"] | null | null | null | sqlalchemy_redshift/__init__.py | Hivestack/sqlalchemy-redshift | 6226ffe4c6f3583606016492641e1bd5d351933a | ["MIT"] | null | null | null |
from pkg_resources import DistributionNotFound, get_distribution, parse_version
try:
import psycopg2 # noqa: F401
except ImportError:
raise ImportError(
'No module named psycopg2. Please install either '
'psycopg2 or psycopg2-binary package for CPython '
'or psycopg2cffi for Pypy.'
)
for package in ['psycopg2', 'psycopg2-binary', 'psycopg2cffi']:
try:
if get_distribution(package).parsed_version < parse_version('2.5'):
raise ImportError('Minimum required version for psycopg2 is 2.5')
break
except DistributionNotFound:
pass
__version__ = get_distribution('hs-sqlalchemy-redshift').version
from sqlalchemy.dialects import registry
registry.register("redshift", "sqlalchemy_redshift.dialect", "RedshiftDialect")
registry.register(
"redshift.psycopg2", "sqlalchemy_redshift.dialect", "RedshiftDialect"
)
| 31.964286 | 79 | 0.727374 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
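Illustrative aside (not part of the dataset row above): a minimal sketch of how the dialect registration in sqlalchemy_redshift/__init__.py is typically consumed. The cluster host, credentials and database name are placeholders, not values from the dataset.

# Hypothetical usage; URL details are placeholders.
import sqlalchemy as sa
import sqlalchemy_redshift  # noqa: F401 -- runs the registry.register() calls above

engine = sa.create_engine(
    "redshift+psycopg2://user:password@example-cluster:5439/dev"
)
with engine.connect() as conn:
    print(conn.execute(sa.text("SELECT 1")).scalar())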
fdbf1c941811766f3c215aa9700b09effe98e5e6 | 134 | py | Python | ch2/chapter2_features_of_fastapi_02.py | PacktPublishing/Understanding-How-Web-APIs-Work | 63220e7bf6b31315c46650e45c670ca9a01011fc | ["MIT"] | 2 | 2021-10-03T09:34:34.000Z | 2021-10-04T04:52:48.000Z | ch2/chapter2_features_of_fastapi_02.py | PacktPublishing/Understanding-How-Web-APIs-Work | 63220e7bf6b31315c46650e45c670ca9a01011fc | ["MIT"] | 1 | 2021-04-25T05:57:34.000Z | 2021-04-25T14:49:24.000Z | ch2/chapter2_features_of_fastapi_02.py | PacktPublishing/Understanding-How-Web-APIs-Work | 63220e7bf6b31315c46650e45c670ca9a01011fc | ["MIT"] | 3 | 2021-05-13T09:39:27.000Z | 2021-06-29T05:51:46.000Z |
# -*- coding: utf-8 -*-
def message(age: int = 0, name: str = 'stranger') -> str:
return f'Hello {name}, you are {age} years old'
| 33.5 | 57 | 0.58209 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
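Illustrative aside (not part of the dataset row above): calling the message() helper defined in the file, assuming it has been imported from the module.

# Hypothetical usage; the import assumes the module is on the path.
from chapter2_features_of_fastapi_02 import message

print(message())                    # Hello stranger, you are 0 years old
print(message(age=30, name="Ana"))  # Hello Ana, you are 30 years old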
515654029ae48e70e4487c739d107ea440403f1d | 8,124 | py | Python | Lib/site-packages/hackedit/app/templates.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | null | null | null | Lib/site-packages/hackedit/app/templates.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/hackedit/app/templates.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | ["bzip2-1.0.6", "0BSD"] | null | null | null |
"""
This module contains the top level API for managing the project/file templates.
"""
import json
import logging
import os
import re
from binaryornot.check import is_binary
from hackedit.app import settings
def create(template, dest_dir, answers):
"""
Creates a file/project from the specified template, at the specified directory.
:param template: Template data.
:param dest_dir: Destination directory where to create the file/project
:param answers: Dict of answers for substitution variables
"""
def get_paths(root, path, src_dir, dest_dir):
src_path = os.path.join(root, path)
rel_path = os.path.relpath(src_path, src_dir)
dst_path = os.path.join(dest_dir, rel_path)
return src_path, dst_path
def get_file_encoding(path):
if is_binary(path):
return 'binary'
try:
encodings = template['encodings']
except KeyError:
encodings = ['utf-8', 'cp1252']
for encoding in encodings:
try:
with open(path, encoding=encoding) as f:
f.read()
except UnicodeDecodeError:
continue
else:
return encoding
def open_file(path, encoding, to_write=None):
if encoding == 'binary':
if to_write is None:
mode = 'rb'
else:
mode = 'wb'
encoding = None
else:
if to_write is None:
mode = 'r'
else:
mode = 'w'
content = None
with open(path, mode, encoding=encoding) as f:
if to_write is not None:
f.write(to_write)
else:
content = f.read()
return content
def subsitute_vars(string):
for var, value in answers.items():
string = re.sub('@%s@' % var, value, string)
return string
ret_val = []
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
src_dir = template['path']
for root, dirs, files in os.walk(src_dir):
for file in files:
if file == 'template.json' or file.endswith('.pyc'):
continue
src, dst = get_paths(root, file, src_dir, dest_dir)
dst = subsitute_vars(dst)
encoding = get_file_encoding(src)
try:
content = open_file(src, encoding)
except OSError:
_logger().exception('failed to open file: %r', src)
if encoding != 'binary':
content = subsitute_vars(content)
if file == 'btpad_btn_img_0.png':
print(len(content), encoding)
try:
open_file(dst, encoding, to_write=content)
except PermissionError:
_logger().exception('failed to write file: %r', dst)
else:
ret_val.append(dst)
assert open_file(dst, encoding) == content
for directory in dirs:
src, dst = get_paths(root, directory, src_dir, dest_dir)
dst = subsitute_vars(dst)
try:
os.mkdir(dst)
except PermissionError:
_logger().exception('failed to create directory: %r', dst)
return ret_val
def get_sources():
"""
Returns the template sources (directory associated with a label).
"""
s = settings.load()
tmpl_sources = s.value('_templates/sources', '[]')
tmpl_sources = json.loads(tmpl_sources)
return sorted(tmpl_sources, key=lambda x: x['label'])
def add_source(label, path):
"""
Adds a template source
:param label: Name of the template source.
:param path: Path of the template source.
"""
tmpl_sources = get_sources()
tmpl_sources.append({'label': label, 'path': path})
s = settings.load()
s.setValue('_templates/sources', json.dumps(tmpl_sources))
def rm_source(label):
"""
Removes the specified template source.
:param label: Name of the template source to remove.
"""
tmpl_sources = get_sources()
for src in tmpl_sources:
if src['label'] == label:
tmpl_sources.remove(src)
s = settings.load()
s.setValue('_templates/sources', json.dumps(tmpl_sources))
def clear_sources():
"""
Clear template sources.
"""
s = settings.load()
s.setValue('_templates/sources', json.dumps([]))
def get_templates(category='', source_filter=''):
"""
Gets the list of templates.
:param category: Template category to retrieve.
- use "Project" to get project templates
- use "File" to get file templates
- use an empty string to retrieve them all (default).
:param source: Label of the source of the templates to retrieve. Use an empty string to retrieve
templates from all sources.
"""
def filtered_sources():
"""
Filter list of sources based on the ``source`` parameter.
"""
tmpl_sources = get_sources()
filtered = []
if source_filter:
# only keep the requested template source
for src in tmpl_sources:
if src['label'] == source_filter:
filtered.append(src)
break
else:
filtered = tmpl_sources
return filtered
def get_template(tdir):
"""
Returns template data for the given template directory.
Returns None if the template is invalid.
:param tdir: Template directory to get data from.
"""
tmpl = None
template_json = os.path.join(tdir, 'template.json')
if not os.path.exists(template_json):
# no template.json -> invalid template
_logger().warn('"template.json" not found in template directory: %r', tdir)
else:
try:
with open(template_json) as f:
tmpl = json.loads(f.read())
except (OSError, json.JSONDecodeError):
# unreadable template.json -> invalid template
_logger().exception('failed to read %r', template_json)
tmpl = None
else:
try:
tmpl_cat = tmpl['category']
except KeyError:
# no metadata or no category in template.json -> invalid template
_logger().exception('failed to read category from template metadata, '
'incomplete template.json?')
tmpl = None
else:
# valid template (finally).
tmpl['source'] = src
if category and category != tmpl_cat:
_logger().debug('rejecting template directory: %r, invalid category', tdir)
tmpl = None
return tmpl
def listdir(directory):
"""
Securely list subdirectories of ``directory``.
Returns an empty list if an OSError occurred.
"""
try:
return os.listdir(directory)
except OSError:
return []
for src in filtered_sources():
for tdir in listdir(src['path']):
tdir = os.path.join(src['path'], tdir)
if os.path.isfile(tdir):
continue
tmpl = get_template(tdir)
if tmpl:
tmpl['path'] = tdir
yield tmpl
def get_template(source, template):
"""
Returns the specified template data.
"""
for t in get_templates(source_filter=source):
if t['name'] == template:
return t
return None
def _logger():
return logging.getLogger(__name__)
if __name__ == '__main__':
clear_sources()
add_source('COBOL', '/home/colin/Documents/hackedit-cobol/hackedit_cobol/templates')
add_source('Python', '/home/colin/Documents/hackedit-python/hackedit_python/templates')
for tmpl in get_templates():
print(json.dumps(tmpl, indent=4, sort_keys=True))
| 31.126437 | 100 | 0.563269 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
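Illustrative aside (not part of the dataset row above): a usage sketch of the template API in hackedit/app/templates.py, mirroring its __main__ block. The source label, paths, template name and answer keys are invented for illustration.

# Hypothetical usage; label, paths, template name and answers are made up.
from hackedit.app.templates import add_source, get_template, create

add_source('Python', '/path/to/hackedit_python/templates')
tmpl = get_template('Python', 'Python module')   # returns None if no such template
if tmpl is not None:
    written = create(tmpl, '/tmp/new_project', answers={'ModuleName': 'demo'})
    print(written)   # list of files created from the template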
1a60970d1a4cf3ecc7aacdd16b38eca549a34840 | 1,845 | py | Python | src/tubize/videotomp4.py | olivervinn/tubizescripts | 8756f322d3e31f76f8b77cb8e084ded5941e29fa | ["MIT"] | null | null | null | src/tubize/videotomp4.py | olivervinn/tubizescripts | 8756f322d3e31f76f8b77cb8e084ded5941e29fa | ["MIT"] | null | null | null | src/tubize/videotomp4.py | olivervinn/tubizescripts | 8756f322d3e31f76f8b77cb8e084ded5941e29fa | ["MIT"] | null | null | null |
"""
Convert video format x to MP4/H.264.
"""
import os
import sys
import logging
from .videometainfo import VideoMetaInfo
from .utils import sizeof_fmt, time_fmt, find_files, check_dependencies, call, ffmpeg
logger = logging.getLogger(__name__)
class VideoToMP4:
"""To Mp4"""
SUPPORTED_EXTENSIONS = ".wmv, .avi, .mkv, .mov, .flv"
RULES = {
".wmv": "-c:v libx264 -crf 19 ",
".avi":
"-vf yadif=1 -c:v h264_nvenc -preset slow -tune film -crf 17",
".mkv": "-c copy",
".mov": "-vcodec h264 -acodec aac -strict -2 -crf 19 ",
".flv": " -r 20 ",
}
def process(self, video_file: str):
"""Convert video files to MP4 container format."""
name = os.path.splitext(video_file)[0]
ext = os.path.splitext(video_file)[1]
new_name = f"{name}.mp4"
if os.path.exists(new_name):
logger.info(f"Skipping file {new_name} already exists!")
elif ext not in VideoToMP4.RULES:
logger.error(f"Skipping unsupported type {ext}!")
else:
print(f'Convert {ext} to MP4 {new_name} ... ')
meta_info = VideoMetaInfo(video_file)
rule = VideoToMP4.RULES[ext]
flags = "-movflags +faststart -pix_fmt yuv420p"
ffmpeg(
f'-i "{video_file}" {flags} {rule} -metadata date="{meta_info.original_date}" "{new_name}"'
)
def file(self, filename: str) -> None:
logger.debug(f"converting file {filename}")
self.process(filename)
def directory(self, path: str, extension: str) -> int:
files = find_files(path, extension)
if len(files) < 1:
print("No matching files found in directory!", file=sys.stderr)
else:
for f in files:
self.file(f)
| 32.368421 | 107 | 0.571816 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
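Illustrative aside (not part of the dataset row above): a short usage sketch of the VideoToMP4 converter. The import path follows the row's src/tubize layout and the file locations are placeholders; the extension argument is simply passed through to find_files().

# Hypothetical usage; import path and file locations are placeholders.
from tubize.videotomp4 import VideoToMP4

converter = VideoToMP4()
converter.file('/videos/clip.mkv')       # writes /videos/clip.mp4 next to the input
converter.directory('/videos', '.wmv')   # converts whatever find_files() matches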
8052d0446907259540de210ff2c92410c7342f2e | 117 | py | Python | setup.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | ["MIT"] | 66 | 2019-01-07T23:59:26.000Z | 2021-12-29T16:51:56.000Z | setup.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | ["MIT"] | 8 | 2019-01-09T01:35:54.000Z | 2021-08-23T20:05:03.000Z | setup.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | ["MIT"] | 21 | 2019-03-26T01:02:33.000Z | 2022-01-26T20:34:34.000Z |
from setuptools import setup
setup(
name='parasol',
dependency_links=[
],
install_requires=[
]
)
| 13 | 28 | 0.623932 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
79299c770a188b579e6412af89f2263960e65f50 | 568 | py | Python | app/migrations/0007_auto_20211102_1946.py | Rqwannn/Rudemy | fe2d84540f3cc64c0ff6821e5f2fac22675fd381 | ["MIT"] | 3 | 2021-12-27T06:16:26.000Z | 2022-01-20T02:13:03.000Z | app/migrations/0007_auto_20211102_1946.py | Rqwannn/Rudemy | fe2d84540f3cc64c0ff6821e5f2fac22675fd381 | ["MIT"] | null | null | null | app/migrations/0007_auto_20211102_1946.py | Rqwannn/Rudemy | fe2d84540f3cc64c0ff6821e5f2fac22675fd381 | ["MIT"] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-02 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0006_auto_20211102_1928'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='skill',
),
migrations.AddField(
model_name='profile',
name='tags',
field=models.ManyToManyField(blank=True, to='app.Tag'),
),
migrations.DeleteModel(
name='Skill',
),
]
| 21.846154 | 67 | 0.549296 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
752ee840202809a32e9848a1a2c9a1828e74e71c | 5,132 | py | Python | oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | ["BSD-3-Clause"] | null | null | null | oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | ["BSD-3-Clause"] | null | null | null | oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | ["BSD-3-Clause"] | null | null | null |
import csv
import io
import json
import logging
import os
import warnings
from collections import defaultdict
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from .files import GENERAL_SETTINGS_FILE, GUL_SUMMARIES_FILE, IL_SUMMARIES_FILE, MODEL_SETTINGS_FILE
def _get_summaries(summary_file):
"""
Get a list representation of a summary file.
"""
summaries_dict = defaultdict(lambda: {'leccalc': {}})
with io.open(summary_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
id = int(row[0])
if row[1].startswith('leccalc'):
summaries_dict[id]['leccalc'][row[1]] = row[2].lower() == 'true'
else:
summaries_dict[id][row[1]] = row[2].lower() == 'true'
summaries = list()
for id in sorted(summaries_dict):
summaries_dict[id]['id'] = id
summaries.append(summaries_dict[id])
return summaries
@oasis_log
def create_analysis_settings_json(directory):
"""
Generate an analysis settings JSON from a set of
CSV files in a specified directory.
Args:
``directory`` (string): the directory containing the CSV files.
Returns:
The analysis settings JSON.
"""
if not os.path.exists(directory):
error_message = "Directory does not exist: {}".format(directory)
logging.getLogger().error(error_message)
raise OasisException(error_message)
general_settings_file = os.path.join(directory, GENERAL_SETTINGS_FILE)
model_settings_file = os.path.join(directory, MODEL_SETTINGS_FILE)
gul_summaries_file = os.path.join(directory, GUL_SUMMARIES_FILE)
il_summaries_file = os.path.join(directory, IL_SUMMARIES_FILE)
for file in [general_settings_file, model_settings_file, gul_summaries_file, il_summaries_file]:
if not os.path.exists(file):
error_message = "File does not exist: {}".format(directory)
logging.getLogger().error(error_message)
raise OasisException(error_message)
general_settings = dict()
with io.open(general_settings_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
general_settings[row[0]] = eval("{}('{}')".format(row[2], row[1]))
model_settings = dict()
with io.open(model_settings_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
model_settings[row[0]] = eval("{}('{}')".format(row[2], row[1]))
gul_summaries = _get_summaries(gul_summaries_file)
il_summaries = _get_summaries(il_summaries_file)
analysis_settings = general_settings
analysis_settings['model_settings'] = model_settings
analysis_settings['gul_summaries'] = gul_summaries
analysis_settings['il_summaries'] = il_summaries
output_json = json.dumps(analysis_settings)
logging.getLogger().info("Analysis settings json: {}".format(output_json))
return output_json
def read_analysis_settings(analysis_settings_fp, il_files_exist=False,
ri_files_exist=False):
"""Read the analysis settings file"""
# Load analysis_settings file
try:
# Load as a json
with io.open(analysis_settings_fp, 'r', encoding='utf-8') as f:
analysis_settings = json.load(f)
# Extract the analysis_settings part within the json
if analysis_settings.get('analysis_settings'):
analysis_settings = analysis_settings['analysis_settings']
except (IOError, TypeError, ValueError):
raise OasisException('Invalid analysis settings file or file path: {}.'.format(
analysis_settings_fp))
# Reset il_output if the files are not there
if not il_files_exist or 'il_output' not in analysis_settings:
# No insured loss output
analysis_settings['il_output'] = False
analysis_settings['il_summaries'] = []
# Same for ri_output
if not ri_files_exist or 'ri_output' not in analysis_settings:
# No reinsured loss output
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
# If we want ri_output, we will need il_output, which needs il_files
if analysis_settings['ri_output'] and not analysis_settings['il_output']:
if not il_files_exist:
warnings.warn("ri_output selected, but il files not found")
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
else:
analysis_settings['il_output'] = True
# guard - Check if at least one output type is selected
if not any([
analysis_settings['gul_output'] if 'gul_output' in analysis_settings else False,
analysis_settings['il_output'] if 'il_output' in analysis_settings else False,
analysis_settings['ri_output'] if 'ri_output' in analysis_settings else False,
]):
raise OasisException(
'No valid output settings in: {}'.format(analysis_settings_fp))
return analysis_settings
| 36.657143 | 100 | 0.677319 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
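Illustrative aside (not part of the dataset row above): a sketch of how the two helpers in oasislmf/model_execution/conf.py are typically called. The directory and file paths are placeholders; the settings directory is assumed to contain the four expected CSV files.

# Hypothetical usage; directory and file paths are placeholders.
from oasislmf.model_execution.conf import (
    create_analysis_settings_json, read_analysis_settings)

settings_json = create_analysis_settings_json('/path/to/csv_settings_dir')
analysis_settings = read_analysis_settings(
    '/path/to/analysis_settings.json', il_files_exist=True, ri_files_exist=False)
print(analysis_settings.get('gul_output', False))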
cb8ea6149e57e707c1ee331f670e37c8feb61914 | 6,815 | py | Python | codes/functions.py | Wenupi/protoplanetary_disks | 51f8decbec5415e1da9893316f03d32ca5ab27de | ["MIT"] | null | null | null | codes/functions.py | Wenupi/protoplanetary_disks | 51f8decbec5415e1da9893316f03d32ca5ab27de | ["MIT"] | null | null | null | codes/functions.py | Wenupi/protoplanetary_disks | 51f8decbec5415e1da9893316f03d32ca5ab27de | ["MIT"] | null | null | null |
#!/usr/bin/env python
#--------------------------------------------------------------------------------
#Changes the sky coordinates (x,y,z) to the disk coordinates (x_d,y_d,z_d)
#The x axis is the rotation axis
def FUN_rotation(x,y,z):
x_d = x
y_d = y*np.cos(inc) - z*np.sin(inc)
z_d = y*np.sin(inc) + z*np.cos(inc)
return x_d,y_d,z_d
#--------------------------------------------------------------------------------
#Radiative transfer equation
def FUN_intensity(I,z,x,y,optde):
x_d,y_d,z_d = FUN_rotation(x,y,z)
density = EQ_density(x_d,y_d,z_d)
amax = EQ_amax(x_d,y_d,z_d)
opa = function_ext(amax)
S = funcion_S([z_d,y_d,x_d])
# print ('x,y,z', x,y,z)
# print (S, x_d, y_d, z_d)
# print (optde(z))
dIdz = -S*opa*density*np.exp(-optde(z)) #z is the integration variable (must be evaluable at any point)
return dIdz
#--------------------------------------------------------------------------------
#Optical depth
def FUN_tau(tt,z,x,y):
x_d,y_d,z_d = FUN_rotation(x,y,z)
density = EQ_density(x_d,y_d,z_d)
amax = EQ_amax(x_d,y_d,z_d)
opa = function_ext(amax)
dtau = -opa*density
return dtau
#--------------------------------------------------------------------------------
def FUN_tau_zaxis(tt,z,x,y):
x_d,y_d,z_d = x,y,z
density = EQ_density(x_d,y_d,z_d)
amax = EQ_amax(x_d,y_d,z_d)
opa = function_ext(amax)
dtau = -opa*density
return dtau
#--------------------------------------------------------------------------------
#Black body radiation
def FUN_BB(nu,T):
# B = 2.*hP*nu**3/clight**2/( np.exp(hP*nu/kB/T) - 1.)
B = 1./( np.exp(hP*nu/kB/T) - 1.)
return B
#--------------------------------------------------------------------------------
def FUN_limits_mult(xx,yy):
Hout = EQ_Height(Rout)
lim_z = Rout*np.sin(inc) + 2.*Hout*np.cos(inc) #Based on the geometry of the disk
lim_y = Rout*np.cos(inc) + 2.*Hout*np.sin(inc) #Based on the geometry of the disk
z_arr = np.linspace(1.1*lim_z, -1.1*lim_z, 200)
z_crit = []
if ((np.abs(xx) <=Rout) and (np.abs(yy) <= lim_y)):
xd,yd,zd = FUN_rotation(xx,yy,z_arr)
crit = np.zeros((len(z_arr)))
###############################################################################
#Works, but could be optimized
###############################################################################
for ii in range(len(z_arr)): #Builds a density vector along the line of sight
if (EQ_density(xd,yd[ii],zd[ii]) == 0.):
crit[ii] = 0
else:
crit[ii] = 1
for ii in range(len(z_arr)): #Finds the indices where it changes from 0 to some value, or from some value to 0 (boundaries)
if ( (ii != 0) and (crit[ii] - crit[ii-1] != 0 )):
z_crit.append(z_arr[ii])
elif(ii == 0 and crit[0] == 1):
z_crit.append(z_arr[0])
###############################################################################
return z_crit
#--------------------------------------------------------------------------------
def FUN_creates_source_function(x_array,y_array):
#Arrays and limits
Hout = EQ_Height(Rout)
z_array = np.linspace(-2.*Hout, 2.*Hout, 200)
Sfunction = np.zeros((len(z_array),len(y_array),len(x_array)))
Temfunction = np.zeros((len(z_array),len(y_array),len(x_array)))
op_depth_p = np.zeros((len(y_array),len(x_array)))
#Computes the optical depth (perpendicular to the disk midplane)
for j in range(len(y_array)):
for i in range(len(x_array)):
if(x_array[i] == 0. and y_array[j] == 0.):
Sfunction[:,j,i] = 0.
Temfunction[:,j,i] = 0.
else:
rad = np.sqrt(x_array[i]**2 + y_array[j]**2)
Hscale = EQ_Height(rad)
z_integ = np.linspace(2.*Hscale,-2.*Hscale,200)
sol = odeint(FUN_tau_zaxis,0.,z_integ,args=(x_array[i],y_array[j])).T[0]
op_depth_p[j][i] = sol[len(z_integ)-1]
inter_opt = interpolate.interp1d(z_integ,sol,kind='linear', bounds_error=False,fill_value=0.)
for k in range(len(z_array)):
amax = EQ_amax(x_array[i],y_array[j],z_array[k])
albedo = function_alb(amax)
##########Temperature##########
Omega2 = Ggrav*Mstar/(rad*AU)**3
Teff4 = 3.*Mdot*Omega2/8./np.pi/sigmaB
Tacc4 = 3./4.*(7.*inter_opt(abs(z_array[k])) + 2./3.)*Teff4
Tirr4 = Tstar**4./4.*(Rstar/rad/AU)**2*np.exp(-7.*inter_opt(abs(z_array[k]))/phi_angle)
Temfunction[k,j,i] = (Tacc4 + Tirr4)**(0.25)
#Temfunction[k,j,i] = EQ_temperature(x_array[i],y_array[j],z_array[k])
###############################
Sfunction[k,j,i] = FUN_BB(nu,Temfunction[k,j,i])*(1.+ albedo*FUN_f(inter_opt(z_array[k]),op_depth_p[j][i],albedo))
#Builds the source function and temperature as 3-D interpolators
funcion_S = RegularGridInterpolator((z_array, y_array, x_array), Sfunction,bounds_error=False,fill_value=None)
funcion_T = RegularGridInterpolator((z_array, y_array, x_array), Temfunction,bounds_error=False,fill_value=None)
return funcion_S, funcion_T
#--------------------------------------------------------------------------------
def FUN_f(t,tau,alb):
eps = np.sqrt(1.-alb)
fff = np.exp(-np.sqrt(3.)*eps*t) + np.exp(np.sqrt(3.)*eps*(t-tau))
fff = fff/( np.exp(-np.sqrt(3.)*eps*tau)*(eps-1.) - (eps+1.) )
return fff
#--------------------------------------------------------------------------------
#Reads the DSHARP opacity tables
#Load opacities
with np.load('default_opacities_smooth.npz') as d:
a_w = d['a']
gsca_w = d['g']
lam_w = d['lam']
k_abs_w = d['k_abs']
k_sca_w = d['k_sca']
lam_avgs = wl
# We split the opacities within the range of frequency to make the calculations faster
k_abs_w = k_abs_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:]
k_sca_w = k_sca_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:]
k_sca_w = k_sca_w*(1. - gsca_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w),:])
lam_w = lam_w[(0.9*lam_avgs<lam_w) & (1.1*lam_avgs>lam_w)]
opac_grid = opacity.size_average_opacity(lam_avgs, a_w, lam_w, k_abs_w.T, k_sca_w.T, q=3.5, plot=True)
function_ext = interpolate.interp1d(a_w, opac_grid['ka'][:]+opac_grid['ks'][:],kind='cubic')
function_alb = interpolate.interp1d(a_w, opac_grid['ks'][:]/(opac_grid['ka'][:]+opac_grid['ks'][:]),kind='cubic')
if not scattering:
function_alb = interpolate.interp1d(a_w, np.zeros((np.shape(opac_grid['ks'][:]))),kind='cubic')
| 43.685897 | 134 | 0.501981 | original_content: verbatim duplicate of the content above (omitted) | 0 | 0 |
d281bf9d519356903906b4ce02f43f84e40f8147 | 2,893 | py | Python | F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | ["MIT"] | 1 | 2021-11-15T19:24:40.000Z | 2021-11-15T19:24:40.000Z | F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | ["MIT"] | null | null | null | F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | ["MIT"] | 2 | 2020-05-18T08:29:22.000Z | 2020-05-18T08:29:22.000Z |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 18:06:05 2021
@author: jhask
"""
import csv
import pandas as pd
import numpy as np
import re
import scipy.io as sio
import os
# Map MCM names to TUV labels
j_vals_dict= dict({
'O3 -> O2 + O(1D)':'J1',
'O3 -> O2 + O(3P)':'J2',
'H2O2 -> 2 OH':'J3',
'NO2 -> NO + O(3P)':'J4',
'NO3 -> NO + O2':'J5',
'NO3 -> NO2 + O(3P)':'J6',
'HNO2 -> OH + NO':'J7',
'HNO3 -> OH + NO2':'J8',
'CH2O -> H + HCO':'J11',
'CH2O -> H2 + CO':'J12',
'CH3CHO -> CH3 + HCO':'J13',
'C2H5CHO -> C2H5 + HCO':'J14',
'CH2=C(CH3)CHO -> Products':'J18',
'CH3COCH3 -> CH3CO + CH3':'J21',
'CH3COCH2CH3 -> CH3CO + CH2CH3':'J22',
'CH3COCH=CH2 -> Products':'J23',
'CHOCHO -> H2 + 2CO':'J31',
'CHOCHO -> CH2O + CO':'J32',
'CHOCHO -> HCO + HCO':'J33',
'CH3COCHO -> CH3CO + HCO':'J34',
'CH3COCOCH3 -> Products':'J35',
'CH3OOH -> CH3O + OH':'J41',
'CH3ONO2 -> CH3O + NO2':'J51',
'C2H5ONO2 -> C2H5O + NO2':'J52',
'n-C3H7ONO2 -> C3H7O + NO2':'J53',
'CH3CHONO2CH3 -> CH3CHOCH3 + NO2':'J54',
'C(CH3)3(ONO2) -> C(CH3)3(O.) + NO2':'J55',
'CH3COCH2(ONO2) -> CH3COCH2(O.) + NO2':'J56',
'CH2(OH)COCH3 -> CH3CO + CH2(OH)':'Jn10',
'CH2=CHCHO -> Products':'Jn11',
'CH3CO(OONO2) -> CH3CO(OO) + NO2':'Jn14',
'CH3CO(OONO2) -> CH3CO(O) + NO3':'Jn15',
'CH3(OONO2) -> CH3(OO) + NO2':'Jn16',
'CH3(OONO2) -> CH3(OO) + NO2':'Jn17',
'N2O5 -> NO3 + NO2':'Jn19',
'N2O5 -> NO3 + NO + O(3P)':'Jn20',
'HNO4 -> HO2 + NO2':'Jn21'})
#TUV output file.
file= 'C:/Users/jhask/OneDrive/Documents/MATLAB/F0AM/Setups/SOAS_RCIM/foam_6_29_out.txt'
with open(file, "r",errors="ignore") as f: # read line by line.
reader = csv.reader(f, delimiter="\t")
# Initialize vars we fill in reading the file.
ln_num = 0; map_cols=dict({})
in_species_list=False;
pass_go=False
for row in reader:
line = " ".join(row) # read line by line.
hdrs= [key for key in list(j_vals_dict.keys()) if key in line]
if len(hdrs) > 0 :
headers= re.search(r"[\d]*[\=\w]", line)
print(line, hdrs, j_vals_dict[ hdrs[:][0]])
if headers: map_cols[headers.group()]=j_vals_dict[ hdrs[:][0]]
if (pass_go is True) and ('------' not in line ):
# Append the j-values to the dataframe at this point in time.
splt= [float(item) for item in line.split(" ") if item !='']
df.loc[len(df)]=np.array(splt)
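        # The 'time, hrs. sza, deg.' row is the table header; once it appears, build the
        # DataFrame with the mapped J-value column names and start collecting data rows.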
if 'time, hrs. sza, deg.' in line:
pass_go=True
df=pd.DataFrame(columns= ['time', 'sza']+ list(map_cols.values()))
to_mat={name: col.values for name, col in df.items()}
filename= os.path.join('C:/Users/jhask/OneDrive/Documents/MATLAB/F0AM/Setups/SOAS_RCIM/'+'F0AM_tuv.mat')
sio.savemat(filename, to_mat)
print(filename)
1d7b25e9a1db4f378a05b7199423917d7b5b9f81 | 1,343 | py | Python | extract_url.py | nickinack/extract_url | MIT
'''
Imports
'''
from config import *
from newspaper import Article
import sys as sys
import pandas as pd
import csv
from collections import defaultdict
import re
'''
URL Extract
'''
columns = defaultdict(list)
with open('SecurityIDRBT.csv') as f:
reader = csv.DictReader(f) # read rows into a dictionary format
for row in reader: # read a row as {column1: value1, column2: value2,...}
for (k,v) in row.items(): # go over each column name and value
            columns[k].append(v) # append the value into the appropriate list based on column name k
url_list = []
for element in range(len(columns['Body'])):
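    # Pull every http/https URL out of this message body.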
    urls = re.findall(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', columns['Body'][element])
for url in urls:
url_list.append(url)
'''
Find Unique URLs and filter with semantic search results
'''
url_unique = []
for element in url_list:
if element not in url_unique:
if element not in common_urls_http:
if element not in common_urls_https:
url_unique.append(element)
'''
Write it in a new CSV
'''
with open('url.csv', 'w',newline='') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
for word in url_unique:
wr.writerow([word])
56b682792eb61ccb189ac68b9d7a874cbd6c0a60 | 3,279 | py | Python | test/python/test_mapper_coupling.py | kifumi/qiskit-terra | Apache-2.0
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=missing-docstring
from qiskit.mapper import _coupling
from .common import QiskitTestCase
class CouplingTest(QiskitTestCase):
def test_coupling_dict2list(self):
input_dict = {0: [1, 2], 1: [2]}
result = _coupling.coupling_dict2list(input_dict)
expected = [[0, 1], [0, 2], [1, 2]]
self.assertEqual(expected, result)
def test_coupling_dict2list_empty_dict(self):
self.assertIsNone(_coupling.coupling_dict2list({}))
def test_coupling_list2dict(self):
input_list = [[0, 1], [0, 2], [1, 2]]
result = _coupling.coupling_list2dict(input_list)
expected = {0: [1, 2], 1: [2]}
self.assertEqual(expected, result)
def test_coupling_list2dict_empty_list(self):
self.assertIsNone(_coupling.coupling_list2dict([]))
def test_empty_coupling_class(self):
coupling = _coupling.Coupling()
self.assertEqual(0, coupling.size())
self.assertEqual([], coupling.get_qubits())
self.assertEqual([], coupling.get_edges())
self.assertFalse(coupling.connected())
self.assertEqual("", str(coupling))
def test_coupling_str(self):
coupling_dict = {0: [1, 2], 1: [2]}
coupling = _coupling.Coupling(coupling_dict)
expected = ("qubits: q[0] @ 1, q[1] @ 2, q[2] @ 3\n"
"edges: q[0]-q[1], q[0]-q[2], q[1]-q[2]")
self.assertEqual(expected, str(coupling))
def test_coupling_compute_distance(self):
coupling_dict = {0: [1, 2], 1: [2]}
coupling = _coupling.Coupling(coupling_dict)
self.assertTrue(coupling.connected())
coupling.compute_distance()
qubits = coupling.get_qubits()
result = coupling.distance(qubits[0], qubits[1])
self.assertEqual(1, result)
def test_coupling_compute_distance_coupling_error(self):
coupling = _coupling.Coupling()
self.assertRaises(_coupling.CouplingError, coupling.compute_distance)
def test_add_qubit(self):
coupling = _coupling.Coupling()
self.assertEqual("", str(coupling))
coupling.add_qubit(('q', 0))
self.assertEqual("qubits: q[0] @ 1", str(coupling))
def test_add_qubit_not_tuple(self):
coupling = _coupling.Coupling()
self.assertRaises(_coupling.CouplingError, coupling.add_qubit, 'q0')
def test_add_qubit_tuple_incorrect_form(self):
coupling = _coupling.Coupling()
self.assertRaises(_coupling.CouplingError, coupling.add_qubit,
('q', '0'))
def test_add_edge(self):
coupling = _coupling.Coupling()
self.assertEqual("", str(coupling))
coupling.add_edge(("q", 0), ('q', 1))
expected = ("qubits: q[0] @ 1, q[1] @ 2\n"
"edges: q[0]-q[1]")
self.assertEqual(expected, str(coupling))
def test_distance_error(self):
"""Test distance method validation."""
graph = _coupling.Coupling({0: [1, 2], 1: [2]})
self.assertRaises(_coupling.CouplingError, graph.distance, ('q0', 0), ('q1', 1))
b9bba5bf54a9320d3bc8a8acd4f1c2d5d0aaa83f | 66,268 | py | Python | discretisedfield/tests/test_field.py | StephenPotato/discretisedfield | BSD-3-Clause
import os
import re
import k3d
import types
import random
import pytest
import numbers
import tempfile
import itertools
import numpy as np
import discretisedfield as df
import matplotlib.pyplot as plt
from .test_mesh import TestMesh
def check_field(field):
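    # Shared sanity checks for a Field object: mesh/dim/array consistency, string
    # representation, iteration, slicing/projection, arithmetic, and dim-dependent operators.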
assert isinstance(field.mesh, df.Mesh)
assert isinstance(field.dim, int)
assert field.dim > 0
assert isinstance(field.array, np.ndarray)
assert field.array.shape == (*field.mesh.n, field.dim)
average = field.average
assert isinstance(average, (tuple, numbers.Real))
rstr = repr(field)
assert isinstance(rstr, str)
pattern = (r'^Field\(mesh=Mesh\(region=Region\(p1=\(.+\), '
r'p2=\(.+\)\), .+\), dim=\d+\)$')
assert re.search(pattern, rstr)
assert isinstance(field.__iter__(), types.GeneratorType)
assert len(list(field)) == len(field.mesh)
line = field.line(p1=field.mesh.region.pmin,
p2=field.mesh.region.pmax,
n=5)
assert isinstance(line, df.Line)
assert line.n == 5
plane = field.plane('z', n=(2, 2))
assert isinstance(plane, df.Field)
assert len(plane.mesh) == 4
assert plane.mesh.n == (2, 2, 1)
project = field.project('z')
assert isinstance(project, df.Field)
assert project.mesh.n[2] == 1
assert isinstance(field(field.mesh.region.centre), (tuple, numbers.Real))
assert isinstance(field(field.mesh.region.random_point()),
(tuple, numbers.Real))
assert field == field
assert not field != field
assert +field == field
assert -(-field) == field
assert field + field == 2*field
assert field - (-field) == field + field
assert 1*field == field
assert -1*field == -field
if field.dim == 1:
grad = field.grad
assert isinstance(grad, df.Field)
assert grad.dim == 3
assert all(i not in dir(field) for i in 'xyz')
assert isinstance((field * df.dx).integral(), numbers.Real)
assert isinstance((field * df.dy).integral(), numbers.Real)
assert isinstance((field * df.dz).integral(), numbers.Real)
assert isinstance((field * df.dV).integral(), numbers.Real)
assert isinstance((field.plane('z') * df.dS).integral(), tuple)
assert isinstance((field.plane('z') * abs(df.dS)).integral(),
numbers.Real)
if field.dim == 3:
norm = field.norm
assert isinstance(norm, df.Field)
assert norm == abs(field)
assert norm.dim == 1
assert isinstance(field.x, df.Field)
assert field.x.dim == 1
assert isinstance(field.y, df.Field)
assert field.y.dim == 1
assert isinstance(field.z, df.Field)
assert field.z.dim == 1
div = field.div
assert isinstance(div, df.Field)
assert div.dim == 1
curl = field.curl
assert isinstance(curl, df.Field)
assert curl.dim == 3
field_plane = field.plane('z')
assert isinstance((field * df.dx).integral(), tuple)
assert isinstance((field * df.dy).integral(), tuple)
assert isinstance((field * df.dz).integral(), tuple)
assert isinstance((field * df.dV).integral(), tuple)
assert isinstance((field.plane('z') @ df.dS).integral(), numbers.Real)
assert isinstance((field.plane('z') * abs(df.dS)).integral(), tuple)
orientation = field.orientation
assert isinstance(orientation, df.Field)
assert orientation.dim == 3
assert all(i in dir(field) for i in 'xyz')
class TestField:
def setup(self):
# Get meshes using valid arguments from TestMesh.
tm = TestMesh()
tm.setup()
self.meshes = []
for p1, p2, n, cell in tm.valid_args:
region = df.Region(p1=p1, p2=p2)
mesh = df.Mesh(region=region, n=n, cell=cell)
self.meshes.append(mesh)
# Create lists of field values.
self.consts = [0, -5., np.pi, 1e-15, 1.2e12, random.random()]
self.iters = [(0, 0, 1),
(0, -5.1, np.pi),
[70, 1e15, 2*np.pi],
[5, random.random(), np.pi],
np.array([4, -1, 3.7]),
np.array([2.1, 0.0, -5*random.random()])]
self.sfuncs = [lambda c: 1,
lambda c: -2.4,
lambda c: -6.4e-15,
lambda c: c[0] + c[1] + c[2] + 1,
lambda c: (c[0]-1)**2 - c[1]+7 + c[2]*0.1,
lambda c: np.sin(c[0]) + np.cos(c[1]) - np.sin(2*c[2])]
self.vfuncs = [lambda c: (1, 2, 0),
lambda c: (-2.4, 1e-3, 9),
lambda c: (c[0], c[1], c[2] + 100),
lambda c: (c[0]+c[2]+10, c[1], c[2]+1),
lambda c: (c[0]-1, c[1]+70, c[2]*0.1),
lambda c: (np.sin(c[0]), np.cos(c[1]), -np.sin(2*c[2]))]
# Create a field for plotting tests
mesh = df.Mesh(p1=(-5e-9, -5e-9, -5e-9),
p2=(5e-9, 5e-9, 5e-9),
n=(5, 5, 5))
def norm_fun(point):
x, y, z = point
if x**2 + y**2 <= (5e-9)**2:
return 1e5
else:
return 0
def value_fun(point):
x, y, z = point
if x <= 0:
return (0, 0, 1)
else:
return (0, 0, -1)
self.pf = df.Field(mesh, dim=3, value=value_fun, norm=norm_fun)
def test_init_valid_args(self):
for mesh in self.meshes:
for value in self.consts + self.sfuncs:
f = df.Field(mesh, dim=1, value=value)
check_field(f)
for value in self.iters + self.vfuncs:
f = df.Field(mesh, dim=3, value=value)
check_field(f)
def test_init_invalid_args(self):
with pytest.raises(TypeError):
mesh = 'meaningless_mesh_string'
f = df.Field(mesh, dim=1)
for mesh in self.meshes:
for dim in [0, -1, 'dim', (2, 3)]:
with pytest.raises((ValueError, TypeError)):
f = df.Field(mesh, dim=dim)
def test_set_with_ndarray(self):
for mesh in self.meshes:
f = df.Field(mesh, dim=3)
f.value = np.ones((*f.mesh.n, f.dim,))
check_field(f)
assert isinstance(f.value, np.ndarray)
assert f.average == (1, 1, 1)
with pytest.raises(ValueError):
f.value = np.ones((2, 2))
def test_set_with_callable(self):
for mesh in self.meshes:
for func in self.sfuncs:
f = df.Field(mesh, dim=1, value=func)
check_field(f)
rp = f.mesh.region.random_point()
# Make sure to be at the centre of the cell
rp = f.mesh.index2point(f.mesh.point2index(rp))
assert f(rp) == func(rp)
for mesh in self.meshes:
for func in self.vfuncs:
f = df.Field(mesh, dim=3, value=func)
check_field(f)
rp = f.mesh.region.random_point()
rp = f.mesh.index2point(f.mesh.point2index(rp))
assert np.all(f(rp) == func(rp))
def test_set_with_dict(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
subregions = {'r1': df.Region(p1=(0, 0, 0), p2=(4e-9, 10e-9, 10e-9)),
'r2': df.Region(p1=(4e-9, 0, 0),
p2=(10e-9, 10e-9, 10e-9))}
mesh = df.Mesh(p1=p1, p2=p2, n=n, subregions=subregions)
field = df.Field(mesh, dim=3, value={'r1': (0, 0, 1),
'r2': (0, 0, 2),
'r1:r2': (0, 0, 5)})
assert np.all(field((3e-9, 7e-9, 9e-9)) == (0, 0, 1))
assert np.all(field((8e-9, 2e-9, 9e-9)) == (0, 0, 2))
def test_set_exception(self):
for mesh in self.meshes:
with pytest.raises(ValueError):
f = df.Field(mesh, dim=3, value='meaningless_string')
with pytest.raises(ValueError):
f = df.Field(mesh, dim=3, value=5+5j)
def test_value(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f = df.Field(mesh, dim=3)
f.value = (1, 1, 1)
assert f.value == (1, 1, 1)
f.array[0, 0, 0, 0] = 3
assert isinstance(f.value, np.ndarray)
def test_norm(self):
mesh = df.Mesh(p1=(0, 0, 0), p2=(10, 10, 10), cell=(5, 5, 5))
f = df.Field(mesh, dim=3, value=(2, 2, 2))
assert np.all(f.norm.value == 2*np.sqrt(3))
assert np.all(f.norm.array == 2*np.sqrt(3))
assert np.all(f.array == 2)
f.norm = 1
assert np.all(f.norm.value == 1)
assert np.all(f.norm.array == 1)
assert np.all(f.array == 1/np.sqrt(3))
f.array[0, 0, 0, 0] = 3
assert isinstance(f.norm.value, np.ndarray)
assert not np.all(f.norm.value == 1)
for mesh in self.meshes:
for value in self.iters + self.vfuncs:
for norm_value in [1, 2.1, 50, 1e-3, np.pi]:
f = df.Field(mesh, dim=3, value=value, norm=norm_value)
# Compute norm.
norm = f.array[..., 0]**2
norm += f.array[..., 1]**2
norm += f.array[..., 2]**2
norm = np.sqrt(norm)
assert norm.shape == f.mesh.n
assert f.norm.array.shape == (*f.mesh.n, 1)
assert np.all(abs(norm - norm_value) < 1e-12)
# Exception
mesh = df.Mesh(p1=(0, 0, 0), p2=(10, 10, 10), cell=(1, 1, 1))
f = df.Field(mesh, dim=1, value=-5)
with pytest.raises(ValueError):
f.norm = 5
def test_norm_is_not_preserved(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f = df.Field(mesh, dim=3)
f.value = (0, 3, 0)
f.norm = 1
assert np.all(f.norm.array == 1)
f.value = (0, 2, 0)
assert np.all(f.norm.value != 1)
assert np.all(f.norm.array == 2)
def test_norm_zero_field_exception(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f = df.Field(mesh, dim=3, value=(0, 0, 0))
with pytest.raises(ValueError):
f.norm = 1
def test_zero(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f = df.Field(mesh, dim=1, value=1e-6)
zf = f.zero
assert f.mesh == zf.mesh
assert f.dim == zf.dim
assert not np.any(zf.array)
f = df.Field(mesh, dim=3, value=(5, -7, 1e3))
zf = f.zero
assert f.mesh == zf.mesh
assert f.dim == zf.dim
assert not np.any(zf.array)
def test_orientation(self):
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (5e-9, 5e-9, 5e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# No zero-norm cells
f = df.Field(mesh, dim=3, value=(2, 0, 0))
assert f.orientation.average == (1, 0, 0)
# With zero-norm cells
def value_fun(point):
x, y, z = point
if x <= 0:
return (0, 0, 0)
else:
return (3, 0, 4)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.orientation((-1.5e-9, 3e-9, 0)) == (0, 0, 0)
assert f.orientation((1.5e-9, 3e-9, 0)) == (0.6, 0, 0.8)
f = df.Field(mesh, dim=1, value=0)
with pytest.raises(ValueError):
of = f.orientation
def test_average(self):
value = -1e-3 + np.pi
tol = 1e-12
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (5e-9, 5e-9, 5e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=1, value=2)
assert abs(f.average - 2) < tol
f = df.Field(mesh, dim=3, value=(0, 1, 2))
assert np.allclose(f.average, (0, 1, 2))
def test_field_component(self):
for mesh in self.meshes:
f = df.Field(mesh, dim=3, value=(1, 2, 3))
assert all(isinstance(getattr(f, i), df.Field) for i in 'xyz')
assert all(getattr(f, i).dim == 1 for i in 'xyz')
f = df.Field(mesh, dim=2, value=(1, 2))
assert all(isinstance(getattr(f, i), df.Field) for i in 'xy')
assert all(getattr(f, i).dim == 1 for i in 'xy')
# Exception.
f = df.Field(mesh, dim=1, value=1)
with pytest.raises(AttributeError):
fx = f.x.dim
def test_get_attribute_exception(self):
for mesh in self.meshes:
f = df.Field(mesh, dim=3)
with pytest.raises(AttributeError) as excinfo:
f.__getattr__('nonexisting_attribute')
assert 'has no attribute' in str(excinfo.value)
def test_dir(self):
for mesh in self.meshes:
f = df.Field(mesh, dim=3, value=(5, 6, -9))
assert all(attr in dir(f) for attr in ['x', 'y', 'z', 'div'])
assert 'grad' not in dir(f)
f = df.Field(mesh, dim=1, value=1)
assert all(attr not in dir(f) for attr in ['x', 'y', 'z', 'div'])
assert 'grad' in dir(f)
def test_eq(self):
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (15e-9, 5e-9, 5e-9)
cell = (5e-9, 1e-9, 2.5e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f1 = df.Field(mesh, dim=1, value=0.2)
f2 = df.Field(mesh, dim=1, value=0.2)
f3 = df.Field(mesh, dim=1, value=3.1)
f4 = df.Field(mesh, dim=3, value=(1, -6, 0))
f5 = df.Field(mesh, dim=3, value=(1, -6, 0))
assert f1 == f2
assert not f1 != f2
assert not f1 == f3
assert f1 != f3
assert not f2 == f4
assert f2 != f4
assert f4 == f5
assert not f4 != f5
assert not f1 == 0.2
assert f1 != 0.2
def test_allclose(self):
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (15e-9, 5e-9, 5e-9)
cell = (5e-9, 1e-9, 2.5e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f1 = df.Field(mesh, dim=1, value=0.2)
f2 = df.Field(mesh, dim=1, value=0.2+1e-9)
f3 = df.Field(mesh, dim=1, value=0.21)
f4 = df.Field(mesh, dim=3, value=(1, -6, 0))
f5 = df.Field(mesh, dim=3, value=(1, -6+1e-8, 0))
f6 = df.Field(mesh, dim=3, value=(1, -6.01, 0))
assert f1.allclose(f2)
assert not f1.allclose(f3)
assert not f1.allclose(f5)
assert f4.allclose(f5)
assert not f4.allclose(f6)
with pytest.raises(TypeError):
f1.allclose(2)
def test_point_neg(self):
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (5e-9, 5e-9, 5e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Scalar field
f = df.Field(mesh, dim=1, value=3)
res = -f
check_field(res)
assert res.average == -3
assert f == +f
assert f == -(-f)
assert f == +(-(-f))
# Vector field
f = df.Field(mesh, dim=3, value=(1, 2, -3))
res = -f
check_field(res)
assert res.average == (-1, -2, 3)
assert f == +f
assert f == -(-f)
assert f == +(-(-f))
def test_pow(self):
p1 = (0, 0, 0)
p2 = (15e-9, 6e-9, 6e-9)
cell = (3e-9, 3e-9, 3e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Scalar field
f = df.Field(mesh, dim=1, value=2)
res = f**2
assert res.average == 4
res = f**(-1)
assert res.average == 0.5
# Attempt vector field
f = df.Field(mesh, dim=3, value=(1, 2, -2))
with pytest.raises(ValueError):
res = f**2
        # Attempt to raise to a power that is not numbers.Real
f = df.Field(mesh, dim=1, value=2)
with pytest.raises(TypeError):
res = f**'a'
with pytest.raises(TypeError):
res = f**f
def test_add_subtract(self):
p1 = (0, 0, 0)
p2 = (5e-9, 10e-9, -5e-9)
n = (2, 2, 1)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
# Scalar fields
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=1, value=-0.2)
res = f1 + f2
assert res.average == 1
res = f1 - f2
assert res.average == 1.4
f1 += f2
assert f1.average == 1
f1 -= f2
assert f1.average == 1.2
# Vector fields
f1 = df.Field(mesh, dim=3, value=(1, 2, 3))
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
res = f1 + f2
assert res.average == (0, -1, -2)
res = f1 - f2
assert res.average == (2, 5, 8)
f1 += f2
assert f1.average == (0, -1, -2)
f1 -= f2
assert f1.average == (1, 2, 3)
        # Arithmetic checks
assert f1 + f2 + (1, 1, 1) == (1, 1, 1) + f2 + f1
assert f1 - f2 - (0, 0, 0) == (0, 0, 0) - (f2 - f1)
assert f1 + (f1 + f2) == (f1 + f1) + f2
assert f1 - (f1 + f2) == f1 - f1 - f2
assert f1 + f2 - f1 == f2 + (0, 0, 0)
# Constants
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
res = f1 + 2
assert res.average == 3.2
res = f1 - 1.2
assert res.average == 0
f1 += 2.5
assert f1.average == 3.7
f1 -= 3.7
assert f1.average == 0
res = f2 + (1, 3, 5)
assert res.average == (0, 0, 0)
res = f2 - (1, 2, 3)
assert res.average == (-2, -5, -8)
f2 += (1, 1, 1)
assert f2.average == (0, -2, -4)
f2 -= (-1, -2, 3)
assert f2.average == (1, 0, -7)
# Exceptions
with pytest.raises(TypeError):
res = f1 + '2'
# Fields with different dimensions
with pytest.raises(ValueError):
res = f1 + f2
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=1, value=1.2)
f2 = df.Field(mesh2, dim=1, value=1)
with pytest.raises(ValueError):
res = f1 + f2
with pytest.raises(ValueError):
f1 += f2
with pytest.raises(ValueError):
f1 -= f2
def test_mul_truediv(self):
p1 = (0, 0, 0)
p2 = (5e-9, 5e-9, 5e-9)
cell = (1e-9, 5e-9, 1e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Scalar fields
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=1, value=-2)
res = f1 * f2
assert res.average == -2.4
res = f1 / f2
assert res.average == -0.6
f1 *= f2
assert f1.average == -2.4
f1 /= f2
assert f1.average == 1.2
# Scalar field with a constant
f = df.Field(mesh, dim=1, value=5)
res = f * 2
assert res.average == 10
res = 3 * f
assert res.average == 15
res = f * (1, 2, 3)
assert res.average == (5, 10, 15)
res = (1, 2, 3) * f
assert res.average == (5, 10, 15)
res = f / 2
assert res.average == 2.5
res = 10 / f
assert res.average == 2
res = (5, 10, 15) / f
assert res.average == (1, 2, 3)
f *= 10
assert f.average == 50
f /= 10
assert f.average == 5
# Scalar field with a vector field
f1 = df.Field(mesh, dim=1, value=2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, 5))
res = f1 * f2 # __mul__
assert res.average == (-2, -6, 10)
res = f2 * f1 # __rmul__
assert res.average == (-2, -6, 10)
res = f2 / f1 # __truediv__
assert res.average == (-0.5, -1.5, 2.5)
f2 *= f1 # __imul__
assert f2.average == (-2, -6, 10)
f2 /= f1 # __truediv__
assert f2.average == (-1, -3, 5)
with pytest.raises(ValueError):
res = f1 / f2 # __rtruediv__
# Vector field with a scalar
f = df.Field(mesh, dim=3, value=(1, 2, 0))
res = f * 2
assert res.average == (2, 4, 0)
res = 5 * f
assert res.average == (5, 10, 0)
res = f / 2
assert res.average == (0.5, 1, 0)
f *= 2
assert f.average == (2, 4, 0)
f /= 2
assert f.average == (1, 2, 0)
with pytest.raises(ValueError):
res = 10 / f
# Further checks
f1 = df.Field(mesh, dim=1, value=2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
assert f1 * f2 == f2 * f1
assert 1.3 * f2 == f2 * 1.3
assert -5 * f2 == f2 * (-5)
assert (1, 2.2, -1) * f1 == f1 * (1, 2.2, -1)
assert f1 * (f1 * f2) == (f1 * f1) * f2
assert f1 * f2 / f1 == f2
# Exceptions
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
with pytest.raises(TypeError):
res = f2 * 'a'
with pytest.raises(TypeError):
res = 'a' / f1
with pytest.raises(ValueError):
res = f2 * f2
with pytest.raises(ValueError):
res = f2 / f2
with pytest.raises(ValueError):
res = 1 / f2
with pytest.raises(ValueError):
res = f1 / f2
with pytest.raises(TypeError):
f2 *= 'a'
with pytest.raises(TypeError):
f2 /= 'a'
with pytest.raises(ValueError):
f1 /= f2
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=1, value=1.2)
f2 = df.Field(mesh2, dim=1, value=1)
with pytest.raises(ValueError):
res = f1 * f2
with pytest.raises(ValueError):
res = f1 / f2
with pytest.raises(ValueError):
f1 *= f2
with pytest.raises(ValueError):
f1 /= f2
def test_dot(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Zero vectors
f1 = df.Field(mesh, dim=3, value=(0, 0, 0))
res = f1@f1
assert res.dim == 1
assert res.average == 0
# Orthogonal vectors
f1 = df.Field(mesh, dim=3, value=(1, 0, 0))
f2 = df.Field(mesh, dim=3, value=(0, 1, 0))
f3 = df.Field(mesh, dim=3, value=(0, 0, 1))
assert (f1 @ f2).average == 0
assert (f1 @ f3).average == 0
assert (f2 @ f3).average == 0
assert (f1 @ f1).average == 1
assert (f2 @ f2).average == 1
assert (f3 @ f3).average == 1
# Check if commutative
assert f1 @ f2 == f2 @ f1
assert f1 @ (-1, 3, 2.2) == (-1, 3, 2.2) @ f1
# Vector field with a constant
f = df.Field(mesh, dim=3, value=(1, 2, 3))
res = (1, 1, 1) @ f
assert res.average == 6
res = f @ [1, 1, 1]
assert res.average == 6
# Spatially varying vectors
def value_fun1(point):
x, y, z = point
return (x, y, z)
def value_fun2(point):
x, y, z = point
return (z, x, y)
f1 = df.Field(mesh, dim=3, value=value_fun1)
f2 = df.Field(mesh, dim=3, value=value_fun2)
# Check if commutative
assert f1 @ f2 == f2 @ f1
# The dot product should be x*z + y*x + z*y
assert (f1 @ f2)((1, 1, 1)) == 3
assert (f1 @ f2)((3, 1, 1)) == 7
assert (f1 @ f2)((5, 7, 1)) == 47
# Check norm computed using dot product
assert f1.norm == (f1 @ f1)**(0.5)
# Exceptions
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
with pytest.raises(ValueError):
res = f1 @ f2
with pytest.raises(ValueError):
res = f1 @ f2
with pytest.raises(TypeError):
res = f1 @ 3
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=3, value=(1, 2, 3))
f2 = df.Field(mesh2, dim=3, value=(3, 2, 1))
with pytest.raises(ValueError):
res = f1 @ f2
def test_cross(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Zero vectors
f1 = df.Field(mesh, dim=3, value=(0, 0, 0))
res = f1 & f1
assert res.dim == 3
assert res.average == (0, 0, 0)
# Orthogonal vectors
f1 = df.Field(mesh, dim=3, value=(1, 0, 0))
f2 = df.Field(mesh, dim=3, value=(0, 1, 0))
f3 = df.Field(mesh, dim=3, value=(0, 0, 1))
assert (f1 & f2).average == (0, 0, 1)
assert (f1 & f3).average == (0, -1, 0)
assert (f2 & f3).average == (1, 0, 0)
assert (f1 & f1).average == (0, 0, 0)
assert (f2 & f2).average == (0, 0, 0)
assert (f3 & f3).average == (0, 0, 0)
# Constants
assert (f1 & (0, 1, 0)).average == (0, 0, 1)
assert ((0, 1, 0) & f1).average == (0, 0, 1)
        # Check that the cross product is anticommutative
assert f1 & f2 == -(f2 & f1)
assert f1 & f3 == -(f3 & f1)
assert f2 & f3 == -(f3 & f2)
f1 = df.Field(mesh, dim=3, value=lambda point: (point[0],
point[1],
point[2]))
f2 = df.Field(mesh, dim=3, value=lambda point: (point[2],
point[0],
point[1]))
# The cross product should be
# (y**2-x*z, z**2-x*y, x**2-y*z)
assert (f1 & f2)((1, 1, 1)) == (0, 0, 0)
assert (f1 & f2)((3, 1, 1)) == (-2, -2, 8)
assert (f2 & f1)((3, 1, 1)) == (2, 2, -8)
assert (f1 & f2)((5, 7, 1)) == (44, -34, 18)
# Exceptions
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
with pytest.raises(TypeError):
res = f1 & 2
with pytest.raises(ValueError):
res = f1 & f2
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=3, value=(1, 2, 3))
f2 = df.Field(mesh2, dim=3, value=(3, 2, 1))
with pytest.raises(ValueError):
res = f1 & f2
def test_lshift(self):
p1 = (0, 0, 0)
p2 = (10e6, 10e6, 10e6)
cell = (5e6, 5e6, 5e6)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f1 = df.Field(mesh, dim=1, value=1)
f2 = df.Field(mesh, dim=1, value=-3)
f3 = df.Field(mesh, dim=1, value=5)
res = f1 << f2 << f3
assert res.dim == 3
assert res.average == (1, -3, 5)
# Different dimensions
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=2, value=(-1, -3))
res = f1 << f2
assert res.average == (1.2, -1, -3)
res = f2 << f1
assert res.average == (-1, -3, 1.2)
# Constants
f1 = df.Field(mesh, dim=1, value=1.2)
res = f1 << 2
assert res.average == (1.2, 2)
res = f1 << (1, -1)
assert res.average == (1.2, 1, -1)
res = 3 << f1
assert res.average == (3, 1.2)
res = (1.2, 3) << f1 << 3
assert res.average == (1.2, 3, 1.2, 3)
# Exceptions
with pytest.raises(TypeError):
res = 'a' << f1
with pytest.raises(TypeError):
res = f1 << 'a'
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=1, value=1.2)
f2 = df.Field(mesh2, dim=1, value=1)
with pytest.raises(ValueError):
res = f1 << f2
def test_all_operators(self):
p1 = (0, 0, 0)
p2 = (5e-9, 5e-9, 10e-9)
n = (2, 2, 1)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f1 = df.Field(mesh, dim=1, value=2)
f2 = df.Field(mesh, dim=3, value=(-4, 0, 1))
res = ((+f1/2 + f2.x)**2 - 2*f1*3)/(-f2.z) - 2*f2.y + 1/f2.z**2 + f2@f2
assert np.all(res.array == 21)
res = 1 + f1 + 0*f2.x - 3*f2.y/3
assert res.average == 3
def test_pad(self):
p1 = (0, 0, 0)
p2 = (10, 8, 2)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
field = df.Field(mesh, dim=1, value=1)
pf = field.pad({'x': (1, 1)}, mode='constant') # zeros padded
assert pf.array.shape == (12, 8, 2, 1)
def test_derivative(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
# f(x, y, z) = 0 -> grad(f) = (0, 0, 0)
# No BC
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=1, value=0)
check_field(f.derivative('x'))
assert f.derivative('x', n=1).average == 0
assert f.derivative('y', n=1).average == 0
assert f.derivative('z', n=1).average == 0
assert f.derivative('x', n=2).average == 0
assert f.derivative('y', n=2).average == 0
assert f.derivative('z', n=2).average == 0
# f(x, y, z) = x + y + z -> grad(f) = (1, 1, 1)
# No BC
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
def value_fun(point):
x, y, z = point
return x + y + z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.derivative('x', n=1).average == 1
assert f.derivative('y', n=1).average == 1
assert f.derivative('z', n=1).average == 1
assert f.derivative('x', n=2).average == 0
assert f.derivative('y', n=2).average == 0
assert f.derivative('z', n=2).average == 0
# f(x, y, z) = x*y + 2*y + x*y*z ->
# grad(f) = (y+y*z, x+2+x*z, x*y)
# No BC
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
def value_fun(point):
x, y, z = point
return x*y + 2*y + x*y*z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.derivative('x')((7, 5, 1)) == 10
assert f.derivative('y')((7, 5, 1)) == 16
assert f.derivative('z')((7, 5, 1)) == 35
assert f.derivative('x', n=2)((1, 1, 1)) == 0
assert f.derivative('y', n=2)((1, 1, 1)) == 0
assert f.derivative('z', n=2)((1, 1, 1)) == 0
# f(x, y, z) = (0, 0, 0)
# -> dfdx = (0, 0, 0)
# -> dfdy = (0, 0, 0)
# -> dfdz = (0, 0, 0)
# No BC
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=3, value=(0, 0, 0))
check_field(f.derivative('y'))
assert f.derivative('x').average == (0, 0, 0)
assert f.derivative('y').average == (0, 0, 0)
assert f.derivative('z').average == (0, 0, 0)
# f(x, y, z) = (x, y, z)
# -> dfdx = (1, 0, 0)
# -> dfdy = (0, 1, 0)
# -> dfdz = (0, 0, 1)
def value_fun(point):
x, y, z = point
return (x, y, z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.derivative('x').average == (1, 0, 0)
assert f.derivative('y').average == (0, 1, 0)
assert f.derivative('z').average == (0, 0, 1)
# f(x, y, z) = (x*y, y*z, x*y*z)
# -> dfdx = (y, 0, y*z)
# -> dfdy = (x, z, x*z)
# -> dfdz = (0, y, x*y)
def value_fun(point):
x, y, z = point
return (x*y, y*z, x*y*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.derivative('x')((3, 1, 3)) == (1, 0, 3)
assert f.derivative('y')((3, 1, 3)) == (3, 3, 9)
assert f.derivative('z')((3, 1, 3)) == (0, 1, 3)
assert f.derivative('x')((5, 3, 5)) == (3, 0, 15)
assert f.derivative('y')((5, 3, 5)) == (5, 5, 25)
assert f.derivative('z')((5, 3, 5)) == (0, 3, 15)
# f(x, y, z) = (3+x*y, x-2*y, x*y*z)
# -> dfdx = (y, 1, y*z)
# -> dfdy = (x, -2, x*z)
# -> dfdz = (0, 0, x*y)
def value_fun(point):
x, y, z = point
return (3+x*y, x-2*y, x*y*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.derivative('x')((7, 5, 1)) == (5, 1, 5)
assert f.derivative('y')((7, 5, 1)) == (7, -2, 7)
assert f.derivative('z')((7, 5, 1)) == (0, 0, 35)
# f(x, y, z) = 2*x*x + 2*y*y + 3*z*z
# -> grad(f) = (4, 4, 6)
def value_fun(point):
x, y, z = point
return 2*x*x + 2*y*y + 3*z*z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.derivative('x', n=2).average == 4
assert f.derivative('y', n=2).average == 4
assert f.derivative('z', n=2).average == 6
# f(x, y, z) = (2*x*x, 2*y*y, 3*z*z)
def value_fun(point):
x, y, z = point
return (2*x*x, 2*y*y, 3*z*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.derivative('x', n=2).average == (4, 0, 0)
assert f.derivative('y', n=2).average == (0, 4, 0)
assert f.derivative('z', n=2).average == (0, 0, 6)
with pytest.raises(NotImplementedError):
res = f.derivative('x', n=3)
def test_derivative_pbc(self):
p1 = (0, 0, 0)
p2 = (10, 8, 6)
cell = (2, 2, 2)
mesh_nopbc = df.Mesh(p1=p1, p2=p2, cell=cell)
mesh_pbc = df.Mesh(p1=p1, p2=p2, cell=cell, bc='xyz')
# Scalar field
def value_fun(point):
return point[0]*point[1]*point[2]
# No PBC
f = df.Field(mesh_nopbc, dim=1, value=value_fun)
assert f.derivative('x')((9, 1, 1)) == 1
assert f.derivative('y')((1, 7, 1)) == 1
assert f.derivative('z')((1, 1, 5)) == 1
# PBC
f = df.Field(mesh_pbc, dim=1, value=value_fun)
assert f.derivative('x')((9, 1, 1)) == -1.5
assert f.derivative('y')((1, 7, 1)) == -1
assert f.derivative('z')((1, 1, 5)) == -0.5
# Vector field
def value_fun(point):
return (point[0]*point[1]*point[2],) * 3
# No PBC
f = df.Field(mesh_nopbc, dim=3, value=value_fun)
assert f.derivative('x')((9, 1, 1)) == (1, 1, 1)
assert f.derivative('y')((1, 7, 1)) == (1, 1, 1)
assert f.derivative('z')((1, 1, 5)) == (1, 1, 1)
# PBC
f = df.Field(mesh_pbc, dim=3, value=value_fun)
assert f.derivative('x')((9, 1, 1)) == (-1.5, -1.5, -1.5)
assert f.derivative('y')((1, 7, 1)) == (-1, -1, -1)
assert f.derivative('z')((1, 1, 5)) == (-0.5, -0.5, -0.5)
def test_derivative_neumann(self):
p1 = (0, 0, 0)
p2 = (10, 8, 6)
cell = (2, 2, 2)
mesh_noneumann = df.Mesh(p1=p1, p2=p2, cell=cell)
mesh_neumann = df.Mesh(p1=p1, p2=p2, cell=cell, bc='neumann')
# Scalar field
def value_fun(point):
return point[0]*point[1]*point[2]
# No Neumann
f1 = df.Field(mesh_noneumann, dim=1, value=value_fun)
assert f1.derivative('x')((9, 1, 1)) == 1
assert f1.derivative('y')((1, 7, 1)) == 1
assert f1.derivative('z')((1, 1, 5)) == 1
# Neumann
f2 = df.Field(mesh_neumann, dim=1, value=value_fun)
assert (f1.derivative('x')(f1.mesh.region.centre) ==
f2.derivative('x')(f2.mesh.region.centre))
assert (f1.derivative('x')((1, 7, 1)) !=
f2.derivative('x')((1, 7, 1)))
def test_derivative_single_cell(self):
p1 = (0, 0, 0)
p2 = (10, 10, 2)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Scalar field: f(x, y, z) = x + y + z
# -> grad(f) = (1, 1, 1)
def value_fun(point):
x, y, z = point
return x + y + z
f = df.Field(mesh, dim=1, value=value_fun)
# only one cell in the z-direction
assert f.plane('x').derivative('x').average == 0
assert f.plane('y').derivative('y').average == 0
assert f.derivative('z').average == 0
# Vector field: f(x, y, z) = (x, y, z)
# -> grad(f) = (1, 1, 1)
def value_fun(point):
x, y, z = point
return (x, y, z)
f = df.Field(mesh, dim=3, value=value_fun)
# only one cell in the z-direction
assert f.plane('x').derivative('x').average == (0, 0, 0)
assert f.plane('y').derivative('y').average == (0, 0, 0)
assert f.derivative('z').average == (0, 0, 0)
def test_grad(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# f(x, y, z) = 0 -> grad(f) = (0, 0, 0)
f = df.Field(mesh, dim=1, value=0)
check_field(f.grad)
assert f.grad.average == (0, 0, 0)
# f(x, y, z) = x + y + z -> grad(f) = (1, 1, 1)
def value_fun(point):
x, y, z = point
return x + y + z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.grad.average == (1, 1, 1)
# f(x, y, z) = x*y + y + z -> grad(f) = (y, x+1, 1)
def value_fun(point):
x, y, z = point
return x*y + y + z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.grad((3, 1, 3)) == (1, 4, 1)
assert f.grad((5, 3, 5)) == (3, 6, 1)
# f(x, y, z) = x*y + 2*y + x*y*z ->
# grad(f) = (y+y*z, x+2+x*z, x*y)
def value_fun(point):
x, y, z = point
return x*y + 2*y + x*y*z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.grad((7, 5, 1)) == (10, 16, 35)
assert f.grad.x == f.derivative('x')
assert f.grad.y == f.derivative('y')
assert f.grad.z == f.derivative('z')
# Exception
f = df.Field(mesh, dim=3, value=(1, 2, 3))
with pytest.raises(ValueError):
res = f.grad
def test_div_curl(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# f(x, y, z) = (0, 0, 0)
# -> div(f) = 0
# -> curl(f) = (0, 0, 0)
f = df.Field(mesh, dim=3, value=(0, 0, 0))
check_field(f.div)
assert f.div.dim == 1
assert f.div.average == 0
check_field(f.curl)
assert f.curl.dim == 3
assert f.curl.average == (0, 0, 0)
# f(x, y, z) = (x, y, z)
# -> div(f) = 3
# -> curl(f) = (0, 0, 0)
def value_fun(point):
x, y, z = point
return (x, y, z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.div.average == 3
assert f.curl.average == (0, 0, 0)
# f(x, y, z) = (x*y, y*z, x*y*z)
# -> div(f) = y + z + x*y
# -> curl(f) = (x*z-y, -y*z, -x)
def value_fun(point):
x, y, z = point
return (x*y, y*z, x*y*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.div((3, 1, 3)) == 7
assert f.div((5, 3, 5)) == 23
assert f.curl((3, 1, 3)) == (8, -3, -3)
assert f.curl((5, 3, 5)) == (22, -15, -5)
# f(x, y, z) = (3+x*y, x-2*y, x*y*z)
# -> div(f) = y - 2 + x*y
# -> curl(f) = (x*z, -y*z, 1-x)
def value_fun(point):
x, y, z = point
return (3+x*y, x-2*y, x*y*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.div((7, 5, 1)) == 38
assert f.curl((7, 5, 1)) == (7, -5, -6)
# Exception
f = df.Field(mesh, dim=1, value=3.11)
with pytest.raises(ValueError):
res = f.div
with pytest.raises(ValueError):
res = f.curl
def test_laplace(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# f(x, y, z) = (0, 0, 0)
# -> laplace(f) = 0
f = df.Field(mesh, dim=3, value=(0, 0, 0))
check_field(f.laplace)
assert f.laplace.dim == 3
assert f.laplace.average == (0, 0, 0)
# f(x, y, z) = x + y + z
# -> laplace(f) = 0
def value_fun(point):
x, y, z = point
return x + y + z
f = df.Field(mesh, dim=1, value=value_fun)
check_field(f.laplace)
assert f.laplace.average == 0
# f(x, y, z) = 2*x*x + 2*y*y + 3*z*z
# -> laplace(f) = 4 + 4 + 6 = 14
def value_fun(point):
x, y, z = point
return 2*x*x + 2*y*y + 3*z*z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.laplace.average == 14
# f(x, y, z) = (2*x*x, 2*y*y, 3*z*z)
# -> laplace(f) = (4, 4, 6)
def value_fun(point):
x, y, z = point
return (2*x*x, 2*y*y, 3*z*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.laplace.average == (4, 4, 6)
def test_integral(self):
# Volume integral.
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=1, value=0)
assert (f * df.dV).integral() == 0
assert (f * df.dx*df.dy*df.dz).integral() == 0
f = df.Field(mesh, dim=1, value=2)
assert (f * df.dV).integral() == 2000
assert (f * df.dx*df.dy*df.dz).integral() == 2000
f = df.Field(mesh, dim=3, value=(-1, 0, 3))
assert (f * df.dV).integral() == (-1000, 0, 3000)
assert (f * df.dx*df.dy*df.dz).integral() == (-1000, 0, 3000)
def value_fun(point):
x, y, z = point
if x <= 5:
return (-1, -2, -3)
else:
return (1, 2, 3)
f = df.Field(mesh, dim=3, value=value_fun)
assert (f * df.dV).integral() == (0, 0, 0)
assert (f * df.dx*df.dy*df.dz).integral() == (0, 0, 0)
# Surface integral.
p1 = (0, 0, 0)
p2 = (10, 5, 3)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=1, value=0)
assert (f.plane('x') * abs(df.dS)).integral() == 0
assert (f.plane('x') * df.dy*df.dz).integral() == 0
f = df.Field(mesh, dim=1, value=2)
assert (f.plane('x') * abs(df.dS)).integral() == 30
assert (f.plane('x') * df.dy*df.dz).integral() == 30
assert (f.plane('y') * abs(df.dS)).integral() == 60
assert (f.plane('y') * df.dx*df.dz).integral() == 60
assert (f.plane('z') * abs(df.dS)).integral() == 100
assert (f.plane('z') * df.dx*df.dy).integral() == 100
f = df.Field(mesh, dim=3, value=(-1, 0, 3))
assert (f.plane('x') * abs(df.dS)).integral() == (-15, 0, 45)
assert (f.plane('y') * abs(df.dS)).integral() == (-30, 0, 90)
assert (f.plane('z') * abs(df.dS)).integral() == (-50, 0, 150)
f = df.Field(mesh, dim=3, value=(-1, 0, 3))
assert df.integral(f.plane('x') @ df.dS) == -15
assert df.integral(f.plane('y') @ df.dS) == 0
assert df.integral(f.plane('z') @ df.dS) == 150
# Directional integral
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=3, value=(1, 1, 1))
f = f.integral(direction='x')
assert isinstance(f, df.Field)
assert f.dim == 3
assert f.mesh.n == (1, 10, 10)
assert f.average == (10, 10, 10)
f = f.integral(direction='x').integral(direction='y')
assert isinstance(f, df.Field)
assert f.dim == 3
assert f.mesh.n == (1, 1, 10)
assert f.average == (100, 100, 100)
f = f.integral('x').integral('y').integral('z')
assert f.dim == 3
assert f.mesh.n == (1, 1, 1)
assert f.average == (1000, 1000, 1000)
assert (f.integral('x').integral('y').integral('z').average ==
f.integral())
# Improper integral
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=3, value=(1, 1, 1))
f = f.integral(direction='x', improper=True)
assert isinstance(f, df.Field)
assert f.dim == 3
assert f.mesh.n == (10, 10, 10)
assert f.average == (5.5, 5.5, 5.5)
assert f((0, 0, 0)) == (1, 1, 1)
assert f((10, 10, 10)) == (10, 10, 10)
# Exceptions
with pytest.raises(ValueError):
res = f.integral(direction='xy', improper=True)
def test_line(self):
mesh = df.Mesh(p1=(0, 0, 0), p2=(10, 10, 10), n=(10, 10, 10))
f = df.Field(mesh, dim=3, value=(1, 2, 3))
check_field(f)
line = f.line(p1=(0, 0, 0), p2=(5, 5, 5), n=20)
assert isinstance(line, df.Line)
assert line.n == 20
assert line.dim == 3
def test_plane(self):
for mesh, direction in itertools.product(self.meshes, ['x', 'y', 'z']):
f = df.Field(mesh, dim=1, value=3)
check_field(f)
plane = f.plane(direction, n=(3, 3))
assert isinstance(plane, df.Field)
p, v = zip(*list(plane))
assert len(p) == 9
assert len(v) == 9
def test_getitem(self):
p1 = (0, 0, 0)
p2 = (90, 50, 10)
cell = (5, 5, 5)
subregions = {'r1': df.Region(p1=(0, 0, 0), p2=(30, 50, 10)),
'r2': df.Region(p1=(30, 0, 0), p2=(90, 50, 10))}
mesh = df.Mesh(p1=p1, p2=p2, cell=cell, subregions=subregions)
def value_fun(point):
x, y, z = point
if x <= 60:
return (-1, -2, -3)
else:
return (1, 2, 3)
f = df.Field(mesh, dim=3, value=value_fun)
check_field(f)
check_field(f['r1'])
check_field(f['r2'])
check_field(f[subregions['r1']])
check_field(f[subregions['r2']])
assert f['r1'].average == (-1, -2, -3)
assert f['r2'].average == (0, 0, 0)
assert f[subregions['r1']].average == (-1, -2, -3)
assert f[subregions['r2']].average == (0, 0, 0)
assert len(f['r1'].mesh) + len(f['r2'].mesh) == len(f.mesh)
# Meshes are not aligned
subregion = df.Region(p1=(1.1, 0, 0), p2=(9.9, 15, 5))
assert f[subregion].array.shape == (2, 3, 1, 3)
def test_project(self):
p1 = (-5, -5, -5)
p2 = (5, 5, 5)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Constant scalar field
f = df.Field(mesh, dim=1, value=5)
check_field(f)
assert f.project('x').array.shape == (1, 10, 10, 1)
assert f.project('y').array.shape == (10, 1, 10, 1)
assert f.project('z').array.shape == (10, 10, 1, 1)
# Constant vector field
f = df.Field(mesh, dim=3, value=(1, 2, 3))
assert f.project('x').array.shape == (1, 10, 10, 3)
assert f.project('y').array.shape == (10, 1, 10, 3)
assert f.project('z').array.shape == (10, 10, 1, 3)
# Spatially varying scalar field
def value_fun(point):
x, y, z = point
if z <= 0:
return 1
else:
return -1
f = df.Field(mesh, dim=1, value=value_fun)
sf = f.project('z')
assert sf.array.shape == (10, 10, 1, 1)
assert sf.average == 0
# Spatially varying vector field
def value_fun(point):
x, y, z = point
if z <= 0:
return (3, 2, 1)
else:
return (3, 2, -1)
f = df.Field(mesh, dim=3, value=value_fun)
sf = f.project('z')
assert sf.array.shape == (10, 10, 1, 3)
assert sf.average == (3, 2, 0)
def test_angle(self):
p1 = (0, 0, 0)
p2 = (8e-9, 2e-9, 2e-9)
cell = (2e-9, 2e-9, 2e-9)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
def value_fun(point):
x, y, z = point
if x < 2e-9:
return (1, 1, 1)
elif 2e-9 <= x < 4e-9:
return (1, -1, 0)
elif 4e-9 <= x < 6e-9:
return (-1, -1, 0)
elif 6e-9 <= x < 8e-9:
return (-1, 1, 0)
f = df.Field(mesh, dim=3, value=value_fun)
assert abs(f.plane('z').angle((1e-9, 2e-9, 2e-9)) - np.pi/4) < 1e-3
assert abs(f.plane('z').angle((3e-9, 2e-9, 2e-9)) - 7*np.pi/4) < 1e-3
assert abs(f.plane('z').angle((5e-9, 2e-9, 2e-9)) - 5*np.pi/4) < 1e-3
assert abs(f.plane('z').angle((7e-9, 2e-9, 2e-9)) - 3*np.pi/4) < 1e-3
# Exception
with pytest.raises(ValueError):
res = f.angle # the field is not sliced
def test_write_read_ovf(self):
representations = ['txt', 'bin4', 'bin8']
filename = 'testfile.ovf'
p1 = (0, 0, 0)
p2 = (8e-9, 5e-9, 3e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
# Write/read
for dim, value in [(1, lambda point: point[0] + point[1] + point[2]),
(3, lambda point: (point[0], point[1], point[2]))]:
f = df.Field(mesh, dim=dim, value=value)
for rep in representations:
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
f.write(tmpfilename, representation=rep)
f_read = df.Field.fromfile(tmpfilename)
assert f.allclose(f_read)
# Extend scalar
for rep in representations:
f = df.Field(mesh, dim=1,
value=lambda point: point[0]+point[1]+point[2])
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
f.write(tmpfilename, extend_scalar=True)
f_read = df.Field.fromfile(tmpfilename)
assert f.allclose(f_read.x)
# Read different OOMMF representations
# (OVF1, OVF2) x (txt, bin4, bin8)
filenames = ['oommf-ovf2-txt.omf',
'oommf-ovf2-bin4.omf',
'oommf-ovf2-bin8.omf',
'oommf-ovf1-txt.omf',
'oommf-ovf1-bin4.omf',
'oommf-ovf1-bin8.omf']
dirname = os.path.join(os.path.dirname(__file__), 'test_sample')
for filename in filenames:
omffilename = os.path.join(dirname, filename)
f_read = df.Field.fromfile(omffilename)
if 'ovf2' in filename:
# The magnetisation is in the x-direction in OVF2 files.
assert abs(f_read.orientation.x.average - 1) < 1e-2
else:
# The norm of magnetisation is known.
assert abs(f_read.norm.average - 1261566.2610100) < 1e-3
# Read different mumax3 bin4 files (made on linux and windows)
filenames = ['mumax-bin4-linux.ovf', 'mumax-bin4-windows.ovf']
dirname = os.path.join(os.path.dirname(__file__), 'test_sample')
for filename in filenames:
omffilename = os.path.join(dirname, filename)
f_read = df.Field.fromfile(omffilename)
            # We know the saved magnetisation.
f_saved = df.Field(f_read.mesh, dim=3, value=(1, 0.1, 0), norm=1)
assert f_saved.allclose(f_read)
# Exception (dim=2)
f = df.Field(mesh, dim=2, value=(1, 2))
with pytest.raises(TypeError) as excinfo:
f.write(filename)
def test_write_read_vtk(self):
filename = 'testfile.vtk'
p1 = (0, 0, 0)
p2 = (1e-9, 2e-9, 1e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
for dim, value in [(1, -1.2), (3, (1e-3, -5e6, 5e6))]:
f = df.Field(mesh, dim=dim, value=value)
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
f.write(tmpfilename)
f_read = df.Field.fromfile(tmpfilename)
assert np.allclose(f.array, f_read.array)
assert np.allclose(f.mesh.region.pmin, f_read.mesh.region.pmin)
assert np.allclose(f.mesh.region.pmax, f_read.mesh.region.pmax)
assert np.allclose(f.mesh.cell, f_read.mesh.cell)
assert f.mesh.n == f_read.mesh.n
def test_write_read_hdf5(self):
filenames = ['testfile.hdf5', 'testfile.h5']
p1 = (0, 0, 0)
p2 = (10e-12, 5e-12, 5e-12)
cell = (1e-12, 1e-12, 1e-12)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
for dim, value in [(1, -1.23), (3, (1e-3 + np.pi, -5e6, 6e6))]:
f = df.Field(mesh, dim=dim, value=value)
for filename in filenames:
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
f.write(tmpfilename)
f_read = df.Field.fromfile(tmpfilename)
assert f == f_read
def test_read_write_invalid_extension(self):
filename = 'testfile.jpg'
p1 = (0, 0, 0)
p2 = (10e-12, 5e-12, 3e-12)
cell = (1e-12, 1e-12, 1e-12)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
f = df.Field(mesh, dim=1, value=5e-12)
with pytest.raises(ValueError) as excinfo:
f.write(filename)
with pytest.raises(ValueError) as excinfo:
f = df.Field.fromfile(filename)
def test_mpl_scalar(self):
# No axes
self.pf.x.plane('x', n=(3, 4)).mpl_scalar()
# Axes
fig = plt.figure()
ax = fig.add_subplot(111)
self.pf.x.plane('x', n=(3, 4)).mpl_scalar(ax=ax)
# All arguments
self.pf.x.plane('x').mpl_scalar(figsize=(10, 10),
filter_field=self.pf.norm,
colorbar=True,
colorbar_label='something',
multiplier=1e-6, cmap='hsv',
clim=(-1, 1))
# Lightness field
filenames = ['skyrmion.omf', 'skyrmion-disk.omf']
for i in filenames:
filename = os.path.join(os.path.dirname(__file__),
'test_sample', i)
field = df.Field.fromfile(filename)
field.plane('z').angle.mpl_scalar(lightness_field=field.z)
field.plane('z').angle.mpl_scalar(lightness_field=-field.z,
filter_field=field.norm)
field.plane('z').mpl(scalar_lightness_field=-field.z)
# Saving plot
filename = 'testfigure.pdf'
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
self.pf.x.plane('x', n=(3, 4)).mpl_scalar(filename=tmpfilename)
# Exceptions
with pytest.raises(ValueError):
self.pf.x.mpl_scalar() # not sliced
with pytest.raises(ValueError):
self.pf.plane('z').mpl_scalar() # vector field
with pytest.raises(ValueError):
# wrong filter field
self.pf.x.plane('z').mpl_scalar(filter_field=self.pf)
with pytest.raises(ValueError):
# wrong filter field
self.pf.x.plane('z').mpl_scalar(lightness_field=self.pf)
plt.close('all')
def test_mpl_vector(self):
# No axes
self.pf.plane('x', n=(3, 4)).mpl_vector()
# Axes
fig = plt.figure()
ax = fig.add_subplot(111)
self.pf.plane('x', n=(3, 4)).mpl_vector(ax=ax)
# All arguments
self.pf.plane('x').mpl_vector(figsize=(10, 10),
color_field=self.pf.y,
colorbar=True,
colorbar_label='something',
multiplier=1e-6, cmap='hsv',
clim=(-1, 1))
# Saving plot
filename = 'testfigure.pdf'
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
self.pf.plane('x', n=(3, 4)).mpl_vector(filename=tmpfilename)
# Exceptions
with pytest.raises(ValueError) as excinfo:
self.pf.mpl_vector() # not sliced
with pytest.raises(ValueError) as excinfo:
self.pf.y.plane('z').mpl_vector() # scalar field
with pytest.raises(ValueError) as excinfo:
# wrong color field
self.pf.plane('z').mpl_vector(color_field=self.pf)
plt.close('all')
def test_mpl(self):
# No axes
self.pf.plane('x', n=(3, 4)).mpl()
# Axes
fig = plt.figure()
ax = fig.add_subplot(111)
self.pf.x.plane('x', n=(3, 4)).mpl(ax=ax)
# All arguments for a vector field
self.pf.plane('x').mpl(figsize=(12, 6),
scalar_field=self.pf.plane('x').angle,
scalar_filter_field=self.pf.norm,
scalar_colorbar_label='something',
scalar_cmap='twilight',
vector_field=self.pf,
vector_color_field=self.pf.y,
vector_color=True,
vector_colorbar=True,
vector_colorbar_label='vector',
vector_cmap='hsv', vector_clim=(0, 1e6),
multiplier=1e-12)
# All arguments for a scalar field
self.pf.z.plane('x').mpl(figsize=(12, 6),
scalar_field=self.pf.x,
scalar_filter_field=self.pf.norm,
scalar_colorbar_label='something',
scalar_cmap='twilight',
vector_field=self.pf,
vector_color_field=self.pf.y,
vector_color=True,
vector_colorbar=True,
vector_colorbar_label='vector',
vector_cmap='hsv', vector_clim=(0, 1e6),
multiplier=1e-12)
# Saving plot
filename = 'testfigure.pdf'
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
self.pf.plane('x', n=(3, 4)).mpl(filename=tmpfilename)
# Exception
with pytest.raises(ValueError):
self.pf.mpl()
plt.close('all')
def test_k3d_nonzero(self):
# Default
self.pf.norm.k3d_nonzero()
# Color
self.pf.x.k3d_nonzero(color=0xff00ff)
# Multiplier
self.pf.x.k3d_nonzero(color=0xff00ff, multiplier=1e-6)
# Interactive field
self.pf.x.plane('z').k3d_nonzero(color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
# kwargs
self.pf.x.plane('z').k3d_nonzero(color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf,
wireframe=True)
# Plot
plot = k3d.plot()
plot.display()
self.pf.x.plane(z=0).k3d_nonzero(plot=plot,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
# Continuation for interactive plot testing.
self.pf.x.plane(z=1e-9).k3d_nonzero(plot=plot,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
assert len(plot.objects) == 2
with pytest.raises(ValueError) as excinfo:
self.pf.k3d_nonzero()
def test_k3d_scalar(self):
# Default
self.pf.y.k3d_scalar()
# Filter field
self.pf.y.k3d_scalar(filter_field=self.pf.norm)
# Colormap
self.pf.x.k3d_scalar(filter_field=self.pf.norm,
cmap='hsv',
color=0xff00ff)
# Multiplier
self.pf.y.k3d_scalar(filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6)
# Interactive field
self.pf.y.k3d_scalar(filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
# kwargs
self.pf.y.k3d_scalar(filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf,
wireframe=True)
# Plot
plot = k3d.plot()
plot.display()
self.pf.y.plane(z=0).k3d_scalar(plot=plot,
filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
# Continuation for interactive plot testing.
self.pf.y.plane(z=1e-9).k3d_scalar(plot=plot,
filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
assert len(plot.objects) == 2
# Exceptions
with pytest.raises(ValueError) as excinfo:
self.pf.k3d_scalar()
with pytest.raises(ValueError):
self.pf.x.k3d_scalar(filter_field=self.pf) # filter field dim=3
def test_k3d_vector(self):
# Default
self.pf.k3d_vector()
# Color field
self.pf.k3d_vector(color_field=self.pf.x)
# Colormap
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv')
# Head size
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3)
# Points
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False)
# Point size
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False,
point_size=1)
# Vector multiplier
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False,
point_size=1,
vector_multiplier=1)
# Multiplier
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False,
point_size=1,
vector_multiplier=1,
multiplier=1e-6)
# Interactive field
self.pf.plane('z').k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False,
point_size=1,
vector_multiplier=1,
multiplier=1e-6,
interactive_field=self.pf)
# Plot
plot = k3d.plot()
plot.display()
self.pf.plane(z=0).k3d_vector(plot=plot, interactive_field=self.pf)
# Continuation for interactive plot testing.
self.pf.plane(z=1e-9).k3d_vector(plot=plot, interactive_field=self.pf)
assert len(plot.objects) == 3
# Exceptions
with pytest.raises(ValueError) as excinfo:
self.pf.x.k3d_vector()
with pytest.raises(ValueError):
            self.pf.k3d_vector(color_field=self.pf)  # color field dim=3
def test_plot_large_sample(self):
p1 = (0, 0, 0)
p2 = (50e9, 50e9, 50e9)
cell = (25e9, 25e9, 25e9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
value = (1e6, 1e6, 1e6)
field = df.Field(mesh, dim=3, value=value)
field.plane('z').mpl()
field.norm.k3d_nonzero()
field.x.k3d_scalar()
field.k3d_vector()
| 33.621512 | 79 | 0.470333 | import os
import re
import k3d
import types
import random
import pytest
import numbers
import tempfile
import itertools
import numpy as np
import discretisedfield as df
import matplotlib.pyplot as plt
from .test_mesh import TestMesh
def check_field(field):
assert isinstance(field.mesh, df.Mesh)
assert isinstance(field.dim, int)
assert field.dim > 0
assert isinstance(field.array, np.ndarray)
assert field.array.shape == (*field.mesh.n, field.dim)
average = field.average
assert isinstance(average, (tuple, numbers.Real))
rstr = repr(field)
assert isinstance(rstr, str)
pattern = (r'^Field\(mesh=Mesh\(region=Region\(p1=\(.+\), '
r'p2=\(.+\)\), .+\), dim=\d+\)$')
assert re.search(pattern, rstr)
assert isinstance(field.__iter__(), types.GeneratorType)
assert len(list(field)) == len(field.mesh)
line = field.line(p1=field.mesh.region.pmin,
p2=field.mesh.region.pmax,
n=5)
assert isinstance(line, df.Line)
assert line.n == 5
plane = field.plane('z', n=(2, 2))
assert isinstance(plane, df.Field)
assert len(plane.mesh) == 4
assert plane.mesh.n == (2, 2, 1)
project = field.project('z')
assert isinstance(project, df.Field)
assert project.mesh.n[2] == 1
assert isinstance(field(field.mesh.region.centre), (tuple, numbers.Real))
assert isinstance(field(field.mesh.region.random_point()),
(tuple, numbers.Real))
assert field == field
assert not field != field
assert +field == field
assert -(-field) == field
assert field + field == 2*field
assert field - (-field) == field + field
assert 1*field == field
assert -1*field == -field
if field.dim == 1:
grad = field.grad
assert isinstance(grad, df.Field)
assert grad.dim == 3
assert all(i not in dir(field) for i in 'xyz')
assert isinstance((field * df.dx).integral(), numbers.Real)
assert isinstance((field * df.dy).integral(), numbers.Real)
assert isinstance((field * df.dz).integral(), numbers.Real)
assert isinstance((field * df.dV).integral(), numbers.Real)
assert isinstance((field.plane('z') * df.dS).integral(), tuple)
assert isinstance((field.plane('z') * abs(df.dS)).integral(),
numbers.Real)
if field.dim == 3:
norm = field.norm
assert isinstance(norm, df.Field)
assert norm == abs(field)
assert norm.dim == 1
assert isinstance(field.x, df.Field)
assert field.x.dim == 1
assert isinstance(field.y, df.Field)
assert field.y.dim == 1
assert isinstance(field.z, df.Field)
assert field.z.dim == 1
div = field.div
assert isinstance(div, df.Field)
assert div.dim == 1
curl = field.curl
assert isinstance(curl, df.Field)
assert curl.dim == 3
field_plane = field.plane('z')
assert isinstance((field * df.dx).integral(), tuple)
assert isinstance((field * df.dy).integral(), tuple)
assert isinstance((field * df.dz).integral(), tuple)
assert isinstance((field * df.dV).integral(), tuple)
assert isinstance((field.plane('z') @ df.dS).integral(), numbers.Real)
assert isinstance((field.plane('z') * abs(df.dS)).integral(), tuple)
orientation = field.orientation
assert isinstance(orientation, df.Field)
assert orientation.dim == 3
assert all(i in dir(field) for i in 'xyz')
class TestField:
def setup(self):
# Get meshes using valid arguments from TestMesh.
tm = TestMesh()
tm.setup()
self.meshes = []
for p1, p2, n, cell in tm.valid_args:
region = df.Region(p1=p1, p2=p2)
mesh = df.Mesh(region=region, n=n, cell=cell)
self.meshes.append(mesh)
# Create lists of field values.
self.consts = [0, -5., np.pi, 1e-15, 1.2e12, random.random()]
self.iters = [(0, 0, 1),
(0, -5.1, np.pi),
[70, 1e15, 2*np.pi],
[5, random.random(), np.pi],
np.array([4, -1, 3.7]),
np.array([2.1, 0.0, -5*random.random()])]
self.sfuncs = [lambda c: 1,
lambda c: -2.4,
lambda c: -6.4e-15,
lambda c: c[0] + c[1] + c[2] + 1,
lambda c: (c[0]-1)**2 - c[1]+7 + c[2]*0.1,
lambda c: np.sin(c[0]) + np.cos(c[1]) - np.sin(2*c[2])]
self.vfuncs = [lambda c: (1, 2, 0),
lambda c: (-2.4, 1e-3, 9),
lambda c: (c[0], c[1], c[2] + 100),
lambda c: (c[0]+c[2]+10, c[1], c[2]+1),
lambda c: (c[0]-1, c[1]+70, c[2]*0.1),
lambda c: (np.sin(c[0]), np.cos(c[1]), -np.sin(2*c[2]))]
# Create a field for plotting tests
mesh = df.Mesh(p1=(-5e-9, -5e-9, -5e-9),
p2=(5e-9, 5e-9, 5e-9),
n=(5, 5, 5))
def norm_fun(point):
x, y, z = point
if x**2 + y**2 <= (5e-9)**2:
return 1e5
else:
return 0
def value_fun(point):
x, y, z = point
if x <= 0:
return (0, 0, 1)
else:
return (0, 0, -1)
self.pf = df.Field(mesh, dim=3, value=value_fun, norm=norm_fun)
def test_init_valid_args(self):
for mesh in self.meshes:
for value in self.consts + self.sfuncs:
f = df.Field(mesh, dim=1, value=value)
check_field(f)
for value in self.iters + self.vfuncs:
f = df.Field(mesh, dim=3, value=value)
check_field(f)
def test_init_invalid_args(self):
with pytest.raises(TypeError):
mesh = 'meaningless_mesh_string'
f = df.Field(mesh, dim=1)
for mesh in self.meshes:
for dim in [0, -1, 'dim', (2, 3)]:
with pytest.raises((ValueError, TypeError)):
f = df.Field(mesh, dim=dim)
def test_set_with_ndarray(self):
for mesh in self.meshes:
f = df.Field(mesh, dim=3)
f.value = np.ones((*f.mesh.n, f.dim,))
check_field(f)
assert isinstance(f.value, np.ndarray)
assert f.average == (1, 1, 1)
with pytest.raises(ValueError):
f.value = np.ones((2, 2))
def test_set_with_callable(self):
for mesh in self.meshes:
for func in self.sfuncs:
f = df.Field(mesh, dim=1, value=func)
check_field(f)
rp = f.mesh.region.random_point()
# Make sure to be at the centre of the cell
rp = f.mesh.index2point(f.mesh.point2index(rp))
assert f(rp) == func(rp)
for mesh in self.meshes:
for func in self.vfuncs:
f = df.Field(mesh, dim=3, value=func)
check_field(f)
rp = f.mesh.region.random_point()
rp = f.mesh.index2point(f.mesh.point2index(rp))
assert np.all(f(rp) == func(rp))
def test_set_with_dict(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
subregions = {'r1': df.Region(p1=(0, 0, 0), p2=(4e-9, 10e-9, 10e-9)),
'r2': df.Region(p1=(4e-9, 0, 0),
p2=(10e-9, 10e-9, 10e-9))}
mesh = df.Mesh(p1=p1, p2=p2, n=n, subregions=subregions)
field = df.Field(mesh, dim=3, value={'r1': (0, 0, 1),
'r2': (0, 0, 2),
'r1:r2': (0, 0, 5)})
assert np.all(field((3e-9, 7e-9, 9e-9)) == (0, 0, 1))
assert np.all(field((8e-9, 2e-9, 9e-9)) == (0, 0, 2))
def test_set_exception(self):
for mesh in self.meshes:
with pytest.raises(ValueError):
f = df.Field(mesh, dim=3, value='meaningless_string')
with pytest.raises(ValueError):
f = df.Field(mesh, dim=3, value=5+5j)
def test_value(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f = df.Field(mesh, dim=3)
f.value = (1, 1, 1)
assert f.value == (1, 1, 1)
f.array[0, 0, 0, 0] = 3
assert isinstance(f.value, np.ndarray)
def test_norm(self):
mesh = df.Mesh(p1=(0, 0, 0), p2=(10, 10, 10), cell=(5, 5, 5))
f = df.Field(mesh, dim=3, value=(2, 2, 2))
assert np.all(f.norm.value == 2*np.sqrt(3))
assert np.all(f.norm.array == 2*np.sqrt(3))
assert np.all(f.array == 2)
f.norm = 1
assert np.all(f.norm.value == 1)
assert np.all(f.norm.array == 1)
assert np.all(f.array == 1/np.sqrt(3))
f.array[0, 0, 0, 0] = 3
assert isinstance(f.norm.value, np.ndarray)
assert not np.all(f.norm.value == 1)
for mesh in self.meshes:
for value in self.iters + self.vfuncs:
for norm_value in [1, 2.1, 50, 1e-3, np.pi]:
f = df.Field(mesh, dim=3, value=value, norm=norm_value)
# Compute norm.
norm = f.array[..., 0]**2
norm += f.array[..., 1]**2
norm += f.array[..., 2]**2
norm = np.sqrt(norm)
assert norm.shape == f.mesh.n
assert f.norm.array.shape == (*f.mesh.n, 1)
assert np.all(abs(norm - norm_value) < 1e-12)
# Exception
mesh = df.Mesh(p1=(0, 0, 0), p2=(10, 10, 10), cell=(1, 1, 1))
f = df.Field(mesh, dim=1, value=-5)
with pytest.raises(ValueError):
f.norm = 5
def test_norm_is_not_preserved(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f = df.Field(mesh, dim=3)
f.value = (0, 3, 0)
f.norm = 1
assert np.all(f.norm.array == 1)
f.value = (0, 2, 0)
assert np.all(f.norm.value != 1)
assert np.all(f.norm.array == 2)
def test_norm_zero_field_exception(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f = df.Field(mesh, dim=3, value=(0, 0, 0))
with pytest.raises(ValueError):
f.norm = 1
def test_zero(self):
p1 = (0, 0, 0)
p2 = (10e-9, 10e-9, 10e-9)
n = (5, 5, 5)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f = df.Field(mesh, dim=1, value=1e-6)
zf = f.zero
assert f.mesh == zf.mesh
assert f.dim == zf.dim
assert not np.any(zf.array)
f = df.Field(mesh, dim=3, value=(5, -7, 1e3))
zf = f.zero
assert f.mesh == zf.mesh
assert f.dim == zf.dim
assert not np.any(zf.array)
def test_orientation(self):
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (5e-9, 5e-9, 5e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# No zero-norm cells
f = df.Field(mesh, dim=3, value=(2, 0, 0))
assert f.orientation.average == (1, 0, 0)
# With zero-norm cells
def value_fun(point):
x, y, z = point
if x <= 0:
return (0, 0, 0)
else:
return (3, 0, 4)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.orientation((-1.5e-9, 3e-9, 0)) == (0, 0, 0)
assert f.orientation((1.5e-9, 3e-9, 0)) == (0.6, 0, 0.8)
f = df.Field(mesh, dim=1, value=0)
with pytest.raises(ValueError):
of = f.orientation
def test_average(self):
value = -1e-3 + np.pi
tol = 1e-12
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (5e-9, 5e-9, 5e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=1, value=2)
assert abs(f.average - 2) < tol
f = df.Field(mesh, dim=3, value=(0, 1, 2))
assert np.allclose(f.average, (0, 1, 2))
def test_field_component(self):
for mesh in self.meshes:
f = df.Field(mesh, dim=3, value=(1, 2, 3))
assert all(isinstance(getattr(f, i), df.Field) for i in 'xyz')
assert all(getattr(f, i).dim == 1 for i in 'xyz')
f = df.Field(mesh, dim=2, value=(1, 2))
assert all(isinstance(getattr(f, i), df.Field) for i in 'xy')
assert all(getattr(f, i).dim == 1 for i in 'xy')
# Exception.
f = df.Field(mesh, dim=1, value=1)
with pytest.raises(AttributeError):
fx = f.x.dim
def test_get_attribute_exception(self):
for mesh in self.meshes:
f = df.Field(mesh, dim=3)
with pytest.raises(AttributeError) as excinfo:
f.__getattr__('nonexisting_attribute')
assert 'has no attribute' in str(excinfo.value)
def test_dir(self):
for mesh in self.meshes:
f = df.Field(mesh, dim=3, value=(5, 6, -9))
assert all(attr in dir(f) for attr in ['x', 'y', 'z', 'div'])
assert 'grad' not in dir(f)
f = df.Field(mesh, dim=1, value=1)
assert all(attr not in dir(f) for attr in ['x', 'y', 'z', 'div'])
assert 'grad' in dir(f)
def test_eq(self):
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (15e-9, 5e-9, 5e-9)
cell = (5e-9, 1e-9, 2.5e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f1 = df.Field(mesh, dim=1, value=0.2)
f2 = df.Field(mesh, dim=1, value=0.2)
f3 = df.Field(mesh, dim=1, value=3.1)
f4 = df.Field(mesh, dim=3, value=(1, -6, 0))
f5 = df.Field(mesh, dim=3, value=(1, -6, 0))
assert f1 == f2
assert not f1 != f2
assert not f1 == f3
assert f1 != f3
assert not f2 == f4
assert f2 != f4
assert f4 == f5
assert not f4 != f5
assert not f1 == 0.2
assert f1 != 0.2
def test_allclose(self):
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (15e-9, 5e-9, 5e-9)
cell = (5e-9, 1e-9, 2.5e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f1 = df.Field(mesh, dim=1, value=0.2)
f2 = df.Field(mesh, dim=1, value=0.2+1e-9)
f3 = df.Field(mesh, dim=1, value=0.21)
f4 = df.Field(mesh, dim=3, value=(1, -6, 0))
f5 = df.Field(mesh, dim=3, value=(1, -6+1e-8, 0))
f6 = df.Field(mesh, dim=3, value=(1, -6.01, 0))
assert f1.allclose(f2)
assert not f1.allclose(f3)
assert not f1.allclose(f5)
assert f4.allclose(f5)
assert not f4.allclose(f6)
with pytest.raises(TypeError):
f1.allclose(2)
def test_point_neg(self):
p1 = (-5e-9, -5e-9, -5e-9)
p2 = (5e-9, 5e-9, 5e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Scalar field
f = df.Field(mesh, dim=1, value=3)
res = -f
check_field(res)
assert res.average == -3
assert f == +f
assert f == -(-f)
assert f == +(-(-f))
# Vector field
f = df.Field(mesh, dim=3, value=(1, 2, -3))
res = -f
check_field(res)
assert res.average == (-1, -2, 3)
assert f == +f
assert f == -(-f)
assert f == +(-(-f))
def test_pow(self):
p1 = (0, 0, 0)
p2 = (15e-9, 6e-9, 6e-9)
cell = (3e-9, 3e-9, 3e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Scalar field
f = df.Field(mesh, dim=1, value=2)
res = f**2
assert res.average == 4
res = f**(-1)
assert res.average == 0.5
# Attempt vector field
f = df.Field(mesh, dim=3, value=(1, 2, -2))
with pytest.raises(ValueError):
res = f**2
# Attempt to raise to non numbers.Real
f = df.Field(mesh, dim=1, value=2)
with pytest.raises(TypeError):
res = f**'a'
with pytest.raises(TypeError):
res = f**f
def test_add_subtract(self):
p1 = (0, 0, 0)
p2 = (5e-9, 10e-9, -5e-9)
n = (2, 2, 1)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
# Scalar fields
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=1, value=-0.2)
res = f1 + f2
assert res.average == 1
res = f1 - f2
assert res.average == 1.4
f1 += f2
assert f1.average == 1
f1 -= f2
assert f1.average == 1.2
# Vector fields
f1 = df.Field(mesh, dim=3, value=(1, 2, 3))
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
res = f1 + f2
assert res.average == (0, -1, -2)
res = f1 - f2
assert res.average == (2, 5, 8)
f1 += f2
assert f1.average == (0, -1, -2)
f1 -= f2
assert f1.average == (1, 2, 3)
        # Arithmetic checks
assert f1 + f2 + (1, 1, 1) == (1, 1, 1) + f2 + f1
assert f1 - f2 - (0, 0, 0) == (0, 0, 0) - (f2 - f1)
assert f1 + (f1 + f2) == (f1 + f1) + f2
assert f1 - (f1 + f2) == f1 - f1 - f2
assert f1 + f2 - f1 == f2 + (0, 0, 0)
# Constants
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
res = f1 + 2
assert res.average == 3.2
res = f1 - 1.2
assert res.average == 0
f1 += 2.5
assert f1.average == 3.7
f1 -= 3.7
assert f1.average == 0
res = f2 + (1, 3, 5)
assert res.average == (0, 0, 0)
res = f2 - (1, 2, 3)
assert res.average == (-2, -5, -8)
f2 += (1, 1, 1)
assert f2.average == (0, -2, -4)
f2 -= (-1, -2, 3)
assert f2.average == (1, 0, -7)
# Exceptions
with pytest.raises(TypeError):
res = f1 + '2'
# Fields with different dimensions
with pytest.raises(ValueError):
res = f1 + f2
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=1, value=1.2)
f2 = df.Field(mesh2, dim=1, value=1)
with pytest.raises(ValueError):
res = f1 + f2
with pytest.raises(ValueError):
f1 += f2
with pytest.raises(ValueError):
f1 -= f2
def test_mul_truediv(self):
p1 = (0, 0, 0)
p2 = (5e-9, 5e-9, 5e-9)
cell = (1e-9, 5e-9, 1e-9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Scalar fields
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=1, value=-2)
res = f1 * f2
assert res.average == -2.4
res = f1 / f2
assert res.average == -0.6
f1 *= f2
assert f1.average == -2.4
f1 /= f2
assert f1.average == 1.2
# Scalar field with a constant
f = df.Field(mesh, dim=1, value=5)
res = f * 2
assert res.average == 10
res = 3 * f
assert res.average == 15
res = f * (1, 2, 3)
assert res.average == (5, 10, 15)
res = (1, 2, 3) * f
assert res.average == (5, 10, 15)
res = f / 2
assert res.average == 2.5
res = 10 / f
assert res.average == 2
res = (5, 10, 15) / f
assert res.average == (1, 2, 3)
f *= 10
assert f.average == 50
f /= 10
assert f.average == 5
# Scalar field with a vector field
f1 = df.Field(mesh, dim=1, value=2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, 5))
res = f1 * f2 # __mul__
assert res.average == (-2, -6, 10)
res = f2 * f1 # __rmul__
assert res.average == (-2, -6, 10)
res = f2 / f1 # __truediv__
assert res.average == (-0.5, -1.5, 2.5)
f2 *= f1 # __imul__
assert f2.average == (-2, -6, 10)
f2 /= f1 # __truediv__
assert f2.average == (-1, -3, 5)
with pytest.raises(ValueError):
res = f1 / f2 # __rtruediv__
# Vector field with a scalar
f = df.Field(mesh, dim=3, value=(1, 2, 0))
res = f * 2
assert res.average == (2, 4, 0)
res = 5 * f
assert res.average == (5, 10, 0)
res = f / 2
assert res.average == (0.5, 1, 0)
f *= 2
assert f.average == (2, 4, 0)
f /= 2
assert f.average == (1, 2, 0)
with pytest.raises(ValueError):
res = 10 / f
# Further checks
f1 = df.Field(mesh, dim=1, value=2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
assert f1 * f2 == f2 * f1
assert 1.3 * f2 == f2 * 1.3
assert -5 * f2 == f2 * (-5)
assert (1, 2.2, -1) * f1 == f1 * (1, 2.2, -1)
assert f1 * (f1 * f2) == (f1 * f1) * f2
assert f1 * f2 / f1 == f2
# Exceptions
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
with pytest.raises(TypeError):
res = f2 * 'a'
with pytest.raises(TypeError):
res = 'a' / f1
with pytest.raises(ValueError):
res = f2 * f2
with pytest.raises(ValueError):
res = f2 / f2
with pytest.raises(ValueError):
res = 1 / f2
with pytest.raises(ValueError):
res = f1 / f2
with pytest.raises(TypeError):
f2 *= 'a'
with pytest.raises(TypeError):
f2 /= 'a'
with pytest.raises(ValueError):
f1 /= f2
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=1, value=1.2)
f2 = df.Field(mesh2, dim=1, value=1)
with pytest.raises(ValueError):
res = f1 * f2
with pytest.raises(ValueError):
res = f1 / f2
with pytest.raises(ValueError):
f1 *= f2
with pytest.raises(ValueError):
f1 /= f2
def test_dot(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Zero vectors
f1 = df.Field(mesh, dim=3, value=(0, 0, 0))
res = f1@f1
assert res.dim == 1
assert res.average == 0
# Orthogonal vectors
f1 = df.Field(mesh, dim=3, value=(1, 0, 0))
f2 = df.Field(mesh, dim=3, value=(0, 1, 0))
f3 = df.Field(mesh, dim=3, value=(0, 0, 1))
assert (f1 @ f2).average == 0
assert (f1 @ f3).average == 0
assert (f2 @ f3).average == 0
assert (f1 @ f1).average == 1
assert (f2 @ f2).average == 1
assert (f3 @ f3).average == 1
# Check if commutative
assert f1 @ f2 == f2 @ f1
assert f1 @ (-1, 3, 2.2) == (-1, 3, 2.2) @ f1
# Vector field with a constant
f = df.Field(mesh, dim=3, value=(1, 2, 3))
res = (1, 1, 1) @ f
assert res.average == 6
res = f @ [1, 1, 1]
assert res.average == 6
# Spatially varying vectors
def value_fun1(point):
x, y, z = point
return (x, y, z)
def value_fun2(point):
x, y, z = point
return (z, x, y)
f1 = df.Field(mesh, dim=3, value=value_fun1)
f2 = df.Field(mesh, dim=3, value=value_fun2)
# Check if commutative
assert f1 @ f2 == f2 @ f1
# The dot product should be x*z + y*x + z*y
assert (f1 @ f2)((1, 1, 1)) == 3
assert (f1 @ f2)((3, 1, 1)) == 7
assert (f1 @ f2)((5, 7, 1)) == 47
# Check norm computed using dot product
assert f1.norm == (f1 @ f1)**(0.5)
# Exceptions
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
with pytest.raises(ValueError):
res = f1 @ f2
with pytest.raises(ValueError):
res = f1 @ f2
with pytest.raises(TypeError):
res = f1 @ 3
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=3, value=(1, 2, 3))
f2 = df.Field(mesh2, dim=3, value=(3, 2, 1))
with pytest.raises(ValueError):
res = f1 @ f2
def test_cross(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Zero vectors
f1 = df.Field(mesh, dim=3, value=(0, 0, 0))
res = f1 & f1
assert res.dim == 3
assert res.average == (0, 0, 0)
# Orthogonal vectors
f1 = df.Field(mesh, dim=3, value=(1, 0, 0))
f2 = df.Field(mesh, dim=3, value=(0, 1, 0))
f3 = df.Field(mesh, dim=3, value=(0, 0, 1))
assert (f1 & f2).average == (0, 0, 1)
assert (f1 & f3).average == (0, -1, 0)
assert (f2 & f3).average == (1, 0, 0)
assert (f1 & f1).average == (0, 0, 0)
assert (f2 & f2).average == (0, 0, 0)
assert (f3 & f3).average == (0, 0, 0)
# Constants
assert (f1 & (0, 1, 0)).average == (0, 0, 1)
assert ((0, 1, 0) & f1).average == (0, 0, 1)
        # Check if not commutative
assert f1 & f2 == -(f2 & f1)
assert f1 & f3 == -(f3 & f1)
assert f2 & f3 == -(f3 & f2)
f1 = df.Field(mesh, dim=3, value=lambda point: (point[0],
point[1],
point[2]))
f2 = df.Field(mesh, dim=3, value=lambda point: (point[2],
point[0],
point[1]))
# The cross product should be
# (y**2-x*z, z**2-x*y, x**2-y*z)
assert (f1 & f2)((1, 1, 1)) == (0, 0, 0)
assert (f1 & f2)((3, 1, 1)) == (-2, -2, 8)
assert (f2 & f1)((3, 1, 1)) == (2, 2, -8)
assert (f1 & f2)((5, 7, 1)) == (44, -34, 18)
# Exceptions
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=3, value=(-1, -3, -5))
with pytest.raises(TypeError):
res = f1 & 2
with pytest.raises(ValueError):
res = f1 & f2
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=3, value=(1, 2, 3))
f2 = df.Field(mesh2, dim=3, value=(3, 2, 1))
with pytest.raises(ValueError):
res = f1 & f2
def test_lshift(self):
p1 = (0, 0, 0)
p2 = (10e6, 10e6, 10e6)
cell = (5e6, 5e6, 5e6)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f1 = df.Field(mesh, dim=1, value=1)
f2 = df.Field(mesh, dim=1, value=-3)
f3 = df.Field(mesh, dim=1, value=5)
res = f1 << f2 << f3
assert res.dim == 3
assert res.average == (1, -3, 5)
# Different dimensions
f1 = df.Field(mesh, dim=1, value=1.2)
f2 = df.Field(mesh, dim=2, value=(-1, -3))
res = f1 << f2
assert res.average == (1.2, -1, -3)
res = f2 << f1
assert res.average == (-1, -3, 1.2)
# Constants
f1 = df.Field(mesh, dim=1, value=1.2)
res = f1 << 2
assert res.average == (1.2, 2)
res = f1 << (1, -1)
assert res.average == (1.2, 1, -1)
res = 3 << f1
assert res.average == (3, 1.2)
res = (1.2, 3) << f1 << 3
assert res.average == (1.2, 3, 1.2, 3)
# Exceptions
with pytest.raises(TypeError):
res = 'a' << f1
with pytest.raises(TypeError):
res = f1 << 'a'
# Fields defined on different meshes
mesh1 = df.Mesh(p1=(0, 0, 0), p2=(5, 5, 5), n=(1, 1, 1))
mesh2 = df.Mesh(p1=(0, 0, 0), p2=(3, 3, 3), n=(1, 1, 1))
f1 = df.Field(mesh1, dim=1, value=1.2)
f2 = df.Field(mesh2, dim=1, value=1)
with pytest.raises(ValueError):
res = f1 << f2
def test_all_operators(self):
p1 = (0, 0, 0)
p2 = (5e-9, 5e-9, 10e-9)
n = (2, 2, 1)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
f1 = df.Field(mesh, dim=1, value=2)
f2 = df.Field(mesh, dim=3, value=(-4, 0, 1))
res = ((+f1/2 + f2.x)**2 - 2*f1*3)/(-f2.z) - 2*f2.y + 1/f2.z**2 + f2@f2
assert np.all(res.array == 21)
res = 1 + f1 + 0*f2.x - 3*f2.y/3
assert res.average == 3
def test_pad(self):
p1 = (0, 0, 0)
p2 = (10, 8, 2)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
field = df.Field(mesh, dim=1, value=1)
pf = field.pad({'x': (1, 1)}, mode='constant') # zeros padded
assert pf.array.shape == (12, 8, 2, 1)
def test_derivative(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
# f(x, y, z) = 0 -> grad(f) = (0, 0, 0)
# No BC
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=1, value=0)
check_field(f.derivative('x'))
assert f.derivative('x', n=1).average == 0
assert f.derivative('y', n=1).average == 0
assert f.derivative('z', n=1).average == 0
assert f.derivative('x', n=2).average == 0
assert f.derivative('y', n=2).average == 0
assert f.derivative('z', n=2).average == 0
# f(x, y, z) = x + y + z -> grad(f) = (1, 1, 1)
# No BC
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
def value_fun(point):
x, y, z = point
return x + y + z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.derivative('x', n=1).average == 1
assert f.derivative('y', n=1).average == 1
assert f.derivative('z', n=1).average == 1
assert f.derivative('x', n=2).average == 0
assert f.derivative('y', n=2).average == 0
assert f.derivative('z', n=2).average == 0
# f(x, y, z) = x*y + 2*y + x*y*z ->
# grad(f) = (y+y*z, x+2+x*z, x*y)
# No BC
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
def value_fun(point):
x, y, z = point
return x*y + 2*y + x*y*z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.derivative('x')((7, 5, 1)) == 10
assert f.derivative('y')((7, 5, 1)) == 16
assert f.derivative('z')((7, 5, 1)) == 35
assert f.derivative('x', n=2)((1, 1, 1)) == 0
assert f.derivative('y', n=2)((1, 1, 1)) == 0
assert f.derivative('z', n=2)((1, 1, 1)) == 0
# f(x, y, z) = (0, 0, 0)
# -> dfdx = (0, 0, 0)
# -> dfdy = (0, 0, 0)
# -> dfdz = (0, 0, 0)
# No BC
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=3, value=(0, 0, 0))
check_field(f.derivative('y'))
assert f.derivative('x').average == (0, 0, 0)
assert f.derivative('y').average == (0, 0, 0)
assert f.derivative('z').average == (0, 0, 0)
# f(x, y, z) = (x, y, z)
# -> dfdx = (1, 0, 0)
# -> dfdy = (0, 1, 0)
# -> dfdz = (0, 0, 1)
def value_fun(point):
x, y, z = point
return (x, y, z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.derivative('x').average == (1, 0, 0)
assert f.derivative('y').average == (0, 1, 0)
assert f.derivative('z').average == (0, 0, 1)
# f(x, y, z) = (x*y, y*z, x*y*z)
# -> dfdx = (y, 0, y*z)
# -> dfdy = (x, z, x*z)
# -> dfdz = (0, y, x*y)
def value_fun(point):
x, y, z = point
return (x*y, y*z, x*y*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.derivative('x')((3, 1, 3)) == (1, 0, 3)
assert f.derivative('y')((3, 1, 3)) == (3, 3, 9)
assert f.derivative('z')((3, 1, 3)) == (0, 1, 3)
assert f.derivative('x')((5, 3, 5)) == (3, 0, 15)
assert f.derivative('y')((5, 3, 5)) == (5, 5, 25)
assert f.derivative('z')((5, 3, 5)) == (0, 3, 15)
# f(x, y, z) = (3+x*y, x-2*y, x*y*z)
# -> dfdx = (y, 1, y*z)
# -> dfdy = (x, -2, x*z)
# -> dfdz = (0, 0, x*y)
def value_fun(point):
x, y, z = point
return (3+x*y, x-2*y, x*y*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.derivative('x')((7, 5, 1)) == (5, 1, 5)
assert f.derivative('y')((7, 5, 1)) == (7, -2, 7)
assert f.derivative('z')((7, 5, 1)) == (0, 0, 35)
# f(x, y, z) = 2*x*x + 2*y*y + 3*z*z
# -> grad(f) = (4, 4, 6)
def value_fun(point):
x, y, z = point
return 2*x*x + 2*y*y + 3*z*z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.derivative('x', n=2).average == 4
assert f.derivative('y', n=2).average == 4
assert f.derivative('z', n=2).average == 6
# f(x, y, z) = (2*x*x, 2*y*y, 3*z*z)
def value_fun(point):
x, y, z = point
return (2*x*x, 2*y*y, 3*z*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.derivative('x', n=2).average == (4, 0, 0)
assert f.derivative('y', n=2).average == (0, 4, 0)
assert f.derivative('z', n=2).average == (0, 0, 6)
with pytest.raises(NotImplementedError):
res = f.derivative('x', n=3)
def test_derivative_pbc(self):
p1 = (0, 0, 0)
p2 = (10, 8, 6)
cell = (2, 2, 2)
mesh_nopbc = df.Mesh(p1=p1, p2=p2, cell=cell)
mesh_pbc = df.Mesh(p1=p1, p2=p2, cell=cell, bc='xyz')
# Scalar field
def value_fun(point):
return point[0]*point[1]*point[2]
# No PBC
f = df.Field(mesh_nopbc, dim=1, value=value_fun)
assert f.derivative('x')((9, 1, 1)) == 1
assert f.derivative('y')((1, 7, 1)) == 1
assert f.derivative('z')((1, 1, 5)) == 1
# PBC
f = df.Field(mesh_pbc, dim=1, value=value_fun)
assert f.derivative('x')((9, 1, 1)) == -1.5
assert f.derivative('y')((1, 7, 1)) == -1
assert f.derivative('z')((1, 1, 5)) == -0.5
# Vector field
def value_fun(point):
return (point[0]*point[1]*point[2],) * 3
# No PBC
f = df.Field(mesh_nopbc, dim=3, value=value_fun)
assert f.derivative('x')((9, 1, 1)) == (1, 1, 1)
assert f.derivative('y')((1, 7, 1)) == (1, 1, 1)
assert f.derivative('z')((1, 1, 5)) == (1, 1, 1)
# PBC
f = df.Field(mesh_pbc, dim=3, value=value_fun)
assert f.derivative('x')((9, 1, 1)) == (-1.5, -1.5, -1.5)
assert f.derivative('y')((1, 7, 1)) == (-1, -1, -1)
assert f.derivative('z')((1, 1, 5)) == (-0.5, -0.5, -0.5)
def test_derivative_neumann(self):
p1 = (0, 0, 0)
p2 = (10, 8, 6)
cell = (2, 2, 2)
mesh_noneumann = df.Mesh(p1=p1, p2=p2, cell=cell)
mesh_neumann = df.Mesh(p1=p1, p2=p2, cell=cell, bc='neumann')
# Scalar field
def value_fun(point):
return point[0]*point[1]*point[2]
# No Neumann
f1 = df.Field(mesh_noneumann, dim=1, value=value_fun)
assert f1.derivative('x')((9, 1, 1)) == 1
assert f1.derivative('y')((1, 7, 1)) == 1
assert f1.derivative('z')((1, 1, 5)) == 1
# Neumann
f2 = df.Field(mesh_neumann, dim=1, value=value_fun)
assert (f1.derivative('x')(f1.mesh.region.centre) ==
f2.derivative('x')(f2.mesh.region.centre))
assert (f1.derivative('x')((1, 7, 1)) !=
f2.derivative('x')((1, 7, 1)))
def test_derivative_single_cell(self):
p1 = (0, 0, 0)
p2 = (10, 10, 2)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Scalar field: f(x, y, z) = x + y + z
# -> grad(f) = (1, 1, 1)
def value_fun(point):
x, y, z = point
return x + y + z
f = df.Field(mesh, dim=1, value=value_fun)
# only one cell in the z-direction
assert f.plane('x').derivative('x').average == 0
assert f.plane('y').derivative('y').average == 0
assert f.derivative('z').average == 0
# Vector field: f(x, y, z) = (x, y, z)
# -> grad(f) = (1, 1, 1)
def value_fun(point):
x, y, z = point
return (x, y, z)
f = df.Field(mesh, dim=3, value=value_fun)
# only one cell in the z-direction
assert f.plane('x').derivative('x').average == (0, 0, 0)
assert f.plane('y').derivative('y').average == (0, 0, 0)
assert f.derivative('z').average == (0, 0, 0)
def test_grad(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# f(x, y, z) = 0 -> grad(f) = (0, 0, 0)
f = df.Field(mesh, dim=1, value=0)
check_field(f.grad)
assert f.grad.average == (0, 0, 0)
# f(x, y, z) = x + y + z -> grad(f) = (1, 1, 1)
def value_fun(point):
x, y, z = point
return x + y + z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.grad.average == (1, 1, 1)
# f(x, y, z) = x*y + y + z -> grad(f) = (y, x+1, 1)
def value_fun(point):
x, y, z = point
return x*y + y + z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.grad((3, 1, 3)) == (1, 4, 1)
assert f.grad((5, 3, 5)) == (3, 6, 1)
# f(x, y, z) = x*y + 2*y + x*y*z ->
# grad(f) = (y+y*z, x+2+x*z, x*y)
def value_fun(point):
x, y, z = point
return x*y + 2*y + x*y*z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.grad((7, 5, 1)) == (10, 16, 35)
assert f.grad.x == f.derivative('x')
assert f.grad.y == f.derivative('y')
assert f.grad.z == f.derivative('z')
# Exception
f = df.Field(mesh, dim=3, value=(1, 2, 3))
with pytest.raises(ValueError):
res = f.grad
def test_div_curl(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# f(x, y, z) = (0, 0, 0)
# -> div(f) = 0
# -> curl(f) = (0, 0, 0)
f = df.Field(mesh, dim=3, value=(0, 0, 0))
check_field(f.div)
assert f.div.dim == 1
assert f.div.average == 0
check_field(f.curl)
assert f.curl.dim == 3
assert f.curl.average == (0, 0, 0)
# f(x, y, z) = (x, y, z)
# -> div(f) = 3
# -> curl(f) = (0, 0, 0)
def value_fun(point):
x, y, z = point
return (x, y, z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.div.average == 3
assert f.curl.average == (0, 0, 0)
# f(x, y, z) = (x*y, y*z, x*y*z)
# -> div(f) = y + z + x*y
# -> curl(f) = (x*z-y, -y*z, -x)
def value_fun(point):
x, y, z = point
return (x*y, y*z, x*y*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.div((3, 1, 3)) == 7
assert f.div((5, 3, 5)) == 23
assert f.curl((3, 1, 3)) == (8, -3, -3)
assert f.curl((5, 3, 5)) == (22, -15, -5)
# f(x, y, z) = (3+x*y, x-2*y, x*y*z)
# -> div(f) = y - 2 + x*y
# -> curl(f) = (x*z, -y*z, 1-x)
def value_fun(point):
x, y, z = point
return (3+x*y, x-2*y, x*y*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.div((7, 5, 1)) == 38
assert f.curl((7, 5, 1)) == (7, -5, -6)
# Exception
f = df.Field(mesh, dim=1, value=3.11)
with pytest.raises(ValueError):
res = f.div
with pytest.raises(ValueError):
res = f.curl
def test_laplace(self):
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# f(x, y, z) = (0, 0, 0)
# -> laplace(f) = 0
f = df.Field(mesh, dim=3, value=(0, 0, 0))
check_field(f.laplace)
assert f.laplace.dim == 3
assert f.laplace.average == (0, 0, 0)
# f(x, y, z) = x + y + z
# -> laplace(f) = 0
def value_fun(point):
x, y, z = point
return x + y + z
f = df.Field(mesh, dim=1, value=value_fun)
check_field(f.laplace)
assert f.laplace.average == 0
# f(x, y, z) = 2*x*x + 2*y*y + 3*z*z
# -> laplace(f) = 4 + 4 + 6 = 14
def value_fun(point):
x, y, z = point
return 2*x*x + 2*y*y + 3*z*z
f = df.Field(mesh, dim=1, value=value_fun)
assert f.laplace.average == 14
# f(x, y, z) = (2*x*x, 2*y*y, 3*z*z)
# -> laplace(f) = (4, 4, 6)
def value_fun(point):
x, y, z = point
return (2*x*x, 2*y*y, 3*z*z)
f = df.Field(mesh, dim=3, value=value_fun)
assert f.laplace.average == (4, 4, 6)
def test_integral(self):
# Volume integral.
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=1, value=0)
assert (f * df.dV).integral() == 0
assert (f * df.dx*df.dy*df.dz).integral() == 0
f = df.Field(mesh, dim=1, value=2)
assert (f * df.dV).integral() == 2000
assert (f * df.dx*df.dy*df.dz).integral() == 2000
f = df.Field(mesh, dim=3, value=(-1, 0, 3))
assert (f * df.dV).integral() == (-1000, 0, 3000)
assert (f * df.dx*df.dy*df.dz).integral() == (-1000, 0, 3000)
def value_fun(point):
x, y, z = point
if x <= 5:
return (-1, -2, -3)
else:
return (1, 2, 3)
f = df.Field(mesh, dim=3, value=value_fun)
assert (f * df.dV).integral() == (0, 0, 0)
assert (f * df.dx*df.dy*df.dz).integral() == (0, 0, 0)
# Surface integral.
p1 = (0, 0, 0)
p2 = (10, 5, 3)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=1, value=0)
assert (f.plane('x') * abs(df.dS)).integral() == 0
assert (f.plane('x') * df.dy*df.dz).integral() == 0
f = df.Field(mesh, dim=1, value=2)
assert (f.plane('x') * abs(df.dS)).integral() == 30
assert (f.plane('x') * df.dy*df.dz).integral() == 30
assert (f.plane('y') * abs(df.dS)).integral() == 60
assert (f.plane('y') * df.dx*df.dz).integral() == 60
assert (f.plane('z') * abs(df.dS)).integral() == 100
assert (f.plane('z') * df.dx*df.dy).integral() == 100
f = df.Field(mesh, dim=3, value=(-1, 0, 3))
assert (f.plane('x') * abs(df.dS)).integral() == (-15, 0, 45)
assert (f.plane('y') * abs(df.dS)).integral() == (-30, 0, 90)
assert (f.plane('z') * abs(df.dS)).integral() == (-50, 0, 150)
f = df.Field(mesh, dim=3, value=(-1, 0, 3))
assert df.integral(f.plane('x') @ df.dS) == -15
assert df.integral(f.plane('y') @ df.dS) == 0
assert df.integral(f.plane('z') @ df.dS) == 150
# Directional integral
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=3, value=(1, 1, 1))
f = f.integral(direction='x')
assert isinstance(f, df.Field)
assert f.dim == 3
assert f.mesh.n == (1, 10, 10)
assert f.average == (10, 10, 10)
f = f.integral(direction='x').integral(direction='y')
assert isinstance(f, df.Field)
assert f.dim == 3
assert f.mesh.n == (1, 1, 10)
assert f.average == (100, 100, 100)
f = f.integral('x').integral('y').integral('z')
assert f.dim == 3
assert f.mesh.n == (1, 1, 1)
assert f.average == (1000, 1000, 1000)
assert (f.integral('x').integral('y').integral('z').average ==
f.integral())
# Improper integral
p1 = (0, 0, 0)
p2 = (10, 10, 10)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
f = df.Field(mesh, dim=3, value=(1, 1, 1))
f = f.integral(direction='x', improper=True)
assert isinstance(f, df.Field)
assert f.dim == 3
assert f.mesh.n == (10, 10, 10)
assert f.average == (5.5, 5.5, 5.5)
assert f((0, 0, 0)) == (1, 1, 1)
assert f((10, 10, 10)) == (10, 10, 10)
# Exceptions
with pytest.raises(ValueError):
res = f.integral(direction='xy', improper=True)
def test_line(self):
mesh = df.Mesh(p1=(0, 0, 0), p2=(10, 10, 10), n=(10, 10, 10))
f = df.Field(mesh, dim=3, value=(1, 2, 3))
check_field(f)
line = f.line(p1=(0, 0, 0), p2=(5, 5, 5), n=20)
assert isinstance(line, df.Line)
assert line.n == 20
assert line.dim == 3
def test_plane(self):
for mesh, direction in itertools.product(self.meshes, ['x', 'y', 'z']):
f = df.Field(mesh, dim=1, value=3)
check_field(f)
plane = f.plane(direction, n=(3, 3))
assert isinstance(plane, df.Field)
p, v = zip(*list(plane))
assert len(p) == 9
assert len(v) == 9
def test_getitem(self):
p1 = (0, 0, 0)
p2 = (90, 50, 10)
cell = (5, 5, 5)
subregions = {'r1': df.Region(p1=(0, 0, 0), p2=(30, 50, 10)),
'r2': df.Region(p1=(30, 0, 0), p2=(90, 50, 10))}
mesh = df.Mesh(p1=p1, p2=p2, cell=cell, subregions=subregions)
def value_fun(point):
x, y, z = point
if x <= 60:
return (-1, -2, -3)
else:
return (1, 2, 3)
f = df.Field(mesh, dim=3, value=value_fun)
check_field(f)
check_field(f['r1'])
check_field(f['r2'])
check_field(f[subregions['r1']])
check_field(f[subregions['r2']])
assert f['r1'].average == (-1, -2, -3)
assert f['r2'].average == (0, 0, 0)
assert f[subregions['r1']].average == (-1, -2, -3)
assert f[subregions['r2']].average == (0, 0, 0)
assert len(f['r1'].mesh) + len(f['r2'].mesh) == len(f.mesh)
# Meshes are not aligned
subregion = df.Region(p1=(1.1, 0, 0), p2=(9.9, 15, 5))
assert f[subregion].array.shape == (2, 3, 1, 3)
def test_project(self):
p1 = (-5, -5, -5)
p2 = (5, 5, 5)
cell = (1, 1, 1)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# Constant scalar field
f = df.Field(mesh, dim=1, value=5)
check_field(f)
assert f.project('x').array.shape == (1, 10, 10, 1)
assert f.project('y').array.shape == (10, 1, 10, 1)
assert f.project('z').array.shape == (10, 10, 1, 1)
# Constant vector field
f = df.Field(mesh, dim=3, value=(1, 2, 3))
assert f.project('x').array.shape == (1, 10, 10, 3)
assert f.project('y').array.shape == (10, 1, 10, 3)
assert f.project('z').array.shape == (10, 10, 1, 3)
# Spatially varying scalar field
def value_fun(point):
x, y, z = point
if z <= 0:
return 1
else:
return -1
f = df.Field(mesh, dim=1, value=value_fun)
sf = f.project('z')
assert sf.array.shape == (10, 10, 1, 1)
assert sf.average == 0
# Spatially varying vector field
def value_fun(point):
x, y, z = point
if z <= 0:
return (3, 2, 1)
else:
return (3, 2, -1)
f = df.Field(mesh, dim=3, value=value_fun)
sf = f.project('z')
assert sf.array.shape == (10, 10, 1, 3)
assert sf.average == (3, 2, 0)
def test_angle(self):
p1 = (0, 0, 0)
p2 = (8e-9, 2e-9, 2e-9)
cell = (2e-9, 2e-9, 2e-9)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
def value_fun(point):
x, y, z = point
if x < 2e-9:
return (1, 1, 1)
elif 2e-9 <= x < 4e-9:
return (1, -1, 0)
elif 4e-9 <= x < 6e-9:
return (-1, -1, 0)
elif 6e-9 <= x < 8e-9:
return (-1, 1, 0)
f = df.Field(mesh, dim=3, value=value_fun)
assert abs(f.plane('z').angle((1e-9, 2e-9, 2e-9)) - np.pi/4) < 1e-3
assert abs(f.plane('z').angle((3e-9, 2e-9, 2e-9)) - 7*np.pi/4) < 1e-3
assert abs(f.plane('z').angle((5e-9, 2e-9, 2e-9)) - 5*np.pi/4) < 1e-3
assert abs(f.plane('z').angle((7e-9, 2e-9, 2e-9)) - 3*np.pi/4) < 1e-3
# Exception
with pytest.raises(ValueError):
res = f.angle # the field is not sliced
def test_write_read_ovf(self):
representations = ['txt', 'bin4', 'bin8']
filename = 'testfile.ovf'
p1 = (0, 0, 0)
p2 = (8e-9, 5e-9, 3e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
# Write/read
for dim, value in [(1, lambda point: point[0] + point[1] + point[2]),
(3, lambda point: (point[0], point[1], point[2]))]:
f = df.Field(mesh, dim=dim, value=value)
for rep in representations:
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
f.write(tmpfilename, representation=rep)
f_read = df.Field.fromfile(tmpfilename)
assert f.allclose(f_read)
# Extend scalar
for rep in representations:
f = df.Field(mesh, dim=1,
value=lambda point: point[0]+point[1]+point[2])
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
f.write(tmpfilename, extend_scalar=True)
f_read = df.Field.fromfile(tmpfilename)
assert f.allclose(f_read.x)
# Read different OOMMF representations
# (OVF1, OVF2) x (txt, bin4, bin8)
filenames = ['oommf-ovf2-txt.omf',
'oommf-ovf2-bin4.omf',
'oommf-ovf2-bin8.omf',
'oommf-ovf1-txt.omf',
'oommf-ovf1-bin4.omf',
'oommf-ovf1-bin8.omf']
dirname = os.path.join(os.path.dirname(__file__), 'test_sample')
for filename in filenames:
omffilename = os.path.join(dirname, filename)
f_read = df.Field.fromfile(omffilename)
if 'ovf2' in filename:
# The magnetisation is in the x-direction in OVF2 files.
assert abs(f_read.orientation.x.average - 1) < 1e-2
else:
# The norm of magnetisation is known.
assert abs(f_read.norm.average - 1261566.2610100) < 1e-3
# Read different mumax3 bin4 files (made on linux and windows)
filenames = ['mumax-bin4-linux.ovf', 'mumax-bin4-windows.ovf']
dirname = os.path.join(os.path.dirname(__file__), 'test_sample')
for filename in filenames:
omffilename = os.path.join(dirname, filename)
f_read = df.Field.fromfile(omffilename)
            # We know the saved magnetisation.
f_saved = df.Field(f_read.mesh, dim=3, value=(1, 0.1, 0), norm=1)
assert f_saved.allclose(f_read)
# Exception (dim=2)
f = df.Field(mesh, dim=2, value=(1, 2))
with pytest.raises(TypeError) as excinfo:
f.write(filename)
def test_write_read_vtk(self):
filename = 'testfile.vtk'
p1 = (0, 0, 0)
p2 = (1e-9, 2e-9, 1e-9)
cell = (1e-9, 1e-9, 1e-9)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
for dim, value in [(1, -1.2), (3, (1e-3, -5e6, 5e6))]:
f = df.Field(mesh, dim=dim, value=value)
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
f.write(tmpfilename)
f_read = df.Field.fromfile(tmpfilename)
assert np.allclose(f.array, f_read.array)
assert np.allclose(f.mesh.region.pmin, f_read.mesh.region.pmin)
assert np.allclose(f.mesh.region.pmax, f_read.mesh.region.pmax)
assert np.allclose(f.mesh.cell, f_read.mesh.cell)
assert f.mesh.n == f_read.mesh.n
def test_write_read_hdf5(self):
filenames = ['testfile.hdf5', 'testfile.h5']
p1 = (0, 0, 0)
p2 = (10e-12, 5e-12, 5e-12)
cell = (1e-12, 1e-12, 1e-12)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
for dim, value in [(1, -1.23), (3, (1e-3 + np.pi, -5e6, 6e6))]:
f = df.Field(mesh, dim=dim, value=value)
for filename in filenames:
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
f.write(tmpfilename)
f_read = df.Field.fromfile(tmpfilename)
assert f == f_read
def test_read_write_invalid_extension(self):
filename = 'testfile.jpg'
p1 = (0, 0, 0)
p2 = (10e-12, 5e-12, 3e-12)
cell = (1e-12, 1e-12, 1e-12)
mesh = df.Mesh(region=df.Region(p1=p1, p2=p2), cell=cell)
f = df.Field(mesh, dim=1, value=5e-12)
with pytest.raises(ValueError) as excinfo:
f.write(filename)
with pytest.raises(ValueError) as excinfo:
f = df.Field.fromfile(filename)
def test_mpl_scalar(self):
# No axes
self.pf.x.plane('x', n=(3, 4)).mpl_scalar()
# Axes
fig = plt.figure()
ax = fig.add_subplot(111)
self.pf.x.plane('x', n=(3, 4)).mpl_scalar(ax=ax)
# All arguments
self.pf.x.plane('x').mpl_scalar(figsize=(10, 10),
filter_field=self.pf.norm,
colorbar=True,
colorbar_label='something',
multiplier=1e-6, cmap='hsv',
clim=(-1, 1))
# Lightness field
filenames = ['skyrmion.omf', 'skyrmion-disk.omf']
for i in filenames:
filename = os.path.join(os.path.dirname(__file__),
'test_sample', i)
field = df.Field.fromfile(filename)
field.plane('z').angle.mpl_scalar(lightness_field=field.z)
field.plane('z').angle.mpl_scalar(lightness_field=-field.z,
filter_field=field.norm)
field.plane('z').mpl(scalar_lightness_field=-field.z)
# Saving plot
filename = 'testfigure.pdf'
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
self.pf.x.plane('x', n=(3, 4)).mpl_scalar(filename=tmpfilename)
# Exceptions
with pytest.raises(ValueError):
self.pf.x.mpl_scalar() # not sliced
with pytest.raises(ValueError):
self.pf.plane('z').mpl_scalar() # vector field
with pytest.raises(ValueError):
# wrong filter field
self.pf.x.plane('z').mpl_scalar(filter_field=self.pf)
with pytest.raises(ValueError):
            # wrong lightness field
self.pf.x.plane('z').mpl_scalar(lightness_field=self.pf)
plt.close('all')
def test_mpl_vector(self):
# No axes
self.pf.plane('x', n=(3, 4)).mpl_vector()
# Axes
fig = plt.figure()
ax = fig.add_subplot(111)
self.pf.plane('x', n=(3, 4)).mpl_vector(ax=ax)
# All arguments
self.pf.plane('x').mpl_vector(figsize=(10, 10),
color_field=self.pf.y,
colorbar=True,
colorbar_label='something',
multiplier=1e-6, cmap='hsv',
clim=(-1, 1))
# Saving plot
filename = 'testfigure.pdf'
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
self.pf.plane('x', n=(3, 4)).mpl_vector(filename=tmpfilename)
# Exceptions
with pytest.raises(ValueError) as excinfo:
self.pf.mpl_vector() # not sliced
with pytest.raises(ValueError) as excinfo:
self.pf.y.plane('z').mpl_vector() # scalar field
with pytest.raises(ValueError) as excinfo:
# wrong color field
self.pf.plane('z').mpl_vector(color_field=self.pf)
plt.close('all')
def test_mpl(self):
# No axes
self.pf.plane('x', n=(3, 4)).mpl()
# Axes
fig = plt.figure()
ax = fig.add_subplot(111)
self.pf.x.plane('x', n=(3, 4)).mpl(ax=ax)
# All arguments for a vector field
self.pf.plane('x').mpl(figsize=(12, 6),
scalar_field=self.pf.plane('x').angle,
scalar_filter_field=self.pf.norm,
scalar_colorbar_label='something',
scalar_cmap='twilight',
vector_field=self.pf,
vector_color_field=self.pf.y,
vector_color=True,
vector_colorbar=True,
vector_colorbar_label='vector',
vector_cmap='hsv', vector_clim=(0, 1e6),
multiplier=1e-12)
# All arguments for a scalar field
self.pf.z.plane('x').mpl(figsize=(12, 6),
scalar_field=self.pf.x,
scalar_filter_field=self.pf.norm,
scalar_colorbar_label='something',
scalar_cmap='twilight',
vector_field=self.pf,
vector_color_field=self.pf.y,
vector_color=True,
vector_colorbar=True,
vector_colorbar_label='vector',
vector_cmap='hsv', vector_clim=(0, 1e6),
multiplier=1e-12)
# Saving plot
filename = 'testfigure.pdf'
with tempfile.TemporaryDirectory() as tmpdir:
tmpfilename = os.path.join(tmpdir, filename)
self.pf.plane('x', n=(3, 4)).mpl(filename=tmpfilename)
# Exception
with pytest.raises(ValueError):
self.pf.mpl()
plt.close('all')
def test_k3d_nonzero(self):
# Default
self.pf.norm.k3d_nonzero()
# Color
self.pf.x.k3d_nonzero(color=0xff00ff)
# Multiplier
self.pf.x.k3d_nonzero(color=0xff00ff, multiplier=1e-6)
# Interactive field
self.pf.x.plane('z').k3d_nonzero(color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
# kwargs
self.pf.x.plane('z').k3d_nonzero(color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf,
wireframe=True)
# Plot
plot = k3d.plot()
plot.display()
self.pf.x.plane(z=0).k3d_nonzero(plot=plot,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
# Continuation for interactive plot testing.
self.pf.x.plane(z=1e-9).k3d_nonzero(plot=plot,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
assert len(plot.objects) == 2
with pytest.raises(ValueError) as excinfo:
self.pf.k3d_nonzero()
def test_k3d_scalar(self):
# Default
self.pf.y.k3d_scalar()
# Filter field
self.pf.y.k3d_scalar(filter_field=self.pf.norm)
# Colormap
self.pf.x.k3d_scalar(filter_field=self.pf.norm,
cmap='hsv',
color=0xff00ff)
# Multiplier
self.pf.y.k3d_scalar(filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6)
# Interactive field
self.pf.y.k3d_scalar(filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
# kwargs
self.pf.y.k3d_scalar(filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf,
wireframe=True)
# Plot
plot = k3d.plot()
plot.display()
self.pf.y.plane(z=0).k3d_scalar(plot=plot,
filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
# Continuation for interactive plot testing.
self.pf.y.plane(z=1e-9).k3d_scalar(plot=plot,
filter_field=self.pf.norm,
color=0xff00ff,
multiplier=1e-6,
interactive_field=self.pf)
assert len(plot.objects) == 2
# Exceptions
with pytest.raises(ValueError) as excinfo:
self.pf.k3d_scalar()
with pytest.raises(ValueError):
self.pf.x.k3d_scalar(filter_field=self.pf) # filter field dim=3
def test_k3d_vector(self):
# Default
self.pf.k3d_vector()
# Color field
self.pf.k3d_vector(color_field=self.pf.x)
# Colormap
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv')
# Head size
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3)
# Points
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False)
# Point size
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False,
point_size=1)
# Vector multiplier
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False,
point_size=1,
vector_multiplier=1)
# Multiplier
self.pf.k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False,
point_size=1,
vector_multiplier=1,
multiplier=1e-6)
# Interactive field
self.pf.plane('z').k3d_vector(color_field=self.pf.norm,
cmap='hsv',
head_size=3,
points=False,
point_size=1,
vector_multiplier=1,
multiplier=1e-6,
interactive_field=self.pf)
# Plot
plot = k3d.plot()
plot.display()
self.pf.plane(z=0).k3d_vector(plot=plot, interactive_field=self.pf)
# Continuation for interactive plot testing.
self.pf.plane(z=1e-9).k3d_vector(plot=plot, interactive_field=self.pf)
assert len(plot.objects) == 3
# Exceptions
with pytest.raises(ValueError) as excinfo:
self.pf.x.k3d_vector()
with pytest.raises(ValueError):
            self.pf.k3d_vector(color_field=self.pf)  # color field dim=3
def test_plot_large_sample(self):
p1 = (0, 0, 0)
p2 = (50e9, 50e9, 50e9)
cell = (25e9, 25e9, 25e9)
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
value = (1e6, 1e6, 1e6)
field = df.Field(mesh, dim=3, value=value)
field.plane('z').mpl()
field.norm.k3d_nonzero()
field.x.k3d_scalar()
field.k3d_vector()
| 0 | 0 |
d991aedad470b351e70cf5b10b085c74cc95e474 | 462 | py | Python | env/Lib/site-packages/values/__init__.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | [
"MIT"
] | 4 | 2022-02-06T00:54:58.000Z | 2022-02-25T12:44:43.000Z | env/Lib/site-packages/values/__init__.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | [
"MIT"
] | 3 | 2021-03-23T04:58:47.000Z | 2021-04-02T02:40:54.000Z | env/Lib/site-packages/values/__init__.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | [
"MIT"
] | 1 | 2022-02-08T13:43:20.000Z | 2022-02-08T13:43:20.000Z | __all__ = ['get']
try:
    from collections.abc import Iterable  # Python 3.3+; collections.Iterable was removed in Python 3.10
except ImportError:
    from collections import Iterable  # Python 2 fallback
def _iterable(obj):
    return isinstance(obj, Iterable)
def _string(value):
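    # basestring only exists on Python 2; the NameError fallback handles Python 3, where str is the sole string type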
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
def get(input):
"""return a list with input values or [] if input is None"""
if input is None:
return []
if not _iterable(input) or _string(input):
return [input]
return list(input)
| 18.48 | 64 | 0.645022 | __all__ = ['get']
try:
    from collections.abc import Iterable  # Python 3.3+; collections.Iterable was removed in Python 3.10
except ImportError:
    from collections import Iterable  # Python 2 fallback
def _iterable(obj):
    return isinstance(obj, Iterable)
def _string(value):
try:
return isinstance(value, basestring)
except NameError:
return isinstance(value, str)
def get(input):
"""return a list with input values or [] if input is None"""
if input is None:
return []
if not _iterable(input) or _string(input):
return [input]
return list(input)
| 0 | 0 |
d0e19b396bd5c3861e79601ace321dbbd96d9384 | 165 | py | Python | vnpy/app/strategy_reviewer/ui/__init__.py | xyh888/vnpy | 7b51716928ab9574f171a2eda190b37b4f393bb1 | [
"MIT"
] | 5 | 2019-05-24T05:19:55.000Z | 2020-07-29T13:21:49.000Z | vnpy/app/strategy_reviewer/ui/__init__.py | xyh888/vnpy | 7b51716928ab9574f171a2eda190b37b4f393bb1 | [
"MIT"
] | null | null | null | vnpy/app/strategy_reviewer/ui/__init__.py | xyh888/vnpy | 7b51716928ab9574f171a2eda190b37b4f393bb1 | [
"MIT"
] | 2 | 2019-07-01T02:14:04.000Z | 2020-07-29T13:21:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/20 0020 16:49
# @Author : Hadrianl
# @File : __init__.py
from .widget import StrategyReviewer | 23.571429 | 36 | 0.630303 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/20 0020 16:49
# @Author : Hadrianl
# @File : __init__.py
from .widget import StrategyReviewer | 0 | 0 |
4a04e22adafbd1373a9d9fc82325fd3d15005b8b | 647 | py | Python | Lesson 13.gf/xml_Leader2.py | gfoo003/programming-together | 225e0a2255dd8da1f1ef32d2a88deea27c050f10 | [
"MIT"
] | null | null | null | Lesson 13.gf/xml_Leader2.py | gfoo003/programming-together | 225e0a2255dd8da1f1ef32d2a88deea27c050f10 | [
"MIT"
] | null | null | null | Lesson 13.gf/xml_Leader2.py | gfoo003/programming-together | 225e0a2255dd8da1f1ef32d2a88deea27c050f10 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
xml_string = '''
<stuff>
<users>
<user x = "2">
<id>001</id>
<name>Chuck</name>
</user>
<user x = "7">
<id>007</id>
<name>Brent</name>
</user>
</users>
</stuff>
'''
root_stuff = ET.fromstring(xml_string)
#don't usually refer to root element
user_elements = root_stuff.findall('users/user')
print ('user count:', len(user_elements))
for user in user_elements:
print('name:', user.find('name').text)
print('id:', user.find('id').text)
print('attribute(x):', user.get('x'))
    # to read an XML attribute, use get()
| 23.107143 | 48 | 0.565688 | import xml.etree.ElementTree as ET
xml_string = '''
<stuff>
<users>
<user x = "2">
<id>001</id>
<name>Chuck</name>
</user>
<user x = "7">
<id>007</id>
<name>Brent</name>
</user>
</users>
</stuff>
'''
root_stuff = ET.fromstring(xml_string)
#don't usually refer to root element
user_elements = root_stuff.findall('users/user')
print ('user count:', len(user_elements))
for user in user_elements:
print('name:', user.find('name').text)
print('id:', user.find('id').text)
print('attribute(x):', user.get('x'))
    # to read an attribute value, use get()
| 0 | 0 |
bebc974c59298f013c68b5d5e434ba4b2d82a0a8 | 213 | py | Python | 第4章/program/Chapter_4_dummy.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | [
"MIT"
] | 274 | 2018-10-01T11:07:25.000Z | 2022-03-17T13:48:45.000Z | 第4章/program/Chapter_4_dummy.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | [
"MIT"
] | 6 | 2019-02-28T14:18:21.000Z | 2022-03-02T14:57:39.000Z | 第4章/program/Chapter_4_dummy.py | kingname/SourceCodeOfBook | ab7275108994dca564905818b678bbd2f771c18e | [
"MIT"
] | 110 | 2018-10-16T06:08:37.000Z | 2022-03-16T08:19:29.000Z | from multiprocessing.dummy import Pool
def calc_power2(num):
return num * num
pool = Pool(3)
origin_num = [x for x in range(10)]
result = pool.map(calc_power2, origin_num)
print(f'The squares of 1-10 are: {result}')
| 16.384615 | 42 | 0.71831 | from multiprocessing.dummy import Pool
def calc_power2(num):
return num * num
pool = Pool(3)
origin_num = [x for x in range(10)]
result = pool.map(calc_power2, origin_num)
print(f'The squares of 1-10 are: {result}')
| 27 | 0 |
9c1b437a67fd15632bb77976584935abcfb546e4 | 92 | py | Python | traf_stat/apps.py | bashmak/djing | 8cc0c670600254d288178acd47965f7b3db6856e | [
"Unlicense"
] | 23 | 2017-04-27T20:13:22.000Z | 2022-03-16T12:47:29.000Z | traf_stat/apps.py | bashmak/djing | 8cc0c670600254d288178acd47965f7b3db6856e | [
"Unlicense"
] | 2 | 2017-04-04T15:03:12.000Z | 2021-01-26T15:30:57.000Z | traf_stat/apps.py | bashmak/djing | 8cc0c670600254d288178acd47965f7b3db6856e | [
"Unlicense"
] | 13 | 2017-08-22T16:00:03.000Z | 2022-03-20T03:12:15.000Z | from django.apps import AppConfig
class TrafStatConfig(AppConfig):
name = 'traf_stat'
| 15.333333 | 33 | 0.76087 | from django.apps import AppConfig
class TrafStatConfig(AppConfig):
name = 'traf_stat'
| 0 | 0 |
6996b5b815f2d10dc544bc52eb21ec8c9cd0c496 | 1,424 | py | Python | entries/views.py | acdh-oeaw/vhioe | 83c8bce83d7cb21150f404409477d2cd1c7ee240 | [
"MIT"
] | null | null | null | entries/views.py | acdh-oeaw/vhioe | 83c8bce83d7cb21150f404409477d2cd1c7ee240 | [
"MIT"
] | 10 | 2020-02-11T23:56:16.000Z | 2021-12-13T19:45:38.000Z | entries/views.py | acdh-oeaw/vhioe | 83c8bce83d7cb21150f404409477d2cd1c7ee240 | [
"MIT"
] | null | null | null | from django.core.urlresolvers import reverse
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from .models import Eintrag
from .forms import EintragForm
class EintragDetailView(DetailView):
model = Eintrag
class EintragListView(ListView):
model = Eintrag
class EintragCreate(CreateView):
model = Eintrag
template_name_suffix = '_create'
form_class = EintragForm
success_url = '.'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EintragCreate, self).dispatch(*args, **kwargs)
class EintragUpdate(UpdateView):
model = Eintrag
form_class = EintragForm
template_name_suffix = '_create'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EintragUpdate, self).dispatch(*args, **kwargs)
class EintragDelete(DeleteView):
model = Eintrag
template_name = 'vocabs/confirm_delete.html'
success_url = reverse_lazy('browsing:browse_entries')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EintragDelete, self).dispatch(*args, **kwargs)
| 26.867925 | 72 | 0.752809 | from django.core.urlresolvers import reverse
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from .models import Eintrag
from .forms import EintragForm
class EintragDetailView(DetailView):
model = Eintrag
class EintragListView(ListView):
model = Eintrag
class EintragCreate(CreateView):
model = Eintrag
template_name_suffix = '_create'
form_class = EintragForm
success_url = '.'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EintragCreate, self).dispatch(*args, **kwargs)
class EintragUpdate(UpdateView):
model = Eintrag
form_class = EintragForm
template_name_suffix = '_create'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EintragUpdate, self).dispatch(*args, **kwargs)
class EintragDelete(DeleteView):
model = Eintrag
template_name = 'vocabs/confirm_delete.html'
success_url = reverse_lazy('browsing:browse_entries')
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EintragDelete, self).dispatch(*args, **kwargs)
| 0 | 0 |
cee8341ee37a27bddc6bb669594ab3c522880752 | 11,688 | py | Python | pystiche_papers/li_wand_2016/_loss.py | pystiche/papers | 0d8179dc51f6eda0b27fa525dc0b86b866bc88e1 | [
"BSD-3-Clause"
] | 1 | 2021-09-30T09:30:07.000Z | 2021-09-30T09:30:07.000Z | pystiche_papers/li_wand_2016/_loss.py | pystiche/papers | 0d8179dc51f6eda0b27fa525dc0b86b866bc88e1 | [
"BSD-3-Clause"
] | 20 | 2021-10-10T13:37:25.000Z | 2022-03-31T07:31:45.000Z | pystiche_papers/li_wand_2016/_loss.py | pystiche/papers | 0d8179dc51f6eda0b27fa525dc0b86b866bc88e1 | [
"BSD-3-Clause"
] | null | null | null | from typing import Any, Optional, Tuple, Union
import torch
from torch.nn.functional import mse_loss
import pystiche
import pystiche.loss.functional as F
from pystiche import enc, loss
from pystiche_papers.utils import HyperParameters
from ._utils import (
extract_normalized_patches2d,
hyper_parameters as _hyper_parameters,
multi_layer_encoder as _multi_layer_encoder,
target_transforms as _target_transforms,
)
__all__ = [
"FeatureReconstructionLoss",
"content_loss",
"MRFLoss",
"style_loss",
"TotalVariationLoss",
"regularization",
"perceptual_loss",
]
class FeatureReconstructionLoss(loss.FeatureReconstructionLoss):
r"""Feature reconstruction loss from :cite:`LW2016`.
Args:
encoder: Encoder used to encode the input.
impl_params: If ``False``, calculate the score with the squared error (SE)
instead of the mean squared error (MSE).
**feature_reconstruction_loss_kwargs: Additional parameters of a
:class:`pystiche.loss.FeatureReconstructionLoss`.
.. seealso::
:class:`pystiche.loss.FeatureReconstructionLoss`
"""
def __init__(
self,
encoder: enc.Encoder,
impl_params: bool = True,
**feature_reconstruction_loss_kwargs: Any,
):
super().__init__(encoder, **feature_reconstruction_loss_kwargs)
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/content.lua#L15
# nn.MSECriterion() was used as criterion to calculate the content loss, which
# by default uses reduction="mean"
self.loss_reduction = "mean" if impl_params else "sum"
def calculate_score(
self,
input_repr: torch.Tensor,
target_repr: torch.Tensor,
ctx: Optional[torch.Tensor],
) -> torch.Tensor:
return mse_loss(input_repr, target_repr, reduction=self.loss_reduction)
def content_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> FeatureReconstructionLoss:
r"""Content loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
:class:`pystiche_papers.li_wand_2016.FeatureReconstructionLoss`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters(impl_params=impl_params)
return FeatureReconstructionLoss(
multi_layer_encoder.extract_encoder(hyper_parameters.content_loss.layer),
impl_params=impl_params,
score_weight=hyper_parameters.content_loss.score_weight,
)
class MRFLoss(loss.MRFLoss):
r"""MRF loss from :cite:`LW2016`.
Args:
encoder: Encoder used to encode the input.
patch_size: Spatial size of the neural patches.
impl_params: If ``True``, normalize the gradient of the neural patches. If
``False``, use a score correction factor of 1/2.
**mrf_loss_kwargs: Additional parameters of a :class:`pystiche.loss.MRFLoss`.
In contrast to :class:`pystiche.loss.MRFLoss`, the score is calculated with the
squared error (SE) instead of the mean squared error (MSE).
.. seealso::
- :class:`pystiche.loss.MRFLoss`
- :func:`pystiche_papers.li_wand_2016.extract_normalized_patches2d`
"""
def __init__(
self,
encoder: enc.Encoder,
patch_size: Union[int, Tuple[int, int]],
impl_params: bool = True,
**mrf_loss_kwargs: Any,
):
super().__init__(encoder, patch_size, **mrf_loss_kwargs)
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L221
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L224
# They use normalized patches instead of the unnormalized patches described in
# the paper.
self.normalize_patches_grad = impl_params
self.loss_reduction = "sum"
# The score correction factor is not visible in the reference implementation
# of the original authors, since the calculation is performed with respect to
# the gradient and not the score. Roughly speaking, since the calculation
# comprises a *squared* distance, we need a factor of 1/2 in the forward pass.
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L220
self.score_correction_factor = 1.0 / 2.0 if impl_params else 1.0
def enc_to_repr(self, enc: torch.Tensor, is_guided: bool) -> torch.Tensor:
if self.normalize_patches_grad:
repr = extract_normalized_patches2d(enc, self.patch_size, self.stride)
else:
repr = pystiche.extract_patches2d(enc, self.patch_size, self.stride)
if not is_guided:
return repr
return self._guide_repr(repr)
def calculate_score(
self,
input_repr: torch.Tensor,
target_repr: torch.Tensor,
ctx: Optional[torch.Tensor],
) -> torch.Tensor:
score = F.mrf_loss(
input_repr, target_repr, reduction=self.loss_reduction, batched_input=True
)
return score * self.score_correction_factor
def style_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> loss.MultiLayerEncodingLoss:
r"""Style loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :class:`pystiche_papers.li_wand_2016.MRFLoss`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters(impl_params=impl_params)
def encoding_loss_fn(encoder: enc.Encoder, layer_weight: float) -> MRFLoss:
return MRFLoss(
encoder,
hyper_parameters.style_loss.patch_size, # type: ignore[union-attr]
impl_params=impl_params,
stride=hyper_parameters.style_loss.stride, # type: ignore[union-attr]
target_transforms=_target_transforms(
impl_params=impl_params, hyper_parameters=hyper_parameters
),
score_weight=layer_weight,
)
return loss.MultiLayerEncodingLoss(
multi_layer_encoder,
hyper_parameters.style_loss.layers,
encoding_loss_fn,
layer_weights=hyper_parameters.style_loss.layer_weights,
score_weight=hyper_parameters.style_loss.score_weight,
)
class TotalVariationLoss(loss.TotalVariationLoss):
r"""Total variation loss from :cite:`LW2016`.
Args:
impl_params: If ``False``, use a score correction factor of 1/2.
**total_variation_loss_kwargs: Additional parameters of a
:class:`pystiche.loss.TotalVariationLoss`.
    In contrast to :class:`pystiche.loss.TotalVariationLoss`, the score is
calculated with the squared error (SE) instead of the mean squared error (MSE).
.. seealso::
- :class:`pystiche.loss.TotalVariationLoss`
"""
def __init__(self, impl_params: bool = True, **total_variation_loss_kwargs: Any):
super().__init__(**total_variation_loss_kwargs)
self.loss_reduction = "sum"
# The score correction factor is not visible in the reference implementation
# of the original authors, since the calculation is performed with respect to
# the gradient and not the score. Roughly speaking, since the calculation
# comprises a *squared* distance, we need a factor of 1/2 in the forward pass.
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/tv.lua#L20-L30
self.score_correction_factor = 1.0 / 2.0 if impl_params else 1.0
def calculate_score(self, input_repr: torch.Tensor) -> torch.Tensor:
score = F.total_variation_loss(
input_repr, exponent=self.exponent, reduction=self.loss_reduction
)
return score * self.score_correction_factor
def regularization(
impl_params: bool = True,
hyper_parameters: Optional[HyperParameters] = None,
) -> TotalVariationLoss:
r"""Regularization from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :class:`pystiche_papers.li_wand_2016.TotalVariationLoss`
"""
if hyper_parameters is None:
hyper_parameters = _hyper_parameters()
return TotalVariationLoss(
impl_params=impl_params,
score_weight=hyper_parameters.regularization.score_weight,
)
def perceptual_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> loss.PerceptualLoss:
r"""Perceptual loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :func:`pystiche_papers.li_wand_2016.content_loss`
- :func:`pystiche_papers.li_wand_2016.style_loss`
- :func:`pystiche_papers.li_wand_2016.regularization`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters()
return loss.PerceptualLoss(
content_loss(
impl_params=impl_params,
multi_layer_encoder=multi_layer_encoder,
hyper_parameters=hyper_parameters,
),
style_loss(
impl_params=impl_params,
multi_layer_encoder=multi_layer_encoder,
hyper_parameters=hyper_parameters,
),
regularization(impl_params=impl_params, hyper_parameters=hyper_parameters),
)
| 37.461538 | 110 | 0.693703 | from typing import Any, Optional, Tuple, Union
import torch
from torch.nn.functional import mse_loss
import pystiche
import pystiche.loss.functional as F
from pystiche import enc, loss
from pystiche_papers.utils import HyperParameters
from ._utils import (
extract_normalized_patches2d,
hyper_parameters as _hyper_parameters,
multi_layer_encoder as _multi_layer_encoder,
target_transforms as _target_transforms,
)
__all__ = [
"FeatureReconstructionLoss",
"content_loss",
"MRFLoss",
"style_loss",
"TotalVariationLoss",
"regularization",
"perceptual_loss",
]
class FeatureReconstructionLoss(loss.FeatureReconstructionLoss):
r"""Feature reconstruction loss from :cite:`LW2016`.
Args:
encoder: Encoder used to encode the input.
impl_params: If ``False``, calculate the score with the squared error (SE)
instead of the mean squared error (MSE).
**feature_reconstruction_loss_kwargs: Additional parameters of a
:class:`pystiche.loss.FeatureReconstructionLoss`.
.. seealso::
:class:`pystiche.loss.FeatureReconstructionLoss`
"""
def __init__(
self,
encoder: enc.Encoder,
impl_params: bool = True,
**feature_reconstruction_loss_kwargs: Any,
):
super().__init__(encoder, **feature_reconstruction_loss_kwargs)
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/content.lua#L15
# nn.MSECriterion() was used as criterion to calculate the content loss, which
# by default uses reduction="mean"
self.loss_reduction = "mean" if impl_params else "sum"
def calculate_score(
self,
input_repr: torch.Tensor,
target_repr: torch.Tensor,
ctx: Optional[torch.Tensor],
) -> torch.Tensor:
return mse_loss(input_repr, target_repr, reduction=self.loss_reduction)
def content_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> FeatureReconstructionLoss:
r"""Content loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
:class:`pystiche_papers.li_wand_2016.FeatureReconstructionLoss`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters(impl_params=impl_params)
return FeatureReconstructionLoss(
multi_layer_encoder.extract_encoder(hyper_parameters.content_loss.layer),
impl_params=impl_params,
score_weight=hyper_parameters.content_loss.score_weight,
)
class MRFLoss(loss.MRFLoss):
r"""MRF loss from :cite:`LW2016`.
Args:
encoder: Encoder used to encode the input.
patch_size: Spatial size of the neural patches.
impl_params: If ``True``, normalize the gradient of the neural patches. If
``False``, use a score correction factor of 1/2.
**mrf_loss_kwargs: Additional parameters of a :class:`pystiche.loss.MRFLoss`.
In contrast to :class:`pystiche.loss.MRFLoss`, the score is calculated with the
squared error (SE) instead of the mean squared error (MSE).
.. seealso::
- :class:`pystiche.loss.MRFLoss`
- :func:`pystiche_papers.li_wand_2016.extract_normalized_patches2d`
"""
def __init__(
self,
encoder: enc.Encoder,
patch_size: Union[int, Tuple[int, int]],
impl_params: bool = True,
**mrf_loss_kwargs: Any,
):
super().__init__(encoder, patch_size, **mrf_loss_kwargs)
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L221
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L224
# They use normalized patches instead of the unnormalized patches described in
# the paper.
self.normalize_patches_grad = impl_params
self.loss_reduction = "sum"
# The score correction factor is not visible in the reference implementation
# of the original authors, since the calculation is performed with respect to
# the gradient and not the score. Roughly speaking, since the calculation
# comprises a *squared* distance, we need a factor of 1/2 in the forward pass.
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/mrf.lua#L220
self.score_correction_factor = 1.0 / 2.0 if impl_params else 1.0
def enc_to_repr(self, enc: torch.Tensor, is_guided: bool) -> torch.Tensor:
if self.normalize_patches_grad:
repr = extract_normalized_patches2d(enc, self.patch_size, self.stride)
else:
repr = pystiche.extract_patches2d(enc, self.patch_size, self.stride)
if not is_guided:
return repr
return self._guide_repr(repr)
def calculate_score(
self,
input_repr: torch.Tensor,
target_repr: torch.Tensor,
ctx: Optional[torch.Tensor],
) -> torch.Tensor:
score = F.mrf_loss(
input_repr, target_repr, reduction=self.loss_reduction, batched_input=True
)
return score * self.score_correction_factor
def style_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> loss.MultiLayerEncodingLoss:
r"""Style loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :class:`pystiche_papers.li_wand_2016.MRFLoss`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters(impl_params=impl_params)
def encoding_loss_fn(encoder: enc.Encoder, layer_weight: float) -> MRFLoss:
return MRFLoss(
encoder,
hyper_parameters.style_loss.patch_size, # type: ignore[union-attr]
impl_params=impl_params,
stride=hyper_parameters.style_loss.stride, # type: ignore[union-attr]
target_transforms=_target_transforms(
impl_params=impl_params, hyper_parameters=hyper_parameters
),
score_weight=layer_weight,
)
return loss.MultiLayerEncodingLoss(
multi_layer_encoder,
hyper_parameters.style_loss.layers,
encoding_loss_fn,
layer_weights=hyper_parameters.style_loss.layer_weights,
score_weight=hyper_parameters.style_loss.score_weight,
)
class TotalVariationLoss(loss.TotalVariationLoss):
r"""Total variation loss from :cite:`LW2016`.
Args:
impl_params: If ``False``, use a score correction factor of 1/2.
**total_variation_loss_kwargs: Additional parameters of a
:class:`pystiche.loss.TotalVariationLoss`.
    In contrast to :class:`pystiche.loss.TotalVariationLoss`, the score is
calculated with the squared error (SE) instead of the mean squared error (MSE).
.. seealso::
- :class:`pystiche.loss.TotalVariationLoss`
"""
def __init__(self, impl_params: bool = True, **total_variation_loss_kwargs: Any):
super().__init__(**total_variation_loss_kwargs)
self.loss_reduction = "sum"
# The score correction factor is not visible in the reference implementation
# of the original authors, since the calculation is performed with respect to
# the gradient and not the score. Roughly speaking, since the calculation
# comprises a *squared* distance, we need a factor of 1/2 in the forward pass.
# https://github.com/pmeier/CNNMRF/blob/fddcf4d01e2a6ce201059d8bc38597f74a09ba3f/mylib/tv.lua#L20-L30
self.score_correction_factor = 1.0 / 2.0 if impl_params else 1.0
def calculate_score(self, input_repr: torch.Tensor) -> torch.Tensor:
score = F.total_variation_loss(
input_repr, exponent=self.exponent, reduction=self.loss_reduction
)
return score * self.score_correction_factor
def regularization(
impl_params: bool = True,
hyper_parameters: Optional[HyperParameters] = None,
) -> TotalVariationLoss:
r"""Regularization from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :class:`pystiche_papers.li_wand_2016.TotalVariationLoss`
"""
if hyper_parameters is None:
hyper_parameters = _hyper_parameters()
return TotalVariationLoss(
impl_params=impl_params,
score_weight=hyper_parameters.regularization.score_weight,
)
def perceptual_loss(
impl_params: bool = True,
multi_layer_encoder: Optional[enc.MultiLayerEncoder] = None,
hyper_parameters: Optional[HyperParameters] = None,
) -> loss.PerceptualLoss:
r"""Perceptual loss from :cite:`LW2016`.
Args:
impl_params: Switch the behavior and hyper-parameters between the reference
implementation of the original authors and what is described in the paper.
For details see :ref:`here <li_wand_2016-impl_params>`.
multi_layer_encoder: Pretrained multi-layer encoder. If
omitted, :func:`~pystiche_papers.li_wand_2016.multi_layer_encoder` is used.
hyper_parameters: Hyper parameters. If omitted,
:func:`~pystiche_papers.li_wand_2016.hyper_parameters` is used.
.. seealso::
- :func:`pystiche_papers.li_wand_2016.content_loss`
- :func:`pystiche_papers.li_wand_2016.style_loss`
- :func:`pystiche_papers.li_wand_2016.regularization`
"""
if multi_layer_encoder is None:
multi_layer_encoder = _multi_layer_encoder()
if hyper_parameters is None:
hyper_parameters = _hyper_parameters()
return loss.PerceptualLoss(
content_loss(
impl_params=impl_params,
multi_layer_encoder=multi_layer_encoder,
hyper_parameters=hyper_parameters,
),
style_loss(
impl_params=impl_params,
multi_layer_encoder=multi_layer_encoder,
hyper_parameters=hyper_parameters,
),
regularization(impl_params=impl_params, hyper_parameters=hyper_parameters),
)
| 0 | 0 |
1b3186c99a60818dc9d24b438538877520aa1347 | 2,640 | py | Python | tests/conftest.py | Z2PackDev/bands_inspect | 76fdb0130d9ff64c738365a1911bc61f035927f2 | [
"Apache-2.0"
] | 1 | 2017-12-19T07:21:56.000Z | 2017-12-19T07:21:56.000Z | tests/conftest.py | Z2PackDev/bands-inspect | 76fdb0130d9ff64c738365a1911bc61f035927f2 | [
"Apache-2.0"
] | 3 | 2018-02-27T09:07:46.000Z | 2018-03-06T12:26:04.000Z | tests/conftest.py | Z2PackDev/bands_inspect | 76fdb0130d9ff64c738365a1911bc61f035927f2 | [
"Apache-2.0"
] | 1 | 2017-12-19T07:21:55.000Z | 2017-12-19T07:21:55.000Z | # -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <[email protected]>
"""
Configuration file for the pytest tests.
"""
import os
import json
import pytest
import numpy as np
import bands_inspect as bi
import parameters # pylint: disable=wrong-import-order
#--------------------------FIXTURES-------------------------------------#
@pytest.fixture
def test_name(request):
"""Returns module_name.function_name for a given test"""
return request.module.__name__ + '/' + request._parent_request._pyfuncitem.name # pylint: disable=protected-access
@pytest.fixture
def compare_data(request, test_name, scope="session"): # pylint: disable=unused-argument,redefined-outer-name
"""Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function."""
def inner(compare_fct, data, tag=None):
full_name = test_name + (tag or '')
# get rid of json-specific quirks
# store as string because I cannot add the decoder to the pytest cache
data_str = json.dumps(data)
data = json.loads(data_str)
val = json.loads(request.config.cache.get(full_name, 'null'))
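        # On the first run there is no cached reference yet: store the data and raise, so later runs compare against it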
if val is None:
request.config.cache.set(full_name, data_str)
raise ValueError('Reference data does not exist.')
assert compare_fct(val, data)
return inner
@pytest.fixture
def compare_equal(compare_data): # pylint: disable=redefined-outer-name
"""
    Returns a function which checks that the given data is equal to the stored reference.
"""
return lambda data, tag=None: compare_data(lambda x, y: x == y, data, tag)
@pytest.fixture
def assert_equal():
"""
Returns a function which checks that two bands-inspect object instances are equal.
"""
def inner(obj1, obj2):
if isinstance(obj1, bi.kpoints.KpointsBase):
np.testing.assert_equal(
obj1.kpoints_explicit, obj2.kpoints_explicit
)
elif isinstance(obj1, bi.eigenvals.EigenvalsData):
np.testing.assert_equal(
obj1.kpoints.kpoints_explicit, obj2.kpoints.kpoints_explicit
)
np.testing.assert_equal(obj1.eigenvals, obj2.eigenvals)
else:
raise ValueError("Unknown type {}".format(type(obj1)))
return inner
@pytest.fixture
def sample():
"""
Returns the absolute path of the sample with a given name.
"""
def inner(name):
return os.path.join(parameters.SAMPLES_DIR, name)
return inner
| 30.697674 | 168 | 0.659848 | # -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <[email protected]>
"""
Configuration file for the pytest tests.
"""
import os
import json
import pytest
import numpy as np
import bands_inspect as bi
import parameters # pylint: disable=wrong-import-order
#--------------------------FIXTURES-------------------------------------#
@pytest.fixture
def test_name(request):
"""Returns module_name.function_name for a given test"""
return request.module.__name__ + '/' + request._parent_request._pyfuncitem.name # pylint: disable=protected-access
@pytest.fixture
def compare_data(request, test_name, scope="session"): # pylint: disable=unused-argument,redefined-outer-name
"""Returns a function which either saves some data to a file or (if that file exists already) compares it to pre-existing data using a given comparison function."""
def inner(compare_fct, data, tag=None):
full_name = test_name + (tag or '')
# get rid of json-specific quirks
# store as string because I cannot add the decoder to the pytest cache
data_str = json.dumps(data)
data = json.loads(data_str)
val = json.loads(request.config.cache.get(full_name, 'null'))
if val is None:
request.config.cache.set(full_name, data_str)
raise ValueError('Reference data does not exist.')
assert compare_fct(val, data)
return inner
@pytest.fixture
def compare_equal(compare_data): # pylint: disable=redefined-outer-name
"""
    Returns a function which checks that the given data is equal to the stored reference.
"""
return lambda data, tag=None: compare_data(lambda x, y: x == y, data, tag)
@pytest.fixture
def assert_equal():
"""
Returns a function which checks that two bands-inspect object instances are equal.
"""
def inner(obj1, obj2):
if isinstance(obj1, bi.kpoints.KpointsBase):
np.testing.assert_equal(
obj1.kpoints_explicit, obj2.kpoints_explicit
)
elif isinstance(obj1, bi.eigenvals.EigenvalsData):
np.testing.assert_equal(
obj1.kpoints.kpoints_explicit, obj2.kpoints.kpoints_explicit
)
np.testing.assert_equal(obj1.eigenvals, obj2.eigenvals)
else:
raise ValueError("Unknown type {}".format(type(obj1)))
return inner
@pytest.fixture
def sample():
"""
Returns the absolute path of the sample with a given name.
"""
def inner(name):
return os.path.join(parameters.SAMPLES_DIR, name)
return inner
| 0 | 0 |
a365ad738e2f0d42460bbe15195bfcc181ad7c09 | 3,518 | py | Python | src/tensorrt/tools/caffe_engine/call_engine_to_infer_all.py | aimuch/AIEnvConfig | 4ccd54e9c601e8c91efebcec1a50115d75d0cf96 | [
"MIT"
] | 250 | 2019-06-14T16:12:20.000Z | 2022-03-27T09:56:26.000Z | src/tensorrt/tools/caffe_engine/call_engine_to_infer_all.py | aimuch/AIEnvConfig | 4ccd54e9c601e8c91efebcec1a50115d75d0cf96 | [
"MIT"
] | 6 | 2018-08-10T07:15:39.000Z | 2018-10-23T01:51:17.000Z | src/tensorrt/tools/caffe_engine/call_engine_to_infer_all.py | aimuch/AIEnvConfig | 4ccd54e9c601e8c91efebcec1a50115d75d0cf96 | [
"MIT"
] | 41 | 2019-08-16T13:42:13.000Z | 2022-02-23T03:38:09.000Z | import os
# import tensorflow as tf
import tensorrt as trt
from tensorrt.parsers import uffparser
import pycuda.driver as cuda
# import uff
import cv2
import numpy as np
from tqdm import tqdm
TEST_PATH = "/media/andy/Data/DevWorkSpace/Projects/imageClassifier/data/test/"
LABEL = 0
ENGINE_PATH = "/home/andy/caffe/examples/mydata/slot_classifier/engine/px2_classifier.engine"
NET_INPUT_SHAPE = (256, 256)
NET_OUTPUT_SHAPE = 5
class_labels = ['error', 'half', 'invlb', 'invls', 'valid']
# Load Image
def load_image(img_path, net_input_shape):
img = cv2.resize(cv2.imread(img_path), net_input_shape)
# BGR -> RGB
#img = img[:,:, (2, 1, 0)]
## Method 1
# imgT = np.transpose(img, (2, 0, 1)) # c,w,h
# imgF = np.asarray(imgT, dtype=np.float32)
# mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
# imgS = np.subtract(imgF,mean)
## Method 2
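    # Subtract the per-channel Caffe mean in HWC order, then transpose to CHW for the engine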
imgF = np.asarray(img, dtype=np.float32)
mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
imgSS = np.subtract(imgF, mean)
imgS = np.transpose(imgSS, (2, 0, 1)) # CHW
# RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)
return np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous
def test_Loader(TEST_PATH, net_input_shape):
label_list = []
img_list = []
pair = []
folders = os.listdir(TEST_PATH)
for folder in folders:
folder_path = os.path.join(TEST_PATH, folder)
imgs = os.listdir(folder_path)
for img in tqdm(imgs):
img_path = os.path.join(folder_path, img)
img = load_image(img_path, net_input_shape)
label = class_labels.index(folder)
img_list.append(img)
label_list.append(label)
pair.append((img, label))
return pair, (img_list, label_list)
imgTestData = test_Loader(TEST_PATH, NET_INPUT_SHAPE)
# Load Engine file
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.ERROR)
engine = trt.utils.load_engine(G_LOGGER, ENGINE_PATH)
context = engine.create_execution_context()
runtime = trt.infer.create_infer_runtime(G_LOGGER)
# output = np.empty(1, dtype = np.float32)
# # Allocate device memory
# d_input = cuda.mem_alloc(1 * imgTestData[0][0][0].nbytes)
# d_output = cuda.mem_alloc(NET_OUTPUT_SHAPE * output.nbytes)
# bindings = [int(d_input), int(d_output)]
# stream = cuda.Stream()
predicts = []
pair = imgTestData[0]
for img, label in pair:
output = np.empty(NET_OUTPUT_SHAPE, dtype = np.float32)
    # Allocate device memory
d_input = cuda.mem_alloc(1 * img.nbytes)
d_output = cuda.mem_alloc(1 * output.nbytes)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
# Transfer input data to device
cuda.memcpy_htod_async(d_input, img, stream)
# Execute model
context.enqueue(1, bindings, stream.handle, None)
# Transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
    # Synchronize threads
stream.synchronize()
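    # Convert the raw network output to probabilities and take the arg-max as the predicted class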
softmax = np.exp(output) / np.sum(np.exp(output))
predict = np.argmax(softmax)
predicts.append(predict)
print("True = ",label, ", predict = ", predict, ", softmax = ", softmax)
grandTrue = np.array(imgTestData[1][1])
predicts = np.array(predicts)
error = predicts[predicts!=grandTrue]
print(imgTestData[1][1])
print("-------")
print(predicts)
print("-------")
print(len(error))
print((len(imgTestData[0])-len(error))/len(imgTestData[0])) | 30.327586 | 105 | 0.677658 | import os
# import tensorflow as tf
import tensorrt as trt
from tensorrt.parsers import uffparser
import pycuda.driver as cuda
# import uff
import cv2
import numpy as np
from tqdm import tqdm
TEST_PATH = "/media/andy/Data/DevWorkSpace/Projects/imageClassifier/data/test/"
LABEL = 0
ENGINE_PATH = "/home/andy/caffe/examples/mydata/slot_classifier/engine/px2_classifier.engine"
NET_INPUT_SHAPE = (256, 256)
NET_OUTPUT_SHAPE = 5
class_labels = ['error', 'half', 'invlb', 'invls', 'valid']
# Load Image
def load_image(img_path, net_input_shape):
img = cv2.resize(cv2.imread(img_path), net_input_shape)
# BGR -> RGB
#img = img[:,:, (2, 1, 0)]
## Method 1
# imgT = np.transpose(img, (2, 0, 1)) # c,w,h
# imgF = np.asarray(imgT, dtype=np.float32)
# mean = [[[88.159309]], [[97.966286]], [[103.66106]]] # Caffe image mean
# imgS = np.subtract(imgF,mean)
## Method 2
imgF = np.asarray(img, dtype=np.float32)
mean = [88.159309, 97.966286, 103.66106] # Caffe image mean
imgSS = np.subtract(imgF, mean)
imgS = np.transpose(imgSS, (2, 0, 1)) # CHW
# RGB_MEAN_PIXELS = np.array([88.159309, 97.966286, 103.66106]).reshape((1,1,1,3)).astype(np.float32)
return np.ascontiguousarray(imgS, dtype=np.float32) # avoid error: ndarray is not contiguous
def test_Loader(TEST_PATH, net_input_shape):
label_list = []
img_list = []
pair = []
folders = os.listdir(TEST_PATH)
for folder in folders:
folder_path = os.path.join(TEST_PATH, folder)
imgs = os.listdir(folder_path)
for img in tqdm(imgs):
img_path = os.path.join(folder_path, img)
img = load_image(img_path, net_input_shape)
label = class_labels.index(folder)
img_list.append(img)
label_list.append(label)
pair.append((img, label))
return pair, (img_list, label_list)
imgTestData = test_Loader(TEST_PATH, NET_INPUT_SHAPE)
# Load Engine file
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.ERROR)
engine = trt.utils.load_engine(G_LOGGER, ENGINE_PATH)
context = engine.create_execution_context()
runtime = trt.infer.create_infer_runtime(G_LOGGER)
# output = np.empty(1, dtype = np.float32)
# # Allocate device memory
# d_input = cuda.mem_alloc(1 * imgTestData[0][0][0].nbytes)
# d_output = cuda.mem_alloc(NET_OUTPUT_SHAPE * output.nbytes)
# bindings = [int(d_input), int(d_output)]
# stream = cuda.Stream()
predicts = []
pair = imgTestData[0]
for img, label in pair:
output = np.empty(NET_OUTPUT_SHAPE, dtype = np.float32)
    # Allocate device memory
d_input = cuda.mem_alloc(1 * img.nbytes)
d_output = cuda.mem_alloc(1 * output.nbytes)
bindings = [int(d_input), int(d_output)]
stream = cuda.Stream()
# Transfer input data to device
cuda.memcpy_htod_async(d_input, img, stream)
# Execute model
context.enqueue(1, bindings, stream.handle, None)
# Transfer predictions back
cuda.memcpy_dtoh_async(output, d_output, stream)
    # Synchronize threads
stream.synchronize()
softmax = np.exp(output) / np.sum(np.exp(output))
predict = np.argmax(softmax)
predicts.append(predict)
print("True = ",label, ", predict = ", predict, ", softmax = ", softmax)
grandTrue = np.array(imgTestData[1][1])
predicts = np.array(predicts)
error = predicts[predicts!=grandTrue]
print(imgTestData[1][1])
print("-------")
print(predicts)
print("-------")
print(len(error))
print((len(imgTestData[0])-len(error))/len(imgTestData[0])) | 0 | 0 |
3db8ec872b628c2d5573b83d71f828295df1aa7e | 2,054 | py | Python | machineLearning.py | z-Wind/EQOptimum | c046daec2c6218277a3fec9fa0c87bea0b30ff2f | [
"MIT"
] | null | null | null | machineLearning.py | z-Wind/EQOptimum | c046daec2c6218277a3fec9fa0c87bea0b30ff2f | [
"MIT"
] | null | null | null | machineLearning.py | z-Wind/EQOptimum | c046daec2c6218277a3fec9fa0c87bea0b30ff2f | [
"MIT"
] | null | null | null | import filters
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import freqz
from sklearn.neural_network import MLPRegressor
def filterModel(x):
# [fc, bandwidth, gain]
w_final = None
db_final = 0
fs = 44100
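    # Sum the dB responses of all peaking band-pass filters over a 20 Hz - 20 kHz frequency grid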
for fc, BW, gain in x:
b, a = filters.bandpass_peaking(fc=fc, gain=gain, BW=BW)
w, h = freqz(b, a, worN=np.linspace(np.pi*2/fs*20, np.pi*2/fs*20e3, 500))
db = 20 * np.log10(abs(h))
w_final = w
db_final += db
# plt.semilogx(w_final * fs / (2*np.pi), db_final)
return w_final*fs/(2*np.pi), db_final
def genXY(n, filtersNum):
total = n * filtersNum
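    # Draw random centre frequency, bandwidth and gain for every filter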
fc = np.random.uniform(20, 20e3, size=(total,1))
bw = np.random.uniform(100, 10000, size=(total,1))
gain = np.random.uniform(0, 20, size=(total,1))
Y = np.concatenate((fc,bw,gain), axis=1)
Y = Y.reshape(n, filtersNum, 3)
X = []
for paras in Y:
f, db = filterModel(paras)
X.append(db)
X = np.array(X)
Y = Y.reshape(n, filtersNum*3)
return X, Y
if __name__ == "__main__":
# Create a random dataset
# [fc, bandwidth, gain]
n = 100
filtersNum = 1
X, Y = genXY(n=n, filtersNum=filtersNum)
# Fit regression model
regr = MLPRegressor(hidden_layer_sizes=(10,), max_iter=10000)
regr.fit(X, Y)
print('train loss', regr.loss_)
# Predict
X_test, Y_test = genXY(n=n, filtersNum=filtersNum)
print('test loss', ((Y_test - regr.predict(X_test)) ** 2).mean())
# paras = [(1e4, 2500, 3), (300, 201, 10), (400, 600, 5), (600, 200, 8),
# (2000, 3500, 13), (6000, 4000, 3), (8500, 6000, 2.75),]
paras = [(1e4, 2500, 3),]
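    # The trained network maps a target magnitude response back to filter parameters; plot target vs. reconstruction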
f, db = filterModel(paras)
plt.semilogx(f, db, label="target", color='red')
y_pred = regr.predict([db])
f, db = filterModel(y_pred.reshape(filtersNum, 3))
plt.semilogx(f, db, label="NN")
plt.legend()
plt.show() | 27.026316 | 82 | 0.556962 | import filters
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import freqz
from sklearn.neural_network import MLPRegressor
def filterModel(x):
# [fc, bandwidth, gain]
w_final = None
db_final = 0
fs = 44100
for fc, BW, gain in x:
b, a = filters.bandpass_peaking(fc=fc, gain=gain, BW=BW)
w, h = freqz(b, a, worN=np.linspace(np.pi*2/fs*20, np.pi*2/fs*20e3, 500))
db = 20 * np.log10(abs(h))
w_final = w
db_final += db
# plt.semilogx(w_final * fs / (2*np.pi), db_final)
return w_final*fs/(2*np.pi), db_final
def genXY(n, filtersNum):
total = n * filtersNum
fc = np.random.uniform(20, 20e3, size=(total,1))
bw = np.random.uniform(100, 10000, size=(total,1))
gain = np.random.uniform(0, 20, size=(total,1))
Y = np.concatenate((fc,bw,gain), axis=1)
Y = Y.reshape(n, filtersNum, 3)
X = []
for paras in Y:
f, db = filterModel(paras)
X.append(db)
X = np.array(X)
Y = Y.reshape(n, filtersNum*3)
return X, Y
if __name__ == "__main__":
# Create a random dataset
# [fc, bandwidth, gain]
n = 100
filtersNum = 1
X, Y = genXY(n=n, filtersNum=filtersNum)
# Fit regression model
regr = MLPRegressor(hidden_layer_sizes=(10,), max_iter=10000)
regr.fit(X, Y)
print('train loss', regr.loss_)
# Predict
X_test, Y_test = genXY(n=n, filtersNum=filtersNum)
print('test loss', ((Y_test - regr.predict(X_test)) ** 2).mean())
# paras = [(1e4, 2500, 3), (300, 201, 10), (400, 600, 5), (600, 200, 8),
# (2000, 3500, 13), (6000, 4000, 3), (8500, 6000, 2.75),]
paras = [(1e4, 2500, 3),]
f, db = filterModel(paras)
plt.semilogx(f, db, label="target", color='red')
y_pred = regr.predict([db])
f, db = filterModel(y_pred.reshape(filtersNum, 3))
plt.semilogx(f, db, label="NN")
plt.legend()
plt.show() | 0 | 0 |
6f7dc504b463999eb2e9b24300c31ee083334da5 | 980 | py | Python | src/utils/dist.py | shaoeric/torch-atom | 7688fc38c0d19fe4d13a9773115df911ffe6eaaa | [
"MIT"
] | 28 | 2022-03-06T06:04:54.000Z | 2022-03-27T04:14:33.000Z | src/utils/dist.py | shaoeric/torch-atom | 7688fc38c0d19fe4d13a9773115df911ffe6eaaa | [
"MIT"
] | null | null | null | src/utils/dist.py | shaoeric/torch-atom | 7688fc38c0d19fe4d13a9773115df911ffe6eaaa | [
"MIT"
] | 3 | 2022-03-11T07:01:58.000Z | 2022-03-17T05:34:41.000Z | import torch.distributed as dist
import torch
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def reduce_value(value, average=True):
world_size = get_world_size()
if world_size < 2:
return value
with torch.no_grad():
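        # Sum the tensor across all processes; optionally divide by the world size to get the mean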
dist.all_reduce(value)
if average:
value /= world_size
return value | 22.790698 | 72 | 0.626531 | import torch.distributed as dist
import torch
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def reduce_value(value, average=True):
world_size = get_world_size()
if world_size < 2:
return value
with torch.no_grad():
dist.all_reduce(value)
if average:
value /= world_size
return value | 0 | 0 |
7d4f4e96803718430d878ca088bcaed92b3079cc | 3,822 | py | Python | base_pool/mysql_pool/mysql_views.py | zhanzhangwei/kafka-study | 6be4167319b855c9560e92932aae628f87a5e680 | [
"Apache-2.0"
] | null | null | null | base_pool/mysql_pool/mysql_views.py | zhanzhangwei/kafka-study | 6be4167319b855c9560e92932aae628f87a5e680 | [
"Apache-2.0"
] | null | null | null | base_pool/mysql_pool/mysql_views.py | zhanzhangwei/kafka-study | 6be4167319b855c9560e92932aae628f87a5e680 | [
"Apache-2.0"
] | null | null | null | import json
import pymysql
import datetime
from dbutils.pooled_db import PooledDB
import pymysql
from conf.common import *
class MysqlClient(object):
__pool = None
def __init__(self):
"""
        :param mincached: initial number of idle connections in the pool
        :param maxcached: maximum number of idle connections in the pool
        :param maxshared: maximum number of shared connections
        :param maxconnections: maximum number of connections the pool may create
        :param blocking: behaviour when maxconnections is exceeded; True waits for a
            connection to be freed, False raises an error immediately
        :param maxusage: maximum number of times a single connection is reused
        :param setsession: optional list of SQL commands that may serve to prepare
            the session, e.g. ["set datestyle to ...", "set time zone ..."]
        :param reset: how connections should be reset when returned to the pool
            (False or None to rollback transactions started with begin(),
            True to always issue a rollback for safety's sake)
        :param host: database IP address
        :param port: database port
        :param db: database name
        :param user: username
        :param passwd: password
        :param charset: character encoding
"""
mincached = 10
maxcached = 20
maxshared = 10
maxconnections = 200
blocking = True
maxusage = 100
setsession = None
reset = True
host = MYSQL_HOST
port = MYSQL_PORT
db = DATABASE
user = USER
passwd = PASSWORD
charset = 'utf8mb4'
if not self.__pool:
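            # Create the connection pool once and share it across all MysqlClient instances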
self.__class__.__pool = PooledDB(pymysql,
mincached, maxcached,
maxshared, maxconnections, blocking,
maxusage, setsession, reset,
host=host, port=port, db=db,
user=user, passwd=passwd,
charset=charset,
cursorclass=pymysql.cursors.DictCursor
)
self._conn = None
self._cursor = None
self.__get_conn()
def __get_conn(self):
self._conn = self.__pool.connection()
self._cursor = self._conn.cursor()
def close(self):
try:
self._cursor.close()
self._conn.close()
except Exception as e:
print(e)
def __execute(self, sql, param=()):
count = self._cursor.execute(sql, param)
print(count)
return count
@staticmethod
def __dict_datetime_obj_to_str(result_dict):
"""datatimejson"""
if result_dict:
result_replace = {k: v.__str__() for k, v in result_dict.items() if isinstance(v, datetime.datetime)}
result_dict.update(result_replace)
return result_dict
def select_one(self, sql, param=()):
""""""
count = self.__execute(sql, param)
result = self._cursor.fetchone()
""":type result:dict"""
result = self.__dict_datetime_obj_to_str(result)
return count, result
def select_many(self, sql, param=()):
"""
        Query multiple results
        :param sql: SQL statement
        :param param: SQL parameters
        :return: number of rows and the result set
"""
count = self.__execute(sql, param)
result = self._cursor.fetchall()
""":type result:list"""
[self.__dict_datetime_obj_to_str(row_dict) for row_dict in result]
return count, result
def execute(self, sql, param=()):
count = self.__execute(sql, param)
return count
def begin(self):
""""""
self._conn.autocommit(0)
def end(self, option='commit'):
""""""
if option == 'commit':
            self._conn.commit()
else:
self._conn.rollback()
mysql_client = MysqlClient()
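# Example usage (the table and column names here are only illustrative):
#   count, row = mysql_client.select_one("SELECT * FROM user WHERE id = %s", (1,))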
| 30.576 | 113 | 0.545526 | import json
import pymysql
import datetime
from dbutils.pooled_db import PooledDB
import pymysql
from conf.common import *
class MysqlClient(object):
__pool = None
def __init__(self):
"""
        :param mincached: initial number of idle connections in the pool
        :param maxcached: maximum number of idle connections in the pool
        :param maxshared: maximum number of shared connections
        :param maxconnections: maximum number of connections the pool may create
        :param blocking: behaviour when maxconnections is exceeded; True waits for a
            connection to be freed, False raises an error immediately
        :param maxusage: maximum number of times a single connection is reused
        :param setsession: optional list of SQL commands that may serve to prepare
            the session, e.g. ["set datestyle to ...", "set time zone ..."]
        :param reset: how connections should be reset when returned to the pool
            (False or None to rollback transactions started with begin(),
            True to always issue a rollback for safety's sake)
        :param host: database IP address
        :param port: database port
        :param db: database name
        :param user: username
        :param passwd: password
        :param charset: character encoding
"""
mincached = 10
maxcached = 20
maxshared = 10
maxconnections = 200
blocking = True
maxusage = 100
setsession = None
reset = True
host = MYSQL_HOST
port = MYSQL_PORT
db = DATABASE
user = USER
passwd = PASSWORD
charset = 'utf8mb4'
if not self.__pool:
self.__class__.__pool = PooledDB(pymysql,
mincached, maxcached,
maxshared, maxconnections, blocking,
maxusage, setsession, reset,
host=host, port=port, db=db,
user=user, passwd=passwd,
charset=charset,
cursorclass=pymysql.cursors.DictCursor
)
self._conn = None
self._cursor = None
self.__get_conn()
def __get_conn(self):
self._conn = self.__pool.connection()
self._cursor = self._conn.cursor()
def close(self):
try:
self._cursor.close()
self._conn.close()
except Exception as e:
print(e)
def __execute(self, sql, param=()):
count = self._cursor.execute(sql, param)
print(count)
return count
@staticmethod
def __dict_datetime_obj_to_str(result_dict):
"""把字典里面的datatime对象转成字符串,使json转换不出错"""
if result_dict:
result_replace = {k: v.__str__() for k, v in result_dict.items() if isinstance(v, datetime.datetime)}
result_dict.update(result_replace)
return result_dict
def select_one(self, sql, param=()):
"""查询单个结果"""
count = self.__execute(sql, param)
result = self._cursor.fetchone()
""":type result:dict"""
result = self.__dict_datetime_obj_to_str(result)
return count, result
def select_many(self, sql, param=()):
"""
        Query multiple results
        :param sql: SQL statement
        :param param: SQL parameters
        :return: number of rows and the result set
"""
count = self.__execute(sql, param)
result = self._cursor.fetchall()
""":type result:list"""
[self.__dict_datetime_obj_to_str(row_dict) for row_dict in result]
return count, result
def execute(self, sql, param=()):
count = self.__execute(sql, param)
return count
def begin(self):
"""开启事务"""
self._conn.autocommit(0)
def end(self, option='commit'):
"""结束事务"""
if option == 'commit':
self._conn.autocommit()
else:
self._conn.rollback()
mysql_client = MysqlClient()
| 492 | 0 |
fbbdf9d38ba25ab279b3c1a4de1e0e092ad03325 | 8,998 | py | Python | scripts/jupyter_vdi.py | ScottWales/cosima-cookbook | 0ed83e2165efe5badfca59e2dccf835ab7acecca | [
"Apache-2.0"
] | null | null | null | scripts/jupyter_vdi.py | ScottWales/cosima-cookbook | 0ed83e2165efe5badfca59e2dccf835ab7acecca | [
"Apache-2.0"
] | null | null | null | scripts/jupyter_vdi.py | ScottWales/cosima-cookbook | 0ed83e2165efe5badfca59e2dccf835ab7acecca | [
"Apache-2.0"
] | 1 | 2020-01-30T05:36:08.000Z | 2020-01-30T05:36:08.000Z | #!/usr/bin/env python
"""
Script to launch a VDI session (or connect to already running session)
and start a Jupyter server on the VDI
A ssh tunnel from the local machine to the VDI is set up and the local
webbrowser is spawned.
This is a python3 script (uses unicode strings). If you don't have
python3 on your local machine, try installing Miniconda3
The only external module is pexpect which may need to be installed
using conda or pip.
Usage:
- if you use a password, the script will ask for your password when needed
- if you have already set up SSH public key with Strudel, try running
$ ssh-add ~/.ssh/MassiveLauncherKey
to add your public key to the ssh key agent.
Author: James Munroe, 2017
"""
from __future__ import print_function
import re
import sys
import time
import getpass
import pexpect
import os
import configparser
# Requires future module https://pypi.org/project/future/
from builtins import input
import argparse
import logging
logging.basicConfig(format='[%(asctime)s jupyter_vdi.py] %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
try:
import appscript
except ImportError:
import webbrowser
is_mac = False
else:
is_mac = True
DEFAULTS = {
'user' : getpass.getuser(),
'JupyterPort' : '8889',
'BokehPort' : '8787',
'execHost' : 'vdi.nci.org.au'
}
verbose = 0
config_path = os.path.expanduser('~/cosima_cookbook.conf')
parser = configparser.ConfigParser(defaults=DEFAULTS)
if os.path.exists(config_path):
logging.info('Using config file: {}'.format(config_path))
parser.read(config_path)
else:
logging.warn('No config file found. Creating default {} file.'.format(config_path))
logging.warn('*** Please edit this file as needed. ***')
while DEFAULTS['user']==getpass.getuser() or DEFAULTS['user']=="":
DEFAULTS['user']=input('What is your NCI username? ')
parser = configparser.ConfigParser(defaults=DEFAULTS)
with open(config_path, 'w') as f:
parser.write(f)
params = parser.defaults()
def parse_args(args):
parser = argparse.ArgumentParser(description="Log into the VDI, start a jupyter notebook session and ssh tunnel to local machine")
parser.add_argument("-v","--verbose", help="Increase verbosity", action='count', default=0)
return parser.parse_args(args)
def clean_params(params):
for key, value in params.items():
try:
params[key] = value.decode()
except AttributeError:
pass
def ssh(cmd, params, login_timeout=10):
"""
Run a remote command via SSH
"""
clean_params(params)
cmd = ("ssh -x -l {user} {exechost} " + cmd).format(**params)
if verbose > 0: logging.info(cmd)
s = pexpect.spawn(cmd)
# SSH pexpect logic taken from pxshh:
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
# First phase
if i == 0:
# New certificate -- always accept it.
# This is what you get if SSH does not have the remote host's
# public key stored in the 'known_hosts' cache.
s.sendline("yes")
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
if i == 1: # password or passphrase
if 'password' not in params:
params['password'] = getpass.getpass('password: ')
s.sendline(params['password'])
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
# TODO: check if ssh connection is successful
return s
def session(func, *args, **kwargs):
"""wrapper for sending session-ctl commands"""
cmd = '/opt/vdi/bin/session-ctl --configver=20151620513 ' + func
s = ssh(cmd, *args, **kwargs)
s.close()
return s
def open_jupyter_url(params):
# Open browser locally
status = ''
url = 'http://localhost:{jupyterport}/?token={token}'.format(**params)
if is_mac:
status = "Using appscript to open {}".format(url)
safari = appscript.app("Safari")
safari.make(new=appscript.k.document, with_properties={appscript.k.URL: url})
else:
status = "Opening {}".format(url)
webbrowser.open(url)
return status
tunnel_started = False
tunnel = None
def start_tunnel(params):
# Create ssh tunnel for local access to jupyter notebook
cmd = ' '.join(['-N -f -L {jupyterport}:localhost:{jupyterport}',
'-L {bokehport}:localhost:{bokehport}'])
# This print statement is needed as there are /r/n line endings from
# the jupyter notebook output that are difficult to suppress
logging.info("Starting ssh tunnel...")
tunnel = ssh(cmd, params, login_timeout=2)
tunnel.expect (pexpect.EOF)
# Open web browser and log result
logging.info(open_jupyter_url(params))
def main(args):
# global verbose means it doesn't need to be passed to every routine
global verbose
verbose = args.verbose
logging.info("Checking SSH keys to VDI are configured...")
r = session('hello --partition main', params)
if r.exitstatus != 0:
# suggest setting up SSH keys
logging.error("Error with ssh keys/password and VDI.")
logging.error(" Incorrect user name in ~/cosima_cookbook.conf file?")
logging.error(" Edit ~/cosima_cookbook.conf before continuing.")
sys.exit(1)
logging.info("SSH keys configured OK")
logging.info("Determine if VDI session is already running...")
r = session('list-avail --partition main', params)
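    # Parse the session-ctl output for an existing job id, its state and the remaining walltime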
m = re.search('#~#id=(?P<jobid>(?P<jobidNumber>.*?))#~#state=(?P<state>.*?)(?:#~#time_rem=(?P<remainingWalltime>.*?))?#~#', r.before.decode())
if m is not None:
params.update(m.groupdict())
w = int(params['remainingWalltime'])
remainingWalltime = '{:02}:{:02}:{:02}'.format(
w // 3600, w % 3600 // 60, w % 60)
logging.info('Time remaining: %s', remainingWalltime)
# TODO: should give user option of starting a new session if the remaining walltime is short
else:
logging.info('No VDI session found')
logging.info("Launching a new VDI session...")
r = session('launch --partition main', params)
m = re.search('#~#id=(?P<jobid>(?P<jobidNumber>.*?))#~#',
r.before.decode())
if m is None:
logging.info('Unable to launch new VDI session:\n'+r.before.decode())
params.update(m.groupdict())
time.sleep(2) # TODO: instead of waiting, should check for confirmation
# use has-started
logging.info("Determine jobid for VDI session...{jobid}".format(**params))
logging.info("Get exechost for VDI session...")
r = session('get-host --jobid {jobid}', params)
m = re.search('#~#host=(?P<exechost>.*?)#~#', r.before.decode())
params.update(m.groupdict())
logging.info('exechost: {exechost}'.format(**params))
logging.info("Running Jupyter on VDI...")
setupconda = params.get('setupconda',
"""module use /g/data3/hh5/public/modules
&& module load conda/analysis3
""".replace('\n', ' '))
jupyterapp = params.get('jupyterapp', "notebook")
run_jupyter = "jupyter %s --no-browser --port {jupyterport}" % jupyterapp
run_jupyter = setupconda + ' && ' + run_jupyter
cmd = ' '.join(['-t', """'bash -l -c "%s"'""" % run_jupyter])
logging.info("Waiting for Jupyter to start...")
# Launch jupyter on VDI
s = ssh(cmd, params, login_timeout=2)
ret = s.expect('http://\S*:(?P<jupyterport>\d+)/\?token=(?P<token>[a-zA-Z0-9]+)')
if s.match:
params.update(s.match.groupdict())
start_tunnel(params)
else:
logging.info("Could not find url information in jupyter output")
sys.exit(1)
# Grab all the output up to the incorrect URL -- uses the token twice, which is unhelpful
ret = s.expect('http://.*')
logging.info("Use Control-C to stop the Notebook server and shut down all kernels (twice to skip confirmation)\n\n")
# give control over to user
s.interact()
logging.info('end of script')
# optional: terminate to close the vdi session?
def main_argv():
args = parse_args(sys.argv[1:])
main(args)
if __name__ == "__main__":
main_argv()
| 33.574627 | 146 | 0.629362 | #!/usr/bin/env python
"""
Script to launch a VDI session (or connect to already running session)
and start a Jupyter server on the VDI
A ssh tunnel from the local machine to the VDI is set up and the local
webbrowser is spawned.
This is a python3 script (uses unicode strings). If you don't have
python3 on your local machine, try installing Miniconda3
The only external module is pexpect which may need to be installed
using conda or pip.
Usage:
- if you use a password, the script will ask for your password when needed
- if you have already set up SSH public key with Strudel, try running
$ ssh-add ~/.ssh/MassiveLauncherKey
to add your public key to the ssh key agent.
Author: James Munroe, 2017
"""
from __future__ import print_function
import re
import sys
import time
import getpass
import pexpect
import os
import configparser
# Requires future module https://pypi.org/project/future/
from builtins import input
import argparse
import logging
logging.basicConfig(format='[%(asctime)s jupyter_vdi.py] %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
try:
import appscript
except ImportError:
import webbrowser
is_mac = False
else:
is_mac = True
DEFAULTS = {
'user' : getpass.getuser(),
'JupyterPort' : '8889',
'BokehPort' : '8787',
'execHost' : 'vdi.nci.org.au'
}
verbose = 0
config_path = os.path.expanduser('~/cosima_cookbook.conf')
parser = configparser.ConfigParser(defaults=DEFAULTS)
if os.path.exists(config_path):
logging.info('Using config file: {}'.format(config_path))
parser.read(config_path)
else:
logging.warn('No config file found. Creating default {} file.'.format(config_path))
logging.warn('*** Please edit this file as needed. ***')
while DEFAULTS['user']==getpass.getuser() or DEFAULTS['user']=="":
DEFAULTS['user']=input('What is your NCI username? ')
parser = configparser.ConfigParser(defaults=DEFAULTS)
with open(config_path, 'w') as f:
parser.write(f)
params = parser.defaults()
def parse_args(args):
parser = argparse.ArgumentParser(description="Log into the VDI, start a jupyter notebook session and ssh tunnel to local machine")
parser.add_argument("-v","--verbose", help="Increase verbosity", action='count', default=0)
return parser.parse_args(args)
def clean_params(params):
for key, value in params.items():
try:
params[key] = value.decode()
except AttributeError:
pass
def ssh(cmd, params, login_timeout=10):
"""
Run a remote command via SSH
"""
clean_params(params)
cmd = ("ssh -x -l {user} {exechost} " + cmd).format(**params)
if verbose > 0: logging.info(cmd)
s = pexpect.spawn(cmd)
    # SSH pexpect logic taken from pxssh:
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
# First phase
if i == 0:
# New certificate -- always accept it.
# This is what you get if SSH does not have the remote host's
# public key stored in the 'known_hosts' cache.
s.sendline("yes")
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
if i == 1: # password or passphrase
if 'password' not in params:
params['password'] = getpass.getpass('password: ')
s.sendline(params['password'])
i = s.expect(["(?i)are you sure you want to continue connecting",
"(?i)(?:password)|(?:passphrase for key)",
"(?i)permission denied",
"(?i)connection closed by remote host",
pexpect.EOF, pexpect.TIMEOUT], timeout=login_timeout)
# TODO: check if ssh connection is successful
return s
def session(func, *args, **kwargs):
"""wrapper for sending session-ctl commands"""
cmd = '/opt/vdi/bin/session-ctl --configver=20151620513 ' + func
s = ssh(cmd, *args, **kwargs)
s.close()
return s
def open_jupyter_url(params):
# Open browser locally
status = ''
url = 'http://localhost:{jupyterport}/?token={token}'.format(**params)
if is_mac:
status = "Using appscript to open {}".format(url)
safari = appscript.app("Safari")
safari.make(new=appscript.k.document, with_properties={appscript.k.URL: url})
else:
status = "Opening {}".format(url)
webbrowser.open(url)
return status
tunnel_started = False
tunnel = None
def start_tunnel(params):
# Create ssh tunnel for local access to jupyter notebook
cmd = ' '.join(['-N -f -L {jupyterport}:localhost:{jupyterport}',
'-L {bokehport}:localhost:{bokehport}'])
    # This logging call is needed as there are \r\n line endings from
    # the jupyter notebook output that are difficult to suppress
logging.info("Starting ssh tunnel...")
tunnel = ssh(cmd, params, login_timeout=2)
tunnel.expect (pexpect.EOF)
# Open web browser and log result
logging.info(open_jupyter_url(params))
def main(args):
# global verbose means it doesn't need to be passed to every routine
global verbose
verbose = args.verbose
logging.info("Checking SSH keys to VDI are configured...")
r = session('hello --partition main', params)
if r.exitstatus != 0:
# suggest setting up SSH keys
logging.error("Error with ssh keys/password and VDI.")
logging.error(" Incorrect user name in ~/cosima_cookbook.conf file?")
logging.error(" Edit ~/cosima_cookbook.conf before continuing.")
sys.exit(1)
logging.info("SSH keys configured OK")
logging.info("Determine if VDI session is already running...")
r = session('list-avail --partition main', params)
m = re.search('#~#id=(?P<jobid>(?P<jobidNumber>.*?))#~#state=(?P<state>.*?)(?:#~#time_rem=(?P<remainingWalltime>.*?))?#~#', r.before.decode())
if m is not None:
params.update(m.groupdict())
w = int(params['remainingWalltime'])
remainingWalltime = '{:02}:{:02}:{:02}'.format(
w // 3600, w % 3600 // 60, w % 60)
logging.info('Time remaining: %s', remainingWalltime)
# TODO: should give user option of starting a new session if the remaining walltime is short
else:
logging.info('No VDI session found')
logging.info("Launching a new VDI session...")
r = session('launch --partition main', params)
m = re.search('#~#id=(?P<jobid>(?P<jobidNumber>.*?))#~#',
r.before.decode())
        if m is None:
            logging.error('Unable to launch new VDI session:\n'+r.before.decode())
            sys.exit(1)
params.update(m.groupdict())
time.sleep(2) # TODO: instead of waiting, should check for confirmation
# use has-started
logging.info("Determine jobid for VDI session...{jobid}".format(**params))
logging.info("Get exechost for VDI session...")
r = session('get-host --jobid {jobid}', params)
m = re.search('#~#host=(?P<exechost>.*?)#~#', r.before.decode())
params.update(m.groupdict())
logging.info('exechost: {exechost}'.format(**params))
logging.info("Running Jupyter on VDI...")
setupconda = params.get('setupconda',
"""module use /g/data3/hh5/public/modules
&& module load conda/analysis3
""".replace('\n', ' '))
jupyterapp = params.get('jupyterapp', "notebook")
run_jupyter = "jupyter %s --no-browser --port {jupyterport}" % jupyterapp
run_jupyter = setupconda + ' && ' + run_jupyter
cmd = ' '.join(['-t', """'bash -l -c "%s"'""" % run_jupyter])
logging.info("Waiting for Jupyter to start...")
# Launch jupyter on VDI
s = ssh(cmd, params, login_timeout=2)
ret = s.expect('http://\S*:(?P<jupyterport>\d+)/\?token=(?P<token>[a-zA-Z0-9]+)')
if s.match:
params.update(s.match.groupdict())
start_tunnel(params)
else:
logging.info("Could not find url information in jupyter output")
sys.exit(1)
# Grab all the output up to the incorrect URL -- uses the token twice, which is unhelpful
ret = s.expect('http://.*')
logging.info("Use Control-C to stop the Notebook server and shut down all kernels (twice to skip confirmation)\n\n")
# give control over to user
s.interact()
logging.info('end of script')
# optional: terminate to close the vdi session?
def main_argv():
args = parse_args(sys.argv[1:])
main(args)
if __name__ == "__main__":
main_argv()
| 0 | 0 |
1fa6873ff966dcc647833979508b75f9d44bd7bd | 2,703 | py | Python | utils/data.py | YOUSIKI/PyTorch-FBS | 5e94c3183f064ef5ed7f4b7d82b076056200b368 | [
"Apache-2.0"
] | 10 | 2020-09-14T02:40:37.000Z | 2022-01-13T11:13:36.000Z | utils/data.py | YOUSIKI/PyTorch-FBS | 5e94c3183f064ef5ed7f4b7d82b076056200b368 | [
"Apache-2.0"
] | 2 | 2020-11-28T05:48:45.000Z | 2022-03-11T13:44:50.000Z | utils/data.py | YOUSIKI/PyTorch-FBS | 5e94c3183f064ef5ed7f4b7d82b076056200b368 | [
"Apache-2.0"
] | 2 | 2020-11-28T02:27:08.000Z | 2021-11-24T03:10:10.000Z | # -*- coding=utf-8 -*-
__all__ = [
'tiny_imagenet',
'imagewoof2',
'imagenette2'
]
import os
import torch
import torchvision
_default_batch_size = 32
_default_num_workers = 4
def _transform(train=True):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if train:
return torchvision.transforms.Compose([
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)
])
else:
return torchvision.transforms.Compose([
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)
])
def tiny_imagenet(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'tiny-imagenet-200', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
def imagewoof2(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'imagewoof2', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
def imagenette2(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'imagenette2', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
| 34.653846 | 69 | 0.532741 | # -*- coding=utf-8 -*-
__all__ = [
'tiny_imagenet',
'imagewoof2',
'imagenette2'
]
import os
import torch
import torchvision
_default_batch_size = 32
_default_num_workers = 4
def _transform(train=True):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
if train:
return torchvision.transforms.Compose([
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)
])
else:
return torchvision.transforms.Compose([
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)
])
def tiny_imagenet(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'tiny-imagenet-200', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
def imagewoof2(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'imagewoof2', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
def imagenette2(name='train',
batch_size=_default_batch_size,
num_workers=_default_num_workers):
dataset = torchvision.datasets.ImageFolder(
os.path.join('datasets', 'imagenette2', name),
transform=_transform(name == 'train')
)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=name == 'train')
return dataloader
| 0 | 0 |
24a125f749e07df3306878e5eb148d1afc6b30c4 | 2,766 | py | Python | NewsPaper/NewsPaper/NewsPaper/news/models.py | PavelPopkov/D3.4.-Practice-Popkov | 46de6209bad81c17882520397fbb358c0834e753 | [
"MIT"
] | null | null | null | NewsPaper/NewsPaper/NewsPaper/news/models.py | PavelPopkov/D3.4.-Practice-Popkov | 46de6209bad81c17882520397fbb358c0834e753 | [
"MIT"
] | null | null | null | NewsPaper/NewsPaper/NewsPaper/news/models.py | PavelPopkov/D3.4.-Practice-Popkov | 46de6209bad81c17882520397fbb358c0834e753 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models import Sum
from datetime import datetime
class Author(models.Model):
author = models.CharField(max_length=100)
rating = models.IntegerField(default=0)
user = models.OneToOneField(User, on_delete=models.CASCADE)
def __str__(self):
return self.author
# .aggregate(Sum("rating"))
def update_rating(self):
auth = Author.objects.get(author=self.author)
sum_rat_post = 0
posts = auth.post_set.all()
for post in posts:
            sum_rat_post += post.rating * 3
        usr = auth.user
sum_rat_comm = 0
comments = usr.comment_set.all()
for comm in comments:
            sum_rat_comm += comm.rating
sum_rat_auth = 0
# comments_posts = auth.post_set.comment_set.all()
for post in posts:
comm_posts = post.comment_set.all()
for comm_post in comm_posts:
                sum_rat_auth += comm_post.rating
self.rating = sum_rat_post + sum_rat_comm + sum_rat_auth
self.save()
class Category(models.Model):
category = models.CharField(max_length=100, unique=True)
class Post(models.Model):
article = 'AR'
new = 'NE'
POSITIONS = [
        (article, 'Article'),
        (new, 'News')
]
ar_or_new = models.CharField(max_length=2,
choices=POSITIONS,
default=article)
created = models.DateTimeField(auto_now_add=True)
post_name = models.CharField(max_length=250)
content = models.TextField()
rating = models.IntegerField(default=0)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
category = models.ManyToManyField(Category, through='PostCategory')
def like(self):
self.rating += 1
self.save()
def dislike(self):
self.rating -= 1
        if self.rating < 0:
            self.rating = 0
self.save()
def preview(self):
prev = self.content[:124] + '...'
return prev
class PostCategory(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
class Comment(models.Model):
comment = models.TextField()
created = models.DateTimeField(auto_now_add=True)
rating = models.IntegerField(default=0)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def like(self):
self.rating += 1
self.save()
def dislike(self):
self.rating -= 1
if self.rating < 0:
self.rating = 0
self.save()
| 29.115789 | 71 | 0.630152 | from django.db import models
from django.contrib.auth.models import User
from django.db.models import Sum
from datetime import datetime
class Author(models.Model):
author = models.CharField(max_length=100)
rating = models.IntegerField(default=0)
user = models.OneToOneField(User, on_delete=models.CASCADE)
def __str__(self):
return self.author
# .aggregate(Sum("rating"))
def update_rating(self):
auth = Author.objects.get(author=self.author)
sum_rat_post = 0
posts = auth.post_set.all()
for post in posts:
            sum_rat_post += post.rating * 3
        usr = auth.user
sum_rat_comm = 0
comments = usr.comment_set.all()
for comm in comments:
            sum_rat_comm += comm.rating
sum_rat_auth = 0
# comments_posts = auth.post_set.comment_set.all()
for post in posts:
comm_posts = post.comment_set.all()
for comm_post in comm_posts:
                sum_rat_auth += comm_post.rating
self.rating = sum_rat_post + sum_rat_comm + sum_rat_auth
self.save()
class Category(models.Model):
category = models.CharField(max_length=100, unique=True)
class Post(models.Model):
article = 'AR'
new = 'NE'
POSITIONS = [
        (article, 'Article'),
        (new, 'News')
]
ar_or_new = models.CharField(max_length=2,
choices=POSITIONS,
default=article)
created = models.DateTimeField(auto_now_add=True)
post_name = models.CharField(max_length=250)
content = models.TextField()
rating = models.IntegerField(default=0)
author = models.ForeignKey(Author, on_delete=models.CASCADE)
category = models.ManyToManyField(Category, through='PostCategory')
def like(self):
self.rating += 1
self.save()
def dislike(self):
self.rating -= 1
        if self.rating < 0:
            self.rating = 0
self.save()
def preview(self):
prev = self.content[:124] + '...'
return prev
class PostCategory(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
class Comment(models.Model):
comment = models.TextField()
created = models.DateTimeField(auto_now_add=True)
rating = models.IntegerField(default=0)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def like(self):
self.rating += 1
self.save()
def dislike(self):
self.rating -= 1
if self.rating < 0:
self.rating = 0
self.save()
| 26 | 0 |
838d22d0dea3f0cea788de6ba72e416ad4ef2add | 1,917 | py | Python | tests/e2e/runner.py | wilzbach/storyscript-sls | d71d74a53852ebae54bdaab341678b04f2775411 | [
"Apache-2.0"
] | null | null | null | tests/e2e/runner.py | wilzbach/storyscript-sls | d71d74a53852ebae54bdaab341678b04f2775411 | [
"Apache-2.0"
] | null | null | null | tests/e2e/runner.py | wilzbach/storyscript-sls | d71d74a53852ebae54bdaab341678b04f2775411 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env pytest
import io
import json
from os import path
from pytest import fixture, mark
from sls import App
import storyscript.hub.Hub as StoryHub
from storyhub.sdk.AutoUpdateThread import AutoUpdateThread
from tests.e2e.utils.features import parse_options
from tests.e2e.utils.fixtures import find_test_files, hub, test_dir
test_files = find_test_files(relative=True)
@fixture
def patched_storyhub(mocker, scope="module"):
mocker.patch.object(StoryHub, "StoryscriptHub", return_value=hub)
mocker.patch.object(AutoUpdateThread, "dispatch_update")
# compile a story and compare its completion with the expected tree
def run_test_completion(uri, source, expected, patch, options):
action = options.pop("action", "complete")
if action == "complete":
result = App(hub=hub).complete(uri=uri, text=source, **options)
else:
assert action == "click"
result = App(hub=hub).click(uri=uri, text=source, **options)
assert result == expected
# load a story from the file system and load its expected result file (.json)
def run_test(story_path, patch):
story_string = None
with io.open(story_path, "r") as f:
story_string = f.read()
expected_path = path.splitext(story_path)[0]
assert path.isfile(
expected_path + ".json"
), f"Path: `{expected_path}.json` does not exist."
expected_completion = None
with io.open(expected_path + ".json", "r") as f:
expected_completion = f.read()
# deserialize the expected completion
expected = json.loads(expected_completion)
options = parse_options(story_string)
return run_test_completion(
story_path, story_string, expected, patch, options
)
@mark.usefixtures("patched_storyhub")
@mark.parametrize("test_file", test_files)
def test_story(test_file, patch):
test_file = path.join(test_dir, test_file)
run_test(test_file, patch)
| 28.191176 | 77 | 0.720396 | #!/usr/bin/env pytest
import io
import json
from os import path
from pytest import fixture, mark
from sls import App
import storyscript.hub.Hub as StoryHub
from storyhub.sdk.AutoUpdateThread import AutoUpdateThread
from tests.e2e.utils.features import parse_options
from tests.e2e.utils.fixtures import find_test_files, hub, test_dir
test_files = find_test_files(relative=True)
@fixture
def patched_storyhub(mocker, scope="module"):
mocker.patch.object(StoryHub, "StoryscriptHub", return_value=hub)
mocker.patch.object(AutoUpdateThread, "dispatch_update")
# compile a story and compare its completion with the expected tree
def run_test_completion(uri, source, expected, patch, options):
action = options.pop("action", "complete")
if action == "complete":
result = App(hub=hub).complete(uri=uri, text=source, **options)
else:
assert action == "click"
result = App(hub=hub).click(uri=uri, text=source, **options)
assert result == expected
# load a story from the file system and load its expected result file (.json)
def run_test(story_path, patch):
story_string = None
with io.open(story_path, "r") as f:
story_string = f.read()
expected_path = path.splitext(story_path)[0]
assert path.isfile(
expected_path + ".json"
), f"Path: `{expected_path}.json` does not exist."
expected_completion = None
with io.open(expected_path + ".json", "r") as f:
expected_completion = f.read()
# deserialize the expected completion
expected = json.loads(expected_completion)
options = parse_options(story_string)
return run_test_completion(
story_path, story_string, expected, patch, options
)
@mark.usefixtures("patched_storyhub")
@mark.parametrize("test_file", test_files)
def test_story(test_file, patch):
test_file = path.join(test_dir, test_file)
run_test(test_file, patch)
| 0 | 0 |
a028f9eab21f99b975a3ac640714e3b636189bcc | 342 | py | Python | Misc/Become_a_Python_Developer/2_Programming Fundamentals in the Real World/Ex_Files_Programming_Realworld/Exercise Files/Ch05/05_03/start_05_03_coordinates.py | specter01wj/LAB-Lynda | 1915ada66f4498cdf15a0e2a068c938e325e9ba3 | [
"MIT"
] | null | null | null | Misc/Become_a_Python_Developer/2_Programming Fundamentals in the Real World/Ex_Files_Programming_Realworld/Exercise Files/Ch05/05_03/start_05_03_coordinates.py | specter01wj/LAB-Lynda | 1915ada66f4498cdf15a0e2a068c938e325e9ba3 | [
"MIT"
] | 8 | 2020-07-08T06:20:03.000Z | 2022-03-02T10:05:06.000Z | Misc/Become_a_Python_Developer/2_Programming Fundamentals in the Real World/Ex_Files_Programming_Realworld/Exercise Files/Ch05/05_03/start_05_03_coordinates.py | specter01wj/LAB-Lynda | 1915ada66f4498cdf15a0e2a068c938e325e9ba3 | [
"MIT"
] | null | null | null | """ Where's My Mouse? """
import tkinter
def mouse_click(event):
# retrieve XY coords as a tuple
coords = root.winfo_pointerxy()
print('coords: {}'.format(coords))
print('X: {}'.format(coords[0]))
print('Y: {}'.format(coords[1]))
root = tkinter.Tk()
root.bind('<Button>', mouse_click)
root.mainloop()
| 22.8 | 39 | 0.599415 | """ Where's My Mouse? """
import tkinter
def mouse_click(event):
# retrieve XY coords as a tuple
coords = root.winfo_pointerxy()
print('coords: {}'.format(coords))
print('X: {}'.format(coords[0]))
print('Y: {}'.format(coords[1]))
root = tkinter.Tk()
root.bind('<Button>', mouse_click)
root.mainloop()
| 0 | 0 |
b485f685ca90029c0dd0acd04f32bc0b55820f14 | 2,906 | py | Python | examples/fsm/bot/middleware.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 13 | 2021-01-21T12:43:10.000Z | 2022-03-23T11:11:59.000Z | examples/fsm/bot/middleware.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 259 | 2020-02-26T08:51:03.000Z | 2022-03-23T11:08:36.000Z | examples/fsm/bot/middleware.py | ExpressApp/pybotx | 97c8b1ce5d45a05567ed01d545cb43174a2dcbb9 | [
"MIT"
] | 5 | 2019-12-02T16:19:22.000Z | 2021-11-22T20:33:34.000Z | from dataclasses import dataclass
from enum import Enum
from typing import Callable, Dict, Final, Optional, Type, Union
from botx import Bot, Collector, Message
from botx.concurrency import callable_to_coroutine
from botx.middlewares.base import BaseMiddleware
from botx.typing import Executor
_default_transition: Final = object()
@dataclass
class Transition:
on_failure: Optional[Union[Enum, object]] = _default_transition
on_success: Optional[Union[Enum, object]] = _default_transition
class FlowError(Exception):
pass
class FSM:
def __init__(self, states: Type[Enum]) -> None:
self.transitions: Dict[Enum, Transition] = {}
self.collector = Collector()
self.states = states
def handler(
self,
on_state: Enum,
next_state: Optional[Union[Enum, object]] = _default_transition,
on_failure: Optional[Union[Enum, object]] = _default_transition,
) -> Callable:
def decorator(handler: Callable) -> Callable:
self.collector.add_handler(
handler,
body=on_state.name,
name=on_state.name,
include_in_status=False,
)
self.transitions[on_state] = Transition(
on_success=next_state, on_failure=on_failure,
)
return handler
return decorator
def change_state(message: Message, new_state: Optional[Enum]) -> None:
message.bot.state.fsm_state[(message.user_huid, message.group_chat_id)] = new_state
class FSMMiddleware(BaseMiddleware):
def __init__(
self,
executor: Executor,
bot: Bot,
fsm: FSM,
initial_state: Optional[Enum] = None,
) -> None:
super().__init__(executor)
bot.state.fsm_state = {}
self.fsm = fsm
self.initial_state = initial_state
for state in self.fsm.states:
# check that for each state there is registered handler
assert state in self.fsm.transitions
async def dispatch(self, message: Message, call_next: Executor) -> None:
current_state: Enum = message.bot.state.fsm_state.setdefault(
(message.user_huid, message.group_chat_id), self.initial_state,
)
if current_state is not None:
transition = self.fsm.transitions[current_state]
handler = self.fsm.collector.handler_for(current_state.name)
try:
await handler(message)
except Exception as exc:
if transition.on_failure is not _default_transition:
change_state(message, transition.on_failure)
raise exc
else:
if transition.on_success is not _default_transition:
change_state(message, transition.on_success)
else:
await callable_to_coroutine(call_next, message)
| 32.651685 | 87 | 0.639023 | from dataclasses import dataclass
from enum import Enum
from typing import Callable, Dict, Final, Optional, Type, Union
from botx import Bot, Collector, Message
from botx.concurrency import callable_to_coroutine
from botx.middlewares.base import BaseMiddleware
from botx.typing import Executor
_default_transition: Final = object()
@dataclass
class Transition:
on_failure: Optional[Union[Enum, object]] = _default_transition
on_success: Optional[Union[Enum, object]] = _default_transition
class FlowError(Exception):
pass
class FSM:
def __init__(self, states: Type[Enum]) -> None:
self.transitions: Dict[Enum, Transition] = {}
self.collector = Collector()
self.states = states
def handler(
self,
on_state: Enum,
next_state: Optional[Union[Enum, object]] = _default_transition,
on_failure: Optional[Union[Enum, object]] = _default_transition,
) -> Callable:
def decorator(handler: Callable) -> Callable:
self.collector.add_handler(
handler,
body=on_state.name,
name=on_state.name,
include_in_status=False,
)
self.transitions[on_state] = Transition(
on_success=next_state, on_failure=on_failure,
)
return handler
return decorator
def change_state(message: Message, new_state: Optional[Enum]) -> None:
message.bot.state.fsm_state[(message.user_huid, message.group_chat_id)] = new_state
class FSMMiddleware(BaseMiddleware):
def __init__(
self,
executor: Executor,
bot: Bot,
fsm: FSM,
initial_state: Optional[Enum] = None,
) -> None:
super().__init__(executor)
bot.state.fsm_state = {}
self.fsm = fsm
self.initial_state = initial_state
for state in self.fsm.states:
# check that for each state there is registered handler
assert state in self.fsm.transitions
async def dispatch(self, message: Message, call_next: Executor) -> None:
current_state: Enum = message.bot.state.fsm_state.setdefault(
(message.user_huid, message.group_chat_id), self.initial_state,
)
if current_state is not None:
transition = self.fsm.transitions[current_state]
handler = self.fsm.collector.handler_for(current_state.name)
try:
await handler(message)
except Exception as exc:
if transition.on_failure is not _default_transition:
change_state(message, transition.on_failure)
raise exc
else:
if transition.on_success is not _default_transition:
change_state(message, transition.on_success)
else:
await callable_to_coroutine(call_next, message)
| 0 | 0 |
42f3981074dbd8b6458eb716c4608442ffca1db6 | 6,411 | py | Python | webenmr/lib/convrdc.py | andreagia/WEBNMR | 512a8cc04cf69300796585feae722614501389a9 | [
"Apache-2.0"
] | null | null | null | webenmr/lib/convrdc.py | andreagia/WEBNMR | 512a8cc04cf69300796585feae722614501389a9 | [
"Apache-2.0"
] | null | null | null | webenmr/lib/convrdc.py | andreagia/WEBNMR | 512a8cc04cf69300796585feae722614501389a9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''
This program attempts to convert XPLOR RDC (residual dipolar coupling) restraints into AMBER format
XPLOR:
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) (resid 200 and name Y ) ( resid 13 and name C ) 0.2400 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y ) ( resid 13 and name CA ) 0.4300 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y )( resid 13 and name CB ) 0.1000 0.2000
AMBER:
&align
num_datasets=2,
dcut= -1.0, freezemol= .false.,
ndip= 10, dwt= 5*0.1, 5*0.1
gigj= 5*-3.1631,5*-3.1631,
dij= 5*1.041,5*1.041,
s11= -4.236,-4.236
s12= 56.860,56.860
s13= -34.696,-34.696
s22= -27.361,-27.361
s23= -12.867,-12.867
dataset=1,
id(1)=20, jd(1)=19, dobsl(1)=-2.13, dobsu(1)=-2.13,
id(2)=31, jd(2)=30, dobsl(2)= 1.10, dobsu(2)= 1.10,
id(3)=43, jd(3)=42, dobsl(3)=-5.54, dobsu(3)=-5.54,
...
...
&end
'''
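# Roughly, convert() below turns each normalized XPLOR "assign" line into one
# AMBER dipolar entry: the two atom selections are mapped to PDB atom serial
# numbers via numMap ('resid:atomname' -> serial), and the observed value is
# written as both dobsl and dobsu. The residue/atom names and numbers in this
# sketch are illustrative only, not taken from a real restraint file:
#
#   assign ( ...tensor atoms... ) ( resid 13 and name N ) ( resid 13 and name H ) -2.13 0.20
#     becomes
#   id(1)=<serial of 13:N>, jd(1)=<serial of 13:H>, dobsl(1)=-2.13, dobsu(1)=-2.13,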
import sys
import os
import commands
from optparse import OptionParser
from xml_parser import *
from normalize_tbl import normalize
from constants import convtable
def searchres(nres, lpdb):
for l in lpdb:
if l.strip().lower().startswith('atom'):
s=l.split()
if int(nres)==int(s[4]):
return s[3]
def searchC(outx):
i=0
c=[]
while i<len(outx):
if outx[i].strip().startswith('XDIPO_RDC>frun'):
while i<len(outx):
i+=1
if i>=len(outx):
break
if outx[i].strip().startswith('C1='):
t=[]
l=outx[i].split()
for x in range(1,len(l),2):
t.append(l[x])
c.append(t)
break
i+=1
return c
def convert(pdb, new, wd):
if new.calculation.protocol.xrdc:
xfiles=[]
if len(new.calculation.protocol.xrdc)==1:
xfiles.append(new.calculation.protocol.xrdc.attrib_.xrdc_file)
else:
for i in range(len(new.calculation.protocol.xrdc)):
xfiles.append(new.calculation.protocol.xrdc[i].attrib_.xrdc_file)
else:
sys.exit('%s: RDC not found\n' % sys.argv[0])
try:
lpdb=open(pdb, 'r').readlines()
except IOError, (errno, strerror):
sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, pdb, strerror))
numMap = {}
for l in lpdb:
if l.strip().lower().startswith('atom'):
ls=l.split()
k='%s:%s' % (ls[4],ls[2])
numMap[k]=ls[1]
cmd=' /opt/local_prog/xplor-nih-2.22/bin/xplor tensor.inp'
outx=commands.getoutput(cmd)
outx=outx.split('\n')
#outx=open('xplor.outx').readlines()
c=searchC(outx)
out=[' &align\n']
out.append(' num_datasets=%d,\n' % len(xfiles))
out.append(' dcut=-1.0, freezemol=.false.,\n')
out.append(' ndip=10,')
out.append(' dcut=-1.0,dwt=92*0.1,\n')
out.append(' gigj=92*-3.163,\n')
out.append(' dij=92*1.01,\n')
s11=' s11='
s12=' s12='
s13=' s13='
s22=' s22='
s23=' s23='
for i in range(len(c)):
s11='%s%s,' % (s11, c[i][0])
s12='%s%s,' % (s12, c[i][1])
s13='%s%s,' % (s13, c[i][2])
s22='%s%s,' % (s22, c[i][3])
s23='%s%s,' % (s23, c[i][4])
out.append('%s\n' % s11)
out.append('%s\n' % s12)
out.append('%s\n' % s13)
out.append('%s\n' % s22)
out.append('%s\n' % s23)
counter=0
nrdc=0
for xfile in xfiles:
counter+=1
nxfile=os.path.join(wd, 'rdc_%d_web_enmr_normalized.tbl' % counter)
xfile=os.path.join(wd, xfile)
try:
normalize(xfile, nxfile, new, wd)
except:
sys.exit('%s: unable to normalize %s tbl file\n' % (sys.argv[0], xfile))
try:
xp=open(nxfile,'r').readlines()
except IOError, (errno, strerror):
sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, nxfile, strerror))
out.append(' dataset=%d,\n' % counter)
for l in xp:
if l.strip().startswith('assign'):
nrdc+=1
ls=l.split()
res=searchres(ls[31], lpdb)
kk='%s:%s' % (res, ls[34])
if convtable.has_key(kk):
ls[34]=convtable[kk].split(':')[1]
k='%s:%s' % (ls[31], ls[34])
natm1=numMap[k]
res=searchres(ls[38], lpdb)
kk='%s:%s' % (res, ls[41])
if convtable.has_key(kk):
ls[41]=convtable[kk].split(':')[1]
k='%s:%s' % (ls[38], ls[41])
natm2=numMap[k]
out.append(' id(%s)=%s, jd(%s)=%s, dobsl(%s)=%s, dobsu(%s)=%s, \n' %
(nrdc, natm1, nrdc, natm2, nrdc, ls[43], nrdc, ls[43]))
out[3]=' ndip=%d,' % nrdc
out.append(' &end')
return out
if __name__ == '__main__':
usage = "usage: %prog -w working_directory -p pdb_filename -o out_filename"
parser = OptionParser(usage)
parser.add_option("-w", "--wdir", dest="wd",
help="Working directory", metavar="WORKDIR")
parser.add_option("-p", "--pdbfile", dest="pdbfile",
help="PDB filename", metavar="FILE")
parser.add_option("-o", "--outfile", dest="outfile",
help="Output filename", metavar="FILE")
(options, args) = parser.parse_args()
if not options.wd:
parser.error("Working directory is required")
wd=os.path.abspath(options.wd)+'/'
if options.pdbfile:
pdbfile=os.path.join(wd, options.pdbfile)
else:
parser.error("PDB filename is required")
if options.outfile:
outfile=os.path.join(wd, options.outfile)
else:
parser.error("Output filename is required")
xml_input=os.path.join(wd,'input.xml')
doc = etree.parse(xml_input)
ndoc = etree.tostring(doc)
new=parse_node(etree.fromstring(ndoc))
out=convert(pdbfile, new, wd)
fout=open(outfile,'w')
fout.writelines(out)
fout.close() | 31.426471 | 154 | 0.511777 | #!/usr/bin/env python
'''
This program attempts to convert XPLOR RDC (residual dipolar coupling) restraints into AMBER format
XPLOR:
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) (resid 200 and name Y ) ( resid 13 and name C ) 0.2400 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y ) ( resid 13 and name CA ) 0.4300 0.2000
assign ( resid 200 and name OO ) ( resid 200 and name Z ) ( resid 200 and name X ) ( resid 200 and name Y )( resid 13 and name CB ) 0.1000 0.2000
AMBER:
&align
num_datasets=2,
dcut= -1.0, freezemol= .false.,
ndip= 10, dwt= 5*0.1, 5*0.1
gigj= 5*-3.1631,5*-3.1631,
dij= 5*1.041,5*1.041,
s11= -4.236,-4.236
s12= 56.860,56.860
s13= -34.696,-34.696
s22= -27.361,-27.361
s23= -12.867,-12.867
dataset=1,
id(1)=20, jd(1)=19, dobsl(1)=-2.13, dobsu(1)=-2.13,
id(2)=31, jd(2)=30, dobsl(2)= 1.10, dobsu(2)= 1.10,
id(3)=43, jd(3)=42, dobsl(3)=-5.54, dobsu(3)=-5.54,
...
...
&end
'''
import sys
import os
import commands
from optparse import OptionParser
from xml_parser import *
from normalize_tbl import normalize
from constants import convtable
def searchres(nres, lpdb):
for l in lpdb:
if l.strip().lower().startswith('atom'):
s=l.split()
if int(nres)==int(s[4]):
return s[3]
def searchC(outx):
i=0
c=[]
while i<len(outx):
if outx[i].strip().startswith('XDIPO_RDC>frun'):
while i<len(outx):
i+=1
if i>=len(outx):
break
if outx[i].strip().startswith('C1='):
t=[]
l=outx[i].split()
for x in range(1,len(l),2):
t.append(l[x])
c.append(t)
break
i+=1
return c
def convert(pdb, new, wd):
if new.calculation.protocol.xrdc:
xfiles=[]
if len(new.calculation.protocol.xrdc)==1:
xfiles.append(new.calculation.protocol.xrdc.attrib_.xrdc_file)
else:
for i in range(len(new.calculation.protocol.xrdc)):
xfiles.append(new.calculation.protocol.xrdc[i].attrib_.xrdc_file)
else:
sys.exit('%s: RDC not found\n' % sys.argv[0])
try:
lpdb=open(pdb, 'r').readlines()
except IOError, (errno, strerror):
sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, pdb, strerror))
numMap = {}
for l in lpdb:
if l.strip().lower().startswith('atom'):
ls=l.split()
k='%s:%s' % (ls[4],ls[2])
numMap[k]=ls[1]
cmd=' /opt/local_prog/xplor-nih-2.22/bin/xplor tensor.inp'
outx=commands.getoutput(cmd)
outx=outx.split('\n')
#outx=open('xplor.outx').readlines()
c=searchC(outx)
out=[' &align\n']
out.append(' num_datasets=%d,\n' % len(xfiles))
out.append(' dcut=-1.0, freezemol=.false.,\n')
out.append(' ndip=10,')
out.append(' dcut=-1.0,dwt=92*0.1,\n')
out.append(' gigj=92*-3.163,\n')
out.append(' dij=92*1.01,\n')
s11=' s11='
s12=' s12='
s13=' s13='
s22=' s22='
s23=' s23='
for i in range(len(c)):
s11='%s%s,' % (s11, c[i][0])
s12='%s%s,' % (s12, c[i][1])
s13='%s%s,' % (s13, c[i][2])
s22='%s%s,' % (s22, c[i][3])
s23='%s%s,' % (s23, c[i][4])
out.append('%s\n' % s11)
out.append('%s\n' % s12)
out.append('%s\n' % s13)
out.append('%s\n' % s22)
out.append('%s\n' % s23)
counter=0
nrdc=0
for xfile in xfiles:
counter+=1
nxfile=os.path.join(wd, 'rdc_%d_web_enmr_normalized.tbl' % counter)
xfile=os.path.join(wd, xfile)
try:
normalize(xfile, nxfile, new, wd)
except:
sys.exit('%s: unable to normalize %s tbl file\n' % (sys.argv[0], xfile))
try:
xp=open(nxfile,'r').readlines()
except IOError, (errno, strerror):
sys.exit('%s: IOError(%s): %s %s\n' % (sys.argv[0], errno, nxfile, strerror))
out.append(' dataset=%d,\n' % counter)
for l in xp:
if l.strip().startswith('assign'):
nrdc+=1
ls=l.split()
res=searchres(ls[31], lpdb)
kk='%s:%s' % (res, ls[34])
if convtable.has_key(kk):
ls[34]=convtable[kk].split(':')[1]
k='%s:%s' % (ls[31], ls[34])
natm1=numMap[k]
res=searchres(ls[38], lpdb)
kk='%s:%s' % (res, ls[41])
if convtable.has_key(kk):
ls[41]=convtable[kk].split(':')[1]
k='%s:%s' % (ls[38], ls[41])
natm2=numMap[k]
out.append(' id(%s)=%s, jd(%s)=%s, dobsl(%s)=%s, dobsu(%s)=%s, \n' %
(nrdc, natm1, nrdc, natm2, nrdc, ls[43], nrdc, ls[43]))
out[3]=' ndip=%d,' % nrdc
out.append(' &end')
return out
if __name__ == '__main__':
usage = "usage: %prog -w working_directory -p pdb_filename -o out_filename"
parser = OptionParser(usage)
parser.add_option("-w", "--wdir", dest="wd",
help="Working directory", metavar="WORKDIR")
parser.add_option("-p", "--pdbfile", dest="pdbfile",
help="PDB filename", metavar="FILE")
parser.add_option("-o", "--outfile", dest="outfile",
help="Output filename", metavar="FILE")
(options, args) = parser.parse_args()
if not options.wd:
parser.error("Working directory is required")
wd=os.path.abspath(options.wd)+'/'
if options.pdbfile:
pdbfile=os.path.join(wd, options.pdbfile)
else:
parser.error("PDB filename is required")
if options.outfile:
outfile=os.path.join(wd, options.outfile)
else:
parser.error("Output filename is required")
xml_input=os.path.join(wd,'input.xml')
doc = etree.parse(xml_input)
ndoc = etree.tostring(doc)
new=parse_node(etree.fromstring(ndoc))
out=convert(pdbfile, new, wd)
fout=open(outfile,'w')
fout.writelines(out)
fout.close() | 0 | 0 |
1441c3ed71c2dc67d784d782e0dab2d91d827d06 | 2,134 | py | Python | lptrack/versions.py | gieseladev/lptrack | fb4c64021c23522f96733db41ceb69f0ccb9b713 | [
"MIT"
] | null | null | null | lptrack/versions.py | gieseladev/lptrack | fb4c64021c23522f96733db41ceb69f0ccb9b713 | [
"MIT"
] | null | null | null | lptrack/versions.py | gieseladev/lptrack | fb4c64021c23522f96733db41ceb69f0ccb9b713 | [
"MIT"
] | null | null | null | """Versioned body readers and writers for track message bodies.
Attributes:
LATEST_VERSION (int): Latest version supported by the library.
"""
from typing import Callable, Tuple
from . import TrackInfo, codec
LATEST_VERSION = 2
def _read_body_v1_2(stream: codec.Reader, version: int) -> TrackInfo:
return TrackInfo(
title=stream.read_utf(),
author=stream.read_utf(),
duration=stream.read_long() / 1000,
identifier=stream.read_utf(),
is_stream=stream.read_bool(),
uri=stream.read_optional_utf() if version >= 2 else None,
)
def read_body_v1(stream: codec.Reader) -> TrackInfo:
return _read_body_v1_2(stream, 1)
def read_body_v2(stream: codec.Reader) -> TrackInfo:
return _read_body_v1_2(stream, 2)
def _write_body_v1_2(stream: codec.Writer, track: TrackInfo, version: int) -> None:
stream.write_utf(track.title)
stream.write_utf(track.author)
stream.write_long(int(track.duration * 1000))
stream.write_utf(track.identifier)
stream.write_bool(track.is_stream)
if version >= 2:
stream.write_optional_utf(track.uri)
def write_body_v1(stream: codec.Writer, track: TrackInfo) -> None:
_write_body_v1_2(stream, track, 1)
def write_body_v2(stream: codec.Writer, track: TrackInfo) -> None:
_write_body_v1_2(stream, track, 2)
ReaderType = Callable[[codec.Reader], TrackInfo]
WriterType = Callable[[codec.Writer, TrackInfo], None]
_FORMAT_VERSIONS = {
1: (read_body_v1, write_body_v1),
2: (read_body_v2, write_body_v2),
}
def _get_format(version: int) -> Tuple:
try:
return _FORMAT_VERSIONS[version]
except KeyError:
raise ValueError(f"Unsupported version: {version}") from None
def get_reader(version: int) -> ReaderType:
"""Get a body reader for the given version.
Raises:
ValueError: If the version isn't supported.
"""
return _get_format(version)[0]
def get_writer(version: int) -> WriterType:
"""Get a body writer for the given version.
Raises:
ValueError: If the version isn't supported.
"""
return _get_format(version)[1]
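# A minimal usage sketch of the dispatch helpers above. Constructing the
# codec.Reader/codec.Writer streams happens outside this module, so the
# `stream` and `track` objects referenced here are assumed to exist:
#
#   write_body = get_writer(LATEST_VERSION)
#   write_body(stream, track)      # serialise a TrackInfo body at the latest version
#
#   read_body = get_reader(2)
#   track = read_body(stream)      # parse a version-2 body back into a TrackInfo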
| 25.404762 | 83 | 0.698219 | """Versioned body readers and writers for track message bodies.
Attributes:
LATEST_VERSION (int): Latest version supported by the library.
"""
from typing import Callable, Tuple
from . import TrackInfo, codec
LATEST_VERSION = 2
def _read_body_v1_2(stream: codec.Reader, version: int) -> TrackInfo:
return TrackInfo(
title=stream.read_utf(),
author=stream.read_utf(),
duration=stream.read_long() / 1000,
identifier=stream.read_utf(),
is_stream=stream.read_bool(),
uri=stream.read_optional_utf() if version >= 2 else None,
)
def read_body_v1(stream: codec.Reader) -> TrackInfo:
return _read_body_v1_2(stream, 1)
def read_body_v2(stream: codec.Reader) -> TrackInfo:
return _read_body_v1_2(stream, 2)
def _write_body_v1_2(stream: codec.Writer, track: TrackInfo, version: int) -> None:
stream.write_utf(track.title)
stream.write_utf(track.author)
stream.write_long(int(track.duration * 1000))
stream.write_utf(track.identifier)
stream.write_bool(track.is_stream)
if version >= 2:
stream.write_optional_utf(track.uri)
def write_body_v1(stream: codec.Writer, track: TrackInfo) -> None:
_write_body_v1_2(stream, track, 1)
def write_body_v2(stream: codec.Writer, track: TrackInfo) -> None:
_write_body_v1_2(stream, track, 2)
ReaderType = Callable[[codec.Reader], TrackInfo]
WriterType = Callable[[codec.Writer, TrackInfo], None]
_FORMAT_VERSIONS = {
1: (read_body_v1, write_body_v1),
2: (read_body_v2, write_body_v2),
}
def _get_format(version: int) -> Tuple:
try:
return _FORMAT_VERSIONS[version]
except KeyError:
raise ValueError(f"Unsupported version: {version}") from None
def get_reader(version: int) -> ReaderType:
"""Get a body reader for the given version.
Raises:
ValueError: If the version isn't supported.
"""
return _get_format(version)[0]
def get_writer(version: int) -> WriterType:
"""Get a body writer for the given version.
Raises:
ValueError: If the version isn't supported.
"""
return _get_format(version)[1]
| 0 | 0 |
45b20d04060d1b766f35010e3ce9fedfd6a34eba | 96 | py | Python | venv/lib/python3.8/site-packages/poetry/core/toml/__init__.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/poetry/core/toml/__init__.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/poetry/core/toml/__init__.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/f3/de/85/7dca1e096a43e00e6ff1ca900dda1ca91c8c5c3a1d6798e466a9173a00 | 96 | 96 | 0.895833 | /home/runner/.cache/pip/pool/f3/de/85/7dca1e096a43e00e6ff1ca900dda1ca91c8c5c3a1d6798e466a9173a00 | 0 | 0 |
4053282fdcb4c61c6094cfb3f6a832822c2a096c | 2,371 | py | Python | venv/lib/python2.7/site-packages/cement/ext/ext_alarm.py | zwachtel11/fruitful-backend | 45b8994917182e7b684b9e25944cc79c9494c9f3 | [
"MIT"
] | 95 | 2018-06-05T10:49:32.000Z | 2019-12-31T11:07:36.000Z | v_env/lib/python3.7/site-packages/cement/ext/ext_alarm.py | buds-lab/expanded-psychrometric-chart | e7267f57584d8ba645507189ea4a8e474c67e0de | [
"MIT"
] | 51 | 2019-10-08T01:53:02.000Z | 2021-06-04T22:02:21.000Z | v_env/lib/python3.7/site-packages/cement/ext/ext_alarm.py | buds-lab/expanded-psychrometric-chart | e7267f57584d8ba645507189ea4a8e474c67e0de | [
"MIT"
] | 29 | 2018-09-17T06:10:32.000Z | 2022-03-19T13:15:30.000Z | """
The Alarm Extension provides easy access to setting an application alarm to
handle timing out operations. See the
`Python Signal Library <https://docs.python.org/3.5/library/signal.html>`_.
Requirements
------------
* No external dependencies.
* Only available on Unix/Linux
Configuration
-------------
This extension does not honor any application configuration settings.
Usage
-----
.. code-block:: python
import time
from cement.core.foundation import CementApp
from cement.core.exc import CaughtSignal
class MyApp(CementApp):
class Meta:
label = 'myapp'
exit_on_close = True
extensions = ['alarm']
with MyApp() as app:
try:
app.run()
app.alarm.set(3, "The operation timed out after 3 seconds!")
# do something that takes time to operate
time.sleep(5)
app.alarm.stop()
except CaughtSignal as e:
print(e.msg)
app.exit_code = 1
Looks like:
.. code-block:: console
$ python myapp.py
ERROR: The operation timed out after 3 seconds!
Caught signal 14
"""
import signal
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
def alarm_handler(app, signum, frame):
if signum == signal.SIGALRM:
app.log.error(app.alarm.msg)
class AlarmManager(object):
"""
Lets the developer easily set and stop an alarm. If the
alarm exceeds the given time it will raise ``signal.SIGALRM``.
"""
def __init__(self, *args, **kw):
super(AlarmManager, self).__init__(*args, **kw)
self.msg = None
def set(self, time, msg):
"""
Set the application alarm to ``time`` seconds. If the time is
exceeded ``signal.SIGALRM`` is raised.
:param time: The time in seconds to set the alarm to.
:param msg: The message to display if the alarm is triggered.
"""
LOG.debug('setting application alarm for %s seconds' % time)
self.msg = msg
signal.alarm(int(time))
def stop(self):
"""
Stop the application alarm.
"""
LOG.debug('stopping application alarm')
signal.alarm(0)
def load(app):
app.catch_signal(signal.SIGALRM)
app.extend('alarm', AlarmManager())
app.hook.register('signal', alarm_handler)
| 22.158879 | 75 | 0.619148 | """
The Alarm Extension provides easy access to setting an application alarm to
handle timing out operations. See the
`Python Signal Library <https://docs.python.org/3.5/library/signal.html>`_.
Requirements
------------
* No external dependencies.
* Only available on Unix/Linux
Configuration
-------------
This extension does not honor any application configuration settings.
Usage
-----
.. code-block:: python
import time
from cement.core.foundation import CementApp
from cement.core.exc import CaughtSignal
class MyApp(CementApp):
class Meta:
label = 'myapp'
exit_on_close = True
extensions = ['alarm']
with MyApp() as app:
try:
app.run()
app.alarm.set(3, "The operation timed out after 3 seconds!")
# do something that takes time to operate
time.sleep(5)
app.alarm.stop()
except CaughtSignal as e:
print(e.msg)
app.exit_code = 1
Looks like:
.. code-block:: console
$ python myapp.py
ERROR: The operation timed out after 3 seconds!
Caught signal 14
"""
import signal
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
def alarm_handler(app, signum, frame):
if signum == signal.SIGALRM:
app.log.error(app.alarm.msg)
class AlarmManager(object):
"""
Lets the developer easily set and stop an alarm. If the
alarm exceeds the given time it will raise ``signal.SIGALRM``.
"""
def __init__(self, *args, **kw):
super(AlarmManager, self).__init__(*args, **kw)
self.msg = None
def set(self, time, msg):
"""
Set the application alarm to ``time`` seconds. If the time is
exceeded ``signal.SIGALRM`` is raised.
:param time: The time in seconds to set the alarm to.
:param msg: The message to display if the alarm is triggered.
"""
LOG.debug('setting application alarm for %s seconds' % time)
self.msg = msg
signal.alarm(int(time))
def stop(self):
"""
Stop the application alarm.
"""
LOG.debug('stopping application alarm')
signal.alarm(0)
def load(app):
app.catch_signal(signal.SIGALRM)
app.extend('alarm', AlarmManager())
app.hook.register('signal', alarm_handler)
| 0 | 0 |
020f0bdb4147d07bfcd522f1fe1fb911459c901a | 8,550 | py | Python | meross_iot/cloud/devices/power_plugs.py | robertodormepoco/MerossIot | 95f7c235d0db7d07823cab7f8daed693fe35af96 | [
"MIT"
] | null | null | null | meross_iot/cloud/devices/power_plugs.py | robertodormepoco/MerossIot | 95f7c235d0db7d07823cab7f8daed693fe35af96 | [
"MIT"
] | null | null | null | meross_iot/cloud/devices/power_plugs.py | robertodormepoco/MerossIot | 95f7c235d0db7d07823cab7f8daed693fe35af96 | [
"MIT"
] | null | null | null | from meross_iot.cloud.abilities import *
from meross_iot.cloud.device import AbstractMerossDevice
from meross_iot.logger import POWER_PLUGS_LOGGER as l
from meross_iot.meross_event import DeviceSwitchStatusEvent
class GenericPlug(AbstractMerossDevice):
# Channels
_channels = []
# Dictionary {channel->status}
_state = {}
def __init__(self, cloud_client, device_uuid, **kwords):
super(GenericPlug, self).__init__(cloud_client, device_uuid, **kwords)
def _get_consumptionx(self):
return self.execute_command("GET", CONSUMPTIONX, {})
def _get_electricity(self):
return self.execute_command("GET", ELECTRICITY, {})
def _toggle(self, status, callback=None):
payload = {"channel": 0, "toggle": {"onoff": status}}
return self.execute_command("SET", TOGGLE, payload, callback=callback)
def _togglex(self, channel, status, callback=None):
payload = {'togglex': {"onoff": status, "channel": channel}}
return self.execute_command("SET", TOGGLEX, payload, callback=callback)
def _channel_control_impl(self, channel, status, callback=None):
if TOGGLE in self.get_abilities():
return self._toggle(status, callback=callback)
elif TOGGLEX in self.get_abilities():
return self._togglex(channel, status, callback=callback)
else:
raise Exception("The current device does not support neither TOGGLE nor TOGGLEX.")
def _handle_push_notification(self, namespace, payload, from_myself=False):
def fire_switch_state_change(dev, channel_id, o_state, n_state, f_myself):
if o_state != n_state:
evt = DeviceSwitchStatusEvent(dev=dev, channel_id=channel_id, switch_state=n_state,
generated_by_myself=f_myself)
self.fire_event(evt)
with self._state_lock:
if namespace == TOGGLE:
# Update the local state and fire the event only if the state actually changed
channel_index = 0
old_switch_state = self._state.get(channel_index)
switch_state = payload['toggle']['onoff'] == 1
self._state[channel_index] = switch_state
fire_switch_state_change(self, channel_index, old_switch_state, switch_state, from_myself)
elif namespace == TOGGLEX:
if isinstance(payload['togglex'], list):
for c in payload['togglex']:
# Update the local state and fire the event only if the state actually changed
channel_index = c['channel']
old_switch_state = self._state.get(channel_index)
switch_state = c['onoff'] == 1
self._state[channel_index] = switch_state
fire_switch_state_change(self, channel_index, old_switch_state, switch_state, from_myself)
elif isinstance(payload['togglex'], dict):
# Update the local state and fire the event only if the state actually changed
channel_index = payload['togglex']['channel']
old_switch_state = self._state.get(channel_index)
switch_state = payload['togglex']['onoff'] == 1
self._state[channel_index] = switch_state
fire_switch_state_change(self, channel_index, old_switch_state, switch_state, from_myself)
elif namespace == REPORT or namespace == CONSUMPTIONX:
                # For now, we simply ignore push notifications of this kind.
                # In the future, we might think of handling such notifications by caching them
                # and avoiding the network round-trip when asking for power consumption (if the
                # latest report is recent enough).
pass
else:
l.error("Unknown/Unsupported namespace/command: %s" % namespace)
def _get_status_impl(self):
res = {}
data = self.get_sys_data()['all']
if 'digest' in data:
for c in data['digest']['togglex']:
res[c['channel']] = c['onoff'] == 1
elif 'control' in data:
res[0] = data['control']['toggle']['onoff'] == 1
return res
def _get_channel_id(self, channel):
        # If the passed channel looks like the channel spec, look up its array index
if channel in self._channels:
return self._channels.index(channel)
# if a channel name is given, lookup the channel id from the name
if isinstance(channel, str):
for i, c in enumerate(self.get_channels()):
if c['devName'] == channel:
return c['channel']
# If an integer is given assume that is the channel ID
elif isinstance(channel, int):
return channel
# In other cases return an error
raise Exception("Invalid channel specified.")
def get_status(self, channel=0):
        # In order to optimize the network traffic, we don't call the get_status() API at every request.
        # On the contrary, we only call it the first time. Then, the rest of the API will silently listen
        # for state changes and will automatically update the self._state structure as messages arrive
        # from the device.
        # Such an approach, however, has a side effect. If we call TOGGLE/TOGGLEX and immediately after
        # we call get_status(), the reported status will still be the old one. This is a race condition,
        # because the "status" RESPONSE will be delivered some time after the TOGGLE REQUEST. It's not a
        # big issue for now, and synchronizing the two things would be inefficient and probably not very
        # useful. Just remember to wait some time before testing the status of the item after a toggle.
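        # A hedged illustration of the caveat above (the `plug` object and the
        # one-second delay are illustrative, not part of this class):
        #
        #   plug.turn_on(channel=0)
        #   time.sleep(1)               # let the TOGGLE/TOGGLEX push notification arrive
        #   assert plug.get_status(0)   # now reflects the new state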
with self._state_lock:
c = self._get_channel_id(channel)
if self._state == {}:
self._state = self._get_status_impl()
return self._state[c]
def get_power_consumption(self):
if CONSUMPTIONX in self.get_abilities():
return self._get_consumptionx()['consumptionx']
else:
# Not supported!
return None
def get_electricity(self):
if ELECTRICITY in self.get_abilities():
return self._get_electricity()['electricity']
else:
# Not supported!
return None
def get_channels(self):
return self._channels
def get_channel_status(self, channel):
c = self._get_channel_id(channel)
return self.get_status(c)
def turn_on_channel(self, channel, callback=None):
c = self._get_channel_id(channel)
return self._channel_control_impl(c, 1, callback=callback)
def turn_off_channel(self, channel, callback=None):
c = self._get_channel_id(channel)
return self._channel_control_impl(c, 0, callback=callback)
def turn_on(self, channel=0, callback=None):
c = self._get_channel_id(channel)
return self._channel_control_impl(c, 1, callback=callback)
def turn_off(self, channel=0, callback=None):
c = self._get_channel_id(channel)
return self._channel_control_impl(c, 0, callback=callback)
def get_usb_channel_index(self):
# Look for the usb channel
for i, c in enumerate(self.get_channels()):
if 'type' in c and c['type'] == 'USB':
return i
return None
def enable_usb(self, callback=None):
c = self.get_usb_channel_index()
if c is None:
return
else:
return self.turn_on_channel(c, callback=callback)
def disable_usb(self, callback=None):
c = self.get_usb_channel_index()
if c is None:
return
else:
return self.turn_off_channel(c, callback=callback)
def get_usb_status(self):
c = self.get_usb_channel_index()
if c is None:
return
else:
return self.get_channel_status(c)
def __str__(self):
base_str = super().__str__()
with self._state_lock:
if not self.online:
return base_str
channels = "Channels: "
channels += ",".join(["%d = %s" % (k, "ON" if v else "OFF") for k, v in enumerate(self._state)])
return base_str + "\n" + "\n" + channels
| 42.75 | 114 | 0.619649 | from meross_iot.cloud.abilities import *
from meross_iot.cloud.device import AbstractMerossDevice
from meross_iot.logger import POWER_PLUGS_LOGGER as l
from meross_iot.meross_event import DeviceSwitchStatusEvent
class GenericPlug(AbstractMerossDevice):
# Channels
_channels = []
# Dictionary {channel->status}
_state = {}
def __init__(self, cloud_client, device_uuid, **kwords):
super(GenericPlug, self).__init__(cloud_client, device_uuid, **kwords)
def _get_consumptionx(self):
return self.execute_command("GET", CONSUMPTIONX, {})
def _get_electricity(self):
return self.execute_command("GET", ELECTRICITY, {})
def _toggle(self, status, callback=None):
payload = {"channel": 0, "toggle": {"onoff": status}}
return self.execute_command("SET", TOGGLE, payload, callback=callback)
def _togglex(self, channel, status, callback=None):
payload = {'togglex': {"onoff": status, "channel": channel}}
return self.execute_command("SET", TOGGLEX, payload, callback=callback)
def _channel_control_impl(self, channel, status, callback=None):
if TOGGLE in self.get_abilities():
return self._toggle(status, callback=callback)
elif TOGGLEX in self.get_abilities():
return self._togglex(channel, status, callback=callback)
else:
            raise Exception("The current device supports neither TOGGLE nor TOGGLEX.")
def _handle_push_notification(self, namespace, payload, from_myself=False):
def fire_switch_state_change(dev, channel_id, o_state, n_state, f_myself):
if o_state != n_state:
evt = DeviceSwitchStatusEvent(dev=dev, channel_id=channel_id, switch_state=n_state,
generated_by_myself=f_myself)
self.fire_event(evt)
with self._state_lock:
if namespace == TOGGLE:
# Update the local state and fire the event only if the state actually changed
channel_index = 0
old_switch_state = self._state.get(channel_index)
switch_state = payload['toggle']['onoff'] == 1
self._state[channel_index] = switch_state
fire_switch_state_change(self, channel_index, old_switch_state, switch_state, from_myself)
elif namespace == TOGGLEX:
if isinstance(payload['togglex'], list):
for c in payload['togglex']:
# Update the local state and fire the event only if the state actually changed
channel_index = c['channel']
old_switch_state = self._state.get(channel_index)
switch_state = c['onoff'] == 1
self._state[channel_index] = switch_state
fire_switch_state_change(self, channel_index, old_switch_state, switch_state, from_myself)
elif isinstance(payload['togglex'], dict):
# Update the local state and fire the event only if the state actually changed
channel_index = payload['togglex']['channel']
old_switch_state = self._state.get(channel_index)
switch_state = payload['togglex']['onoff'] == 1
self._state[channel_index] = switch_state
fire_switch_state_change(self, channel_index, old_switch_state, switch_state, from_myself)
elif namespace == REPORT or namespace == CONSUMPTIONX:
# For now, we simply ignore push notification of these kind.
# In the future, we might think of handling such notification by caching them
# and avoid the network round-trip when asking for power consumption (if the latest report is
# recent enough)
pass
else:
l.error("Unknown/Unsupported namespace/command: %s" % namespace)
def _get_status_impl(self):
res = {}
data = self.get_sys_data()['all']
if 'digest' in data:
for c in data['digest']['togglex']:
res[c['channel']] = c['onoff'] == 1
elif 'control' in data:
res[0] = data['control']['toggle']['onoff'] == 1
return res
def _get_channel_id(self, channel):
        # If the passed channel is already one of the channel specs, look up its array index
if channel in self._channels:
return self._channels.index(channel)
# if a channel name is given, lookup the channel id from the name
if isinstance(channel, str):
for i, c in enumerate(self.get_channels()):
if c['devName'] == channel:
return c['channel']
# If an integer is given assume that is the channel ID
elif isinstance(channel, int):
return channel
# In other cases return an error
raise Exception("Invalid channel specified.")
def get_status(self, channel=0):
# In order to optimize the network traffic, we don't call the get_status() api at every request.
# On the contrary, we only call it the first time. Then, the rest of the API will silently listen
# for state changes and will automatically update the self._state structure listening for
# messages of the device.
# Such approach, however, has a side effect. If we call TOGGLE/TOGGLEX and immediately after we call
# get_status(), the reported status will be still the old one. This is a race condition because the
# "status" RESPONSE will be delivered some time after the TOGGLE REQUEST. It's not a big issue for now,
# and synchronizing the two things would be inefficient and probably not very useful.
# Just remember to wait some time before testing the status of the item after a toggle.
with self._state_lock:
c = self._get_channel_id(channel)
if self._state == {}:
self._state = self._get_status_impl()
return self._state[c]
def get_power_consumption(self):
if CONSUMPTIONX in self.get_abilities():
return self._get_consumptionx()['consumptionx']
else:
# Not supported!
return None
def get_electricity(self):
if ELECTRICITY in self.get_abilities():
return self._get_electricity()['electricity']
else:
# Not supported!
return None
def get_channels(self):
return self._channels
def get_channel_status(self, channel):
c = self._get_channel_id(channel)
return self.get_status(c)
def turn_on_channel(self, channel, callback=None):
c = self._get_channel_id(channel)
return self._channel_control_impl(c, 1, callback=callback)
def turn_off_channel(self, channel, callback=None):
c = self._get_channel_id(channel)
return self._channel_control_impl(c, 0, callback=callback)
def turn_on(self, channel=0, callback=None):
c = self._get_channel_id(channel)
return self._channel_control_impl(c, 1, callback=callback)
def turn_off(self, channel=0, callback=None):
c = self._get_channel_id(channel)
return self._channel_control_impl(c, 0, callback=callback)
def get_usb_channel_index(self):
# Look for the usb channel
for i, c in enumerate(self.get_channels()):
if 'type' in c and c['type'] == 'USB':
return i
return None
def enable_usb(self, callback=None):
c = self.get_usb_channel_index()
if c is None:
return
else:
return self.turn_on_channel(c, callback=callback)
def disable_usb(self, callback=None):
c = self.get_usb_channel_index()
if c is None:
return
else:
return self.turn_off_channel(c, callback=callback)
def get_usb_status(self):
c = self.get_usb_channel_index()
if c is None:
return
else:
return self.get_channel_status(c)
def __str__(self):
base_str = super().__str__()
with self._state_lock:
if not self.online:
return base_str
channels = "Channels: "
channels += ",".join(["%d = %s" % (k, "ON" if v else "OFF") for k, v in enumerate(self._state)])
return base_str + "\n" + "\n" + channels
| 0 | 0 |
65badce87f001e1fa721ea024ae15e4646615075 | 568 | py | Python | django_sphinx_db/backend/sphinx/introspection.py | petekalo/django-sphinx-db | b9190fafab62e69f84b4474f65c1d77f04c313f1 | [
"BSD-3-Clause"
] | null | null | null | django_sphinx_db/backend/sphinx/introspection.py | petekalo/django-sphinx-db | b9190fafab62e69f84b4474f65c1d77f04c313f1 | [
"BSD-3-Clause"
] | null | null | null | django_sphinx_db/backend/sphinx/introspection.py | petekalo/django-sphinx-db | b9190fafab62e69f84b4474f65c1d77f04c313f1 | [
"BSD-3-Clause"
] | null | null | null | from django.db.backends.mysql.introspection import *
from django.db.backends.mysql.introspection import DatabaseIntrospection as MYSQLDatabaseIntrospection
from django.utils.functional import cached_property
class DatabaseIntrospection(MYSQLDatabaseIntrospection):
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SHOW TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
| 33.411765 | 102 | 0.702465 | from django.db.backends.mysql.introspection import *
from django.db.backends.mysql.introspection import DatabaseIntrospection as MYSQLDatabaseIntrospection
from django.utils.functional import cached_property
class DatabaseIntrospection(MYSQLDatabaseIntrospection):
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SHOW TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
| 0 | 0 |
c4fb2fe7e75e47425721a49f845719bb9e6c655f | 1,634 | py | Python | tests/test_highiq.py | ClariNerd617/HighIQ | 0305902f889da869535834620bb4fb15ac54b11d | [
"BSD-3-Clause"
] | 6 | 2020-03-16T14:14:45.000Z | 2021-09-21T06:39:57.000Z | tests/test_highiq.py | ClariNerd617/HighIQ | 0305902f889da869535834620bb4fb15ac54b11d | [
"BSD-3-Clause"
] | null | null | null | tests/test_highiq.py | ClariNerd617/HighIQ | 0305902f889da869535834620bb4fb15ac54b11d | [
"BSD-3-Clause"
] | 3 | 2019-12-16T19:56:35.000Z | 2021-06-09T14:14:47.000Z | import highiq
import numpy as np
def test_io():
my_ds = highiq.io.load_arm_netcdf(highiq.testing.TEST_FILE)
assert 'acf' in my_ds.variables.keys()
assert 'acf_bkg' in my_ds.variables.keys()
my_ds.close()
def test_spectra():
my_ds = highiq.io.load_arm_netcdf(highiq.testing.TEST_FILE)
my_spectra = highiq.calc.get_psd(my_ds)
assert 'power_spectral_density_interp' in my_spectra.variables.keys()
assert 'power_spectral_density' in my_spectra.variables.keys()
psd = my_spectra['power_spectra_normed'].sel(range=400, method='nearest')
vel_bins = my_spectra['vel_bins']
dV = vel_bins[1] - vel_bins[0]
np.testing.assert_almost_equal(psd.sum()*dV.values, 100)
my_ds.close()
my_spectra.close()
def test_moments():
my_ds = highiq.io.load_arm_netcdf(highiq.testing.TEST_FILE)
my_spectra = highiq.calc.get_psd(my_ds)
my_moments = highiq.calc.get_lidar_moments(my_spectra)
intensity = my_moments['intensity'].values
velocity = my_moments['doppler_velocity'].values
assert np.nanmin(intensity) > 1.
assert np.nanmin(velocity) < -2.
my_ds.close()
my_spectra.close()
def test_peaks():
my_ds = highiq.io.load_arm_netcdf(highiq.testing.TEST_FILE)
my_spectra = highiq.calc.get_psd(my_ds)
my_peaks = highiq.calc.calc_num_peaks(my_spectra)
my_moments = highiq.calc.get_lidar_moments(my_spectra)
my_peaks['npeaks'] = my_peaks['npeaks'].where(my_moments.intensity > 1.1)
num_peaks = my_peaks['npeaks'].values
assert np.nanmax(num_peaks) == 3
my_ds.close()
my_spectra.close()
my_peaks.close()
my_moments.close()
| 32.68 | 77 | 0.719706 | import highiq
import numpy as np
def test_io():
my_ds = highiq.io.load_arm_netcdf(highiq.testing.TEST_FILE)
assert 'acf' in my_ds.variables.keys()
assert 'acf_bkg' in my_ds.variables.keys()
my_ds.close()
def test_spectra():
my_ds = highiq.io.load_arm_netcdf(highiq.testing.TEST_FILE)
my_spectra = highiq.calc.get_psd(my_ds)
assert 'power_spectral_density_interp' in my_spectra.variables.keys()
assert 'power_spectral_density' in my_spectra.variables.keys()
psd = my_spectra['power_spectra_normed'].sel(range=400, method='nearest')
vel_bins = my_spectra['vel_bins']
dV = vel_bins[1] - vel_bins[0]
np.testing.assert_almost_equal(psd.sum()*dV.values, 100)
my_ds.close()
my_spectra.close()
def test_moments():
my_ds = highiq.io.load_arm_netcdf(highiq.testing.TEST_FILE)
my_spectra = highiq.calc.get_psd(my_ds)
my_moments = highiq.calc.get_lidar_moments(my_spectra)
intensity = my_moments['intensity'].values
velocity = my_moments['doppler_velocity'].values
assert np.nanmin(intensity) > 1.
assert np.nanmin(velocity) < -2.
my_ds.close()
my_spectra.close()
def test_peaks():
my_ds = highiq.io.load_arm_netcdf(highiq.testing.TEST_FILE)
my_spectra = highiq.calc.get_psd(my_ds)
my_peaks = highiq.calc.calc_num_peaks(my_spectra)
my_moments = highiq.calc.get_lidar_moments(my_spectra)
my_peaks['npeaks'] = my_peaks['npeaks'].where(my_moments.intensity > 1.1)
num_peaks = my_peaks['npeaks'].values
assert np.nanmax(num_peaks) == 3
my_ds.close()
my_spectra.close()
my_peaks.close()
my_moments.close()
| 0 | 0 |
77ab3b36a849175fa4c24f12a76941077ea58584 | 570 | py | Python | scripts/docker/migrate.py | guligon90/uac-registry | cb5afe941919c2d9ceffa8d8bf220613b7a20613 | [
"MIT"
] | null | null | null | scripts/docker/migrate.py | guligon90/uac-registry | cb5afe941919c2d9ceffa8d8bf220613b7a20613 | [
"MIT"
] | null | null | null | scripts/docker/migrate.py | guligon90/uac-registry | cb5afe941919c2d9ceffa8d8bf220613b7a20613 | [
"MIT"
] | null | null | null | # Base imports
import subprocess
from typing import Iterable, Optional
# Project imports
from docker import common
from docker.run import run
def migrate(arguments: Iterable[str], deps: Optional[bool] = True) -> int:
print(">>>>>>>>>> Running database migration <<<<<<<<<<")
run(['backend', 'python3', common.MANAGE_PY, 'migrate'], deps)
def make_migrations(arguments: Iterable[str], deps: Optional[bool] = True) -> int:
    print(">>>>>>>>>> Creating database migrations <<<<<<<<<<")
run(['backend', 'python3', common.MANAGE_PY, 'makemigrations'], deps)
| 31.666667 | 82 | 0.670175 | # Base imports
import subprocess
from typing import Iterable, Optional
# Project imports
from docker import common
from docker.run import run
def migrate(arguments: Iterable[str], deps: Optional[bool] = True) -> int:
print(">>>>>>>>>> Running database migration <<<<<<<<<<")
run(['backend', 'python3', common.MANAGE_PY, 'migrate'], deps)
def make_migrations(arguments: Iterable[str], deps: Optional[bool] = True) -> int:
    print(">>>>>>>>>> Creating database migrations <<<<<<<<<<")
run(['backend', 'python3', common.MANAGE_PY, 'makemigrations'], deps)
| 0 | 0 |
f979d82751598eba221d7677df764b4451b8c896 | 971 | py | Python | adw_test/make_small_dataset.py | clinfo/DeepKF | ee4f1be28e5f3bfa46bb47dbdc4d5f678eed36c1 | [
"MIT"
] | 5 | 2019-12-19T13:33:36.000Z | 2021-06-01T06:08:16.000Z | adw_test/make_small_dataset.py | clinfo/DeepKF | ee4f1be28e5f3bfa46bb47dbdc4d5f678eed36c1 | [
"MIT"
] | 24 | 2020-03-03T19:40:55.000Z | 2021-05-26T15:27:38.000Z | adw_test/make_small_dataset.py | clinfo/DeepKF | ee4f1be28e5f3bfa46bb47dbdc4d5f678eed36c1 | [
"MIT"
] | 1 | 2019-12-19T13:35:07.000Z | 2019-12-19T13:35:07.000Z | import json
import glob
import numpy as np
import os
path = "data_state_space_v3/"
out_path = "small_data/"
files = glob.glob(path + "*.npy")  # wildcards can be used
train_data_num = 100
test_data_num = 10
train_data = {}
test_data = {}
for filename in files:
obj = np.load(filename)
if filename.find("_test.npy") >= 0:
test_data[filename] = obj
else:
train_data[filename] = obj
os.makedirs(out_path, exist_ok=True)
for k, v in train_data.items():
b = os.path.basename(k)
print(b, v.shape)
o = v[:train_data_num]
np.save(out_path + b, o)
for k, v in test_data.items():
b = os.path.basename(k)
print(b, v.shape)
o = v[:test_data_num]
np.save(out_path + b, o)
fp = open(path + "pack_selected_info.json")
obj = json.load(fp)
obj["pid_list_train"] = obj["pid_list_train"][:train_data_num]
obj["pid_list_test"] = obj["pid_list_test"][:test_data_num]
fp = open(out_path + "pack_selected_info.json", "w")
json.dump(obj, fp)
| 26.243243 | 62 | 0.669413 | import json
import glob
import numpy as np
import os
path = "data_state_space_v3/"
out_path = "small_data/"
files = glob.glob(path + "*.npy") # wildcards can be used
train_data_num = 100
test_data_num = 10
train_data = {}
test_data = {}
for filename in files:
obj = np.load(filename)
if filename.find("_test.npy") >= 0:
test_data[filename] = obj
else:
train_data[filename] = obj
os.makedirs(out_path, exist_ok=True)
for k, v in train_data.items():
b = os.path.basename(k)
print(b, v.shape)
o = v[:train_data_num]
np.save(out_path + b, o)
for k, v in test_data.items():
b = os.path.basename(k)
print(b, v.shape)
o = v[:test_data_num]
np.save(out_path + b, o)
fp = open(path + "pack_selected_info.json")
obj = json.load(fp)
obj["pid_list_train"] = obj["pid_list_train"][:train_data_num]
obj["pid_list_test"] = obj["pid_list_test"][:test_data_num]
fp = open(out_path + "pack_selected_info.json", "w")
json.dump(obj, fp)
| 36 | 0 |
991fa516fb5524187777ee16359f8b1f0cb6ad59 | 859 | py | Python | 3M/W9/7.py | allenalvin333/Hackerrank_Prep | 26ed5b874daba4775d006824d36f9e82ea5ff1ea | [
"MIT"
] | 2 | 2021-11-25T13:38:36.000Z | 2021-11-25T13:42:56.000Z | 3M/W9/7.py | allenalvin333/Hackerrank_Prep | 26ed5b874daba4775d006824d36f9e82ea5ff1ea | [
"MIT"
] | null | null | null | 3M/W9/7.py | allenalvin333/Hackerrank_Prep | 26ed5b874daba4775d006824d36f9e82ea5ff1ea | [
"MIT"
] | 1 | 2021-11-25T13:38:43.000Z | 2021-11-25T13:38:43.000Z | # https://www.hackerrank.com/challenges/three-month-preparation-kit-maxsubarray/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'maxSubarray' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY arr as parameter.
#
def maxSubarray(arr):
p = max(0,arr[0])
l = e = m = arr[0]
for z in arr[1:]:
e,m,l,p = max(z,e+z),max(m,max(z,e+z)),max(l,z),max(0,z)+p
return m,l if(l<0) else p
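# Notes on the rolling variables above (descriptive comments only):
#   e = best sum of a subarray ending at the current element (Kadane's recurrence)
#   m = best contiguous-subarray sum seen so far
#   l = largest single element, used when every element is negative
#   p = running sum of the positive elements, i.e. the best non-contiguous pick
# e.g. arr = [2, -1, 2, 3, 4, -5] gives contiguous max 10 and non-contiguous max 11.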
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input().strip())
for t_itr in range(t):
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = maxSubarray(arr)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close() | 21.475 | 87 | 0.615832 | # https://www.hackerrank.com/challenges/three-month-preparation-kit-maxsubarray/problem
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'maxSubarray' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY arr as parameter.
#
def maxSubarray(arr):
p = max(0,arr[0])
l = e = m = arr[0]
for z in arr[1:]:
e,m,l,p = max(z,e+z),max(m,max(z,e+z)),max(l,z),max(0,z)+p
return m,l if(l<0) else p
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input().strip())
for t_itr in range(t):
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = maxSubarray(arr)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close() | 0 | 0 |
8c69cd5831be940ef0acbf79e3188bc3d1996e08 | 2,343 | py | Python | models/correlation_package/correlation.py | HimankSehgal/DOCS-pytorch | b870babf2bbe2673f68e9134f071d990fb6f4693 | [
"MIT"
] | 55 | 2018-11-27T13:00:55.000Z | 2022-03-22T15:08:20.000Z | models/correlation_package/correlation.py | HimankSehgal/DOCS-pytorch | b870babf2bbe2673f68e9134f071d990fb6f4693 | [
"MIT"
] | 2 | 2018-12-27T07:41:16.000Z | 2020-08-14T01:37:46.000Z | models/correlation_package/correlation.py | HimankSehgal/DOCS-pytorch | b870babf2bbe2673f68e9134f071d990fb6f4693 | [
"MIT"
] | 14 | 2019-01-17T08:10:08.000Z | 2021-12-21T09:44:20.000Z | import torch
from torch.nn.modules.module import Module
from torch.autograd import Function
import correlation_cuda
class CorrelationFunction(Function):
@staticmethod
def forward(ctx, input1, input2, param_dict):
ctx.save_for_backward(input1, input2)
ctx.pad_size = param_dict["pad_size"]
ctx.kernel_size = param_dict["kernel_size"]
ctx.max_disp = param_dict["max_disp"]
ctx.stride1 = param_dict["stride1"]
ctx.stride2 = param_dict["stride2"]
ctx.corr_multiply = param_dict["corr_multiply"]
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
output = input1.new()
correlation_cuda.forward(input1, input2, rbot1, rbot2, output,
ctx.pad_size, ctx.kernel_size, ctx.max_disp, ctx.stride1, ctx.stride2, ctx.corr_multiply)
return output
@staticmethod
def backward(ctx, grad_output):
input1, input2 = ctx.saved_tensors
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
grad_input1 = input1.new()
grad_input2 = input2.new()
correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2,
ctx.pad_size, ctx.kernel_size, ctx.max_disp, ctx.stride1, ctx.stride2, ctx.corr_multiply)
return grad_input1, grad_input2, None
class Correlation(Module):
def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, stride2=2, corr_multiply=1):
super(Correlation, self).__init__()
self.pad_size = pad_size
self.kernel_size = kernel_size
self.max_displacement = max_displacement
self.stride1 = stride1
self.stride2 = stride2
self.corr_multiply = corr_multiply
self.out_channels = ((max_displacement/stride2)*2 + 1) * ((max_displacement/stride2)*2 + 1)
def forward(self, input1, input2):
param_dict = {'pad_size':self.pad_size, 'kernel_size':self.kernel_size,
'max_disp':self.max_displacement, 'stride1':self.stride1,
'stride2':self.stride2, 'corr_multiply':self.corr_multiply}
result = CorrelationFunction.apply(input1, input2, param_dict)
return result
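# Illustrative sketch only (the settings below are assumed FlowNet-style values, not taken
# from this repository): out_channels = ((max_displacement / stride2) * 2 + 1) ** 2, so
# max_displacement=20 with stride2=2 yields a 21 x 21 = 441-channel cost volume.
def _example_correlation_layer():
    corr = Correlation(pad_size=20, kernel_size=1, max_displacement=20,
                       stride1=1, stride2=2, corr_multiply=1)
    return corr.out_channels  # 441 channels (a float under Python 3's true division)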
| 36.046154 | 109 | 0.654716 | import torch
from torch.nn.modules.module import Module
from torch.autograd import Function
import correlation_cuda
class CorrelationFunction(Function):
@staticmethod
def forward(ctx, input1, input2, param_dict):
ctx.save_for_backward(input1, input2)
ctx.pad_size = param_dict["pad_size"]
ctx.kernel_size = param_dict["kernel_size"]
ctx.max_disp = param_dict["max_disp"]
ctx.stride1 = param_dict["stride1"]
ctx.stride2 = param_dict["stride2"]
ctx.corr_multiply = param_dict["corr_multiply"]
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
output = input1.new()
correlation_cuda.forward(input1, input2, rbot1, rbot2, output,
ctx.pad_size, ctx.kernel_size, ctx.max_disp, ctx.stride1, ctx.stride2, ctx.corr_multiply)
return output
@staticmethod
def backward(ctx, grad_output):
input1, input2 = ctx.saved_tensors
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
grad_input1 = input1.new()
grad_input2 = input2.new()
correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2,
ctx.pad_size, ctx.kernel_size, ctx.max_disp, ctx.stride1, ctx.stride2, ctx.corr_multiply)
return grad_input1, grad_input2, None
class Correlation(Module):
def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, stride2=2, corr_multiply=1):
super(Correlation, self).__init__()
self.pad_size = pad_size
self.kernel_size = kernel_size
self.max_displacement = max_displacement
self.stride1 = stride1
self.stride2 = stride2
self.corr_multiply = corr_multiply
self.out_channels = ((max_displacement/stride2)*2 + 1) * ((max_displacement/stride2)*2 + 1)
def forward(self, input1, input2):
param_dict = {'pad_size':self.pad_size, 'kernel_size':self.kernel_size,
'max_disp':self.max_displacement, 'stride1':self.stride1,
'stride2':self.stride2, 'corr_multiply':self.corr_multiply}
result = CorrelationFunction.apply(input1, input2, param_dict)
return result
| 0 | 0 |
be789e67d9aef4a43064ec6b0aac240e98f4e74f | 4,286 | py | Python | hep_cnn/tensorrt/convert_tensorrt_tf_integrated.py | NERSC/inference_benchmarks | e51453a755aaece91f7e08e92453a4050722071a | [
"BSD-3-Clause"
] | 1 | 2019-08-29T03:33:58.000Z | 2019-08-29T03:33:58.000Z | hep_cnn/tensorrt/convert_tensorrt_tf_integrated.py | NERSC/inference_benchmarks | e51453a755aaece91f7e08e92453a4050722071a | [
"BSD-3-Clause"
] | null | null | null | hep_cnn/tensorrt/convert_tensorrt_tf_integrated.py | NERSC/inference_benchmarks | e51453a755aaece91f7e08e92453a4050722071a | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import data_flow_ops
import tensorflow.contrib.tensorrt as trt
import numpy as np
import time
from tensorflow.python.platform import gfile
from tensorflow.python.client import timeline
import argparse, sys, itertools,datetime
import json
tf.logging.set_verbosity(tf.logging.INFO)
import os
from utils import *
def getGraph(filename):
with gfile.FastGFile(filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
return graph_def
def getFP32(input_file, output_prefix, output, batch_size=128, workspace_size=1<<20):
trt_graph = trt.create_inference_graph(getGraph(input_file), output,
max_batch_size=batch_size,
max_workspace_size_bytes=workspace_size,
precision_mode="FP32") # Get optimized graph
with gfile.FastGFile(output_prefix+'.FP32.pb', 'wb') as f:
f.write(trt_graph.SerializeToString())
return trt_graph
def getFP16(input_file, output_prefix, output, batch_size=128, workspace_size=1<<20):
trt_graph = trt.create_inference_graph(getGraph(input_file), output,
max_batch_size=batch_size,
max_workspace_size_bytes=workspace_size,
precision_mode="FP16") # Get optimized graph
with gfile.FastGFile(output_prefix+'.FP16.pb','wb') as f:
f.write(trt_graph.SerializeToString())
return trt_graph
def getINT8CalibGraph(input_file, output_prefix, output, batch_size=128, workspace_size=1<<20):
trt_graph = trt.create_inference_graph(getGraph(input_file), output,
max_batch_size=batch_size,
max_workspace_size_bytes=workspace_size,
precision_mode="INT8") # calibration
with gfile.FastGFile(output_prefix+'.INT8Calib.pb','wb') as f:
f.write(trt_graph.SerializeToString())
return trt_graph
def getINT8InferenceGraph(output_prefix, calibGraph):
trt_graph=trt.calib_graph_to_infer_graph(calibGraph)
with gfile.FastGFile(output_prefix+'.INT8.pb','wb') as f:
f.write(trt_graph.SerializeToString())
return trt_graph
#main
if "__main__" in __name__:
P=argparse.ArgumentParser(prog="trt_convert")
P.add_argument('--FP32',action='store_true')
P.add_argument('--FP16',action='store_true')
P.add_argument('--INT8',action='store_true')
P.add_argument('--input_file',type=str)
P.add_argument('--input_path_calibration',type=str,default='./',help="path to read input files from for calibration mode")
P.add_argument('--output_prefix',type=str)
P.add_argument('--batch_size',type=int, default=32)
P.add_argument('--num_calibration_runs',type=int, default=10)
  P.add_argument('--workspace_size',type=int, default=1<<20,help="workspace size in bytes")
P.add_argument('--gpu', type=int, default=0)
#P.add_argument('--update_graphdef',action='store_true')
#parse args
f,unparsed=P.parse_known_args()
#select the GPU
os.environ["CUDA_VISIBLE_DEVICES"]=str(f.gpu) #selects a specific device
#create a session just in case
sess = tf.Session()
#print graph
print_graph(f.input_file)
#do the conversion
if f.FP32:
getFP32(input_file=f.input_file, output_prefix=f.output_prefix, output=["Softmax"], batch_size=f.batch_size, workspace_size=f.workspace_size)
if f.FP16:
getFP16(input_file=f.input_file, output_prefix=f.output_prefix, output=["Softmax"], batch_size=f.batch_size, workspace_size=f.workspace_size)
if f.INT8:
calibGraph = getINT8CalibGraph(input_file=f.input_file, output_prefix=f.output_prefix, output=["Softmax"], batch_size=f.batch_size, workspace_size=f.workspace_size)
print('Calibrating Graph...')
#run graph
runGraph(calibGraph, f.batch_size, f.num_calibration_runs, "Placeholder", ["Softmax"], dtype=np.float32, input_data=f.input_path_calibration)
print('done...')
#get int8 graph
getINT8InferenceGraph(output_prefix=f.output_prefix, calibGraph=calibGraph)
sys.exit(0)
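# Example invocation (file names are hypothetical; the flags are the ones parsed above):
#   python convert_tensorrt_tf_integrated.py --FP16 \
#       --input_file frozen_graph.pb --output_prefix hep_cnn \
#       --batch_size 32 --workspace_size 1073741824 --gpu 0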
| 41.61165 | 168 | 0.704153 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import data_flow_ops
import tensorflow.contrib.tensorrt as trt
import numpy as np
import time
from tensorflow.python.platform import gfile
from tensorflow.python.client import timeline
import argparse, sys, itertools,datetime
import json
tf.logging.set_verbosity(tf.logging.INFO)
import os
from utils import *
def getGraph(filename):
with gfile.FastGFile(filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
return graph_def
def getFP32(input_file, output_prefix, output, batch_size=128, workspace_size=1<<20):
trt_graph = trt.create_inference_graph(getGraph(input_file), output,
max_batch_size=batch_size,
max_workspace_size_bytes=workspace_size,
precision_mode="FP32") # Get optimized graph
with gfile.FastGFile(output_prefix+'.FP32.pb', 'wb') as f:
f.write(trt_graph.SerializeToString())
return trt_graph
def getFP16(input_file, output_prefix, output, batch_size=128, workspace_size=1<<20):
trt_graph = trt.create_inference_graph(getGraph(input_file), output,
max_batch_size=batch_size,
max_workspace_size_bytes=workspace_size,
precision_mode="FP16") # Get optimized graph
with gfile.FastGFile(output_prefix+'.FP16.pb','wb') as f:
f.write(trt_graph.SerializeToString())
return trt_graph
def getINT8CalibGraph(input_file, output_prefix, output, batch_size=128, workspace_size=1<<20):
trt_graph = trt.create_inference_graph(getGraph(input_file), output,
max_batch_size=batch_size,
max_workspace_size_bytes=workspace_size,
precision_mode="INT8") # calibration
with gfile.FastGFile(output_prefix+'.INT8Calib.pb','wb') as f:
f.write(trt_graph.SerializeToString())
return trt_graph
def getINT8InferenceGraph(output_prefix, calibGraph):
trt_graph=trt.calib_graph_to_infer_graph(calibGraph)
with gfile.FastGFile(output_prefix+'.INT8.pb','wb') as f:
f.write(trt_graph.SerializeToString())
return trt_graph
#main
if "__main__" in __name__:
P=argparse.ArgumentParser(prog="trt_convert")
P.add_argument('--FP32',action='store_true')
P.add_argument('--FP16',action='store_true')
P.add_argument('--INT8',action='store_true')
P.add_argument('--input_file',type=str)
P.add_argument('--input_path_calibration',type=str,default='./',help="path to read input files from for calibration mode")
P.add_argument('--output_prefix',type=str)
P.add_argument('--batch_size',type=int, default=32)
P.add_argument('--num_calibration_runs',type=int, default=10)
  P.add_argument('--workspace_size',type=int, default=1<<20,help="workspace size in bytes")
P.add_argument('--gpu', type=int, default=0)
#P.add_argument('--update_graphdef',action='store_true')
#parse args
f,unparsed=P.parse_known_args()
#select the GPU
os.environ["CUDA_VISIBLE_DEVICES"]=str(f.gpu) #selects a specific device
#create a session just in case
sess = tf.Session()
#print graph
print_graph(f.input_file)
#do the conversion
if f.FP32:
getFP32(input_file=f.input_file, output_prefix=f.output_prefix, output=["Softmax"], batch_size=f.batch_size, workspace_size=f.workspace_size)
if f.FP16:
getFP16(input_file=f.input_file, output_prefix=f.output_prefix, output=["Softmax"], batch_size=f.batch_size, workspace_size=f.workspace_size)
if f.INT8:
calibGraph = getINT8CalibGraph(input_file=f.input_file, output_prefix=f.output_prefix, output=["Softmax"], batch_size=f.batch_size, workspace_size=f.workspace_size)
print('Calibrating Graph...')
#run graph
runGraph(calibGraph, f.batch_size, f.num_calibration_runs, "Placeholder", ["Softmax"], dtype=np.float32, input_data=f.input_path_calibration)
print('done...')
#get int8 graph
getINT8InferenceGraph(output_prefix=f.output_prefix, calibGraph=calibGraph)
sys.exit(0)
| 0 | 0 |
745b720c7aee2c3450c7326ecfc4595bc580fc48 | 1,812 | py | Python | SSD1306.py | krandor/weather_station | 06a0c88d92893a95aaabd5bbc2892a99ae4be8e1 | [
"MIT"
] | null | null | null | SSD1306.py | krandor/weather_station | 06a0c88d92893a95aaabd5bbc2892a99ae4be8e1 | [
"MIT"
] | null | null | null | SSD1306.py | krandor/weather_station | 06a0c88d92893a95aaabd5bbc2892a99ae4be8e1 | [
"MIT"
] | null | null | null | import Adafruit_SSD1306
import Image
import ImageDraw
import ImageFont
# I2C ADDRESS / BITS
SSD1306_ADDRESS = 0x3C
class Ssd1306(object):
_display = None
_draw = None
_image = None
_font = None
_height = 0
_width = 0
def __init__(self, i2c_bus = 0, ssd1306_rst = "22"):
"""
:type i2c_bus: int specifying i2c bus number
:type ssd1306_rst: string specifying GPIO pin for RST
"""
# 128x32 display with hardware I2C:
self._display = Adafruit_SSD1306.SSD1306_128_32(rst=ssd1306_rst, i2c_bus=i2c_bus)
# Initialize library.
self._display.begin()
# Clear display.
self._display.clear()
self._display.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
self._width = self._display.width
self._height = self._display.height
self._image = Image.new('1', (self._width, self._height))
# Get drawing object to draw on image.
self._draw = ImageDraw.Draw(self._image)
# Load default font.
self._font = ImageFont.load_default()
@property
def height(self):
return self._height
@property
def width(self):
return self._width
def clear_display(self):
self._draw.rectangle((0, 0, self._width, self._height), outline=0, fill=0)
def draw_text(self, texttowrite, x, y):
self._draw.text((x, y), texttowrite, font=self._font, fill=255)
def display_image(self):
self._display.image(self._image)
self._display.display()
def image_width(self):
width, height = self._image.size
return width
def get_text_width(self, text):
width, height = self._font.getsize(text)
return width
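# Illustrative sketch only (assumes the default wiring above: I2C bus 0, RST on GPIO "22",
# and an attached 128x32 panel):
def _example_banner(message="Hello"):
    oled = Ssd1306()
    oled.clear_display()
    x = max(0, (oled.width - oled.get_text_width(message)) // 2)  # centre the text
    oled.draw_text(message, x, 10)
    oled.display_image()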
| 27.876923 | 89 | 0.631898 | import Adafruit_SSD1306
import Image
import ImageDraw
import ImageFont
# I2C ADDRESS / BITS
SSD1306_ADDRESS = 0x3C
class Ssd1306(object):
_display = None
_draw = None
_image = None
_font = None
_height = 0
_width = 0
def __init__(self, i2c_bus = 0, ssd1306_rst = "22"):
"""
:type i2c_bus: int specifying i2c bus number
:type ssd1306_rst: string specifying GPIO pin for RST
"""
# 128x32 display with hardware I2C:
self._display = Adafruit_SSD1306.SSD1306_128_32(rst=ssd1306_rst, i2c_bus=i2c_bus)
# Initialize library.
self._display.begin()
# Clear display.
self._display.clear()
self._display.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
self._width = self._display.width
self._height = self._display.height
self._image = Image.new('1', (self._width, self._height))
# Get drawing object to draw on image.
self._draw = ImageDraw.Draw(self._image)
# Load default font.
self._font = ImageFont.load_default()
@property
def height(self):
return self._height
@property
def width(self):
return self._width
def clear_display(self):
self._draw.rectangle((0, 0, self._width, self._height), outline=0, fill=0)
def draw_text(self, texttowrite, x, y):
self._draw.text((x, y), texttowrite, font=self._font, fill=255)
def display_image(self):
self._display.image(self._image)
self._display.display()
def image_width(self):
width, height = self._image.size
return width
def get_text_width(self, text):
width, height = self._font.getsize(text)
return width
| 0 | 0 |
9235c6c3f07aa312f105c296304b0e62256a9961 | 260 | py | Python | sandbox/flask/multi_page_form/compute.py | carlosal1015/proofofconcept | 579873aff082e6fa497a387e0d0a5f8e5ec3ecd2 | [
"CC-BY-4.0"
] | 14 | 2015-01-02T19:39:36.000Z | 2022-03-09T06:08:10.000Z | sandbox/flask/multi_page_form/compute.py | carlosal1015/proofofconcept | 579873aff082e6fa497a387e0d0a5f8e5ec3ecd2 | [
"CC-BY-4.0"
] | 242 | 2015-01-02T13:59:58.000Z | 2022-03-27T17:22:21.000Z | sandbox/flask/multi_page_form/compute.py | carlosal1015/proofofconcept | 579873aff082e6fa497a387e0d0a5f8e5ec3ecd2 | [
"CC-BY-4.0"
] | 6 | 2015-02-13T16:00:25.000Z | 2020-08-05T17:51:26.000Z |
def arg_count(inference_rule):
# these should be determined based on string match in CSV file
num_input=2
num_feed=1
num_output=1
return num_input,num_feed,num_output
if __name__ == '__main__':
print compute(1, 0.1) # default values
| 21.666667 | 66 | 0.711538 |
def arg_count(inference_rule):
# these should be determined based on string match in CSV file
num_input=2
num_feed=1
num_output=1
return num_input,num_feed,num_output
if __name__ == '__main__':
print compute(1, 0.1) # default values
| 0 | 0 |
53fad1f9197d87945dd3f90bc49bebbc3ce82648 | 6,042 | py | Python | nevergrad/functions/corefuncs.py | se4u/nevergrad | 38924bc7b0bff834316ccf974922db2c22be1606 | [
"MIT"
] | 1 | 2021-04-21T09:19:44.000Z | 2021-04-21T09:19:44.000Z | nevergrad/functions/corefuncs.py | se4u/nevergrad | 38924bc7b0bff834316ccf974922db2c22be1606 | [
"MIT"
] | null | null | null | nevergrad/functions/corefuncs.py | se4u/nevergrad | 38924bc7b0bff834316ccf974922db2c22be1606 | [
"MIT"
] | 1 | 2019-12-12T10:36:54.000Z | 2019-12-12T10:36:54.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ..optimization import discretization
from ..common.decorators import Registry
registry = Registry()
def _onemax(x: np.ndarray) -> float:
return len(x) - sum(1 if int(round(w)) == 1 else 0 for w in x)
def _leadingones(x: np.ndarray) -> float:
for i, x_ in enumerate(list(x)):
if int(round(x_)) != 1:
return len(x) - i
return 0
def _jump(x: np.ndarray) -> float: # TODO: docstring?
n = len(x)
m = n // 4
o = n - _onemax(x)
if o == n or o <= n - m:
return n - m - o
return o # Deceptive part.
def _styblinksitang(x: np.ndarray, noise: float) -> float:
x = np.asarray(x)
val = np.sum(np.power(x, 4) - 16 * np.power(x, 2) + 5 * x)
# return a positive value for maximization
return float(39.16599 * len(x) + 1 * 0.5 * val + noise * np.random.normal(size=val.shape))
@registry.register
def sphere(x: np.ndarray) -> float:
return float(np.sum(x**2))
@registry.register
def sphere1(x: np.ndarray) -> float:
return float(np.sum((x - 1.)**2))
@registry.register
def sphere2(x: np.ndarray) -> float:
return float(np.sum((x - 2.)**2))
@registry.register
def sphere4(x: np.ndarray) -> float:
return float(np.sum((x - 4.)**2))
@registry.register
def maxdeceptive(x: np.ndarray) -> float:
dec = 3 * x**2 - (2 / (3**(x - 2)**2 + .1))
return float(np.max(dec))
@registry.register
def sumdeceptive(x: np.ndarray) -> float:
dec = 3 * x**2 - (2 / (3**(x - 2)**2 + .1))
return float(np.sum(dec))
@registry.register
def cigar(x: np.ndarray) -> float:
return float(x[0]**2 + 1000000. * np.sum(x[1:]**2))
@registry.register
def ellipsoid(x: np.ndarray) -> float:
return sum((10**(6 * (i - 1) / float(len(x) - 1))) * (x[i]**2) for i in range(len(x)))
@registry.register
def rastrigin(x: np.ndarray) -> float:
cosi = float(np.sum(np.cos(2 * np.pi * x)))
return float(10 * (len(x) - cosi) + sphere(x))
@registry.register
def hm(x: np.ndarray) -> float:
return float(np.sum((x**2) * (1.1 + np.cos(1. / x))))
@registry.register
def rosenbrock(x: np.ndarray) -> float:
return sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
@registry.register
def lunacek(x: np.ndarray) -> float:
""" Based on https://www.cs.unm.edu/~neal.holts/dga/benchmarkFunction/lunacek.html."""
problemDimensions = len(x)
s = 1.0 - (1.0 / (2.0 * np.sqrt(problemDimensions + 20.0) - 8.2))
mu1 = 2.5
mu2 = - np.sqrt(abs((mu1**2 - 1.0) / s))
firstSum = 0.0
secondSum = 0.0
thirdSum = 0.0
for i in range(problemDimensions):
firstSum += (x[i]-mu1)**2
secondSum += (x[i]-mu2)**2
thirdSum += 1.0 - np.cos(2*np.pi*(x[i]-mu1))
return min(firstSum, 1.0*problemDimensions + secondSum)+10*thirdSum
# following functions using discretization should not be used with translation/rotation
@registry.register_with_info(no_transfrom=True)
def hardonemax(y: np.ndarray) -> float:
return _onemax(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardjump(y: np.ndarray) -> float:
return _jump(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardleadingones(y: np.ndarray) -> float:
return _leadingones(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardonemax5(y: np.ndarray) -> float:
return _onemax(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def hardjump5(y: np.ndarray) -> float:
return _jump(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def hardleadingones5(y: np.ndarray) -> float:
return _leadingones(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def onemax(y: np.ndarray) -> float:
return _onemax(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def jump(y: np.ndarray) -> float:
return _jump(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def leadingones(y: np.ndarray) -> float:
return _leadingones(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def onemax5(y: np.ndarray) -> float:
return _onemax(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def jump5(y: np.ndarray) -> float:
return _jump(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def leadingones5(y: np.ndarray) -> float:
return _leadingones(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def genzcornerpeak(y: np.ndarray) -> float:
value = float(1 + np.mean(np.tanh(y)))
if value == 0:
return float("inf")
return value**(-len(y) - 1)
@registry.register_with_info(no_transfrom=True)
def minusgenzcornerpeak(y: np.ndarray) -> float:
return -float(genzcornerpeak(y))
@registry.register
def genzgaussianpeakintegral(x: np.ndarray) -> float:
return float(np.exp(-np.sum(x**2 / 4.)))
@registry.register
def minusgenzgaussianpeakintegral(x: np.ndarray) -> float:
return -float(np.exp(-sum(x**2 / 4.)))
@registry.register
def slope(x: np.ndarray) -> float:
return sum(x)
@registry.register
def linear(x: np.ndarray) -> float:
return float(np.tanh(x[0]))
@registry.register
def st0(x: np.ndarray) -> float:
return _styblinksitang(x, 0)
@registry.register
def st1(x: np.ndarray) -> float:
return _styblinksitang(x, 1)
@registry.register
def st10(x: np.ndarray) -> float:
return _styblinksitang(x, 10)
@registry.register
def st100(x: np.ndarray) -> float:
return _styblinksitang(x, 100)
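# Illustrative sketch only: the functions above are plain callables on numpy arrays,
# and sphere, rastrigin and cigar all evaluate to zero at the origin.
def _example_values(dim=4):
    x = np.zeros(dim)
    return sphere(x), rastrigin(x), cigar(x)  # (0.0, 0.0, 0.0)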
| 26.269565 | 94 | 0.677756 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ..optimization import discretization
from ..common.decorators import Registry
registry = Registry()
def _onemax(x: np.ndarray) -> float:
return len(x) - sum(1 if int(round(w)) == 1 else 0 for w in x)
def _leadingones(x: np.ndarray) -> float:
for i, x_ in enumerate(list(x)):
if int(round(x_)) != 1:
return len(x) - i
return 0
def _jump(x: np.ndarray) -> float: # TODO: docstring?
n = len(x)
m = n // 4
o = n - _onemax(x)
if o == n or o <= n - m:
return n - m - o
return o # Deceptive part.
def _styblinksitang(x: np.ndarray, noise: float) -> float:
x = np.asarray(x)
val = np.sum(np.power(x, 4) - 16 * np.power(x, 2) + 5 * x)
# return a positive value for maximization
return float(39.16599 * len(x) + 1 * 0.5 * val + noise * np.random.normal(size=val.shape))
@registry.register
def sphere(x: np.ndarray) -> float:
return float(np.sum(x**2))
@registry.register
def sphere1(x: np.ndarray) -> float:
return float(np.sum((x - 1.)**2))
@registry.register
def sphere2(x: np.ndarray) -> float:
return float(np.sum((x - 2.)**2))
@registry.register
def sphere4(x: np.ndarray) -> float:
return float(np.sum((x - 4.)**2))
@registry.register
def maxdeceptive(x: np.ndarray) -> float:
dec = 3 * x**2 - (2 / (3**(x - 2)**2 + .1))
return float(np.max(dec))
@registry.register
def sumdeceptive(x: np.ndarray) -> float:
dec = 3 * x**2 - (2 / (3**(x - 2)**2 + .1))
return float(np.sum(dec))
@registry.register
def cigar(x: np.ndarray) -> float:
return float(x[0]**2 + 1000000. * np.sum(x[1:]**2))
@registry.register
def ellipsoid(x: np.ndarray) -> float:
return sum((10**(6 * (i - 1) / float(len(x) - 1))) * (x[i]**2) for i in range(len(x)))
@registry.register
def rastrigin(x: np.ndarray) -> float:
cosi = float(np.sum(np.cos(2 * np.pi * x)))
return float(10 * (len(x) - cosi) + sphere(x))
@registry.register
def hm(x: np.ndarray) -> float:
return float(np.sum((x**2) * (1.1 + np.cos(1. / x))))
@registry.register
def rosenbrock(x: np.ndarray) -> float:
return sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
@registry.register
def lunacek(x: np.ndarray) -> float:
""" Based on https://www.cs.unm.edu/~neal.holts/dga/benchmarkFunction/lunacek.html."""
problemDimensions = len(x)
s = 1.0 - (1.0 / (2.0 * np.sqrt(problemDimensions + 20.0) - 8.2))
mu1 = 2.5
mu2 = - np.sqrt(abs((mu1**2 - 1.0) / s))
firstSum = 0.0
secondSum = 0.0
thirdSum = 0.0
for i in range(problemDimensions):
firstSum += (x[i]-mu1)**2
secondSum += (x[i]-mu2)**2
thirdSum += 1.0 - np.cos(2*np.pi*(x[i]-mu1))
return min(firstSum, 1.0*problemDimensions + secondSum)+10*thirdSum
# following functions using discretization should not be used with translation/rotation
@registry.register_with_info(no_transfrom=True)
def hardonemax(y: np.ndarray) -> float:
return _onemax(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardjump(y: np.ndarray) -> float:
return _jump(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardleadingones(y: np.ndarray) -> float:
return _leadingones(discretization.threshold_discretization(y))
@registry.register_with_info(no_transfrom=True)
def hardonemax5(y: np.ndarray) -> float:
return _onemax(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def hardjump5(y: np.ndarray) -> float:
return _jump(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def hardleadingones5(y: np.ndarray) -> float:
return _leadingones(discretization.threshold_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def onemax(y: np.ndarray) -> float:
return _onemax(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def jump(y: np.ndarray) -> float:
return _jump(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def leadingones(y: np.ndarray) -> float:
return _leadingones(discretization.softmax_discretization(y))
@registry.register_with_info(no_transfrom=True)
def onemax5(y: np.ndarray) -> float:
return _onemax(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def jump5(y: np.ndarray) -> float:
return _jump(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def leadingones5(y: np.ndarray) -> float:
return _leadingones(discretization.softmax_discretization(y, 5))
@registry.register_with_info(no_transfrom=True)
def genzcornerpeak(y: np.ndarray) -> float:
value = float(1 + np.mean(np.tanh(y)))
if value == 0:
return float("inf")
return value**(-len(y) - 1)
@registry.register_with_info(no_transfrom=True)
def minusgenzcornerpeak(y: np.ndarray) -> float:
return -float(genzcornerpeak(y))
@registry.register
def genzgaussianpeakintegral(x: np.ndarray) -> float:
return float(np.exp(-np.sum(x**2 / 4.)))
@registry.register
def minusgenzgaussianpeakintegral(x: np.ndarray) -> float:
return -float(np.exp(-sum(x**2 / 4.)))
@registry.register
def slope(x: np.ndarray) -> float:
return sum(x)
@registry.register
def linear(x: np.ndarray) -> float:
return float(np.tanh(x[0]))
@registry.register
def st0(x: np.ndarray) -> float:
return _styblinksitang(x, 0)
@registry.register
def st1(x: np.ndarray) -> float:
return _styblinksitang(x, 1)
@registry.register
def st10(x: np.ndarray) -> float:
return _styblinksitang(x, 10)
@registry.register
def st100(x: np.ndarray) -> float:
return _styblinksitang(x, 100)
| 0 | 0 |
13c3bb6deea1739d745d8f14fbc2523bef0ec1cb | 187 | py | Python | HackerRank/Python/Collections/CompanyLogo.py | AdityaChirravuri/CompetitiveProgramming | 642550e8916b3f7939a1fdd52d10f5f8ae43f161 | [
"MIT"
] | 1 | 2021-07-13T01:49:25.000Z | 2021-07-13T01:49:25.000Z | HackerRank/Python/Collections/CompanyLogo.py | AdityaChirravuri/CompetitiveProgramming | 642550e8916b3f7939a1fdd52d10f5f8ae43f161 | [
"MIT"
] | null | null | null | HackerRank/Python/Collections/CompanyLogo.py | AdityaChirravuri/CompetitiveProgramming | 642550e8916b3f7939a1fdd52d10f5f8ae43f161 | [
"MIT"
] | null | null | null | string = input()
d = {}
for i in string:
if i in d:
d[i] += 1
else:
d[i] = 1
s = sorted(sorted(d), key = d.get, reverse = True)
for i in s[:3]:
print(i, d[i])
| 17 | 50 | 0.470588 | string = input()
d = {}
for i in string:
if i in d:
d[i] += 1
else:
d[i] = 1
s = sorted(sorted(d), key = d.get, reverse = True)
for i in s[:3]:
print(i, d[i])
| 0 | 0 |
559efba2e8c036ea8426a51546bc4e2aa0ff4a87 | 775 | py | Python | src/arrays/spiral-order-matrix.py | vighnesh153/ds-algo | 79c401dad2d2e575ce1913184ca8665f2712a5b8 | [
"MIT"
] | null | null | null | src/arrays/spiral-order-matrix.py | vighnesh153/ds-algo | 79c401dad2d2e575ce1913184ca8665f2712a5b8 | [
"MIT"
] | null | null | null | src/arrays/spiral-order-matrix.py | vighnesh153/ds-algo | 79c401dad2d2e575ce1913184ca8665f2712a5b8 | [
"MIT"
] | 1 | 2020-08-09T06:37:21.000Z | 2020-08-09T06:37:21.000Z | def solve(n):
result = [[0 for _ in range(n)] for __ in range(n)]
counter = 1
iteration = 0
while counter <= n * n:
i = j = iteration
while j < n - iteration:
result[i][j] = counter
counter += 1
j += 1
j -= 1
i += 1
while i < n - iteration:
result[i][j] = counter
counter += 1
i += 1
i -= 1
j -= 1
while j >= iteration:
result[i][j] = counter
counter += 1
j -= 1
j += 1
i -= 1
while i > iteration:
result[i][j] = counter
counter += 1
i -= 1
iteration += 1
return result
for row in solve(10):
print(row)
| 18.902439 | 55 | 0.390968 | def solve(n):
result = [[0 for _ in range(n)] for __ in range(n)]
counter = 1
iteration = 0
while counter <= n * n:
i = j = iteration
while j < n - iteration:
result[i][j] = counter
counter += 1
j += 1
j -= 1
i += 1
while i < n - iteration:
result[i][j] = counter
counter += 1
i += 1
i -= 1
j -= 1
while j >= iteration:
result[i][j] = counter
counter += 1
j -= 1
j += 1
i -= 1
while i > iteration:
result[i][j] = counter
counter += 1
i -= 1
iteration += 1
return result
for row in solve(10):
print(row)
| 0 | 0 |
992cbbffb5ba87a6ff522180da17ac5a5f6a4ccf | 181 | py | Python | codesignal/arcade/python/intro_52_longest_word.py | tinesife94/random | b802924dce4635ae074d30dc03962d4301bd6d8b | [
"MIT"
] | null | null | null | codesignal/arcade/python/intro_52_longest_word.py | tinesife94/random | b802924dce4635ae074d30dc03962d4301bd6d8b | [
"MIT"
] | null | null | null | codesignal/arcade/python/intro_52_longest_word.py | tinesife94/random | b802924dce4635ae074d30dc03962d4301bd6d8b | [
"MIT"
] | null | null | null | def solution(text):
    import string
    letters = string.ascii_letters
s = ''
for c in text:
s = '{}{}'.format(s, c if c in letters else ' ')
return max(s.split(), key=len)
| 25.857143 | 60 | 0.541436 | def solution(text):
    import string
    letters = string.ascii_letters
s = ''
for c in text:
s = '{}{}'.format(s, c if c in letters else ' ')
return max(s.split(), key=len)
| 0 | 0 |
521e8eaf2200d791a592c600d965d7937ff762c4 | 10,855 | py | Python | src/database.py | vimc/montagu | c9682b3e57cf25e75b5b7688f748c8dbe882666d | [
"MIT"
] | null | null | null | src/database.py | vimc/montagu | c9682b3e57cf25e75b5b7688f748c8dbe882666d | [
"MIT"
] | 59 | 2017-07-28T09:27:15.000Z | 2021-11-01T17:16:59.000Z | src/database.py | vimc/montagu | c9682b3e57cf25e75b5b7688f748c8dbe882666d | [
"MIT"
] | 1 | 2020-09-23T11:08:34.000Z | 2020-09-23T11:08:34.000Z | import random
import string
from subprocess import run
from types import SimpleNamespace
import psycopg2
import versions
from docker_helpers import get_image_name, pull, exec_safely
from service_config import api_db_user
from settings import get_secret
root_user = "vimc"
# these tables should only be modified via sql migrations
protected_tables = ["gavi_support_level", "activity_type",
"burden_outcome",
"gender",
"responsibility_set_status",
"impact_outcome",
"gavi_support_level",
"support_type",
"touchstone_status",
"permission",
"role",
"role_permission"]
def user_configs(password_group):
# Later, read these from a yml file?
return [
UserConfig(api_db_user, 'all',
VaultPassword(password_group, api_db_user)),
UserConfig('import', 'all', VaultPassword(password_group, 'import')),
UserConfig('orderly', 'all', VaultPassword(password_group, 'orderly')),
UserConfig('readonly', 'readonly',
VaultPassword(password_group, 'readonly')),
]
class GeneratePassword:
def get(self):
return ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.digits) for _ in range(50))
def __str__(self):
return "Generated"
class VaultPassword:
def __init__(self, password_group, username):
self.password_group = password_group
self.username = username
def get(self):
if self.password_group is None:
return "changeme" if self.username == "vimc" else self.username
else:
return get_secret(self._path(), field="password")
def _path(self):
if self.password_group is None:
raise Exception("_path() is not defined without a password group")
else:
return "database/{password_group}/users/{username}".format(
password_group=self.password_group, username=self.username)
def __str__(self):
if self.password_group is None:
return "Using default password value"
else:
return "From vault at " + self._path()
class UserConfig:
def __init__(self, name, permissions, password_source, option=None):
self.name = name
self.permissions = permissions # Currently, this can only be 'all', but the idea is to extend this config later
self.password_source = password_source
self.option = option.upper() if option else ""
self._password = None
# Lazy password resolution
@property
def password(self):
if self._password is None:
self._password = self.password_source.get()
return self._password
@classmethod
def create(self, name, permissions, password_group, option):
password = VaultPassword(password_group, name)
return UserConfig(name, permissions, password, option)
def set_root_password(service, password):
query = "ALTER USER {user} WITH PASSWORD '{password}'".format(
user=root_user, password=password)
service.db.exec_run(
'psql -U {user} -d postgres -c "{query}"'.format(user=root_user,
query=query))
def connect(user, password, host="localhost", port=5432):
conn_settings = {
"host": host,
"port": port,
"name": "montagu",
"user": user,
"password": password
}
conn_string_template = "host='{host}' port='{port}' dbname='{name}' user='{user}' password='{password}'"
conn_string = conn_string_template.format(**conn_settings)
return psycopg2.connect(conn_string)
def create_user(db, user):
sql = """DO
$body$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_user WHERE usename = '{name}') THEN
CREATE ROLE {name} {option} LOGIN PASSWORD '{password}';
END IF;
END
$body$""".format(name=user.name, password=user.password, option=user.option)
db.execute(sql)
def set_password(db, user):
db.execute(
"ALTER USER {name} WITH PASSWORD '{password}'".format(name=user.name,
password=user.password))
def revoke_all(db, user):
def revoke_all_on(what):
db.execute(
"REVOKE ALL PRIVILEGES ON ALL {what} IN SCHEMA public FROM {name}".format(
name=user.name, what=what))
revoke_all_on("tables")
revoke_all_on("sequences")
revoke_all_on("functions")
def revoke_write_on_protected_tables(db, user):
def revoke_specific_on(what):
db.execute(
"REVOKE INSERT, UPDATE, DELETE ON {what} FROM {name}".format(
name=user.name, what=what))
for table in protected_tables:
revoke_specific_on(table)
def grant_all(db, user):
def grant_all_on(what):
db.execute(
"GRANT ALL PRIVILEGES ON ALL {what} IN SCHEMA public TO {name}".format(
name=user.name, what=what))
print(" - Granting all permissions to {name}".format(name=user.name))
grant_all_on("tables")
grant_all_on("sequences")
grant_all_on("functions")
def grant_readonly(db, user):
print(" - Granting readonly permissions to {name}".format(name=user.name))
db.execute("GRANT SELECT ON ALL TABLES IN SCHEMA public TO {name}".format(
name=user.name))
def set_permissions(db, user):
revoke_all(db, user)
if user.permissions == 'all':
grant_all(db, user)
elif user.permissions == 'readonly':
grant_readonly(db, user)
elif user.permissions == 'pass':
pass
else:
template = "Unhandled permission type '{permissions}' for user '{name}'"
raise Exception(
template.format(name=user.name, permissions=user.permissions))
def migrate_schema_core(service, root_password):
network_name = service.network_name
print("- migrating schema")
image = "vimc/{name}:{version}".format(name="montagu-migrate",
version=versions.db)
pull(image)
cmd = ["docker", "run", "--rm", "--network=" + network_name, image] + \
["-user=vimc", "-password=" + root_password, "migrate"]
run(cmd, check=True)
def setup_user(db, user):
print(" - " + user.name)
create_user(db, user)
set_password(db, user)
set_permissions(db, user)
def for_each_user(root_password, users, operation):
"""Operation is a callback (function) that takes the connection cursor
and a UserConfig object"""
with connect(root_user, root_password) as conn:
with conn.cursor() as cur:
for user in users:
operation(cur, user)
conn.commit()
def setup(service):
print("Waiting for the database to accept connections")
exec_safely(service.db, ["montagu-wait.sh", "7200"], check=True)
password_group = service.settings["password_group"]
print("Setting up database users")
print("- Scrambling root password")
if password_group is not None:
root_password = GeneratePassword().get()
else:
root_password = 'changeme'
set_root_password(service, root_password)
print("- Getting user configurations")
users = user_configs(password_group)
print("- Getting user passwords")
passwords = {}
for user in users:
print(" - {name}: {source}".format(name=user.name,
source=user.password_source))
passwords[user.name] = user.password
# NOTE: As I work through this - why not set up users *after* the
# schema migration? This is because the migration user needs to
# exist, though in practice we don't use them so this could be
# reordered later.
print("- Updating database users")
for_each_user(root_password, users, setup_user)
print("- Migrating database schema")
migrate_schema_core(service, root_password)
print("- Refreshing permissions")
# The migrations may have added new tables, so we should set the permissions
# again, in case users need to have permissions on these new tables
for_each_user(root_password, users, set_permissions)
# Revoke specific permissions now that all tables have been created.
for_each_user(root_password, users, revoke_write_on_protected_tables)
setup_streaming_replication(root_password, service)
return passwords
# NOTE: it might be worth revisiting this to not run this script
# directly (that requires corresponding changes in montagu-db to move
# the inline sql into a standalone .sql file and then getting psql to
# run it via docker exec - it must run as the vimc user). The
# passwords might move directly under control here using set_password
# (but these are special users so we'd not want to use the rest of the
# user machinery). But I suggest waiting until the restore is done
# (VIMC-1560) because that is likely to affect how we deal with users.
def setup_streaming_replication(root_password, service):
if service.settings['enable_db_replication']:
print("Setting up streaming replication")
password_group = service.settings['password_group']
barman = UserConfig.create("barman", "pass",
password_group, "superuser")
streaming_barman = UserConfig.create("streaming_barman", "pass",
password_group, "replication")
with connect(root_user, root_password) as conn:
with conn.cursor() as db:
create_user(db, barman)
create_user(db, streaming_barman)
pw_barman = VaultPassword(password_group, "barman").get()
pw_stream = VaultPassword(password_group, "streaming_barman").get()
cmd = ["enable-replication.sh", pw_barman, pw_stream]
exec_safely(service.db, cmd, check=True)
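# Rough sketch of the docker-exec route mentioned in the note above
# setup_streaming_replication: run a standalone .sql file through psql as the
# vimc user. The helper name and file path are hypothetical and nothing in
# this module calls it.
def run_sql_file_as_vimc(service, path_inside_container):
    # exec_run is the same container-exec helper used by set_root_password
    service.db.exec_run(["psql", "-U", "vimc", "-d", "montagu",
                         "-f", path_inside_container])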
def prepare_db_for_import(service):
    print("Preparing database for import")
## NOTE: this could otherwise be done by connecting using the
    ## connection function, but that requires further changes to
## the connect function to allow connection to the postgres
## maintenance database. This way works for now. This also
## allows us to avoid working out what the root password will be
    ## because we're interacting without passwords over exec (see the sketch
    ## of the connection-based alternative just after this function).
db = service.db
print("- deleting and recreating database")
db.exec_run(["dropdb", "-U", "vimc", "--if-exists", "montagu"])
db.exec_run(["createdb", "-U", "vimc", "montagu"])
print("- configuring users")
users = user_configs(service.settings["password_group"])
for user in users:
db.exec_run(["createuser", "-U", "vimc", user.name])
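# Sketch of the connection-based alternative mentioned in the note inside
# prepare_db_for_import: talk to the postgres maintenance database directly
# instead of going through docker exec. Hypothetical helper; it assumes the
# root password is known and the maintenance database accepts connections.
def connect_maintenance(password, host="localhost", port=5432):
    conn_string = ("host='{host}' port='{port}' dbname='postgres' "
                   "user='{user}' password='{password}'").format(
        host=host, port=port, user=root_user, password=password)
    return psycopg2.connect(conn_string)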
| 35.825083 | 120 | 0.64597 | import random
import string
from subprocess import run
from types import SimpleNamespace
import psycopg2
import versions
from docker_helpers import get_image_name, pull, exec_safely
from service_config import api_db_user
from settings import get_secret
root_user = "vimc"
# these tables should only be modified via sql migrations
protected_tables = ["gavi_support_level", "activity_type",
"burden_outcome",
"gender",
"responsibility_set_status",
"impact_outcome",
"gavi_support_level",
"support_type",
"touchstone_status",
"permission",
"role",
"role_permission"]
def user_configs(password_group):
# Later, read these from a yml file?
return [
UserConfig(api_db_user, 'all',
VaultPassword(password_group, api_db_user)),
UserConfig('import', 'all', VaultPassword(password_group, 'import')),
UserConfig('orderly', 'all', VaultPassword(password_group, 'orderly')),
UserConfig('readonly', 'readonly',
VaultPassword(password_group, 'readonly')),
]
class GeneratePassword:
def get(self):
return ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.digits) for _ in range(50))
def __str__(self):
return "Generated"
class VaultPassword:
def __init__(self, password_group, username):
self.password_group = password_group
self.username = username
def get(self):
if self.password_group is None:
return "changeme" if self.username == "vimc" else self.username
else:
return get_secret(self._path(), field="password")
def _path(self):
if self.password_group is None:
raise Exception("_path() is not defined without a password group")
else:
return "database/{password_group}/users/{username}".format(
password_group=self.password_group, username=self.username)
def __str__(self):
if self.password_group is None:
return "Using default password value"
else:
return "From vault at " + self._path()
class UserConfig:
def __init__(self, name, permissions, password_source, option=None):
self.name = name
self.permissions = permissions # Currently, this can only be 'all', but the idea is to extend this config later
self.password_source = password_source
self.option = option.upper() if option else ""
self._password = None
# Lazy password resolution
@property
def password(self):
if self._password is None:
self._password = self.password_source.get()
return self._password
@classmethod
def create(self, name, permissions, password_group, option):
password = VaultPassword(password_group, name)
return UserConfig(name, permissions, password, option)
def set_root_password(service, password):
query = "ALTER USER {user} WITH PASSWORD '{password}'".format(
user=root_user, password=password)
service.db.exec_run(
'psql -U {user} -d postgres -c "{query}"'.format(user=root_user,
query=query))
def connect(user, password, host="localhost", port=5432):
conn_settings = {
"host": host,
"port": port,
"name": "montagu",
"user": user,
"password": password
}
conn_string_template = "host='{host}' port='{port}' dbname='{name}' user='{user}' password='{password}'"
conn_string = conn_string_template.format(**conn_settings)
return psycopg2.connect(conn_string)
def create_user(db, user):
sql = """DO
$body$
BEGIN
IF NOT EXISTS (SELECT FROM pg_catalog.pg_user WHERE usename = '{name}') THEN
CREATE ROLE {name} {option} LOGIN PASSWORD '{password}';
END IF;
END
$body$""".format(name=user.name, password=user.password, option=user.option)
db.execute(sql)
def set_password(db, user):
db.execute(
"ALTER USER {name} WITH PASSWORD '{password}'".format(name=user.name,
password=user.password))
def revoke_all(db, user):
def revoke_all_on(what):
db.execute(
"REVOKE ALL PRIVILEGES ON ALL {what} IN SCHEMA public FROM {name}".format(
name=user.name, what=what))
revoke_all_on("tables")
revoke_all_on("sequences")
revoke_all_on("functions")
def revoke_write_on_protected_tables(db, user):
def revoke_specific_on(what):
db.execute(
"REVOKE INSERT, UPDATE, DELETE ON {what} FROM {name}".format(
name=user.name, what=what))
for table in protected_tables:
revoke_specific_on(table)
def grant_all(db, user):
def grant_all_on(what):
db.execute(
"GRANT ALL PRIVILEGES ON ALL {what} IN SCHEMA public TO {name}".format(
name=user.name, what=what))
print(" - Granting all permissions to {name}".format(name=user.name))
grant_all_on("tables")
grant_all_on("sequences")
grant_all_on("functions")
def grant_readonly(db, user):
print(" - Granting readonly permissions to {name}".format(name=user.name))
db.execute("GRANT SELECT ON ALL TABLES IN SCHEMA public TO {name}".format(
name=user.name))
def set_permissions(db, user):
revoke_all(db, user)
if user.permissions == 'all':
grant_all(db, user)
elif user.permissions == 'readonly':
grant_readonly(db, user)
elif user.permissions == 'pass':
pass
else:
template = "Unhandled permission type '{permissions}' for user '{name}'"
raise Exception(
template.format(name=user.name, permissions=user.permissions))
def migrate_schema_core(service, root_password):
network_name = service.network_name
print("- migrating schema")
image = "vimc/{name}:{version}".format(name="montagu-migrate",
version=versions.db)
pull(image)
cmd = ["docker", "run", "--rm", "--network=" + network_name, image] + \
["-user=vimc", "-password=" + root_password, "migrate"]
run(cmd, check=True)
def setup_user(db, user):
print(" - " + user.name)
create_user(db, user)
set_password(db, user)
set_permissions(db, user)
def for_each_user(root_password, users, operation):
"""Operation is a callback (function) that takes the connection cursor
and a UserConfig object"""
with connect(root_user, root_password) as conn:
with conn.cursor() as cur:
for user in users:
operation(cur, user)
conn.commit()
def setup(service):
print("Waiting for the database to accept connections")
exec_safely(service.db, ["montagu-wait.sh", "7200"], check=True)
password_group = service.settings["password_group"]
print("Setting up database users")
print("- Scrambling root password")
if password_group is not None:
root_password = GeneratePassword().get()
else:
root_password = 'changeme'
set_root_password(service, root_password)
print("- Getting user configurations")
users = user_configs(password_group)
print("- Getting user passwords")
passwords = {}
for user in users:
print(" - {name}: {source}".format(name=user.name,
source=user.password_source))
passwords[user.name] = user.password
# NOTE: As I work through this - why not set up users *after* the
# schema migration? This is because the migration user needs to
# exist, though in practice we don't use them so this could be
# reordered later.
print("- Updating database users")
for_each_user(root_password, users, setup_user)
print("- Migrating database schema")
migrate_schema_core(service, root_password)
print("- Refreshing permissions")
# The migrations may have added new tables, so we should set the permissions
# again, in case users need to have permissions on these new tables
for_each_user(root_password, users, set_permissions)
# Revoke specific permissions now that all tables have been created.
for_each_user(root_password, users, revoke_write_on_protected_tables)
setup_streaming_replication(root_password, service)
return passwords
# NOTE: it might be worth revisiting this to not run this script
# directly (that requires corresponding changes in montagu-db to move
# the inline sql into a standalone .sql file and then getting psql to
# run it via docker exec - it must run as the vimc user). The
# passwords might move directly under control here using set_password
# (but these are special users so we'd not want to use the rest of the
# user machinery). But I suggest waiting until the restore is done
# (VIMC-1560) because that is likely to affect how we deal with users.
def setup_streaming_replication(root_password, service):
if service.settings['enable_db_replication']:
print("Setting up streaming replication")
password_group = service.settings['password_group']
barman = UserConfig.create("barman", "pass",
password_group, "superuser")
streaming_barman = UserConfig.create("streaming_barman", "pass",
password_group, "replication")
with connect(root_user, root_password) as conn:
with conn.cursor() as db:
create_user(db, barman)
create_user(db, streaming_barman)
pw_barman = VaultPassword(password_group, "barman").get()
pw_stream = VaultPassword(password_group, "streaming_barman").get()
cmd = ["enable-replication.sh", pw_barman, pw_stream]
exec_safely(service.db, cmd, check=True)
def prepare_db_for_import(service):
    print("Preparing database for import")
## NOTE: this could otherwise be done by connecting using the
    ## connection function, but that requires further changes to
## the connect function to allow connection to the postgres
## maintenance database. This way works for now. This also
## allows us to avoid working out what the root password will be
    ## because we're interacting without passwords over exec.
db = service.db
print("- deleting and recreating database")
db.exec_run(["dropdb", "-U", "vimc", "--if-exists", "montagu"])
db.exec_run(["createdb", "-U", "vimc", "montagu"])
print("- configuring users")
users = user_configs(service.settings["password_group"])
for user in users:
db.exec_run(["createuser", "-U", "vimc", user.name])
| 0 | 0 |
d81293ab153a2b3620035047792f5df592c73a94 | 1,225 | py | Python | Chapter05/poplib/mailbox_basic_params.py | yangwawa0323/Learning-Python-Networking-Second-Edition | 5460fe4fb6acc5d0df19bf36e52ac09e9a11eb8b | [
"MIT"
] | 52 | 2018-12-17T19:33:06.000Z | 2022-03-25T18:14:02.000Z | Chapter05/poplib/mailbox_basic_params.py | barretthugh/Learning-Python-Networking-Second-Edition | 0f00b8b20c1c85e76754e47113dff8ca9e99d5ca | [
"MIT"
] | null | null | null | Chapter05/poplib/mailbox_basic_params.py | barretthugh/Learning-Python-Networking-Second-Edition | 0f00b8b20c1c85e76754e47113dff8ca9e99d5ca | [
"MIT"
] | 38 | 2018-12-18T09:08:43.000Z | 2022-02-06T02:53:05.000Z | #!/usr/bin/env python3
import poplib
import argparse
def main(hostname,port,user,password):
mailbox = poplib.POP3_SSL(hostname,port)
try:
mailbox.user(user)
mailbox.pass_(password)
response, listings, octet_count = mailbox.list()
for listing in listings:
number, size = listing.decode('ascii').split()
print("Message %s has %s bytes" % (number, size))
except poplib.error_proto as exception:
print("Login failed:", exception)
finally:
mailbox.quit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MailBox basic params')
parser.add_argument('--hostname', action="store", dest="hostname")
parser.add_argument('--port', action="store", dest="port")
parser.add_argument('--user', action="store", dest="user")
given_args = parser.parse_args()
hostname = given_args.hostname
port = given_args.port
user = given_args.user
import getpass
password = getpass.getpass(prompt='Enter your password:')
main(hostname,port,user,password)
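# Example invocation (placeholder values; POP3 over SSL conventionally uses
# port 995), after which the password is prompted for via getpass:
#   python3 mailbox_basic_params.py --hostname pop.example.com --port 995 --user alice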
| 30.625 | 76 | 0.59102 | #!/usr/bin/env python3
import poplib
import argparse
def main(hostname,port,user,password):
mailbox = poplib.POP3_SSL(hostname,port)
try:
mailbox.user(user)
mailbox.pass_(password)
response, listings, octet_count = mailbox.list()
for listing in listings:
number, size = listing.decode('ascii').split()
print("Message %s has %s bytes" % (number, size))
except poplib.error_proto as exception:
print("Login failed:", exception)
finally:
mailbox.quit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MailBox basic params')
parser.add_argument('--hostname', action="store", dest="hostname")
parser.add_argument('--port', action="store", dest="port")
parser.add_argument('--user', action="store", dest="user")
given_args = parser.parse_args()
hostname = given_args.hostname
port = given_args.port
user = given_args.user
import getpass
password = getpass.getpass(prompt='Enter your password:')
main(hostname,port,user,password)
| 0 | 0 |
9b34dbdf2931cc53d5622eda4c91b6c6fb2da69b | 6,219 | py | Python | openob/manager.py | sreimers/openob | 8b82e38c37f0a7d748c076a0dc14fc8994bb5998 | [
"Unlicense"
] | 2 | 2018-04-26T11:27:10.000Z | 2021-06-01T03:59:41.000Z | openob/manager.py | sreimers/openob | 8b82e38c37f0a7d748c076a0dc14fc8994bb5998 | [
"Unlicense"
] | null | null | null | openob/manager.py | sreimers/openob | 8b82e38c37f0a7d748c076a0dc14fc8994bb5998 | [
"Unlicense"
] | null | null | null | import sys
import time
import redis
from openob.rtp.tx import RTPTransmitter
from openob.rtp.rx import RTPReceiver
import gst
from colorama import Fore, Back, Style
# OpenOB Link Manager
# One of these runs at each end and negotiates everything (the TX end pushes config info to the RX end via the config server), reconnects when links fail, and so on.
class Manager:
'''OpenOB Manager. Handles management of links, mostly recovery from failures.'''
def run(self, opts):
print("-- OpenOB Audio Link")
print(" -- Starting Up")
print(" -- Parameters: %s" % opts)
# We're now entering the realm where we should desperately try and maintain a link under all circumstances forever.
while True:
try:
# Set up redis and connect
config = None
while True:
try:
config = redis.Redis(opts.config_host)
print(" -- Connected to configuration server")
break
                    except Exception as e:
print(Fore.BLACK + Back.RED + " -- Couldn't connect to Redis! Ensure your configuration host is set properly, and you can connect to the default Redis port on that host from here (%s)." % e)
print(" Waiting half a second and attempting to connect again." + Fore.RESET + Back.RESET)
time.sleep(0.5)
# So if we're a transmitter, let's set the options the receiver needs to know about
link_key = "openob2:"+opts.link_name+":"
if opts.mode == 'tx':
if opts.encoding == 'celt' and int(opts.bitrate) > 192:
print(Fore.BLACK + Back.YELLOW + " -- WARNING: Can't use bitrates higher than 192kbps for CELT, limiting" + Fore.RESET + Back.RESET)
opts.bitrate = 192
# We're a transmitter!
config.set(link_key+"port", opts.port)
config.set(link_key+"ipv6", opts.ipv6)
config.set(link_key+"jitter_buffer", opts.jitter_buffer)
config.set(link_key+"encoding", opts.encoding)
config.set(link_key+"bitrate", opts.bitrate)
print(" -- Configured receiver with:")
print(" - Base Port: %s" % config.get(link_key+"port"))
print(" - Jitter Buffer: %s ms" % config.get(link_key+"jitter_buffer"))
print(" - Encoding: %s" % config.get(link_key+"encoding"))
print(" - Bitrate: %s kbit/s" % config.get(link_key+"bitrate"))
# Okay, we can't set caps yet - we need to configure ourselves first.
opus_opts = {'audio': True, 'bandwidth': -1000, 'frame-size': opts.framesize, 'complexity': opts.complexity, 'constrained-vbr': True, 'inband-fec': opts.fec, 'packet-loss-percentage': opts.loss, 'dtx': opts.dtx}
try:
transmitter = RTPTransmitter(audio_input=opts.audio_input, audio_device=opts.device, base_port=opts.port, ipv6=opts.ipv6, encoding=opts.encoding, bitrate=opts.bitrate, jack_name=("openob_tx_%s" % opts.link_name), receiver_address=opts.receiver_host, opus_options=opus_opts)
# Set it up, get caps
try:
transmitter.run()
config.set(link_key+"caps", transmitter.get_caps())
print(" - Caps: %s" % config.get(link_key+"caps"))
transmitter.loop()
                        except Exception as e:
print(Fore.BLACK + Back.RED + " -- Lost connection or otherwise had the transmitter fail on us, restarting (%s)" % e)
time.sleep(0.5)
                    except gst.ElementNotFoundError as e:
print(Fore.BLACK + Back.RED + (" -- Couldn't fulfill our gstreamer module dependencies! You don't have the following element available: %s" % e) + Fore.RESET + Back.RESET)
sys.exit(1)
else:
# We're a receiver!
# Default values.
port = 3000
caps = ''
jitter_buffer = 150
encoding = 'opus'
bitrate = '96'
while True:
try:
if config.get(link_key+"port") == None:
print(Fore.BLACK + Back.YELLOW + " -- Unable to configure myself from the configuration host; has the transmitter been started yet, and have you got the same link name on each end?")
print(" Waiting half a second and attempting to reconfigure myself." + Fore.RESET + Back.RESET)
time.sleep(0.5)
port = int(config.get(link_key+"port"))
ipv6 = int(config.get(link_key+"ipv6"))
caps = config.get(link_key+"caps")
jitter_buffer = int(config.get(link_key+"jitter_buffer"))
encoding = config.get(link_key+"encoding")
bitrate = int(config.get(link_key+"bitrate"))
print(" -- Configured from transmitter with:")
print(" - Base Port: %s" % port)
                            print(" - Jitter Buffer: %s ms" % jitter_buffer)
print(" - Encoding: %s" % encoding)
print(" - Bitrate: %s kbit/s" % bitrate)
print(" - Caps: %s" % caps)
break
                        except Exception as e:
print(Fore.BLACK + Back.YELLOW + " -- Unable to configure myself from the configuration host; has the transmitter been started yet? (%s)" % e)
print(" Waiting half a second and attempting to reconfigure myself." + Fore.RESET + Back.RESET)
time.sleep(0.5)
#raise
                    # Okay, we can now configure ourselves
receiver = RTPReceiver(audio_output=opts.audio_output, audio_device=opts.device, base_port=port, ipv6=ipv6, encoding=encoding, caps=caps, bitrate=bitrate, jitter_buffer=jitter_buffer, jack_name=("openob_tx_%s" % opts.link_name) )
try:
receiver.run()
receiver.loop()
                    except Exception as e:
print(Fore.BLACK + Back.RED + (" -- Lost connection or otherwise had the receiver fail on us, restarting (%s)" % e) + Fore.RESET + Back.RESET)
time.sleep(0.5)
            except Exception as e:
                print(Fore.BLACK + Back.RED + " -- Unhandled exception occurred, please report this as a bug!" + Fore.RESET + Back.RESET)
raise
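# Illustrative sketch (not part of the original class): the transmitter above
# publishes its settings under the Redis prefix "openob2:<link_name>:" and the
# receiver polls the same keys. A standalone helper to inspect a link's
# configuration might look like this; the argument values are placeholders.
def dump_link_config(config_host, link_name):
    config = redis.Redis(config_host)
    prefix = "openob2:" + link_name + ":"
    for key in ("port", "ipv6", "jitter_buffer", "encoding", "bitrate", "caps"):
        print(" - %s%s = %s" % (prefix, key, config.get(prefix + key)))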
| 57.583333 | 286 | 0.589323 | import sys
import time
import redis
from openob.rtp.tx import RTPTransmitter
from openob.rtp.rx import RTPReceiver
import gst
from colorama import Fore, Back, Style
# OpenOB Link Manager
# One of these runs at each end and negotiates everything (the TX end pushes config info to the RX end via the config server), reconnects when links fail, and so on.
class Manager:
'''OpenOB Manager. Handles management of links, mostly recovery from failures.'''
def run(self, opts):
print("-- OpenOB Audio Link")
print(" -- Starting Up")
print(" -- Parameters: %s" % opts)
# We're now entering the realm where we should desperately try and maintain a link under all circumstances forever.
while True:
try:
# Set up redis and connect
config = None
while True:
try:
config = redis.Redis(opts.config_host)
print(" -- Connected to configuration server")
break
                    except Exception as e:
print(Fore.BLACK + Back.RED + " -- Couldn't connect to Redis! Ensure your configuration host is set properly, and you can connect to the default Redis port on that host from here (%s)." % e)
print(" Waiting half a second and attempting to connect again." + Fore.RESET + Back.RESET)
time.sleep(0.5)
# So if we're a transmitter, let's set the options the receiver needs to know about
link_key = "openob2:"+opts.link_name+":"
if opts.mode == 'tx':
if opts.encoding == 'celt' and int(opts.bitrate) > 192:
print(Fore.BLACK + Back.YELLOW + " -- WARNING: Can't use bitrates higher than 192kbps for CELT, limiting" + Fore.RESET + Back.RESET)
opts.bitrate = 192
# We're a transmitter!
config.set(link_key+"port", opts.port)
config.set(link_key+"ipv6", opts.ipv6)
config.set(link_key+"jitter_buffer", opts.jitter_buffer)
config.set(link_key+"encoding", opts.encoding)
config.set(link_key+"bitrate", opts.bitrate)
print(" -- Configured receiver with:")
print(" - Base Port: %s" % config.get(link_key+"port"))
print(" - Jitter Buffer: %s ms" % config.get(link_key+"jitter_buffer"))
print(" - Encoding: %s" % config.get(link_key+"encoding"))
print(" - Bitrate: %s kbit/s" % config.get(link_key+"bitrate"))
# Okay, we can't set caps yet - we need to configure ourselves first.
opus_opts = {'audio': True, 'bandwidth': -1000, 'frame-size': opts.framesize, 'complexity': opts.complexity, 'constrained-vbr': True, 'inband-fec': opts.fec, 'packet-loss-percentage': opts.loss, 'dtx': opts.dtx}
try:
transmitter = RTPTransmitter(audio_input=opts.audio_input, audio_device=opts.device, base_port=opts.port, ipv6=opts.ipv6, encoding=opts.encoding, bitrate=opts.bitrate, jack_name=("openob_tx_%s" % opts.link_name), receiver_address=opts.receiver_host, opus_options=opus_opts)
# Set it up, get caps
try:
transmitter.run()
config.set(link_key+"caps", transmitter.get_caps())
print(" - Caps: %s" % config.get(link_key+"caps"))
transmitter.loop()
                        except Exception as e:
print(Fore.BLACK + Back.RED + " -- Lost connection or otherwise had the transmitter fail on us, restarting (%s)" % e)
time.sleep(0.5)
                    except gst.ElementNotFoundError as e:
print(Fore.BLACK + Back.RED + (" -- Couldn't fulfill our gstreamer module dependencies! You don't have the following element available: %s" % e) + Fore.RESET + Back.RESET)
sys.exit(1)
else:
# We're a receiver!
# Default values.
port = 3000
caps = ''
jitter_buffer = 150
encoding = 'opus'
bitrate = '96'
while True:
try:
if config.get(link_key+"port") == None:
print(Fore.BLACK + Back.YELLOW + " -- Unable to configure myself from the configuration host; has the transmitter been started yet, and have you got the same link name on each end?")
print(" Waiting half a second and attempting to reconfigure myself." + Fore.RESET + Back.RESET)
time.sleep(0.5)
port = int(config.get(link_key+"port"))
ipv6 = int(config.get(link_key+"ipv6"))
caps = config.get(link_key+"caps")
jitter_buffer = int(config.get(link_key+"jitter_buffer"))
encoding = config.get(link_key+"encoding")
bitrate = int(config.get(link_key+"bitrate"))
print(" -- Configured from transmitter with:")
print(" - Base Port: %s" % port)
                            print(" - Jitter Buffer: %s ms" % jitter_buffer)
print(" - Encoding: %s" % encoding)
print(" - Bitrate: %s kbit/s" % bitrate)
print(" - Caps: %s" % caps)
break
                        except Exception as e:
print(Fore.BLACK + Back.YELLOW + " -- Unable to configure myself from the configuration host; has the transmitter been started yet? (%s)" % e)
print(" Waiting half a second and attempting to reconfigure myself." + Fore.RESET + Back.RESET)
time.sleep(0.5)
#raise
                    # Okay, we can now configure ourselves
receiver = RTPReceiver(audio_output=opts.audio_output, audio_device=opts.device, base_port=port, ipv6=ipv6, encoding=encoding, caps=caps, bitrate=bitrate, jitter_buffer=jitter_buffer, jack_name=("openob_tx_%s" % opts.link_name) )
try:
receiver.run()
receiver.loop()
                    except Exception as e:
print(Fore.BLACK + Back.RED + (" -- Lost connection or otherwise had the receiver fail on us, restarting (%s)" % e) + Fore.RESET + Back.RESET)
time.sleep(0.5)
            except Exception as e:
                print(Fore.BLACK + Back.RED + " -- Unhandled exception occurred, please report this as a bug!" + Fore.RESET + Back.RESET)
raise
| 0 | 0 |
ad9efb2e6f4829da3b80bcc3b918afaea610d7d7 | 10,777 | py | Python | tests/test_packages/test_skills/test_confirmation_aw2/test_strategy.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 126 | 2019-09-07T09:32:44.000Z | 2022-03-29T14:28:41.000Z | tests/test_packages/test_skills/test_confirmation_aw2/test_strategy.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 1,814 | 2019-08-24T10:08:07.000Z | 2022-03-31T14:28:36.000Z | tests/test_packages/test_skills/test_confirmation_aw2/test_strategy.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | [
"Apache-2.0"
] | 46 | 2019-09-03T22:13:58.000Z | 2022-03-22T01:25:16.000Z | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the strategy class of the confirmation aw2 skill."""
import datetime
import logging
from pathlib import Path
from typing import cast
from unittest.mock import Mock, patch
import pytest
from packages.fetchai.skills.confirmation_aw2.registration_db import RegistrationDB
from packages.fetchai.skills.confirmation_aw2.strategy import Strategy
from tests.conftest import ROOT_DIR
from tests.test_packages.test_skills.test_confirmation_aw2.intermediate_class import (
ConfirmationAW2TestCase,
)
class TestStrategy(ConfirmationAW2TestCase):
"""Test Strategy of confirmation aw2."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "confirmation_aw2")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
cls.minimum_hours_between_txs = 4
cls.minimum_minutes_since_last_attempt = 2
cls.strategy = Strategy(
aw1_aea="some_aw1_aea",
mininum_hours_between_txs=cls.minimum_hours_between_txs,
minimum_minutes_since_last_attempt=cls.minimum_minutes_since_last_attempt,
name="strategy",
skill_context=cls._skill.skill_context,
)
cls.address = "some_address"
cls.info = {
"ethereum_address": "some_value",
"signature_of_ethereum_address": "some_signature_of_ethereum_address",
"signature_of_fetchai_address": "some_signature_of_fetchai_address",
"developer_handle": "some_developer_handle",
"tweet": "some_tweet",
}
cls.logger = cls._skill.skill_context.logger
cls.db = cast(RegistrationDB, cls._skill.skill_context.registration_db)
cls.counterparty = "couterparty_1"
def test__init__i(self):
"""Test the __init__ of Strategy class."""
assert self.strategy.aw1_aea == self.aw1_aea
assert self.strategy.minimum_hours_between_txs == self.minimum_hours_between_txs
assert (
self.strategy.minimum_minutes_since_last_attempt
== self.minimum_minutes_since_last_attempt
)
def test__init__ii(self):
"""Test the __init__ of Strategy class where aw1_aea is None."""
with pytest.raises(ValueError, match="aw1_aea must be provided!"):
Strategy(
aw1_aea=None,
mininum_hours_between_txs=self.minimum_hours_between_txs,
minimum_minutes_since_last_attempt=self.minimum_minutes_since_last_attempt,
name="strategy",
skill_context=self.skill.skill_context,
)
def test_get_acceptable_counterparties(self):
"""Test the get_acceptable_counterparties method of the Strategy class."""
# setup
couterparties = ("couterparty_1", "couterparty_2", "couterparty_3")
is_valid_counterparty = [True, False, True]
# operation
with patch.object(
self.strategy, "is_valid_counterparty", side_effect=is_valid_counterparty
):
actual_acceptable_counterparties = self.strategy.get_acceptable_counterparties(
couterparties
)
# after
assert actual_acceptable_counterparties == ("couterparty_1", "couterparty_3")
def test_is_enough_time_since_last_attempt_i(self):
"""Test the is_enough_time_since_last_attempt method of the Strategy class where now IS greater than last attempt + min minutes."""
# setup
counterparty_last_attempt_time_str = "2020-12-22 20:30:00.000000"
counterparty_last_attempt_time = datetime.datetime.strptime(
counterparty_last_attempt_time_str, "%Y-%m-%d %H:%M:%S.%f"
)
mocked_now_greater_than_last_plus_minimum = "2020-12-22 20:33:00.000000"
datetime_mock = Mock(wraps=datetime.datetime)
datetime_mock.now.return_value = datetime.datetime.strptime(
mocked_now_greater_than_last_plus_minimum, "%Y-%m-%d %H:%M:%S.%f"
)
self.strategy.last_attempt = {self.counterparty: counterparty_last_attempt_time}
# operation
with patch("datetime.datetime", new=datetime_mock):
is_enough_time = self.strategy.is_enough_time_since_last_attempt(
self.counterparty
)
# after
assert is_enough_time is True
def test_is_enough_time_since_last_attempt_ii(self):
"""Test the is_enough_time_since_last_attempt method of the Strategy class where now is NOT greater than last attempt + min minutes."""
# setup
counterparty_last_attempt_time_str = "2020-12-22 20:30:00.000000"
counterparty_last_attempt_time = datetime.datetime.strptime(
counterparty_last_attempt_time_str, "%Y-%m-%d %H:%M:%S.%f"
)
mocked_now_less_than_last_plus_minimum = "2020-12-22 20:31:00.000000"
datetime_mock = Mock(wraps=datetime.datetime)
datetime_mock.now.return_value = datetime.datetime.strptime(
mocked_now_less_than_last_plus_minimum, "%Y-%m-%d %H:%M:%S.%f"
)
self.strategy.last_attempt = {self.counterparty: counterparty_last_attempt_time}
# operation
with patch("datetime.datetime", new=datetime_mock):
is_enough_time = self.strategy.is_enough_time_since_last_attempt(
self.counterparty
)
# after
assert is_enough_time is False
def test_is_enough_time_since_last_attempt_iii(self):
"""Test the is_enough_time_since_last_attempt method of the Strategy class where now counterparty is NOT in last_attempt."""
# setup
self.strategy.last_attempt = {}
# operation
is_enough_time = self.strategy.is_enough_time_since_last_attempt(
self.counterparty
)
# after
assert is_enough_time is True
def test_is_valid_counterparty_i(self):
"""Test the is_valid_counterparty method of the Strategy class where is_registered is False."""
# operation
with patch.object(self.db, "is_registered", return_value=False):
with patch.object(self.logger, "log") as mock_logger:
is_valid = self.strategy.is_valid_counterparty(self.counterparty)
# after
mock_logger.assert_any_call(
logging.INFO, f"Invalid counterparty={self.counterparty}, not registered!",
)
assert is_valid is False
def test_is_valid_counterparty_ii(self):
"""Test the is_valid_counterparty method of the Strategy class where is_enough_time_since_last_attempt is False."""
# operation
with patch.object(self.db, "is_registered", return_value=True):
with patch.object(
self.strategy, "is_enough_time_since_last_attempt", return_value=False
):
with patch.object(self.logger, "log") as mock_logger:
is_valid = self.strategy.is_valid_counterparty(self.counterparty)
# after
mock_logger.assert_any_call(
logging.DEBUG,
f"Not enough time since last attempt for counterparty={self.counterparty}!",
)
assert is_valid is False
def test_is_valid_counterparty_iii(self):
"""Test the is_valid_counterparty method of the Strategy class where is_allowed_to_trade is False."""
# operation
with patch.object(self.db, "is_registered", return_value=True):
with patch.object(
self.strategy, "is_enough_time_since_last_attempt", return_value=True
):
with patch.object(self.db, "is_allowed_to_trade", return_value=False):
is_valid = self.strategy.is_valid_counterparty(self.counterparty)
# after
assert is_valid is False
def test_is_valid_counterparty_iv(self):
"""Test the is_valid_counterparty method of the Strategy class where it succeeds."""
# operation
with patch.object(self.db, "is_registered", return_value=True):
with patch.object(
self.strategy, "is_enough_time_since_last_attempt", return_value=True
):
with patch.object(self.db, "is_allowed_to_trade", return_value=True):
is_valid = self.strategy.is_valid_counterparty(self.counterparty)
# after
assert is_valid is True
def test_successful_trade_with_counterparty(self):
"""Test the successful_trade_with_counterparty method of the Strategy class."""
# setup
data = {"some_key_1": "some_value_1", "some_key_2": "some_value_2"}
mocked_now_str = "2020-12-22 20:33:00.000000"
mock_now = datetime.datetime.strptime(mocked_now_str, "%Y-%m-%d %H:%M:%S.%f")
datetime_mock = Mock(wraps=datetime.datetime)
datetime_mock.now.return_value = mock_now
# operation
with patch.object(self.db, "set_trade") as mock_set_trade:
with patch("datetime.datetime", new=datetime_mock):
with patch.object(self.logger, "log") as mock_logger:
self.strategy.successful_trade_with_counterparty(
self.counterparty, data
)
# after
mock_set_trade.assert_any_call(self.counterparty, mock_now, data)
mock_logger.assert_any_call(
logging.INFO,
f"Successful trade with={self.counterparty}. Data acquired={data}!",
)
def test_register_counterparty(self):
"""Test the register_counterparty method of the Strategy class."""
# setup
developer_handle = "some_developer_handle"
# operation
with patch.object(self.db, "set_registered") as mock_set_registered:
self.strategy.register_counterparty(self.counterparty, developer_handle)
# after
mock_set_registered.assert_any_call(self.counterparty, developer_handle)
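# These tests are written for pytest; assuming the usual repository layout they
# can be collected with, e.g.:
#   pytest tests/test_packages/test_skills/test_confirmation_aw2/test_strategy.py
# (ConfirmationAW2TestCase supplies the skill fixtures used in setup()).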
| 41.133588 | 143 | 0.661409 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the strategy class of the confirmation aw2 skill."""
import datetime
import logging
from pathlib import Path
from typing import cast
from unittest.mock import Mock, patch
import pytest
from packages.fetchai.skills.confirmation_aw2.registration_db import RegistrationDB
from packages.fetchai.skills.confirmation_aw2.strategy import Strategy
from tests.conftest import ROOT_DIR
from tests.test_packages.test_skills.test_confirmation_aw2.intermediate_class import (
ConfirmationAW2TestCase,
)
class TestStrategy(ConfirmationAW2TestCase):
"""Test Strategy of confirmation aw2."""
path_to_skill = Path(ROOT_DIR, "packages", "fetchai", "skills", "confirmation_aw2")
@classmethod
def setup(cls):
"""Setup the test class."""
super().setup()
cls.minimum_hours_between_txs = 4
cls.minimum_minutes_since_last_attempt = 2
cls.strategy = Strategy(
aw1_aea="some_aw1_aea",
mininum_hours_between_txs=cls.minimum_hours_between_txs,
minimum_minutes_since_last_attempt=cls.minimum_minutes_since_last_attempt,
name="strategy",
skill_context=cls._skill.skill_context,
)
cls.address = "some_address"
cls.info = {
"ethereum_address": "some_value",
"signature_of_ethereum_address": "some_signature_of_ethereum_address",
"signature_of_fetchai_address": "some_signature_of_fetchai_address",
"developer_handle": "some_developer_handle",
"tweet": "some_tweet",
}
cls.logger = cls._skill.skill_context.logger
cls.db = cast(RegistrationDB, cls._skill.skill_context.registration_db)
cls.counterparty = "couterparty_1"
def test__init__i(self):
"""Test the __init__ of Strategy class."""
assert self.strategy.aw1_aea == self.aw1_aea
assert self.strategy.minimum_hours_between_txs == self.minimum_hours_between_txs
assert (
self.strategy.minimum_minutes_since_last_attempt
== self.minimum_minutes_since_last_attempt
)
def test__init__ii(self):
"""Test the __init__ of Strategy class where aw1_aea is None."""
with pytest.raises(ValueError, match="aw1_aea must be provided!"):
Strategy(
aw1_aea=None,
mininum_hours_between_txs=self.minimum_hours_between_txs,
minimum_minutes_since_last_attempt=self.minimum_minutes_since_last_attempt,
name="strategy",
skill_context=self.skill.skill_context,
)
def test_get_acceptable_counterparties(self):
"""Test the get_acceptable_counterparties method of the Strategy class."""
# setup
couterparties = ("couterparty_1", "couterparty_2", "couterparty_3")
is_valid_counterparty = [True, False, True]
# operation
with patch.object(
self.strategy, "is_valid_counterparty", side_effect=is_valid_counterparty
):
actual_acceptable_counterparties = self.strategy.get_acceptable_counterparties(
couterparties
)
# after
assert actual_acceptable_counterparties == ("couterparty_1", "couterparty_3")
def test_is_enough_time_since_last_attempt_i(self):
"""Test the is_enough_time_since_last_attempt method of the Strategy class where now IS greater than last attempt + min minutes."""
# setup
counterparty_last_attempt_time_str = "2020-12-22 20:30:00.000000"
counterparty_last_attempt_time = datetime.datetime.strptime(
counterparty_last_attempt_time_str, "%Y-%m-%d %H:%M:%S.%f"
)
mocked_now_greater_than_last_plus_minimum = "2020-12-22 20:33:00.000000"
datetime_mock = Mock(wraps=datetime.datetime)
datetime_mock.now.return_value = datetime.datetime.strptime(
mocked_now_greater_than_last_plus_minimum, "%Y-%m-%d %H:%M:%S.%f"
)
self.strategy.last_attempt = {self.counterparty: counterparty_last_attempt_time}
# operation
with patch("datetime.datetime", new=datetime_mock):
is_enough_time = self.strategy.is_enough_time_since_last_attempt(
self.counterparty
)
# after
assert is_enough_time is True
def test_is_enough_time_since_last_attempt_ii(self):
"""Test the is_enough_time_since_last_attempt method of the Strategy class where now is NOT greater than last attempt + min minutes."""
# setup
counterparty_last_attempt_time_str = "2020-12-22 20:30:00.000000"
counterparty_last_attempt_time = datetime.datetime.strptime(
counterparty_last_attempt_time_str, "%Y-%m-%d %H:%M:%S.%f"
)
mocked_now_less_than_last_plus_minimum = "2020-12-22 20:31:00.000000"
datetime_mock = Mock(wraps=datetime.datetime)
datetime_mock.now.return_value = datetime.datetime.strptime(
mocked_now_less_than_last_plus_minimum, "%Y-%m-%d %H:%M:%S.%f"
)
self.strategy.last_attempt = {self.counterparty: counterparty_last_attempt_time}
# operation
with patch("datetime.datetime", new=datetime_mock):
is_enough_time = self.strategy.is_enough_time_since_last_attempt(
self.counterparty
)
# after
assert is_enough_time is False
def test_is_enough_time_since_last_attempt_iii(self):
"""Test the is_enough_time_since_last_attempt method of the Strategy class where now counterparty is NOT in last_attempt."""
# setup
self.strategy.last_attempt = {}
# operation
is_enough_time = self.strategy.is_enough_time_since_last_attempt(
self.counterparty
)
# after
assert is_enough_time is True
def test_is_valid_counterparty_i(self):
"""Test the is_valid_counterparty method of the Strategy class where is_registered is False."""
# operation
with patch.object(self.db, "is_registered", return_value=False):
with patch.object(self.logger, "log") as mock_logger:
is_valid = self.strategy.is_valid_counterparty(self.counterparty)
# after
mock_logger.assert_any_call(
logging.INFO, f"Invalid counterparty={self.counterparty}, not registered!",
)
assert is_valid is False
def test_is_valid_counterparty_ii(self):
"""Test the is_valid_counterparty method of the Strategy class where is_enough_time_since_last_attempt is False."""
# operation
with patch.object(self.db, "is_registered", return_value=True):
with patch.object(
self.strategy, "is_enough_time_since_last_attempt", return_value=False
):
with patch.object(self.logger, "log") as mock_logger:
is_valid = self.strategy.is_valid_counterparty(self.counterparty)
# after
mock_logger.assert_any_call(
logging.DEBUG,
f"Not enough time since last attempt for counterparty={self.counterparty}!",
)
assert is_valid is False
def test_is_valid_counterparty_iii(self):
"""Test the is_valid_counterparty method of the Strategy class where is_allowed_to_trade is False."""
# operation
with patch.object(self.db, "is_registered", return_value=True):
with patch.object(
self.strategy, "is_enough_time_since_last_attempt", return_value=True
):
with patch.object(self.db, "is_allowed_to_trade", return_value=False):
is_valid = self.strategy.is_valid_counterparty(self.counterparty)
# after
assert is_valid is False
def test_is_valid_counterparty_iv(self):
"""Test the is_valid_counterparty method of the Strategy class where it succeeds."""
# operation
with patch.object(self.db, "is_registered", return_value=True):
with patch.object(
self.strategy, "is_enough_time_since_last_attempt", return_value=True
):
with patch.object(self.db, "is_allowed_to_trade", return_value=True):
is_valid = self.strategy.is_valid_counterparty(self.counterparty)
# after
assert is_valid is True
def test_successful_trade_with_counterparty(self):
"""Test the successful_trade_with_counterparty method of the Strategy class."""
# setup
data = {"some_key_1": "some_value_1", "some_key_2": "some_value_2"}
mocked_now_str = "2020-12-22 20:33:00.000000"
mock_now = datetime.datetime.strptime(mocked_now_str, "%Y-%m-%d %H:%M:%S.%f")
datetime_mock = Mock(wraps=datetime.datetime)
datetime_mock.now.return_value = mock_now
# operation
with patch.object(self.db, "set_trade") as mock_set_trade:
with patch("datetime.datetime", new=datetime_mock):
with patch.object(self.logger, "log") as mock_logger:
self.strategy.successful_trade_with_counterparty(
self.counterparty, data
)
# after
mock_set_trade.assert_any_call(self.counterparty, mock_now, data)
mock_logger.assert_any_call(
logging.INFO,
f"Successful trade with={self.counterparty}. Data acquired={data}!",
)
def test_register_counterparty(self):
"""Test the register_counterparty method of the Strategy class."""
# setup
developer_handle = "some_developer_handle"
# operation
with patch.object(self.db, "set_registered") as mock_set_registered:
self.strategy.register_counterparty(self.counterparty, developer_handle)
# after
mock_set_registered.assert_any_call(self.counterparty, developer_handle)
| 0 | 0 |
4a05ac857f23cb032431dca22e9f9dc234c173f4 | 370 | py | Python | pype9/utils/mpi.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | null | null | null | pype9/utils/mpi.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | null | null | null | pype9/utils/mpi.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | 1 | 2021-04-08T12:46:21.000Z | 2021-04-08T12:46:21.000Z | class DummyMPICom(object):
rank = 0
size = 1
def barrier(self):
pass
try:
from mpi4py import MPI # @UnusedImport @IgnorePep8 This is imported before NEURON to avoid a bug in NEURON
except ImportError:
mpi_comm = DummyMPICom()
else:
mpi_comm = MPI.COMM_WORLD
MPI_ROOT = 0
def is_mpi_master():
return (mpi_comm.rank == MPI_ROOT)
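# Minimal usage sketch (illustrative): gate rank-0-only work behind
# is_mpi_master() and synchronise with barrier(), which is a no-op when the
# DummyMPICom fallback is in use.
if __name__ == "__main__":
    if is_mpi_master():
        print("master process (rank %d of %d)" % (mpi_comm.rank, mpi_comm.size))
    mpi_comm.barrier()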
| 17.619048 | 111 | 0.678378 | class DummyMPICom(object):
rank = 0
size = 1
def barrier(self):
pass
try:
from mpi4py import MPI # @UnusedImport @IgnorePep8 This is imported before NEURON to avoid a bug in NEURON
except ImportError:
mpi_comm = DummyMPICom()
else:
mpi_comm = MPI.COMM_WORLD
MPI_ROOT = 0
def is_mpi_master():
return (mpi_comm.rank == MPI_ROOT)
| 0 | 0 |
ab0bdc4d6adc83eb5ef6b8b6083e23fb449db951 | 9,480 | py | Python | Trajectory_Mining/Bag_of_Words/unpack_items_all_victims.py | AdamCoscia/eve-trajectory-mining | 134f142a5665f66fbf92aada8dd6252fab64ddff | [
"MIT"
] | null | null | null | Trajectory_Mining/Bag_of_Words/unpack_items_all_victims.py | AdamCoscia/eve-trajectory-mining | 134f142a5665f66fbf92aada8dd6252fab64ddff | [
"MIT"
] | null | null | null | Trajectory_Mining/Bag_of_Words/unpack_items_all_victims.py | AdamCoscia/eve-trajectory-mining | 134f142a5665f66fbf92aada8dd6252fab64ddff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unpacks Raw API data from zkillboard into victim files that contain
TEST - 10/02/2019
Params: 10000002201505.csv | 61MB | 28208 rows x 8 columns
Output:
```
(+0.000s|t:0.000s) Importing modules...
(+2.209s|t:2.209s) Loading CSV data from local file...
(+1.132s|t:3.341s) Converting DataFrame column value types...
(+18.746s|t:22.087s) Loading YAML files into memory...
(+3.88m|t:4.25m) Unpacking DataFrame values...
(+2.30m|t:6.55m) Writing results to CSV...
(+8.008s|t:6.68m) Exit
```
Written By: Adam Coscia
Updated On: 11/09/2019
"""
# Start timing
import time
start = time.time()
total = 0
def lap(msg):
"""Records time elapsed."""
global start, total
elapsed = (time.time() - start) - total
total = time.time() - start
if elapsed > 3600:
print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}')
elif elapsed > 60:
if total > 3600:
print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}')
else:
print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}')
else:
if total > 3600:
print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}')
elif total > 60:
print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}')
else:
print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}')
lap("Importing modules...")
from ast import literal_eval
import os
import sys
import numpy as np
import pandas as pd
import yaml
def load_yaml(file_loc, encoding='utf-8'):
"""Loads yaml file at file_loc and returns Python object based on yaml
structure.
"""
data = None
with open(file_loc, 'r', encoding=encoding) as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
return data
def unpack(data: pd.DataFrame):
"""Operations to unpack nested data, yield row for row in old data.
Iterate over each row of data using generator and unpack each row.
"""
def parse_items(items):
# Use sets for faster look-up time (no order needed to preserve)
# lo_flags = {11, 12, 13, 14, 15, 16, 17, 18}
# mi_flags = {19, 20, 21, 22, 23, 24, 25, 26}
# hi_flags = {27, 28, 29, 30, 31, 32, 33, 34}
itemList=[]
for item in items:
itemName, groupName = 'Missing', 'Missing'
try:
item_type_id = item['item_type_id']
try:
item_group_id = typeIDs[item_type_id]['groupID']
try:
itemName = typeIDs[item_type_id]['name']['en']
try:
groupName = groupIDs[item_group_id]['name']['en']
except:
pass
except:
pass
except:
pass
except:
pass
finally:
itemList.append((itemName, groupName))
return itemList
def parse_attackers(attackers):
attacker_keys = ('final_blow', 'damage_done', 'ship_type_id')
for attacker in attackers:
if 'character_id' in attacker:
a = (attacker['character_id'], [])
for a_key in attacker_keys:
if a_key in attacker:
a[1].append(attacker[a_key])
else:
a[1].append(np.nan)
yield a
for row in data.itertuples():
# Some killmails are npcs, don't include their items and values
if 'character_id' in row.victim:
# These values are guaranteed in every killmail
victim_row = [row.killmail_time,
row.solar_system_id,
row.victim['character_id']]
# Try to add ship_type_id to victim values if exists
if 'ship_type_id' in row.victim:
victim_row.append(row.victim['ship_type_id'])
else:
victim_row.append(np.nan)
# Try to add item info to victim values if exists
if 'items' in row.victim and row.victim['items']:
victim_row.append(parse_items(row.victim['items']))
else:
victim_row.append([]) # keep empty array
else:
victim_row = None
if 'npc' in row.zkb:
npc = row.zkb['npc']
else:
npc = False # Assume there are attackers
attacker_rows = []
if not npc:
attacker_rows.extend(
[attacker for attacker in parse_attackers(row.attackers)]
)
yield victim_row, attacker_rows, row.killmail_id
# Specify S3 parameters and SQL query
bucket='dilabevetrajectorymining'
key='eve-trajectory-mining/Killmail_Fetching/killmail_scrapes/byregion/10000002/10000002201505.csv'
query="""
SELECT *
FROM s3Object s
LIMIT 5
"""
# Let amazon do the api calls
# print('Querying s3 bucket...')
# df = select(bucket, key, query)
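# The select() helper referenced above is not defined in this script; below is
# a minimal sketch of what it might look like using boto3's S3 Select API. The
# boto3 dependency, CSV header handling and unnamed output columns are
# assumptions, not part of the original pipeline.
def select(bucket, key, query):
    import io
    import boto3
    s3 = boto3.client('s3')
    resp = s3.select_object_content(
        Bucket=bucket, Key=key,
        ExpressionType='SQL', Expression=query,
        InputSerialization={'CSV': {'FileHeaderInfo': 'USE'}},
        OutputSerialization={'CSV': {}})
    payload = b''.join(event['Records']['Payload']
                       for event in resp['Payload'] if 'Records' in event)
    return pd.read_csv(io.BytesIO(payload), header=None)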
#
# Open YAML file of typeIDs to get names of items
# typeIDs.yaml -> dictionary of typeID keys which contain attributes
# ex. typeIDs[11317] -> {'description': {'en': 'blah', ...}, ...}
# typeIDs[11317]['name']['en'] == '800mm Rolled Tungsten Compact Plates'
# typeIDs[11317]['groupID'] == 329
# groupIDs[329] -> {'name': {'en': 'blah', ...}, ...}
# groupIDs[329]['name']['en'] == 'Armor Reinforcer'
#
lap("Loading YAML files into memory...")
root = "../Trajectory_Mining/docs/eve files" # YAML file location
typeIDs = load_yaml(os.path.join(root, 'typeIDs.yaml'))
groupIDs = load_yaml(os.path.join(root, 'groupIDs.yaml'))
# invFlags = load_yaml(os.path.join(root, 'invFlags.yaml'))
# invMarketGroups = load_yaml(os.path.join(root, 'invMarketGroups.yaml'))
# categoryIDs = load_yaml(os.path.join(root, 'categoryIDs.yaml'))
# Sequentially load CSV's from file
lap("Loading CSV data from killmail_scrapes...")
victims = [] # list of victim dataframes generated from CSV's
attackers = [] # list of attacker dataframes generated from CSV's
for root, dirs, files in os.walk("../Killmail_Fetching/killmail_scrapes/byregion", topdown=False):
count = 0
num_files = len(files) # number of CSV files
for file in sorted(files):
print(f"Progress {count/num_files:2.1%} ", end="\r")
df = pd.read_csv(os.path.join(root, file), encoding='utf-8')
# Convert all timestamp strings to numpy.datetime64
# print("> Converting DataFrame column value types ", end="")
df['killmail_time'] = pd.to_datetime(df['killmail_time'],
# Turn errors into NaT
errors='coerce',
# Use this format to parse str
format='%Y-%m-%dT%H:%M:%SZ')
# Convert all numeric values in 'solar_system_id' to smallest int type
# Convert all non-numeric values in 'solar_system_id' to NaN
df['solar_system_id'] = pd.to_numeric(df['solar_system_id'],
# Turn errors into NaN
errors='coerce',
# Convert to smallest int type
downcast='integer')
# Convert values in columns to python objects
df['victim'] = df['victim'].apply(literal_eval)
df['attackers'] = df['attackers'].apply(literal_eval)
df['zkb'] = df['zkb'].apply(literal_eval)
# Unpack DataFrame subset containing lists and dicts
# print("> Unpacking DataFrame values ", end="")
victim_rows = []
attacker_rows = []
a_col = ['final_blow', 'damage_done', 'ship_type_id']
v_col = ['killmail_time', 'solar_system_id', 'character_id',
'ship_type_id', 'items']
for v_row, a_rows, k_id in unpack(df):
if v_row is not None: # If no character ID, don't append victim
victim_rows.append(pd.DataFrame(
[v_row],
columns=v_col,
index=pd.Index([k_id], name='killmail_id')
))
if a_rows:
attacker_rows.extend([pd.DataFrame(
[a_row],
columns=a_col,
index=pd.MultiIndex.from_tuples(
[(k_id, a_id)],
names=('killmail_id',
'character_id')
)
) for a_id, a_row in a_rows])
# Concat victim_rows together
# print("> Concating victim rows ", end="\r")
victims.append(pd.concat(victim_rows, sort=False))
# attackers.append(pd.concat(attacker_rows, sort=False))
count += 1
# Save victim and attacker info to CSV
lap("Writing results to CSV...")
df_victims = pd.concat(victims)
df_victims.to_csv('data/all_victims_items.csv')
# df_attackers = pd.concat(attackers)
# df_attackers.to_csv('data/all_attackers.csv')
lap("Exit")
| 38.380567 | 99 | 0.539873 | # -*- coding: utf-8 -*-
"""Unpacks Raw API data from zkillboard into victim files that contain
TEST - 10/02/2019
Params: 10000002201505.csv | 61MB | 28208 rows x 8 columns
Output:
```
(+0.000s|t:0.000s) Importing modules...
(+2.209s|t:2.209s) Loading CSV data from local file...
(+1.132s|t:3.341s) Converting DataFrame column value types...
(+18.746s|t:22.087s) Loading YAML files into memory...
(+3.88m|t:4.25m) Unpacking DataFrame values...
(+2.30m|t:6.55m) Writing results to CSV...
(+8.008s|t:6.68m) Exit
```
Written By: Adam Coscia
Updated On: 11/09/2019
"""
# Start timing
import time
start = time.time()
total = 0
def lap(msg):
"""Records time elapsed."""
global start, total
elapsed = (time.time() - start) - total
total = time.time() - start
if elapsed > 3600:
print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}')
elif elapsed > 60:
if total > 3600:
print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}')
else:
print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}')
else:
if total > 3600:
print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}')
elif total > 60:
print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}')
else:
print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}')
lap("Importing modules...")
from ast import literal_eval
import os
import sys
import numpy as np
import pandas as pd
import yaml
def load_yaml(file_loc, encoding='utf-8'):
"""Loads yaml file at file_loc and returns Python object based on yaml
structure.
"""
data = None
with open(file_loc, 'r', encoding=encoding) as stream:
try:
data = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
return data
def unpack(data: pd.DataFrame):
"""Operations to unpack nested data, yield row for row in old data.
Iterate over each row of data using generator and unpack each row.
"""
def parse_items(items):
# Use sets for faster look-up time (no order needed to preserve)
# lo_flags = {11, 12, 13, 14, 15, 16, 17, 18}
# mi_flags = {19, 20, 21, 22, 23, 24, 25, 26}
# hi_flags = {27, 28, 29, 30, 31, 32, 33, 34}
itemList=[]
for item in items:
itemName, groupName = 'Missing', 'Missing'
try:
item_type_id = item['item_type_id']
try:
item_group_id = typeIDs[item_type_id]['groupID']
try:
itemName = typeIDs[item_type_id]['name']['en']
try:
groupName = groupIDs[item_group_id]['name']['en']
except:
pass
except:
pass
except:
pass
except:
pass
finally:
itemList.append((itemName, groupName))
return itemList
def parse_attackers(attackers):
attacker_keys = ('final_blow', 'damage_done', 'ship_type_id')
for attacker in attackers:
if 'character_id' in attacker:
a = (attacker['character_id'], [])
for a_key in attacker_keys:
if a_key in attacker:
a[1].append(attacker[a_key])
else:
a[1].append(np.nan)
yield a
for row in data.itertuples():
# Some killmails are npcs, don't include their items and values
if 'character_id' in row.victim:
# These values are guaranteed in every killmail
victim_row = [row.killmail_time,
row.solar_system_id,
row.victim['character_id']]
# Try to add ship_type_id to victim values if exists
if 'ship_type_id' in row.victim:
victim_row.append(row.victim['ship_type_id'])
else:
victim_row.append(np.nan)
# Try to add item info to victim values if exists
if 'items' in row.victim and row.victim['items']:
victim_row.append(parse_items(row.victim['items']))
else:
victim_row.append([]) # keep empty array
else:
victim_row = None
if 'npc' in row.zkb:
npc = row.zkb['npc']
else:
npc = False # Assume there are attackers
attacker_rows = []
if not npc:
attacker_rows.extend(
[attacker for attacker in parse_attackers(row.attackers)]
)
yield victim_row, attacker_rows, row.killmail_id
# Specify S3 parameters and SQL query
bucket='dilabevetrajectorymining'
key='eve-trajectory-mining/Killmail_Fetching/killmail_scrapes/byregion/10000002/10000002201505.csv'
query="""
SELECT *
FROM s3Object s
LIMIT 5
"""
# Let amazon do the api calls
# print('Querying s3 bucket...')
# df = select(bucket, key, query)
#
# Open YAML file of typeIDs to get names of items
# typeIDs.yaml -> dictionary of typeID keys which contain attributes
# ex. typeIDs[11317] -> {'description': {'en': 'blah', ...}, ...}
# typeIDs[11317]['name']['en'] == '800mm Rolled Tungsten Compact Plates'
# typeIDs[11317]['groupID'] == 329
# groupIDs[329] -> {'name': {'en': 'blah', ...}, ...}
# groupIDs[329]['name']['en'] == 'Armor Reinforcer'
#
lap("Loading YAML files into memory...")
root = "../Trajectory_Mining/docs/eve files" # YAML file location
typeIDs = load_yaml(os.path.join(root, 'typeIDs.yaml'))
groupIDs = load_yaml(os.path.join(root, 'groupIDs.yaml'))
# invFlags = load_yaml(os.path.join(root, 'invFlags.yaml'))
# invMarketGroups = load_yaml(os.path.join(root, 'invMarketGroups.yaml'))
# categoryIDs = load_yaml(os.path.join(root, 'categoryIDs.yaml'))
# Sequentially load CSV's from file
lap("Loading CSV data from killmail_scrapes...")
victims = [] # list of victim dataframes generated from CSV's
attackers = [] # list of attacker dataframes generated from CSV's
for root, dirs, files in os.walk("../Killmail_Fetching/killmail_scrapes/byregion", topdown=False):
count = 0
num_files = len(files) # number of CSV files
for file in sorted(files):
print(f"Progress {count/num_files:2.1%} ", end="\r")
df = pd.read_csv(os.path.join(root, file), encoding='utf-8')
# Convert all timestamp strings to numpy.datetime64
# print("> Converting DataFrame column value types ", end="")
df['killmail_time'] = pd.to_datetime(df['killmail_time'],
# Turn errors into NaT
errors='coerce',
# Use this format to parse str
format='%Y-%m-%dT%H:%M:%SZ')
# Convert all numeric values in 'solar_system_id' to smallest int type
# Convert all non-numeric values in 'solar_system_id' to NaN
df['solar_system_id'] = pd.to_numeric(df['solar_system_id'],
# Turn errors into NaN
errors='coerce',
# Convert to smallest int type
downcast='integer')
# Convert values in columns to python objects
df['victim'] = df['victim'].apply(literal_eval)
df['attackers'] = df['attackers'].apply(literal_eval)
df['zkb'] = df['zkb'].apply(literal_eval)
# Unpack DataFrame subset containing lists and dicts
# print("> Unpacking DataFrame values ", end="")
victim_rows = []
attacker_rows = []
a_col = ['final_blow', 'damage_done', 'ship_type_id']
v_col = ['killmail_time', 'solar_system_id', 'character_id',
'ship_type_id', 'items']
for v_row, a_rows, k_id in unpack(df):
if v_row is not None: # If no character ID, don't append victim
victim_rows.append(pd.DataFrame(
[v_row],
columns=v_col,
index=pd.Index([k_id], name='killmail_id')
))
if a_rows:
attacker_rows.extend([pd.DataFrame(
[a_row],
columns=a_col,
index=pd.MultiIndex.from_tuples(
[(k_id, a_id)],
names=('killmail_id',
'character_id')
)
) for a_id, a_row in a_rows])
# Concat victim_rows together
# print("> Concating victim rows ", end="\r")
victims.append(pd.concat(victim_rows, sort=False))
# attackers.append(pd.concat(attacker_rows, sort=False))
count += 1
# Save victim and attacker info to CSV
lap("Writing results to CSV...")
df_victims = pd.concat(victims)
df_victims.to_csv('data/all_victims_items.csv')
# df_attackers = pd.concat(attackers)
# df_attackers.to_csv('data/all_attackers.csv')
lap("Exit")
| 0 | 0 |
6a00d837f8f2733589c99f7342b4ddb14ce14281 | 848 | py | Python | iqps/report/views.py | thealphadollar/iqps | 187f6b134d82e2dce951b356cb0c7151994ca3ab | [
"MIT"
] | null | null | null | iqps/report/views.py | thealphadollar/iqps | 187f6b134d82e2dce951b356cb0c7151994ca3ab | [
"MIT"
] | null | null | null | iqps/report/views.py | thealphadollar/iqps | 187f6b134d82e2dce951b356cb0c7151994ca3ab | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.contrib import messages
from .forms import ReportForm
from data.models import Paper
def reportPaper(request, paperId):
paper = get_object_or_404(Paper, pk=paperId)
form = None
try:
assert request.method == "POST"
form = ReportForm(request.POST)
assert form.is_valid()
report = form.save(commit=False)
report.paper = paper
report.save()
messages.add_message(request, messages.INFO, "Report Successful!")
return render(request, "reportform.html", {
"form": ReportForm(),
"paper": paper
})
except:
if form is None:
form = ReportForm()
return render(request, "reportform.html", {
"form": form,
"paper": paper
})
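# Hedged usage sketch (not part of this module): wiring the view into the app's
# urls.py. The pattern and name below are assumptions, not taken from the repo.
#
#   from django.urls import path
#   from .views import reportPaper
#
#   urlpatterns = [
#       path('report/<int:paperId>/', reportPaper, name='report-paper'),
#   ]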
| 30.285714 | 74 | 0.607311 | from django.shortcuts import render, get_object_or_404
from django.contrib import messages
from .forms import ReportForm
from data.models import Paper
def reportPaper(request, paperId):
paper = get_object_or_404(Paper, pk=paperId)
form = None
try:
assert request.method == "POST"
form = ReportForm(request.POST)
assert form.is_valid()
report = form.save(commit=False)
report.paper = paper
report.save()
messages.add_message(request, messages.INFO, "Report Successful!")
return render(request, "reportform.html", {
"form": ReportForm(),
"paper": paper
})
except:
if form is None:
form = ReportForm()
return render(request, "reportform.html", {
"form": form,
"paper": paper
})
| 0 | 0 |
72598642d175089036d9345be8dbcafd77a05743 | 19,963 | py | Python | build/lib/geonomics/sim/stats.py | AnushaPB/geonomics-1 | deee0c377e81f509463eaf6f9d0b2f0809f2ddc3 | [
"MIT"
] | null | null | null | build/lib/geonomics/sim/stats.py | AnushaPB/geonomics-1 | deee0c377e81f509463eaf6f9d0b2f0809f2ddc3 | [
"MIT"
] | null | null | null | build/lib/geonomics/sim/stats.py | AnushaPB/geonomics-1 | deee0c377e81f509463eaf6f9d0b2f0809f2ddc3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#stats.py
'''
Classes and functons to implement calculation and output of statistics
'''
#geonomics imports
from geonomics.utils.io import (_append_array2d_to_array_stack,
_append_row_to_csv, _write_dict_to_csv)
from geonomics.ops.selection import _calc_fitness
from geonomics.utils.viz import _check_display
#other imports
import numpy as np
from scipy.stats.stats import pearsonr
from collections import Counter as C
import os
import matplotlib as mpl
_check_display()
import matplotlib.pyplot as plt
######################################
# -----------------------------------#
# CLASSES ---------------------------#
# -----------------------------------#
######################################
#a StatsCollector class, to parameterize and manage calculation
#and collection of stats, then write them to file at the end of
#each model iteration
class _StatsCollector:
def __init__(self, model_name, params):
#set model_name
self.model_name = model_name
#set total model time
self.T = params.model.T
#grab the stats parameters
stats_params = params.model.stats
#a dictionary to link the stats' names in the params dict
#to the functions to be called to calculate them
self.calc_fn_dict = {'Nt': _calc_Nt,
'ld': _calc_ld,
'het': _calc_het,
'maf': _calc_maf,
'mean_fit': _calc_mean_fitness,
}
#a dictionary to link the stats' names in the params dict
#to the functions to be called to write them to disk
self.write_fn_dict = {'ld': self._write_array_to_stack,
'het': self._write_row_to_csv,
'maf': self._write_row_to_csv,
}
#a dictionary to link stats to the file extensions that
#should be used to write them to disk
self.file_suffix_dict = {'Nt': 'OTHER_STATS.csv',
'ld': 'LD.txt',
'het': 'HET.csv',
'maf': 'MAF.csv',
'mean_fit': 'OTHER_STATS.csv',
}
#get the species names
spps_with_wout_genomes = {str(k):('gen_arch' in v.keys()) for k, v
in params.comm.species.items()}
#list stats that cannot be calculated for species without genomes
stats_invalid_wout_genomes = ['ld', 'het', 'maf', 'mean_fit']
#create a stats attribute, to store all stats calculated
self.stats = {}
for spp_name, genome in spps_with_wout_genomes.items():
self.stats[spp_name] = {}
for stat, stat_params in stats_params.items():
#skip species without genomes for stats that need genomes
if not genome and stat in stats_invalid_wout_genomes:
break
#each spp gets a subdict
else:
#each subdict gets a key for each stat to be calculated
if stat_params.calc:
#create a subdictionary for each stat, with a list of
#NaNs self.T items long, which will be filled in for
#each whenever it is sampled (NOTE: this forces all
#stats to have the same length so that they all fit
#into one pd.DataFrame at the end, and so that plots
#easily line up on the same timeframe)
self.stats[spp_name][stat]= {
'vals': [np.nan]*self.T,
'freq': stat_params.freq,
#add a 'filepath' key, whose value will be updated
                            #to contain the correct filepaths for each stat
'filepath': None,
#create tuple of other, stat-specific parameters,
#to later be unpacked as arguments to
#the appropriate stat function
'other_params': dict([(k,v) for k,v in
stat_params.items() if k not in ['calc',
'freq']])
}
#if the freq value is 0, change it to self.T -1, so
#that it collects only on the first and last timestep
if self.stats[spp_name][stat]['freq'] == 0:
self.stats[spp_name][stat]['freq'] = self.T-1
#create a master method, to be called each timestep, which will make a list
#of all stats that need to be calculated that timestep (based on the
#calculation-frequencies provided in the params dicts), and then calls the
#functions to calculate them all and adds the results to self.stats
def _calc_stats(self, community, t, iteration):
#set the filepaths, if this is the first timestep of the model
#iteration
if t == 0:
self._set_filepaths(iteration)
#for each species
for spp in community.values():
#list the stats to be calculated this timestep
if t == self.T-1:
#calculate all, if it's the last timestep
calc_list = [*self.stats[spp.name]]
else:
#or else only calculate based on the parameterized frequencies
#for each stat
calc_list = [k for k,v in self.stats[spp.name].items() if (
t % v['freq'] == 0)]
#then calculate each stat
for stat in calc_list:
vals = self.calc_fn_dict[stat](spp,
**self.stats[spp.name][stat]['other_params'])
#and add each stat to the right location (by timestep)
#in its list
try:
self.stats[spp.name][stat]['vals'][t] = vals
#unless the list isn't long enough (which happens if mod.walk
#has been used to run the model past its initially stipulated
#length of time), in which case make it long enough and make
#the last value the stat just calculated
except IndexError:
stats_list = self.stats[spp.name][stat]['vals']
stats_list.extend([np.nan] * (t-len(stats_list)) + [vals])
#and write whichever stats are necessary to file
self._write_stats(t)
#a method to make the filenames for all of the stats to be saved
def _set_filepaths(self, iteration):
#get the directory name for this model and iteration
dirname = os.path.join('GNX_mod-%s' % self.model_name,
'it-%i' % iteration)
#for each species
for spp_name in [*self.stats]:
#get the subdirectory name and filename for this species
subdirname = os.path.join(dirname, 'spp-%s' % spp_name)
#make this subdir, and any parent dirs as necessary
os.makedirs(subdirname, exist_ok = True)
#create the filename and filepath for this spp, for each stat
for stat in [*self.stats[spp_name]]:
filename = 'mod-%s_it-%i_spp-%s_%s' % (self.model_name,
iteration, spp_name, self.file_suffix_dict[stat])
filepath = os.path.join(subdirname, filename)
#add the filepath for this stat to self.stats
self.stats[spp_name][stat]['filepath'] = filepath
#wrapper around io.append_array2d_to_array_stack
#TODO WHAT TO DO WITH t IN THIS CASE?? CAN'T ADD TO txt 3D ARRAY FILE
def _write_array_to_stack(self, filepath, array, t):
_append_array2d_to_array_stack(filepath, array)
#wrapper around io.append_row_to_csv
def _write_row_to_csv(self, filepath, array, t):
_append_row_to_csv(filepath, array, t)
#use io._write_dict_to_csv to write to disk all "other stats", i.e.
#all stats that collect only a single value per species per timestep
    #TODO: CHANGE THE 'OTHER STATS' NAMING CONVENTION TO SOMETHING MORE
#DESCRIPTIVE
def _write_other_stats(self):
for spp, spp_stats in self.stats.items():
#get a dictionary of the data values for all stats that are to be
#written just once at the end of the iteration
data_dict = {k:v['vals'] for k,v in spp_stats.items() if
'OTHER_STATS' in v['filepath']}
#they all have the same filepath, so just grab the first
filepath = [*spp_stats.values()][0]['filepath']
#write to disk
_write_dict_to_csv(filepath, data_dict)
#method to write stats to files, in the appropriate directory (by model
#and iteration number), and with the appropriate spp names in the filenames
def _write_stats(self, t):
#for each species
for spp_name, spp_stats in self.stats.items():
#for each stat
write_list = [k for k,v in spp_stats.items() if t % v['freq'] == 0]
for stat, stat_dict in spp_stats.items():
#get the filepath
filepath = stat_dict['filepath']
#if the filepath does not contain "OTHER_STATS" then it is a
#stat that produces more than a single value per species per
#timestep it is collected, so write the data to disk
#intermittently and then delete the data from memory (if it was
#collected this timestep)
if "OTHER_STATS" not in filepath and stat in write_list:
#get the correct write_fn for this stat
write_fn = self.write_fn_dict[stat]
#call the write_fn to write the data to disk
write_fn(filepath, stat_dict['vals'][t], t)
#then replace the last data collected prior to this
#timestep's data with None, to free up memory but still
#maintain the latest data in case of plotting
rev_nonnull = [n for n, v in enumerate(
stat_dict['vals'][::-1]) if (v is not np.nan and
v is not None)]
nonnull = [range(len(
stat_dict['vals']))[::-1][n] for n in rev_nonnull]
nonnull = [v for v in nonnull if v != t]
for v in nonnull:
stat_dict['vals'][v] = None
#or write all 'other stats' to disk, if it's the last timestep
if t == self.T-1:
self._write_other_stats()
#method to plot whichever stat as a function of runtime
def _plot_stat(self, stat, spp_name=None):
#check that the stat argument is valid
assert type(stat) is str, "The 'stat' argument must be a string."
assert stat in [*self.stats.values()][0].keys(), ("The 'stat' "
"argument must name a statistic that was collected. Valid values: "
"%s.") % (','.join(["'%s'" % val for val in
[*self.stats.values()][0].keys()]))
#get the list of spps to plot
if spp_name is None:
spp_names = [*self.stats]
elif (spp_name is not None
and type(spp_name) is str and spp_name in [*self.stats]):
spp_names = [spp_name]
else:
raise ValueError(("The 'spp_name' argument, if provided, "
"must be a string containing a valid species name."))
#create the figure
fig = plt.figure()
#plot each species for the chosen statistic
for n, spp_name in enumerate(spp_names):
#get the stat values to plot
vals = self.stats[spp_name][stat]['vals']
#plot 'Nt' or 'mean_fit'
if stat in ['Nt', 'mean_fit']:
#add axes objects horizontally across
ax = fig.add_subplot(1, len(spp_names), n+1)
#get the indices of non-NaN values to be plotted
indices_to_plot = np.array(np.where(
np.invert(np.isnan(vals)))[0])
#get the timesteps at the non-NaN values
x = np.arange(0, len(vals))[indices_to_plot]
#get the non-NaN values
y = np.array(vals)[indices_to_plot]
#plot a dotted line (which necessarily linearly interpolates
#between collected timesteps if not all timesteps
#were collected)
plt.plot(x, y, ':')
#and plot dots at each of the collected timesteps
plt.plot(x, y, '.')
#set the title to the stat and the species' name
ax.set_title("SPP: '%s'" % (spp_name))
#set the x- and y-labels
plt.xlabel('timestep')
plt.ylabel(stat)
#or plot 'maf' or 'het'
elif stat in ['het', 'maf']:
#add axes objects horizontally across
ax = fig.add_subplot(1, len(spp_names), n+1)
#get the reversed-list index of the last set of values
#calculated
rev_idx_last_vals = [n for n,v in enumerate(vals[::-1]) if (
v is not None and v is not np.nan)][0]
#get the last set of values calculated
last_vals = vals[::-1][rev_idx_last_vals]
#get the timestep of the last set of values
t_last_vals = range(len(vals))[::-1][rev_idx_last_vals]
#plot the values
plt.plot(range(len(last_vals)), last_vals, '-')
#set the title to the species' name and timestep of the
#values plotted
ax.set_title("SPP: '%s'; T: %i" % (spp_name, t_last_vals))
#set the x- and y-labels
plt.xlabel('locus')
plt.ylabel(stat)
#or plot 'ld'
elif stat in ['ld']:
#get the reversed-list index of the last set of values
#calculated
rev_idx_last_vals = [n for n,v in enumerate(vals[::-1]) if (
v is not None and v is not np.nan)][0]
#get the last set of values (i.e. r^2 array) calculated
r2_mat = vals[::-1][rev_idx_last_vals]
#get the timestep of the last set of values
t_last_vals = range(len(vals))[::-1][rev_idx_last_vals]
#add axes objects horizontally across, in two rows
ax = fig.add_subplot(2, len(spp_names), n+1)
#plot the LD matrix in row 1
plt.imshow(np.clip(r2_mat, a_min = 0, a_max = None),
interpolation = 'nearest')
plt.colorbar()
#set plot title
ax.set_title(("SPP: '%s'; T: %i\nLocus-wise "
"linkage matrix") % (spp_name, t_last_vals))
#set the x- and y-labels
plt.xlabel('locus')
plt.ylabel('locus')
ax = fig.add_subplot(2, len(spp_names), n+1+len(spp_names))
#plot of mean linkage values
r2_list = [r2_mat[0,1]]
L = r2_mat.shape[0]
for i in range(1,L-1):
r2_list.append(np.mean([r2_mat[i-1,i], r2_mat[i,i+1]]))
r2_list.append(r2_mat[L-2,L-1])
plt.scatter(range(L), r2_list, c = 'red', marker = 'o', s=25)
#set plot title
ax.set_title("Locus-wise mean linkage values")
#set the x- and y-labels
plt.xlabel('locus')
plt.ylabel('mean linkage')
#or else return informative error message
else:
raise ValueError(("The value provided for the 'stat' argument "
"is not a valid statistic. Valid values include: %s\n\n")%(
','.join(['%s' % k for k in [*self.calc_fn_dict]])))
#set the main title to the stat plotted
fig.suptitle('STAT: %s' % stat)
#show the image
fig.show()
######################################
# -----------------------------------#
# FUNCTIONS -------------------------#
# -----------------------------------#
######################################
#method to get pop size (NOTE: not actually calculating it)
def _calc_Nt(spp):
Nt = spp.Nt[-1]
return(Nt)
def _calc_ld(spp, plot = False):
#TODO: I should also include (either as an alternative within this fn,
#or as separate fn) the option to calculate D'
#TODO: I keep getting warnings like the following, which could just be
#due to divison of small floating-point numbers, but I should figure out
#exactly what's going on and be sure everything checks out. WARNING:
# stats.py:117: RuntimeWarning: invalid value encountered in double_scalars
speciome = spp._get_genotypes()
n = np.shape(speciome)[0] #num individs
x = np.shape(speciome)[2] #ploidy
N = n*x
L = spp.gen_arch.L
assert L == np.shape(speciome)[1], ("The length of the 1st dimension "
"of speciome doesn't equal spp.genomic_arch.L")
r2_mat = np.zeros([L]*2) * np.nan # vals default to NaN
for i in range(L):
for j in range(i+1, L):
#calculate freq of allele 1 at locus i
f1_i = np.sum(speciome[:,i,:], axis = None)/(N)
#calculate freq of allele 1 at locus j
f1_j = np.sum(speciome[:,j,:], axis = None)/(N)
#calculate freq of chroms with 1_1 haplotype at loci i and j
f11_ij = float(np.sum(speciome[:,[i,j],:].sum(axis = 1) ==2,
axis = None))/(N)
D_1_1 = f11_ij - (f1_i * f1_j)
r2 = (D_1_1**2)/(f1_i*(1-f1_i)*f1_j*(1-f1_j))
# add to both triangular halves, to produce a symmetric matrix
r2_mat[i,j] = r2
r2_mat[j,i] = r2
return(r2_mat)
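#a worked reference for the quantity computed above: with p_i = freq(allele 1 at
#locus i), p_j = freq(allele 1 at locus j) and p_ij = freq(1-1 haplotypes),
#    D = p_ij - p_i*p_j    and    r^2 = D^2 / (p_i*(1-p_i)*p_j*(1-p_j))
#e.g. p_i = p_j = 0.5 with p_ij = 0.5 (perfect linkage) gives D = 0.25, r^2 = 1;
#p_ij = 0.25 (independent loci) gives D = 0, r^2 = 0. Fixed loci (p = 0 or 1)
#zero the denominator, which is one source of the RuntimeWarning noted above.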
#function to calculate the locus-wise (if mean == False) or mean (if
#mean == True) heterozygosity of the species
def _calc_het(spp, mean=False):
#get pop size
N = len(spp)
#get the speciome
speciome = spp._get_genotypes()
#calculate the frequency of heterozygotes, locus-wise
het = np.sum(np.mean(speciome, axis = 2) == 0.5, axis = 0)/N
#get the mean heterozygosity, if mean argument is True
if mean:
        het = np.mean(het)
return(het)
#function to calculate the locus-wise minor allele frequency of the species
def _calc_maf(spp):
#get two times the pop size
two_N = 2*len(spp)
#get the speciome
speciome = spp._get_genotypes()
#get the frequencies of 1-alleles for all loci
freqs_1 = np.sum(np.sum(speciome, axis = 2), axis = 0)/two_N
#find all loci where the 1-allele is the major allele
majors = np.where(freqs_1 > 0.5)
#replace the locations where 1 is the major allele with 0-allele freq
    maf = freqs_1.copy()  # copy, so the original frequency array is not modified in place
maf[majors] = 1 - freqs_1[majors]
return(maf)
#function to calculate the mean fitness of the species
def _calc_mean_fitness(spp):
#calculate the mean fitness, if this species has traits
if spp.gen_arch.traits is not None:
mean_fit = np.mean(_calc_fitness(spp))
#or else return NaN
else:
mean_fit = np.nan
return(mean_fit)
| 45.786697 | 80 | 0.539698 | #!/usr/bin/python
#stats.py
'''
Classes and functons to implement calculation and output of statistics
'''
#geonomics imports
from geonomics.utils.io import (_append_array2d_to_array_stack,
_append_row_to_csv, _write_dict_to_csv)
from geonomics.ops.selection import _calc_fitness
from geonomics.utils.viz import _check_display
#other imports
import numpy as np
from scipy.stats.stats import pearsonr
from collections import Counter as C
import os
import matplotlib as mpl
_check_display()
import matplotlib.pyplot as plt
######################################
# -----------------------------------#
# CLASSES ---------------------------#
# -----------------------------------#
######################################
#a StatsCollector class, to parameterize and manage calculation
#and collection of stats, then write them to file at the end of
#each model iteration
class _StatsCollector:
def __init__(self, model_name, params):
#set model_name
self.model_name = model_name
#set total model time
self.T = params.model.T
#grab the stats parameters
stats_params = params.model.stats
#a dictionary to link the stats' names in the params dict
#to the functions to be called to calculate them
self.calc_fn_dict = {'Nt': _calc_Nt,
'ld': _calc_ld,
'het': _calc_het,
'maf': _calc_maf,
'mean_fit': _calc_mean_fitness,
}
#a dictionary to link the stats' names in the params dict
#to the functions to be called to write them to disk
self.write_fn_dict = {'ld': self._write_array_to_stack,
'het': self._write_row_to_csv,
'maf': self._write_row_to_csv,
}
#a dictionary to link stats to the file extensions that
#should be used to write them to disk
self.file_suffix_dict = {'Nt': 'OTHER_STATS.csv',
'ld': 'LD.txt',
'het': 'HET.csv',
'maf': 'MAF.csv',
'mean_fit': 'OTHER_STATS.csv',
}
#get the species names
spps_with_wout_genomes = {str(k):('gen_arch' in v.keys()) for k, v
in params.comm.species.items()}
#list stats that cannot be calculated for species without genomes
stats_invalid_wout_genomes = ['ld', 'het', 'maf', 'mean_fit']
#create a stats attribute, to store all stats calculated
self.stats = {}
for spp_name, genome in spps_with_wout_genomes.items():
self.stats[spp_name] = {}
for stat, stat_params in stats_params.items():
#skip species without genomes for stats that need genomes
if not genome and stat in stats_invalid_wout_genomes:
break
#each spp gets a subdict
else:
#each subdict gets a key for each stat to be calculated
if stat_params.calc:
#create a subdictionary for each stat, with a list of
#NaNs self.T items long, which will be filled in for
#each whenever it is sampled (NOTE: this forces all
#stats to have the same length so that they all fit
#into one pd.DataFrame at the end, and so that plots
#easily line up on the same timeframe)
self.stats[spp_name][stat]= {
'vals': [np.nan]*self.T,
'freq': stat_params.freq,
#add a 'filepath' key, whose value will be updated
                            #to contain the correct filepaths for each stat
'filepath': None,
#create tuple of other, stat-specific parameters,
#to later be unpacked as arguments to
#the appropriate stat function
'other_params': dict([(k,v) for k,v in
stat_params.items() if k not in ['calc',
'freq']])
}
#if the freq value is 0, change it to self.T -1, so
#that it collects only on the first and last timestep
if self.stats[spp_name][stat]['freq'] == 0:
self.stats[spp_name][stat]['freq'] = self.T-1
#create a master method, to be called each timestep, which will make a list
#of all stats that need to be calculated that timestep (based on the
#calculation-frequencies provided in the params dicts), and then calls the
#functions to calculate them all and adds the results to self.stats
def _calc_stats(self, community, t, iteration):
#set the filepaths, if this is the first timestep of the model
#iteration
if t == 0:
self._set_filepaths(iteration)
#for each species
for spp in community.values():
#list the stats to be calculated this timestep
if t == self.T-1:
#calculate all, if it's the last timestep
calc_list = [*self.stats[spp.name]]
else:
#or else only calculate based on the parameterized frequencies
#for each stat
calc_list = [k for k,v in self.stats[spp.name].items() if (
t % v['freq'] == 0)]
#then calculate each stat
for stat in calc_list:
vals = self.calc_fn_dict[stat](spp,
**self.stats[spp.name][stat]['other_params'])
#and add each stat to the right location (by timestep)
#in its list
try:
self.stats[spp.name][stat]['vals'][t] = vals
#unless the list isn't long enough (which happens if mod.walk
#has been used to run the model past its initially stipulated
#length of time), in which case make it long enough and make
#the last value the stat just calculated
except IndexError:
stats_list = self.stats[spp.name][stat]['vals']
stats_list.extend([np.nan] * (t-len(stats_list)) + [vals])
#and write whichever stats are necessary to file
self._write_stats(t)
#a method to make the filenames for all of the stats to be saved
def _set_filepaths(self, iteration):
#get the directory name for this model and iteration
dirname = os.path.join('GNX_mod-%s' % self.model_name,
'it-%i' % iteration)
#for each species
for spp_name in [*self.stats]:
#get the subdirectory name and filename for this species
subdirname = os.path.join(dirname, 'spp-%s' % spp_name)
#make this subdir, and any parent dirs as necessary
os.makedirs(subdirname, exist_ok = True)
#create the filename and filepath for this spp, for each stat
for stat in [*self.stats[spp_name]]:
filename = 'mod-%s_it-%i_spp-%s_%s' % (self.model_name,
iteration, spp_name, self.file_suffix_dict[stat])
filepath = os.path.join(subdirname, filename)
#add the filepath for this stat to self.stats
self.stats[spp_name][stat]['filepath'] = filepath
#wrapper around io.append_array2d_to_array_stack
#TODO WHAT TO DO WITH t IN THIS CASE?? CAN'T ADD TO txt 3D ARRAY FILE
def _write_array_to_stack(self, filepath, array, t):
_append_array2d_to_array_stack(filepath, array)
#wrapper around io.append_row_to_csv
def _write_row_to_csv(self, filepath, array, t):
_append_row_to_csv(filepath, array, t)
#use io._write_dict_to_csv to write to disk all "other stats", i.e.
#all stats that collect only a single value per species per timestep
    #TODO: CHANGE THE 'OTHER STATS' NAMING CONVENTION TO SOMETHING MORE
#DESCRIPTIVE
def _write_other_stats(self):
for spp, spp_stats in self.stats.items():
#get a dictionary of the data values for all stats that are to be
#written just once at the end of the iteration
data_dict = {k:v['vals'] for k,v in spp_stats.items() if
'OTHER_STATS' in v['filepath']}
#they all have the same filepath, so just grab the first
filepath = [*spp_stats.values()][0]['filepath']
#write to disk
_write_dict_to_csv(filepath, data_dict)
#method to write stats to files, in the appropriate directory (by model
#and iteration number), and with the appropriate spp names in the filenames
def _write_stats(self, t):
#for each species
for spp_name, spp_stats in self.stats.items():
#for each stat
write_list = [k for k,v in spp_stats.items() if t % v['freq'] == 0]
for stat, stat_dict in spp_stats.items():
#get the filepath
filepath = stat_dict['filepath']
#if the filepath does not contain "OTHER_STATS" then it is a
#stat that produces more than a single value per species per
#timestep it is collected, so write the data to disk
#intermittently and then delete the data from memory (if it was
#collected this timestep)
if "OTHER_STATS" not in filepath and stat in write_list:
#get the correct write_fn for this stat
write_fn = self.write_fn_dict[stat]
#call the write_fn to write the data to disk
write_fn(filepath, stat_dict['vals'][t], t)
#then replace the last data collected prior to this
#timestep's data with None, to free up memory but still
#maintain the latest data in case of plotting
rev_nonnull = [n for n, v in enumerate(
stat_dict['vals'][::-1]) if (v is not np.nan and
v is not None)]
nonnull = [range(len(
stat_dict['vals']))[::-1][n] for n in rev_nonnull]
nonnull = [v for v in nonnull if v != t]
for v in nonnull:
stat_dict['vals'][v] = None
#or write all 'other stats' to disk, if it's the last timestep
if t == self.T-1:
self._write_other_stats()
#method to plot whichever stat as a function of runtime
def _plot_stat(self, stat, spp_name=None):
#check that the stat argument is valid
assert type(stat) is str, "The 'stat' argument must be a string."
assert stat in [*self.stats.values()][0].keys(), ("The 'stat' "
"argument must name a statistic that was collected. Valid values: "
"%s.") % (','.join(["'%s'" % val for val in
[*self.stats.values()][0].keys()]))
#get the list of spps to plot
if spp_name is None:
spp_names = [*self.stats]
elif (spp_name is not None
and type(spp_name) is str and spp_name in [*self.stats]):
spp_names = [spp_name]
else:
raise ValueError(("The 'spp_name' argument, if provided, "
"must be a string containing a valid species name."))
#create the figure
fig = plt.figure()
#plot each species for the chosen statistic
for n, spp_name in enumerate(spp_names):
#get the stat values to plot
vals = self.stats[spp_name][stat]['vals']
#plot 'Nt' or 'mean_fit'
if stat in ['Nt', 'mean_fit']:
#add axes objects horizontally across
ax = fig.add_subplot(1, len(spp_names), n+1)
#get the indices of non-NaN values to be plotted
indices_to_plot = np.array(np.where(
np.invert(np.isnan(vals)))[0])
#get the timesteps at the non-NaN values
x = np.arange(0, len(vals))[indices_to_plot]
#get the non-NaN values
y = np.array(vals)[indices_to_plot]
#plot a dotted line (which necessarily linearly interpolates
#between collected timesteps if not all timesteps
#were collected)
plt.plot(x, y, ':')
#and plot dots at each of the collected timesteps
plt.plot(x, y, '.')
#set the title to the stat and the species' name
ax.set_title("SPP: '%s'" % (spp_name))
#set the x- and y-labels
plt.xlabel('timestep')
plt.ylabel(stat)
#or plot 'maf' or 'het'
elif stat in ['het', 'maf']:
#add axes objects horizontally across
ax = fig.add_subplot(1, len(spp_names), n+1)
#get the reversed-list index of the last set of values
#calculated
rev_idx_last_vals = [n for n,v in enumerate(vals[::-1]) if (
v is not None and v is not np.nan)][0]
#get the last set of values calculated
last_vals = vals[::-1][rev_idx_last_vals]
#get the timestep of the last set of values
t_last_vals = range(len(vals))[::-1][rev_idx_last_vals]
#plot the values
plt.plot(range(len(last_vals)), last_vals, '-')
#set the title to the species' name and timestep of the
#values plotted
ax.set_title("SPP: '%s'; T: %i" % (spp_name, t_last_vals))
#set the x- and y-labels
plt.xlabel('locus')
plt.ylabel(stat)
#or plot 'ld'
elif stat in ['ld']:
#get the reversed-list index of the last set of values
#calculated
rev_idx_last_vals = [n for n,v in enumerate(vals[::-1]) if (
v is not None and v is not np.nan)][0]
#get the last set of values (i.e. r^2 array) calculated
r2_mat = vals[::-1][rev_idx_last_vals]
#get the timestep of the last set of values
t_last_vals = range(len(vals))[::-1][rev_idx_last_vals]
#add axes objects horizontally across, in two rows
ax = fig.add_subplot(2, len(spp_names), n+1)
#plot the LD matrix in row 1
plt.imshow(np.clip(r2_mat, a_min = 0, a_max = None),
interpolation = 'nearest')
plt.colorbar()
#set plot title
ax.set_title(("SPP: '%s'; T: %i\nLocus-wise "
"linkage matrix") % (spp_name, t_last_vals))
#set the x- and y-labels
plt.xlabel('locus')
plt.ylabel('locus')
ax = fig.add_subplot(2, len(spp_names), n+1+len(spp_names))
#plot of mean linkage values
r2_list = [r2_mat[0,1]]
L = r2_mat.shape[0]
for i in range(1,L-1):
r2_list.append(np.mean([r2_mat[i-1,i], r2_mat[i,i+1]]))
r2_list.append(r2_mat[L-2,L-1])
plt.scatter(range(L), r2_list, c = 'red', marker = 'o', s=25)
#set plot title
ax.set_title("Locus-wise mean linkage values")
#set the x- and y-labels
plt.xlabel('locus')
plt.ylabel('mean linkage')
#or else return informative error message
else:
raise ValueError(("The value provided for the 'stat' argument "
"is not a valid statistic. Valid values include: %s\n\n")%(
','.join(['%s' % k for k in [*self.calc_fn_dict]])))
#set the main title to the stat plotted
fig.suptitle('STAT: %s' % stat)
#show the image
fig.show()
######################################
# -----------------------------------#
# FUNCTIONS -------------------------#
# -----------------------------------#
######################################
#method to get pop size (NOTE: not actually calculating it)
def _calc_Nt(spp):
Nt = spp.Nt[-1]
return(Nt)
def _calc_ld(spp, plot = False):
#TODO: I should also include (either as an alternative within this fn,
#or as separate fn) the option to calculate D'
#TODO: I keep getting warnings like the following, which could just be
#due to divison of small floating-point numbers, but I should figure out
#exactly what's going on and be sure everything checks out. WARNING:
# stats.py:117: RuntimeWarning: invalid value encountered in double_scalars
speciome = spp._get_genotypes()
n = np.shape(speciome)[0] #num individs
x = np.shape(speciome)[2] #ploidy
N = n*x
L = spp.gen_arch.L
assert L == np.shape(speciome)[1], ("The length of the 1st dimension "
"of speciome doesn't equal spp.genomic_arch.L")
r2_mat = np.zeros([L]*2) * np.nan # vals default to NaN
for i in range(L):
for j in range(i+1, L):
#calculate freq of allele 1 at locus i
f1_i = np.sum(speciome[:,i,:], axis = None)/(N)
#calculate freq of allele 1 at locus j
f1_j = np.sum(speciome[:,j,:], axis = None)/(N)
#calculate freq of chroms with 1_1 haplotype at loci i and j
f11_ij = float(np.sum(speciome[:,[i,j],:].sum(axis = 1) ==2,
axis = None))/(N)
D_1_1 = f11_ij - (f1_i * f1_j)
r2 = (D_1_1**2)/(f1_i*(1-f1_i)*f1_j*(1-f1_j))
# add to both triangular halves, to produce a symmetric matrix
r2_mat[i,j] = r2
r2_mat[j,i] = r2
return(r2_mat)
#function to calculate the locus-wise (if mean == False) or mean (if
#mean == True) heterozygosity of the species
def _calc_het(spp, mean=False):
#get pop size
N = len(spp)
#get the speciome
speciome = spp._get_genotypes()
#calculate the frequency of heterozygotes, locus-wise
het = np.sum(np.mean(speciome, axis = 2) == 0.5, axis = 0)/N
#get the mean heterozygosity, if mean argument is True
if mean:
        het = np.mean(het)
return(het)
#function to calculate the locus-wise minor allele frequency of the species
def _calc_maf(spp):
#get two times the pop size
two_N = 2*len(spp)
#get the speciome
speciome = spp._get_genotypes()
#get the frequencies of 1-alleles for all loci
freqs_1 = np.sum(np.sum(speciome, axis = 2), axis = 0)/two_N
#find all loci where the 1-allele is the major allele
majors = np.where(freqs_1 > 0.5)
#replace the locations where 1 is the major allele with 0-allele freq
maf = freqs_1[:]
maf[majors] = 1 - freqs_1[majors]
return(maf)
#function to calculate the mean fitness of the species
def _calc_mean_fitness(spp):
#calculate the mean fitness, if this species has traits
if spp.gen_arch.traits is not None:
mean_fit = np.mean(_calc_fitness(spp))
#or else return NaN
else:
mean_fit = np.nan
return(mean_fit)
| 0 | 0 |
4d14eb208318979b2b03eac311b90a75532fc290 | 1,743 | py | Python | quantz_ground/app.py | zhangyuz/quantz_ground | a3c10aceaa9da537ff5d1fc015f198945bf9d6f0 | [
"Apache-2.0"
] | 1 | 2020-10-20T15:45:20.000Z | 2020-10-20T15:45:20.000Z | quantz_ground/app.py | zhangyuz/quantz_ground | a3c10aceaa9da537ff5d1fc015f198945bf9d6f0 | [
"Apache-2.0"
] | null | null | null | quantz_ground/app.py | zhangyuz/quantz_ground | a3c10aceaa9da537ff5d1fc015f198945bf9d6f0 | [
"Apache-2.0"
] | null | null | null | from eve import Eve
from .db_domains import db_domains
import os
def isInDocker():
return os.environ.get('AM_I_IN_A_DOCKER_CONTAINER', False)
SETTINGS = {
'DOMAIN': db_domains,
'MONGO_HOST': 'localhost',
'MONGO_PORT': 27017,
# MONGO_USERNAME': os.environ.get(...),
# MONGO_PASSWORD': os.environ.get(...),
'MONGO_DBNAME': 'quantz',
'RENDERERS': [
'eve.render.JSONRenderer'
# 'eve.render.XMLRenderer'
],
'ALLOW_UNKNOWN': True,
# 'X_DOMAINS_RE': r'.*',
'X_DOMAINS': [r'*.zhangyuzheng.com'],
'IF_MATCH': False,
'ENFORCE_IF_MATCH': False,
'HATEOAS': False,
    # Optionally rename the payload key from '_items' to 'items', since strict
    # front-end linters reject variable names that start with an underscore.
# 'ITEMS': 'items',
# 'META': 'meta',
# 'DATE_CREATED': 'created',
# 'ID_FIELD': 'id', # FIXME: not working, Y?
# 'LAST_UPDATED': 'updated',
# 'ETAG': 'etag',
'PAGINATION_DEFAULT': 10000,
'PAGINATION_LIMIT': 99999999,
# 'OPTIMIZE_PAGINATION_FOR_SPEED': True,
'RESOURCE_METHODS': ['GET'],
'ITEM_METHODS': ['GET']
}
def exclude_fields(resource, response):
excluded_fields = ['_id', '_created', '_updated', '_etag']
for doc in response['_items']:
for field in excluded_fields:
# Better ask forgiveness than permission
try:
del doc[field]
except KeyError as e:
pass
def on_fetched_resource(resource_name, response):
print('on_fetched_resource:%s' % resource_name)
exclude_fields(resource_name, response)
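# Hedged sketch of the hook's effect (response shape assumed from Eve's defaults
# with HATEOAS disabled; field values are invented, not from a real deployment):
#   GET /<resource> -> {"_items": [{"name": "...", "active": true, ...}], "_meta": {...}}
# where _id, _created, _updated and _etag have been stripped from every item by
# exclude_fields() before the JSON is rendered.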
app = Eve(settings=SETTINGS)
app.on_fetched_resource += on_fetched_resource
@app.route('/mnt')
def mnt():
return 'This is Maintanance Page'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80)
| 24.208333 | 62 | 0.624785 | from eve import Eve
from .db_domains import db_domains
import os
def isInDocker():
return os.environ.get('AM_I_IN_A_DOCKER_CONTAINER', False)
SETTINGS = {
'DOMAIN': db_domains,
'MONGO_HOST': 'localhost',
'MONGO_PORT': 27017,
# MONGO_USERNAME': os.environ.get(...),
# MONGO_PASSWORD': os.environ.get(...),
'MONGO_DBNAME': 'quantz',
'RENDERERS': [
'eve.render.JSONRenderer'
# 'eve.render.XMLRenderer'
],
'ALLOW_UNKNOWN': True,
# 'X_DOMAINS_RE': r'.*',
'X_DOMAINS': [r'*.zhangyuzheng.com'],
'IF_MATCH': False,
'ENFORCE_IF_MATCH': False,
'HATEOAS': False,
# 修改数据域名称,从 _items 改为 items,避免前端语法检查严格不能使用_开头的变量
# 'ITEMS': 'items',
# 'META': 'meta',
# 'DATE_CREATED': 'created',
# 'ID_FIELD': 'id', # FIXME: not working, Y?
# 'LAST_UPDATED': 'updated',
# 'ETAG': 'etag',
'PAGINATION_DEFAULT': 10000,
'PAGINATION_LIMIT': 99999999,
# 'OPTIMIZE_PAGINATION_FOR_SPEED': True,
'RESOURCE_METHODS': ['GET'],
'ITEM_METHODS': ['GET']
}
def exclude_fields(resource, response):
excluded_fields = ['_id', '_created', '_updated', '_etag']
for doc in response['_items']:
for field in excluded_fields:
# Better ask forgiveness than permission
try:
del doc[field]
except KeyError as e:
pass
def on_fetched_resource(resource_name, response):
print('on_fetched_resource:%s' % resource_name)
exclude_fields(resource_name, response)
app = Eve(settings=SETTINGS)
app.on_fetched_resource += on_fetched_resource
@app.route('/mnt')
def mnt():
return 'This is Maintanance Page'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80)
| 90 | 0 |
5304b9a306a30a7e2cf57d01ceb4e53e6ccd0bca | 1,869 | py | Python | databird/dtutil.py | jonas-hagen/databird | cfb358e74da62bb9d7ea0e6c7ac984671472120b | [
"MIT"
] | 1 | 2021-11-05T00:12:00.000Z | 2021-11-05T00:12:00.000Z | databird/dtutil.py | jonas-hagen/databird | cfb358e74da62bb9d7ea0e6c7ac984671472120b | [
"MIT"
] | null | null | null | databird/dtutil.py | jonas-hagen/databird | cfb358e74da62bb9d7ea0e6c7ac984671472120b | [
"MIT"
] | null | null | null | import datetime as dt
import calendar
import time
def parse_timedelta(s):
valid_units = [
"weeks",
"days",
"hours",
"seconds",
"minutes",
"miliseconds",
"microseconds",
]
try:
if s == "0":
return dt.timedelta()
value, unit = s.split(" ")
if unit[-1] != "s":
unit += "s"
value = float(value)
delta = dt.timedelta(**{unit: value})
return delta
except:
raise ValueError(
"Could not parse '{}'. Timedelta format is '<number> <unit> | 0', where `unit` is one of {} (tailing 's' is optional).".format(
s, ", ".join(valid_units)
)
)
def parse_datetime(s):
    for fmt in ("%Y-%m-%d", "%Y-%m-%d %H:%M:%S"):
        try:
            return dt.datetime.strptime(s, fmt)
        except ValueError:
            continue
    raise ValueError(
        "Could not parse '{}'. Time format is '%Y-%m-%d' or '%Y-%m-%d %H:%M:%S'.".format(s)
    )
def iter_dates(start, end, period):
"""Yield dates from `start` to `end` with step equalt to `period`."""
current = start
while current <= end:
yield current
current += period
def month_last_day(date):
"""Return the last date of the month for the month containing date."""
_, last_day = calendar.monthrange(date.year, date.month)
return dt.datetime(date.year, date.month, last_day)
def month_first_day(date):
"""Return the first date of the month (always 01) for the month containing date."""
return dt.datetime(date.year, date.month, 1)
def iso_date(date):
return date.strftime("%Y-%m-%d")
def normalize_datetime(date):
return dt.datetime.fromtimestamp(time.mktime(date.timetuple()))
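# Hedged examples for the helpers above (illustrative only, values chosen
# arbitrarily; not part of the original module):
#   parse_timedelta("90 seconds")  -> datetime.timedelta(seconds=90)
#   parse_timedelta("2 week")      -> datetime.timedelta(days=14)
#   parse_datetime("2020-01-31")   -> datetime.datetime(2020, 1, 31, 0, 0)
#   month_last_day(dt.datetime(2020, 2, 10)) -> datetime.datetime(2020, 2, 29, 0, 0)
#   list(iter_dates(dt.datetime(2020, 1, 1), dt.datetime(2020, 1, 3),
#                   dt.timedelta(days=1))) -> the three dates Jan 1, 2 and 3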
| 24.92 | 139 | 0.543606 | import datetime as dt
import calendar
import time
def parse_timedelta(s):
valid_units = [
"weeks",
"days",
"hours",
"seconds",
"minutes",
"miliseconds",
"microseconds",
]
try:
if s == "0":
return dt.timedelta()
value, unit = s.split(" ")
if unit[-1] != "s":
unit += "s"
value = float(value)
delta = dt.timedelta(**{unit: value})
return delta
except:
raise ValueError(
"Could not parse '{}'. Timedelta format is '<number> <unit> | 0', where `unit` is one of {} (tailing 's' is optional).".format(
s, ", ".join(valid_units)
)
)
def parse_datetime(s):
try:
date = dt.datetime.strptime(s, "%Y-%m-%d")
except:
try:
date = dt.datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
except:
raise ValueError(
"Could not parse '{}'. Time format is '%Y-%m-%d' or '%Y-%m-%d %H:%M:%S'.".format(
s
)
)
return date
def iter_dates(start, end, period):
"""Yield dates from `start` to `end` with step equalt to `period`."""
current = start
while current <= end:
yield current
current += period
def month_last_day(date):
"""Return the last date of the month for the month containing date."""
_, last_day = calendar.monthrange(date.year, date.month)
return dt.datetime(date.year, date.month, last_day)
def month_first_day(date):
"""Return the first date of the month (always 01) for the month containing date."""
return dt.datetime(date.year, date.month, 1)
def iso_date(date):
return date.strftime("%Y-%m-%d")
def normalize_datetime(date):
return dt.datetime.fromtimestamp(time.mktime(date.timetuple()))
| 0 | 0 |
5922ee45b768da565c33dd1950061bcdab97ffc8 | 662 | py | Python | coding_interviews/leetcode/easy/remove_duplicates/remove_duplicates.py | LeandroTk/Algorithms | 569ed68eba3eeff902f8078992099c28ce4d7cd6 | [
"MIT"
] | 205 | 2018-12-01T17:49:49.000Z | 2021-12-22T07:02:27.000Z | coding_interviews/leetcode/easy/remove_duplicates/remove_duplicates.py | LeandroTk/Algorithms | 569ed68eba3eeff902f8078992099c28ce4d7cd6 | [
"MIT"
] | 2 | 2020-01-01T16:34:29.000Z | 2020-04-26T19:11:13.000Z | coding_interviews/leetcode/easy/remove_duplicates/remove_duplicates.py | LeandroTk/Algorithms | 569ed68eba3eeff902f8078992099c28ce4d7cd6 | [
"MIT"
] | 50 | 2018-11-28T20:51:36.000Z | 2021-11-29T04:08:25.000Z | # https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string
def remove_duplicates(S):
if len(S) <= 1: return S
start, end = 0, 1
while end < len(S):
if S[start] != S[end]:
start = end
end = start + 1
elif S[start] == S[end] and end + 1 == len(S):
S = S[0:start]
elif S[start] == S[end]:
S = S[0:start] + S[end+1:]
start, end = 0, 1
return S
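# Hedged example of the intended behaviour (the classic test case for this
# problem): repeatedly deleting adjacent equal pairs reduces "abbaca" to "ca",
#   "abbaca" -> drop "bb" -> "aaca" -> drop "aa" -> "ca"
# and both implementations in this file return "ca" for that input.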
# Alternative stack-based solution; note that this definition shadows the
# two-pointer version above, so it is the one that ends up being used.
def remove_duplicates(S):
stack = []
for char in S:
if len(stack) and stack[-1] == char:
stack.pop()
else:
stack.append(char)
return ''.join(stack) | 21.354839 | 72 | 0.487915 | # https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string
def remove_duplicates(S):
if len(S) <= 1: return S
start, end = 0, 1
while end < len(S):
if S[start] != S[end]:
start = end
end = start + 1
elif S[start] == S[end] and end + 1 == len(S):
S = S[0:start]
elif S[start] == S[end]:
S = S[0:start] + S[end+1:]
start, end = 0, 1
return S
def remove_duplicates(S):
stack = []
for char in S:
if len(stack) and stack[-1] == char:
stack.pop()
else:
stack.append(char)
return ''.join(stack) | 0 | 0 |
b82ae840a377927194b91a181787f7527c4df71f | 271 | py | Python | py3wirecard/entities/acquirerdetails.py | robertons/py3wirecard | 8a9b541a67ee96d75b1c864762fce7148cccb8b4 | [
"MIT"
] | 2 | 2019-09-05T20:20:44.000Z | 2020-01-14T18:20:45.000Z | py3wirecard/entities/acquirerdetails.py | robertons/py3wirecard | 8a9b541a67ee96d75b1c864762fce7148cccb8b4 | [
"MIT"
] | 1 | 2020-01-15T12:27:56.000Z | 2020-01-16T12:26:13.000Z | py3wirecard/entities/acquirerdetails.py | robertons/py3wirecard | 8a9b541a67ee96d75b1c864762fce7148cccb8b4 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
from py3wirecard.entities.lib.wireentity import *
from py3wirecard.entities.taxdocument import TaxDocument
class AcquirerDetails(WireEntity):
@String()
def authorizationNumber(self): pass
@Object(type=TaxDocument)
def taxDocument(self):pass
| 22.583333 | 56 | 0.782288 | #-*- coding: utf-8 -*-
from py3wirecard.entities.lib.wireentity import *
from py3wirecard.entities.taxdocument import TaxDocument
class AcquirerDetails(WireEntity):
@String()
def authorizationNumber(self): pass
@Object(type=TaxDocument)
def taxDocument(self):pass
| 0 | 0 |
a55666e686775ea98506356ddf52aca3da3da5cf | 1,119 | py | Python | clpc.py | CnybTseng/LPRNet | 5983ae3e3445d121c2ac31ac396287aa134545ab | [
"MIT"
] | null | null | null | clpc.py | CnybTseng/LPRNet | 5983ae3e3445d121c2ac31ac396287aa134545ab | [
"MIT"
] | null | null | null | clpc.py | CnybTseng/LPRNet | 5983ae3e3445d121c2ac31ac396287aa134545ab | [
"MIT"
] | null | null | null | chinese_strings = [
'', '', '', '', '', '', '', '', '', '',
    '京', '津', '冀', '晋', '蒙', '辽', '吉', '黑', '沪', '苏',
    '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤', '桂',
    '琼', '渝', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
    '新', '港', '澳', '台', '警', 'WJ']
chinese = [
'Beijing',
'Tianjin',
'Hebei',
'Shanxi',
'InnerMongolia',
'Liaoning',
'Jilin',
'Heilongjiang',
'Shanghai',
'Jiangsu',
'Zhejiang',
'Anhui',
'Fujian',
'Jiangxi',
'Shandong',
'Henan',
'Hubei',
'Hunan',
'Guangdong',
'Guangxi',
'Hainan',
'Chongqing',
'Sichuan',
'Guizhou',
'Yunnan',
'Xizang',
'Shaanxi',
'Gansu',
'Qinghai',
'Ningxia',
'Xinjiang',
'HongKong',
'Macau',
'Tibet',
'police',
'WJ']
# 26
alphabet = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', 'I', 'O']
# 10
number = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
blank = ['-']
CHARS = blank + chinese + alphabet + number
SHOW_CHARS = blank + chinese_strings + alphabet + number | 18.966102 | 56 | 0.392315 | chinese_strings = [
'京', '津', '冀', '晋', '蒙', '辽', '吉', '黑', '沪', '苏',
'浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤', '桂',
'琼', '渝', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
'新', '港', '澳', '台', '警', 'WJ']
# 36
chinese = [
'Beijing',
'Tianjin',
'Hebei',
'Shanxi',
'InnerMongolia',
'Liaoning',
'Jilin',
'Heilongjiang',
'Shanghai',
'Jiangsu',
'Zhejiang',
'Anhui',
'Fujian',
'Jiangxi',
'Shandong',
'Henan',
'Hubei',
'Hunan',
'Guangdong',
'Guangxi',
'Hainan',
'Chongqing',
'Sichuan',
'Guizhou',
'Yunnan',
'Xizang',
'Shaanxi',
'Gansu',
'Qinghai',
'Ningxia',
'Xinjiang',
'HongKong',
'Macau',
'Tibet',
'police',
'WJ']
# 26
alphabet = [
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', 'I', 'O']
# 10
number = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
blank = ['-']
CHARS = blank + chinese + alphabet + number
SHOW_CHARS = blank + chinese_strings + alphabet + number | 105 | 0 |
fb16e31bf96d01e63ade275800859d1d3efc6eef | 2,319 | py | Python | core/migrations/0001_initial.py | atthana/restapi_q_udemy | f49df5a614ac1b88a3bea975aea9498b8e85d504 | [
"MIT"
] | null | null | null | core/migrations/0001_initial.py | atthana/restapi_q_udemy | f49df5a614ac1b88a3bea975aea9498b8e85d504 | [
"MIT"
] | null | null | null | core/migrations/0001_initial.py | atthana/restapi_q_udemy | f49df5a614ac1b88a3bea975aea9498b8e85d504 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-04-05 14:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=50)),
('active', models.BooleanField(default=True)),
('doc_num', models.CharField(blank=True, max_length=12, null=True, unique=True)),
],
),
migrations.CreateModel(
name='DataSheet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=50)),
('historical_data', models.TextField()),
],
),
migrations.CreateModel(
name='Profession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dtype', models.CharField(choices=[('PP', 'Passport'), ('ID', 'Identity card'), ('OT', 'Others')], max_length=2)),
('doc_number', models.CharField(max_length=50)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.customer')),
],
),
migrations.AddField(
model_name='customer',
name='datasheet',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.datasheet'),
),
migrations.AddField(
model_name='customer',
name='profession',
field=models.ManyToManyField(to='core.Profession'),
),
]
| 38.65 | 131 | 0.564899 | # Generated by Django 3.1.7 on 2021-04-05 14:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('address', models.CharField(max_length=50)),
('active', models.BooleanField(default=True)),
('doc_num', models.CharField(blank=True, max_length=12, null=True, unique=True)),
],
),
migrations.CreateModel(
name='DataSheet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=50)),
('historical_data', models.TextField()),
],
),
migrations.CreateModel(
name='Profession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dtype', models.CharField(choices=[('PP', 'Passport'), ('ID', 'Identity card'), ('OT', 'Others')], max_length=2)),
('doc_number', models.CharField(max_length=50)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.customer')),
],
),
migrations.AddField(
model_name='customer',
name='datasheet',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.datasheet'),
),
migrations.AddField(
model_name='customer',
name='profession',
field=models.ManyToManyField(to='core.Profession'),
),
]
| 0 | 0 |
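# Hedged reconstruction (illustration only) of the Customer model implied by the
# migration above; fields are read off the CreateModel/AddField operations, the
# class layout itself is assumed:
#   class Customer(models.Model):
#       name = models.CharField(max_length=50)
#       address = models.CharField(max_length=50)
#       active = models.BooleanField(default=True)
#       doc_num = models.CharField(max_length=12, unique=True, null=True, blank=True)
#       datasheet = models.OneToOneField('DataSheet', models.CASCADE, null=True, blank=True)
#       profession = models.ManyToManyField('Profession')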
9f663f59a9673a49aadc92ab4dc19bf0f2475490 | 2,918 | py | Python | virtool_workflow/workflow.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 5 | 2020-09-24T20:29:08.000Z | 2022-03-17T14:50:56.000Z | virtool_workflow/workflow.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 126 | 2020-10-01T23:38:34.000Z | 2022-03-31T08:26:28.000Z | virtool_workflow/workflow.py | eroberts9789/virtool-workflow | 18219eec2b9b934cedd3770ac319f40305c165f2 | [
"MIT"
] | 5 | 2020-09-29T21:29:46.000Z | 2021-07-27T20:34:58.000Z | """Main definitions for Virtool Workflows."""
from typing import Any, Callable, Coroutine, Iterable, Optional, Sequence
from virtool_workflow.utils import coerce_to_coroutine_function
from fixtures import FixtureScope
WorkflowStep = Callable[..., Coroutine[Any, Any, None]]
class Workflow:
"""
A Workflow is a step-wise, long-running operation.
A workflow is comprised of:
1. a set of functions to be executed on startup (.on_startup)
2. a set of step functions which will be executed in order (.steps)
3. a set of functions to be executed once all steps are completed (.on_cleanup)
"""
on_startup: Sequence[WorkflowStep]
on_cleanup: Sequence[WorkflowStep]
steps: Sequence[WorkflowStep]
def __new__(
cls,
*args,
startup: Optional[Iterable[WorkflowStep]] = None,
cleanup: Optional[Iterable[WorkflowStep]] = None,
steps: Optional[Iterable[WorkflowStep]] = None,
**kwargs
):
"""
:param startup: An initial set of startup steps.
:param cleanup: An initial set of cleanup steps.
:param steps: An initial set of steps.
"""
obj = super().__new__(cls)
obj.on_startup = []
obj.on_cleanup = []
obj.steps = []
if startup:
obj.on_startup.extend(startup)
if cleanup:
obj.on_cleanup.extend(cleanup)
if steps:
obj.steps.extend(steps)
return obj
def startup(self, action: Callable) -> Callable:
"""Decorator for adding a step to workflow startup."""
self.on_startup.append(coerce_to_coroutine_function(action))
return action
def cleanup(self, action: Callable) -> Callable:
"""Decorator for adding a step to workflow cleanup."""
self.on_cleanup.append(coerce_to_coroutine_function(action))
return action
def step(self, step: Callable) -> Callable:
"""Decorator for adding a step to the workflow."""
self.steps.append(coerce_to_coroutine_function(step))
return step
def merge(self, *workflows: "Workflow"):
"""Merge steps from other workflows into this workflow."""
self.steps.extend(step for w in workflows for step in w.steps)
self.on_startup.extend(
step for w in workflows for step in w.on_startup)
self.on_cleanup.extend(
step for w in workflows for step in w.on_cleanup)
return self
async def bind_to_fixtures(self, scope: FixtureScope):
"""
Bind a workflow to fixtures.
This is a convenience method for binding a workflow to a set of fixtures.
"""
self.on_startup = [await scope.bind(f) for f in self.on_startup]
self.on_cleanup = [await scope.bind(f) for f in self.on_cleanup]
self.steps = [await scope.bind(f) for f in self.steps]
return self
| 34.329412 | 87 | 0.642221 | """Main definitions for Virtool Workflows."""
from typing import Any, Callable, Coroutine, Iterable, Optional, Sequence
from virtool_workflow.utils import coerce_to_coroutine_function
from fixtures import FixtureScope
WorkflowStep = Callable[..., Coroutine[Any, Any, None]]
class Workflow:
"""
A Workflow is a step-wise, long-running operation.
A workflow is comprised of:
1. a set of functions to be executed on startup (.on_startup)
2. a set of step functions which will be executed in order (.steps)
3. a set of functions to be executed once all steps are completed (.on_cleanup)
"""
on_startup: Sequence[WorkflowStep]
on_cleanup: Sequence[WorkflowStep]
steps: Sequence[WorkflowStep]
def __new__(
cls,
*args,
startup: Optional[Iterable[WorkflowStep]] = None,
cleanup: Optional[Iterable[WorkflowStep]] = None,
steps: Optional[Iterable[WorkflowStep]] = None,
**kwargs
):
"""
:param startup: An initial set of startup steps.
:param cleanup: An initial set of cleanup steps.
:param steps: An initial set of steps.
"""
obj = super().__new__(cls)
obj.on_startup = []
obj.on_cleanup = []
obj.steps = []
if startup:
obj.on_startup.extend(startup)
if cleanup:
obj.on_cleanup.extend(cleanup)
if steps:
obj.steps.extend(steps)
return obj
def startup(self, action: Callable) -> Callable:
"""Decorator for adding a step to workflow startup."""
self.on_startup.append(coerce_to_coroutine_function(action))
return action
def cleanup(self, action: Callable) -> Callable:
"""Decorator for adding a step to workflow cleanup."""
self.on_cleanup.append(coerce_to_coroutine_function(action))
return action
def step(self, step: Callable) -> Callable:
"""Decorator for adding a step to the workflow."""
self.steps.append(coerce_to_coroutine_function(step))
return step
def merge(self, *workflows: "Workflow"):
"""Merge steps from other workflows into this workflow."""
self.steps.extend(step for w in workflows for step in w.steps)
self.on_startup.extend(
step for w in workflows for step in w.on_startup)
self.on_cleanup.extend(
step for w in workflows for step in w.on_cleanup)
return self
async def bind_to_fixtures(self, scope: FixtureScope):
"""
Bind a workflow to fixtures.
This is a convenience method for binding a workflow to a set of fixtures.
"""
self.on_startup = [await scope.bind(f) for f in self.on_startup]
self.on_cleanup = [await scope.bind(f) for f in self.on_cleanup]
self.steps = [await scope.bind(f) for f in self.steps]
return self
| 0 | 0 |
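# Hedged usage sketch for the Workflow class defined above; a minimal example,
# assuming the virtool_workflow package is importable. Step bodies are invented.
from virtool_workflow.workflow import Workflow

wf = Workflow()

@wf.startup
def prepare():
    print("setting up")

@wf.step
async def run_analysis():
    print("the single analysis step")

@wf.cleanup
def tear_down():
    print("cleaning up")

# Each decorator coerces plain functions to coroutine functions and appends them,
# so wf.on_startup, wf.steps and wf.on_cleanup each now hold one coroutine function.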
daa5b4decdea31a15e43a756422b8f10fbdeb296 | 1,637 | py | Python | test/test_pool.py | viverlxl/resource_pool | 6fa226e0ac504df604362bf0ef84cb3a9f21109c | [
"MIT"
] | 2 | 2019-10-09T10:30:23.000Z | 2020-01-20T01:36:10.000Z | test/test_pool.py | viverlxl/resource_pool | 6fa226e0ac504df604362bf0ef84cb3a9f21109c | [
"MIT"
] | null | null | null | test/test_pool.py | viverlxl/resource_pool | 6fa226e0ac504df604362bf0ef84cb3a9f21109c | [
"MIT"
] | 1 | 2020-07-28T22:57:50.000Z | 2020-07-28T22:57:50.000Z | #coding:utf-8
import threading
import time
import pytest
from .connect import DataClient, DataBase
from ..psrc import ConnPool
from concurrent.futures import ProcessPoolExecutor,ThreadPoolExecutor,as_completed
app = None
DATABASECONFIG = {
"test":{
"host": "localhost",
"port": 3306,
"username": "root",
"password": "",
"schema" : "test"
}
}
class JJApp:
def __init__(self):
self.obj_pool = {}
self.init_mysql_pool()
def init_mysql_pool(self):
debug = False
for key in DATABASECONFIG:
mysql_pool = ConnPool()
mysql_pool.add_obj(DataBase, DATABASECONFIG[key], debug)
self.obj_pool.setdefault(key, mysql_pool)
def __getattr__(self, name):
obj = None
if name in DATABASECONFIG:
pool = self.obj_pool[name]
obj = pool.get_obj()
if not obj:
time.sleep(10)
obj = pool.get_obj()
return obj
def release(self, name):
if name in self.obj_pool:
pool = self.obj_pool[name]
pool.release_obj()
def print_func(lock):
global app
sql = u"""
select * from test limit 10;
"""
data = app.test.query(sql)
if lock.acquire():
for item in data:
print(item['name'])
lock.release()
app.release("test")
time.sleep(20)
def test_pool():
global app
app = JJApp()
lock = threading.Lock()
with ThreadPoolExecutor(3) as executor:
for _ in range(5):
executor.submit(print_func, lock)
 | 23.385714 | 82 | 0.568112 |
| 0 | 0 |
43335eeb60e279b37dceda2e87c76453a8540cc6 | 24 | py | Python | nlppack/__init__.py | swordsbird/Jx3Price | 03663665fe9c712268368e77145640d8228ae3b0 | [
"MIT"
] | 11 | 2019-12-20T12:51:33.000Z | 2021-06-05T13:35:40.000Z | nlppack/__init__.py | swordsbird/Jx3Price | 03663665fe9c712268368e77145640d8228ae3b0 | [
"MIT"
] | 8 | 2019-12-20T13:21:53.000Z | 2022-03-08T23:06:27.000Z | nlppack/__init__.py | swordsbird/Jx3Price | 03663665fe9c712268368e77145640d8228ae3b0 | [
"MIT"
] | 1 | 2020-11-13T15:29:01.000Z | 2020-11-13T15:29:01.000Z | from . import parseutil
 | 12 | 23 | 0.791667 |
| 0 | 0 |
5dbd873944ee57a896246371918c17eb040e68d8 | 5,614 | py | Python | org/apache/helix/HelixProperty.py | davzhang/helix-python-binding | 11a9ecf730bce07720e0b0bcf7f0ec1cd2b25878 | [
"Apache-2.0"
] | 3 | 2015-04-08T22:51:04.000Z | 2015-05-03T06:42:35.000Z | org/apache/helix/HelixProperty.py | zzhang5/helix-python-binding | 11a9ecf730bce07720e0b0bcf7f0ec1cd2b25878 | [
"Apache-2.0"
] | null | null | null | org/apache/helix/HelixProperty.py | zzhang5/helix-python-binding | 11a9ecf730bce07720e0b0bcf7f0ec1cd2b25878 | [
"Apache-2.0"
] | 1 | 2020-03-31T21:43:01.000Z | 2020-03-31T21:43:01.000Z | # package org.apache.helix
#from org.apache.helix import *
#from java.lang.reflect import Constructor
#from java.util import ArrayList
#from java.util import Collection
#from java.util import Collections
#from java.util import HashMap
#from java.util import List
#from java.util import Map
from org.apache.helix.util.misc import enum
from org.apache.helix.ZNRecord import ZNRecord
import traceback
HelixPropertyAttribute = enum('BUCKET_SIZE', 'GROUP_MESSAGE_MODE')
class HelixProperty(object):
def __init__(self, *args):
self._record = ZNRecord(*args)
# """
#
# Parameters:
# String id
# """
# def __init__(self, id):
# self._record = ZNRecord(id)
#
#
# """
#
# Parameters:
# ZNRecord record
# """
# def __init__(self, record):
# self._record = ZNRecord(record)
def getId(self):
"""
Returns String
Java modifiers:
final
"""
return self._record.getId()
def getRecord(self):
"""
Returns ZNRecord
Java modifiers:
final
"""
return self._record
def setDeltaList(self, deltaList):
"""
Returns void
Parameters:
deltaList: List<ZNRecordDelta>
Java modifiers:
final
"""
self._record.setDeltaList(deltaList)
def toString(self):
"""
Returns String
@Override
"""
return self._record.toString()
def getBucketSize(self):
"""
Returns int
"""
# String
bucketSizeStr = self._record.getSimpleField('BUCKET_SIZE')
# int
bucketSize = 0
if bucketSizeStr != None:
try:
bucketSize = int(bucketSizeStr)
except ValueError, e: pass
return bucketSize
def setBucketSize(self, bucketSize):
"""
Returns void
Parameters:
bucketSize: int
"""
if bucketSize <= 0:
bucketSize = 0
self._record.setSimpleField('BUCKET_SIZE', "" + str(bucketSize))
@staticmethod
def convertToTypedInstance(clazz, record):
"""
Returns T
Parameters:
clazz: Class<T>record: ZNRecord
Java modifiers:
static
Parameterized: <T extends HelixProperty>
"""
if record == None:
return None
try:
# Constructor<T>
# getConstructor = clazz.getConstructor(new Class[] { ZNRecord.class })
# constructor = clazz(re)
# return constructor.newInstance(record)
# return clazz(record)
# return clazz.getTypeClass()(record) # call constructor
# return type(clazz)(record) # call constructor
return clazz(record) # call constructor
except :
print traceback.format_exc()
return None
@staticmethod
def convertToTypedList(clazz, records):
"""
Returns List<T>
Parameters:
clazz: Class<T>records: Collection<ZNRecord>
Java modifiers:
static
Parameterized: <T extends HelixProperty>
"""
if records == None:
return None
# List<T>
decorators = []
for record in records: # T
decorator = HelixProperty.convertToTypedInstance(clazz, record)
if decorator != None:
                decorators.append(decorator)
return decorators
@staticmethod
def convertListToMap(records):
"""
Returns Map<String, T>
Parameters:
records: List<T>
Java modifiers:
static
Parameterized: <T extends HelixProperty>
"""
if records == None:
return {}
# Map<String, T>
decorators = {}
        for record in records: decorators[record.getId()] = record
return decorators
@staticmethod
def convertToList(typedInstances):
"""
Returns List<ZNRecord>
Parameters:
typedInstances: List<T>
Java modifiers:
static
Parameterized: <T extends HelixProperty>
"""
if typedInstances == None:
return []
# List<ZNRecord>
records = []
for typedInstance in typedInstances: records.append(typedInstance.getRecord())
return records
def setGroupMessageMode(self, enable):
"""
Returns void
Parameters:
enable: boolean
"""
self._record.setSimpleField('GROUP_MESSAGE_MODE', "" + str(enable))
def getGroupMessageMode(self):
"""
Returns boolean
"""
# String
enableStr = self._record.getSimpleField('GROUP_MESSAGE_MODE')
if enableStr == None:
return False
try:
groupMode = eval(enableStr.lower().capitalize())
except: return False
if not groupMode: return False
return groupMode
def isValid(self):
"""
Returns boolean
"""
return False
def __eq__(self, obj):
"""
Returns boolean
Parameters:
obj: Object
@Override
"""
if obj == None:
return False
if type(obj) == HelixProperty:
# HelixProperty
that = obj
if that.getRecord() != None:
return (that.getRecord() == self.getRecord())
return False
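# Usage sketch (not part of the original port): exercising the conversion
# helpers above. The record id and field values are made up, and the round trip
# assumes ZNRecord(record) copy-construction preserves the id and simple fields,
# which is what HelixProperty.__init__ already relies on.
def _helix_property_demo():
    prop = HelixProperty("resource_0")
    prop.setBucketSize(4)
    prop.setGroupMessageMode(True)
    # Rebuild typed wrappers from raw ZNRecords and index them by id.
    typed = HelixProperty.convertToTypedList(HelixProperty, [prop.getRecord()])
    by_id = HelixProperty.convertListToMap(typed)
    assert by_id["resource_0"].getBucketSize() == 4
    assert by_id["resource_0"].getGroupMessageMode() == True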
 | 20.792593 | 86 | 0.543997 |
| 0 | 0 |
674b67bec6ee90176fbdee9879dd043d45c1fa36 | 2,431 | py | Python | modules/runtime/tests/py_importer_tests.py | ctonic/bark | 35591e69310a0f0c9e6e72b8a9ee71713901b12e | [
"MIT"
] | null | null | null | modules/runtime/tests/py_importer_tests.py | ctonic/bark | 35591e69310a0f0c9e6e72b8a9ee71713901b12e | [
"MIT"
] | null | null | null | modules/runtime/tests/py_importer_tests.py | ctonic/bark | 35591e69310a0f0c9e6e72b8a9ee71713901b12e | [
"MIT"
] | null | null | null | # Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import unittest
import matplotlib.pyplot as plt
from bark.world.agent import *
from bark.models.behavior import *
from bark.world import *
from bark.geometry import *
from bark.models.dynamic import *
from bark.models.execution import *
from bark.geometry import *
from bark.geometry.standard_shapes import *
from modules.runtime.commons.parameters import ParameterServer
from bark.world.opendrive import *
from bark.world.map import *
from modules.runtime.commons.xodr_parser import XodrParser
class ImporterTests(unittest.TestCase):
def test_python_map(self):
pass
# xodr_parser = XodrParser("modules/runtime/tests/data/Crossing8Course.xodr")
# xodr_parser.print_python_map()
def test_map(self):
xodr_parser = XodrParser("modules/runtime/tests/data/city_highway_straight.xodr")
# xodr_parser = XodrParser("modules/runtime/tests/data/CulDeSac.xodr")
params = ParameterServer()
world = World(params)
map_interface = MapInterface()
map_interface.set_open_drive_map(xodr_parser.map)
map_interface.set_roadgraph(xodr_parser.roadgraph)
world.set_map(map_interface)
for _, road in xodr_parser.map.get_roads().items():
for lane_section in road.lane_sections:
for _, lane in lane_section.get_lanes().items():
line_np = lane.line.toArray()
plt.text(line_np[-1, 0], line_np[-1, 1], 'center_{i}_{j}'.format(i=lane.lane_id,j=lane.lane_position))
plt.plot(
line_np[:, 0],
line_np[:, 1],
color="grey",
alpha=1.0)
plt.axis("equal")
plt.show()
# driving corridor calculation test
#lanes = map_interface.find_nearest_lanes(Point2d(-11,-8),1)
#left_line, right_line, center_line = map_interface.calculate_driving_corridor(lanes[0].lane_id,2)
#plt.plot(center_line.toArray()[:,0],center_line.toArray()[:,1])
#plt.show()
# TODO: plot cpp map
#cwd = os.getcwd()
#print (cwd)
roadgraph = xodr_parser.roadgraph
roadgraph.print_graph("/home/bernhard/"+"test1234.dot")
if __name__ == '__main__':
unittest.main()
 | 34.239437 | 122 | 0.647882 |
| 0 | 0 |
7e8d210f5257d00943ef0de386a015fecb1a21ab | 215 | py | Python | scripts/portal/Pianus.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/portal/Pianus.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/portal/Pianus.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | if sm.hasQuest(1666):
sm.warpInstanceIn(931050429)
sm.createClock(6*60)
sm.invokeAfterDelay(6*60*1000, "warpInstanceOut", 230040410, 0)
else:
map = 230040420
portal = 2
sm.warp(map, portal)
 | 21.5 | 67 | 0.674419 |
| 0 | 0 |
b0d90b1d516b0e0847b2cd536c61131786cada83 | 926 | py | Python | pysbr/queries/marketsbymarketids.py | power-edge/PySBR | f768c24e539557c08dfcaf39ce1eaca7d730cf25 | [
"MIT"
] | 49 | 2020-12-13T07:07:50.000Z | 2022-02-09T18:54:39.000Z | pysbr/queries/marketsbymarketids.py | power-edge/PySBR | f768c24e539557c08dfcaf39ce1eaca7d730cf25 | [
"MIT"
] | 11 | 2021-01-08T05:04:52.000Z | 2022-03-16T12:51:28.000Z | pysbr/queries/marketsbymarketids.py | power-edge/PySBR | f768c24e539557c08dfcaf39ce1eaca7d730cf25 | [
"MIT"
] | 9 | 2021-01-18T02:03:24.000Z | 2022-01-29T04:47:01.000Z | from typing import List, Union
from pysbr.queries.query import Query
import pysbr.utils as utils
class MarketsByMarketIds(Query):
"""Get information about a number of leagues from their league ids.
Market name, description, and market type id are included in the response.
Args:
market_ids: SBR market id or list of market ids.
sport_id: SBR sport id.
"""
@Query.typecheck
    def __init__(self, market_ids: Union[int, List[int]], sport_id: int):
super().__init__()
market_ids = utils.make_list(market_ids)
self.name = "marketTypesById"
self.arg_str = self._get_args("market_ids")
self.args = {"mtids": market_ids, "spids": [sport_id]}
self.fields = self._get_fields("markets_by_id")
self._raw = self._build_and_execute_query(
self.name, self.fields, self.arg_str, self.args
)
self._id_key = "market id"
 | 30.866667 | 78 | 0.661987 |
| 0 | 0 |
e34b7a0941162e0522b0241ed24bb6257057d4d6 | 108 | py | Python | examples/play_e2e4.py | certik/chess | dc806fccc0fb9acc57c40db56e620f2c55157425 | [
"MIT"
] | 1 | 2016-05-09T00:40:16.000Z | 2016-05-09T00:40:16.000Z | examples/play_e2e4.py | certik/chess | dc806fccc0fb9acc57c40db56e620f2c55157425 | [
"MIT"
] | null | null | null | examples/play_e2e4.py | certik/chess | dc806fccc0fb9acc57c40db56e620f2c55157425 | [
"MIT"
] | null | null | null | from py_uci import UCIEngine
e = UCIEngine()
e.new_game()
e.set_position(moves=["e2e4"])
e.find_best_move()
 | 18 | 30 | 0.75 |
| 0 | 0 |
24c03418435a7ac547e14c75a900568329890bf9 | 87 | py | Python | BranchBound/__init__.py | jskeet314/branch_bound_helper | 9336c47db2cf448fb8d8ef3b8b1c617bb56ff52a | [
"MIT"
] | null | null | null | BranchBound/__init__.py | jskeet314/branch_bound_helper | 9336c47db2cf448fb8d8ef3b8b1c617bb56ff52a | [
"MIT"
] | null | null | null | BranchBound/__init__.py | jskeet314/branch_bound_helper | 9336c47db2cf448fb8d8ef3b8b1c617bb56ff52a | [
"MIT"
] | null | null | null | name = "branch_bound"
if __name__ == "__main__":
print("branch bound installed!")
 | 17.4 | 36 | 0.678161 |
| 0 | 0 |
5bb02276122f217a1d8c0e497fdf9aa0ae10602a | 750 | py | Python | createPb_v2.py | ats05/hmr | e6f2e7843a120ee2143c77a70bb1e82ae681b255 | [
"MIT"
] | null | null | null | createPb_v2.py | ats05/hmr | e6f2e7843a120ee2143c77a70bb1e82ae681b255 | [
"MIT"
] | null | null | null | createPb_v2.py | ats05/hmr | e6f2e7843a120ee2143c77a70bb1e82ae681b255 | [
"MIT"
] | null | null | null | # coding:utf-8
# tensorflow version1.13.1
import tensorflow as tf
saver = tf.train.import_meta_graph('models/model.ckpt-667589.meta', clear_devices=True)
with tf.Session() as sess:
chpt_state = tf.train.get_checkpoint_state('models/model.ckpt-667589')
# if chpt_state:
# last_model = chpt_state.model_checkpoint_path
last_model = "models/model.ckpt-667589"
saver.restore(sess,last_model)
print ("model was loaded",last_model)
# else:
# print ("model cannot loaded")
# exit(1)
graph = tf.get_default_graph()
graph_def = graph.as_graph_def()
x = graph.get_tensor_by_name('x:0')
out = graph.get_tensor_by_name('reduce/out:0')
tf.saved_model.simple_save(sess, './models', inputs={"x": x}, outputs={"reduce/out": out})
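# Sketch (not part of the original script): loading the SavedModel exported
# above with the same TensorFlow 1.x API. The expected shape of 'x:0' depends on
# the checkpointed graph, so the actual feed value is left out.
def load_exported_model(export_dir='./models'):
    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(
            sess, [tf.saved_model.tag_constants.SERVING], export_dir)
        x = sess.graph.get_tensor_by_name('x:0')
        out = sess.graph.get_tensor_by_name('reduce/out:0')
        # result = sess.run(out, feed_dict={x: some_input})
        return x, out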
 | 28.846154 | 92 | 0.722667 |
| 0 | 0 |
709dbb247538d10b3eb2ae120e003e5a4a33d3f1 | 11,460 | py | Python | seesaw/externalprocess.py | Ghostofapacket/seesaw-kit | a3d8395167eb38ec2c446aad254d940d621fbd98 | [
"Unlicense"
] | null | null | null | seesaw/externalprocess.py | Ghostofapacket/seesaw-kit | a3d8395167eb38ec2c446aad254d940d621fbd98 | [
"Unlicense"
] | null | null | null | seesaw/externalprocess.py | Ghostofapacket/seesaw-kit | a3d8395167eb38ec2c446aad254d940d621fbd98 | [
"Unlicense"
] | null | null | null | '''Running subprocesses asynchronously.'''
from __future__ import print_function
import fcntl
import os
import os.path
import subprocess
import functools
import datetime
import pty
import signal
import atexit
import tornado.ioloop
from tornado.ioloop import IOLoop, PeriodicCallback
import tornado.process
from seesaw.event import Event
from seesaw.task import Task
from seesaw.config import realize
import time
_all_procs = set()
@atexit.register
def cleanup():
if _all_procs:
print('Subprocess did not exit properly!')
for proc in _all_procs:
print('Killing', proc)
try:
if hasattr(proc, 'proc'):
proc.proc.terminate()
else:
proc.terminate()
except Exception as error:
print(error)
time.sleep(0.1)
try:
if hasattr(proc, 'proc'):
proc.proc.kill()
else:
proc.kill()
except Exception as error:
print(error)
class AsyncPopen(object):
'''Asynchronous version of :class:`subprocess.Popen`.
Deprecated.
'''
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ioloop = None
self.master_fd = None
self.master = None
self.pipe = None
self.stdin = None
self.on_output = Event()
self.on_end = Event()
@classmethod
def ignore_sigint(cls):
# http://stackoverflow.com/q/5045771/1524507
signal.signal(signal.SIGINT, signal.SIG_IGN)
os.setpgrp()
def run(self):
self.ioloop = IOLoop.instance()
(master_fd, slave_fd) = pty.openpty()
# make stdout, stderr non-blocking
fcntl.fcntl(master_fd, fcntl.F_SETFL,
fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
self.master_fd = master_fd
self.master = os.fdopen(master_fd)
# listen to stdout, stderr
self.ioloop.add_handler(master_fd, self._handle_subprocess_stdout,
self.ioloop.READ)
slave = os.fdopen(slave_fd)
self.kwargs["stdout"] = slave
self.kwargs["stderr"] = slave
self.kwargs["close_fds"] = True
self.kwargs["preexec_fn"] = self.ignore_sigint
self.pipe = subprocess.Popen(*self.args, **self.kwargs)
self.stdin = self.pipe.stdin
# check for process exit
self.wait_callback = PeriodicCallback(self._wait_for_end, 250)
self.wait_callback.start()
_all_procs.add(self.pipe)
def _handle_subprocess_stdout(self, fd, events):
if not self.master.closed and (events & IOLoop._EPOLLIN) != 0:
data = self.master.read()
self.on_output(data)
self._wait_for_end(events)
def _wait_for_end(self, events=0):
self.pipe.poll()
if self.pipe.returncode is not None or \
(events & tornado.ioloop.IOLoop._EPOLLHUP) > 0:
self.wait_callback.stop()
self.master.close()
self.ioloop.remove_handler(self.master_fd)
self.on_end(self.pipe.returncode)
_all_procs.remove(self.pipe)
class AsyncPopen2(object):
'''Adapter for the legacy AsyncPopen'''
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.on_output = Event()
self.on_end = Event()
self.pipe = None
def run(self):
self.kwargs["stdout"] = tornado.process.Subprocess.STREAM
self.kwargs["stderr"] = tornado.process.Subprocess.STREAM
self.kwargs["preexec_fn"] = AsyncPopen.ignore_sigint
self.pipe = tornado.process.Subprocess(*self.args, **self.kwargs)
self.pipe.stdout.read_until_close(
callback=self._handle_subprocess_stdout,
streaming_callback=self._handle_subprocess_stdout)
self.pipe.stderr.read_until_close(
callback=self._handle_subprocess_stdout,
streaming_callback=self._handle_subprocess_stdout)
self.pipe.set_exit_callback(self._end_callback)
_all_procs.add(self.pipe)
def _handle_subprocess_stdout(self, data):
self.on_output(data)
def _end_callback(self, return_code):
self.on_end(return_code)
_all_procs.remove(self.pipe)
@property
def stdin(self):
return self.pipe.stdin
class ExternalProcess(Task):
'''External subprocess runner.'''
def __init__(self, name, args, max_tries=1, retry_delay=2,
kill_pipeline_on_error=False, accept_on_exit_code=None, retry_on_exit_code=None, env=None):
Task.__init__(self, name)
self.args = args
self.max_tries = max_tries
self.retry_delay = retry_delay
if accept_on_exit_code is not None:
self.accept_on_exit_code = accept_on_exit_code
else:
self.accept_on_exit_code = [0]
if kill_pipeline_on_error is True:
self.hard_exit = True
else:
self.hard_exit = False
self.retry_on_exit_code = retry_on_exit_code
self.env = env or {}
if 'PYTHONIOENCODING' not in self.env:
self.env['PYTHONIOENCODING'] = 'utf8:replace'
def enqueue(self, item):
self.start_item(item)
item.log_output("Starting %s for %s\n" % (self, item.description()))
item["tries"] = 0
item["ExternalProcess.stdin_write_error"] = False
item["ExternalProcess.running"] = False
self.process(item)
def stdin_data(self, item):
return b""
def process(self, item):
with self.task_cwd():
p = AsyncPopen2(
args=realize(self.args, item),
env=realize(self.env, item),
stdin=subprocess.PIPE,
close_fds=True
)
p.on_output += functools.partial(self.on_subprocess_stdout, p,
item)
p.on_end += functools.partial(self.on_subprocess_end, item)
p.run()
item["ExternalProcess.running"] = True
try:
p.stdin.write(self.stdin_data(item))
except Exception as error:
# FIXME: We need to properly propagate errors
item.log_output("Error writing to process: %s" % str(error))
item["ExternalProcess.stdin_write_error"] = True
p.stdin.close()
def fail_item(self, item):
# Don't allow the item to fail until the external process completes
if item["ExternalProcess.running"]:
return
if self.hard_exit == True:
Task.hard_fail_item(self, item)
else:
Task.fail_item(self, item)
def on_subprocess_stdout(self, pipe, item, data):
item.log_output(data, full_line=False)
def on_subprocess_end(self, item, returncode):
item["ExternalProcess.running"] = False
if returncode in self.accept_on_exit_code and \
not item["ExternalProcess.stdin_write_error"]:
self.handle_process_result(returncode, item)
else:
self.handle_process_error(returncode, item)
def handle_process_result(self, exit_code, item):
item.log_output("Finished %s for %s\n" % (self, item.description()))
self.complete_item(item)
def handle_process_error(self, exit_code, item):
item["tries"] += 1
item.log_output(
"Process %s returned exit code %d for %s\n" %
(self, exit_code, item.description())
)
item.log_error(self, exit_code)
retry_acceptable = self.max_tries is None or \
item["tries"] < self.max_tries
exit_status_indicates_retry = self.retry_on_exit_code is None or \
exit_code in self.retry_on_exit_code or \
item["ExternalProcess.stdin_write_error"]
if retry_acceptable and exit_status_indicates_retry:
item.log_output(
"Retrying %s for %s after %d seconds...\n" %
(self, item.description(), self.retry_delay)
)
IOLoop.instance().add_timeout(
datetime.timedelta(seconds=self.retry_delay),
functools.partial(self.process, item)
)
else:
item.log_output("Failed %s for %s\n" % (self, item.description()))
self.fail_item(item)
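# Usage sketch (not part of seesaw): a concrete pipeline task built on
# ExternalProcess. Everything in `args` is passed through realize(), so the
# entries may also be item/config interpolations instead of the plain strings
# used here.
class ExampleListTask(ExternalProcess):
    '''Runs `ls -l /tmp` for each item, retrying up to three times.'''
    def __init__(self):
        ExternalProcess.__init__(self, "ExampleListTask",
                                 args=["ls", "-l", "/tmp"],
                                 max_tries=3,
                                 retry_delay=5,
                                 accept_on_exit_code=[0])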
class WgetDownload(ExternalProcess):
'''Download with Wget process runner.'''
def __init__(self, args, max_tries=1, accept_on_exit_code=None,
kill_pipeline_on_error=False, retry_on_exit_code=None, env=None, stdin_data_function=None):
ExternalProcess.__init__(
self, "WgetDownload",
args=args, max_tries=max_tries,
accept_on_exit_code=(accept_on_exit_code
if accept_on_exit_code is not None else [0]),
retry_on_exit_code=retry_on_exit_code,
kill_pipeline_on_error=kill_pipeline_on_error,
env=env)
self.stdin_data_function = stdin_data_function
def stdin_data(self, item):
if self.stdin_data_function:
return self.stdin_data_function(item)
else:
return b""
class RsyncUpload(ExternalProcess):
'''Upload with Rsync process runner.'''
def __init__(self, target, files, target_source_path="./", bwlimit="0",
max_tries=None, extra_args=None):
args = [
"rsync",
"-rltv",
"--timeout=300",
"--contimeout=300",
"--progress",
"--bwlimit", bwlimit
]
if extra_args is not None:
args.extend(extra_args)
args.extend([
"--files-from=-",
target_source_path,
target
])
ExternalProcess.__init__(self, "RsyncUpload",
args=args,
max_tries=max_tries)
self.files = files
self.target_source_path = target_source_path
def stdin_data(self, item):
return "".join(
[
"%s\n" % os.path.relpath(
realize(f, item),
realize(self.target_source_path, item)
)
for f in realize(self.files, item)
]).encode('utf-8')
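# Usage sketch (not part of seesaw): the rsync target and file list below are
# placeholders; real pipelines normally build them from item interpolations.
def example_rsync_upload():
    return RsyncUpload(target="rsync://example.org/example-module/",
                       files=["/tmp/example-item.warc.gz"],
                       target_source_path="/tmp/",
                       bwlimit="0",
                       max_tries=2)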
class CurlUpload(ExternalProcess):
'''Upload with Curl process runner.'''
def __init__(self, target, filename, connect_timeout="60", speed_limit="1",
speed_time="900", max_tries=None):
args = [
"curl",
"--fail",
"--output", "/dev/null",
"--connect-timeout", str(connect_timeout),
"--speed-limit", str(speed_limit), # minimum upload speed 1B/s
# stop if speed < speed-limit for 900 seconds
"--speed-time", str(speed_time),
"--header", "X-Curl-Limits: inf,%s,%s" % (str(speed_limit),
str(speed_time)),
"--write-out", "Upload server: %{url_effective}\\n",
"--location",
"--upload-file", filename,
target
]
ExternalProcess.__init__(self, "CurlUpload",
args=args,
max_tries=max_tries)
 | 32.10084 | 108 | 0.584991 |
| 0 | 0 |
2a77bb600b7c374939281efcdc2822c2bb1565e6 | 10,337 | py | Python | face2anime/train_utils.py | davidleonfdez/face2anime | 896bf85a7aa28322cc9e9e586685db8cbbf39d89 | [
"MIT"
] | null | null | null | face2anime/train_utils.py | davidleonfdez/face2anime | 896bf85a7aa28322cc9e9e586685db8cbbf39d89 | [
"MIT"
] | 1 | 2022-01-15T23:57:33.000Z | 2022-01-15T23:57:33.000Z | face2anime/train_utils.py | davidleonfdez/face2anime | 896bf85a7aa28322cc9e9e586685db8cbbf39d89 | [
"MIT"
] | null | null | null | from fastai.vision.all import *
import gc
import torch
from typing import Callable
__all__ = ['EMAAverager', 'EMACallback', 'add_ema_to_gan_learner', 'custom_save_model',
'custom_load_model', 'SaveCheckpointsCallback', 'clean_mem']
class EMAAverager():
"""Callable class that calculates the EMA of a parameter.
It can be used as the `avg_fn` parameter of `torch.optim.swa_utils.AveragedModel`
Args:
decay (float): weight of averaged value. The new value of the parameter is
multiplied by 1 - decay.
"""
def __init__(self, decay=0.999):
self.decay = decay
def __call__(self, averaged_model_parameter, model_parameter, num_averaged):
return self.decay * averaged_model_parameter + (1 - self.decay) * model_parameter
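# Usage sketch (not part of face2anime, not exported in __all__): plugging
# EMAAverager into torch.optim.swa_utils.AveragedModel as the docstring above
# describes. The tiny linear model and random batches are only for illustration.
def _ema_averager_demo(steps=10):
    model = nn.Linear(8, 2)
    ema_model = torch.optim.swa_utils.AveragedModel(
        model, avg_fn=EMAAverager(decay=0.999))
    opt = torch.optim.SGD(model.parameters(), lr=1e-2)
    for _ in range(steps):
        loss = model(torch.randn(4, 8)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
        ema_model.update_parameters(model)  # EMA update after each optimizer step
    return ema_model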
def _default_forward_batch(model, batch, device):
input = batch
if isinstance(input, (list, tuple)):
input = input[0]
if device is not None:
input = input.to(device)
model(input)
class FullyAveragedModel(torch.optim.swa_utils.AveragedModel):
"""Extension of AveragedModel that also averages the buffers.
To update both the parameters and the buffers, the method `update_all` should be
called instead of `update_parameters`."""
def _update_buffers(self, model):
for b_swa, b_model in zip(self.module.buffers(), model.buffers()):
device = b_swa.device
b_model_ = b_model.detach().to(device)
if self.n_averaged == 0:
b_swa.detach().copy_(b_model_)
else:
b_swa.detach().copy_(self.avg_fn(b_swa.detach(), b_model_,
self.n_averaged.to(device)))
def update_all(self, model):
# Buffers must be updated first, because this method relies on n_averaged,
# which is updated by super().update_parameters()
self._update_buffers(model)
self.update_parameters(model)
@torch.no_grad()
def _update_bn(loader, model, device=None, forward_batch:Callable=None):
r"""Updates BatchNorm running_mean, running_var buffers in the model.
It performs one pass over data in `loader` to estimate the activation
statistics for BatchNorm layers in the model.
Args:
loader (torch.utils.data.DataLoader): dataset loader to compute the
activation statistics on. Each data batch should be either a
tensor, or a list/tuple whose first element is a tensor
containing data.
model (torch.nn.Module): model for which we seek to update BatchNorm
statistics.
device (torch.device, optional): If set, data will be transferred to
:attr:`device` before being passed into :attr:`model`.
forward_batch: method that chooses how to extract the input from every
element of :attr:`loader`, transfers it to :attr:`device` and
finally makes a forward pass on :attr:`model`.
Example:
>>> loader, model = ...
>>> _update_bn(loader, model)
"""
momenta = {}
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
momenta[module] = module.momentum
if not momenta:
return
was_training = model.training
model.train()
for module in momenta.keys():
module.momentum = None
module.num_batches_tracked *= 0
if forward_batch is None: forward_batch = _default_forward_batch
for batch in loader:
forward_batch(model, batch, device)
for bn_module in momenta.keys():
bn_module.momentum = momenta[bn_module]
model.train(was_training)
class EMACallback(Callback):
"""Updates the averaged weights of the generator of a GAN after every opt step.
It's meant to be used only with a GANLearner; i.e., an instance of this callback
is assumed to be attached to a GANLearner.
Args:
ema_model: AveragedModel that wraps the averaged generator module.
orig_model: active (not averaged) generator module, the one that's included
in learner.model and updated by the optimizer.
dl: dataloader needed to iterate over all data and make forward passes over the
ema_model in order to update the running statistic of BN layers.
        update_buffers: if True, not only the parameters but also the buffers of
            ema_model are averaged and updated.
forward_batch (Callable): Method with params (model, batch, device) that chooses
how to extract the input from every element of `dl`, transfers it to the proper
device and finally makes a forward pass on the model (here `ema_model`).
It's needed for updating the running statistics of BN layers.
"""
def __init__(self, ema_model:FullyAveragedModel, orig_model:nn.Module, dl,
update_buffers=True, forward_batch=None):
self.ema_model = ema_model
self.orig_model = orig_model
self.dl = dl
self.update_buffers = update_buffers
self.update_bn_pending = False
self.forward_batch = forward_batch
def after_step(self):
if self.gan_trainer.gen_mode:
update_method = (self.ema_model.update_all if self.update_buffers
else self.ema_model.update_parameters)
update_method(self.orig_model)
self.update_bn_pending = True
def after_fit(self):
if not self.update_bn_pending: return
#torch.optim.swa_utils.update_bn(self.dl, self.ema_model)
_update_bn(self.dl, self.ema_model, forward_batch=self.forward_batch)
self.update_bn_pending = False
def add_ema_to_gan_learner(gan_learner, dblock, decay=0.999, update_bn_dl_bs=64,
forward_batch=None):
""""Creates and setups everything needed to update an alternative EMA generator.
It stores the EMA generator in `ema_model` attribute of `gan_learner`.
Args:
gan_learner (GANLearner): the learner to add the EMA generator to.
dblock (DataBlock): needed to create dataloaders that are independent of those
of `gan_learner`, used after fit to update BN running stats of the EMA G.
decay: weight that multiplies averaged parameter every update.
update_bn_dl_bs: batch size used to update BN running stats.
forward_batch (Callable): Method with params (model, batch, device) that chooses
how to extract the input from every element of the dataloader, transfers it
to the proper device and finally makes a forward pass on the ema model.
It's needed for updating the running statistics of BN layers.
"""
generator = gan_learner.model.generator
ema_avg_fn = EMAAverager(decay=decay)
gan_learner.ema_model = FullyAveragedModel(generator, avg_fn=ema_avg_fn)
ds_path = gan_learner.dls.path
clean_dls = dblock.dataloaders(ds_path, path=ds_path, bs=update_bn_dl_bs)
gan_learner.ema_model.eval().to(clean_dls.device)
gan_learner.add_cb(EMACallback(gan_learner.ema_model, generator, clean_dls.train,
forward_batch=forward_batch))
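# Usage sketch (not part of face2anime, not exported in __all__): the intended
# call order around fit(). `gan_learner` and `dblock` are assumed to be the
# GANLearner and DataBlock built elsewhere in the training script.
def _train_with_ema(gan_learner, dblock, n_epochs=1, lr=2e-4):
    add_ema_to_gan_learner(gan_learner, dblock, decay=0.999, update_bn_dl_bs=32)
    gan_learner.fit(n_epochs, lr)
    # After training, the averaged generator lives in gan_learner.ema_model and
    # its BN statistics were refreshed by EMACallback.after_fit.
    return gan_learner.ema_model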
def custom_save_model(learner, filename, base_path='.'):
"""Saves the model and optimizer state of the learner.
The path of the generated file is base_path/learner.model_dir/filename
with ".pth" extension. If the learner has an EMA G model attached too,
a similar file with the suffix "_ema" is generated too.
"""
if isinstance(base_path, str): base_path = Path(base_path)
if not isinstance(base_path, Path): raise Exception('Invalid base_path')
file = join_path_file(filename, base_path/learner.model_dir, ext='.pth')
save_model(file, learner.model, learner.opt)
if getattr(learner, 'ema_model', None) is not None:
_save_ema_model(learner, base_path, filename)
def custom_load_model(learner, filename, with_opt=True, device=None,
base_path='./models',
with_ema=False, **kwargs):
"""Loads the model and optimizer state of the learner.
The file is expected to be placed in `base_path/filename` with ".pth"
extension. `kwargs` are forwarded to fastai's `load_model` method.
"""
if isinstance(base_path, str): base_path = Path(base_path)
if not isinstance(base_path, Path): raise Exception('Invalid base_path')
if device is None and hasattr(learner.dls, 'device'): device = learner.dls.device
if learner.opt is None: learner.create_opt()
#file = join_path_file(filename, base_path/learner.model_dir, ext='.pth')
file = base_path/f'{filename}.pth'
load_model(file, learner.model, learner.opt, with_opt=with_opt, device=device, **kwargs)
if with_ema:
_load_ema_model(learner, base_path, filename)
def _load_ema_model(learner, base_path, filename, device=None):
ema_filename = base_path/f'{filename}_ema.pth'
load_model(ema_filename, learner.ema_model, None, with_opt=False, device=device)
#state_dict = torch.load(ema_filename)
#learner.ema_model.load_state_dict(state_dict)
def _save_ema_model(learner, base_path, filename):
file = join_path_file(filename+'_ema', base_path/learner.model_dir, ext='.pth')
save_model(file, learner.ema_model, None, with_opt=False)
#torch.save(file, learner.ema_model.state_dict())
class SaveCheckpointsCallback(Callback):
"Callback that saves the model at the end of each epoch."
def __init__(self, fn_prefix, base_path=Path('.'), initial_epoch=1,
save_cycle_len=1):
self.fn_prefix = fn_prefix
self.base_path = base_path
self.epoch = initial_epoch
self.save_cycle_len = save_cycle_len
def after_epoch(self):
if (self.epoch % self.save_cycle_len) == 0:
fn = f'{self.fn_prefix}_{self.epoch}ep'
custom_save_model(self.learn, fn, base_path=self.base_path)
self.epoch += 1
def clean_mem():
if torch.cuda.is_available(): torch.cuda.empty_cache()
gc.collect()
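# Usage sketch (not part of face2anime, not exported in __all__): periodic
# checkpoints during training plus a final save/load round trip. `learner` is
# assumed to be a fastai Learner (for GANs, the GANLearner built elsewhere).
def _checkpointing_demo(learner, n_epochs=10, lr=2e-4):
    learner.add_cb(SaveCheckpointsCallback('mygan', base_path=Path('.'),
                                           save_cycle_len=5))
    learner.fit(n_epochs, lr)
    custom_save_model(learner, 'mygan_final', base_path='.')
    custom_load_model(learner, 'mygan_final', base_path='./models')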
| 43.432773 | 93 | 0.676695 | from fastai.vision.all import *
import gc
import torch
from typing import Callable
__all__ = ['EMAAverager', 'EMACallback', 'add_ema_to_gan_learner', 'custom_save_model',
'custom_load_model', 'SaveCheckpointsCallback', 'clean_mem']
class EMAAverager():
"""Callable class that calculates the EMA of a parameter.
It can be used as the `avg_fn` parameter of `torch.optim.swa_utils.AveragedModel`
Args:
decay (float): weight of averaged value. The new value of the parameter is
multiplied by 1 - decay.
"""
def __init__(self, decay=0.999):
self.decay = decay
def __call__(self, averaged_model_parameter, model_parameter, num_averaged):
return self.decay * averaged_model_parameter + (1 - self.decay) * model_parameter
def _default_forward_batch(model, batch, device):
input = batch
if isinstance(input, (list, tuple)):
input = input[0]
if device is not None:
input = input.to(device)
model(input)
class FullyAveragedModel(torch.optim.swa_utils.AveragedModel):
"""Extension of AveragedModel that also averages the buffers.
To update both the parameters and the buffers, the method `update_all` should be
called instead of `update_parameters`."""
def _update_buffers(self, model):
for b_swa, b_model in zip(self.module.buffers(), model.buffers()):
device = b_swa.device
b_model_ = b_model.detach().to(device)
if self.n_averaged == 0:
b_swa.detach().copy_(b_model_)
else:
b_swa.detach().copy_(self.avg_fn(b_swa.detach(), b_model_,
self.n_averaged.to(device)))
def update_all(self, model):
# Buffers must be updated first, because this method relies on n_averaged,
# which is updated by super().update_parameters()
self._update_buffers(model)
self.update_parameters(model)
@torch.no_grad()
def _update_bn(loader, model, device=None, forward_batch:Callable=None):
r"""Updates BatchNorm running_mean, running_var buffers in the model.
It performs one pass over data in `loader` to estimate the activation
statistics for BatchNorm layers in the model.
Args:
loader (torch.utils.data.DataLoader): dataset loader to compute the
activation statistics on. Each data batch should be either a
tensor, or a list/tuple whose first element is a tensor
containing data.
model (torch.nn.Module): model for which we seek to update BatchNorm
statistics.
device (torch.device, optional): If set, data will be transferred to
:attr:`device` before being passed into :attr:`model`.
forward_batch: method that chooses how to extract the input from every
element of :attr:`loader`, transfers it to :attr:`device` and
finally makes a forward pass on :attr:`model`.
Example:
>>> loader, model = ...
>>> _update_bn(loader, model)
"""
momenta = {}
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
momenta[module] = module.momentum
if not momenta:
return
was_training = model.training
model.train()
for module in momenta.keys():
module.momentum = None
module.num_batches_tracked *= 0
if forward_batch is None: forward_batch = _default_forward_batch
for batch in loader:
forward_batch(model, batch, device)
for bn_module in momenta.keys():
bn_module.momentum = momenta[bn_module]
model.train(was_training)
class EMACallback(Callback):
"""Updates the averaged weights of the generator of a GAN after every opt step.
It's meant to be used only with a GANLearner; i.e., an instance of this callback
is assumed to be attached to a GANLearner.
Args:
ema_model: AveragedModel that wraps the averaged generator module.
orig_model: active (not averaged) generator module, the one that's included
in learner.model and updated by the optimizer.
dl: dataloader needed to iterate over all data and make forward passes over the
ema_model in order to update the running statistic of BN layers.
update_buffers: if True, not only parameters, but also buffers, of ema_model are
averaged and updated,
forward_batch (Callable): Method with params (model, batch, device) that chooses
how to extract the input from every element of `dl`, transfers it to the proper
device and finally makes a forward pass on the model (here `ema_model`).
It's needed for updating the running statistics of BN layers.
"""
def __init__(self, ema_model:FullyAveragedModel, orig_model:nn.Module, dl,
update_buffers=True, forward_batch=None):
self.ema_model = ema_model
self.orig_model = orig_model
self.dl = dl
self.update_buffers = update_buffers
self.update_bn_pending = False
self.forward_batch = forward_batch
def after_step(self):
if self.gan_trainer.gen_mode:
update_method = (self.ema_model.update_all if self.update_buffers
else self.ema_model.update_parameters)
update_method(self.orig_model)
self.update_bn_pending = True
def after_fit(self):
if not self.update_bn_pending: return
#torch.optim.swa_utils.update_bn(self.dl, self.ema_model)
_update_bn(self.dl, self.ema_model, forward_batch=self.forward_batch)
self.update_bn_pending = False
def add_ema_to_gan_learner(gan_learner, dblock, decay=0.999, update_bn_dl_bs=64,
forward_batch=None):
""""Creates and setups everything needed to update an alternative EMA generator.
It stores the EMA generator in `ema_model` attribute of `gan_learner`.
Args:
gan_learner (GANLearner): the learner to add the EMA generator to.
dblock (DataBlock): needed to create dataloaders that are independent of those
of `gan_learner`, used after fit to update BN running stats of the EMA G.
decay: weight that multiplies averaged parameter every update.
update_bn_dl_bs: batch size used to update BN running stats.
forward_batch (Callable): Method with params (model, batch, device) that chooses
how to extract the input from every element of the dataloader, transfers it
to the proper device and finally makes a forward pass on the ema model.
It's needed for updating the running statistics of BN layers.
"""
generator = gan_learner.model.generator
ema_avg_fn = EMAAverager(decay=decay)
gan_learner.ema_model = FullyAveragedModel(generator, avg_fn=ema_avg_fn)
ds_path = gan_learner.dls.path
clean_dls = dblock.dataloaders(ds_path, path=ds_path, bs=update_bn_dl_bs)
gan_learner.ema_model.eval().to(clean_dls.device)
gan_learner.add_cb(EMACallback(gan_learner.ema_model, generator, clean_dls.train,
forward_batch=forward_batch))
def custom_save_model(learner, filename, base_path='.'):
"""Saves the model and optimizer state of the learner.
The path of the generated file is base_path/learner.model_dir/filename
with ".pth" extension. If the learner has an EMA G model attached too,
a similar file with the suffix "_ema" is generated too.
"""
if isinstance(base_path, str): base_path = Path(base_path)
if not isinstance(base_path, Path): raise Exception('Invalid base_path')
file = join_path_file(filename, base_path/learner.model_dir, ext='.pth')
save_model(file, learner.model, learner.opt)
if getattr(learner, 'ema_model', None) is not None:
_save_ema_model(learner, base_path, filename)
def custom_load_model(learner, filename, with_opt=True, device=None,
base_path='./models',
with_ema=False, **kwargs):
"""Loads the model and optimizer state of the learner.
The file is expected to be placed in `base_path/filename` with ".pth"
extension. `kwargs` are forwarded to fastai's `load_model` method.
"""
if isinstance(base_path, str): base_path = Path(base_path)
if not isinstance(base_path, Path): raise Exception('Invalid base_path')
if device is None and hasattr(learner.dls, 'device'): device = learner.dls.device
if learner.opt is None: learner.create_opt()
#file = join_path_file(filename, base_path/learner.model_dir, ext='.pth')
file = base_path/f'{filename}.pth'
load_model(file, learner.model, learner.opt, with_opt=with_opt, device=device, **kwargs)
if with_ema:
_load_ema_model(learner, base_path, filename)
def _load_ema_model(learner, base_path, filename, device=None):
ema_filename = base_path/f'{filename}_ema.pth'
load_model(ema_filename, learner.ema_model, None, with_opt=False, device=device)
#state_dict = torch.load(ema_filename)
#learner.ema_model.load_state_dict(state_dict)
def _save_ema_model(learner, base_path, filename):
file = join_path_file(filename+'_ema', base_path/learner.model_dir, ext='.pth')
save_model(file, learner.ema_model, None, with_opt=False)
#torch.save(file, learner.ema_model.state_dict())
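# Illustrative sketch (assumption, not in the original source): a checkpoint round
# trip with the helpers above. Note the asymmetry: custom_save_model writes to
# base_path/learner.model_dir/filename, while custom_load_model expects base_path/filename.
def _example_checkpoint_round_trip(learn, filename='wgan_64px'):
    custom_save_model(learn, filename, base_path='.')
    # ...later, possibly in a fresh process with a freshly built `learn`...
    custom_load_model(learn, filename, base_path='./models', with_ema=True)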
class SaveCheckpointsCallback(Callback):
"Callback that saves the model at the end of each epoch."
def __init__(self, fn_prefix, base_path=Path('.'), initial_epoch=1,
save_cycle_len=1):
self.fn_prefix = fn_prefix
self.base_path = base_path
self.epoch = initial_epoch
self.save_cycle_len = save_cycle_len
def after_epoch(self):
if (self.epoch % self.save_cycle_len) == 0:
fn = f'{self.fn_prefix}_{self.epoch}ep'
custom_save_model(self.learn, fn, base_path=self.base_path)
self.epoch += 1
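# Illustrative sketch (assumption): save a checkpoint every 5 epochs while training;
# files are named <prefix>_5ep.pth, <prefix>_10ep.pth, ... under learn.model_dir.
def _example_fit_with_checkpoints(learn, prefix='wgan'):
    learn.fit(50, 2e-4, cbs=[SaveCheckpointsCallback(prefix, save_cycle_len=5)])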
def clean_mem():
if torch.cuda.is_available(): torch.cuda.empty_cache()
gc.collect()
| 0 | 0 |
040c7f55302a46486df83f60b08c0f12421da7b6 | 5,027 | py | Python | spinnaker_csp/puzzles/sudoku_puzzles.py | neworderofjamie/SpiNNakerCSPs | 56af0782d0bb83fd6e9934021e4007604f107993 | [
"BSD-3-Clause-Clear"
] | 3 | 2018-03-14T08:53:20.000Z | 2020-05-28T17:28:18.000Z | spinnaker_csp/puzzles/sudoku_puzzles.py | neworderofjamie/SpiNNakerCSPs | 56af0782d0bb83fd6e9934021e4007604f107993 | [
"BSD-3-Clause-Clear"
] | null | null | null | spinnaker_csp/puzzles/sudoku_puzzles.py | neworderofjamie/SpiNNakerCSPs | 56af0782d0bb83fd6e9934021e4007604f107993 | [
"BSD-3-Clause-Clear"
] | 6 | 2019-04-11T16:00:58.000Z | 2021-07-03T14:48:16.000Z | """A set of sudoku puzzles to experiment with the spinnaker_csp package.
The puzzles are contained in the dictionary `puzzles`; keys are the puzzle names and values are tuples with the
puzzle name as the first element, the puzzle grid as the second and the solution (or None) as the third.
"""
puzzles={
#---------------------------------------------------------------------
'Dream': ("dream",
#---------------------------------------------------------------------
[[0 for x in range(9)] for y in range(9)],
None),
#---------------------------------------------------------------------
'easy':("easy", # easy from doi:10.1038/srep00725
#---------------------------------------
[[0, 4, 0, 8, 0, 5, 2, 0, 0],
[0, 2, 0, 0, 4, 0, 0, 5, 0],
[5, 0, 0, 0, 0, 0, 0, 0, 4],
[0, 9, 0, 0, 0, 3, 1, 2, 0],
[1, 0, 6, 0, 7, 8, 0, 0, 3],
[3, 7, 0, 9, 0, 4, 0, 8, 0],
[0, 0, 0, 0, 0, 6, 7, 0, 0],
[0, 0, 8, 3, 5, 9, 0, 1, 0],
[0, 1, 9, 0, 0, 7, 6, 0, 0]],
#---------------------------------------
[[9, 4, 7, 8, 3, 5, 2, 6, 1],
[6, 2, 3, 7, 4, 1, 8, 5, 9],
[5, 8, 1, 6, 9, 2, 3, 7, 4],
[8, 9, 4, 5, 6, 3, 1, 2, 7],
[1, 5, 6, 2, 7, 8, 9, 4, 3],
[3, 7, 2, 9, 1, 4, 5, 8, 6],
[4, 3, 5, 1, 2, 6, 7, 9, 8],
[7, 6, 8, 3, 5, 9, 4, 1, 2],
[2, 1, 9, 4, 8, 7, 6, 3, 5]]),
#---------------------------------------------------------------------
'hard':('hard', # hard puzzle from https://doi.org/10.1371/journal.pcbi.1003311
#---------------------------------------------------------------------
[[8, 0, 5, 0, 0, 0, 0, 3, 0],
[0, 3, 0, 9, 0, 0, 0, 0, 0],
[4, 0, 6, 0, 3, 0, 0, 0, 0],
[6, 0, 0, 0, 1, 0, 9, 0, 0],
[0, 5, 0, 3, 0, 8, 0, 7, 0],
[0, 0, 9, 0, 4, 0, 0, 0, 1],
[0, 0, 0, 0, 2, 0, 3, 0, 8],
[0, 0, 0, 0, 0, 9, 0, 2, 0],
[0, 7, 0, 0, 0, 0, 5, 0, 4]],
#---------------------------------------------------------------------
[[8, 1, 5, 6, 7, 4, 2, 3, 9],
[7, 3, 2, 9, 5, 1, 4, 8, 6],
[4, 9, 6, 8, 3, 2, 7, 1, 5],
[6, 8, 7, 2, 1, 5, 9, 4, 3],
[1, 5, 4, 3, 9, 8, 6, 7, 2],
[3, 2, 9, 7, 4, 6, 8, 5, 1],
[9, 4, 1, 5, 2, 7, 3, 6, 8],
[5, 6, 3, 4, 8, 9, 1, 2, 7],
[2, 7, 8, 1, 6, 3, 5, 9, 4]]),
#---------------------------------------------------------------------
'AI_escargot': ('AI_escargot',
#---------------------------------------------------------------------
[[1, 0, 0, 0, 0, 7, 0, 9, 0],
[0, 3, 0, 0, 2, 0, 0, 0, 8],
[0, 0, 9, 6, 0, 0, 5, 0, 0],
[0, 0, 5, 3, 0, 0, 9, 0, 0],
[0, 1, 0, 0, 8, 0, 0, 0, 2],
[6, 0, 0, 0, 0, 4, 0, 0, 0],
[3, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 4, 0, 0, 0, 0, 0, 0, 7],
[0, 0, 7, 0, 0, 0, 3, 0, 0]],
#---------------------------------------------------------------------
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]),
#---------------------------------------------------------------------
'platinum_blonde':('platinum_blonde', # hard from doi:10.1038/srep00725
#---------------------------------------------------------------------
[[0, 0, 0, 0, 0, 0, 0, 1, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 3],
[0, 0, 2, 3, 0, 0, 4, 0, 0],
[0, 0, 1, 8, 0, 0, 0, 0, 5],
[0, 6, 0, 0, 7, 0, 8, 0, 0],
[0, 0, 0, 0, 0, 9, 0, 0, 0],
[0, 0, 8, 5, 0, 0, 0, 0, 0],
[9, 0, 0, 0, 4, 0, 5, 0, 0],
[4, 7, 0, 0, 0, 6, 0, 0, 0]],
#---------------------------------------------------------------------
[[8, 3, 9, 4, 6, 5, 7, 1, 2],
[1, 4, 6, 7, 8, 2, 9, 5, 3],
[7, 5, 2, 3, 9, 1, 4, 8, 6],
[3, 9, 1, 8, 2, 4, 6, 7, 5],
[5, 6, 4, 1, 7, 3, 8, 2, 9],
[2, 8, 7, 6, 5, 9, 3, 4, 1],
[6, 2, 8, 5, 3, 7, 1, 9, 4],
[9, 1, 3, 2, 4, 8, 5, 6, 7],
[4, 7, 5, 9, 1, 6, 2, 3, 8]])
}
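# Example access (added for illustration): each value unpacks as (name, grid, solution),
# where solution is None or an all-zero placeholder when no reference solution is given.
def example_puzzle(key='easy'):
    name, grid, solution = puzzles[key]
    return name, grid, solution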
#-----------------TEMPLATE---------------------------------------------
##---------------------------------------------------------------------
# [[0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0]]
#-----------------TEMPLATE 16X16----------------------------------------
# #---------------------------------------------------------------------
# [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] | 38.374046 | 116 | 0.282475 | """A set of sudoku puzzles to experiment with the spinnaker_csp package.
The puzzles are contained in the dictionary `puzzles`; keys are the puzzle names and values are tuples with the
puzzle name as the first element, the puzzle grid as the second and the solution (or None) as the third.
"""
puzzles={
#---------------------------------------------------------------------
'Dream': ("dream",
#---------------------------------------------------------------------
[[0 for x in range(9)] for y in range(9)],
None),
#---------------------------------------------------------------------
'easy':("easy", # easy from doi:10.1038/srep00725
#---------------------------------------
[[0, 4, 0, 8, 0, 5, 2, 0, 0],
[0, 2, 0, 0, 4, 0, 0, 5, 0],
[5, 0, 0, 0, 0, 0, 0, 0, 4],
[0, 9, 0, 0, 0, 3, 1, 2, 0],
[1, 0, 6, 0, 7, 8, 0, 0, 3],
[3, 7, 0, 9, 0, 4, 0, 8, 0],
[0, 0, 0, 0, 0, 6, 7, 0, 0],
[0, 0, 8, 3, 5, 9, 0, 1, 0],
[0, 1, 9, 0, 0, 7, 6, 0, 0]],
#---------------------------------------
[[9, 4, 7, 8, 3, 5, 2, 6, 1],
[6, 2, 3, 7, 4, 1, 8, 5, 9],
[5, 8, 1, 6, 9, 2, 3, 7, 4],
[8, 9, 4, 5, 6, 3, 1, 2, 7],
[1, 5, 6, 2, 7, 8, 9, 4, 3],
[3, 7, 2, 9, 1, 4, 5, 8, 6],
[4, 3, 5, 1, 2, 6, 7, 9, 8],
[7, 6, 8, 3, 5, 9, 4, 1, 2],
[2, 1, 9, 4, 8, 7, 6, 3, 5]]),
#---------------------------------------------------------------------
'hard':('hard', # hard puzzle from https://doi.org/10.1371/journal.pcbi.1003311
#---------------------------------------------------------------------
[[8, 0, 5, 0, 0, 0, 0, 3, 0],
[0, 3, 0, 9, 0, 0, 0, 0, 0],
[4, 0, 6, 0, 3, 0, 0, 0, 0],
[6, 0, 0, 0, 1, 0, 9, 0, 0],
[0, 5, 0, 3, 0, 8, 0, 7, 0],
[0, 0, 9, 0, 4, 0, 0, 0, 1],
[0, 0, 0, 0, 2, 0, 3, 0, 8],
[0, 0, 0, 0, 0, 9, 0, 2, 0],
[0, 7, 0, 0, 0, 0, 5, 0, 4]],
#---------------------------------------------------------------------
[[8, 1, 5, 6, 7, 4, 2, 3, 9],
[7, 3, 2, 9, 5, 1, 4, 8, 6],
[4, 9, 6, 8, 3, 2, 7, 1, 5],
[6, 8, 7, 2, 1, 5, 9, 4, 3],
[1, 5, 4, 3, 9, 8, 6, 7, 2],
[3, 2, 9, 7, 4, 6, 8, 5, 1],
[9, 4, 1, 5, 2, 7, 3, 6, 8],
[5, 6, 3, 4, 8, 9, 1, 2, 7],
[2, 7, 8, 1, 6, 3, 5, 9, 4]]),
#---------------------------------------------------------------------
'AI_escargot': ('AI_escargot',
#---------------------------------------------------------------------
[[1, 0, 0, 0, 0, 7, 0, 9, 0],
[0, 3, 0, 0, 2, 0, 0, 0, 8],
[0, 0, 9, 6, 0, 0, 5, 0, 0],
[0, 0, 5, 3, 0, 0, 9, 0, 0],
[0, 1, 0, 0, 8, 0, 0, 0, 2],
[6, 0, 0, 0, 0, 4, 0, 0, 0],
[3, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 4, 0, 0, 0, 0, 0, 0, 7],
[0, 0, 7, 0, 0, 0, 3, 0, 0]],
#---------------------------------------------------------------------
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]),
#---------------------------------------------------------------------
'platinum_blonde':('platinum_blonde', # hard from doi:10.1038/srep00725
#---------------------------------------------------------------------
[[0, 0, 0, 0, 0, 0, 0, 1, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 3],
[0, 0, 2, 3, 0, 0, 4, 0, 0],
[0, 0, 1, 8, 0, 0, 0, 0, 5],
[0, 6, 0, 0, 7, 0, 8, 0, 0],
[0, 0, 0, 0, 0, 9, 0, 0, 0],
[0, 0, 8, 5, 0, 0, 0, 0, 0],
[9, 0, 0, 0, 4, 0, 5, 0, 0],
[4, 7, 0, 0, 0, 6, 0, 0, 0]],
#---------------------------------------------------------------------
[[8, 3, 9, 4, 6, 5, 7, 1, 2],
[1, 4, 6, 7, 8, 2, 9, 5, 3],
[7, 5, 2, 3, 9, 1, 4, 8, 6],
[3, 9, 1, 8, 2, 4, 6, 7, 5],
[5, 6, 4, 1, 7, 3, 8, 2, 9],
[2, 8, 7, 6, 5, 9, 3, 4, 1],
[6, 2, 8, 5, 3, 7, 1, 9, 4],
[9, 1, 3, 2, 4, 8, 5, 6, 7],
[4, 7, 5, 9, 1, 6, 2, 3, 8]])
}
#-----------------TEMPLATE---------------------------------------------
##---------------------------------------------------------------------
# [[0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0]]
#-----------------TEMPLATE 16X16----------------------------------------
# #---------------------------------------------------------------------
# [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] | 0 | 0 |
06fff0cc841bc55f26d1376b8560f7a8a4ac31ac | 707 | py | Python | bin/print_busco_config.py | ewels/nf-core-neutronstar | c64a04a2422b3a113b8b45774b8045cf874af3fe | [
"MIT"
] | 4 | 2018-10-02T09:44:02.000Z | 2019-09-13T11:19:33.000Z | bin/print_busco_config.py | ewels/nf-core-neutronstar | c64a04a2422b3a113b8b45774b8045cf874af3fe | [
"MIT"
] | 14 | 2018-10-05T14:43:03.000Z | 2020-09-15T08:45:59.000Z | bin/print_busco_config.py | ewels/nf-core-neutronstar | c64a04a2422b3a113b8b45774b8045cf874af3fe | [
"MIT"
] | 4 | 2018-11-06T08:30:07.000Z | 2020-02-11T13:00:38.000Z | #!/usr/bin/env python
from __future__ import print_function
import os
print(
"""[busco]
out_path = {0}
tmp_path = {0}/tmp
[tblastn]
# path to tblastn
path = /usr/bin/
[makeblastdb]
# path to makeblastdb
path = /usr/bin/
[augustus]
# path to augustus
path = /opt/augustus/bin/
[etraining]
# path to augustus etraining
path = /opt/augustus/bin/
# path to augustus perl scripts, redeclare it for each new script
[gff2gbSmallDNA.pl]
path = /usr/bin/
[new_species.pl]
path = /usr/bin/
[optimize_augustus.pl]
path = /usr/bin/
[hmmsearch]
# path to HMMsearch executable
path = /usr/local/bin/
[Rscript]
# path to Rscript, if you wish to use the plot tool
path = /usr/bin/""".format(os.environ['PWD'])
)
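# Usage note (added for illustration; the redirect target and the BUSCO_CONFIG_FILE
# convention are assumptions, not part of this script):
#   python print_busco_config.py > busco_config.ini
#   export BUSCO_CONFIG_FILE=$PWD/busco_config.ini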
| 17.243902 | 65 | 0.704385 | #!/usr/bin/env python
from __future__ import print_function
import os
print(
"""[busco]
out_path = {0}
tmp_path = {0}/tmp
[tblastn]
# path to tblastn
path = /usr/bin/
[makeblastdb]
# path to makeblastdb
path = /usr/bin/
[augustus]
# path to augustus
path = /opt/augustus/bin/
[etraining]
# path to augustus etraining
path = /opt/augustus/bin/
# path to augustus perl scripts, redeclare it for each new script
[gff2gbSmallDNA.pl]
path = /usr/bin/
[new_species.pl]
path = /usr/bin/
[optimize_augustus.pl]
path = /usr/bin/
[hmmsearch]
# path to HMMsearch executable
path = /usr/local/bin/
[Rscript]
# path to Rscript, if you wish to use the plot tool
path = /usr/bin/""".format(os.environ['PWD'])
)
| 0 | 0 |
d8d1c4eb65e7686f36da0471b39172240669ffc2 | 903 | py | Python | disasterpets/Pictures/models.py | KavenArango/Disaster_pets_backend | b8510f58fe62c38fefa07a66758af85b70e71693 | [
"MIT"
] | null | null | null | disasterpets/Pictures/models.py | KavenArango/Disaster_pets_backend | b8510f58fe62c38fefa07a66758af85b70e71693 | [
"MIT"
] | 1 | 2021-01-18T20:04:09.000Z | 2021-02-09T16:08:16.000Z | disasterpets/Pictures/models.py | KavenArango/Disaster_pets_backend | b8510f58fe62c38fefa07a66758af85b70e71693 | [
"MIT"
] | null | null | null | from flask import Flask, current_app
import jwt
from disasterpets import db
from disasterpets.Pets.models import Pets
class PetImage(db.Model):
__tablename__ = 'petimage'
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
image_url =db.Column(db.String(200), nullable = False)
def __init__ (self, image_url):
self.image_url = image_url
class PetImageJoin(db.Model):
__tablename__ = 'petimagejoin'
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
pet_id = db.Column(db.Integer, db.ForeignKey('pets.id'))
pet= db.relationship("Pets", uselist=False, lazy='select')
petimage_id = db.Column(db.Integer, db.ForeignKey('petimage.id'))
petimage = db.relationship("PetImage", uselist=False, lazy='select')
def __init__ (self, pet_id, petimage_id):
self.pet_id = pet_id
self.petimage_id = petimage_id | 33.444444 | 72 | 0.707641 | from flask import Flask, current_app
import jwt
from disasterpets import db
from disasterpets.Pets.models import Pets
class PetImage(db.Model):
__tablename__ = 'petimage'
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
image_url =db.Column(db.String(200), nullable = False)
def __init__ (self, image_url):
self.image_url = image_url
class PetImageJoin(db.Model):
__tablename__ = 'petimagejoin'
id = db.Column(db.Integer, primary_key = True, autoincrement = True)
pet_id = db.Column(db.Integer, db.ForeignKey('pets.id'))
pet= db.relationship("Pets", uselist=False, lazy='select')
petimage_id = db.Column(db.Integer, db.ForeignKey('petimage.id'))
petimage = db.relationship("PetImage", uselist=False, lazy='select')
def __init__ (self, pet_id, petimage_id):
self.pet_id = pet_id
self.petimage_id = petimage_id | 0 | 0 |
3119fab8ff4c8283e3ff2a1b33aa787a926adf2f | 3,234 | py | Python | parlai/core/build_data.py | rockingdingo/ParlAI | ceb009e1d81d2fec22454667559c6ff02a5624b9 | [
"BSD-3-Clause"
] | null | null | null | parlai/core/build_data.py | rockingdingo/ParlAI | ceb009e1d81d2fec22454667559c6ff02a5624b9 | [
"BSD-3-Clause"
] | null | null | null | parlai/core/build_data.py | rockingdingo/ParlAI | ceb009e1d81d2fec22454667559c6ff02a5624b9 | [
"BSD-3-Clause"
] | 1 | 2019-10-10T01:17:09.000Z | 2019-10-10T01:17:09.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Utilities for downloading and building data.
These can be replaced if your particular file system does not support them.
"""
import datetime
import os
import requests
import shutil
import wget
def built(path):
"""Checks if '.built' flag has been set for that task."""
return os.path.isfile(os.path.join(path, '.built'))
def download(path, url, redownload=True):
"""Downloads file using `wget`. If redownload is set to false, then will not
download tar file again if it is present (default true).
"""
if redownload or not os.path.isfile(path):
filename = wget.download(url, out=path)
print() # wget prints download status, without newline
def download_request(url, path, fname):
"""Downloads file using `requests`."""
with requests.Session() as session:
response = session.get(url, stream=True)
CHUNK_SIZE = 32768
with open(os.path.join(path, fname), 'wb') as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
response.close()
def make_dir(path):
"""Makes the directory and any nonexistent parent directories."""
os.makedirs(path, exist_ok=True)
def mark_done(path):
"""Marks the path as done by adding a '.built' file with the current
timestamp.
"""
with open(os.path.join(path, '.built'), 'w') as write:
write.write(str(datetime.datetime.today()))
def move(path1, path2):
"""Renames the given file."""
shutil.move(path1, path2)
def remove_dir(path):
"""Removes the given directory, if it exists."""
shutil.rmtree(path, ignore_errors=True)
def untar(path, fname, deleteTar=True):
"""Unpacks the given archive file to the same directory, then (by default)
deletes the archive file.
"""
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def download_from_google_drive(gd_id, destination):
"""Uses the requests package to download a file from Google Drive."""
URL = 'https://docs.google.com/uc?export=download'
with requests.Session() as session:
response = session.get(URL, params={'id': gd_id}, stream=True)
token = _get_confirm_token(response)
if token:
response.close()
params = {'id': gd_id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
CHUNK_SIZE = 32768
with open(destination, 'wb') as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
response.close()
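# Illustrative sketch (assumption): the typical shape of a task's build() routine using
# the helpers above -- download an archive once, unpack it, then mark the directory as
# built so later calls become no-ops. The URL and archive name are placeholders.
def _example_build(dpath):
    if not built(dpath):
        make_dir(dpath)
        fname = 'data.tar.gz'
        download(os.path.join(dpath, fname), 'http://example.com/' + fname)
        untar(dpath, fname)
        mark_done(dpath)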
| 34.404255 | 80 | 0.660482 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Utilities for downloading and building data.
These can be replaced if your particular file system does not support them.
"""
import datetime
import os
import requests
import shutil
import wget
def built(path):
"""Checks if '.built' flag has been set for that task."""
return os.path.isfile(os.path.join(path, '.built'))
def download(path, url, redownload=True):
"""Downloads file using `wget`. If redownload is set to false, then will not
download tar file again if it is present (default true).
"""
if redownload or not os.path.isfile(path):
filename = wget.download(url, out=path)
print() # wget prints download status, without newline
def download_request(url, path, fname):
"""Downloads file using `requests`."""
with requests.Session() as session:
response = session.get(url, stream=True)
CHUNK_SIZE = 32768
with open(os.path.join(path, fname), 'wb') as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
response.close()
def make_dir(path):
"""Makes the directory and any nonexistent parent directories."""
os.makedirs(path, exist_ok=True)
def mark_done(path):
"""Marks the path as done by adding a '.built' file with the current
timestamp.
"""
with open(os.path.join(path, '.built'), 'w') as write:
write.write(str(datetime.datetime.today()))
def move(path1, path2):
"""Renames the given file."""
shutil.move(path1, path2)
def remove_dir(path):
"""Removes the given directory, if it exists."""
shutil.rmtree(path, ignore_errors=True)
def untar(path, fname, deleteTar=True):
"""Unpacks the given archive file to the same directory, then (by default)
deletes the archive file.
"""
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def download_from_google_drive(gd_id, destination):
"""Uses the requests package to download a file from Google Drive."""
URL = 'https://docs.google.com/uc?export=download'
with requests.Session() as session:
response = session.get(URL, params={'id': gd_id}, stream=True)
token = _get_confirm_token(response)
if token:
response.close()
params = {'id': gd_id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
CHUNK_SIZE = 32768
with open(destination, 'wb') as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
response.close()
| 0 | 0 |
8d4636f7e70195eab6c8489ce3351b6b03573fe7 | 732 | py | Python | pi/firebase_utils.py | sastels/pi-temp | 9f56ed1f14129884fd72ec0d36cfa05657170f1c | [
"MIT"
] | null | null | null | pi/firebase_utils.py | sastels/pi-temp | 9f56ed1f14129884fd72ec0d36cfa05657170f1c | [
"MIT"
] | 8 | 2020-09-04T17:19:36.000Z | 2022-02-26T10:03:49.000Z | pi/firebase_utils.py | sastels/pi-temp | 9f56ed1f14129884fd72ec0d36cfa05657170f1c | [
"MIT"
] | null | null | null | from datetime import datetime
import pytz
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
def setup_firebase(service_account_path):
cred = credentials.Certificate(service_account_path)
firebase_admin.initialize_app(cred)
db = firestore.client()
return db
def upload_to_firebase(db, pi_id, temperature, humidity):
now = datetime.utcnow().replace(tzinfo=pytz.utc)
firebase_id = str(now)
print(firebase_id + " :: temperature= "+ str(temperature), flush=True)
doc_ref = db.collection(pi_id).document(firebase_id)
doc_ref.set({
'pi_id': pi_id,
'datetime': now,
'temperature': temperature,
'humidity': humidity
})
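# Illustrative sketch (assumption): how a sensor loop might use these helpers. The
# service-account path and pi_id are placeholders, not project settings.
def example_upload(service_account_path, pi_id='livingroom-pi'):
    db = setup_firebase(service_account_path)
    upload_to_firebase(db, pi_id, temperature=21.5, humidity=40.2)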
| 30.5 | 74 | 0.719945 | from datetime import datetime
import pytz
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
def setup_firebase(service_account_path):
cred = credentials.Certificate(service_account_path)
firebase_admin.initialize_app(cred)
db = firestore.client()
return db
def upload_to_firebase(db, pi_id, temperature, humidity):
now = datetime.utcnow().replace(tzinfo=pytz.utc)
firebase_id = str(now)
print(firebase_id + " :: temperature= "+ str(temperature), flush=True)
doc_ref = db.collection(pi_id).document(firebase_id)
doc_ref.set({
'pi_id': pi_id,
'datetime': now,
'temperature': temperature,
'humidity': humidity
})
| 0 | 0 |
5c417606898496b4f5606f5108cdcc3a843ec79b | 5,172 | py | Python | eyed/driver/bacnet/bacnet.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | null | null | null | eyed/driver/bacnet/bacnet.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | 55 | 2017-12-21T15:20:36.000Z | 2019-01-20T02:49:41.000Z | eyed/driver/bacnet/bacnet.py | ThousandMileEye/Eye | b0eca371fed5e01353ebddf7e4c400927decf0d2 | [
"Apache-2.0"
] | 3 | 2018-05-18T09:02:36.000Z | 2019-12-29T10:27:44.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address, GlobalBroadcast
from bacpypes.apdu import WhoIsRequest, ReadPropertyRequest, ReadPropertyACK
from bacpypes.object import get_object_class, get_datatype
from bacpypes.object import ObjectType, registered_object_types
from bacpypes.basetypes import PropertyIdentifier
from eyed.driver.bacnet import definition
#
# BACnet Client
#
class BACnetClient:
#
	# BACnetClient initialization
#
def __init__(self, application, auto_device_discovery = True):
#
#
#
self.application = application
#
#
#
self.auto_device_discovery = auto_device_discovery
#
# getAddressByDeviceID
#
def getAddressByDeviceID(self, device_id):
#
#
#
device_map = self.application.getDeviceMap()
if device_id in device_map:
return device_map[device_id]
return None
#
# WhoIsRequest
#
def WhoIsRequest(self, low_limit = 1, high_limit = 65535):
#
		# Clear the queue that stores the WhoIsRequest responses (IAmRequest)
#
#self.application.clear()
#
		# Send the WhoIsRequest
#
self.application.who_is(low_limit, high_limit, GlobalBroadcast())
return True
#
	# Wait to receive an IamRequest
	# - raises Empty on timeout
#
def receiveIamRequest(self, timeout):
#
#
#
device_queue = self.application.getDeviceQueue()
device_id = device_queue.get(timeout = timeout)
return { 'device_id' : device_id }
#
# ReadProperty
#
def _ReadPropertyRequest(self, device_id, objectIdentifier, propertyIdentifier):
#
		# Look up the device's IP address from its device ID
#
address = self.getAddressByDeviceID(device_id)
if not address:
#
#
#
if self.auto_device_discovery == False:
return None
#
#
#
self.WhoIsRequest()
#
#
#
request = ReadPropertyRequest(
destination = address,
objectIdentifier = objectIdentifier,
propertyIdentifier = propertyIdentifier,
)
#
		# Send the request & wait for the result
#
iocb = IOCB(request)
self.application.request_io(iocb)
iocb.wait()
#
#
#
if iocb.ioError:
return None
#
#
#
elif iocb.ioResponse:
#
#
#
apdu = iocb.ioResponse
#
			# Confirm the response is a ReadPropertyACK
#
if not isinstance(apdu, ReadPropertyACK):
				print 'Response does not contain a ReadPropertyACK...'
return None
#
#
#
datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier)
if not datatype:
print 'Unknown datatype...'
return None
#
#
#
return apdu, datatype
#
#
#
else:
print 'Response seems something wrong...'
return None
#
# ReadProperty
#
def ReadPropertyRequest(self, device_id, object_id, instance_id, property_id):
#
#
#
result = BACnetClient._ReadPropertyRequest(
self,
device_id = device_id,
objectIdentifier = (object_id, instance_id),
propertyIdentifier = property_id
)
#
#
#
if result == None:
return None
#
#
#
apdu, datatype = result
return apdu.propertyValue.cast_out(datatype)
#
	# ReadDeviceProperty (read device-related information)
#
def _ReadDevicePropertyRequest(self, device_id, propertyIdentifier):
#
#
#
result = BACnetClient._ReadPropertyRequest(
self,
device_id = device_id,
objectIdentifier = ('device', device_id),
propertyIdentifier = propertyIdentifier
)
#
#
#
if result == None:
return None
#
#
#
apdu, datatype = result
return apdu.propertyValue.cast_out(datatype)
#
	# addObject (register an object)
#
def addObject(self, name, object_id, instance_id):
#
#
#
objectIdentifier = self.getObjectIdentifier(object_id, instance_id)
if objectIdentifier == None:
return False
#
#
#
Object = definition.findObjectClassByType(objectIdentifier[0])
#
#
#
new_object = Object(
objectName = name,
objectIdentifier = objectIdentifier,
)
#
#
#
self.application.add_object(new_object)
return True
#
	# addProperty (register a property)
#
def addProperty(self, name, property_instance):
#
#
#
obj = self.application.get_object_name(name)
if obj == None: return False
#
#
#
obj.add_property(property_instance)
return True
#
	# getProperty (look up a property)
#
def getProperty(self, name, property_name):
obj = self.getObjectByName(name)
return obj._properties.get(property_name)
#
	# getObjectIdentifier (build an object identifier)
#
def getObjectIdentifier(self, object_id, instance_id):
#
#
#
obj_type = definition.findObjectByID(object_id)
if obj_type == None:
return None
objectType = obj_type['name']
#
#
#
return (objectType, instance_id)
#
	# getObjectByID (look up an object by ID)
#
def getObjectByID(self, objectIdentifier, instance_id):
#
#
#
return self.application.get_object_id((objectIdentifier, instance_id))
#
	# getObjectByName (look up an object by name)
#
def getObjectByName(self, name):
#
#
#
return self.application.get_object_name(name)
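#
# Illustrative usage sketch (assumption, not part of the original driver):
# `application` is the BACnet application object built elsewhere in this package.
#
def _example_read_present_value(application, timeout=5):
	client = BACnetClient(application)
	client.WhoIsRequest()
	device = client.receiveIamRequest(timeout=timeout)  # raises Empty on timeout
	return client.ReadPropertyRequest(device['device_id'], 'analogInput', 1, 'presentValue')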
| 18.083916 | 81 | 0.691415 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address, GlobalBroadcast
from bacpypes.apdu import WhoIsRequest, ReadPropertyRequest, ReadPropertyACK
from bacpypes.object import get_object_class, get_datatype
from bacpypes.object import ObjectType, registered_object_types
from bacpypes.basetypes import PropertyIdentifier
from eyed.driver.bacnet import definition
#
# BACnet Client
#
class BACnetClient:
#
# BACnetClient 初期化処理
#
def __init__(self, application, auto_device_discovery = True):
#
# アプリケーションの取得
#
self.application = application
#
# デバイス の 探索を自動で実行するか?
#
self.auto_device_discovery = auto_device_discovery
#
# getAddressByDeviceID
#
def getAddressByDeviceID(self, device_id):
#
# デバイスマップの返却
#
device_map = self.application.getDeviceMap()
if device_id in device_map:
return device_map[device_id]
return None
#
# WhoIsRequest
#
def WhoIsRequest(self, low_limit = 1, high_limit = 65535):
#
# WhoIsRequest の レスポンス(IAmRequest) を保存するキューをクリア
#
#self.application.clear()
#
# WhoIsRequest の 送信
#
self.application.who_is(low_limit, high_limit, GlobalBroadcast())
return True
#
# IamRequest の 受信待ち
# - 例外: Empty (タイムアウト時)
#
def receiveIamRequest(self, timeout):
#
# タイムアウト秒の間受信待ち
#
device_queue = self.application.getDeviceQueue()
device_id = device_queue.get(timeout = timeout)
return { 'device_id' : device_id }
#
# ReadProperty
#
def _ReadPropertyRequest(self, device_id, objectIdentifier, propertyIdentifier):
#
# デバイスID から IPの取得
#
address = self.getAddressByDeviceID(device_id)
if not address:
#
# デバイスの探索オプションの確認
#
if self.auto_device_discovery == False:
return None
#
# デバイスの探索
#
self.WhoIsRequest()
#
# リクエスト作成
#
request = ReadPropertyRequest(
destination = address,
objectIdentifier = objectIdentifier,
propertyIdentifier = propertyIdentifier,
)
#
# リクエストを送信 & 結果取得待ち
#
iocb = IOCB(request)
self.application.request_io(iocb)
iocb.wait()
#
# エラーがあるかを確認
#
if iocb.ioError:
return None
#
# レスポンスの確認
#
elif iocb.ioResponse:
#
# レスポンスデータの取得
#
apdu = iocb.ioResponse
#
# ACKであるかの確認
#
if not isinstance(apdu, ReadPropertyACK):
print 'ACK is not contain...'
return None
#
# データタイプの取得
#
datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier)
if not datatype:
print 'Unknown datatype...'
return None
#
# データ種別と値の取得
#
return apdu, datatype
#
# 例外
#
else:
print 'Response seems something wrong...'
return None
#
# ReadProperty
#
def ReadPropertyRequest(self, device_id, object_id, instance_id, property_id):
#
# リクエストの作成
#
result = BACnetClient._ReadPropertyRequest(
self,
device_id = device_id,
objectIdentifier = (object_id, instance_id),
propertyIdentifier = property_id
)
#
# レスポンスの確認
#
if result == None:
return None
#
# キャスト
#
apdu, datatype = result
return apdu.propertyValue.cast_out(datatype)
#
# ReadDeviceProperty (デバイス関連の情報読み出し)
#
def _ReadDevicePropertyRequest(self, device_id, propertyIdentifier):
#
# リクエストの作成
#
result = BACnetClient._ReadPropertyRequest(
self,
device_id = device_id,
objectIdentifier = ('device', device_id),
propertyIdentifier = propertyIdentifier
)
#
# レスポンスの確認
#
if result == None:
return None
#
# キャスト
#
apdu, datatype = result
return apdu.propertyValue.cast_out(datatype)
#
# addObject (オブジェクト の 登録)
#
def addObject(self, name, object_id, instance_id):
#
# オブジェクト識別子の取得
#
objectIdentifier = self.getObjectIdentifier(object_id, instance_id)
if objectIdentifier == None:
return False
#
# オブジェクトクラス の 取得
#
Object = definition.findObjectClassByType(objectIdentifier[0])
#
# オブジェクト の 定義
#
new_object = Object(
objectName = name,
objectIdentifier = objectIdentifier,
)
#
# オブジェクト の 登録
#
self.application.add_object(new_object)
return True
#
# addProperty (プロパティ の 登録)
#
def addProperty(self, name, property_instance):
#
# オブジェクトを名前から検索
#
obj = self.application.get_object_name(name)
if obj == None: return False
#
# プロパティの登録
#
obj.add_property(property_instance)
return True
#
# getProperty (プロパティ の 登録)
#
def getProperty(self, name, property_name):
obj = self.getObjectByName(name)
return obj._properties.get(property_name)
#
# getObjectByID (オブジェクト の 取得)
#
def getObjectIdentifier(self, object_id, instance_id):
#
# オブジェクト識別子の作成
#
obj_type = definition.findObjectByID(object_id)
if obj_type == None:
return None
objectType = obj_type['name']
#
# オブジェクト識別子の作成
#
return (objectType, instance_id)
#
# getObjectByID (オブジェクト の 取得 [ID 検索])
#
def getObjectByID(self, objectIdentifier, instance_id):
#
# 登録されているオブジェクトの検索
#
return self.application.get_object_id((objectIdentifier, instance_id))
#
# getObjectByName (オブジェクト の 取得 [名前で検索])
#
def getObjectByName(self, name):
#
# オブジェクトを名前から検索
#
return self.application.get_object_name(name)
| 1,284 | 0 |
f285c95b7e4057a14af0bdee70433ff402fc6437 | 11,535 | py | Python | src/forms/users.py | Dourv/tornado-mongo | 95dbd1151abac2831d98b6d768a86f59b11c273d | [
"MIT"
] | 2 | 2015-04-21T14:49:05.000Z | 2015-04-21T15:15:40.000Z | src/forms/users.py | Dourv/tornado-mongo | 95dbd1151abac2831d98b6d768a86f59b11c273d | [
"MIT"
] | null | null | null | src/forms/users.py | Dourv/tornado-mongo | 95dbd1151abac2831d98b6d768a86f59b11c273d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bson.objectid import ObjectId
from pymongo import MongoClient
from validate_email import validate_email
from views.base import base
import config
import hashlib
'''
	forms constructor.
	* You must build a dict() variable with the following structure.
	{
		'config (required)':{
			'method (required)': 'POST or GET',
			'action (required)': 'url that the data is submitted to',
			'class' : 'CSS classes',
			'error-class': 'CSS class used for errors'
		},
		fields (required): [
			{
				'name (required)': 'field name',
				'widget (required)': 'input type',
				'class': 'CSS classes',
				'id': 'value of the id attribute',
				'label' (*requires the field id to be set.): {
					'attributes': 'any other attribute not covered above, e.g. data-*= "" ',
					'class': 'CSS classes'
				}
				'placeholder': 'placeholder value',
				'required': 'True or False',
				'value': 'default value of the field.'
			}
		]
	}
'''
class users():
@property
def db(self):
if config.debug == True:
client = MongoClient('localhost', 27017)
else:
client = MongoClient('mongodb://'+config.__user+':'+config.__psw+'@'+config.__host, config.__port)
return client[config.database]
def form(self):
_form = {
'config' : {
'method': 'POST',
'action' : '/admin/users',
'class' : 'form-horizontal',
'error-class' : ''
},
'fields': [
{
'required':True,
'widget':'text',
'attributes': {
'class': 'form-control floating-label',
'data-hint':'Por favor escriba el usuario que usara para ingresar',
'name': 'username',
'placeholder': 'Username'
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'text',
'attributes': {
'class': 'form-control floating-label',
'data-hint':'Escriba el nombre del usuario',
'name': 'first_name',
'placeholder': 'Nombre'
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'text',
'attributes':{
'class': 'form-control floating-label',
'data-hint':'Escriba el apellido del usuario',
'name': 'last_name',
'placeholder': 'Last Name'
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'email',
'attributes':{
'class': 'form-control floating-label',
'data-hint':'Escriba el correo electronico del Usuario',
'name': 'email',
'placeholder': 'Email'
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'select',
'attributes':{
'name': 'rol',
'class': 'form-control',
'placeholder' : 'Seleccione un Rol de Usuario',
},
'label_class':'col-lg-1 control-label',
'form-group-class': 'col-md-12',
'options': list()
},
{
'required':True,
'widget':'password',
'attributes': {
						'data-hint':"Escriba la contraseña para el usuario",
'name': 'password',
'placeholder': 'Password',
'class': 'form-control floating-label',
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'password',
'attributes': {
						'data-hint':'Confirme la contraseña del usuario',
'class': 'form-control floating-label',
'placeholder': 'Confirm Password',
'name': 'password_confirm',
},
'form-group-class': 'col-md-12',
},
{
'widget':'submit',
'attributes':{
'name': 'submit',
'class': 'btn btn-primary',
'value': 'Crear nuevo Usuario'
},
'form-group-class': 'col-md-6'
},
{
'widget':'reset',
'attributes':{
'name': 'submit',
'class': 'btn btn-default',
'value': 'Limpiar formulario'
},
'form-group-class': 'col-md-6'
}
]
}
rols = self.db.rols.find()
for rol in rols:
data ={
'name':rol['name']
}
_form['fields'][4]['options'].append(data)
return _form
def form_edit(self,id):
user = self.db.users.find_one({'_id':ObjectId(id)})
_form = {
'config' : {
'method': 'POST',
'action' : '/admin/users/edit/'+id,
'class' : 'form-horizontal',
'error-class' : ''
},
'fields': [
{
'required':True,
'widget':'text',
'attributes': {
'class': 'form-control floating-label',
'data-hint':'Por favor escriba el usuario que usara para ingresar',
'name': 'username',
'placeholder': 'Username',
'value' : user['username']
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'text',
'attributes': {
'class': 'form-control floating-label',
'data-hint':'Escriba el nombre del usuario',
'name': 'first_name',
'placeholder': 'Nombre',
'value' : user['first_name']
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'text',
'attributes':{
'class': 'form-control floating-label',
'data-hint':'Escriba el apellido del usuario',
'name': 'last_name',
'placeholder': 'Last Name',
'value': user['last_name']
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'email',
'attributes':{
'class': 'form-control floating-label',
'data-hint':'Escriba el correo electronico del Usuario',
'name': 'email',
'placeholder': 'Email',
'value': user['email']
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'select',
'attributes':{
'name': 'rol',
'class': 'form-control',
'placeholder' : 'Seleccione un Rol de Usuario',
},
'label_class':'col-lg-1 control-label',
'form-group-class': 'col-md-12',
'options': list()
},
{
'required':True,
'widget':'password',
'attributes': {
						'data-hint':"Escriba la contraseña para el usuario",
'name': 'password',
'placeholder': 'Password',
'class': 'form-control floating-label',
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'password',
'attributes': {
						'data-hint':'Confirme la contraseña del usuario',
'class': 'form-control floating-label',
'placeholder': 'Confirm Password',
'name': 'password_confirm',
},
'form-group-class': 'col-md-12',
},
{
'widget':'hidden',
'attributes': {
'value': id,
'name':'id'
}
},
{
'widget':'submit',
'attributes':{
'name': 'submit',
'class': 'btn btn-primary',
'value': 'Crear nuevo Usuario'
},
'form-group-class': 'col-md-6'
},
{
'widget':'reset',
'attributes':{
'name': 'submit',
'class': 'btn btn-default',
'value': 'Limpiar formulario'
},
'form-group-class': 'col-md-6'
}
]
}
rols = self.db.rols.find()
for rol in rols:
data ={
'name':rol['name'],
'selected': False
}
if user['rol'] == rol['name']:
print user['rol']
print rol['name']
data['selected'] = True
_form['fields'][4]['options'].append(data)
return _form
def validation(self,data,edit=False):
form = self.form()
validation = {'status':True, 'errors': list() }
if 'username' in data:
user = self.db.users.find_one({'username': data['username']})
if len(data['username']) < 3:
validation['status'] = False
validation['errors'].append('El campo nombre debe poseer al menos 3 caracteres.')
if user != None:
if edit == False:
validation['status'] = False
validation['errors'].append('El nombre de usuario ya existe.')
else:
if data['id'] != str(user['_id']):
validation['status'] = False
validation['errors'].append('El nombre de usuario ya existe.')
else:
validation['status'] = False
validation['errors'].append('El campo nombre es Obligatorio.')
if 'first_name' in data:
if len(data['first_name']) < 3:
validation['status'] = False
validation['errors'].append({'field':'first_name','value':'El campo nombre debe poseer al menos 3 caracteres.'})
else:
validation['status'] = False
validation['errors'].append('El campo nombre es Obligatorio.')
if 'last_name' in data:
if len(data['last_name']) < 3:
validation['status'] = False
validation['errors'].append('El campo Apellido debe poseer al menos 3 caracteres.')
else:
validation['status'] = False
validation['errors'].append('El campo Apellido es Obligatorio.')
if 'email' in data:
if validate_email(data['email']) == False:
validation['status'] = False
validation['errors'].append('Inserte un email valido.')
else:
if edit == False:
if self.db.users.find_one({'email':data['email']}) != None:
validation['status'] = False
validation['errors'].append('Ya existe un usuario con este email.')
else:
email = self.db.users.find_one({'email':data['email']})
print data['id']
print str(email['_id'])
if email != None and data['id'] != str(email['_id']):
validation['status'] = False
validation['errors'].append('Otro usuario ya tiene este email.')
else:
validation['status'] = False
validation['errors'].append('El campo Email es Obligatorio.')
if 'rol' in data:
rols = self.db.rols.find_one({'name':data['rol']})
if rols == None:
if self.db.users.find().count() <= 0:
if data['rol'] != 'admin':
validation['status'] = False
validation['errors'].append('El Primer usuario debe ser Admin')
else:
validation['status'] = False
validation['errors'].append('Seleccione un rol valido')
password = False
if len(data['password']) > 0:
password = True
if len(data['password']) < 4:
validation['status'] = False
				validation['errors'].append('La Contraseña debe tener al menos 4 Caracteres')
password = False
if password == True:
if data['password_confirm'] != data['password']:
validation['status'] = False
				validation['errors'].append('Las Contraseñas no coinciden')
if validation['status'] == True:
if edit == False:
if self.db.users.find().count() <= 0:
self.insert(data,admin=True)
else:
self.insert(data)
return 'Nuevo usuario '+data['username']+' Creado'
else:
return self.edit(data)
else:
return validation
def insert(self,data,admin=False):
_INSERT = {
'username': data['username'].lower(),
'first_name': data['first_name'],
'last_name': data['last_name'],
'email': data['email'],
'password': hashlib.md5(data['password']).hexdigest(),
'rol' : data['rol'],
'status' : True
}
if admin == True:
_INSERT['block'] = True
self.db.users.insert(_INSERT)
def edit(self, data):
old_data = self.db.users.find_one({'_id':ObjectId(data['id'])})
new_data = {
'username': data['username'].lower(),
'first_name': data['first_name'],
'last_name': data['last_name'],
'email': data['email'],
'password': hashlib.md5(data['password']).hexdigest(),
'rol' : data['rol'],
'status' : old_data['status']
}
if new_data['rol'] == 'admin':
new_data['block'] = True
self.db.users.update(old_data,new_data)
return 'Usuario '+old_data['first_name'] + ' ' + old_data['last_name'] +' editado correctamente.'
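# Illustrative usage sketch (assumption): driving this class outside a Tornado handler.
# The field values are placeholders; on success validation() returns a confirmation
# string, otherwise a dict with 'status' and 'errors'.
def _example_create_user():
	form_helper = users()
	return form_helper.validation({
		'username': 'jdoe', 'first_name': 'John', 'last_name': 'Doe',
		'email': 'jdoe@example.com', 'rol': 'admin',
		'password': 'secret', 'password_confirm': 'secret',
	})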
| 25.690423 | 118 | 0.570438 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bson.objectid import ObjectId
from pymongo import MongoClient
from validate_email import validate_email
from views.base import base
import config
import hashlib
'''
forms constructor.
* Es necesario crear una variable tipo dict() que debe llevar la siguiente estructura.
{
'config(requerido)':{
'method(requerido)': 'valores POST o GET',
'action(requerido)': 'url para enviar la data',
'class' : 'Clases de css',
'error-class': 'Clase para el error'
},
fields(requerido): [
{
'name(requerido)': 'nombre del campo',
'widget(requerido)': 'Tipo de input',
'class': 'Clases de css',
'id': 'Valor del ID',
'label'(*Requiere que el ID del campo este seteado.): {
'attributes': 'Cualquier otro valor que no este disponible. ejemplo: data-*= "" ',
'class': 'Clases de css'
}
'placeholder': 'Valor del placeholder',
'required': 'Valores True o False',
'value': 'valor default del campo.'
}
]
}
'''
class users():
@property
def db(self):
if config.debug == True:
client = MongoClient('localhost', 27017)
else:
client = MongoClient('mongodb://'+config.__user+':'+config.__psw+'@'+config.__host, config.__port)
return client[config.database]
def form(self):
_form = {
'config' : {
'method': 'POST',
'action' : '/admin/users',
'class' : 'form-horizontal',
'error-class' : ''
},
'fields': [
{
'required':True,
'widget':'text',
'attributes': {
'class': 'form-control floating-label',
'data-hint':'Por favor escriba el usuario que usara para ingresar',
'name': 'username',
'placeholder': 'Username'
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'text',
'attributes': {
'class': 'form-control floating-label',
'data-hint':'Escriba el nombre del usuario',
'name': 'first_name',
'placeholder': 'Nombre'
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'text',
'attributes':{
'class': 'form-control floating-label',
'data-hint':'Escriba el apellido del usuario',
'name': 'last_name',
'placeholder': 'Last Name'
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'email',
'attributes':{
'class': 'form-control floating-label',
'data-hint':'Escriba el correo electronico del Usuario',
'name': 'email',
'placeholder': 'Email'
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'select',
'attributes':{
'name': 'rol',
'class': 'form-control',
'placeholder' : 'Seleccione un Rol de Usuario',
},
'label_class':'col-lg-1 control-label',
'form-group-class': 'col-md-12',
'options': list()
},
{
'required':True,
'widget':'password',
'attributes': {
'data-hint':"Escriba la contraseña para el usuario",
'name': 'password',
'placeholder': 'Password',
'class': 'form-control floating-label',
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'password',
'attributes': {
'data-hint':'Confirme la contraseña del usuario',
'class': 'form-control floating-label',
'placeholder': 'Confirm Password',
'name': 'password_confirm',
},
'form-group-class': 'col-md-12',
},
{
'widget':'submit',
'attributes':{
'name': 'submit',
'class': 'btn btn-primary',
'value': 'Crear nuevo Usuario'
},
'form-group-class': 'col-md-6'
},
{
'widget':'reset',
'attributes':{
'name': 'submit',
'class': 'btn btn-default',
'value': 'Limpiar formulario'
},
'form-group-class': 'col-md-6'
}
]
}
rols = self.db.rols.find()
for rol in rols:
data ={
'name':rol['name']
}
_form['fields'][4]['options'].append(data)
return _form
def form_edit(self,id):
user = self.db.users.find_one({'_id':ObjectId(id)})
_form = {
'config' : {
'method': 'POST',
'action' : '/admin/users/edit/'+id,
'class' : 'form-horizontal',
'error-class' : ''
},
'fields': [
{
'required':True,
'widget':'text',
'attributes': {
'class': 'form-control floating-label',
'data-hint':'Por favor escriba el usuario que usara para ingresar',
'name': 'username',
'placeholder': 'Username',
'value' : user['username']
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'text',
'attributes': {
'class': 'form-control floating-label',
'data-hint':'Escriba el nombre del usuario',
'name': 'first_name',
'placeholder': 'Nombre',
'value' : user['first_name']
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'text',
'attributes':{
'class': 'form-control floating-label',
'data-hint':'Escriba el apellido del usuario',
'name': 'last_name',
'placeholder': 'Last Name',
'value': user['last_name']
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'email',
'attributes':{
'class': 'form-control floating-label',
'data-hint':'Escriba el correo electronico del Usuario',
'name': 'email',
'placeholder': 'Email',
'value': user['email']
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'select',
'attributes':{
'name': 'rol',
'class': 'form-control',
'placeholder' : 'Seleccione un Rol de Usuario',
},
'label_class':'col-lg-1 control-label',
'form-group-class': 'col-md-12',
'options': list()
},
{
'required':True,
'widget':'password',
'attributes': {
'data-hint':"Escriba la contraseña para el usuario",
'name': 'password',
'placeholder': 'Password',
'class': 'form-control floating-label',
},
'form-group-class': 'col-md-12',
},
{
'required':True,
'widget':'password',
'attributes': {
'data-hint':'Confirme la contraseña del usuario',
'class': 'form-control floating-label',
'placeholder': 'Confirm Password',
'name': 'password_confirm',
},
'form-group-class': 'col-md-12',
},
{
'widget':'hidden',
'attributes': {
'value': id,
'name':'id'
}
},
{
'widget':'submit',
'attributes':{
'name': 'submit',
'class': 'btn btn-primary',
'value': 'Crear nuevo Usuario'
},
'form-group-class': 'col-md-6'
},
{
'widget':'reset',
'attributes':{
'name': 'submit',
'class': 'btn btn-default',
'value': 'Limpiar formulario'
},
'form-group-class': 'col-md-6'
}
]
}
rols = self.db.rols.find()
for rol in rols:
data ={
'name':rol['name'],
'selected': False
}
if user['rol'] == rol['name']:
print user['rol']
print rol['name']
data['selected'] = True
_form['fields'][4]['options'].append(data)
return _form
def validation(self,data,edit=False):
form = self.form()
validation = {'status':True, 'errors': list() }
if 'username' in data:
user = self.db.users.find_one({'username': data['username']})
if len(data['username']) < 3:
validation['status'] = False
validation['errors'].append('El campo nombre debe poseer al menos 3 caracteres.')
if user != None:
if edit == False:
validation['status'] = False
validation['errors'].append('El nombre de usuario ya existe.')
else:
if data['id'] != str(user['_id']):
validation['status'] = False
validation['errors'].append('El nombre de usuario ya existe.')
else:
validation['status'] = False
validation['errors'].append('El campo nombre es Obligatorio.')
if 'first_name' in data:
if len(data['first_name']) < 3:
validation['status'] = False
validation['errors'].append({'field':'first_name','value':'El campo nombre debe poseer al menos 3 caracteres.'})
else:
validation['status'] = False
validation['errors'].append('El campo nombre es Obligatorio.')
if 'last_name' in data:
if len(data['last_name']) < 3:
validation['status'] = False
validation['errors'].append('El campo Apellido debe poseer al menos 3 caracteres.')
else:
validation['status'] = False
validation['errors'].append('El campo Apellido es Obligatorio.')
if 'email' in data:
if validate_email(data['email']) == False:
validation['status'] = False
validation['errors'].append('Inserte un email valido.')
else:
if edit == False:
if self.db.users.find_one({'email':data['email']}) != None:
validation['status'] = False
validation['errors'].append('Ya existe un usuario con este email.')
else:
email = self.db.users.find_one({'email':data['email']})
print data['id']
print str(email['_id'])
if email != None and data['id'] != str(email['_id']):
validation['status'] = False
validation['errors'].append('Otro usuario ya tiene este email.')
else:
validation['status'] = False
validation['errors'].append('El campo Email es Obligatorio.')
if 'rol' in data:
rols = self.db.rols.find_one({'name':data['rol']})
if rols == None:
if self.db.users.find().count() <= 0:
if data['rol'] != 'admin':
validation['status'] = False
validation['errors'].append('El Primer usuario debe ser Admin')
else:
validation['status'] = False
validation['errors'].append('Seleccione un rol valido')
password = False
if len(data['password']) > 0:
password = True
if len(data['password']) < 4:
validation['status'] = False
validation['errors'].append('La Contraseña debe tener al menos 4 Caracteres')
password = False
if password == True:
if data['password_confirm'] != data['password']:
validation['status'] = False
validation['errors'].append('Las Contraseñas no coinciden')
if validation['status'] == True:
if edit == False:
if self.db.users.find().count() <= 0:
self.insert(data,admin=True)
else:
self.insert(data)
return 'Nuevo usuario '+data['username']+' Creado'
else:
return self.edit(data)
else:
return validation
def insert(self,data,admin=False):
_INSERT = {
'username': data['username'].lower(),
'first_name': data['first_name'],
'last_name': data['last_name'],
'email': data['email'],
'password': hashlib.md5(data['password']).hexdigest(),
'rol' : data['rol'],
'status' : True
}
if admin == True:
_INSERT['block'] = True
self.db.users.insert(_INSERT)
def edit(self, data):
old_data = self.db.users.find_one({'_id':ObjectId(data['id'])})
new_data = {
'username': data['username'].lower(),
'first_name': data['first_name'],
'last_name': data['last_name'],
'email': data['email'],
'password': hashlib.md5(data['password']).hexdigest(),
'rol' : data['rol'],
'status' : old_data['status']
}
if new_data['rol'] == 'admin':
new_data['block'] = True
self.db.users.update(old_data,new_data)
return 'Usuario '+old_data['first_name'] + ' ' + old_data['last_name'] +' editado correctamente.'
| 12 | 0 |
f2d93262ed8c5501a226a28e3d0ba7c98b7c26e2 | 175 | py | Python | python/Twisted/krondo Twisted Introduction/basic-twisted/stack.py | RitamDey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | 2 | 2016-10-14T16:58:05.000Z | 2017-05-04T04:59:18.000Z | python/Twisted/krondo Twisted Introduction/basic-twisted/stack.py | GreenJoey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | null | null | null | python/Twisted/krondo Twisted Introduction/basic-twisted/stack.py | GreenJoey/My-Simple-Programs | 147b455a6a40c371ec894ce979e8a61d242e03bd | [
"Unlicense"
] | null | null | null | import traceback
from twisted.internet import reactor
def stack():
print("The Python Stack.")
traceback.print_stack()
reactor.callWhenRunning(stack)
reactor.run()
| 14.583333 | 36 | 0.748571 | import traceback
from twisted.internet import reactor
def stack():
print("The Python Stack.")
traceback.print_stack()
reactor.callWhenRunning(stack)
reactor.run()
| 0 | 0 |
ab96b071bc740f843d9faa533a1f2a73a5589c9b | 2,775 | py | Python | src/ralph_scrooge/plugins/collect/blade_server.py | xliiv/ralph_pricing | 88a295b6f0af66ae03c145205ada99f17ab51dd0 | [
"Apache-2.0"
] | null | null | null | src/ralph_scrooge/plugins/collect/blade_server.py | xliiv/ralph_pricing | 88a295b6f0af66ae03c145205ada99f17ab51dd0 | [
"Apache-2.0"
] | null | null | null | src/ralph_scrooge/plugins/collect/blade_server.py | xliiv/ralph_pricing | 88a295b6f0af66ae03c145205ada99f17ab51dd0 | [
"Apache-2.0"
] | 1 | 2021-11-15T21:21:17.000Z | 2021-11-15T21:21:17.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from ralph.util import plugin
from ralph.util.api_scrooge import get_blade_servers
from ralph_scrooge.models import (
AssetInfo,
DailyAssetInfo,
DailyUsage,
UsageType,
)
logger = logging.getLogger(__name__)
class AssetInfoNotFoundError(Exception):
pass
class DailyAssetInfoNotFoundError(Exception):
pass
def update_usage(daily_asset_info, date, value, usage_type):
"""
    Saves a single usage record to the model.
"""
usage, created = DailyUsage.objects.get_or_create(
date=date,
type=usage_type,
daily_pricing_object=daily_asset_info,
defaults=dict(
service_environment=daily_asset_info.service_environment,
)
)
usage.service_environment = daily_asset_info.service_environment
usage.value = value
usage.save()
return created
def update_blade_server(data, date, usage_type):
"""
Updates single Blade Server usage type record
"""
try:
asset_info = AssetInfo.objects.get(device_id=data['device_id'])
daily_asset_info = asset_info.dailyassetinfo_set.get(date=date)
return update_usage(
daily_asset_info,
date,
1,
usage_type,
)
except AssetInfo.DoesNotExist:
raise AssetInfoNotFoundError()
except DailyAssetInfo.DoesNotExist:
raise DailyAssetInfoNotFoundError()
def get_usage_type():
"""
Returns Blade Server usage type
"""
return UsageType.objects.get_or_create(
symbol='blade_server',
defaults=dict(
name='Blade server',
)
)[0]
@plugin.register(chain='scrooge', requires=['asset', 'service'])
def blade_server(today, **kwargs):
"""
Updates Blade Servers usages from Ralph
"""
usage_type = get_usage_type()
new_blades = updated = total = 0
for data in get_blade_servers():
try:
if update_blade_server(data, today, usage_type):
new_blades += 1
else:
updated += 1
except AssetInfoNotFoundError:
logger.warning('Device {} not found'.format(data['device_id']))
except DailyAssetInfoNotFoundError:
logger.warning(
'DailyAssetInfo for id {} and date {} not found'.format(
data['device_id'],
today,
)
)
total += 1
return (
True,
'{} new Blade Servers usages, {} updated, {} total'.format(
new_blades,
updated,
total,
)
)
| 25.227273 | 75 | 0.623423 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from ralph.util import plugin
from ralph.util.api_scrooge import get_blade_servers
from ralph_scrooge.models import (
AssetInfo,
DailyAssetInfo,
DailyUsage,
UsageType,
)
logger = logging.getLogger(__name__)
class AssetInfoNotFoundError(Exception):
pass
class DailyAssetInfoNotFoundError(Exception):
pass
def update_usage(daily_asset_info, date, value, usage_type):
"""
Saves single record to model
"""
usage, created = DailyUsage.objects.get_or_create(
date=date,
type=usage_type,
daily_pricing_object=daily_asset_info,
defaults=dict(
service_environment=daily_asset_info.service_environment,
)
)
usage.service_environment = daily_asset_info.service_environment
usage.value = value
usage.save()
return created
def update_blade_server(data, date, usage_type):
"""
Updates single Blade Server usage type record
"""
try:
asset_info = AssetInfo.objects.get(device_id=data['device_id'])
daily_asset_info = asset_info.dailyassetinfo_set.get(date=date)
return update_usage(
daily_asset_info,
date,
1,
usage_type,
)
except AssetInfo.DoesNotExist:
raise AssetInfoNotFoundError()
except DailyAssetInfo.DoesNotExist:
raise DailyAssetInfoNotFoundError()
def get_usage_type():
"""
Returns Blade Server usage type
"""
return UsageType.objects.get_or_create(
symbol='blade_server',
defaults=dict(
name='Blade server',
)
)[0]
@plugin.register(chain='scrooge', requires=['asset', 'service'])
def blade_server(today, **kwargs):
"""
Updates Blade Servers usages from Ralph
"""
usage_type = get_usage_type()
new_blades = updated = total = 0
for data in get_blade_servers():
try:
if update_blade_server(data, today, usage_type):
new_blades += 1
else:
updated += 1
except AssetInfoNotFoundError:
logger.warning('Device {} not found'.format(data['device_id']))
except DailyAssetInfoNotFoundError:
logger.warning(
'DailyAssetInfo for id {} and date {} not found'.format(
data['device_id'],
today,
)
)
total += 1
return (
True,
'{} new Blade Servers usages, {} updated, {} total'.format(
new_blades,
updated,
total,
)
)
| 0 | 0 |
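update_usage() above relies on Django's get_or_create keyed by (date, type, daily_pricing_object), so re-running the plugin for the same day updates the existing row instead of inserting a duplicate. A plain-Python stand-in (no Django, names invented) illustrating that idempotent pattern:

_daily_usage = {}

def record_usage(date, usage_type, pricing_object, value):
    key = (date, usage_type, pricing_object)
    created = key not in _daily_usage   # mirrors the 'created' flag from get_or_create
    _daily_usage[key] = value
    return created

print(record_usage('2024-01-01', 'blade_server', 'asset-1', 1))  # True: new row
print(record_usage('2024-01-01', 'blade_server', 'asset-1', 1))  # False: updated in place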
1e0ac377a0f833c236fcddffaa7f7e63b266fd1d | 1,733 | py | Python | appCustomUser/models.py | celelstine/laundroxpress | 224a02902457e5e8662b1fa16e90098f56bff6f1 | [
"MIT"
] | null | null | null | appCustomUser/models.py | celelstine/laundroxpress | 224a02902457e5e8662b1fa16e90098f56bff6f1 | [
"MIT"
] | null | null | null | appCustomUser/models.py | celelstine/laundroxpress | 224a02902457e5e8662b1fa16e90098f56bff6f1 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import (
AbstractUser,
BaseUserManager
)
class AppUserManager(BaseUserManager):
def create_user(self, email, password=None):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
username=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_staffuser(self, email, password):
"""
Creates and saves a staff user with the given email and password.
"""
user = self.create_user(
email,
password=password,
)
user.is_staff = True
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""
Creates and saves a superuser with the given email and password.
"""
user = self.create_user(
email,
password=password,
)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractUser):
email = models.EmailField(
max_length=255,
unique=True,
)
username = models.TextField(blank=True, null=True)
phone_number = models.CharField(
blank=True, null=True, unique=True, max_length=25)
address = models.TextField(blank=True, null=True)
objects = AppUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = [] # Email & Password are required by default.
| 26.257576 | 73 | 0.605309 | from django.db import models
from django.contrib.auth.models import (
AbstractUser,
BaseUserManager
)
class AppUserManager(BaseUserManager):
def create_user(self, email, password=None):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
username=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_staffuser(self, email, password):
"""
Creates and saves a staff user with the given email and password.
"""
user = self.create_user(
email,
password=password,
)
user.is_staff = True
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""
Creates and saves a superuser with the given email and password.
"""
user = self.create_user(
email,
password=password,
)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractUser):
email = models.EmailField(
max_length=255,
unique=True,
)
username = models.TextField(blank=True, null=True)
phone_number = models.CharField(
blank=True, null=True, unique=True, max_length=25)
address = models.TextField(blank=True, null=True)
objects = AppUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = [] # Email & Password are required by default.
| 0 | 0 |
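A hedged usage sketch for the custom user model above; it assumes a configured Django project with AUTH_USER_MODEL = 'appCustomUser.User' and applied migrations, so treat it as illustration rather than a runnable test:

from appCustomUser.models import User

user = User.objects.create_user(email='[email protected]', password='s3cret')
staff = User.objects.create_staffuser(email='[email protected]', password='s3cret')
admin = User.objects.create_superuser(email='[email protected]', password='s3cret')

# create_user copies the normalized email into username, and USERNAME_FIELD
# is 'email', so users authenticate with their email address.
print(user.username == user.email)  # True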
969a4e2c193cd1b6ea7f45a8a1316bd133df7a37 | 663 | py | Python | get_pavlick_dict.py | bhaddow/pmindia-crawler | 1282b1151f4d41f2c817d2df3f718889384ea95f | [
"MIT"
] | 11 | 2020-01-29T05:29:59.000Z | 2022-02-21T09:38:24.000Z | get_pavlick_dict.py | bhaddow/pmindia-crawler | 1282b1151f4d41f2c817d2df3f718889384ea95f | [
"MIT"
] | null | null | null | get_pavlick_dict.py | bhaddow/pmindia-crawler | 1282b1151f4d41f2c817d2df3f718889384ea95f | [
"MIT"
] | 3 | 2020-03-24T20:50:18.000Z | 2020-11-30T02:33:05.000Z | #!/usr/bin/env python3
#
# Convert Pavlick's dictionary to hunalign
#
import argparse
import re
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dict", default="/home/bhaddow/data/pavlick-dicts/dict.hi")
args = parser.parse_args()
brackets = re.compile("\[[^\]]*\]")
delim = re.compile("[\t,/]")
with open(args.dict) as ifh:
for line in ifh:
line = brackets.sub("", line[:-1])
fields = delim.split(line)
for e in fields[1:]:
e = e.strip()
if e and fields[0]:
if e == "fullstop": e = "."
print("{} @ {}".format(fields[0],e))
if __name__ == "__main__":
main()
| 22.862069 | 89 | 0.576169 | #!/usr/bin/env python3
#
# Convert Pavlick's dictionary to hunalign
#
import argparse
import re
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dict", default="/home/bhaddow/data/pavlick-dicts/dict.hi")
args = parser.parse_args()
brackets = re.compile("\[[^\]]*\]")
delim = re.compile("[\t,/]")
with open(args.dict) as ifh:
for line in ifh:
line = brackets.sub("", line[:-1])
fields = delim.split(line)
for e in fields[1:]:
e = e.strip()
if e and fields[0]:
if e == "fullstop": e = "."
print("{} @ {}".format(fields[0],e))
if __name__ == "__main__":
main()
| 0 | 0 |
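The converter above strips bracketed annotations and splits on tab, comma, or slash before emitting hunalign's "source @ target" pairs. A tiny self-contained demo on a made-up input line (the entry itself is invented for illustration):

import re

brackets = re.compile(r"\[[^\]]*\]")
delim = re.compile(r"[\t,/]")

line = "namaste\thello [greeting],hi/fullstop"
line = brackets.sub("", line)
fields = delim.split(line)
for e in fields[1:]:
    e = e.strip()
    if e and fields[0]:
        if e == "fullstop":
            e = "."
        print("{} @ {}".format(fields[0], e))
# namaste @ hello
# namaste @ hi
# namaste @ .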
8203e968123e42ef425551ec0f073dd160a7b50d | 762 | py | Python | rp/models/route_filter_list.py | stellaraf/rp | c4d7a23001060d11dbd1a1c4f285a58a28ed4326 | [
"BSD-3-Clause"
] | 1 | 2020-03-08T08:22:37.000Z | 2020-03-08T08:22:37.000Z | rp/models/route_filter_list.py | stellaraf/rp | c4d7a23001060d11dbd1a1c4f285a58a28ed4326 | [
"BSD-3-Clause"
] | null | null | null | rp/models/route_filter_list.py | stellaraf/rp | c4d7a23001060d11dbd1a1c4f285a58a28ed4326 | [
"BSD-3-Clause"
] | null | null | null | # Standard Library
from typing import List, Union, Optional
from ipaddress import IPv4Network, IPv6Network
# Third Party
from pydantic import StrictStr
# Project
from rp.models._common import Flag, RPModel
class RouteFilterEntry(RPModel):
"""JunOS route-filter-list item JSON model."""
address: Union[IPv4Network, IPv6Network]
longer: Flag
orlonger: Flag
exact: Flag
prefix_length_range: Optional[StrictStr]
through: Optional[StrictStr]
upto: Optional[StrictStr]
class Config:
"""Pydantic config overrides."""
fields = {"prefix_length_range": "prefix-length-range"}
class RouteFilterList(RPModel):
"""JunOS route-filter-list JSON model."""
name: StrictStr
rf_list: List[RouteFilterEntry]
| 22.411765 | 63 | 0.71916 | # Standard Library
from typing import List, Union, Optional
from ipaddress import IPv4Network, IPv6Network
# Third Party
from pydantic import StrictStr
# Project
from rp.models._common import Flag, RPModel
class RouteFilterEntry(RPModel):
"""JunOS route-filter-list item JSON model."""
address: Union[IPv4Network, IPv6Network]
longer: Flag
orlonger: Flag
exact: Flag
prefix_length_range: Optional[StrictStr]
through: Optional[StrictStr]
upto: Optional[StrictStr]
class Config:
"""Pydantic config overrides."""
fields = {"prefix_length_range": "prefix-length-range"}
class RouteFilterList(RPModel):
"""JunOS route-filter-list JSON model."""
name: StrictStr
rf_list: List[RouteFilterEntry]
| 0 | 0 |
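One possible way to instantiate the pydantic models above from broker JSON; Flag and RPModel are project-specific, so the accepted flag values and config behaviour here are assumptions:

data = {
    "name": "customers-v4",
    "rf_list": [
        {"address": "192.0.2.0/24", "longer": False, "orlonger": True, "exact": False},
    ],
}
rfl = RouteFilterList.parse_obj(data)
print(rfl.rf_list[0].address)  # IPv4Network('192.0.2.0/24')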
420c7338283bf739005666061a1028136dc3f7ba | 3,234 | py | Python | bcs-ui/backend/templatesets/legacy_apps/configuration/validator.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 599 | 2019-06-25T03:20:46.000Z | 2022-03-31T12:14:33.000Z | bcs-ui/backend/templatesets/legacy_apps/configuration/validator.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 537 | 2019-06-27T06:03:44.000Z | 2022-03-31T12:10:01.000Z | bcs-ui/backend/templatesets/legacy_apps/configuration/validator.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 214 | 2019-06-25T03:26:05.000Z | 2022-03-31T07:52:03.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making PaaS (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
from django.utils.translation import ugettext_lazy as _
from jsonschema import SchemaError
from jsonschema import ValidationError as JsonValidationError
from jsonschema import validate as json_validate
from rest_framework.exceptions import ValidationError
from .constants import KEY_PATTERN, NUM_VAR_ERROR_MSG, REAL_NUM_VAR_PATTERN
from .models import VersionedEntity, get_model_class_by_resource_name
def get_name_from_config(config):
return config.get('metadata', {}).get('name') or ''
def is_name_duplicate(resource_name, resource_id, name, version_id):
""""""
#
model_class = get_model_class_by_resource_name(resource_name)
try:
resource = model_class.objects.get(id=resource_id)
if name == resource.name:
return False
except model_class.DoesNotExist:
pass
    # only check for duplicates within the current version
try:
version_entity = VersionedEntity.objects.get(id=version_id)
except VersionedEntity.DoesNotExist:
return False
else:
entity = version_entity.get_entity()
resource_ids = entity.get(resource_name, '')
if not resource_ids:
return False
if model_class.objects.filter(name=name, id__in=resource_ids.split(',')):
return True
return False
def validate_variable_inconfig(config):
""""""
search_list = KEY_PATTERN.findall(json.dumps(config))
search_keys = set(search_list)
for ikey in search_keys:
if not REAL_NUM_VAR_PATTERN.match(ikey):
            raise ValidationError(_('Variable [{}] is invalid, {}').format(ikey, NUM_VAR_ERROR_MSG))
def validate_res_config(config, resource_name, schema):
    err_prefix = '{resource_name} {suffix_msg}'.format(resource_name=resource_name, suffix_msg=_("configuration format error"))
try:
json_validate(config, schema)
except JsonValidationError as e:
raise ValidationError(f'{err_prefix}:{e.message}')
except SchemaError as e:
raise ValidationError(f'{err_prefix}:{e}')
def validate_name_duplicate(data):
resource_id = data.get('resource_id', None)
version_id = data.get('version_id', None)
if resource_id is None or version_id is None:
return
resource_name = data['resource_name']
name = data['name']
is_duplicate = is_name_duplicate(resource_name, resource_id, name, version_id)
if is_duplicate:
        raise ValidationError(_('{} name: {} is already used in the project template, please choose another').format(resource_name, name))
| 37.172414 | 115 | 0.732839 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
from django.utils.translation import ugettext_lazy as _
from jsonschema import SchemaError
from jsonschema import ValidationError as JsonValidationError
from jsonschema import validate as json_validate
from rest_framework.exceptions import ValidationError
from .constants import KEY_PATTERN, NUM_VAR_ERROR_MSG, REAL_NUM_VAR_PATTERN
from .models import VersionedEntity, get_model_class_by_resource_name
def get_name_from_config(config):
return config.get('metadata', {}).get('name') or ''
def is_name_duplicate(resource_name, resource_id, name, version_id):
"""同一类资源的名称不能重复"""
# 判断新名称与老名称是否一致,如果一致,则不会重复
model_class = get_model_class_by_resource_name(resource_name)
try:
resource = model_class.objects.get(id=resource_id)
if name == resource.name:
return False
except model_class.DoesNotExist:
pass
# 只校验当前版本内是否重复
try:
version_entity = VersionedEntity.objects.get(id=version_id)
except VersionedEntity.DoesNotExist:
return False
else:
entity = version_entity.get_entity()
resource_ids = entity.get(resource_name, '')
if not resource_ids:
return False
if model_class.objects.filter(name=name, id__in=resource_ids.split(',')):
return True
return False
def validate_variable_inconfig(config):
"""校验配置文件中的变量名是否合法"""
search_list = KEY_PATTERN.findall(json.dumps(config))
search_keys = set(search_list)
for ikey in search_keys:
if not REAL_NUM_VAR_PATTERN.match(ikey):
raise ValidationError(_('变量[{}]不合法, {}').format(ikey, NUM_VAR_ERROR_MSG))
def validate_res_config(config, resource_name, schema):
err_prefix = '{resource_name} {suffix_msg}'.format(resource_name=resource_name, suffix_msg=_("配置信息格式错误"))
try:
json_validate(config, schema)
except JsonValidationError as e:
raise ValidationError(f'{err_prefix}:{e.message}')
except SchemaError as e:
raise ValidationError(f'{err_prefix}:{e}')
def validate_name_duplicate(data):
resource_id = data.get('resource_id', None)
version_id = data.get('version_id', None)
if resource_id is None or version_id is None:
return
resource_name = data['resource_name']
name = data['name']
is_duplicate = is_name_duplicate(resource_name, resource_id, name, version_id)
if is_duplicate:
raise ValidationError(_('{}名称:{}已经在项目模板中被占用,请重新填写').format(resource_name, name))
| 309 | 0 |
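validate_res_config() above is a thin wrapper around jsonschema.validate. A standalone example of that call with a made-up schema (not one of the project's real resource schemas):

from jsonschema import ValidationError as JsonValidationError
from jsonschema import validate as json_validate

schema = {
    "type": "object",
    "required": ["metadata"],
    "properties": {
        "metadata": {
            "type": "object",
            "required": ["name"],
            "properties": {"name": {"type": "string"}},
        },
    },
}

json_validate({"metadata": {"name": "nginx-deployment"}}, schema)  # passes

try:
    json_validate({"metadata": {}}, schema)
except JsonValidationError as e:
    print("invalid config:", e.message)  # 'name' is a required property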
a44c6e197f39d490a7a355053f89a213c3c72549 | 10,410 | py | Python | jocular/devicemanager.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
] | 6 | 2021-03-21T16:46:44.000Z | 2021-11-27T14:07:06.000Z | jocular/devicemanager.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
] | null | null | null | jocular/devicemanager.py | MartinCooke/jocular | 635816d4ef6aa6ea75187137e25386dad2d551e9 | [
"MIT"
] | null | null | null | '''
DeviceManager:
a Component that manages different device families e.g. Telescope, Camera, FilterWheel
via a GUI element that permits selection/connection/disconnection
DeviceFamily:
superclass of e.g. Camera, Telescope, FilterWheel
handles communication with devices for generic functions such as
select, connect, disconnect as well as common error handling
Device:
superclass of device instances e.g. SXCamera, ASCOMFilterWheel
'''
import json
import importlib
from functools import partial
from kivy.app import App
from loguru import logger
from kivy.metrics import dp
from kivy.uix.spinner import Spinner
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.event import EventDispatcher
from kivy.core.window import Window
from kivy.properties import (
ObjectProperty,
StringProperty, BooleanProperty, DictProperty
)
from kivy.clock import Clock
from jocular.component import Component
from jocular.settingsmanager import SettingsBase
from jocular.widgets import jicon, LabelL
from jocular.formwidgets import configurable_to_widget
from kivy.lang import Builder
Builder.load_string('''
<DeviceManager>:
canvas:
Color:
rgba: .2, .2, .2, .7
Ellipse:
pos: self.x + dp(58) + (self.width - self.height) / 2, dp(58)
size: self.height - dp(116), self.height - dp(116)
orientation: 'vertical'
pos_hint: {'center_x': 10, 'center_y': .5}
''')
class DeviceManager(Component, BoxLayout):
devices = {'Camera': 'Camera', 'Telescope': 'Telescope', 'FilterWheel': 'Filter wheel'}
def __init__(self, **args):
super().__init__(**args)
self.app = App.get_running_app()
self.status = {}
self.connect_buttons = {}
self.connect_dots = {}
self.size = Window.size
self.app.gui.add_widget(self)
def show(self, *args):
Component.get('SettingsManager').hide()
if self.pos_hint['center_x'] > 1:
self.show_device_manager()
self.pos_hint = {'center_x': .5, 'center_y': .5}
def hide(self, *args):
if self.pos_hint['center_x'] < 1:
self.pos_hint = {'center_x': 10, 'center_y': .5}
def show_device_manager(self):
''' Main device manager panel that handles mode selection and connection,
and links to configuration of current devices.
'''
self.clear_widgets()
self.add_widget(Label(size_hint=(1, None), height=dp(90)))
self.add_widget(Label(size_hint=(1, None), height=dp(60), text='Your devices', font_size='24sp'))
self.add_widget(Label(size_hint=(1, 1)))
for device, name in self.devices.items():
current_device = Component.get(device).device
bh = BoxLayout(size_hint=(1, None), height=dp(40))
bh.add_widget(Label(size_hint=(1, 1)))
# connection status
lab = self.connect_dots[device] = LabelL(size_hint=(None, 1), width=dp(24), markup=True,
text=jicon('dot', color='g' if current_device.connected else 'r'))
bh.add_widget(lab)
# device family
bh.add_widget(LabelL(text=name, size_hint=(None, 1), width=dp(120)))
# device chooser
spinner = Spinner(size_hint=(None, 1), width=dp(120),
text=Component.get(device).settings['current_mode'],
values=Component.get(device).modes.keys())
spinner.bind(text=partial(self.mode_changed, device))
bh.add_widget(spinner)
            # mid spacer
bh.add_widget(Label(size_hint=(None, 1), width=dp(40)))
# connect/disconnect button
but = self.connect_buttons[device] = Button(size_hint=(None, 1), width=dp(120),
text='disconnect...' if current_device.connected else 'connect...',
on_press=partial(self.connect, device))
bh.add_widget(but)
# configure icon
lab = Button(size_hint=(None, 1), width=dp(140),
markup=True, background_color=(0, 0, 0, 0),
text=jicon('settings'), on_press=partial(self.config, device))
bh.add_widget(lab)
bh.add_widget(Label(size_hint=(1, 1)))
self.add_widget(bh)
# connection status message
bh = BoxLayout(padding=(10, 1), size_hint=(1, None), height=dp(40))
status = self.status[device] = Label(text=current_device.status,
size_hint=(1, 1), color=(.5, .5, .5, 1))
bh.add_widget(status)
self.add_widget(bh)
# inter-device spacer
# self.add_widget(Label(size_hint=(1, None), height=dp(40)))
self.add_widget(Label(size_hint=(1, 1)))
# done button
hb = BoxLayout(size_hint=(1, None), height=dp(30))
hb.add_widget(Label(size_hint=(1, 1)))
hb.add_widget(Button(size_hint=(None, 1), width=dp(100), text='close',
on_press=self.hide))
hb.add_widget(Label(size_hint=(1, 1)))
self.add_widget(hb)
self.add_widget(Label(size_hint=(1, None), height=dp(90)))
def mode_changed(self, device, spinner, mode):
Component.get(device).set_mode(mode)
def connect(self, device, widget=None):
try:
if self.connect_buttons[device].text == 'connect...':
Component.get(device).connect()
else:
Component.get(device).disconnect()
Component.get(device).save()
except Exception as e:
logger.exception(e)
def status_changed(self, device, status):
if device in self.status:
self.status[device].text = status
def connection_changed(self, device, connected):
if device in self.connect_dots:
self.connect_dots[device].text = jicon('dot', color=('g' if connected else 'r'))
Component.get(device).info('not connected')
if device in self.connect_buttons:
self.connect_buttons[device].text = 'disconnect...' if connected else 'connect...'
Component.get(device).info('connected')
def config(self, device, *args):
''' user wants to configure device
'''
logger.debug('Configuring {:} device'.format(device))
try:
self.current_device = Component.get(device).device
self.changed_settings = {}
if self.current_device is not None:
self.show_device_config_panel(name=device, device=self.current_device)
except Exception as e:
logger.exception(e)
def show_device_config_panel(self, name=None, device=None):
''' Build device settings panel
'''
self.clear_widgets()
self.add_widget(Label(size_hint=(1, None), height=dp(75)))
self.add_widget(Label(text=device.name, size_hint=(1, None), height=dp(60),
font_size='24sp'))
self.add_widget(Label(size_hint=(1, 1))) # spacer
for pname, pspec in device.configurables:
self.add_widget(configurable_to_widget(
text=pspec.get('name', pname),
name=pname,
spec=pspec,
helptext=pspec.get('help', ''),
initval=getattr(self.current_device, pname),
changed=device.setting_changed))
self.add_widget(Label(size_hint=(1, 1))) # spacer
# done button
hb = BoxLayout(size_hint=(1, None), height=dp(30))
hb.add_widget(Label(size_hint=(1, 1)))
hb.add_widget(Button(size_hint=(None, 1), width=dp(150), text='back to devices',
on_press=self._save_settings))
hb.add_widget(Label(size_hint=(1, 1)))
self.add_widget(hb)
self.add_widget(Label(size_hint=(1, None), height=dp(75)))
@logger.catch()
def _save_settings(self, *args):
self.current_device.apply_and_save_settings()
self.show_device_manager()
def on_touch_down(self, touch):
handled = super().on_touch_down(touch)
if self.collide_point(*touch.pos):
return True
return handled
class DeviceFamily:
device = ObjectProperty(None)
# these three need to be set in each subclass
family = StringProperty('Unknown')
modes = DictProperty({})
default_mode = StringProperty('')
def __init__(self, **kwargs):
self.app = App.get_running_app()
try:
with open(self.app.get_path('{:}.json'.format(self.family)), 'r') as f:
self.settings = json.load(f)
except:
self.settings = {}
Clock.schedule_once(self.post_init, 0)
def post_init(self, dt):
self.set_mode(self.settings.get('current_mode', self.default_mode))
self.connect()
def save(self):
with open(self.app.get_path('{:}.json'.format(self.family)), 'w') as f:
json.dump(self.settings, f, indent=1)
def set_mode(self, mode):
self.disconnect()
try:
if mode in self.modes:
devmod = importlib.import_module('jocular.{:}'.format(self.family.lower()))
devclass = getattr(devmod, self.modes[mode])
self.device = devclass()
self.settings['current_mode'] = mode
self.device.settings_have_changed()
# self.save()
except Exception as e:
logger.exception(e)
def get_configurables(self):
if self.device is not None:
return self.device.configurables
def configure(self):
if self.device is not None:
logger.debug('family {:} settings {:}'.format(self.family, self.settings['current_mode']))
self.device.configure()
def connect(self):
logger.debug('Connecting {:} (current mode: {:})'.format(
self.family, self.settings['current_mode']))
if self.device is not None:
self.device.connect()
# only save current mode if we are able to connect
if self.device.connected:
self.save()
self.device_connected()
self.device.on_new_object()
def disconnect(self):
if self.device is None:
return
if self.connected():
self.device.disconnect()
self.device_disconnected()
def connected(self):
if self.device is None:
return False
return self.device.connected
def device_connected(self):
pass
def device_disconnected(self):
pass
def on_close(self, *args):
if self.connected():
self.disconnect()
def choose(self, *args):
if self.device is not None:
self.device.choose()
''' Each actual device e.g. ASCOMTelescope, ManualFilterwheel etc is a subclass of this
'''
class Device(EventDispatcher, SettingsBase):
connected = BooleanProperty(False)
status = StringProperty('')
family = StringProperty('unknown family')
def on_close(self):
pass
def on_new_object(self):
pass
def on_previous_object(self):
pass
def connect(self):
self.status = 'Not implemented for this {:}'.format(self.family)
self.connected = False
def disconnect(self):
self.status = 'not connected'
self.connected = False
def on_connected(self, *args):
Component.get('DeviceManager').connection_changed(self.family, self.connected)
def on_status(self, *args):
Component.get('DeviceManager').status_changed(self.family, self.status)
def select(self, f):
return None
def choose(self):
pass
def handle_failure(self, message='problem'):
logger.error('{:}: failure {:}'.format(self.family, message))
self.disconnect()
self.connected = False
self.status = message
if hasattr(self, 'on_failure') and self.on_failure is not None:
self.on_failure()
| 28.598901 | 99 | 0.708165 | '''
DeviceManager:
a Component that manages different device families e.g. Telescope, Camera, FilterWheel
via a GUI element that permits selection/connection/disconnection
DeviceFamily:
superclass of e.g. Camera, Telescope, FilterWheel
handles communication with devices for generic functions such as
select, connect, disconnect as well as common error handling
Device:
superclass of device instances e.g. SXCamera, ASCOMFilterWheel
'''
import json
import importlib
from functools import partial
from kivy.app import App
from loguru import logger
from kivy.metrics import dp
from kivy.uix.spinner import Spinner
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.event import EventDispatcher
from kivy.core.window import Window
from kivy.properties import (
ObjectProperty,
StringProperty, BooleanProperty, DictProperty
)
from kivy.clock import Clock
from jocular.component import Component
from jocular.settingsmanager import SettingsBase
from jocular.widgets import jicon, LabelL
from jocular.formwidgets import configurable_to_widget
from kivy.lang import Builder
Builder.load_string('''
<DeviceManager>:
canvas:
Color:
rgba: .2, .2, .2, .7
Ellipse:
pos: self.x + dp(58) + (self.width - self.height) / 2, dp(58)
size: self.height - dp(116), self.height - dp(116)
orientation: 'vertical'
pos_hint: {'center_x': 10, 'center_y': .5}
''')
class DeviceManager(Component, BoxLayout):
devices = {'Camera': 'Camera', 'Telescope': 'Telescope', 'FilterWheel': 'Filter wheel'}
def __init__(self, **args):
super().__init__(**args)
self.app = App.get_running_app()
self.status = {}
self.connect_buttons = {}
self.connect_dots = {}
self.size = Window.size
self.app.gui.add_widget(self)
def show(self, *args):
Component.get('SettingsManager').hide()
if self.pos_hint['center_x'] > 1:
self.show_device_manager()
self.pos_hint = {'center_x': .5, 'center_y': .5}
def hide(self, *args):
if self.pos_hint['center_x'] < 1:
self.pos_hint = {'center_x': 10, 'center_y': .5}
def show_device_manager(self):
''' Main device manager panel that handles mode selection and connection,
and links to configuration of current devices.
'''
self.clear_widgets()
self.add_widget(Label(size_hint=(1, None), height=dp(90)))
self.add_widget(Label(size_hint=(1, None), height=dp(60), text='Your devices', font_size='24sp'))
self.add_widget(Label(size_hint=(1, 1)))
for device, name in self.devices.items():
current_device = Component.get(device).device
bh = BoxLayout(size_hint=(1, None), height=dp(40))
bh.add_widget(Label(size_hint=(1, 1)))
# connection status
lab = self.connect_dots[device] = LabelL(size_hint=(None, 1), width=dp(24), markup=True,
text=jicon('dot', color='g' if current_device.connected else 'r'))
bh.add_widget(lab)
# device family
bh.add_widget(LabelL(text=name, size_hint=(None, 1), width=dp(120)))
# device chooser
spinner = Spinner(size_hint=(None, 1), width=dp(120),
text=Component.get(device).settings['current_mode'],
values=Component.get(device).modes.keys())
spinner.bind(text=partial(self.mode_changed, device))
bh.add_widget(spinner)
# mid spacer
bh.add_widget(Label(size_hint=(None, 1), width=dp(40)))
# connect/disconnect button
but = self.connect_buttons[device] = Button(size_hint=(None, 1), width=dp(120),
text='disconnect...' if current_device.connected else 'connect...',
on_press=partial(self.connect, device))
bh.add_widget(but)
# configure icon
lab = Button(size_hint=(None, 1), width=dp(140),
markup=True, background_color=(0, 0, 0, 0),
text=jicon('settings'), on_press=partial(self.config, device))
bh.add_widget(lab)
bh.add_widget(Label(size_hint=(1, 1)))
self.add_widget(bh)
# connection status message
bh = BoxLayout(padding=(10, 1), size_hint=(1, None), height=dp(40))
status = self.status[device] = Label(text=current_device.status,
size_hint=(1, 1), color=(.5, .5, .5, 1))
bh.add_widget(status)
self.add_widget(bh)
# inter-device spacer
# self.add_widget(Label(size_hint=(1, None), height=dp(40)))
self.add_widget(Label(size_hint=(1, 1)))
# done button
hb = BoxLayout(size_hint=(1, None), height=dp(30))
hb.add_widget(Label(size_hint=(1, 1)))
hb.add_widget(Button(size_hint=(None, 1), width=dp(100), text='close',
on_press=self.hide))
hb.add_widget(Label(size_hint=(1, 1)))
self.add_widget(hb)
self.add_widget(Label(size_hint=(1, None), height=dp(90)))
def mode_changed(self, device, spinner, mode):
Component.get(device).set_mode(mode)
def connect(self, device, widget=None):
try:
if self.connect_buttons[device].text == 'connect...':
Component.get(device).connect()
else:
Component.get(device).disconnect()
Component.get(device).save()
except Exception as e:
logger.exception(e)
def status_changed(self, device, status):
if device in self.status:
self.status[device].text = status
def connection_changed(self, device, connected):
if device in self.connect_dots:
self.connect_dots[device].text = jicon('dot', color=('g' if connected else 'r'))
Component.get(device).info('not connected')
if device in self.connect_buttons:
self.connect_buttons[device].text = 'disconnect...' if connected else 'connect...'
Component.get(device).info('connected')
def config(self, device, *args):
''' user wants to configure device
'''
logger.debug('Configuring {:} device'.format(device))
try:
self.current_device = Component.get(device).device
self.changed_settings = {}
if self.current_device is not None:
self.show_device_config_panel(name=device, device=self.current_device)
except Exception as e:
logger.exception(e)
def show_device_config_panel(self, name=None, device=None):
''' Build device settings panel
'''
self.clear_widgets()
self.add_widget(Label(size_hint=(1, None), height=dp(75)))
self.add_widget(Label(text=device.name, size_hint=(1, None), height=dp(60),
font_size='24sp'))
self.add_widget(Label(size_hint=(1, 1))) # spacer
for pname, pspec in device.configurables:
self.add_widget(configurable_to_widget(
text=pspec.get('name', pname),
name=pname,
spec=pspec,
helptext=pspec.get('help', ''),
initval=getattr(self.current_device, pname),
changed=device.setting_changed))
self.add_widget(Label(size_hint=(1, 1))) # spacer
# done button
hb = BoxLayout(size_hint=(1, None), height=dp(30))
hb.add_widget(Label(size_hint=(1, 1)))
hb.add_widget(Button(size_hint=(None, 1), width=dp(150), text='back to devices',
on_press=self._save_settings))
hb.add_widget(Label(size_hint=(1, 1)))
self.add_widget(hb)
self.add_widget(Label(size_hint=(1, None), height=dp(75)))
@logger.catch()
def _save_settings(self, *args):
self.current_device.apply_and_save_settings()
self.show_device_manager()
def on_touch_down(self, touch):
handled = super().on_touch_down(touch)
if self.collide_point(*touch.pos):
return True
return handled
class DeviceFamily:
device = ObjectProperty(None)
# these three need to be set in each subclass
family = StringProperty('Unknown')
modes = DictProperty({})
default_mode = StringProperty('')
def __init__(self, **kwargs):
self.app = App.get_running_app()
try:
with open(self.app.get_path('{:}.json'.format(self.family)), 'r') as f:
self.settings = json.load(f)
except:
self.settings = {}
Clock.schedule_once(self.post_init, 0)
def post_init(self, dt):
self.set_mode(self.settings.get('current_mode', self.default_mode))
self.connect()
def save(self):
with open(self.app.get_path('{:}.json'.format(self.family)), 'w') as f:
json.dump(self.settings, f, indent=1)
def set_mode(self, mode):
self.disconnect()
try:
if mode in self.modes:
devmod = importlib.import_module('jocular.{:}'.format(self.family.lower()))
devclass = getattr(devmod, self.modes[mode])
self.device = devclass()
self.settings['current_mode'] = mode
self.device.settings_have_changed()
# self.save()
except Exception as e:
logger.exception(e)
def get_configurables(self):
if self.device is not None:
return self.device.configurables
def configure(self):
if self.device is not None:
logger.debug('family {:} settings {:}'.format(self.family, self.settings['current_mode']))
self.device.configure()
def connect(self):
logger.debug('Connecting {:} (current mode: {:})'.format(
self.family, self.settings['current_mode']))
if self.device is not None:
self.device.connect()
# only save current mode if we are able to connect
if self.device.connected:
self.save()
self.device_connected()
self.device.on_new_object()
def disconnect(self):
if self.device is None:
return
if self.connected():
self.device.disconnect()
self.device_disconnected()
def connected(self):
if self.device is None:
return False
return self.device.connected
def device_connected(self):
pass
def device_disconnected(self):
pass
def on_close(self, *args):
if self.connected():
self.disconnect()
def choose(self, *args):
if self.device is not None:
self.device.choose()
''' Each actual device e.g. ASCOMTelescope, ManualFilterwheel etc is a subclass of this
'''
class Device(EventDispatcher, SettingsBase):
connected = BooleanProperty(False)
status = StringProperty('')
family = StringProperty('unknown family')
def on_close(self):
pass
def on_new_object(self):
pass
def on_previous_object(self):
pass
def connect(self):
self.status = 'Not implemented for this {:}'.format(self.family)
self.connected = False
def disconnect(self):
self.status = 'not connected'
self.connected = False
def on_connected(self, *args):
Component.get('DeviceManager').connection_changed(self.family, self.connected)
def on_status(self, *args):
Component.get('DeviceManager').status_changed(self.family, self.status)
def select(self, f):
return None
def choose(self):
pass
def handle_failure(self, message='problem'):
logger.error('{:}: failure {:}'.format(self.family, message))
self.disconnect()
self.connected = False
self.status = message
if hasattr(self, 'on_failure') and self.on_failure is not None:
self.on_failure()
| 2 | 0 |
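A minimal sketch of a concrete device for the framework above, only to illustrate the contract: assigning status and connected fires on_status()/on_connected(), which notify the DeviceManager panel. The class name and behaviour are hypothetical, not part of jocular.

from kivy.properties import StringProperty

class DummyTelescope(Device):

    family = StringProperty('Telescope')

    def connect(self):
        # a real driver would open its serial/ASCOM/INDI connection here
        self.status = 'connected to dummy telescope'
        self.connected = True

    def disconnect(self):
        self.status = 'not connected'
        self.connected = False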
6acf6c0ad0633882340e332142971aa701a8f474 | 599 | py | Python | ContextualBehavioural/rf_merging/Rule.py | cybersoton/ml-ac | 0b5e65fc875287ac4e8502dd0b0690d8572ea1b3 | [
"MIT"
] | null | null | null | ContextualBehavioural/rf_merging/Rule.py | cybersoton/ml-ac | 0b5e65fc875287ac4e8502dd0b0690d8572ea1b3 | [
"MIT"
] | 1 | 2019-06-27T11:06:13.000Z | 2019-06-27T11:06:13.000Z | ContextualBehavioural/rf_merging/Rule.py | cybersoton/ml-ac | 0b5e65fc875287ac4e8502dd0b0690d8572ea1b3 | [
"MIT"
] | null | null | null | class Rule:
def __init__(self, exprs, label):
self.antec = exprs # list of Expression
self.cons = label
def searchantec(self, f):
lst = []
for i in self.antec:
if f == i.f:
lst.append(i)
return lst
def __str__(self):
res = ""
for i in range(len(self.antec)):
res = res + str(self.antec[i])
if i < len(self.antec) - 1:
res = res + " & "
return res + " ==> " + str(self.cons)
def __eq__(self, other):
return self.__dict__ == other.__dict__
| 24.958333 | 48 | 0.480801 | class Rule:
def __init__(self, exprs, label):
self.antec = exprs # list of Expression
self.cons = label
def searchantec(self, f):
lst = []
for i in self.antec:
if f == i.f:
lst.append(i)
return lst
def __str__(self):
res = ""
for i in range(len(self.antec)):
res = res + str(self.antec[i])
if i < len(self.antec) - 1:
res = res + " & "
return res + " ==> " + str(self.cons)
def __eq__(self, other):
return self.__dict__ == other.__dict__
| 0 | 0 |
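Rule above only assumes its antecedents expose an attribute f and support str()/==; the real Expression class lives elsewhere in rf_merging. A stand-in expression, used purely to show how a rule composes and prints:

class Expr:
    def __init__(self, f, op, thr):
        self.f, self.op, self.thr = f, op, thr

    def __str__(self):
        return "x{} {} {:.2f}".format(self.f, self.op, self.thr)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

r = Rule([Expr(0, "<=", 1.50), Expr(2, ">", 0.30)], 1)
print(r)                      # x0 <= 1.50 & x2 > 0.30 ==> 1
print(len(r.searchantec(2)))  # 1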
68b6b85d463890a467a9a4397e9accc324f29eae | 567 | py | Python | use-cases/plc+opc/camera.py | TiagoDaFonseca/plc-machine-vision | 22864d2e09bcc52971fbd2a0088f38878f6b59e3 | [
"MIT"
] | null | null | null | use-cases/plc+opc/camera.py | TiagoDaFonseca/plc-machine-vision | 22864d2e09bcc52971fbd2a0088f38878f6b59e3 | [
"MIT"
] | null | null | null | use-cases/plc+opc/camera.py | TiagoDaFonseca/plc-machine-vision | 22864d2e09bcc52971fbd2a0088f38878f6b59e3 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import time
def connect( channel):
return cv2.VideoCapture(channel)
def capture_image (device,exposition):
cam= connect(device)
for i in range(exposition):
ret, bgr_img = cam.read()
cam.release()
return bgr_img
#Test unit
if __name__ == '__main__':
while True:
img = capture_image(0,10)
print(img)
time.sleep(2)
cv2.imshow("c",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 22.68 | 41 | 0.536155 | import numpy as np
import cv2
import time
def connect( channel):
return cv2.VideoCapture(channel)
def capture_image (device,exposition):
cam= connect(device)
for i in range(exposition):
ret, bgr_img = cam.read()
cam.release()
return bgr_img
#Test unit
if __name__ == '__main__':
while True:
img = capture_image(0,10)
print(img)
time.sleep(2)
cv2.imshow("c",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 0 | 0 |
7d0791575f89c9719c939871a996ca6932400251 | 23 | py | Python | devel/lib/python2.7/dist-packages/autolabor_pro1_driver/msg/__init__.py | lty1994/atuolabor | 42b8c52eac93a2e48fbd64275c7dd426a988000c | [
"Apache-2.0"
] | null | null | null | devel/lib/python2.7/dist-packages/autolabor_pro1_driver/msg/__init__.py | lty1994/atuolabor | 42b8c52eac93a2e48fbd64275c7dd426a988000c | [
"Apache-2.0"
] | null | null | null | devel/lib/python2.7/dist-packages/autolabor_pro1_driver/msg/__init__.py | lty1994/atuolabor | 42b8c52eac93a2e48fbd64275c7dd426a988000c | [
"Apache-2.0"
] | null | null | null | from ._Encode import *
| 11.5 | 22 | 0.73913 | from ._Encode import *
| 0 | 0 |
f20b27cf456d5eeefe90b7cde2cb3bf4272e1e3f | 129 | py | Python | tests/inputs/misc/81-builtin-funcs.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | [
"MIT"
] | 4 | 2019-10-06T18:01:24.000Z | 2020-07-03T05:27:35.000Z | tests/inputs/misc/81-builtin-funcs.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | [
"MIT"
] | 5 | 2021-06-07T15:50:04.000Z | 2021-06-07T15:50:06.000Z | tests/inputs/misc/81-builtin-funcs.py | helq/pytropos | 497ed5902e6e4912249ca0a46b477f9bfa6ae80a | [
"MIT"
] | null | null | null | a = float(2)
b = int(2.0)
c = bool(a)
d = float(None) # fails
e = int(None) # fails
f = bool(None) # fails
# show_store()
| 14.333333 | 24 | 0.550388 | a = float(2)
b = int(2.0)
c = bool(a)
d = float(None) # fails
e = int(None) # fails
f = bool(None) # fails
# show_store()
| 0 | 0 |
971441945df4116d6f750cbc934993c8e55ff602 | 605 | py | Python | Curso_Python_3_UDEMY/banco_dados/contatos_grupo.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | Curso_Python_3_UDEMY/banco_dados/contatos_grupo.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | Curso_Python_3_UDEMY/banco_dados/contatos_grupo.py | DanilooSilva/Cursos_de_Python | 8f167a4c6e16f01601e23b6f107578aa1454472d | [
"MIT"
] | null | null | null | from db import nova_conexao
from mysql.connector.errors import ProgrammingError
sql = '''
SELECT A.NOME,
A.TEL,
B.DESCRICAO
FROM CONTATOS A
INNER JOIN GRUPOS B ON A.IDGRUPO = B.ID
ORDER BY B.DESCRICAO, A.NOME
'''
with nova_conexao() as conexao:
try:
cursor = conexao.cursor()
cursor.execute(sql)
contatos = cursor.fetchall()
except ProgrammingError as e:
print(f'Erro: {e.msg}')
else:
for contato in contatos:
print(f'Nome: {contato[0]:10s} tel: {contato[1]:15s} grupo: {contato[2]}') | 27.5 | 86 | 0.591736 | from db import nova_conexao
from mysql.connector.errors import ProgrammingError
sql = '''
SELECT A.NOME,
A.TEL,
B.DESCRICAO
FROM CONTATOS A
INNER JOIN GRUPOS B ON A.IDGRUPO = B.ID
ORDER BY B.DESCRICAO, A.NOME
'''
with nova_conexao() as conexao:
try:
cursor = conexao.cursor()
cursor.execute(sql)
contatos = cursor.fetchall()
except ProgrammingError as e:
print(f'Erro: {e.msg}')
else:
for contato in contatos:
print(f'Nome: {contato[0]:10s} tel: {contato[1]:15s} grupo: {contato[2]}') | 0 | 0 |
e47ffe09852df85a98d02b97b19f9452b8ac0d20 | 275 | py | Python | 00/35.py | shuowangphd/lcpy | 18e11bf7ca77acacadeeef93bf6b7f1667eae2cd | [
"MIT"
] | null | null | null | 00/35.py | shuowangphd/lcpy | 18e11bf7ca77acacadeeef93bf6b7f1667eae2cd | [
"MIT"
] | null | null | null | 00/35.py | shuowangphd/lcpy | 18e11bf7ca77acacadeeef93bf6b7f1667eae2cd | [
"MIT"
] | null | null | null | class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
l,r = 0, len(nums)
while l < r:
mid = (l+r)//2
if target > nums[mid]:
l = mid+1
else:
r = mid
return l | 27.5 | 64 | 0.410909 | class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
l,r = 0, len(nums)
while l < r:
mid = (l+r)//2
if target > nums[mid]:
l = mid+1
else:
r = mid
return l | 0 | 0 |
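A quick sanity check for the binary-search insert position above (standard bisect_left semantics); running it standalone also needs `from typing import List` for the annotation, so treat it as illustrative:

sol = Solution()
assert sol.searchInsert([1, 3, 5, 6], 5) == 2   # target found at index 2
assert sol.searchInsert([1, 3, 5, 6], 2) == 1   # would be inserted at index 1
assert sol.searchInsert([1, 3, 5, 6], 7) == 4   # appended at the end
assert sol.searchInsert([], 3) == 0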
048f47a7d570fbb6837142446eb4b40f6f3b3111 | 2,924 | py | Python | logger/readers/mqtt_reader.py | anshika-agarwal/openrvdas | 69c0c53902a988b790faad8baa21a5f299d033df | [
"BSD-2-Clause"
] | null | null | null | logger/readers/mqtt_reader.py | anshika-agarwal/openrvdas | 69c0c53902a988b790faad8baa21a5f299d033df | [
"BSD-2-Clause"
] | null | null | null | logger/readers/mqtt_reader.py | anshika-agarwal/openrvdas | 69c0c53902a988b790faad8baa21a5f299d033df | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import json
import logging
import socket
import sys
from os.path import dirname, realpath; sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
from logger.utils.formats import Text
from logger.readers.reader import Reader
# Don't barf if they don't have redis installed. Only complain if
# they actually try to use it, below
try:
import paho.mqtt.client as mqtt # import the client | $ pip installing paho-mqtt is necessary
PAHO_ENABLED = True
except ModuleNotFoundError:
PAHO_ENABLED = False
################################################################################
class MQTTReader(Reader):
"""
Read messages from an mqtt broker
"""
def __init__(self, broker, channel, client_name):
"""
Read text records from the channel subscription.
```
broker MQTT broker to connect, broker format[###.###.#.#]
channel MQTT channel to read from, channel format[@broker/path_of_subscripton]
```
Instructions on how to start an MQTT broker:
1. First install the Mosquitto Broker :
```
sudo apt-get update
sudo apt-get install mosquitto
sudo apt-get install mosquitto-clients
```
2. The mosquitto service starts automatically when downloaded but use :
```
sudo service mosquitto start
sudo service mosquitto stop
```
to start and stop the service.
3. To test the install use:
```
netstat -at
```
and you should see the MQTT broker which is the port 1883
4. In order to manually subscribe to a client use :
```
mosquitto_sub -t "example/topic"
```
and publish a message by using
```
mosquitto_pub -m "published message" -t "certain/channel"
```
5. Mosquitto uses a configuration file "mosquitto.conf" which you can find in /etc/mosquitto folder
"""
super().__init__(output_format=Text)
if not PAHO_ENABLED:
raise ModuleNotFoundError('MQTTReader(): paho-mqtt is not installed. Please '
'try "pip install paho-mqtt" prior to use.')
self.broker = broker
self.channel = channel
self.client_name = client_name
try:
self.paho = mqtt.Client(client_name)
self.paho.connect(broker)
self.paho.subscribe(channel)
            while self.paho.loop() == 0:
pass
except mqtt.WebsocketConnectionError as e:
logging.error('Unable to connect to broker at %s:%s',
self.broker, self.channel)
raise e
############################
def read(self):
while True:
try:
self.paho.loop_forever()
message = next(iter(self.paho.listen()))
logging.debug('Got message "%s"', message)
if message.get('type', None) == 'message':
data = message.get('data', None)
if data:
return data
except KeyboardInterrupt:
self.paho.disconnect()
exit(0)
################################################################################
| 27.327103 | 104 | 0.624487 | #!/usr/bin/env python3
import json
import logging
import socket
import sys
from os.path import dirname, realpath; sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
from logger.utils.formats import Text
from logger.readers.reader import Reader
# Don't barf if they don't have redis installed. Only complain if
# they actually try to use it, below
try:
import paho.mqtt.client as mqtt # import the client | $ pip installing paho-mqtt is necessary
PAHO_ENABLED = True
except ModuleNotFoundError:
PAHO_ENABLED = False
################################################################################
class MQTTReader(Reader):
"""
Read messages from an mqtt broker
"""
def __init__(self, broker, channel, client_name):
"""
Read text records from the channel subscription.
```
broker MQTT broker to connect, broker format[###.###.#.#]
channel MQTT channel to read from, channel format[@broker/path_of_subscripton]
```
Instructions on how to start an MQTT broker:
1. First install the Mosquitto Broker :
```
sudo apt-get update
sudo apt-get install mosquitto
sudo apt-get install mosquitto-clients
```
2. The mosquitto service starts automatically when downloaded but use :
```
sudo service mosquitto start
sudo service mosquitto stop
```
to start and stop the service.
3. To test the install use:
```
netstat -at
```
and you should see the MQTT broker which is the port 1883
4. In order to manually subscribe to a client use :
```
mosquitto_sub -t "example/topic"
```
and publish a message by using
```
mosquitto_pub -m "published message" -t "certain/channel"
```
5. Mosquitto uses a configuration file "mosquitto.conf" which you can find in /etc/mosquitto folder
"""
super().__init__(output_format=Text)
if not PAHO_ENABLED:
raise ModuleNotFoundError('MQTTReader(): paho-mqtt is not installed. Please '
'try "pip install paho-mqtt" prior to use.')
self.broker = broker
self.channel = channel
self.client_name = client_name
try:
self.paho = mqtt.Client(client_name)
self.paho.connect(broker)
self.paho.subscribe(channel)
            while self.paho.loop() == 0:
pass
except mqtt.WebsocketConnectionError as e:
logging.error('Unable to connect to broker at %s:%s',
self.broker, self.channel)
raise e
############################
def read(self):
while True:
try:
self.paho.loop_forever()
message = next(iter(self.paho.listen()))
logging.debug('Got message "%s"', message)
if message.get('type', None) == 'message':
data = message.get('data', None)
if data:
return data
except KeyboardInterrupt:
self.paho.disconnect()
exit(0)
################################################################################
| 0 | 0 |
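The read() loop above calls self.paho.listen(), which is not part of paho-mqtt's client API (the method appears to have been carried over from a redis-based reader), so in practice messages arrive through an on_message callback. A hedged sketch of that callback-plus-queue pattern, independent of the OpenRVDAS class; broker address and topic are illustrative:

import queue
import paho.mqtt.client as mqtt

records = queue.Queue()

def on_message(client, userdata, message):
    # payload is bytes; decode before handing it to text consumers
    records.put(message.payload.decode('utf-8', errors='replace'))

client = mqtt.Client('sample_reader')
client.on_message = on_message
client.connect('localhost')          # broker address is illustrative
client.subscribe('openrvdas/test')   # channel name is illustrative
client.loop_start()                  # runs the network loop in a background thread

record = records.get()               # blocks until a message arrives
client.loop_stop()
client.disconnect()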
1ead3e61c0cfb9c1f187dba1bb471881875c24e4 | 1,167 | py | Python | synch/replication/consumer.py | luolin0313/synch | 1a4a1262c20a85fe06f2cb40291f0a066572518b | [
"Apache-2.0"
] | null | null | null | synch/replication/consumer.py | luolin0313/synch | 1a4a1262c20a85fe06f2cb40291f0a066572518b | [
"Apache-2.0"
] | null | null | null | synch/replication/consumer.py | luolin0313/synch | 1a4a1262c20a85fe06f2cb40291f0a066572518b | [
"Apache-2.0"
] | 1 | 2020-09-28T01:37:00.000Z | 2020-09-28T01:37:00.000Z | import logging
from synch.enums import ClickHouseEngine
from synch.factory import Global
from synch.replication.etl import etl_full
from synch.writer.collapsing_merge_tree import ClickHouseCollapsingMergeTree
from synch.writer.merge_tree import ClickHouseMergeTree
logger = logging.getLogger("synch.replication.consumer")
def consume(args):
settings = Global.settings
reader = Global.reader
broker = Global.broker
schema = args.schema
engine = settings.schema_settings.get(schema).get("clickhouse_engine")
if engine == ClickHouseEngine.merge_tree:
writer_cls = ClickHouseMergeTree
elif engine == ClickHouseEngine.collapsing_merge_tree:
writer_cls = ClickHouseCollapsingMergeTree
else:
raise NotImplementedError
writer = writer_cls(settings, broker)
tables = settings.schema_settings.get(schema).get("tables")
# try etl full
if settings.auto_full_etl:
etl_full(reader, writer, schema, tables)
tables_pk = {}
for table in tables:
tables_pk[table] = reader.get_primary_key(schema, table)
writer.start_consume(schema, tables_pk, args.last_msg_id, args.skip_error)
| 32.416667 | 78 | 0.75407 | import logging
from synch.enums import ClickHouseEngine
from synch.factory import Global
from synch.replication.etl import etl_full
from synch.writer.collapsing_merge_tree import ClickHouseCollapsingMergeTree
from synch.writer.merge_tree import ClickHouseMergeTree
logger = logging.getLogger("synch.replication.consumer")
def consume(args):
settings = Global.settings
reader = Global.reader
broker = Global.broker
schema = args.schema
engine = settings.schema_settings.get(schema).get("clickhouse_engine")
if engine == ClickHouseEngine.merge_tree:
writer_cls = ClickHouseMergeTree
elif engine == ClickHouseEngine.collapsing_merge_tree:
writer_cls = ClickHouseCollapsingMergeTree
else:
raise NotImplementedError
writer = writer_cls(settings, broker)
tables = settings.schema_settings.get(schema).get("tables")
# try etl full
if settings.auto_full_etl:
etl_full(reader, writer, schema, tables)
tables_pk = {}
for table in tables:
tables_pk[table] = reader.get_primary_key(schema, table)
writer.start_consume(schema, tables_pk, args.last_msg_id, args.skip_error)
| 0 | 0 |
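The engine dispatch in consume() above is a simple if/elif chain; one possible way to keep it extensible is a lookup table, sketched here (the names mirror the imports above, everything else is illustrative):

WRITER_CLASSES = {
    ClickHouseEngine.merge_tree: ClickHouseMergeTree,
    ClickHouseEngine.collapsing_merge_tree: ClickHouseCollapsingMergeTree,
}

def get_writer_cls(engine):
    try:
        return WRITER_CLASSES[engine]
    except KeyError:
        raise NotImplementedError(f"no writer registered for engine {engine!r}")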
5005e0852fc10d13f279f0d30c991920b9d07e96 | 71,131 | py | Python | jumeg/decompose/fourier_ica_plot.py | fboers/jumeg | e04896989faf72f4dbe7adf136e4d158d212f24a | [
"BSD-3-Clause"
] | 6 | 2015-04-10T07:13:07.000Z | 2021-12-12T04:04:37.000Z | jumeg/decompose/fourier_ica_plot.py | fboers/jumeg | e04896989faf72f4dbe7adf136e4d158d212f24a | [
"BSD-3-Clause"
] | 112 | 2015-01-07T10:19:24.000Z | 2022-02-01T15:48:16.000Z | jumeg/decompose/fourier_ica_plot.py | fboers/jumeg | e04896989faf72f4dbe7adf136e4d158d212f24a | [
"BSD-3-Clause"
] | 22 | 2015-03-11T12:19:50.000Z | 2021-11-20T04:24:42.000Z | # Authors: Lukas Breuer <[email protected]>
"""
----------------------------------------------------------------------
--- jumeg.decompose.fourier_ica_plot ---------------------------------
----------------------------------------------------------------------
 author     : Lukas Breuer
email : [email protected]
last update: 17.11.2016
version : 1.1
----------------------------------------------------------------------
This is a simple implementation to plot the results achieved by
applying FourierICA
----------------------------------------------------------------------
"""
#######################################################
# #
# plotting functions for FourierICA #
# #
#######################################################
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Simple function to adjust axis in plots
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def adjust_spines(ax, spines, labelsize=10):
"""
Simple function to adjust axis in plots
Parameters
----------
ax: axis object
Plot object which should be adjusted
spines: list of strings ['bottom', 'left']
Name of the axis which should be adjusted
labelsize: integer
Font size for the x- and y-axis labels
"""
for loc, spine in list(ax.spines.items()):
if loc in spines:
spine.set_position(('outward', 4)) # outward by 4 points
# spine.set_smart_bounds(True)
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
ax.tick_params(axis='x', labelsize=labelsize)
ax.tick_params(axis='y', labelsize=labelsize)
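
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# usage sketch (illustration only, not part of the original
# module): applying adjust_spines() to a matplotlib axis
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _demo_adjust_spines():
    """
    Hypothetical helper showing how adjust_spines() is called.
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    adjust_spines(ax, ['bottom', 'left'], labelsize=8)
    plt.show()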
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to generate automatically combined labels
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_combined_labels(subject='fsaverage', subjects_dir=None,
parc='aparc.a2009s'):
"""
Helper function to combine labels automatically
according to previous studies.
Parameters
----------
subject: string containing the subjects name
default: subject='fsaverage'
subjects_dir: Subjects directory. If not given the
system variable SUBJECTS_DIR is used
default: subjects_dir=None
parc: name of the parcellation to use for reading
in the labels
default: parc='aparc.a2009s'
Return
------
label_keys: names of the new labels
labels: list containing the combined labels
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne import read_labels_from_annot
import numpy as np
from os.path import join
# ------------------------------------------
# define labels based on previous studies
# ------------------------------------------
# to get more information about the label names and their
# locations check the following publication:
# Destrieux et al. (2010), Automatic parcellation of human
# cortical gyri and sulci using standard anatomical nomenclature,
# NeuroImage, DOI: 10.1016/j.neuroimage.2010.06.010
label_combinations = {
'auditory': ['G_temp_sup-G_T_transv', 'G_temp_sup-Plan_polar',
'Lat_Fis-post'],
'broca': ['G_front_inf-Opercular', 'G_front_inf-Triangul',
'Lat_Fis-ant-Vertical'],
'cingulate': ['G_cingul-Post-dorsal', 'G_cingul-Post-ventral',
'G_and_S_cingul-Ant', 'G_and_S_cingul-Mid-Ant',
'G_and_S_cingul-Mid-Post', 'S_pericallosal',
'cingul-Post-ventral'],
'frontal': ['G_and_S_frontomargin', 'G_and_S_transv_frontopol',
'G_front_inf-Orbital', 'G_front_middle',
'G_front_sup', 'G_orbital',
'G_rectus', 'G_subcallosal',
'Lat_Fis-ant-Horizont', 'S_front_inf',
'S_front_middle', 'S_front_sup',
'S_orbital_lateral', 'S_orbital-H_Shaped',
'S_suborbital'],
'gustatory': ['G_and_S_subcentral'],
'insula': ['S_circular_insula_ant', 'S_circular_insula_inf',
'S_circular_insula_sup', 'G_Ins_lg_and_S_cent_ins',
'G_insular_short'],
'motor': ['G_precentral', 'S_precentral-sup-part',
'S_precentral-inf-part', 'S_central'],
'olfactory': ['S_temporal_transverse'],
'somatosensory': ['G_postcentral', 'S_postcentral'],
'somatosensory associated': ['G_and_S_paracentral', 'G_pariet_inf-Angular',
'G_parietal_sup', 'S_cingul-Marginalis',
'S_intrapariet_and_P_trans'],
'temporal': ['G_oc-temp_lat-fusifor', 'G_oc-temp_med-Parahip',
'G_temp_sup-Plan_polar', 'G_temporal_inf',
'G_temporal_middle', 'G_temp_sup-Lateral',
'Pole_temporal', 'S_collat_transv_ant',
'S_oc-temp_lat', 'S_oc-temp_med_and_Lingual',
'S_temporal_inf', 'S_temporal_sup'],
'vision': ['G_and_S_occipital_inf', 'G_occipital_middle',
'G_oc-temp_med-Lingual', 'S_collat_transv_post',
'S_oc_sup_and_transversal', 'S_occipital_ant',
'S_oc_middle_and_Lunatus'],
'visual': ['G_cuneus', 'G_precuneus',
'S_calcarine', 'S_parieto_occipital',
'G_occipital_sup', 'Pole_occipital',
'S_subparietal'],
'wernicke': ['G_pariet_inf-Supramar', 'G_temp_sup-Plan_tempo',
'S_interm_prim-Jensen']
}
label_keys = list(label_combinations.keys())
labels = []
# ------------------------------------------
# combine labels
# ------------------------------------------
# loop over both hemispheres
for hemi in ['lh', 'rh']:
# read all labels in
labels_all = read_labels_from_annot(subject, parc=parc, hemi=hemi,
surf_name='inflated',
subjects_dir=subjects_dir,
verbose=False)
# loop over all labels to extract label names
label_names = []
for label in labels_all:
label_names.append(label.name)
# ------------------------------------------
# now generate labels based on previous
# studies
# ------------------------------------------
# loop over all previously defined labels
for label_key in label_keys:
# get name of all labels related to the current one
label_members = label_combinations[label_key]
label_members = [x+'-'+hemi for x in label_members]
# check which labels we need for the current one
idx_labels_want = np.where(np.in1d(label_names, label_members))[0]
labels_want = [labels_all[i] for i in idx_labels_want]
# combine labels
label_new = np.sum(labels_want)
label_new.name = label_key + '-' + hemi
# fill the surface between sources
label_new.values.fill(1.0)
label_new.smooth(subject=subject, subjects_dir=subjects_dir)
# save new label
fnout = join(subjects_dir, subject, 'label',
hemi + '.' + label_key + '.label')
label_new.save(fnout)
labels.append(label_new)
return label_keys, labels
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to get the anatomical label to a given vertex
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_anat_label_name(vertex, hemi, labels=None, subject='fsaverage',
subjects_dir=None, parc='aparc.a2009s'):
"""
Helper function to get the name of the anatomical
label for a given vertex
Parameters
----------
vertex: integer containing the vertex number
hemi: string containing the information in which
hemisphere the vertex is located. Should be
either 'lh' or 'rh'
labels: labels to use for checking. If not given
the labels are read from the subjects directory
default: labels=None
subject: string containing the subjects name
default: subject='fsaverage'
subjects_dir: Subjects directory. If not given the
system variable SUBJECTS_DIR is used
default: subjects_dir=None
parc: name of the parcellation to use for reading
in the labels
default: parc='aparc.a2009s'
Return
------
name: string containing the name of the anatomical
label related to the given vertex
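Example
-------
Illustrative sketch (not part of the original module); the vertex number
and the subjects directory are made up:
>>> name = get_anat_label_name(12345, 'lh', subject='fsaverage',
...                            subjects_dir='/data/subjects')
>>> name.endswith('-lh')
True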
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne import read_labels_from_annot
import numpy as np
# ------------------------------------------
# check input parameter
# ------------------------------------------
# check if labels are given or must be read
if not labels:
labels = read_labels_from_annot(subject, parc=parc, hemi=hemi,
surf_name='inflated',
subjects_dir=subjects_dir,
verbose=False)
# ------------------------------------------
# loop over labels to find corresponding
# label
# ------------------------------------------
name = ''
for label in labels:
if label.hemi == hemi:
# get vertices of current label
label_vert = np.in1d(np.array(vertex), label.vertices)
if label_vert:
name = label.name
break
if name == '':
name = 'unknown-' + hemi
return name
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to get the MNI-coordinate(s) to a given
# FourierICA component
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_mni_coordinates(A_orig,
subject='fsaverage', subjects_dir=None,
parc='aparc.a2009s', percentile=97,
combine_labels=True):
"""
Helper function to get the MNI-coordinate(s) to a given
FourierICA component. The decision whether a component has
activation in both hemispheres or only in one is made
as follows: estimate for each component an activation
threshold based on the given percentile. Next, estimate
the total number of voxels in the component which are
above the estimated threshold. Now check if at least 20%
of the total number of voxels above threshold are in each
hemisphere. If yes both hemispheres are marked as active,
otherwise only one.
Parameters
----------
A_orig: array
2D-mixing-array (nvoxel, ncomp) estimated
when applying FourierICA
subject: string containing the subjects name
default: subject='fsaverage'
subjects_dir: Subjects directory. If not given the
system variable SUBJECTS_DIR is used
default: subjects_dir=None
parc: name of the parcellation to use for reading
in the labels
default: parc='aparc.a2009s'
percentile: integer
value between 0 and 100 used to set a lower
limit for the shown intensity range of the
spatial plots
combine_labels: if set labels are combined automatically
according to previous studies
default: combine_labels=True
Return
------
mni_coords: dictionary
The dictionary contains two elements: 'rh' and 'lh',
each of which contains a list with the MNI
coordinates as string.
Note, each list contains the same number of
elements as components are given. If there is no MNI
coordinate for a component an empty string is used, e.g.
for two components
{'rh': ['(37.55, 1.58, -21.71)', '(44.78, -10.41, 27.89)'],
'lh': ['(-39.43, 5.60, -27.80)', '']}
hemi_loc_txt: list
containing for each FourierICA component to which region
it spatially belongs ('left', 'right' or 'both')
classification: dictionary
classification object. It is a dictionary containing
two sub-dictionaries 'lh' and 'rh' (for left and
right hemisphere). In both sub-dictionaries the
information about the groups is stored, i.e. a
group/region name + the information which components
are stored in this group (as indices). An example
for 6 components might look like this:
{'rh': {'somatosensory': [1, 3], 'cingulate': [4, 5]},
'lh': {'somatosensory': [1, 2], 'cingulate': [0, 5]}}
labels: list of strings
names of the labels which are involved in this data set
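Example
-------
Illustrative sketch (not part of the original module); 'A_orig' is
assumed to be the mixing matrix of a previous FourierICA decomposition
and the subjects directory path is made up:
>>> out = get_mni_coordinates(A_orig, subject='fsaverage',
...                           subjects_dir='/data/subjects', percentile=97)
>>> mni_coords, hemi_loc_txt, classification, labels = out
>>> len(hemi_loc_txt) == A_orig.shape[1]
True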
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne import vertex_to_mni
import numpy as np
from os import environ
import types
# -------------------------------------------
# check input parameter
# -------------------------------------------
if not subjects_dir:
subjects_dir = environ.get('SUBJECTS_DIR')
# -------------------------------------------
# generate spatial profiles
# (using magnitude and phase)
# -------------------------------------------
if isinstance(A_orig[0, 0], complex):
A_orig_mag = np.abs(A_orig)
else:
A_orig_mag = A_orig
# -------------------------------------------
# set some parameters
# -------------------------------------------
nvoxel, ncomp = A_orig_mag.shape
nvoxel_half = int(nvoxel / 2)
hemi = ['lh', 'rh']
hemi_names = ['left ', 'right', 'both ']
hemi_indices = [[0, nvoxel_half], [nvoxel_half, -1]]
hemi_loc_txt = np.array([' '] * ncomp, dtype='<U5')  # wide enough for 'left ', 'right', 'both '
hemi_loc = np.zeros(ncomp)
# -------------------------------------------
# generate structures to save results
# -------------------------------------------
# generate dictionary to save MNI coordinates
mni_coords = {'rh': [''] * ncomp, 'lh': [''] * ncomp}
# ------------------------------------------
# check if labels should be combined
# automatically
# ------------------------------------------
if combine_labels:
label_names, labels = get_combined_labels(subject=subject,
subjects_dir=subjects_dir,
parc=parc)
# generate empty classification dictionary
class_keys = label_names[:]
class_keys.append('unknown')
classification = {'lh': {key: [] for key in class_keys},
'rh': {key: [] for key in class_keys}}
# if not generate empty variables
else:
label_names, labels = None, None
classification = {}
# ------------------------------------------
# loop over all components
# ------------------------------------------
for icomp in range(ncomp):
# ------------------------------------------
# extract maxima in the spatial profile of
# the current component separately for both
# hemispheres
# ------------------------------------------
idx_ver_max_lh = np.argmax(A_orig_mag[:nvoxel_half, icomp])
idx_ver_max_rh = np.argmax(A_orig_mag[nvoxel_half:, icomp])
# ------------------------------------------
# check for both maxima if they are
# significant
# ------------------------------------------
# set some parameters
threshold = np.percentile(A_orig_mag[:, icomp], percentile)
nidx_above = len(np.where(A_orig_mag[:, icomp] > threshold)[0])
cur_label_name = []
# loop over both hemispheres
for idx_hemi, idx_vertex_max in enumerate([idx_ver_max_lh, idx_ver_max_rh]):
# get the number of vertices above the threshold
# in the current hemisphere
nidx_above_hemi = len(np.where(A_orig_mag[hemi_indices[idx_hemi][0]:hemi_indices[idx_hemi][1],
icomp] > threshold)[0])
# check if at least 20% of all vertices above the threshold
# are in the current hemisphere
if nidx_above_hemi * 5 > nidx_above:
# get MNI-coordinate
mni_coord = vertex_to_mni(idx_vertex_max, idx_hemi, subject,
subjects_dir=subjects_dir)[0]
# store results in structures
mni_coords[hemi[idx_hemi]][icomp] = \
'(' + ', '.join(["%2.2f" % x for x in mni_coord]) + ')'
# store hemisphere information
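# (resulting code: 1 -> left hemisphere only, 2 -> right only, 3 -> both)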
hemi_loc[icomp] += idx_hemi + 1.0
# ------------------------------------------
# get MNI-coordinate to vertex as well as
# the name of the corresponding anatomical
# label
# ------------------------------------------
anat_name = get_anat_label_name(idx_vertex_max, hemi[idx_hemi],
subject=subject, subjects_dir=subjects_dir,
parc=parc, labels=labels)
cur_label_name.append(anat_name[:-3])
else:
cur_label_name.append(' ')
# ------------------------------------------
# check which results must be saved
# ------------------------------------------
if combine_labels:
# check if activation was found in both hemispheres
# --> if not we can directly save the results
if ' ' in cur_label_name:
# adjust classification dictionary
if cur_label_name[0] == ' ':
classification[hemi[1]][cur_label_name[1]].append(icomp)
else:
classification[hemi[0]][cur_label_name[0]].append(icomp)
# --> otherwise we have to make sure that we group the
# component only into one region
else:
# check if both vertices are in the same anatomical location
# --> then we have no problem
if cur_label_name[0] == cur_label_name[1]:
classification[hemi[0]][cur_label_name[0]].append(icomp)
classification[hemi[1]][cur_label_name[1]].append(icomp)
else:
# check if we have an unknown region being involved
# --> if yes choose the other one
if cur_label_name[0] == 'unknown':
classification[hemi[1]][cur_label_name[1]].append(icomp)
hemi_loc[icomp], mni_coords[hemi[0]][icomp] = 2, ''
elif cur_label_name[1] == 'unknown':
classification[hemi[0]][cur_label_name[0]].append(icomp)
hemi_loc[icomp], mni_coords[hemi[1]][icomp] = 1, ''
# otherwise choose the region with the strongest vertex
else:
if A_orig_mag[idx_ver_max_lh, icomp] > A_orig_mag[idx_ver_max_rh, icomp]:
classification[hemi[0]][cur_label_name[0]].append(icomp)
hemi_loc[icomp], mni_coords[hemi[1]][icomp] = 1, ''
else:
classification[hemi[1]][cur_label_name[1]].append(icomp)
hemi_loc[icomp], mni_coords[hemi[0]][icomp] = 2, ''
# ------------------------------------------
# adjust hemi_loc_txt if activity was found
# in both hemispheres
# ------------------------------------------
for idx, hemi_name in enumerate(hemi_names):
idx_change = np.where(hemi_loc == (idx + 1.0))[0]
hemi_loc_txt[idx_change] = hemi_name
# ------------------------------------------
# adjust label_names to only contain regions
# being involved in processing the current
# data
# ------------------------------------------
labels = []
for cur_hemi in hemi:
for key in label_names:
if classification[cur_hemi][key]:
labels.append(key)
labels = np.unique(labels).tolist()
return mni_coords, hemi_loc_txt, classification, labels
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to check if classification was
# performed prior to plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _check_classification(classification, ncomp):
"""
Helper function to check if classification was
performed prior to plotting
Parameters
----------
classification: dictionary
classification object from the group_ica_object.
It is a dictionary containing two sub-dictionaries
'lh' and 'rh' (for left and right hemisphere). In
both sub-dictionaries the information about the
groups is stored, i.e. a group/region name + the
information which components are stored in this
group
ncomp: integer
number of components
Return
------
keys: list containing the group names
key_borders: list containing the group borders, i.e.
the information where to plot a new group name
idx_sort: array containing the plotting order of
the components, i.e. components belonging to one
group are plotted together
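Example
-------
Illustrative sketch (not part of the original module) for four
components grouped into one region plus 'unknown':
>>> classification = {'lh': {'motor': [0, 2], 'unknown': [3]},
...                   'rh': {'motor': [0], 'unknown': [1, 3]}}
>>> keys, key_borders, idx_sort = _check_classification(classification, 4)
>>> keys
['motor', 'unknown']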
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import numpy as np
# ------------------------------------------
# check if classification was done
# ------------------------------------------
key_borders = []
if np.any(classification):
# initialize empty lists
idx_sort = []
keys_hemi = list(classification.keys())
# sort keys
keys = list(classification[keys_hemi[0]].keys())
keys.sort(key=lambda v: v.upper())
# set 'unknown' variables to the end
keys.remove('unknown')
keys.append('unknown')
# remove keys with empty entries
keys_want = []
for key in keys:
if classification[keys_hemi[0]][key] or\
classification[keys_hemi[1]][key]:
keys_want.append(key)
# loop over all keys
for key in keys_want:
# get indices to each class
idx_lh = classification[keys_hemi[0]][key]
idx_rh = classification[keys_hemi[1]][key]
# get indices of components in both hemispheres
idx_both = np.intersect1d(idx_lh, idx_rh)
# get indices of components only in right hemisphere
idx_only_rh = np.setdiff1d(idx_rh, idx_lh)
# get indices of components only in left hemisphere
idx_only_lh = np.setdiff1d(idx_lh, idx_rh)
# add components to list of sorted indices
idx_all = np.concatenate((idx_both, idx_only_rh, idx_only_lh))
idx_sort += idx_all.tolist()
key_borders.append(len(idx_all))
# add first border and estimate cumulative sum to
# have the right borders
key_borders = np.insert(key_borders, 0, 1)
key_borders = np.cumsum(key_borders)[:-1]
# ------------------------------------------
# if classification was not performed set
# some default values
# ------------------------------------------
else:
idx_sort = np.arange(ncomp)
keys_want = []
return keys_want, key_borders, idx_sort
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to handle time courses for plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_temporal_envelopes(fourier_ica_obj, W_orig, temporal_envelope=[],
src_loc_data=[], tICA=False, global_scaling=True,
win_length_sec=None, tpre=None, flow=None):
"""
Helper function to generate the temporal envelopes of
the FourierICA components needed for plotting
Parameters
----------
fourier_ica_obj: FourierICA object generated
when applying jumeg.decompose.fourier_ica
W_orig: array
2D-demixing-array (ncomp x nvoxel) estimated
when applying FourierICA
temporal_envelope: list of arrays containing
the temporal envelopes. If the temporal
envelopes are already given here z-scoring
and mean estimation is performed
src_loc_data: array
3D array containing the source localization
data used for FourierICA estimation
(nfreq x nepochs x nvoxel). Only necessary
if temporal_envelope is not given.
tICA: bool
If set we know that temporal ICA was applied
when estimating the FourierICA, i.e. when
generating the temporal-envelopes the data
must not be transformed from the Fourier
domain to the time-domain
global_scaling: bool
If set all temporal-envelopes are globally
scaled. Otherwise each component is scaled
individually
win_length_sec: float or None
Length of the epoch window in seconds
tpre: float or None
Lower border (in seconds) of the time-window
used for generating/showing the epochs. If
'None' the value stored in 'fourier_ica_obj'
is used
flow: float, integer or None
Lower frequency border for generating the
temporal-envelope. If 'None' the frequency
border stored in 'fourier_ica_obj' is used
Return
------
temporal_envelope_mean: list containing the 2D
arrays of the mean temporal envelopes
of the components
temporal_envelope: list containing the 3D
arrays of the temporal envelopes of the
components. Necessary for estimating the
spectral profiles
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne.baseline import rescale
import numpy as np
from scipy import fftpack
# -------------------------------------------
# check input parameter
# -------------------------------------------
if tpre is None:
tpre = fourier_ica_obj.tpre
if flow is None:
flow = fourier_ica_obj.flow
if not win_length_sec:
win_length_sec = fourier_ica_obj.win_length_sec
# estimate some simple parameter
sfreq = fourier_ica_obj.sfreq
ncomp, nvoxel = W_orig.shape
win_ntsl = int(np.floor(sfreq * win_length_sec))
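# index of the lowest Fourier bin (frequency resolution is 1/win_length_sec)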
startfftind = int(np.floor(flow * win_length_sec))
# -------------------------------------------
# check if temporal envelope is already
# given or should be estimated
# -------------------------------------------
if temporal_envelope == []:
# -------------------------------------------
# check if 'src_loc_data' is given...
# if not throw an error
# -------------------------------------------
if src_loc_data == []:
print(">>> ERROR: You have to provide either the 'temporal_envelope' or")
print(">>> 'src_loc_data'. Otherwise no temporal information can be plotted!")
import pdb
pdb.set_trace()
# -------------------------------------------
# get independent components
# -------------------------------------------
nfreq, nepochs, nvoxel = src_loc_data.shape
act = np.zeros((ncomp, nepochs, nfreq), dtype=complex)  # builtin 'complex'; np.complex is removed in recent NumPy
if tICA:
win_ntsl = nfreq
temporal_envelope = np.zeros((nepochs, ncomp, win_ntsl))
fft_act = np.zeros((ncomp, win_ntsl), dtype=complex)
# loop over all epochs to get time-courses from
# source localized data by inverse FFT
for iepoch in range(nepochs):
# normalize data
src_loc_zero_mean = (src_loc_data[:, iepoch, :] - np.dot(np.ones((nfreq, 1)), fourier_ica_obj.dmean)) / \
np.dot(np.ones((nfreq, 1)), fourier_ica_obj.dstd)
act[:ncomp, iepoch, :] = np.dot(W_orig, src_loc_zero_mean.transpose())
#act[ncomp:, iepoch, :] = np.dot(W_orig, src_loc_zero_mean.transpose())
if tICA:
temporal_envelope[iepoch, :, :] = act[:, iepoch, :].real
else:
# -------------------------------------------
# generate temporal profiles
# -------------------------------------------
# apply inverse STFT to get temporal envelope
fft_act[:, startfftind:(startfftind + nfreq)] = act[:, iepoch, :]
temporal_envelope[iepoch, :, :] = fftpack.ifft(fft_act, n=win_ntsl, axis=1).real
# -------------------------------------------
# average temporal envelope
# -------------------------------------------
if not isinstance(temporal_envelope, list):
temporal_envelope = [[temporal_envelope]]
ntemp = len(temporal_envelope)
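# create a list with ntemp empty sub-lists (one per condition)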
temporal_envelope_mean = np.empty((ntemp, 0)).tolist()
times = (np.arange(win_ntsl) / sfreq + tpre)
# -------------------------------------------
# perform baseline correction
# -------------------------------------------
for itemp in range(ntemp):
for icomp in range(ncomp):
temporal_envelope[itemp][0][:, icomp, :] = rescale(temporal_envelope[itemp][0][:, icomp, :],
times, (None, 0), 'zscore')
# -------------------------------------------
# estimate mean from temporal envelopes
# -------------------------------------------
for itemp in range(ntemp):
temporal_envelope_mean[itemp].append(np.mean(temporal_envelope[itemp][0], axis=0)[:, 5:-5])
# -------------------------------------------
# check if global scaling should be used
# -------------------------------------------
# if not scale each component separately between -0.5 and 0.5
if not global_scaling:
for icomp in range(ncomp):
min_val = np.min([temporal_envelope_mean[0][0][icomp, :], temporal_envelope_mean[1][0][icomp, :]])
max_val = np.max([temporal_envelope_mean[0][0][icomp, :], temporal_envelope_mean[1][0][icomp, :]])
scale_fact = 1.0 / (max_val - min_val)
for itemp in range(ntemp):
temporal_envelope_mean[itemp][0][icomp, :] = np.clip(
scale_fact * temporal_envelope_mean[itemp][0][icomp, :]
- scale_fact * min_val - 0.5, -0.5, 0.5)
# if global scaling should be used, scale all
# data between -0.5 and 0.5
else:
# scale temporal envelope between -0.5 and 0.5
min_val = np.min(temporal_envelope_mean)
max_val = np.max(temporal_envelope_mean)
scale_fact = 1.0 / (max_val - min_val)
for itemp in range(ntemp):
temporal_envelope_mean[itemp][0] = np.clip(scale_fact * temporal_envelope_mean[itemp][0]
- scale_fact * min_val - 0.5, -0.5, 0.5)
return temporal_envelope_mean, temporal_envelope
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to handle spatial profiles for plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_spatial_profiles(A_orig, keys, idx_text, vertno=[],
subject='fsaverage', subjects_dir=None,
labels=None, classification={}, percentile=97,
mni_coord=[], add_foci=False, fnout=None):
"""
Helper function to get/generate the spatial
profiles of the FourierICA components for
plotting
Parameters
----------
A_orig: array
2D-mixing-array (nvoxel, ncomp) estimated
when applying FourierICA
keys: list containing the group names
idx_text: list containing the information in which
brain hemisphere a component is mainly
located (could be either 'both', 'left', 'right'
or ' ' if no classification was performed before
plotting)
vertno: list
list containing two arrays with the order
of the vertices. If not given it will be
generated in this routine
subject: string
string containing the subjects ID
subjects_dir: string
string containing the subjects directory
path
labels: list of strings
names of the labels which should be plotted.
Note, the prefix 'lh.' and the suffix '.label'
are automatically added
classification: dictionary
classification object from the group_ica_object.
It is a dictionary containing two sub-dictionaries
'lh' and 'rh' (for left and right hemisphere). In
both sub-dictionaries the information about the
groups is stored, i.e. a group/region name + the
information which components are stored in this
group
percentile: integer
value between 0 and 100 used to set a lower
limit for the shown intensity range of the
spatial plots
mni_coord: list of strings
if given the MNI coordinates are plotted
beneath the spatial profiles
add_foci: bool
if True and the MNI coordinates are given
a foci is plotted at the position of the
MNI coordinate
fnout: string or None
if labels and classification is given the
output filename of the brain plot containing
all labels. If 'None' the results are not stored
Return
------
temp_plot_dir: string
directory where the spatial profiles are
stored
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from matplotlib import gridspec as grd
from matplotlib import pyplot as plt
from mayavi import mlab
from mne.source_estimate import _make_stc
import numpy as np
from os import environ, makedirs
from os.path import exists, join
import re
from scipy import misc
from surfer import set_log_level
import types
# set log level to 'CRITICAL' to suppress pysurfer output
set_log_level('CRITICAL')
import mayavi
mayavi.mlab.options.offscreen = True
# -------------------------------------------
# create temporary directory to save plots
# of spatial profiles
# -------------------------------------------
temp_plot_dir = join(subjects_dir, subject, 'temp_plots')
if not exists(temp_plot_dir):
makedirs(temp_plot_dir)
# -------------------------------------------
# generate spatial profiles
# (using magnitude and phase)
# -------------------------------------------
if not subjects_dir:
subjects_dir = environ.get('SUBJECTS_DIR')
if isinstance(A_orig[0, 0], complex):
A_orig_mag = np.abs(A_orig)
else:
A_orig_mag = A_orig
nvoxel, ncomp = A_orig_mag.shape
# -------------------------------------------
# check if vertno is given, otherwise
# generate it
# -------------------------------------------
if not np.any(vertno):
vertno = [np.arange(nvoxel // 2), np.arange(nvoxel // 2)]
# -------------------------------------------
# check if labels should be plotted and if
# classification was already performed
# --> if yes define some colors for the
# labels
# -------------------------------------------
if labels and classification:
colors = ['green', 'red', 'cyan', 'yellow', 'mediumblue',
'magenta', 'chartreuse', 'indigo', 'sandybrown',
'slateblue', 'purple', 'lightpink', 'springgreen',
'orange', 'sienna', 'cadetblue', 'crimson',
'maroon', 'powderblue', 'deepskyblue', 'olive']
# -------------------------------------------
# loop over all components to generate
# spatial profiles
# -------------------------------------------
for icomp in range(ncomp):
# -------------------------------------------
# plot spatial profile
# -------------------------------------------
# generate stc-object from current component
A_cur = A_orig_mag[:, icomp]
src_loc = _make_stc(A_cur[:, np.newaxis], vertices=vertno, tmin=0, tstep=1,
subject=subject)
# define current range (Xth percentile)
fmin = np.percentile(A_cur, percentile)
fmax = np.max(A_cur)
fmid = 0.5 * (fmin + fmax)
clim = {'kind': 'value',
'lims': [fmin, fmid, fmax]}
# plot spatial profiles
brain = src_loc.plot(surface='inflated', hemi='split', subjects_dir=subjects_dir,
config_opts={'cortex': 'bone'}, views=['lateral', 'medial'],
time_label=' ', colorbar=False, clim=clim)
# check if foci should be added to the plot
if add_foci and np.any(mni_coord):
for i_hemi in ['lh', 'rh']:
mni_string = mni_coord[i_hemi][icomp]
# if 'mni_string' is not empty (it might be empty if activity
# can only be found in one hemisphere) plot a foci
if mni_string != "":
mni_float = list(map(float, re.findall(r"[-+]?\d*\.\d+|\d+", mni_string)))
brain.add_foci(mni_float, coords_as_verts=False, hemi=i_hemi, color='chartreuse',
scale_factor=1.5, map_surface='white')
# -------------------------------------------
# check if labels should be plotted
# -------------------------------------------
if labels and classification:
# import module to read in labels
from mne import read_label
# get path to labels
dir_labels = join(subjects_dir, subject, 'label')
# identify in which group the IC is classified
hemi = 'rh' if idx_text[icomp] == 'right' else 'lh'
# read in the corresponding label
for idx_key, key in enumerate(keys):
if icomp in classification[hemi][key]:
label_name = ".%s.label" % key
color = colors[idx_key]
break
# loop over both hemispheres to read the label in and plot it
hemi = ['lh', 'rh'] if idx_text[icomp] == 'both ' else [hemi]
for hemi_cur in hemi:
label = read_label(join(dir_labels, hemi_cur + label_name), subject=subject)
brain.add_label(label, borders=False, hemi=hemi_cur, color=color, alpha=0.1)
brain.add_label(label, borders=True, hemi=hemi_cur, color=color)
# save results
fn_base = "IC%02d_spatial_profile.png" % (icomp+1)
fnout_img = join(temp_plot_dir, fn_base)
brain.save_image(fnout_img)
# close mlab figure
mlab.close(all=True)
# -------------------------------------------
# also generate one plot with all labels
# -------------------------------------------
if labels and classification:
# set clim in a way that no activity can be seen
# (Note: we only want to see the labels)
clim = {'kind': 'value',
'lims': [fmax, 1.5 * fmax, 2.0 * fmax]}
# generate plot
brain = src_loc.plot(surface='inflated', hemi='split', subjects_dir=subjects_dir,
config_opts={'cortex': 'bone'}, views=['lateral', 'medial'],
time_label=' ', colorbar=False, clim=clim, background='white')
# loop over all labels
for idx_key, key in enumerate(keys):
label_name = ".%s.label" % key
color = colors[idx_key]
# loop over both hemispheres in order to plot the labels
for hemi in ['lh', 'rh']:
label = read_label(join(dir_labels, hemi + label_name), subject=subject)
brain.add_label(label, borders=False, hemi=hemi, color=color, alpha=0.6)
# save results
if fnout:
fnout_img = '%s_labels.png' % fnout
brain.save_image(fnout_img)
# close mlab figure
mlab.close(all=True)
# -------------------------------------------
# now adjust the label plot appropriately
# -------------------------------------------
# read spatial profile image
spat_tmp = misc.imread(fnout_img)
# rearrange image
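# (crop the four quadrants of the 2 x 2 split-view screenshot and
#  concatenate them side by side into a single row)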
x_size, y_size, _ = spat_tmp.shape
x_half, y_half = x_size // 2, y_size // 2
x_frame, y_frame = int(0.11 * x_half), int(0.01 * y_half)
spatial_profile = np.concatenate((spat_tmp[x_frame:(x_half - x_frame), y_frame:(y_half - y_frame), :],
spat_tmp[(x_half + x_frame):-x_frame, y_frame:(y_half - y_frame), :],
spat_tmp[(x_half + x_frame):-x_frame, (y_half + y_frame):-y_frame, :],
spat_tmp[x_frame:(x_half - x_frame), (y_half + y_frame):-y_frame, :]),
axis=1)
# plot image
plt.ioff()
fig = plt.figure('Labels plots', figsize=(17, 3))
gs = grd.GridSpec(1, 30, wspace=0.00001, hspace=0.00001,
left=0.0, right=1.0, bottom=0.0, top=1.0)
# set plot position and plot image
p1 = fig.add_subplot(gs[0, 0:26])
p1.imshow(spatial_profile)
adjust_spines(p1, [])
# add label names
keys_fac = 0.8/len(keys)
keys_split = 0
p_text = fig.add_subplot(gs[0, 26:30])
keys_sort_idx = np.argsort(keys)
for idx_key in range(len(keys)):
key = keys[keys_sort_idx[idx_key]]
# check if string should be split
if len(key) > 21 and ' ' in key:
p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key.split()[0]+'-',
fontsize=13, color=colors[keys_sort_idx[idx_key]])
keys_split += 1
p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key.split()[1],
fontsize=13, color=colors[keys_sort_idx[idx_key]])
else:
p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key, fontsize=13,
color=colors[keys_sort_idx[idx_key]])
adjust_spines(p_text, [])
plt.savefig(fnout_img, dpi=300)
# close plot and set plotting back to screen
plt.close('Labels plots')
plt.ion()
mayavi.mlab.options.offscreen = False
return temp_plot_dir
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to get spectral profiles for plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_spectral_profile(temporal_envelope, tpre,
sfreq, flow, fhigh,
bar_plot=False,
use_multitaper=False):
"""
Helper function to get the spectral-profile of the
temporal-envelopes of the FourierICA components
for plotting
Parameters
----------
temporal_envelope: list of arrays containing
the temporal envelopes.
tpre: float
Lower border (in seconds) of the time-window
used for generating/showing the epochs. If
'None' the value stored in 'fourier_ica_obj'
is used
sfreq: float
Sampling frequency of the data
flow: float or integer
Lower frequency range for time frequency analysis
fhigh: float or integer
Upper frequency range for time frequency analysis
bar_plot: boolean
if set the number of time points for time-frequency
estimation is reduced in order to save memory and
computing-time
use_multitaper: boolean
If set 'multitaper' is used for time frequency
analysis, otherwise 'stockwell'
Return
------
average_power_all: list containing the averaged
frequency power of all components
freqs: array containing the frequencies used to
calculate the frequency power
vmin: lower intensity limit for plotting the spectral profiles
vmax: upper intensity limit for plotting the spectral profiles
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne.baseline import rescale
from mne.time_frequency._stockwell import _induced_power_stockwell
import numpy as np
# ------------------------------------------
# define some parameter
# ------------------------------------------
ntemp = len(temporal_envelope)
ncomp = temporal_envelope[0][0].shape[1]
win_ntsl = temporal_envelope[0][0].shape[-1]
average_power_all = np.empty((ntemp, 0)).tolist()
vmin = np.zeros(ncomp)
vmax = np.zeros(ncomp)
# define some time parameter
times = np.arange(win_ntsl) / sfreq + tpre
idx_start = np.argmin(np.abs(times - tpre))
idx_end = np.argmin(np.abs(times - (tpre + win_ntsl/sfreq)))
if bar_plot:
decim = 10
else:
decim = 1
# ------------------------------------------
# loop over all time courses, i.e.
# conditions, and all components
# ------------------------------------------
for itemp in range(ntemp):
for icomp in range(ncomp):
# extract some information from the temporal_envelope
nepochs = temporal_envelope[itemp][0].shape[0]
# ------------------------------------------
# perform time frequency analysis
# ------------------------------------------
# prepare data for frequency analysis
data_stockwell = temporal_envelope[itemp][0][:, icomp, idx_start:idx_end].\
reshape((nepochs, 1, idx_end-idx_start))
data_stockwell = data_stockwell.transpose([1, 0, 2])
# mirror data to reduce transient frequencies
data_stockwell = np.concatenate((data_stockwell[:, :, 50:0:-1],
data_stockwell, data_stockwell[:, :, -1:-51:-1]), axis=-1)
n_fft = data_stockwell.shape[-1]
# check if 'multitaper' or 'stockwell' should be
# used for time-frequency analysis
if use_multitaper:
from mne.time_frequency.tfr import _compute_tfr
n_cycle = 3.0
if (10.0 * n_cycle*sfreq)/(2.0 * np.pi * flow) > n_fft:
flow *= ((10.0 * n_cycle*sfreq)/(2.0 * np.pi * flow))/n_fft
flow = np.ceil(flow)
freqs = np.arange(flow, fhigh)
power_data = _compute_tfr(data_stockwell, freqs, sfreq=sfreq, use_fft=True,
n_cycles=n_cycle, zero_mean=True, decim=decim,
output='power', method='multitaper',
time_bandwidth=10)
else:
power_data, _, freqs = _induced_power_stockwell(data_stockwell, sfreq=sfreq, fmin=flow,
fmax=fhigh, width=0.6, decim=1, n_fft=n_fft,
return_itc=False, n_jobs=4)
# perform baseline correction (and remove mirrored parts from data)
power_data = rescale(power_data[:, :, int(50/decim):-int(50/decim)],
times[idx_start:idx_end][0:-1:decim], (None, 0), 'mean')
average_power = np.mean(power_data, axis=0)
# ------------------------------------------
# store all frequency data in one list
# ------------------------------------------
average_power_all[itemp].append(average_power)
# ------------------------------------------
# estimate frequency thresholds for plotting
# ------------------------------------------
vmax[icomp] = np.max((np.percentile(average_power, 98), vmax[icomp]))
vmin[icomp] = np.min((np.percentile(average_power, 2), vmin[icomp]))
if np.abs(vmax[icomp]) > np.abs(vmin[icomp]):
vmin[icomp] = - np.abs(vmax[icomp])
else:
vmax[icomp] = np.abs(vmin[icomp])
return average_power_all, freqs, vmin, vmax
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# plot results when Fourier ICA was applied in the
# source space
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def plot_results_src_space(fourier_ica_obj, W_orig, A_orig,
src_loc_data=[], temporal_envelope=[], # parameter for temporal profiles
tpre=None, win_length_sec=None, tICA=False,
vertno=[], subject='fsaverage', subjects_dir=None, # parameter for spatial profiles
percentile=97, add_foci=True, classification={},
mni_coords=[], labels=None,
flow=None, fhigh=None, bar_plot=False, # parameter for spectral profiles
global_scaling=True, ncomp_per_plot=13, fnout=None, # general plotting parameter
temp_profile_names=[]):
"""
Generate plot containing all results achieved by
applying FourierICA in source space, i.e., plot
spatial, spectral and temporal profiles.
Parameters
----------
fourier_ica_obj: FourierICA object generated
when applying jumeg.decompose.fourier_ica
W_orig: array
2D-demixing-array (ncomp x nvoxel) estimated
when applying FourierICA
A_orig: array
2D-mixing-array (nvoxel, ncomp) estimated
when applying FourierICA
**** parameter for temporal profiles ****
src_loc_data: array
3D array containing the source localization
data used for FourierICA estimation
(nfreq x nepochs x nvoxel). Only necessary
if temporal_envelope is not given.
default: src_loc_data=[]
temporal_envelope: list of arrays containing
the temporal envelopes. If not given the
temporal envelopes are estimated here based
on the 'src_loc_data'
default: temporal_envelope=[]
tpre: float
Lower border (in seconds) of the time-window
used for generating/showing the epochs. If
'None' the value stored in 'fourier_ica_obj'
is used
win_length_sec: float or None
Length of the epoch window in seconds. If
'None' the value stored in 'fourier_ica_obj'
is used
tICA: boolean
should be True if temporal ICA was applied
default: tICA=False
**** parameter for spatial profiles ****
vertno: list
list containing two arrays with the order
of the vertices. If list is empty it will be
automatically generated
default: vertno=[]
subject: string
subjects ID
default: subject='fsaverage'
subjects_dir: string or None
string containing the subjects directory
path
default: subjects_dir=None --> system variable
SUBJECTS_DIR is used
percentile: integer
value between 0 and 100 used to set a lower
limit for the shown intensity range of the
spatial plots
default: percentile=97
add_foci: bool
if True and the MNI coordinates are given
a foci is plotted at the position of the
MNI coordinate
default: add_foci=True
classification: dictionary
classification object from the group_ica_object.
It is a dictionary containing two sub-dictionaries
'lh' and 'rh' (for left and right hemisphere). In
both sub-dictionaries the information about the
groups is stored, i.e. a group/region name + the
information which components are stored in this
group
default: classification={}
mni_coords: list of strings
if given the MNI coordinates are plotted
beneath the spatial profiles
default: mni_coords=[]
labels: list of strings
names of the labels which should be plotted.
Note, the prefix 'lh.' and the suffix '.label'
are automatically added
default: labels=None
**** parameter for spectral profiles ****
flow: float or integer
Lower frequency range for time frequency analysis
fhigh: float or integer
Upper frequency range for time frequency analysis
bar_plot: boolean
If set the results of the time-frequency analysis
are shown as bar plot. This option is recommended
when FourierICA was applied to resting-state data
default: bar_plot=False
**** general plotting parameter ****
global_scaling: bool
If set spatial, spectral and temporal profiles
are globally scaled. Otherwise each component
is scaled individually
default: global_scaling=True
ncomp_per_plot: integer
number of components per plot
fnout: string
default: fnout=None
temp_profile_names: list of string
The list should have the same number of elements
as conditions were used to generate the temporal
envelopes. The names given here are used as headline
for the temporal profiles in the plot
default: temp_profile_names=[]
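Example
-------
Illustrative call sketch (not part of the original module);
'fourier_ica_obj', 'W_orig', 'A_orig' and 'src_loc_data' are assumed to
come from a previous FourierICA decomposition and the paths are made up:
>>> mni_coords, classification, labels = plot_results_src_space(
...     fourier_ica_obj, W_orig, A_orig, src_loc_data=src_loc_data,
...     subject='fsaverage', subjects_dir='/data/subjects',
...     fnout='/tmp/fourier_ica_result')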
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from matplotlib import pyplot as plt
from matplotlib import gridspec as grd
from matplotlib.colors import Normalize
import numpy as np
from os import remove, rmdir
from os.path import exists, join
from scipy import misc
# -------------------------------------------
# check input parameter
# -------------------------------------------
if tpre is None:
tpre = fourier_ica_obj.tpre
if flow is None:
flow = fourier_ica_obj.flow
if not fhigh:
fhigh = fourier_ica_obj.fhigh
if not win_length_sec:
win_length_sec = fourier_ica_obj.win_length_sec
# check if either 'src_loc_data' or
# 'temporal_envelope' is given, otherwise stop
if src_loc_data == [] and temporal_envelope == []:
print(">>> ERROR: you have either to provide the variable")
print(">>> 'src_loc_data' or 'temporal_envelope'.")
import pdb
pdb.set_trace()
# estimate/set some simple parameter
sfreq = fourier_ica_obj.sfreq
win_ntsl = int(np.floor(sfreq * win_length_sec))
ncomp, nvoxel = W_orig.shape
ylim_temp = [-0.55, 0.55]
time_range = [tpre, tpre + win_length_sec]
# -------------------------------------------
# get temporal envelopes, or rather check if
# temporal envelopes already exist or must
# be calculated
# -------------------------------------------
temporal_envelope_mean, temporal_envelope = \
_get_temporal_envelopes(fourier_ica_obj, W_orig, temporal_envelope=temporal_envelope,
src_loc_data=src_loc_data, tICA=tICA, global_scaling=global_scaling,
win_length_sec=win_length_sec, tpre=tpre, flow=flow)
ntemp = len(temporal_envelope)
# -------------------------------------------
# get MNI-coordinates of the FourierICA
# components
# -------------------------------------------
if not classification and not mni_coords and not labels:
mni_coords, hemi_loc_txt, classification, labels = \
get_mni_coordinates(A_orig, subject=subject, subjects_dir=subjects_dir,
percentile=percentile)
# otherwise we only have to get the 'hemi_loc_txt' variable
else:
hemi_loc = np.array([int(i != '') for i in mni_coords['lh']])
hemi_loc += np.array([2*int(i != '') for i in mni_coords['rh']])
hemi_loc_txt = np.array([' '] * len(hemi_loc), dtype='<U5')
for idx, hemi_name in enumerate(['left ', 'right', 'both ']):
idx_change = np.where(hemi_loc == (idx + 1.0))[0]
hemi_loc_txt[idx_change] = hemi_name
# check if classification was performed prior to plotting
keys, key_borders, idx_sort = _check_classification(classification, ncomp)
# -------------------------------------------
# get spatial profiles of all components
# Note: This will take a while
# -------------------------------------------
temp_plot_dir = _get_spatial_profiles(A_orig, keys, hemi_loc_txt, vertno=vertno,
subject=subject, subjects_dir=subjects_dir,
labels=labels, classification=classification,
percentile=percentile, mni_coord=mni_coords,
add_foci=add_foci, fnout=fnout)
# -------------------------------------------
# get spectral profiles of all components
# Note: This will take a while
# -------------------------------------------
average_power_all, freqs, vmin, vmax = \
_get_spectral_profile(temporal_envelope, tpre, sfreq, flow, fhigh, bar_plot=bar_plot)
# check if bar plot should be used
# --> if yes estimate histogram data and normalize results
if bar_plot:
# generate an array to store the results
freq_heights = np.zeros((ntemp, ncomp, len(freqs)))
# loop over all conditions
for i_power, average_power in enumerate(average_power_all):
freq_heights[i_power, :, :] = np.sum(np.abs(average_power), axis=2)
# normalize to a range between 0 and 1
freq_heights /= np.max(freq_heights)
# ------------------------------------------
# now generate plot containing spatial,
# spectral and temporal profiles
# ------------------------------------------
# set some general parameter
plt.ioff()
nimg = int(np.ceil(ncomp/(1.0*ncomp_per_plot)))
idx_key = 0
nplot = list(range(ncomp_per_plot, nimg*ncomp_per_plot, ncomp_per_plot))
nplot.append(ncomp)
# generate image and its layout for plotting
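# (20 grid rows per component plus 10 rows per group heading; 10 columns
#  for the spatial profile plus 8 columns per condition)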
fig = plt.figure('FourierICA plots', figsize=(14 + ntemp * 8, 34))
n_keys = len(key_borders) if len(key_borders) > 0 else 1
gs = grd.GridSpec(ncomp_per_plot * 20 + n_keys * 10, 10 + ntemp * 8, wspace=0.1, hspace=0.05,
left=0.04, right=0.96, bottom=0.04, top=0.96)
# ------------------------------------------
# loop over the estimated number of images
# ------------------------------------------
for iimg in range(nimg):
# clear figure (to start with a white image in each loop)
plt.clf()
# estimate how many plots on current image
istart_plot = int(ncomp_per_plot * iimg)
# set idx_class parameter
idx_class = 1 if len(key_borders) == 0 else 0
# ------------------------------------------
# loop over all components which should be
# plotted on the current image
# ------------------------------------------
for icomp in range(istart_plot, nplot[iimg]):
# ----------------------------------------------
# check if key_boarders is set and should be
# written on the image
# ----------------------------------------------
if (icomp == istart_plot and len(key_borders) > 0) or \
((icomp + 1) in key_borders):
# adjust key-index
if (icomp + 1) in key_borders:
idx_key += 1
# add sub-plot with 'key_text'
p_text = fig.add_subplot(gs[20 * (icomp - istart_plot) + idx_class * 10: \
20 * (icomp - istart_plot) + 8 + idx_class * 10, 0:10])
p_text.text(0, 0, keys[idx_key-1], fontsize=25)
adjust_spines(p_text, [])
# adjust idx_class parameter
idx_class += 1
# ----------------------------------------------
# plot spatial profiles
# ----------------------------------------------
# read spatial profile image
fn_base = "IC%02d_spatial_profile.png" % (idx_sort[icomp] + 1)
fnin_img = join(temp_plot_dir, fn_base)
spat_tmp = misc.imread(fnin_img)
remove(fnin_img)
# rearrange image
x_size, y_size, _ = spat_tmp.shape
x_half, y_half = x_size // 2, y_size // 2
x_frame, y_frame = int(0.11 * x_half), int(0.01 * y_half)
spatial_profile = np.concatenate((spat_tmp[x_frame:(x_half - x_frame), y_frame:(y_half - y_frame), :],
spat_tmp[(x_half + x_frame):-x_frame, y_frame:(y_half - y_frame), :],
spat_tmp[(x_half + x_frame):-x_frame, (y_half + y_frame):-y_frame, :],
spat_tmp[x_frame:(x_half - x_frame), (y_half + y_frame):-y_frame, :]),
axis=1)
# set plot position and plot image
p1 = fig.add_subplot(
gs[20 * (icomp - istart_plot) + idx_class * 10:20 * (icomp - istart_plot) + 15 + idx_class * 10, 0:10])
p1.imshow(spatial_profile)
# set some plotting options
p1.yaxis.set_ticks([])
p1.xaxis.set_ticks([])
y_name = "IC#%02d" % (idx_sort[icomp] + 1)
p1.set_ylabel(y_name, fontsize=18)
# ----------------------------------------------
# if given write MNI coordinates under the image
# ----------------------------------------------
if np.any(mni_coords):
# left hemisphere
plt.text(120, 360, mni_coords['lh'][int(idx_sort[int(icomp)])], color="black",
fontsize=18)
# right hemisphere
plt.text(850, 360, mni_coords['rh'][int(idx_sort[int(icomp)])], color="black",
fontsize=18)
# add location information of the component
# --> if located in 'both', 'left' or 'right' hemisphere
plt.text(-220, 100, hemi_loc_txt[int(idx_sort[int(icomp)])], color="red",
fontsize=25, rotation=90)
# ----------------------------------------------
# temporal/spectral profiles
# ----------------------------------------------
# loop over all time courses
for itemp in range(ntemp):
# ----------------------------------------------
# if given plot a headline above the time
# courses of each condition
# ----------------------------------------------
if icomp == istart_plot and len(temp_profile_names):
# add a sub-plot for the text
p_text = fig.add_subplot(gs[(idx_class - 1) * 10: 6 + (idx_class - 1) * 12,
(itemp) * 8 + 11:(itemp + 1) * 8 + 9])
# plot the text and adjust spines
p_text.text(0, 0, " " + temp_profile_names[itemp], fontsize=30)
adjust_spines(p_text, [])
# set plot position
if bar_plot:
p2 = plt.subplot(
gs[20 * (icomp - istart_plot) + idx_class * 11:20 * (icomp - istart_plot) + 13 + idx_class * 10,
itemp * 8 + 11:(itemp + 1) * 8 + 9])
else:
p2 = plt.subplot(
gs[20 * (icomp - istart_plot) + idx_class * 10:20 * (icomp - istart_plot) + 15 + idx_class * 10,
itemp * 8 + 11:(itemp + 1) * 8 + 9])
# extract temporal plotting information
times = (np.arange(win_ntsl) / sfreq + tpre)[5:-5]
idx_start = np.argmin(np.abs(times - time_range[0]))
idx_end = np.argmin(np.abs(times - time_range[1]))
# ----------------------------------------------
# plot spectral profile
# ----------------------------------------------
# check if global scaling should be used
if global_scaling:
vmin_cur, vmax_cur = np.min(vmin), np.max(vmax)
else:
vmin_cur, vmax_cur = vmin[icomp], vmax[icomp]
# show spectral profile
if bar_plot:
plt.bar(freqs, freq_heights[itemp, int(idx_sort[icomp]), :], width=1.0, color='cornflowerblue')
plt.xlim(flow, fhigh)
plt.ylim(0.0, 1.0)
# set some parameter
p2.set_xlabel("freq. [Hz]")
p2.set_ylabel("ampl. [a.u.]")
# ----------------------------------------------
# plot temporal profile on the same spot
# ----------------------------------------------
ax = plt.twiny()
ax.set_xlabel("time [s]")
ax.plot(times[idx_start:idx_end], 0.5+temporal_envelope_mean[itemp][0][int(idx_sort[icomp]), idx_start:idx_end],
color='red', linewidth=3.0)
ax.set_xlim(times[idx_start], times[idx_end])
ax.set_ylim(0.0, 1.0)
else:
average_power = average_power_all[itemp][int(idx_sort[icomp])]
extent = (times[idx_start], times[idx_end], freqs[0], freqs[-1])
p2.imshow(average_power, extent=extent, aspect="auto", origin="lower",
picker=False, cmap='RdBu_r', vmin=vmin_cur,
vmax=vmax_cur)
# set some parameter
p2.set_xlabel("time [s]")
p2.set_ylabel("freq. [Hz]")
# ----------------------------------------------
# plot temporal profile on the same spot
# ----------------------------------------------
ax = plt.twinx()
ax.set_xlim(times[idx_start], times[idx_end])
ax.set_ylim(ylim_temp)
ax.set_ylabel("ampl. [a.u.]")
ax.plot(times[idx_start:idx_end], temporal_envelope_mean[itemp][0][int(idx_sort[icomp]), idx_start:idx_end],
color='black', linewidth=3.0)
# ----------------------------------------------
# finally plot a color bar
# ----------------------------------------------
if not bar_plot:
# first normalize the color table
norm = Normalize(vmin=np.round(vmin_cur, 2), vmax=np.round(vmax_cur, 2))
sm = plt.cm.ScalarMappable(cmap='RdBu_r', norm=norm)
sm.set_array(np.linspace(vmin_cur, 1.0))
# estimate position of the color bar
xpos = 0.405 + 0.5/(ntemp + 1.0)
if n_keys > 1:
cbaxes = fig.add_axes([xpos, 0.135, 0.2, 0.006])
else:
cbaxes = fig.add_axes([xpos, 0.03, 0.2, 0.006])
ticks_fac = (vmax_cur - vmin_cur) * 0.3333
ticks = np.round([vmin_cur, vmin_cur + ticks_fac, vmax_cur - ticks_fac, vmax_cur], 2)
# ticks = [-1.0, -0.5, 0.0, 0.5, 1.0]
# now plot color bar
cb = plt.colorbar(sm, ax=p2, cax=cbaxes, use_gridspec=False,
orientation='horizontal', ticks=ticks,
format='%1.2g')
cb.ax.tick_params(labelsize=18)
# ----------------------------------------------
# save image
# ----------------------------------------------
if fnout:
fnout_complete = '%s_%02d.png' % (fnout, iimg + 1)
plt.savefig(fnout_complete, format='png', dpi=300)
# close plot and set plotting back to screen
plt.close('FourierICA plots')
plt.ion()
# remove temporary directory for
# spatial profile plots
if exists(temp_plot_dir):
rmdir(temp_plot_dir)
return mni_coords, classification, labels
| 39.605234 | 132 | 0.505152 | # Authors: Lukas Breuer <[email protected]>
"""
----------------------------------------------------------------------
--- jumeg.decompose.fourier_ica_plot ---------------------------------
----------------------------------------------------------------------
autor : Lukas Breuer
email : [email protected]
last update: 17.11.2016
version : 1.1
----------------------------------------------------------------------
This is a simple implementation to plot the results achieved by
applying FourierICA
----------------------------------------------------------------------
"""
#######################################################
# #
# plotting functions for FourierICA #
# #
#######################################################
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Simple function to adjust axis in plots
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def adjust_spines(ax, spines, labelsize=10):
"""
Simple function to adjust axis in plots
Parameters
----------
ax: axis object
Plot object which should be adjusted
spines: list of strings ['bottom', 'left']
Name of the axis which should be adjusted
labelsize: integer
Font size for the x- and y-axis labels
"""
for loc, spine in list(ax.spines.items()):
if loc in spines:
spine.set_position(('outward', 4)) # outward by 4 points
# spine.set_smart_bounds(True)
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
ax.tick_params(axis='x', labelsize=labelsize)
ax.tick_params(axis='y', labelsize=labelsize)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to generate automatically combined labels
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_combined_labels(subject='fsaverage', subjects_dir=None,
parc='aparc.a2009s'):
"""
Helper function to combine labels automatically
according to previous studies.
Parameters
----------
subject: string containing the subjects name
default: subject='fsaverage'
subjects_dir: Subjects directory. If not given the
system variable SUBJECTS_DIR is used
default: subjects_dir=None
parc: name of the parcellation to use for reading
in the labels
default: parc='aparc.a2009s'
Return
------
label_keys: names of the new labels
labels: list containing the combined labels
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne import read_labels_from_annot
import numpy as np
from os.path import join
# ------------------------------------------
# define labels based on previous studies
# ------------------------------------------
# to get more information about the label names and their
# locations check the following publication:
# Destrieux et al. (2010), Automatic parcellation of human
# cortical gyri and sulci using standard anatomical nomenclature,
# NeuroImage, DOI: 10.1016/j.neuroimage.2010.06.010
label_combinations = {
'auditory': ['G_temp_sup-G_T_transv', 'G_temp_sup-Plan_polar',
'Lat_Fis-post'],
'broca': ['G_front_inf-Opercular', 'G_front_inf-Triangul',
'Lat_Fis-ant-Vertical'],
'cingulate': ['G_cingul-Post-dorsal', 'G_cingul-Post-ventral',
'G_and_S_cingul-Ant', 'G_and_S_cingul-Mid-Ant',
'G_and_S_cingul-Mid-Post', 'S_pericallosal',
'cingul-Post-ventral'],
'frontal': ['G_and_S_frontomargin', 'G_and_S_transv_frontopol',
'G_front_inf-Orbital', 'G_front_middle',
'G_front_sup', 'G_orbital',
'G_rectus', 'G_subcallosal',
'Lat_Fis-ant-Horizont', 'S_front_inf',
'S_front_middle', 'S_front_sup',
'S_orbital_lateral', 'S_orbital-H_Shaped',
'S_suborbital'],
'gustatory': ['G_and_S_subcentral'],
'insula': ['S_circular_insula_ant', 'S_circular_insula_inf',
'S_circular_insula_sup', 'G_Ins_lg_and_S_cent_ins',
'G_insular_short'],
'motor': ['G_precentral', 'S_precentral-sup-part',
'S_precentral-inf-part', 'S_central'],
'olfactory': ['S_temporal_transverse'],
'somatosensory': ['G_postcentral', 'S_postcentral'],
'somatosensory associated': ['G_and_S_paracentral', 'G_pariet_inf-Angular',
'G_parietal_sup', 'S_cingul-Marginalis',
'S_intrapariet_and_P_trans'],
'temporal': ['G_oc-temp_lat-fusifor', 'G_oc-temp_med-Parahip',
'G_temp_sup-Plan_polar', 'G_temporal_inf',
'G_temporal_middle', 'G_temp_sup-Lateral',
'Pole_temporal', 'S_collat_transv_ant',
'S_oc-temp_lat', 'S_oc-temp_med_and_Lingual',
'S_temporal_inf', 'S_temporal_sup'],
'vision': ['G_and_S_occipital_inf', 'G_occipital_middle',
'G_oc-temp_med-Lingual', 'S_collat_transv_post',
'S_oc_sup_and_transversal', 'S_occipital_ant',
'S_oc_middle_and_Lunatus'],
'visual': ['G_cuneus', 'G_precuneus',
'S_calcarine', 'S_parieto_occipital',
'G_occipital_sup', 'Pole_occipital',
'S_subparietal'],
'wernicke': ['G_pariet_inf-Supramar', 'G_temp_sup-Plan_tempo',
'S_interm_prim-Jensen']
}
label_keys = list(label_combinations.keys())
labels = []
# ------------------------------------------
# combine labels
# ------------------------------------------
# loop over both hemispheres
for hemi in ['lh', 'rh']:
# read all labels in
labels_all = read_labels_from_annot(subject, parc=parc, hemi=hemi,
surf_name='inflated',
subjects_dir=subjects_dir,
verbose=False)
# loop over all labels to extract label names
label_names = []
for label in labels_all:
label_names.append(label.name)
# ------------------------------------------
# now generate labels based on previous
# studies
# ------------------------------------------
# loop over all previously defined labels
for label_key in label_keys:
# get name of all labels related to the current one
label_members = label_combinations[label_key]
label_members = [x+'-'+hemi for x in label_members]
# check which labels we need for the current one
idx_labels_want = np.where(np.in1d(label_names, label_members))[0]
labels_want = [labels_all[i] for i in idx_labels_want]
# combine labels
label_new = np.sum(labels_want)
label_new.name = label_key + '-' + hemi
# fill the surface between sources
label_new.values.fill(1.0)
label_new.smooth(subject=subject, subjects_dir=subjects_dir)
# save new label
fnout = join(subjects_dir, subject, 'label',
hemi + '.' + label_key + '.label')
label_new.save(fnout)
labels.append(label_new)
return label_keys, labels
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to get the anatomical label to a given vertex
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_anat_label_name(vertex, hemi, labels=None, subject='fsaverage',
subjects_dir=None, parc='aparc.a2009s'):
"""
    Helper function to get the name of the anatomical
    label for a given vertex
Parameters
----------
vertex: integer containing the vertex number
hemi: string containing the information in which
hemisphere the vertex is located. Should be
either 'lh' or 'rh'
labels: labels to use for checking. If not given
the labels are read from the subjects directory
default: labels=None
subject: string containing the subjects name
default: subject='fsaverage'
subjects_dir: Subjects directory. If not given the
system variable SUBJECTS_DIR is used
default: subjects_dir=None
parc: name of the parcellation to use for reading
in the labels
default: parc='aparc.a2009s'
Return
------
name: string containing the name of the anatomical
label related to the given vertex
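    Example
    -------
    A minimal usage sketch; the vertex number and the use of 'fsaverage'
    are only illustrative and a valid FreeSurfer subjects directory is
    assumed:
        from os import environ
        subjects_dir = environ.get('SUBJECTS_DIR')
        name = get_anat_label_name(1234, 'lh', subject='fsaverage',
                                   subjects_dir=subjects_dir)
        # 'name' is e.g. 'G_precentral-lh', or 'unknown-lh' if no label
        # of the parcellation contains the vertex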
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne import read_labels_from_annot
import numpy as np
# ------------------------------------------
# check input parameter
# ------------------------------------------
# check if labels are given or must be read
if not labels:
labels = read_labels_from_annot(subject, parc=parc, hemi=hemi,
surf_name='inflated',
subjects_dir=subjects_dir,
verbose=False)
# ------------------------------------------
# loop over labels to find corresponding
# label
# ------------------------------------------
name = ''
for label in labels:
if label.hemi == hemi:
# get vertices of current label
label_vert = np.in1d(np.array(vertex), label.vertices)
if label_vert:
name = label.name
break
if name == '':
name = 'unknown-' + hemi
return name
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# function to get the MNI-coordinate(s) to a given
# FourierICA component
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_mni_coordinates(A_orig,
subject='fsaverage', subjects_dir=None,
parc='aparc.a2009s', percentile=97,
combine_labels=True):
"""
Helper function to get the MNI-coordinate(s) to a given
    FourierICA component. The decision whether a component has
    activation in both hemispheres or only in one is made
    as follows: estimate for each component an activation
threshold based on the given percentile. Next, estimate
the total number of voxels in the component which are
above the estimated threshold. Now check if at least 20%
of the total number of voxels above threshold are in each
hemisphere. If yes both hemispheres are marked as active,
otherwise only one.
Parameters
----------
A_orig: array
2D-mixing-array (nvoxel, ncomp) estimated
when applying FourierICA
subject: string containing the subjects name
default: subject='fsaverage'
subjects_dir: Subjects directory. If not given the
system variable SUBJECTS_DIR is used
default: subjects_dir=None
parc: name of the parcellation to use for reading
in the labels
default: parc='aparc.a2009s'
percentile: integer
value between 0 and 100 used to set a lower
limit for the shown intensity range of the
spatial plots
combine_labels: if set labels are combined automatically
according to previous studies
default: combine_labels=True
Return
------
mni_coords: dictionary
The dictionary contains two elements: 'rh' and 'lh',
each of which containing a list with the MNI
coordinates as string.
Note, each list contains the same number of
elements as components are given. If there is no MNI
coordinate for a component an empty string is used, e.g.
for two components
{'rh': ['(37.55, 1.58, -21.71)', '(44.78, -10.41, 27.89)'],
'lh': ['(-39.43, 5.60, -27.80)', '']}
hemi_loc_txt: list
containing for each FourierICA component to which region
it spatially belongs ('left', 'right' or 'both')
classification: dictionary
classification object. It is a dictionary containing
two sub-dictionaries 'lh' and 'rh' (for left and
right hemisphere). In both sub-dictionaries the
information about the groups is stored, i.e. a
group/region name + the information which components
are stored in this group (as indices). An example
for 6 components might look like this:
{'rh': {'somatosensory': [1, 3], 'cingulate': [4, 5]},
'lh': {'somatosensory': [1, 2], 'cingulate': [0, 5]}}
labels: list of strings
names of the labels which are involved in this data set
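    Example
    -------
    Minimal sketch (assumes 'A_orig' is the mixing matrix of a previous
    FourierICA decomposition and that SUBJECTS_DIR points to a valid
    FreeSurfer directory):
        mni_coords, hemi_loc_txt, classification, labels = get_mni_coordinates(
            A_orig, subject='fsaverage', percentile=97, combine_labels=True)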
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne import vertex_to_mni
import numpy as np
from os import environ
import types
# -------------------------------------------
# check input parameter
# -------------------------------------------
if not subjects_dir:
subjects_dir = environ.get('SUBJECTS_DIR')
# -------------------------------------------
# generate spatial profiles
# (using magnitude and phase)
# -------------------------------------------
if isinstance(A_orig[0, 0], complex):
A_orig_mag = np.abs(A_orig)
else:
A_orig_mag = A_orig
# -------------------------------------------
# set some parameters
# -------------------------------------------
nvoxel, ncomp = A_orig_mag.shape
nvoxel_half = int(nvoxel / 2)
hemi = ['lh', 'rh']
hemi_names = ['left ', 'right', 'both ']
hemi_indices = [[0, nvoxel_half], [nvoxel_half, -1]]
hemi_loc_txt = np.array([' '] * ncomp)
hemi_loc = np.zeros(ncomp)
# -------------------------------------------
# generate structures to save results
# -------------------------------------------
# generate dictionary to save MNI coordinates
mni_coords = {'rh': [''] * ncomp, 'lh': [''] * ncomp}
# ------------------------------------------
# check if labels should be combined
# automatically
# ------------------------------------------
if combine_labels:
label_names, labels = get_combined_labels(subject=subject,
subjects_dir=subjects_dir,
parc=parc)
# generate empty classification dictionary
class_keys = label_names[:]
class_keys.append('unknown')
classification = {'lh': {key: [] for key in class_keys},
'rh': {key: [] for key in class_keys}}
# if not generate empty variables
else:
label_names, labels = None, None
classification = {}
# ------------------------------------------
# loop over all components
# ------------------------------------------
for icomp in range(ncomp):
# ------------------------------------------
# extract maxima in the spatial profile of
# the current component separately for both
# hemispheres
# ------------------------------------------
idx_ver_max_lh = np.argmax(A_orig_mag[:nvoxel_half, icomp])
idx_ver_max_rh = np.argmax(A_orig_mag[nvoxel_half:, icomp])
# ------------------------------------------
# check for both maxima if they are
# significant
# ------------------------------------------
        # set some parameters
threshold = np.percentile(A_orig_mag[:, icomp], percentile)
nidx_above = len(np.where(A_orig_mag[:, icomp] > threshold)[0])
cur_label_name = []
# loop over both hemispheres
for idx_hemi, idx_vertex_max in enumerate([idx_ver_max_lh, idx_ver_max_rh]):
# get the number of vertices above the threshold
# in the current hemisphere
nidx_above_hemi = len(np.where(A_orig_mag[hemi_indices[idx_hemi][0]:hemi_indices[idx_hemi][1],
icomp] > threshold)[0])
# check if at least 20% of all vertices above the threshold
# are in the current hemisphere
if nidx_above_hemi * 5 > nidx_above:
# get MNI-coordinate
mni_coord = vertex_to_mni(idx_vertex_max, idx_hemi, subject,
subjects_dir=subjects_dir)[0]
# store results in structures
mni_coords[hemi[idx_hemi]][icomp] = \
'(' + ', '.join(["%2.2f" % x for x in mni_coord]) + ')'
# store hemisphere information
hemi_loc[icomp] += idx_hemi + 1.0
# ------------------------------------------
# get MNI-coordinate to vertex as well as
# the name of the corresponding anatomical
# label
# ------------------------------------------
anat_name = get_anat_label_name(idx_vertex_max, hemi[idx_hemi],
subject=subject, subjects_dir=subjects_dir,
parc=parc, labels=labels)
cur_label_name.append(anat_name[:-3])
else:
cur_label_name.append(' ')
# ------------------------------------------
# check which results must be saved
# ------------------------------------------
if combine_labels:
# check if activation was found in both hemispheres
# --> if not we can directly save the results
if ' ' in cur_label_name:
# adjust classification dictionary
if cur_label_name[0] == ' ':
classification[hemi[1]][cur_label_name[1]].append(icomp)
else:
classification[hemi[0]][cur_label_name[0]].append(icomp)
# --> otherwise we have to make sure that we group the
# component only into one region
else:
# check if both vertices are in the same anatomical location
# --> then we have no problem
if cur_label_name[0] == cur_label_name[1]:
classification[hemi[0]][cur_label_name[0]].append(icomp)
classification[hemi[1]][cur_label_name[1]].append(icomp)
else:
# check if we have an unknown region being involved
                    # --> if yes choose the other one
if cur_label_name[0] == 'unknown':
classification[hemi[1]][cur_label_name[1]].append(icomp)
hemi_loc[icomp], mni_coords[hemi[0]][icomp] = 2, ''
elif cur_label_name[1] == 'unknown':
classification[hemi[0]][cur_label_name[0]].append(icomp)
hemi_loc[icomp], mni_coords[hemi[1]][icomp] = 1, ''
                    # otherwise choose the region with the strongest vertex
else:
if A_orig_mag[idx_ver_max_lh, icomp] > A_orig_mag[idx_ver_max_rh, icomp]:
classification[hemi[0]][cur_label_name[0]].append(icomp)
hemi_loc[icomp], mni_coords[hemi[1]][icomp] = 1, ''
else:
classification[hemi[1]][cur_label_name[1]].append(icomp)
hemi_loc[icomp], mni_coords[hemi[0]][icomp] = 2, ''
# ------------------------------------------
# adjust hemi_loc_txt if activity was found
# in both hemispheres
# ------------------------------------------
for idx, hemi_name in enumerate(hemi_names):
idx_change = np.where(hemi_loc == (idx + 1.0))[0]
hemi_loc_txt[idx_change] = hemi_name
# ------------------------------------------
# adjust label_names to only contain regions
# being involved in processing the current
# data
# ------------------------------------------
labels = []
for cur_hemi in hemi:
for key in label_names:
if classification[cur_hemi][key]:
labels.append(key)
labels = np.unique(labels).tolist()
return mni_coords, hemi_loc_txt, classification, labels
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to check if classification was
# performed prior to plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _check_classification(classification, ncomp):
"""
Helper function to check if classification was
performed prior to plotting
Parameters
----------
classification: dictionary
classification object from the group_ica_object.
It is a dictionary containing two sub-dictionaries
'lh' and 'rh' (for left and right hemisphere). In
both sub-dictionaries the information about the
groups is stored, i.e. a group/region name + the
information which components are stored in this
group
ncomp: integer
number of components
Return
------
keys: list containing the group names
key_borders: list containing the group borders, i.e.
the information where to plot a new group name
idx_sort: array containing the plotting order of
        the components, i.e. components belonging to one
group are plotted together
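    Example
    -------
    Sketch with a hand-made classification object for 4 components
    (component indices are purely illustrative):
        classification = {'lh': {'motor': [0, 2], 'unknown': [3]},
                          'rh': {'motor': [0], 'unknown': [1, 3]}}
        keys, key_borders, idx_sort = _check_classification(classification, 4)
        # keys -> ['motor', 'unknown'], key_borders -> [1, 3],
        # idx_sort -> [0, 2, 3, 1]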
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
import numpy as np
# ------------------------------------------
# check if classification was done
# ------------------------------------------
key_borders = []
if np.any(classification):
# initialize empty lists
idx_sort = []
keys_hemi = list(classification.keys())
# sort keys
keys = list(classification[keys_hemi[0]].keys())
keys.sort(key=lambda v: v.upper())
# set 'unknown' variables to the end
keys.remove('unknown')
keys.append('unknown')
# remove keys with empty entries
keys_want = []
for key in keys:
if classification[keys_hemi[0]][key] or\
classification[keys_hemi[1]][key]:
keys_want.append(key)
# loop over all keys
for key in keys_want:
# get indices to each class
idx_lh = classification[keys_hemi[0]][key]
idx_rh = classification[keys_hemi[1]][key]
# get indices of components in both hemispheres
idx_both = np.intersect1d(idx_lh, idx_rh)
# get indices of components only in right hemisphere
idx_only_rh = np.setdiff1d(idx_rh, idx_lh)
# get indices of components only in left hemisphere
idx_only_lh = np.setdiff1d(idx_lh, idx_rh)
# add components to list of sorted indices
idx_all = np.concatenate((idx_both, idx_only_rh, idx_only_lh))
idx_sort += idx_all.tolist()
key_borders.append(len(idx_all))
# add first border and estimate cumulative sum to
# have the right borders
key_borders = np.insert(key_borders, 0, 1)
key_borders = np.cumsum(key_borders)[:-1]
# ------------------------------------------
# if classification was not performed set
# some default values
# ------------------------------------------
else:
idx_sort = np.arange(ncomp)
keys_want = []
return keys_want, key_borders, idx_sort
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to handle time courses for plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_temporal_envelopes(fourier_ica_obj, W_orig, temporal_envelope=[],
src_loc_data=[], tICA=False, global_scaling=True,
win_length_sec=None, tpre=None, flow=None):
"""
    Helper function to get the temporal envelopes of
    the FourierICA components for plotting
Parameters
----------
fourier_ica_obj: FourierICA object generated
when applying jumeg.decompose.fourier_ica
W_orig: array
2D-demixing-array (ncomp x nvoxel) estimated
when applying FourierICA
temporal_envelope: list of arrays containing
the temporal envelopes. If the temporal
envelopes are already given here z-scoring
and mean estimation is performed
src_loc_data: array
3D array containing the source localization
data used for FourierICA estimation
(nfreq x nepochs x nvoxel). Only necessary
if temporal_envelope is not given.
tICA: bool
If set we know that temporal ICA was applied
        when estimating the FourierICA, i.e. when
        generating the temporal-envelopes the data
        does not need to be transformed from the
        Fourier domain to the time-domain
global_scaling: bool
If set all temporal-envelopes are globally
scaled. Otherwise each component is scaled
individually
win_length_sec: float or None
Length of the epoch window in seconds
tpre: float or None
Lower border (in seconds) of the time-window
used for generating/showing the epochs. If
'None' the value stored in 'fourier_ica_obj'
is used
flow: float, integer or None
Lower frequency border for generating the
temporal-envelope. If 'None' the frequency
border stored in 'fourier_ica_obj' is used
Return
------
temporal_envelope_mean: list containing the 2D
arrays of the mean temporal envelopes
of the components
temporal_envelope: list containing the 3D
arrays of the temporal envelopes of the
components. Necessary for estimating the
spectral profiles
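    Example
    -------
    Minimal sketch (assumes 'fourier_ica_obj', 'W_orig' and 'src_loc_data'
    come from a previous FourierICA run on source-localized epochs):
        temporal_envelope_mean, temporal_envelope = _get_temporal_envelopes(
            fourier_ica_obj, W_orig, src_loc_data=src_loc_data,
            global_scaling=True)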
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne.baseline import rescale
import numpy as np
from scipy import fftpack
# -------------------------------------------
# check input parameter
# -------------------------------------------
    if tpre is None:
tpre = fourier_ica_obj.tpre
    if flow is None:
flow = fourier_ica_obj.flow
if not win_length_sec:
win_length_sec = fourier_ica_obj.win_length_sec
# estimate some simple parameter
sfreq = fourier_ica_obj.sfreq
ncomp, nvoxel = W_orig.shape
win_ntsl = int(np.floor(sfreq * win_length_sec))
startfftind = int(np.floor(flow * win_length_sec))
# -------------------------------------------
# check if temporal envelope is already
# given or should be estimated
# -------------------------------------------
if temporal_envelope == []:
# -------------------------------------------
# check if 'src_loc_data' is given...
# if not throw an error
# -------------------------------------------
if src_loc_data == []:
print(">>> ERROR: You have to provide either the 'temporal_envelope' or")
print(">>> 'src_loc_data'. Otherwise no temporal information can be plotted!")
import pdb
pdb.set_trace()
# -------------------------------------------
# get independent components
# -------------------------------------------
nfreq, nepochs, nvoxel = src_loc_data.shape
        act = np.zeros((ncomp, nepochs, nfreq), dtype=complex)
if tICA:
win_ntsl = nfreq
temporal_envelope = np.zeros((nepochs, ncomp, win_ntsl))
        fft_act = np.zeros((ncomp, win_ntsl), dtype=complex)
# loop over all epochs to get time-courses from
# source localized data by inverse FFT
for iepoch in range(nepochs):
# normalize data
src_loc_zero_mean = (src_loc_data[:, iepoch, :] - np.dot(np.ones((nfreq, 1)), fourier_ica_obj.dmean)) / \
np.dot(np.ones((nfreq, 1)), fourier_ica_obj.dstd)
act[:ncomp, iepoch, :] = np.dot(W_orig, src_loc_zero_mean.transpose())
#act[ncomp:, iepoch, :] = np.dot(W_orig, src_loc_zero_mean.transpose())
if tICA:
temporal_envelope[iepoch, :, :] = act[:, iepoch, :].real
else:
# -------------------------------------------
# generate temporal profiles
# -------------------------------------------
# apply inverse STFT to get temporal envelope
fft_act[:, startfftind:(startfftind + nfreq)] = act[:, iepoch, :]
temporal_envelope[iepoch, :, :] = fftpack.ifft(fft_act, n=win_ntsl, axis=1).real
# -------------------------------------------
# average temporal envelope
# -------------------------------------------
if not isinstance(temporal_envelope, list):
temporal_envelope = [[temporal_envelope]]
ntemp = len(temporal_envelope)
temporal_envelope_mean = np.empty((ntemp, 0)).tolist()
times = (np.arange(win_ntsl) / sfreq + tpre)
# -------------------------------------------
# perform baseline correction
# -------------------------------------------
for itemp in range(ntemp):
for icomp in range(ncomp):
temporal_envelope[itemp][0][:, icomp, :] = rescale(temporal_envelope[itemp][0][:, icomp, :],
times, (None, 0), 'zscore')
# -------------------------------------------
# estimate mean from temporal envelopes
# -------------------------------------------
for itemp in range(ntemp):
temporal_envelope_mean[itemp].append(np.mean(temporal_envelope[itemp][0], axis=0)[:, 5:-5])
# -------------------------------------------
# check if global scaling should be used
# -------------------------------------------
# if not scale each component separately between -0.5 and 0.5
if not global_scaling:
for icomp in range(ncomp):
min_val = np.min([temporal_envelope_mean[0][0][icomp, :], temporal_envelope_mean[1][0][icomp, :]])
max_val = np.max([temporal_envelope_mean[0][0][icomp, :], temporal_envelope_mean[1][0][icomp, :]])
scale_fact = 1.0 / (max_val - min_val)
for itemp in range(ntemp):
temporal_envelope_mean[itemp][0][icomp, :] = np.clip(
scale_fact * temporal_envelope_mean[itemp][0][icomp, :]
- scale_fact * min_val - 0.5, -0.5, 0.5)
# if global scaling should be used, scale all
# data between -0.5 and 0.5
else:
# scale temporal envelope between -0.5 and 0.5
min_val = np.min(temporal_envelope_mean)
max_val = np.max(temporal_envelope_mean)
scale_fact = 1.0 / (max_val - min_val)
for itemp in range(ntemp):
temporal_envelope_mean[itemp][0] = np.clip(scale_fact * temporal_envelope_mean[itemp][0]
- scale_fact * min_val - 0.5, -0.5, 0.5)
return temporal_envelope_mean, temporal_envelope
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to handle spatial profiles for plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_spatial_profiles(A_orig, keys, idx_text, vertno=[],
subject='fsaverage', subjects_dir=None,
labels=None, classification={}, percentile=97,
mni_coord=[], add_foci=False, fnout=None):
"""
Helper function to get/generate the spatial
profiles of the FourierICA components for
plotting
Parameters
----------
A_orig: array
2D-mixing-array (nvoxel, ncomp) estimated
when applying FourierICA
keys: list containing the group names
idx_text: list containing the information in which
brain hemisphere a component is mainly
located (could be either 'both', 'left', 'right'
or ' ' if no classification was performed before
plotting)
vertno: list
list containing two arrays with the order
of the vertices. If not given it will be
generated in this routine
subject: string
string containing the subjects ID
subjects_dir: string
string containing the subjects directory
path
labels: list of strings
names of the labels which should be plotted.
Note, the prefix 'lh.' and the suffix '.label'
are automatically added
classification: dictionary
classification object from the group_ica_object.
It is a dictionary containing two sub-dictionaries
'lh' and 'rh' (for left and right hemisphere). In
both sub-dictionaries the information about the
groups is stored, i.e. a group/region name + the
information which components are stored in this
group
percentile: integer
value between 0 and 100 used to set a lower
limit for the shown intensity range of the
spatial plots
mni_coord: list of strings
if given the MNI coordinates are plotted
beneath the spatial profiles
add_foci: bool
if True and the MNI coordinates are given
        a focus is plotted at the position of the
MNI coordinate
fnout: string or None
if labels and classification is given the
output filename of the brain plot containing
all labels. If 'None' the results are not stored
Return
------
temp_plot_dir: string
directory where the spatial profiles are
stored
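    Example
    -------
    Sketch (assumes 'A_orig', 'keys', 'hemi_loc_txt' and 'subjects_dir'
    were obtained as in plot_results_src_space(); PySurfer and Mayavi
    must be available for the rendering):
        temp_plot_dir = _get_spatial_profiles(A_orig, keys, hemi_loc_txt,
                                              subject='fsaverage',
                                              subjects_dir=subjects_dir,
                                              percentile=97)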
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from matplotlib import gridspec as grd
from matplotlib import pyplot as plt
from mayavi import mlab
from mne.source_estimate import _make_stc
import numpy as np
from os import environ, makedirs
from os.path import exists, join
import re
from scipy import misc
from surfer import set_log_level
import types
    # set log level to 'CRITICAL'
set_log_level('CRITICAL')
import mayavi
mayavi.mlab.options.offscreen = True
# -------------------------------------------
# create temporary directory to save plots
# of spatial profiles
# -------------------------------------------
temp_plot_dir = join(subjects_dir, subject, 'temp_plots')
if not exists(temp_plot_dir):
makedirs(temp_plot_dir)
# -------------------------------------------
# generate spatial profiles
# (using magnitude and phase)
# -------------------------------------------
if not subjects_dir:
subjects_dir = environ.get('SUBJECTS_DIR')
if isinstance(A_orig[0, 0], complex):
A_orig_mag = np.abs(A_orig)
else:
A_orig_mag = A_orig
nvoxel, ncomp = A_orig_mag.shape
# -------------------------------------------
# check if vertno is given, otherwise
# generate it
# -------------------------------------------
if not np.any(vertno):
        vertno = [np.arange(nvoxel // 2), np.arange(nvoxel // 2)]
# -------------------------------------------
# check if labels should be plotted and if
# classification was already performed
# --> if yes define some colors for the
# labels
# -------------------------------------------
if labels and classification:
colors = ['green', 'red', 'cyan', 'yellow', 'mediumblue',
'magenta', 'chartreuse', 'indigo', 'sandybrown',
'slateblue', 'purple', 'lightpink', 'springgreen',
'orange', 'sienna', 'cadetblue', 'crimson',
'maroon', 'powderblue', 'deepskyblue', 'olive']
# -------------------------------------------
# loop over all components to generate
# spatial profiles
# -------------------------------------------
for icomp in range(ncomp):
# -------------------------------------------
# plot spatial profile
# -------------------------------------------
# generate stc-object from current component
A_cur = A_orig_mag[:, icomp]
src_loc = _make_stc(A_cur[:, np.newaxis], vertices=vertno, tmin=0, tstep=1,
subject=subject)
# define current range (Xth percentile)
fmin = np.percentile(A_cur, percentile)
fmax = np.max(A_cur)
fmid = 0.5 * (fmin + fmax)
clim = {'kind': 'value',
'lims': [fmin, fmid, fmax]}
# plot spatial profiles
brain = src_loc.plot(surface='inflated', hemi='split', subjects_dir=subjects_dir,
config_opts={'cortex': 'bone'}, views=['lateral', 'medial'],
time_label=' ', colorbar=False, clim=clim)
# check if foci should be added to the plot
if add_foci and np.any(mni_coord):
for i_hemi in ['lh', 'rh']:
mni_string = mni_coord[i_hemi][icomp]
# if 'mni_string' is not empty (it might be empty if activity
# can only be found in one hemisphere) plot a foci
if mni_string != "":
                    mni_float = list(map(float, re.findall(r"[-+]?\d*\.\d+|\d+", mni_string)))
brain.add_foci(mni_float, coords_as_verts=False, hemi=i_hemi, color='chartreuse',
scale_factor=1.5, map_surface='white')
# -------------------------------------------
# check if labels should be plotted
# -------------------------------------------
if labels and classification:
# import module to read in labels
from mne import read_label
# get path to labels
dir_labels = join(subjects_dir, subject, 'label')
# identify in which group the IC is classified
hemi = 'rh' if idx_text[icomp] == 'right' else 'lh'
# read in the corresponding label
for idx_key, key in enumerate(keys):
if icomp in classification[hemi][key]:
label_name = ".%s.label" % key
color = colors[idx_key]
break
# loop over both hemispheres to read the label in and plot it
hemi = ['lh', 'rh'] if idx_text[icomp] == 'both ' else [hemi]
for hemi_cur in hemi:
label = read_label(join(dir_labels, hemi_cur + label_name), subject=subject)
brain.add_label(label, borders=False, hemi=hemi_cur, color=color, alpha=0.1)
brain.add_label(label, borders=True, hemi=hemi_cur, color=color)
# save results
fn_base = "IC%02d_spatial_profile.png" % (icomp+1)
fnout_img = join(temp_plot_dir, fn_base)
brain.save_image(fnout_img)
# close mlab figure
mlab.close(all=True)
# -------------------------------------------
# also generate one plot with all labels
# -------------------------------------------
if labels and classification:
# set clim in a way that no activity can be seen
# (Note: we only want to see the labels)
clim = {'kind': 'value',
'lims': [fmax, 1.5 * fmax, 2.0 * fmax]}
# generate plot
brain = src_loc.plot(surface='inflated', hemi='split', subjects_dir=subjects_dir,
config_opts={'cortex': 'bone'}, views=['lateral', 'medial'],
time_label=' ', colorbar=False, clim=clim, background='white')
# loop over all labels
for idx_key, key in enumerate(keys):
label_name = ".%s.label" % key
color = colors[idx_key]
            # loop over both hemispheres in order to plot the labels
for hemi in ['lh', 'rh']:
label = read_label(join(dir_labels, hemi + label_name), subject=subject)
brain.add_label(label, borders=False, hemi=hemi, color=color, alpha=0.6)
# save results
if fnout:
fnout_img = '%s_labels.png' % fnout
brain.save_image(fnout_img)
# close mlab figure
mlab.close(all=True)
# -------------------------------------------
        # now adjust the label plot appropriately
# -------------------------------------------
# read spatial profile image
spat_tmp = misc.imread(fnout_img)
# rearrange image
x_size, y_size, _ = spat_tmp.shape
        x_half, y_half = x_size // 2, y_size // 2
x_frame, y_frame = int(0.11 * x_half), int(0.01 * y_half)
spatial_profile = np.concatenate((spat_tmp[x_frame:(x_half - x_frame), y_frame:(y_half - y_frame), :],
spat_tmp[(x_half + x_frame):-x_frame, y_frame:(y_half - y_frame), :],
spat_tmp[(x_half + x_frame):-x_frame, (y_half + y_frame):-y_frame, :],
spat_tmp[x_frame:(x_half - x_frame), (y_half + y_frame):-y_frame, :]),
axis=1)
# plot image
plt.ioff()
fig = plt.figure('Labels plots', figsize=(17, 3))
gs = grd.GridSpec(1, 30, wspace=0.00001, hspace=0.00001,
left=0.0, right=1.0, bottom=0.0, top=1.0)
# set plot position and plot image
p1 = fig.add_subplot(gs[0, 0:26])
p1.imshow(spatial_profile)
adjust_spines(p1, [])
# add label names
keys_fac = 0.8/len(keys)
keys_split = 0
p_text = fig.add_subplot(gs[0, 26:30])
keys_sort_idx = np.argsort(keys)
for idx_key in range(len(keys)):
key = keys[keys_sort_idx[idx_key]]
# check if string should be split
if len(key) > 21 and ' ' in key:
p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key.split()[0]+'-',
fontsize=13, color=colors[keys_sort_idx[idx_key]])
keys_split += 1
p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key.split()[1],
fontsize=13, color=colors[keys_sort_idx[idx_key]])
else:
p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key, fontsize=13,
color=colors[keys_sort_idx[idx_key]])
adjust_spines(p_text, [])
plt.savefig(fnout_img, dpi=300)
# close plot and set plotting back to screen
        plt.close('Labels plots')
plt.ion()
mayavi.mlab.options.offscreen = False
return temp_plot_dir
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to get spectral profiles for plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_spectral_profile(temporal_envelope, tpre,
sfreq, flow, fhigh,
bar_plot=False,
use_multitaper=False):
"""
Helper function to get the spectral-profile of the
temporal-envelopes of the FourierICA components
for plotting
Parameters
----------
temporal_envelope: list of arrays containing
the temporal envelopes.
tpre: float
Lower border (in seconds) of the time-window
used for generating/showing the epochs. If
'None' the value stored in 'fourier_ica_obj'
is used
sfreq: float
Sampling frequency of the data
flow: float or integer
Lower frequency range for time frequency analysis
fhigh: float or integer
Upper frequency range for time frequency analysis
bar_plot: boolean
if set the number of time points for time-frequency
estimation is reduced in order to save memory and
computing-time
use_multitaper: boolean
        If set 'multitaper' is used for time frequency
analysis, otherwise 'stockwell'
Return
------
average_power_all: list containing the averaged
frequency power of all components
freqs: array containing the frequencies used to
calculate the frequency power
vmin: lower frequency range for plotting
vmax: upper frequency range for plotting
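    Example
    -------
    Sketch (assumes 'temporal_envelope' was returned by
    _get_temporal_envelopes(); the numeric values are only illustrative):
        average_power_all, freqs, vmin, vmax = _get_spectral_profile(
            temporal_envelope, tpre=-0.2, sfreq=250., flow=4., fhigh=34.)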
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from mne.baseline import rescale
from mne.time_frequency._stockwell import _induced_power_stockwell
import numpy as np
# ------------------------------------------
# define some parameter
# ------------------------------------------
ntemp = len(temporal_envelope)
ncomp = temporal_envelope[0][0].shape[1]
win_ntsl = temporal_envelope[0][0].shape[-1]
average_power_all = np.empty((ntemp, 0)).tolist()
vmin = np.zeros(ncomp)
vmax = np.zeros(ncomp)
# define some time parameter
times = np.arange(win_ntsl) / sfreq + tpre
idx_start = np.argmin(np.abs(times - tpre))
idx_end = np.argmin(np.abs(times - (tpre + win_ntsl/sfreq)))
if bar_plot:
decim = 10
else:
decim = 1
# ------------------------------------------
# loop over all time courses, i.e.
# conditions, and all components
# ------------------------------------------
for itemp in range(ntemp):
for icomp in range(ncomp):
# extract some information from the temporal_envelope
nepochs = temporal_envelope[itemp][0].shape[0]
# ------------------------------------------
# perform time frequency analysis
# ------------------------------------------
# prepare data for frequency analysis
data_stockwell = temporal_envelope[itemp][0][:, icomp, idx_start:idx_end].\
reshape((nepochs, 1, idx_end-idx_start))
data_stockwell = data_stockwell.transpose([1, 0, 2])
# mirror data to reduce transient frequencies
data_stockwell = np.concatenate((data_stockwell[:, :, 50:0:-1],
data_stockwell, data_stockwell[:, :, -1:-51:-1]), axis=-1)
n_fft = data_stockwell.shape[-1]
# check if 'multitaper' or 'stockwell' should be
# used for time-frequency analysis
if use_multitaper:
from mne.time_frequency.tfr import _compute_tfr
n_cycle = 3.0
if (10.0 * n_cycle*sfreq)/(2.0 * np.pi * flow) > n_fft:
flow *= ((10.0 * n_cycle*sfreq)/(2.0 * np.pi * flow))/n_fft
flow = np.ceil(flow)
freqs = np.arange(flow, fhigh)
power_data = _compute_tfr(data_stockwell, freqs, sfreq=sfreq, use_fft=True,
n_cycles=n_cycle, zero_mean=True, decim=decim,
output='power', method='multitaper',
time_bandwidth=10)
else:
power_data, _, freqs = _induced_power_stockwell(data_stockwell, sfreq=sfreq, fmin=flow,
fmax=fhigh, width=0.6, decim=1, n_fft=n_fft,
return_itc=False, n_jobs=4)
# perform baseline correction (and remove mirrored parts from data)
power_data = rescale(power_data[:, :, int(50/decim):-int(50/decim)],
times[idx_start:idx_end][0:-1:decim], (None, 0), 'mean')
average_power = np.mean(power_data, axis=0)
# ------------------------------------------
# store all frequency data in one list
# ------------------------------------------
average_power_all[itemp].append(average_power)
# ------------------------------------------
# estimate frequency thresholds for plotting
# ------------------------------------------
vmax[icomp] = np.max((np.percentile(average_power, 98), vmax[icomp]))
vmin[icomp] = np.min((np.percentile(average_power, 2), vmin[icomp]))
if np.abs(vmax[icomp]) > np.abs(vmin[icomp]):
vmin[icomp] = - np.abs(vmax[icomp])
else:
vmax[icomp] = np.abs(vmin[icomp])
return average_power_all, freqs, vmin, vmax
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# plot results when Fourier ICA was applied in the
# source space
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def plot_results_src_space(fourier_ica_obj, W_orig, A_orig,
src_loc_data=[], temporal_envelope=[], # parameter for temporal profiles
tpre=None, win_length_sec=None, tICA=False,
vertno=[], subject='fsaverage', subjects_dir=None, # parameter for spatial profiles
percentile=97, add_foci=True, classification={},
mni_coords=[], labels=None,
flow=None, fhigh=None, bar_plot=False, # parameter for spectral profiles
global_scaling=True, ncomp_per_plot=13, fnout=None, # general plotting parameter
temp_profile_names=[]):
"""
    Generate a plot containing all results achieved by
    applying FourierICA in source space, i.e., plot
    spatial, spectral and temporal profiles.
Parameters
----------
fourier_ica_obj: FourierICA object generated
when applying jumeg.decompose.fourier_ica
W_orig: array
2D-demixing-array (ncomp x nvoxel) estimated
when applying FourierICA
A_orig: array
2D-mixing-array (nvoxel, ncomp) estimated
when applying FourierICA
**** parameter for temporal profiles ****
src_loc_data: array
3D array containing the source localization
data used for FourierICA estimation
(nfreq x nepochs x nvoxel). Only necessary
if temporal_envelope is not given.
default: src_loc_data=[]
temporal_envelope: list of arrays containing
the temporal envelopes. If not given the
temporal envelopes are estimated here based
on the 'src_loc_data'
default: temporal_envelope=[]
tpre: float
Lower border (in seconds) of the time-window
used for generating/showing the epochs. If
'None' the value stored in 'fourier_ica_obj'
is used
win_length_sec: float or None
Length of the epoch window in seconds. If
'None' the value stored in 'fourier_ica_obj'
is used
tICA: boolean
should be True if temporal ICA was applied
default: tICA=False
**** parameter for spatial profiles ****
vertno: list
list containing two arrays with the order
of the vertices. If list is empty it will be
automatically generated
default: vertno=[]
subject: string
subjects ID
default: subject='fsaverage'
subjects_dir: string or None
string containing the subjects directory
path
default: subjects_dir=None --> system variable
        SUBJECTS_DIR is used
percentile: integer
value between 0 and 100 used to set a lower
limit for the shown intensity range of the
spatial plots
default: percentile=97
add_foci: bool
if True and the MNI coordinates are given
        a focus is plotted at the position of the
MNI coordinate
default: add_foci=True
classification: dictionary
classification object from the group_ica_object.
It is a dictionary containing two sub-dictionaries
'lh' and 'rh' (for left and right hemisphere). In
both sub-dictionaries the information about the
groups is stored, i.e. a group/region name + the
information which components are stored in this
group
default: classification={}
mni_coords: list of strings
if given the MNI coordinates are plotted
beneath the spatial profiles
default: mni_coords=[]
labels: list of strings
names of the labels which should be plotted.
Note, the prefix 'lh.' and the suffix '.label'
are automatically added
default: labels=None
**** parameter for spectral profiles ****
flow: float or integer
Lower frequency range for time frequency analysis
fhigh: float or integer
Upper frequency range for time frequency analysis
bar_plot: boolean
If set the results of the time-frequency analysis
are shown as bar plot. This option is recommended
when FourierICA was applied to resting-state data
default: bar_plot=False
**** general plotting parameter ****
global_scaling: bool
If set spatial, spectral and temporal profiles
are globally scaled. Otherwise each component
is scaled individually
default: global_scaling=True
ncomp_per_plot: integer
number of components per plot
    fnout: string or None
        if given, the filename stem used to save the generated
        figures; if 'None' the plots are not stored
        default: fnout=None
temp_profile_names: list of string
The list should have the same number of elements
as conditions were used to generate the temporal
envelopes. The names given here are used as headline
for the temporal profiles in the plot
        default: temp_profile_names=[]
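    Example
    -------
    Minimal call sketch (assumes the FourierICA results and the source
    localization data are already available; the output filename is only
    illustrative):
        mni_coords, classification, labels = plot_results_src_space(
            fourier_ica_obj, W_orig, A_orig, src_loc_data=src_loc_data,
            subject='fsaverage', subjects_dir=subjects_dir,
            fnout='fourier_ica_results')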
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from matplotlib import pyplot as plt
from matplotlib import gridspec as grd
from matplotlib.colors import Normalize
import numpy as np
from os import remove, rmdir
from os.path import exists, join
from scipy import misc
# -------------------------------------------
# check input parameter
# -------------------------------------------
    if tpre is None:
tpre = fourier_ica_obj.tpre
    if flow is None:
flow = fourier_ica_obj.flow
if not fhigh:
fhigh = fourier_ica_obj.fhigh
if not win_length_sec:
win_length_sec = fourier_ica_obj.win_length_sec
# check if either 'src_loc_data' or
# 'temporal_envelope' is given, otherwise stop
if src_loc_data == [] and temporal_envelope == []:
print(">>> ERROR: you have either to provide the variable")
print(">>> 'src_loc_data' or 'temporal_envelope'.")
import pdb
pdb.set_trace()
# estimate/set some simple parameter
sfreq = fourier_ica_obj.sfreq
win_ntsl = int(np.floor(sfreq * win_length_sec))
ncomp, nvoxel = W_orig.shape
ylim_temp = [-0.55, 0.55]
time_range = [tpre, tpre + win_length_sec]
# -------------------------------------------
# get temporal envelopes, or rather check if
# temporal envelopes already exist or must
# be calculated
# -------------------------------------------
temporal_envelope_mean, temporal_envelope = \
_get_temporal_envelopes(fourier_ica_obj, W_orig, temporal_envelope=temporal_envelope,
src_loc_data=src_loc_data, tICA=tICA, global_scaling=global_scaling,
win_length_sec=win_length_sec, tpre=tpre, flow=flow)
ntemp = len(temporal_envelope)
# -------------------------------------------
# get MNI-coordinates of the FourierICA
# components
# -------------------------------------------
if not classification and not mni_coords and not labels:
mni_coords, hemi_loc_txt, classification, labels = \
get_mni_coordinates(A_orig, subject=subject, subjects_dir=subjects_dir,
percentile=percentile)
# otherwise we only have to get the 'hemi_loc_txt' variable
else:
hemi_loc = np.array([int(i != '') for i in mni_coords['lh']])
hemi_loc += np.array([2*int(i != '') for i in mni_coords['rh']])
hemi_loc_txt = np.array([' '] * len(hemi_loc))
for idx, hemi_name in enumerate(['left ', 'right', 'both ']):
idx_change = np.where(hemi_loc == (idx + 1.0))[0]
hemi_loc_txt[idx_change] = hemi_name
# check if classification was performed prior to plotting
keys, key_borders, idx_sort = _check_classification(classification, ncomp)
# -------------------------------------------
# get spatial profiles of all components
# Note: This will take a while
# -------------------------------------------
temp_plot_dir = _get_spatial_profiles(A_orig, keys, hemi_loc_txt, vertno=vertno,
subject=subject, subjects_dir=subjects_dir,
labels=labels, classification=classification,
percentile=percentile, mni_coord=mni_coords,
add_foci=add_foci, fnout=fnout)
# -------------------------------------------
# get spectral profiles of all components
# Note: This will take a while
# -------------------------------------------
average_power_all, freqs, vmin, vmax = \
_get_spectral_profile(temporal_envelope, tpre, sfreq, flow, fhigh, bar_plot=bar_plot)
# check if bar plot should be used
# --> if yes estimate histogram data and normalize results
if bar_plot:
# generate an array to store the results
freq_heights = np.zeros((ntemp, ncomp, len(freqs)))
# loop over all conditions
for i_power, average_power in enumerate(average_power_all):
freq_heights[i_power, :, :] = np.sum(np.abs(average_power), axis=2)
# normalize to a range between 0 and 1
freq_heights /= np.max(freq_heights)
# ------------------------------------------
# now generate plot containing spatial,
# spectral and temporal profiles
# ------------------------------------------
# set some general parameter
plt.ioff()
nimg = int(np.ceil(ncomp/(1.0*ncomp_per_plot)))
idx_key = 0
nplot = list(range(ncomp_per_plot, nimg*ncomp_per_plot, ncomp_per_plot))
nplot.append(ncomp)
# generate image and its layout for plotting
fig = plt.figure('FourierICA plots', figsize=(14 + ntemp * 8, 34))
n_keys = len(key_borders) if len(key_borders) > 0 else 1
gs = grd.GridSpec(ncomp_per_plot * 20 + n_keys * 10, 10 + ntemp * 8, wspace=0.1, hspace=0.05,
left=0.04, right=0.96, bottom=0.04, top=0.96)
# ------------------------------------------
# loop over the estimated number of images
# ------------------------------------------
for iimg in range(nimg):
# clear figure (to start with a white image in each loop)
plt.clf()
# estimate how many plots on current image
istart_plot = int(ncomp_per_plot * iimg)
# set idx_class parameter
idx_class = 1 if key_borders == [] else 0
# ------------------------------------------
# loop over all components which should be
# plotted on the current image
# ------------------------------------------
for icomp in range(istart_plot, nplot[iimg]):
# ----------------------------------------------
# check if key_boarders is set and should be
# written on the image
# ----------------------------------------------
if (icomp == istart_plot and key_borders != []) or \
((icomp + 1) in key_borders):
# adjust key-index
if (icomp + 1) in key_borders:
idx_key += 1
# add sub-plot with 'key_text'
p_text = fig.add_subplot(gs[20 * (icomp - istart_plot) + idx_class * 10: \
20 * (icomp - istart_plot) + 8 + idx_class * 10, 0:10])
p_text.text(0, 0, keys[idx_key-1], fontsize=25)
adjust_spines(p_text, [])
# adjust idx_class parameter
idx_class += 1
# ----------------------------------------------
# plot spatial profiles
# ----------------------------------------------
# read spatial profile image
fn_base = "IC%02d_spatial_profile.png" % (idx_sort[icomp] + 1)
fnin_img = join(temp_plot_dir, fn_base)
spat_tmp = misc.imread(fnin_img)
remove(fnin_img)
# rearrange image
x_size, y_size, _ = spat_tmp.shape
            x_half, y_half = x_size // 2, y_size // 2
x_frame, y_frame = int(0.11 * x_half), int(0.01 * y_half)
spatial_profile = np.concatenate((spat_tmp[x_frame:(x_half - x_frame), y_frame:(y_half - y_frame), :],
spat_tmp[(x_half + x_frame):-x_frame, y_frame:(y_half - y_frame), :],
spat_tmp[(x_half + x_frame):-x_frame, (y_half + y_frame):-y_frame, :],
spat_tmp[x_frame:(x_half - x_frame), (y_half + y_frame):-y_frame, :]),
axis=1)
# set plot position and plot image
p1 = fig.add_subplot(
gs[20 * (icomp - istart_plot) + idx_class * 10:20 * (icomp - istart_plot) + 15 + idx_class * 10, 0:10])
p1.imshow(spatial_profile)
# set some plotting options
p1.yaxis.set_ticks([])
p1.xaxis.set_ticks([])
y_name = "IC#%02d" % (idx_sort[icomp] + 1)
p1.set_ylabel(y_name, fontsize=18)
# ----------------------------------------------
# if given write MNI coordinates under the image
# ----------------------------------------------
if np.any(mni_coords):
# left hemisphere
plt.text(120, 360, mni_coords['lh'][int(idx_sort[int(icomp)])], color="black",
fontsize=18)
# right hemisphere
plt.text(850, 360, mni_coords['rh'][int(idx_sort[int(icomp)])], color="black",
fontsize=18)
# add location information of the component
# --> if located in 'both', 'left' or 'right' hemisphere
plt.text(-220, 100, hemi_loc_txt[int(idx_sort[int(icomp)])], color="red",
fontsize=25, rotation=90)
# ----------------------------------------------
# temporal/spectral profiles
# ----------------------------------------------
# loop over all time courses
for itemp in range(ntemp):
# ----------------------------------------------
# if given plot a headline above the time
# courses of each condition
# ----------------------------------------------
if icomp == istart_plot and len(temp_profile_names):
# add a sub-plot for the text
p_text = fig.add_subplot(gs[(idx_class - 1) * 10: 6 + (idx_class - 1) * 12,
(itemp) * 8 + 11:(itemp + 1) * 8 + 9])
# plot the text and adjust spines
p_text.text(0, 0, " " + temp_profile_names[itemp], fontsize=30)
adjust_spines(p_text, [])
# set plot position
if bar_plot:
p2 = plt.subplot(
gs[20 * (icomp - istart_plot) + idx_class * 11:20 * (icomp - istart_plot) + 13 + idx_class * 10,
itemp * 8 + 11:(itemp + 1) * 8 + 9])
else:
p2 = plt.subplot(
gs[20 * (icomp - istart_plot) + idx_class * 10:20 * (icomp - istart_plot) + 15 + idx_class * 10,
itemp * 8 + 11:(itemp + 1) * 8 + 9])
# extract temporal plotting information
times = (np.arange(win_ntsl) / sfreq + tpre)[5:-5]
idx_start = np.argmin(np.abs(times - time_range[0]))
idx_end = np.argmin(np.abs(times - time_range[1]))
# ----------------------------------------------
# plot spectral profile
# ----------------------------------------------
# check if global scaling should be used
if global_scaling:
vmin_cur, vmax_cur = np.min(vmin), np.max(vmax)
else:
vmin_cur, vmax_cur = vmin[icomp], vmax[icomp]
# show spectral profile
if bar_plot:
plt.bar(freqs, freq_heights[itemp, int(idx_sort[icomp]), :], width=1.0, color='cornflowerblue')
plt.xlim(flow, fhigh)
plt.ylim(0.0, 1.0)
# set some parameter
p2.set_xlabel("freq. [Hz]")
p2.set_ylabel("ampl. [a.u.]")
# ----------------------------------------------
                    # plot temporal profile on the same spot
# ----------------------------------------------
ax = plt.twiny()
ax.set_xlabel("time [s]")
ax.plot(times[idx_start:idx_end], 0.5+temporal_envelope_mean[itemp][0][int(idx_sort[icomp]), idx_start:idx_end],
color='red', linewidth=3.0)
ax.set_xlim(times[idx_start], times[idx_end])
ax.set_ylim(0.0, 1.0)
else:
average_power = average_power_all[itemp][int(idx_sort[icomp])]
extent = (times[idx_start], times[idx_end], freqs[0], freqs[-1])
p2.imshow(average_power, extent=extent, aspect="auto", origin="lower",
picker=False, cmap='RdBu_r', vmin=vmin_cur,
vmax=vmax_cur)
# set some parameter
p2.set_xlabel("time [s]")
p2.set_ylabel("freq. [Hz]")
# ----------------------------------------------
                    # plot temporal profile on the same spot
# ----------------------------------------------
ax = plt.twinx()
ax.set_xlim(times[idx_start], times[idx_end])
ax.set_ylim(ylim_temp)
ax.set_ylabel("ampl. [a.u.]")
ax.plot(times[idx_start:idx_end], temporal_envelope_mean[itemp][0][int(idx_sort[icomp]), idx_start:idx_end],
color='black', linewidth=3.0)
# ----------------------------------------------
# finally plot a color bar
# ----------------------------------------------
if not bar_plot:
# first normalize the color table
norm = Normalize(vmin=np.round(vmin_cur, 2), vmax=np.round(vmax_cur, 2))
sm = plt.cm.ScalarMappable(cmap='RdBu_r', norm=norm)
sm.set_array(np.linspace(vmin_cur, 1.0))
# estimate position of the color bar
xpos = 0.405 + 0.5/(ntemp + 1.0)
if n_keys > 1:
cbaxes = fig.add_axes([xpos, 0.135, 0.2, 0.006])
else:
cbaxes = fig.add_axes([xpos, 0.03, 0.2, 0.006])
ticks_fac = (vmax_cur - vmin_cur) * 0.3333
ticks = np.round([vmin_cur, vmin_cur + ticks_fac, vmax_cur - ticks_fac, vmax_cur], 2)
# ticks = [-1.0, -0.5, 0.0, 0.5, 1.0]
# now plot color bar
cb = plt.colorbar(sm, ax=p2, cax=cbaxes, use_gridspec=False,
orientation='horizontal', ticks=ticks,
format='%1.2g')
cb.ax.tick_params(labelsize=18)
# ----------------------------------------------
# save image
# ----------------------------------------------
if fnout:
fnout_complete = '%s_%02d.png' % (fnout, iimg + 1)
plt.savefig(fnout_complete, format='png', dpi=300)
# close plot and set plotting back to screen
plt.close('FourierICA plots')
plt.ion()
# remove temporary directory for
# spatial profile plots
if exists(temp_plot_dir):
rmdir(temp_plot_dir)
return mni_coords, classification, labels
| 0 | 0 |
363cd66c50b81a1f02268cecb470ac1771146697 | 2,194 | py | Python | experiments/tabular_benchmarks/process_HB.py | auto-flow/oxygen | 6ff221027c4b1b022499d0b7d46b65f18815ada8 | [
"BSD-3-Clause"
] | 90 | 2020-12-14T23:35:40.000Z | 2022-03-04T05:20:36.000Z | experiments/tabular_benchmarks/process_HB.py | auto-flow/oxygen | 6ff221027c4b1b022499d0b7d46b65f18815ada8 | [
"BSD-3-Clause"
] | 1 | 2021-02-14T03:09:23.000Z | 2021-02-17T03:39:40.000Z | experiments/tabular_benchmarks/process_HB.py | auto-flow/oxygen | 6ff221027c4b1b022499d0b7d46b65f18815ada8 | [
"BSD-3-Clause"
] | 15 | 2020-12-22T09:54:58.000Z | 2022-03-15T11:16:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : qichun tang
# @Contact : [email protected]
import json
import os
from pathlib import Path
import pandas as pd
from joblib import Parallel, delayed
from joblib import dump
info = {
"bohb": ("HpBandSter-BOHB", "r",),
"ultraopt_BOHB": ("UltraOpt-BOHB", "g",),
"ultraopt_HyperBand": ("HyperBand", "b",),
"tpe": ("HyperOpt-TPE", "r",),
"ultraopt_ETPE": ("UltraOpt-ETPE", "g",),
"ultraopt_Random": ("Random", "b",),
}
benchmarks = [
"protein_structure",
"slice_localization",
"naval_propulsion",
"parkinsons_telemonitoring"
]
def process(benchmark, fname):
print(f"start, {benchmark}-{fname}")
target_file = f"{benchmark}-{fname}.pkl"
if os.path.exists(target_file):
print(f"exist, {benchmark}-{fname}")
return
regret_tests = []
runtimes = []
ts = []
df_t = pd.DataFrame()
for file in Path(f"{benchmark}-{fname}").iterdir():
if file.suffix != ".json":
continue
data = json.loads(file.read_text())
col_name = file.name.split(".")[0]
# regret_validation = data["regret_validation"]
regret_test = data["regret_test"]
for i in range(1, len(regret_test)):
regret_test[i] = min(regret_test[i - 1], regret_test[i])
regret_tests.append(regret_test)
runtime = data["runtime"]
runtimes.append(runtime)
ts.extend(runtime)
for timestamp, regret in zip(runtime, regret_test):
df_t.loc[timestamp, col_name] = regret
df_t.sort_index(inplace=True)
n_rows = df_t.shape[0]
for i, col in enumerate(df_t.columns):
pre_max=None
for j in range(n_rows):
if pd.isna(df_t.iloc[j, i]):
if pre_max is not None:
df_t.iloc[j, i] = pre_max
else:
pre_max = df_t.iloc[j, i]
print(f"ok, {benchmark}-{fname}")
dump(df_t, target_file)
args_list = []
for _, benchmark in enumerate(benchmarks):
for fname in info.keys():
args_list.append((benchmark, fname))
Parallel(n_jobs=10)(
delayed(process)(*args) for args in args_list
)
| 28.493506 | 68 | 0.597995 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : qichun tang
# @Contact : [email protected]
import json
import os
from pathlib import Path
import pandas as pd
from joblib import Parallel, delayed
from joblib import dump
info = {
"bohb": ("HpBandSter-BOHB", "r",),
"ultraopt_BOHB": ("UltraOpt-BOHB", "g",),
"ultraopt_HyperBand": ("HyperBand", "b",),
"tpe": ("HyperOpt-TPE", "r",),
"ultraopt_ETPE": ("UltraOpt-ETPE", "g",),
"ultraopt_Random": ("Random", "b",),
}
benchmarks = [
"protein_structure",
"slice_localization",
"naval_propulsion",
"parkinsons_telemonitoring"
]
def process(benchmark, fname):
print(f"start, {benchmark}-{fname}")
target_file = f"{benchmark}-{fname}.pkl"
if os.path.exists(target_file):
print(f"exist, {benchmark}-{fname}")
return
regret_tests = []
runtimes = []
ts = []
df_t = pd.DataFrame()
for file in Path(f"{benchmark}-{fname}").iterdir():
if file.suffix != ".json":
continue
data = json.loads(file.read_text())
col_name = file.name.split(".")[0]
# regret_validation = data["regret_validation"]
regret_test = data["regret_test"]
for i in range(1, len(regret_test)):
regret_test[i] = min(regret_test[i - 1], regret_test[i])
regret_tests.append(regret_test)
runtime = data["runtime"]
runtimes.append(runtime)
ts.extend(runtime)
for timestamp, regret in zip(runtime, regret_test):
df_t.loc[timestamp, col_name] = regret
df_t.sort_index(inplace=True)
n_rows = df_t.shape[0]
for i, col in enumerate(df_t.columns):
pre_max=None
for j in range(n_rows):
if pd.isna(df_t.iloc[j, i]):
if pre_max is not None:
df_t.iloc[j, i] = pre_max
else:
pre_max = df_t.iloc[j, i]
print(f"ok, {benchmark}-{fname}")
dump(df_t, target_file)
args_list = []
for _, benchmark in enumerate(benchmarks):
for fname in info.keys():
args_list.append((benchmark, fname))
Parallel(n_jobs=10)(
delayed(process)(*args) for args in args_list
)
| 0 | 0 |
cf2f5303e12688810ef838f064e43fa35b43f0f1 | 4,776 | py | Python | docs/conf.py | Sohl-Dickstein/learned_optimization | cd929359a51d09444665021387c058aac11b63ba | [
"Apache-2.0"
] | 70 | 2021-12-16T07:12:11.000Z | 2022-03-31T19:13:36.000Z | docs/conf.py | Sohl-Dickstein/learned_optimization | cd929359a51d09444665021387c058aac11b63ba | [
"Apache-2.0"
] | 10 | 2021-12-29T10:03:37.000Z | 2022-03-22T15:59:55.000Z | docs/conf.py | Sohl-Dickstein/learned_optimization | cd929359a51d09444665021387c058aac11b63ba | [
"Apache-2.0"
] | 5 | 2021-12-16T04:52:35.000Z | 2022-03-22T03:45:31.000Z | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'learned_optimization'
copyright = '2021, Google LLC.'
author = 'The learned_optimization authors'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '2.1'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'matplotlib.sphinxext.plot_directive',
'sphinx_autodoc_typehints',
'myst_nb',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
}
suppress_warnings = [
'ref.citation', # Many duplicated citations in numpy/scipy docstrings.
'ref.footnote', # Many unreferenced footnotes in numpy/scipy docstrings
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = ['.rst', '.ipynb', '.md']
# The main toctree document.
main_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
# Sometimes sphinx reads its own outputs as inputs!
'_build/html',
'_build/',
'_build/jupyter_execute',
'notebooks/README.md',
'README.md',
# Ignore markdown source for notebooks; myst-nb builds from the ipynb
# These are kept in sync via jupytext --sync
'notebooks/*.md',
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
autosummary_generate = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True,
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# TODO(lmetz) add logos!
# html_logo = '_static/logo_250px.png'
# html_favicon = '_static/favicon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for myst-nb -------------------------------------------------------
jupyter_execute_notebooks = 'force'
execution_allow_errors = False
# Notebook cell execution timeout, in seconds; myst-nb's default is 30.
execution_timeout = 100
# List of patterns, relative to the source directory, that match notebook
# files that will not be executed. '*' matches every notebook, so execution is
# effectively disabled here despite 'force' above (see the example below).
execution_excludepatterns = ['*']
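# Hypothetical example: to execute all notebooks except a few slow ones, list
# only those here instead, e.g.
#   execution_excludepatterns = ['notebooks/slow_training.ipynb']
# (the file name above is illustrative, not an actual notebook in this repo).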
# -- Extension configuration -------------------------------------------------
# Tell sphinx-autodoc-typehints to generate stub parameter annotations including
# types, even if the parameters aren't explicitly documented.
always_document_param_types = True
# Force a clean build: remove any previous Sphinx output before each rebuild.
import shutil
if os.path.exists('_build/'):
  shutil.rmtree('_build/')
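# A possible alternative (sketch only; assumes the same cleanup intent):
# anchoring the build directory to this file's location makes the cleanup
# independent of the working directory from which sphinx-build is invoked.
# _build_dir = os.path.join(os.path.dirname(__file__), '_build')
# if os.path.exists(_build_dir):
#     shutil.rmtree(_build_dir)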