# ---- File: VisualGimp/Markup.py | repo: duangsuse/VisualGimp | license: MIT ----
#!/usr/bin/env python2
# -*- encoding: utf-8 -*-

# Gimp Markup Builder
# author: duangsuse
# date: Thu May 02 2019 CST

from os import linesep

from Util import stream_join


class MarkupBuilder:
    ''' Gimp Markup SGML builder '''

    def __init__(self, indent=-1, nl=linesep, buffer=str):
        self.marks = buffer()
        self.tag_stack = list()
        self.nl = nl
        self.indent = indent
        self.last_spaces = 0
        self.revert_last_indent_size = 0
        self.last_is_text = False

    '''
    Indent rules:

    when starting new tag, write last spaces, last spaces += indent
    if new tag is not text tag start (inner is just text), write newline
    when leaving tag, last spaces -= indent
    '''

    def useindent(self): return self.indent != -1
    indented = property(useindent)

    def wnewline(self):
        ''' see use_indent '''
        self.marks += self.nl

    def windent(self):
        ''' see use_indent '''
        wrote = 0
        for _ in range(0, self.last_spaces):
            self.marks += ' '
            wrote += 1  # dummy?
        return wrote

    def cancel_indent(self):
        ''' cancel last indent '''
        if self.indented: self.marks = self.marks[:-self.revert_last_indent_size]

    def do_indent(self, entering=True):
        ''' Write indent, increase last_spaces, saving wrote spaces and newline to revert_last_indent_size '''
        def do():
            self.wnewline()
            if (entering):
                self.last_spaces += self.indent
            else:
                self.last_spaces -= self.indent
            self.revert_last_indent_size = self.windent() + 1
        if self.indented: do()

    def do_last_indent(self, *args, **kwargs):
        ''' write indenting for last block '''
        self.last_spaces -= self.indent
        self.do_indent(*args, **kwargs)
        self.last_spaces += self.indent

    def begin(self, tag, attrs={}):
        '''
        Make a tag with name and attributes

        Attribute name, value and tag name is escaped
        '''
        self.last_is_text = False
        attrst = str()
        tagscape = self.escape(tag)
        ary = list(stream_join(attrs.keys(), attrs.values())) if attrs.__class__ is dict else list(attrs)
        if len(attrs) != 0:
            for n in range(0, len(ary), 2):
                attrst += self.escape(str(ary[n]))
                attrst += '='
                #print(ary)
                #print(n)
                attrst += "\"%s\"" % self.escape(str(ary[n+1]))
        self.marks += '<' + tagscape
        if len(attrs) != 0: self.marks += ' '
        self.marks += attrst + '>'
        # always write indents for next line
        # makes its possible to drop last indent (text tag)
        self.do_indent()
        self.tag_stack.append(tagscape)
        return self

    def make(self): return self.marks

    def tag(self, *args, **kwargs):
        r'''
        EDSL using __close__ with syntax

        create nodes like:

        with xml.tag('span', {color: '#66ccff'}):
            xml % 'Q \w\ Q'
        '''
        self.last_is_text = False

        class TagBuilder:
            def __init__(self, xml):
                self.xml = xml
            def __enter__(self):
                self.xml.begin(*args, attrs=kwargs)
            def __exit__(self, *lveinfo):
                self.xml.end()
        return TagBuilder(self)

    def text(self, content):
        ''' append text content '''
        self.last_is_text = True
        if self.indented: self.cancel_indent()
        self.marks += self.escape(content)
        return self

    #@staticmethod
    #def test():
    #    m = MarkupBuilder()
    #    m > 'html'
    #    m > 'head'
    #    m > 'title'
    #    m < 'Hello World'
    #    m <= 2
    #    m > 'body'
    #    m > 'text'
    #    with m.tag("b"):
    #        m < 'String'
    #    m >= ['a', {'id': 'str'}]
    #    m < '|sg.'
    #    m <= 4
    #    return m

    def end(self):
        ''' delimits last tag '''
        if not self.last_is_text:  # cancel indentation
            #print(self.indent, self.tag_stack)
            self.cancel_indent()
            self.do_indent(False)
        self.marks += '</' + self.tag_stack.pop() + '>'
        self.do_indent(False)
        self.last_is_text = False

    # Not cared by Markup indent emitter
    def raw(self, raw):
        ''' write raw text (unescaped) '''
        self.marks += raw
        return self

    def rawtag(self, rawtext):
        ''' append unescaped raw <> text '''
        self.marks += '<'
        self.marks += rawtext
        self.marks += '>'

    def _escape(self, xml):
        '''
        Escape XML string

        ' is replaced with &apos;
        " is replaced with &quot;
        & is replaced with &amp;
        < is replaced with &lt;
        > is replaced with &gt;
        '''
        escapes = frozenset("'\"&<>")
        replacement = {'\'': 'apos', '"': 'quot', '&': 'amp', '<': 'lt', '>': 'gt'}
        if len(xml) < 1: return
        output = str()
        for i in range(0, len(xml)):
            char = xml[i]
            if (char in escapes):
                output += '&'
                output += replacement[char]
                output += ';'
            else:
                output += char
        return output
    escape = classmethod(_escape)

    def __str__(self):
        ''' M(marks)..[tag stack] '''
        return 'M(' + self.marks + ')..' + str(self.tag_stack)

    __lt__ = text   # chain
    __gt__ = begin  # chain
    __add__ = raw   # chain

    def __contains__(self, tag):
        ''' is tag inside enclosing tags ? '''
        return tag in self.tag_stack

    def __ge__(self, tag_attr):
        ''' xml >= ['markup', {'name': 'abcs'}] '''
        mark = tag_attr[0]
        attr = tag_attr[1]
        self.begin(mark, attr)

    def __le__(self, n=1):
        ''' Leave (close) N tags '''
        while n > 0:
            self.end()
            n -= 1
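
# Example usage (illustrative; not part of the original file). With the default
# indent=-1 no indentation is emitted, so this prints a single line:
if __name__ == '__main__':
    m = MarkupBuilder()
    m > 'html'
    m > 'body'
    m < 'Hello World'
    m <= 2
    print(m.make())  # -> <html><body>Hello World</body></html>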

# ---- File: sqlalchemy_redshift/__init__.py | repo: Hivestack/sqlalchemy-redshift | license: MIT ----
from pkg_resources import DistributionNotFound, get_distribution, parse_version
try:
    import psycopg2  # noqa: F401
except ImportError:
    raise ImportError(
        'No module named psycopg2. Please install either '
        'psycopg2 or psycopg2-binary package for CPython '
        'or psycopg2cffi for Pypy.'
    )

for package in ['psycopg2', 'psycopg2-binary', 'psycopg2cffi']:
    try:
        if get_distribution(package).parsed_version < parse_version('2.5'):
            raise ImportError('Minimum required version for psycopg2 is 2.5')
        break
    except DistributionNotFound:
        pass

__version__ = get_distribution('hs-sqlalchemy-redshift').version

from sqlalchemy.dialects import registry

registry.register("redshift", "sqlalchemy_redshift.dialect", "RedshiftDialect")
registry.register(
    "redshift.psycopg2", "sqlalchemy_redshift.dialect", "RedshiftDialect"
)
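
# Illustrative note: with the dialect registered above, SQLAlchemy resolves
# "redshift+psycopg2" URLs to this dialect (host and credentials below are
# placeholders):
#
#   from sqlalchemy import create_engine
#   engine = create_engine(
#       "redshift+psycopg2://user:secret@cluster.example.com:5439/mydb")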

# ---- File: ch2/chapter2_features_of_fastapi_02.py | repo: PacktPublishing/Understanding-How-Web-APIs-Work | license: MIT ----
# -*- coding: utf-8 -*-
def message(age: int = 0, name: str = 'stranger') -> str:
    return f'Hello {name}, you are {age} years old'
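
# Example calls (illustrative; not part of the original file):
if __name__ == '__main__':
    print(message())            # Hello stranger, you are 0 years old
    print(message(30, 'Ana'))   # Hello Ana, you are 30 years old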

# ---- File: Lib/site-packages/hackedit/app/templates.py | repo: fochoao/cpython | licenses: bzip2-1.0.6, 0BSD ----
"""
This module contains the top level API for managing the project/file templates.
"""
import json
import logging
import os
import re

from binaryornot.check import is_binary

from hackedit.app import settings


def create(template, dest_dir, answers):
    """
    Creates a file/project from the specified template, at the specified directory.

    :param template: Template data.
    :param dest_dir: Destination directory where to create the file/project
    :param answers: Dict of answers for substitution variables
    """
    def get_paths(root, path, src_dir, dest_dir):
        src_path = os.path.join(root, path)
        rel_path = os.path.relpath(src_path, src_dir)
        dst_path = os.path.join(dest_dir, rel_path)
        return src_path, dst_path

    def get_file_encoding(path):
        if is_binary(path):
            return 'binary'
        try:
            encodings = template['encodings']
        except KeyError:
            encodings = ['utf-8', 'cp1252']
        for encoding in encodings:
            try:
                with open(path, encoding=encoding) as f:
                    f.read()
            except UnicodeDecodeError:
                continue
            else:
                return encoding

    def open_file(path, encoding, to_write=None):
        if encoding == 'binary':
            if to_write is None:
                mode = 'rb'
            else:
                mode = 'wb'
            encoding = None
        else:
            if to_write is None:
                mode = 'r'
            else:
                mode = 'w'
        content = None
        with open(path, mode, encoding=encoding) as f:
            if to_write is not None:
                f.write(to_write)
            else:
                content = f.read()
        return content

    def subsitute_vars(string):
        for var, value in answers.items():
            string = re.sub('@%s@' % var, value, string)
        return string

    ret_val = []
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    src_dir = template['path']
    for root, dirs, files in os.walk(src_dir):
        for file in files:
            if file == 'template.json' or file.endswith('.pyc'):
                continue
            src, dst = get_paths(root, file, src_dir, dest_dir)
            dst = subsitute_vars(dst)
            encoding = get_file_encoding(src)
            try:
                content = open_file(src, encoding)
            except OSError:
                _logger().exception('failed to open file: %r', src)
            if encoding != 'binary':
                content = subsitute_vars(content)
                if file == 'btpad_btn_img_0.png':
                    print(len(content), encoding)
            try:
                open_file(dst, encoding, to_write=content)
            except PermissionError:
                _logger().exception('failed to write file: %r', dst)
            else:
                ret_val.append(dst)
                assert open_file(dst, encoding) == content
        for directory in dirs:
            src, dst = get_paths(root, directory, src_dir, dest_dir)
            dst = subsitute_vars(dst)
            try:
                os.mkdir(dst)
            except PermissionError:
                _logger().exception('failed to create directory: %r', dst)
    return ret_val


def get_sources():
    """
    Returns the template sources (directory associated with a label).
    """
    s = settings.load()
    tmpl_sources = s.value('_templates/sources', '[]')
    tmpl_sources = json.loads(tmpl_sources)
    return sorted(tmpl_sources, key=lambda x: x['label'])


def add_source(label, path):
    """
    Adds a template source.

    :param label: Name of the template source.
    :param path: Path of the template source.
    """
    tmpl_sources = get_sources()
    tmpl_sources.append({'label': label, 'path': path})
    s = settings.load()
    s.setValue('_templates/sources', json.dumps(tmpl_sources))


def rm_source(label):
    """
    Removes the specified template source.

    :param label: Name of the template source to remove.
    """
    tmpl_sources = get_sources()
    for src in tmpl_sources:
        if src['label'] == label:
            tmpl_sources.remove(src)
    s = settings.load()
    s.setValue('_templates/sources', json.dumps(tmpl_sources))


def clear_sources():
    """
    Clear template sources.
    """
    s = settings.load()
    s.setValue('_templates/sources', json.dumps([]))


def get_templates(category='', source_filter=''):
    """
    Gets the list of templates.

    :param category: Template category to retrieve.
        - use "Project" to get project templates
        - use "File" to get file templates
        - use an empty string to retrieve them all (default).
    :param source_filter: Label of the source of the templates to retrieve.
        Use an empty string to retrieve templates from all sources.
    """
    def filtered_sources():
        """
        Filter list of sources based on the ``source_filter`` parameter.
        """
        tmpl_sources = get_sources()
        filtered = []
        if source_filter:
            # only keep the requested template source
            for src in tmpl_sources:
                if src['label'] == source_filter:
                    filtered.append(src)
                    break
        else:
            filtered = tmpl_sources
        return filtered

    def get_template(tdir):
        """
        Returns template data for the given template directory.

        Returns None if the template is invalid.

        :param tdir: Template directory to get data from.
        """
        tmpl = None
        template_json = os.path.join(tdir, 'template.json')
        if not os.path.exists(template_json):
            # no template.json -> invalid template
            _logger().warn('"template.json" not found in template directory: %r', tdir)
        else:
            try:
                with open(template_json) as f:
                    tmpl = json.loads(f.read())
            except (OSError, json.JSONDecodeError):
                # unreadable template.json -> invalid template
                _logger().exception('failed to read %r', template_json)
                tmpl = None
            else:
                try:
                    tmpl_cat = tmpl['category']
                except KeyError:
                    # no metadata or no category in template.json -> invalid template
                    _logger().exception('failed to read category from template metadata, '
                                        'incomplete template.json?')
                    tmpl = None
                else:
                    # valid template (finally).
                    tmpl['source'] = src
                    if category and category != tmpl_cat:
                        _logger().debug('rejecting template directory: %r, invalid category', tdir)
                        tmpl = None
        return tmpl

    def listdir(directory):
        """
        Securely list subdirectories of ``directory``.

        Returns an empty list if an OSError occurred.
        """
        try:
            return os.listdir(directory)
        except OSError:
            return []

    for src in filtered_sources():
        for tdir in listdir(src['path']):
            tdir = os.path.join(src['path'], tdir)
            if os.path.isfile(tdir):
                continue
            tmpl = get_template(tdir)
            if tmpl:
                tmpl['path'] = tdir
                yield tmpl


def get_template(source, template):
    """
    Returns the specified template data.
    """
    for t in get_templates(source_filter=source):
        if t['name'] == template:
            return t
    return None


def _logger():
    return logging.getLogger(__name__)


if __name__ == '__main__':
    clear_sources()
    add_source('COBOL', '/home/colin/Documents/hackedit-cobol/hackedit_cobol/templates')
    add_source('Python', '/home/colin/Documents/hackedit-python/hackedit_python/templates')
    for tmpl in get_templates():
        print(json.dumps(tmpl, indent=4, sort_keys=True))
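
# Illustrative use of the module API (template name and answer keys are hypothetical):
#
#   tmpl = get_template('Python', 'My Template')
#   create(tmpl, '/tmp/new_project', {'PROJECT_NAME': 'demo'})
#
# create() would then replace every @PROJECT_NAME@ marker in the template's paths
# and file contents with 'demo', mirroring the re.sub() call in subsitute_vars().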

# ---- File: src/tubize/videotomp4.py | repo: olivervinn/tubizescripts | license: MIT ----
"""
Convert video format x to MP4/H.264.
"""
import os
import sys
import logging

from .videometainfo import VideoMetaInfo
from .utils import sizeof_fmt, time_fmt, find_files, check_dependencies, call, ffmpeg

logger = logging.getLogger(__name__)


class VideoToMP4:
    """To Mp4"""

    SUPPORTED_EXTENSIONS = ".wmv, .avi, .mkv, .mov, .flv"

    RULES = {
        ".wmv": "-c:v libx264 -crf 19 ",
        ".avi": "-vf yadif=1 -c:v h264_nvenc -preset slow -tune film -crf 17",
        ".mkv": "-c copy",
        ".mov": "-vcodec h264 -acodec aac -strict -2 -crf 19 ",
        ".flv": " -r 20 ",
    }

    def process(self, video_file: str):
        """Convert video files to MP4 container format."""
        name = os.path.splitext(video_file)[0]
        ext = os.path.splitext(video_file)[1]
        new_name = f"{name}.mp4"
        if os.path.exists(new_name):
            logger.info(f"Skipping file {new_name} already exists!")
        elif ext not in VideoToMP4.RULES:
            logger.error(f"Skipping unsupported type {ext}!")
        else:
            print(f'Convert {ext} to MP4 {new_name} ... ')
            meta_info = VideoMetaInfo(video_file)
            rule = VideoToMP4.RULES[ext]
            flags = "-movflags +faststart -pix_fmt yuv420p"
            ffmpeg(
                f'-i "{video_file}" {flags} {rule} -metadata date="{meta_info.original_date}" "{new_name}"'
            )

    def file(self, filename: str) -> None:
        logger.debug(f"converting file {filename}")
        self.process(filename)

    def directory(self, path: str, extension: str) -> int:
        files = find_files(path, extension)
        if len(files) < 1:
            print("No matching files found in directory!", file=sys.stderr)
        else:
            for f in files:
                self.file(f)
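
# Illustrative usage (paths are hypothetical):
#
#   converter = VideoToMP4()
#   converter.file('clip.wmv')              # single file -> clip.mp4
#   converter.directory('/videos', '.avi')  # every .avi found under /videos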

# ---- File: setup.py | repo: snasiriany/parasol | license: MIT ----
from setuptools import setup
setup(
    name='parasol',
    dependency_links=[
    ],
    install_requires=[
    ]
)

# ---- File: app/migrations/0007_auto_20211102_1946.py | repo: Rqwannn/Rudemy | license: MIT ----
# Generated by Django 3.2.8 on 2021-11-02 12:46
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0006_auto_20211102_1928'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='skill',
        ),
        migrations.AddField(
            model_name='profile',
            name='tags',
            field=models.ManyToManyField(blank=True, to='app.Tag'),
        ),
        migrations.DeleteModel(
            name='Skill',
        ),
    ]
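
# Applied with the standard Django workflow, e.g. `python manage.py migrate app`.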
752ee840202809a32e9848a1a2c9a1828e74e71c | 5,132 | py | Python | oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | [
"BSD-3-Clause"
] | null | null | null | oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | [
"BSD-3-Clause"
] | null | null | null | oasislmf/model_execution/conf.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | [
"BSD-3-Clause"
] | null | null | null | import csv
import io
import json
import logging
import os
import warnings
from collections import defaultdict
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from .files import GENERAL_SETTINGS_FILE, GUL_SUMMARIES_FILE, IL_SUMMARIES_FILE, MODEL_SETTINGS_FILE
def _get_summaries(summary_file):
"""
Get a list representation of a summary file.
"""
summaries_dict = defaultdict(lambda: {'leccalc': {}})
with io.open(summary_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
id = int(row[0])
if row[1].startswith('leccalc'):
summaries_dict[id]['leccalc'][row[1]] = row[2].lower() == 'true'
else:
summaries_dict[id][row[1]] = row[2].lower() == 'true'
summaries = list()
for id in sorted(summaries_dict):
summaries_dict[id]['id'] = id
summaries.append(summaries_dict[id])
return summaries
@oasis_log
def create_analysis_settings_json(directory):
"""
Generate an analysis settings JSON from a set of
CSV files in a specified directory.
Args:
``directory`` (string): the directory containing the CSV files.
Returns:
The analysis settings JSON.
"""
if not os.path.exists(directory):
error_message = "Directory does not exist: {}".format(directory)
logging.getLogger().error(error_message)
raise OasisException(error_message)
general_settings_file = os.path.join(directory, GENERAL_SETTINGS_FILE)
model_settings_file = os.path.join(directory, MODEL_SETTINGS_FILE)
gul_summaries_file = os.path.join(directory, GUL_SUMMARIES_FILE)
il_summaries_file = os.path.join(directory, IL_SUMMARIES_FILE)
for file in [general_settings_file, model_settings_file, gul_summaries_file, il_summaries_file]:
if not os.path.exists(file):
error_message = "File does not exist: {}".format(directory)
logging.getLogger().error(error_message)
raise OasisException(error_message)
general_settings = dict()
with io.open(general_settings_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
general_settings[row[0]] = eval("{}('{}')".format(row[2], row[1]))
model_settings = dict()
with io.open(model_settings_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
model_settings[row[0]] = eval("{}('{}')".format(row[2], row[1]))
gul_summaries = _get_summaries(gul_summaries_file)
il_summaries = _get_summaries(il_summaries_file)
analysis_settings = general_settings
analysis_settings['model_settings'] = model_settings
analysis_settings['gul_summaries'] = gul_summaries
analysis_settings['il_summaries'] = il_summaries
output_json = json.dumps(analysis_settings)
logging.getLogger().info("Analysis settings json: {}".format(output_json))
return output_json
def read_analysis_settings(analysis_settings_fp, il_files_exist=False,
ri_files_exist=False):
"""Read the analysis settings file"""
# Load analysis_settings file
try:
# Load as a json
with io.open(analysis_settings_fp, 'r', encoding='utf-8') as f:
analysis_settings = json.load(f)
# Extract the analysis_settings part within the json
if analysis_settings.get('analysis_settings'):
analysis_settings = analysis_settings['analysis_settings']
except (IOError, TypeError, ValueError):
raise OasisException('Invalid analysis settings file or file path: {}.'.format(
analysis_settings_fp))
# Reset il_output if the files are not there
if not il_files_exist or 'il_output' not in analysis_settings:
# No insured loss output
analysis_settings['il_output'] = False
analysis_settings['il_summaries'] = []
# Same for ri_output
if not ri_files_exist or 'ri_output' not in analysis_settings:
# No reinsured loss output
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
# If we want ri_output, we will need il_output, which needs il_files
if analysis_settings['ri_output'] and not analysis_settings['il_output']:
if not il_files_exist:
warnings.warn("ri_output selected, but il files not found")
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
else:
analysis_settings['il_output'] = True
# guard - Check if at least one output type is selected
if not any([
analysis_settings['gul_output'] if 'gul_output' in analysis_settings else False,
analysis_settings['il_output'] if 'il_output' in analysis_settings else False,
analysis_settings['ri_output'] if 'ri_output' in analysis_settings else False,
]):
raise OasisException(
'No valid output settings in: {}'.format(analysis_settings_fp))
return analysis_settings
| 36.657143 | 100 | 0.677319 | import csv
import io
import json
import logging
import os
import warnings
from collections import defaultdict
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from .files import GENERAL_SETTINGS_FILE, GUL_SUMMARIES_FILE, IL_SUMMARIES_FILE, MODEL_SETTINGS_FILE
def _get_summaries(summary_file):
"""
Get a list representation of a summary file.
"""
summaries_dict = defaultdict(lambda: {'leccalc': {}})
with io.open(summary_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
id = int(row[0])
if row[1].startswith('leccalc'):
summaries_dict[id]['leccalc'][row[1]] = row[2].lower() == 'true'
else:
summaries_dict[id][row[1]] = row[2].lower() == 'true'
summaries = list()
for id in sorted(summaries_dict):
summaries_dict[id]['id'] = id
summaries.append(summaries_dict[id])
return summaries
@oasis_log
def create_analysis_settings_json(directory):
"""
Generate an analysis settings JSON from a set of
CSV files in a specified directory.
Args:
``directory`` (string): the directory containing the CSV files.
Returns:
The analysis settings JSON.
"""
if not os.path.exists(directory):
error_message = "Directory does not exist: {}".format(directory)
logging.getLogger().error(error_message)
raise OasisException(error_message)
general_settings_file = os.path.join(directory, GENERAL_SETTINGS_FILE)
model_settings_file = os.path.join(directory, MODEL_SETTINGS_FILE)
gul_summaries_file = os.path.join(directory, GUL_SUMMARIES_FILE)
il_summaries_file = os.path.join(directory, IL_SUMMARIES_FILE)
for file in [general_settings_file, model_settings_file, gul_summaries_file, il_summaries_file]:
if not os.path.exists(file):
error_message = "File does not exist: {}".format(directory)
logging.getLogger().error(error_message)
raise OasisException(error_message)
general_settings = dict()
with io.open(general_settings_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
general_settings[row[0]] = eval("{}('{}')".format(row[2], row[1]))
model_settings = dict()
with io.open(model_settings_file, 'r', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
model_settings[row[0]] = eval("{}('{}')".format(row[2], row[1]))
gul_summaries = _get_summaries(gul_summaries_file)
il_summaries = _get_summaries(il_summaries_file)
analysis_settings = general_settings
analysis_settings['model_settings'] = model_settings
analysis_settings['gul_summaries'] = gul_summaries
analysis_settings['il_summaries'] = il_summaries
output_json = json.dumps(analysis_settings)
logging.getLogger().info("Analysis settings json: {}".format(output_json))
return output_json
def read_analysis_settings(analysis_settings_fp, il_files_exist=False,
ri_files_exist=False):
"""Read the analysis settings file"""
# Load analysis_settings file
try:
# Load as a json
with io.open(analysis_settings_fp, 'r', encoding='utf-8') as f:
analysis_settings = json.load(f)
# Extract the analysis_settings part within the json
if analysis_settings.get('analysis_settings'):
analysis_settings = analysis_settings['analysis_settings']
except (IOError, TypeError, ValueError):
raise OasisException('Invalid analysis settings file or file path: {}.'.format(
analysis_settings_fp))
# Reset il_output if the files are not there
if not il_files_exist or 'il_output' not in analysis_settings:
# No insured loss output
analysis_settings['il_output'] = False
analysis_settings['il_summaries'] = []
# Same for ri_output
if not ri_files_exist or 'ri_output' not in analysis_settings:
# No reinsured loss output
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
# If we want ri_output, we will need il_output, which needs il_files
if analysis_settings['ri_output'] and not analysis_settings['il_output']:
if not il_files_exist:
warnings.warn("ri_output selected, but il files not found")
analysis_settings['ri_output'] = False
analysis_settings['ri_summaries'] = []
else:
analysis_settings['il_output'] = True
# guard - Check if at least one output type is selected
if not any([
analysis_settings['gul_output'] if 'gul_output' in analysis_settings else False,
analysis_settings['il_output'] if 'il_output' in analysis_settings else False,
analysis_settings['ri_output'] if 'ri_output' in analysis_settings else False,
]):
raise OasisException(
'No valid output settings in: {}'.format(analysis_settings_fp))
return analysis_settings
| 0 | 0 |

# ---- File: codes/functions.py | repo: Wenupi/protoplanetary_disks | license: MIT ----
#!/usr/bin/env python
# NOTE: imports inferred from usage in this file. In the original repository the
# enclosing script is expected to define the physical constants (AU, hP, kB,
# clight, Ggrav, sigmaB), the disk model functions (EQ_density, EQ_amax,
# EQ_Height, EQ_temperature), the run parameters (inc, Rout, nu, wl, Mstar,
# Mdot, Tstar, Rstar, phi_angle, scattering) and the DSHARP `opacity` module.
import numpy as np
from scipy import interpolate
from scipy.integrate import odeint
from scipy.interpolate import RegularGridInterpolator

#--------------------------------------------------------------------------------
# Changes the sky coordinates (x,y,z) to the disk coordinates (x_d,y_d,z_d)
# The x axis is the rotation axis
def FUN_rotation(x, y, z):
    x_d = x
    y_d = y*np.cos(inc) - z*np.sin(inc)
    z_d = y*np.sin(inc) + z*np.cos(inc)
    return x_d, y_d, z_d

#--------------------------------------------------------------------------------
# Radiative transfer equation
def FUN_intensity(I, z, x, y, optde):
    x_d, y_d, z_d = FUN_rotation(x, y, z)
    density = EQ_density(x_d, y_d, z_d)
    amax = EQ_amax(x_d, y_d, z_d)
    opa = function_ext(amax)
    S = funcion_S([z_d, y_d, x_d])
    # print ('x,y,z', x,y,z)
    # print (S, x_d, y_d, z_d)
    # print (optde(z))
    # z is the integration variable (it must be evaluable at any point)
    dIdz = -S*opa*density*np.exp(-optde(z))
    return dIdz

#--------------------------------------------------------------------------------
# Optical depth
def FUN_tau(tt, z, x, y):
    x_d, y_d, z_d = FUN_rotation(x, y, z)
    density = EQ_density(x_d, y_d, z_d)
    amax = EQ_amax(x_d, y_d, z_d)
    opa = function_ext(amax)
    dtau = -opa*density
    return dtau

#--------------------------------------------------------------------------------
def FUN_tau_zaxis(tt, z, x, y):
    x_d, y_d, z_d = x, y, z
    density = EQ_density(x_d, y_d, z_d)
    amax = EQ_amax(x_d, y_d, z_d)
    opa = function_ext(amax)
    dtau = -opa*density
    return dtau

#--------------------------------------------------------------------------------
# Black body radiation
def FUN_BB(nu, T):
    # B = 2.*hP*nu**3/clight**2/( np.exp(hP*nu/kB/T) - 1.)
    B = 1./(np.exp(hP*nu/kB/T) - 1.)
    return B

#--------------------------------------------------------------------------------
def FUN_limits_mult(xx, yy):
    Hout = EQ_Height(Rout)
    lim_z = Rout*np.sin(inc) + 2.*Hout*np.cos(inc)  # Based on the geometry of the disk
    lim_y = Rout*np.cos(inc) + 2.*Hout*np.sin(inc)  # Based on the geometry of the disk
    z_arr = np.linspace(1.1*lim_z, -1.1*lim_z, 200)
    z_crit = []
    if ((np.abs(xx) <= Rout) and (np.abs(yy) <= lim_y)):
        xd, yd, zd = FUN_rotation(xx, yy, z_arr)
        crit = np.zeros((len(z_arr)))
        ###############################################################################
        # Works, but could be optimized
        ###############################################################################
        for ii in range(len(z_arr)):  # Build a density vector along the line of sight
            if (EQ_density(xd, yd[ii], zd[ii]) == 0.):
                crit[ii] = 0
            else:
                crit[ii] = 1
        for ii in range(len(z_arr)):  # Find the indices where the density switches on or off (boundaries)
            if ((ii != 0) and (crit[ii] - crit[ii-1] != 0)):
                z_crit.append(z_arr[ii])
            elif (ii == 0 and crit[0] == 1):
                z_crit.append(z_arr[0])
        ###############################################################################
    return z_crit

#--------------------------------------------------------------------------------
def FUN_creates_source_function(x_array, y_array):
    # Arrays and limits
    Hout = EQ_Height(Rout)
    z_array = np.linspace(-2.*Hout, 2.*Hout, 200)
    Sfunction = np.zeros((len(z_array), len(y_array), len(x_array)))
    Temfunction = np.zeros((len(z_array), len(y_array), len(x_array)))
    op_depth_p = np.zeros((len(y_array), len(x_array)))
    # Computes the optical depth (perpendicular to the disk midplane)
    for j in range(len(y_array)):
        for i in range(len(x_array)):
            if (x_array[i] == 0. and y_array[j] == 0.):
                Sfunction[:, j, i] = 0.
                Temfunction[:, j, i] = 0.
            else:
                rad = np.sqrt(x_array[i]**2 + y_array[j]**2)
                Hscale = EQ_Height(rad)
                z_integ = np.linspace(2.*Hscale, -2.*Hscale, 200)
                sol = odeint(FUN_tau_zaxis, 0., z_integ, args=(x_array[i], y_array[j])).T[0]
                op_depth_p[j][i] = sol[len(z_integ)-1]
                inter_opt = interpolate.interp1d(z_integ, sol, kind='linear', bounds_error=False, fill_value=0.)
                for k in range(len(z_array)):
                    amax = EQ_amax(x_array[i], y_array[j], z_array[k])
                    albedo = function_alb(amax)
                    ########## Temperature ##########
                    Omega2 = Ggrav*Mstar/(rad*AU)**3
                    Teff4 = 3.*Mdot*Omega2/8./np.pi/sigmaB
                    Tacc4 = 3./4.*(7.*inter_opt(abs(z_array[k])) + 2./3.)*Teff4
                    Tirr4 = Tstar**4./4.*(Rstar/rad/AU)**2*np.exp(-7.*inter_opt(abs(z_array[k]))/phi_angle)
                    Temfunction[k, j, i] = (Tacc4 + Tirr4)**(0.25)
                    #Temfunction[k,j,i] = EQ_temperature(x_array[i],y_array[j],z_array[k])
                    #################################
                    Sfunction[k, j, i] = FUN_BB(nu, Temfunction[k, j, i])*(1. + albedo*FUN_f(inter_opt(z_array[k]), op_depth_p[j][i], albedo))
    # Build the 3D source-function and temperature interpolators
    funcion_S = RegularGridInterpolator((z_array, y_array, x_array), Sfunction, bounds_error=False, fill_value=None)
    funcion_T = RegularGridInterpolator((z_array, y_array, x_array), Temfunction, bounds_error=False, fill_value=None)
    return funcion_S, funcion_T

#--------------------------------------------------------------------------------
def FUN_f(t, tau, alb):
    eps = np.sqrt(1. - alb)
    fff = np.exp(-np.sqrt(3.)*eps*t) + np.exp(np.sqrt(3.)*eps*(t - tau))
    fff = fff/(np.exp(-np.sqrt(3.)*eps*tau)*(eps - 1.) - (eps + 1.))
    return fff

#--------------------------------------------------------------------------------
# Read the DSHARP opacity tables
# Load opacities
with np.load('default_opacities_smooth.npz') as d:
    a_w = d['a']
    gsca_w = d['g']
    lam_w = d['lam']
    k_abs_w = d['k_abs']
    k_sca_w = d['k_sca']

lam_avgs = wl
# We split the opacities within the range of frequency to make the calculations faster
k_abs_w = k_abs_w[(0.9*lam_avgs < lam_w) & (1.1*lam_avgs > lam_w), :]
k_sca_w = k_sca_w[(0.9*lam_avgs < lam_w) & (1.1*lam_avgs > lam_w), :]
k_sca_w = k_sca_w*(1. - gsca_w[(0.9*lam_avgs < lam_w) & (1.1*lam_avgs > lam_w), :])
lam_w = lam_w[(0.9*lam_avgs < lam_w) & (1.1*lam_avgs > lam_w)]
opac_grid = opacity.size_average_opacity(lam_avgs, a_w, lam_w, k_abs_w.T, k_sca_w.T, q=3.5, plot=True)
function_ext = interpolate.interp1d(a_w, opac_grid['ka'][:] + opac_grid['ks'][:], kind='cubic')
function_alb = interpolate.interp1d(a_w, opac_grid['ks'][:]/(opac_grid['ka'][:] + opac_grid['ks'][:]), kind='cubic')
if not scattering:
    function_alb = interpolate.interp1d(a_w, np.zeros((np.shape(opac_grid['ks'][:]))), kind='cubic')
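
# Illustrative driver sketch (hypothetical grids; the EQ_* model functions and
# the constants referenced above must be supplied by the enclosing script):
#
#   x_grid = np.linspace(-Rout, Rout, 100)
#   y_grid = np.linspace(-Rout, Rout, 100)
#   funcion_S, funcion_T = FUN_creates_source_function(x_grid, y_grid)
#   z_crit = FUN_limits_mult(x_grid[50], y_grid[50])  # ray-integration limits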
d281bf9d519356903906b4ce02f43f84e40f8147 | 2,893 | py | Python | F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | [
"MIT"
] | 1 | 2021-11-15T19:24:40.000Z | 2021-11-15T19:24:40.000Z | F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | [
"MIT"
] | null | null | null | F0AM_Tools/TUV_to_mat.py | jdhask/pyMCM | 32b65e1dff2e9626df5d52623fd1ac4af29f8c57 | [
"MIT"
] | 2 | 2021-11-15T19:23:46.000Z | 2021-11-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 18:06:05 2021
@author: jhask
"""
import csv
import pandas as pd
import numpy as np
import re
import scipy.io as sio
import os
# Map MCM names to TUV labels
j_vals_dict= dict({
'O3 -> O2 + O(1D)':'J1',
'O3 -> O2 + O(3P)':'J2',
'H2O2 -> 2 OH':'J3',
'NO2 -> NO + O(3P)':'J4',
'NO3 -> NO + O2':'J5',
'NO3 -> NO2 + O(3P)':'J6',
'HNO2 -> OH + NO':'J7',
'HNO3 -> OH + NO2':'J8',
'CH2O -> H + HCO':'J11',
'CH2O -> H2 + CO':'J12',
'CH3CHO -> CH3 + HCO':'J13',
'C2H5CHO -> C2H5 + HCO':'J14',
'CH2=C(CH3)CHO -> Products':'J18',
'CH3COCH3 -> CH3CO + CH3':'J21',
'CH3COCH2CH3 -> CH3CO + CH2CH3':'J22',
'CH3COCH=CH2 -> Products':'J23',
'CHOCHO -> H2 + 2CO':'J31',
'CHOCHO -> CH2O + CO':'J32',
'CHOCHO -> HCO + HCO':'J33',
'CH3COCHO -> CH3CO + HCO':'J34',
'CH3COCOCH3 -> Products':'J35',
'CH3OOH -> CH3O + OH':'J41',
'CH3ONO2 -> CH3O + NO2':'J51',
'C2H5ONO2 -> C2H5O + NO2':'J52',
'n-C3H7ONO2 -> C3H7O + NO2':'J53',
'CH3CHONO2CH3 -> CH3CHOCH3 + NO2':'J54',
'C(CH3)3(ONO2) -> C(CH3)3(O.) + NO2':'J55',
'CH3COCH2(ONO2) -> CH3COCH2(O.) + NO2':'J56',
'CH2(OH)COCH3 -> CH3CO + CH2(OH)':'Jn10',
'CH2=CHCHO -> Products':'Jn11',
'CH3CO(OONO2) -> CH3CO(OO) + NO2':'Jn14',
'CH3CO(OONO2) -> CH3CO(O) + NO3':'Jn15',
'CH3(OONO2) -> CH3(OO) + NO2':'Jn16',
'CH3(OONO2) -> CH3(OO) + NO2':'Jn17',
'N2O5 -> NO3 + NO2':'Jn19',
'N2O5 -> NO3 + NO + O(3P)':'Jn20',
'HNO4 -> HO2 + NO2':'Jn21'})
#TUV output file.
file= 'C:/Users/jhask/OneDrive/Documents/MATLAB/F0AM/Setups/SOAS_RCIM/foam_6_29_out.txt'
with open(file, "r", errors="ignore") as f:
    reader = csv.reader(f, delimiter="\t")
    # Initialize the vars we fill in while reading the file.
    map_cols = dict({})  # TUV column label -> MCM J-label
    pass_go = False      # True once the data-table header line has been seen

    for row in reader:
        line = " ".join(row)  # rejoin the row into a single line.

        # Does this line name one of the photolysis reactions we care about?
        hdrs = [key for key in j_vals_dict.keys() if key in line]
        if len(hdrs) > 0:
            headers = re.search(r"[\d]*[\=\w]", line)  # leading reaction number.
            print(line, hdrs, j_vals_dict[hdrs[0]])
            if headers: map_cols[headers.group()] = j_vals_dict[hdrs[0]]

        if (pass_go is True) and ('------' not in line):
            # Append the J-values to the dataframe at this point in time.
            splt = [float(item) for item in line.split(" ") if item != '']
            df.loc[len(df)] = np.array(splt)

        if 'time, hrs. sza, deg.' in line:
            pass_go = True
            df = pd.DataFrame(columns=['time', 'sza'] + list(map_cols.values()))
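# Convert each dataframe column to a plain numpy array so scipy can write a MATLAB struct.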
to_mat={name: col.values for name, col in df.items()}
filename = os.path.join('C:/Users/jhask/OneDrive/Documents/MATLAB/F0AM/Setups/SOAS_RCIM/', 'F0AM_tuv.mat')
sio.savemat(filename, to_mat)
print(filename)
avg_line_length: 30.452632 | max_line_length: 105 | alphanum_fraction: 0.5458 | filtered:remove_non_ascii: 0 | filtered:remove_delete_markers: 0
hexsha: 1d7b25e9a1db4f378a05b7199423917d7b5b9f81 | size: 1,343 | ext: py | lang: Python
max_stars: extract_url.py @ nickinack/extract_url (head d084ca0a791d5c50ab2accaee7cb4d0b981bd132, ["MIT"]) | count: 2 | events: 2022-02-07T05:51:36.000Z to 2022-02-07T05:52:11.000Z
max_issues: extract_url.py @ nickinack/extract_url (head d084ca0a791d5c50ab2accaee7cb4d0b981bd132, ["MIT"]) | count: null
max_forks: extract_url.py @ nickinack/extract_url (head d084ca0a791d5c50ab2accaee7cb4d0b981bd132, ["MIT"]) | count: 1 | events: 2020-05-18T08:29:22.000Z to 2020-05-18T08:29:22.000Z
'''
Imports
'''
from config import *  # expected to define common_urls_http and common_urls_https
import csv
import re
from collections import defaultdict
'''
URL Extract
'''
columns = defaultdict(list)
with open('SecurityIDRBT.csv') as f:
    reader = csv.DictReader(f)  # read rows into a dictionary format
    for row in reader:  # read a row as {column1: value1, column2: value2, ...}
        for (k, v) in row.items():  # go over each column name and value
            columns[k].append(v)  # append the value into the list for column k

url_list = []
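# Pull every http(s) URL out of each record's 'Body' field with a simple regex.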
for element in range(len(columns['Body'])):
    urls = re.findall(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', columns['Body'][element])
    for url in urls:
        url_list.append(url)
'''
Find unique URLs and filter out known-common URLs (the config lists)
'''
url_unique = []
for element in url_list:
    if element not in url_unique:
        if element not in common_urls_http:
            if element not in common_urls_https:
                url_unique.append(element)
'''
Write it in a new CSV
'''
with open('url.csv', 'w', newline='') as myfile:
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    for word in url_unique:
        wr.writerow([word])
avg_line_length: 29.844444 | max_line_length: 95 | alphanum_fraction: 0.603127 | filtered:remove_non_ascii: 0 | filtered:remove_delete_markers: 0
hexsha: 56b682792eb61ccb189ac68b9d7a874cbd6c0a60 | size: 3,279 | ext: py | lang: Python
max_stars: test/python/test_mapper_coupling.py @ kifumi/qiskit-terra (head 203fca6d694a18824a6b12cbabd3dd2c64dd12ae, ["Apache-2.0"]) | count: 1 | events: 2018-11-01T01:35:43.000Z to 2018-11-01T01:35:43.000Z
max_issues: test/python/test_mapper_coupling.py @ a-amaral/qiskit-terra (head e73beba1e68de2617046a7e1e9eeac375b61de81, ["Apache-2.0"]) | count: null
max_forks: test/python/test_mapper_coupling.py @ a-amaral/qiskit-terra (head e73beba1e68de2617046a7e1e9eeac375b61de81, ["Apache-2.0"]) | count: null
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=missing-docstring
from qiskit.mapper import _coupling
from .common import QiskitTestCase
class CouplingTest(QiskitTestCase):
    def test_coupling_dict2list(self):
        input_dict = {0: [1, 2], 1: [2]}
        result = _coupling.coupling_dict2list(input_dict)
        expected = [[0, 1], [0, 2], [1, 2]]
        self.assertEqual(expected, result)

    def test_coupling_dict2list_empty_dict(self):
        self.assertIsNone(_coupling.coupling_dict2list({}))

    def test_coupling_list2dict(self):
        input_list = [[0, 1], [0, 2], [1, 2]]
        result = _coupling.coupling_list2dict(input_list)
        expected = {0: [1, 2], 1: [2]}
        self.assertEqual(expected, result)

    def test_coupling_list2dict_empty_list(self):
        self.assertIsNone(_coupling.coupling_list2dict([]))

    def test_empty_coupling_class(self):
        coupling = _coupling.Coupling()
        self.assertEqual(0, coupling.size())
        self.assertEqual([], coupling.get_qubits())
        self.assertEqual([], coupling.get_edges())
        self.assertFalse(coupling.connected())
        self.assertEqual("", str(coupling))

    def test_coupling_str(self):
        coupling_dict = {0: [1, 2], 1: [2]}
        coupling = _coupling.Coupling(coupling_dict)
        expected = ("qubits: q[0] @ 1, q[1] @ 2, q[2] @ 3\n"
                    "edges: q[0]-q[1], q[0]-q[2], q[1]-q[2]")
        self.assertEqual(expected, str(coupling))

    def test_coupling_compute_distance(self):
        coupling_dict = {0: [1, 2], 1: [2]}
        coupling = _coupling.Coupling(coupling_dict)
        self.assertTrue(coupling.connected())
        coupling.compute_distance()
        qubits = coupling.get_qubits()
        result = coupling.distance(qubits[0], qubits[1])
        self.assertEqual(1, result)

    def test_coupling_compute_distance_coupling_error(self):
        coupling = _coupling.Coupling()
        self.assertRaises(_coupling.CouplingError, coupling.compute_distance)

    def test_add_qubit(self):
        coupling = _coupling.Coupling()
        self.assertEqual("", str(coupling))
        coupling.add_qubit(('q', 0))
        self.assertEqual("qubits: q[0] @ 1", str(coupling))

    def test_add_qubit_not_tuple(self):
        coupling = _coupling.Coupling()
        self.assertRaises(_coupling.CouplingError, coupling.add_qubit, 'q0')

    def test_add_qubit_tuple_incorrect_form(self):
        coupling = _coupling.Coupling()
        self.assertRaises(_coupling.CouplingError, coupling.add_qubit,
                          ('q', '0'))

    def test_add_edge(self):
        coupling = _coupling.Coupling()
        self.assertEqual("", str(coupling))
        coupling.add_edge(("q", 0), ('q', 1))
        expected = ("qubits: q[0] @ 1, q[1] @ 2\n"
                    "edges: q[0]-q[1]")
        self.assertEqual(expected, str(coupling))

    def test_distance_error(self):
        """Test distance method validation."""
        graph = _coupling.Coupling({0: [1, 2], 1: [2]})
        self.assertRaises(_coupling.CouplingError, graph.distance, ('q0', 0), ('q1', 1))
avg_line_length: 36.433333 | max_line_length: 88 | alphanum_fraction: 0.633425 | filtered:remove_non_ascii: 0 | filtered:remove_delete_markers: 0
This dataset originated from bigcode/the-stack-dedup, with some filters applied. The filters applied to produce this dataset are:
- remove_non_ascii
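
For reference, here is a minimal sketch of reading this dataset with the Hugging Face `datasets` library. The dataset ID below is a placeholder, not the real repository name; the column names follow the schema at the top of this card.

```python
from datasets import load_dataset

# Hypothetical dataset ID -- substitute the actual repository name of this filtered dump.
ds = load_dataset("username/the-stack-dedup-filtered", split="train", streaming=True)

for row in ds:
    # Each record carries the schema above: repo metadata columns plus the file `content`.
    print(row["max_stars_repo_path"], row["lang"], row["size"])
    break
```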