the-stack_0_1033
import numpy as np
""" This file creates a grid of stars for the HRD plot """
metallicities = [0.0001, 0.001, 0.01, 0.01416]
# make a small grid for all metallicities
with open("grid.txt", "w") as f:
masses = np.round(np.logspace(np.log10(0.1), np.log10(150.0), 500), 3)
grid_lines = ["--initial-mass {} --metallicity {} \n".format(masses[i], metallicities[j]) for j in range(len(metallicities)) for i in range(len(masses))]
f.writelines(grid_lines)
# make a small grid just for solar
with open("rapid_grid.txt", "w") as f:
masses = np.round(np.logspace(np.log10(0.1), np.log10(150.0), 500), 3)
grid_lines = ["--initial-mass {} --metallicity {} \n".format(masses[i], 0.0001) for i in range(len(masses))]
f.writelines(grid_lines)
# make a dense grid of solar (mostly dense for NSs and low mass BHs)
with open("MM20_grid.txt", "w") as f:
low_masses = np.round(np.logspace(np.log10(0.1), np.log10(8.0), 200), 4)
med_masses = np.round(np.logspace(np.log10(8.0), np.log10(50.0), 4600), 4)
high_masses = np.round(np.logspace(np.log10(50.0), np.log10(150.0), 200), 4)
masses = np.concatenate((low_masses, med_masses, high_masses))
grid_lines = ["--initial-mass {} --metallicity {} \n".format(masses[i], 0.01416) for i in range(len(masses))]
f.writelines(grid_lines)
the-stack_0_1034
import statsapi
import pandas as pd
# logging
import logging
logger = logging.getLogger('statsapi')
logger.setLevel(logging.DEBUG)
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(levelname)8s - %(name)s(%(thread)s) - %(message)s")
ch.setFormatter(formatter)
rootLogger.addHandler(ch)
# from ptimeit import timethis, Timer
# @timethis()
# def get_rookie_hr_leader():
# rookie_hr_leaders = statsapi.league_leaders('homeRuns', season=2021, playerPool = 'rookies', limit=15)
# print(rookie_hr_leaders)
# Timer.run(1)
from functools import wraps
from time import time
def timing(f):
@wraps(f)
def wrap(*args, **kw):
ts = time()
result = f(*args, **kw)
te = time()
print(f"func: {f.__name__} args:{args},{kw} took {te-ts:2.4f} secs")
# print 'func:%r args:[%r, %r] took: %2.4f sec' % \
# (f.__name__, args, kw, te-ts)
return result
return wrap
@timing
def get_rookie_hr_leader():
rookie_hr_leaders = statsapi.league_leaders('homeRuns', season=2021, playerPool = 'rookies', limit=15)
print(rookie_hr_leaders)
get_rookie_hr_leader()
the-stack_0_1036
#! /usr/bin/python
# coding: utf-8
# Copyright 2018 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import matplotlib
matplotlib.use('Agg') # Generate images without having a window appear
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from argparse import ArgumentParser
from itertools import cycle
import pandas as pd
import csv
from __init__ import INTENT_JUDGE_COLUMN, UTF_8, CONFIDENCE_COLUMN, \
PREDICTED_INTENT_COLUMN, GOLDEN_INTENT_COLUMN, \
INTENT_COLUMN, POPULATION_WEIGHT_MODE, \
EQUAL_WEIGHT_MODE, DEFAULT_CONF_THRES, SCORE_COLUMN
# total different number of line style len(line_styles) * len(line_color) = 12
line_styles = ['-', '--', '-.', ':']
line_color = ['b', 'g', 'r']
LEGEND_AXIS_FONT_SIZE = 14
TITLE_FONT_SIZE = 16
WEIGHT_COLUMN = 'weight'
def func(args):
""" Read classifier results and draw the curves on one canvas for comparison
Input Schema:
| predicted intent | confidence | does intent match |
| intent 0 | confidence score | yes/no value |
"""
classifier_stat_list = []
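# Illustrative rows for the input schema described in the docstring above
# (a hypothetical CSV; the actual column names are whatever
# PREDICTED_INTENT_COLUMN, CONFIDENCE_COLUMN and INTENT_JUDGE_COLUMN
# resolve to in __init__):
#
#   predicted intent,confidence,does intent match
#   greeting,0.93,yes
#   goodbye,0.41,no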
cf_frames = []
confidences_list = []
intents_in_results = pd.Series()
classifier_num = len(args.classifiers_results)
# Prepare labels for each curve
labels = [os.path.splitext(os.path.basename(file_path))[0]
for file_path in args.classifiers_results]
# Only customize labels if the numbers match
if args.classifier_names and \
(len(args.classifier_names) == classifier_num):
labels = args.classifier_names
# Initialization
for i in range(classifier_num):
file_path = args.classifiers_results[i]
frame = pd.read_csv(file_path, encoding=UTF_8, quoting=csv.QUOTE_ALL)
if INTENT_JUDGE_COLUMN not in frame.columns: # Column validation
raise ValueError("'{}' column not in {}".format(
INTENT_JUDGE_COLUMN, file_path))
# Read the cf files into list
cf_frames.append(frame)
# Collect all intents from the classification results
intents_in_results = pd.concat([intents_in_results,
frame[PREDICTED_INTENT_COLUMN]])
# Convert NaN to zero values to avoid using zero as a divider
confidences_list.append(frame[CONFIDENCE_COLUMN].fillna(0)
.drop_duplicates().sort_values().tolist())
intents_in_results = intents_in_results.drop_duplicates()
# Read weight
weights_df = None
weight_mode = args.weight.lower()
# Read the intent weights pairs from file
if weight_mode != POPULATION_WEIGHT_MODE and \
weight_mode != EQUAL_WEIGHT_MODE:
try:
weights_df = pd.read_csv(args.weight, encoding=UTF_8,
quoting=csv.QUOTE_ALL)
# Validate the completeness
for _, intent in intents_in_results.iteritems():
if not any(weights_df[INTENT_COLUMN] == intent):
raise ValueError("'{}' intent not in {}".format(
intent, args.weight))
except Exception as e:
print(e)
weight_mode = POPULATION_WEIGHT_MODE # default population mode
print('Fall back to {} mode'.format(POPULATION_WEIGHT_MODE))
# Init the classifier_stat_list:
for i in range(classifier_num):
# array of zeros to hold precision values
classifier_stat_list.append(np.zeros([len(confidences_list[i]), 3]))
for j in range(classifier_num):
confidences = confidences_list[j]
for i in range(len(confidences)):
conf = confidences[i]
cf_frame = cf_frames[j]
precision = 0
answered = \
cf_frame[cf_frame[CONFIDENCE_COLUMN] >= conf].shape[0]
if weight_mode == POPULATION_WEIGHT_MODE:
correct = cf_frame[
cf_frame[CONFIDENCE_COLUMN] >= conf][SCORE_COLUMN].sum()
precision = correct / answered
# print(precision)
else:
intent_uttr_num_map = \
cf_frame[cf_frame[CONFIDENCE_COLUMN] >= conf] \
.groupby(PREDICTED_INTENT_COLUMN)[PREDICTED_INTENT_COLUMN] \
.count().to_dict()
# Calculate precision using equal weights
uttr_correct_intent = \
cf_frame[cf_frame[CONFIDENCE_COLUMN] >= conf] \
.groupby(GOLDEN_INTENT_COLUMN)[SCORE_COLUMN] \
.sum()
intent_weights = None
weight_coeff = 1 / len(intent_uttr_num_map)
if weight_mode != EQUAL_WEIGHT_MODE:
required_weights_df = \
weights_df[
weights_df[INTENT_COLUMN]
.isin(uttr_correct_intent.index)]
weight_sum = required_weights_df[WEIGHT_COLUMN].sum()
# Normalize weights
weights_df[WEIGHT_COLUMN] = \
weights_df[WEIGHT_COLUMN] / weight_sum
intent_weights = \
weights_df.set_index(INTENT_COLUMN)[WEIGHT_COLUMN] \
.to_dict()
for intent, correct_intent_num in \
uttr_correct_intent.iteritems():
if weight_mode != EQUAL_WEIGHT_MODE:
weight_coeff = intent_weights[intent]
precision += \
weight_coeff * correct_intent_num \
/ intent_uttr_num_map[intent]
classifier_stat_list[j][i, 0] = precision
classifier_stat_list[j][i, 1] = 100 * answered / len(cf_frame)
classifier_stat_list[j][i, 2] = conf
for idx in range(len(classifier_stat_list)):
# reversing order for helpful plotting
classifier_stat_list[idx] = classifier_stat_list[idx][::-1]
# plotting
fig = plt.figure()
ax = fig.gca()
ax.set_ylim([0, 1.0])  # Hardcoding y-axis to a consistent 0-1.0 to ease historical comparisons
ax.grid(color='b', linestyle='--', alpha=0.3)
ax.set_xlabel('Percentage of Questions Answered',
fontsize=LEGEND_AXIS_FONT_SIZE)
ax.set_ylabel('Precision', fontsize=LEGEND_AXIS_FONT_SIZE)
line_style_cycler = cycle(line_styles)
line_color_cycler = cycle(line_color)
lines = [] # reference to lines
# plot the curve and save the figure
for i in range(len(classifier_stat_list)):
classifier_stat = classifier_stat_list[i]
# Default to the idx of lowest conf
tau_idx = len(classifier_stat[:, 2]) - 1
indices_gtr_tau, = np.where(classifier_stat[:, 2] <= args.tau)
if len(indices_gtr_tau) > 0:
tau_idx = indices_gtr_tau[0]
color = next(line_color_cycler)
line, = plt.plot(classifier_stat[:, 1], classifier_stat[:, 0],
color=color, label=labels[i],
linestyle=next(line_style_cycler))
plt.plot(classifier_stat[tau_idx, 1], classifier_stat[tau_idx, 0],
'{}o'.format(color), markerfacecolor='None')
lines.append(line)
tau_desc = mlines.Line2D([], [], markeredgecolor='black', marker='o',
linestyle='None', markerfacecolor='None',
markersize=10,
label='tau = {}'.format(args.tau))
ax.legend(handles=lines + [tau_desc], loc='lower left', shadow=False,
prop={'size': LEGEND_AXIS_FONT_SIZE})
ax.set_title(args.figure_title,
fontsize=TITLE_FONT_SIZE)
if args.ymin != 0.0:
plt.ylim(args.ymin, 1.0)
# Save figure as file
plt.savefig(args.outfile)
print("Wrote precision curve to {}".format(args.outfile))
def create_parser():
parser = ArgumentParser(description="Draw precision curves on a single canvas \
from multiple classifiers' classification results")
parser.add_argument('-i', '--classifiers_results', nargs='+',
required=True,
help='Files of results from individual classifiers')
parser.add_argument('-n', '--classifier_names', nargs='*',
help='Names of each classifier')
parser.add_argument('-t', '--figure_title', required=True, type=str,
help='Title of output figure')
parser.add_argument('-o', '--outfile', help='File of the output figure',
default='figure.png', type=str)
parser.add_argument('-w', '--weight', default='population', type=str,
help='Weight configuration for each intent')
parser.add_argument('--tau', default=DEFAULT_CONF_THRES, type=float,
help='Confidence threshold for curve marker')
parser.add_argument('--ymin', default=0.0, type=float,
help='Minimum for Y axis')
return parser
if __name__ == '__main__':
ARGS = create_parser().parse_args()
func(ARGS)
the-stack_0_1037
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
#
# PYTHON_ARGCOMPLETE_OK (Must be in first 1024 bytes, so if tab completion
# is failing, move this above the license)
import argcomplete
import argparse
import importlib
import logging
import os
import pdb
import sys
import traceback
from datetime import datetime
from dateutil.parser import parse as date_parse
try:
from setproctitle import setproctitle
except ImportError:
def setproctitle(t):
return None
from c7n import deprecated
from c7n.config import Config
DEFAULT_REGION = 'us-east-1'
log = logging.getLogger('custodian.cli')
def _default_options(p, exclude=[]):
""" Add basic options to the subparser.
`exclude` is a list of options to exclude from the default set.
e.g.: ['region', 'log-group']
"""
provider = p.add_argument_group(
"provider", "AWS account information, defaults per the aws cli")
if 'region' not in exclude:
provider.add_argument(
"-r", "--region", action='append', default=[],
dest='regions', metavar='REGION',
help="AWS Region to target. Can be used multiple times")
provider.add_argument(
"--profile",
help="AWS Account Config File Profile to utilize")
provider.add_argument("--assume", default=None, dest="assume_role",
help="Role to assume")
provider.add_argument("--external-id", default=None, dest="external_id",
help="External Id to provide when assuming a role")
config = p.add_argument_group(
"config", "Policy config file(s) and policy selectors")
# -c is deprecated. Supported for legacy reasons
config.add_argument("-c", "--config", help=argparse.SUPPRESS)
config.add_argument("configs", nargs='*',
help="Policy configuration file(s)")
config.add_argument("-p", "--policies", default=[], dest='policy_filters',
action='append', help="Only use named/matched policies")
config.add_argument("-t", "--resource", default=[], dest='resource_types',
action='append',
help="Only use policies with the given resource type")
output = p.add_argument_group("output", "Output control")
output.add_argument("-v", "--verbose", action="count", help="Verbose logging")
if 'quiet' not in exclude:
output.add_argument("-q", "--quiet", action="count",
help="Less logging (repeatable, -qqq for no output)")
else:
output.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
output.add_argument("--debug", default=False, help=argparse.SUPPRESS,
action="store_true")
if 'vars' not in exclude:
# p.add_argument('--vars', default=None,
# help='Vars file to substitute into policy')
p.set_defaults(vars=None)
if 'log-group' not in exclude:
p.add_argument(
"-l", "--log-group", default=None,
help="Location to send policy logs (Ex: AWS CloudWatch Log Group)")
else:
p.add_argument("--log-group", default=None, help=argparse.SUPPRESS)
if 'output-dir' not in exclude:
p.add_argument("-s", "--output-dir", required=True,
help="[REQUIRED] Directory or S3 URL For policy output")
if 'cache' not in exclude:
p.add_argument(
"-f", "--cache", default="~/.cache/cloud-custodian.cache",
help="Cache file (default %(default)s)")
p.add_argument(
"--cache-period", default=15, type=int,
help="Cache validity in minutes (default %(default)i)")
else:
p.add_argument("--cache", default=None, help=argparse.SUPPRESS)
def _report_options(p):
""" Add options specific to the report subcommand. """
_default_options(p, exclude=['cache', 'log-group', 'quiet'])
p.add_argument(
'--days', type=float, default=1,
help="Number of days of history to consider")
p.add_argument(
'--raw', type=argparse.FileType('w'),
help="Store raw json of collected records to given file path")
p.add_argument(
'--field', action='append', default=[], type=_key_val_pair,
metavar='HEADER=FIELD',
help='Repeatable. JMESPath of field to include in the output OR '
'for a tag use prefix `tag:`. Special case fields `region` and'
'`policy` are available')
p.add_argument(
'--no-default-fields', action="store_true",
help='Exclude default fields for report.')
p.add_argument(
'--format', default='csv', choices=['csv', 'grid', 'simple', 'json'],
help="Format to output data in (default: %(default)s). "
"Options include simple, grid, csv, json")
p.add_argument(
'--all-findings', default=False, action="store_true",
help="Outputs all findings per resource. Defaults to a single finding per resource. ")
def _metrics_options(p):
""" Add options specific to metrics subcommand. """
_default_options(p, exclude=['log-group', 'output-dir', 'cache', 'quiet'])
p.add_argument(
'--start', type=date_parse,
help='Start date (requires --end, overrides --days)')
p.add_argument(
'--end', type=date_parse, help='End date')
p.add_argument(
'--days', type=int, default=14,
help='Number of days of history to consider (default: %(default)i)')
p.add_argument('--period', type=int, default=60 * 24 * 24)
def _logs_options(p):
""" Add options specific to logs subcommand. """
_default_options(p, exclude=['cache', 'quiet'])
# default time range is 0 to "now" (to include all log entries)
p.add_argument(
'--start',
default='the beginning', # invalid, will result in 0
help='Start date and/or time',
)
p.add_argument(
'--end',
default=datetime.now().strftime('%c'),
help='End date and/or time',
)
def _schema_options(p):
""" Add options specific to schema subcommand. """
p.add_argument(
'resource', metavar='selector', nargs='?', default=None)
p.add_argument(
'--summary', action="store_true",
help="Summarize counts of available resources, actions and filters")
p.add_argument('--json', action="store_true",
help="Export custodian's jsonschema")
p.add_argument('--outline', action="store_true",
help="Print outline of all resources and their actions and filters")
p.add_argument("-v", "--verbose", action="count", help="Verbose logging")
p.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
p.add_argument("--debug", default=False, help=argparse.SUPPRESS)
def _dryrun_option(p):
p.add_argument(
"-d", "--dryrun", "--dry-run", action="store_true",
help="Don't execute actions but filter resources")
def _key_val_pair(value):
"""
Type checker to ensure that --field values are of the format key=val
"""
if '=' not in value:
msg = 'values must be of the form `header=field`'
raise argparse.ArgumentTypeError(msg)
return value
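# Illustrative use of the HEADER=FIELD form accepted above (a hypothetical
# invocation; output path, tag name and policy file are placeholders):
#
#     custodian report -s out/ --field Owner=tag:OwnerContact policy.yml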
def setup_parser():
c7n_desc = "Cloud Custodian - Cloud fleet management"
parser = argparse.ArgumentParser(description=c7n_desc)
# Setting `dest` means we capture which subparser was used.
subs = parser.add_subparsers(
title='commands',
dest='subparser')
run_desc = "\n".join((
"Execute the policies in a config file.",
"",
"Multiple regions can be passed in, as can the symbolic region 'all'. ",
"",
"When running across multiple regions, policies targeting resources in ",
"regions where they do not exist will not be run. The output directory ",
"when passing multiple regions is suffixed with the region. Resources ",
"with global endpoints are run just once and are suffixed with the first ",
"region passed in or us-east-1 if running against 'all' regions.",
""
))
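# Hypothetical invocation matching the description above (region names,
# output directory and policy file are placeholders):
#
#     custodian run -s output/ -r us-east-1 -r eu-west-1 policy.yml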
run = subs.add_parser(
"run", description=run_desc,
help="Execute the policies in a config file",
formatter_class=argparse.RawDescriptionHelpFormatter)
run.set_defaults(command="c7n.commands.run")
_default_options(run)
_dryrun_option(run)
run.add_argument(
"--skip-validation",
action="store_true",
help="Skips validation of policies (assumes you've run the validate command separately).")
metrics_help = ("Emit metrics to provider metrics. Specify 'aws', 'gcp', or 'azure'. "
"For more details on aws metrics options, see: "
"https://cloudcustodian.io/docs/aws/usage.html#metrics")
run.add_argument(
"-m", "--metrics-enabled", metavar="PROVIDER",
default=None, nargs="?", const="aws",
help=metrics_help)
run.add_argument(
"--trace",
dest="tracer",
help="Tracing integration",
default=None, nargs="?", const="default")
schema_desc = ("Browse the available vocabularies (resources, filters, modes, and "
"actions) for policy construction. The selector "
"is specified with RESOURCE[.CATEGORY[.ITEM]] "
"examples: s3, ebs.actions, or ec2.filters.instance-age")
schema = subs.add_parser(
'schema', description=schema_desc,
help="Interactive cli docs for policy authors")
schema.set_defaults(command="c7n.commands.schema_cmd")
_schema_options(schema)
report_desc = ("Report of resources that a policy matched/ran on. "
"The default output format is csv, but other formats "
"are available.")
report = subs.add_parser(
"report", description=report_desc,
help="Tabular report on policy matched resources")
report.set_defaults(command="c7n.commands.report")
_report_options(report)
logs = subs.add_parser(
'logs')
logs.set_defaults(command="c7n.commands.logs")
_logs_options(logs)
metrics = subs.add_parser('metrics')
metrics.set_defaults(command="c7n.commands.metrics_cmd")
_metrics_options(metrics)
version = subs.add_parser(
'version', help="Display installed version of custodian")
version.set_defaults(command='c7n.commands.version_cmd')
version.add_argument('-v', '--verbose', action="count", help="Verbose logging")
version.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
version.add_argument(
"--debug", action="store_true",
help="Print info for bug reports")
validate_desc = (
"Validate config files against the json schema")
validate = subs.add_parser(
'validate', description=validate_desc, help=validate_desc)
validate.set_defaults(command="c7n.commands.validate", check_deprecations="yes")
validate.add_argument(
"-c", "--config", help=argparse.SUPPRESS)
validate.add_argument("configs", nargs='*',
help="Policy Configuration File(s)")
validate.add_argument("-v", "--verbose", action="count", help="Verbose Logging")
validate.add_argument("-q", "--quiet", action="count", help="Less logging (repeatable)")
validate.add_argument("--debug", default=False, help=argparse.SUPPRESS)
deprecations = validate.add_mutually_exclusive_group(required=False)
deprecations.add_argument("--no-deps", dest="check_deprecations",
action='store_const', const=deprecated.SKIP,
help="Do not check for deprecations")
deprecations.add_argument("--strict", dest="check_deprecations",
action='store_const', const=deprecated.STRICT,
help="Any deprecations will cause a non-zero exit code")
return parser
def _setup_logger(options):
level = 3 + (options.verbose or 0) - (options.quiet or 0)
if level <= 0:
# print nothing
log_level = logging.CRITICAL + 1
elif level == 1:
log_level = logging.ERROR
elif level == 2:
log_level = logging.WARNING
elif level == 3:
# default
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(
level=log_level,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
external_log_level = logging.ERROR
if level <= 0:
external_log_level = logging.CRITICAL + 1
elif level >= 5:
external_log_level = logging.INFO
logging.getLogger('botocore').setLevel(external_log_level)
logging.getLogger('urllib3').setLevel(external_log_level)
logging.getLogger('s3transfer').setLevel(external_log_level)
logging.getLogger('urllib3').setLevel(logging.ERROR)
def main():
parser = setup_parser()
argcomplete.autocomplete(parser)
options = parser.parse_args()
if options.subparser is None:
parser.print_help(file=sys.stderr)
return sys.exit(2)
_setup_logger(options)
# Support the deprecated -c option
if getattr(options, 'config', None) is not None:
options.configs.append(options.config)
config = Config.empty(**vars(options))
try:
command = options.command
if not callable(command):
command = getattr(
importlib.import_module(command.rsplit('.', 1)[0]),
command.rsplit('.', 1)[-1])
# Set the process name to something cleaner
process_name = [os.path.basename(sys.argv[0])]
process_name.extend(sys.argv[1:])
setproctitle(' '.join(process_name))
command(config)
except Exception:
if not options.debug:
raise
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
if __name__ == '__main__':
main()
the-stack_0_1038
#!/usr/bin/env python
'''
cchecker_web.reverse_proxy
Ruthlessly stolen from:
http://flask.pocoo.org/snippets/35/
'''
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
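# Minimal usage sketch, assuming a Flask application (Flask is not imported by
# the original snippet, so this block is illustrative rather than part of it):
if __name__ == '__main__':
    from flask import Flask
    example_app = Flask(__name__)
    # Wrap the WSGI app so the X-Script-Name / X-Scheme headers set by nginx
    # (see the docstring above) are honored when generating URLs.
    example_app.wsgi_app = ReverseProxied(example_app.wsgi_app)
    example_app.run(port=5001)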
the-stack_0_1039
__usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import threading
import itertools
import sys
import platform
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal_nulp,
assert_equal, assert_array_equal, suppress_warnings)
from pytest import raises as assert_raises
import pytest
from numpy import dot, conj, random
from scipy.linalg import eig, eigh, hilbert, svd
from scipy.sparse import csc_matrix, csr_matrix, isspmatrix, diags, rand
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence, arpack
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
IS_MACOS_ARM64 = sys.platform == 'darwin' and platform.machine() == 'arm64'
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex_=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N, N))
if complex_:
M = M + 1j * np.random.random((N, N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i, j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.nonzero(i == j)
j[ind] = (j[ind] + 1) % N
M[i, j] = 0
M[j, i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i, j] = 0
return M
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
M = np.random.random((N, N))
M = 0.5 * (M + M.T) # Make M symmetric
if pos_definite:
Id = N * np.eye(N)
if sparse:
M = csr_matrix(M)
M += Id
else:
if sparse:
M = csr_matrix(M)
return M
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
except AssertionError:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eigenvalues, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eigenvalues, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eigenvalues - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eigenvalues - sigma)
+ 1. / (eigenvalues - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eigenvalues - sigma)
- 1. / (eigenvalues - np.conj(sigma)))
elif mode == 'cayley':
reval = (eigenvalues + sigma) / (eigenvalues - sigma)
elif mode == 'buckling':
reval = eigenvalues / (eigenvalues - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ)
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eigenvalues, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eigenvalues, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eigenvalues, typ, k, which,
sigma, OPpart, mode)
eigenvalues = eigenvalues[ind]
evec = evec[:, ind]
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eigenvalues * np.dot(b, evec)
else:
RHS = eigenvalues * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
try:
# check eigenvalues
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
err_msg=err)
break
except AssertionError:
ntries += 1
# check eigenvalues
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<%s>" % self.name
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True).astype('F').astype('D')
Mc = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
# general hermitian problem with hermitian M
GHc = DictWithRepr("gen-hermitian-Mc")
GHc['mat'] = Ac
GHc['bmat'] = Mc
GHc['v0'] = v0
GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH, GHc]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
eval_evec(symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
@pytest.mark.skipif(IS_MACOS_ARM64, reason='failing on arm64')
def test_standard_nonsymmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, complex_=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_matrix(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_matrix(np.zeros((8, 8)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# This test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
#----------------------------------------------------------------------
# sparse SVD tests
def sorted_svd(m, k, which='LM'):
# Compute svd of a dense matrix m, and return singular vectors/values
# sorted.
if isspmatrix(m):
m = m.todense()
u, s, vh = svd(m)
if which == 'LM':
ii = np.argsort(s)[-k:]
elif which == 'SM':
ii = np.argsort(s)[:k]
else:
raise ValueError("unknown which=%r" % (which,))
return u[:, ii], s[ii], vh[ii]
def svd_estimate(u, s, vh):
return np.dot(u, np.dot(np.diag(s), vh))
def svd_test_input_check():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
assert_raises(ValueError, svds, x, k=-1)
assert_raises(ValueError, svds, x, k=0)
assert_raises(ValueError, svds, x, k=10)
assert_raises(ValueError, svds, x, k=x.shape[0])
assert_raises(ValueError, svds, x, k=x.shape[1])
assert_raises(ValueError, svds, x.T, k=x.shape[0])
assert_raises(ValueError, svds, x.T, k=x.shape[1])
def test_svd_simple_real():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
y = np.array([[1, 2, 3, 8],
[3, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], float)
z = csc_matrix(x)
for solver in [None, 'arpack', 'lobpcg']:
for m in [x.T, x, y, z, z.T]:
for k in range(1, min(m.shape)):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k, solver=solver)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_simple_complex():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1 + 1j, 0, 2],
[0, 0, 1]], complex)
y = np.array([[1, 2, 3, 8 + 5j],
[3 - 2j, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], complex)
z = csc_matrix(x)
for solver in [None, 'arpack', 'lobpcg']:
for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]:
for k in range(1, min(m.shape) - 1):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k, solver=solver)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_maxiter():
# check that maxiter works as expected
x = hilbert(6)
# ARPACK shouldn't converge on such an ill-conditioned matrix with just
# one iteration
assert_raises(ArpackNoConvergence, svds, x, 1, maxiter=1, ncv=3)
# but 100 iterations should be more than enough
u, s, vt = svds(x, 1, maxiter=100, ncv=3)
assert_allclose(s, [1.7], atol=0.5)
def test_svd_return():
# check that the return_singular_vectors parameter works as expected
x = hilbert(6)
_, s, _ = sorted_svd(x, 2)
ss = svds(x, 2, return_singular_vectors=False)
assert_allclose(s, ss)
def test_svd_which():
# check that the which parameter works as expected
x = hilbert(6)
for which in ['LM', 'SM']:
_, s, _ = sorted_svd(x, 2, which=which)
for solver in [None, 'arpack', 'lobpcg']:
ss = svds(x, 2, which=which, return_singular_vectors=False,
solver=solver)
ss.sort()
assert_allclose(s, ss, atol=np.sqrt(1e-15))
def test_svd_v0():
# check that the v0 parameter works as expected
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], float)
for solver in [None, 'arpack', 'lobpcg']:
u, s, vh = svds(x, 1, solver=solver)
u2, s2, vh2 = svds(x, 1, v0=u[:, 0], solver=solver)
assert_allclose(s, s2, atol=np.sqrt(1e-15))
def _check_svds(A, k, U, s, VH):
n, m = A.shape
# Check shapes.
assert_equal(U.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(VH.shape, (k, m))
# Check that the original matrix can be reconstituted.
A_rebuilt = (U*s).dot(VH)
assert_equal(A_rebuilt.shape, A.shape)
assert_allclose(A_rebuilt, A)
# Check that U is a semi-orthogonal matrix.
UH_U = np.dot(U.T.conj(), U)
assert_equal(UH_U.shape, (k, k))
assert_allclose(UH_U, np.identity(k), atol=1e-12)
# Check that V is a semi-orthogonal matrix.
VH_V = np.dot(VH, VH.T.conj())
assert_equal(VH_V.shape, (k, k))
assert_allclose(VH_V, np.identity(k), atol=1e-12)
def test_svd_LM_ones_matrix():
# Check that svds can deal with matrix_rank less than k in LM mode.
k = 3
for n, m in (6, 5), (5, 5), (5, 6):
for t in float, complex:
A = np.ones((n, m), dtype=t)
for solver in [None, 'arpack', 'lobpcg']:
U, s, VH = svds(A, k, solver=solver)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the largest singular value is near sqrt(n*m)
# and the other singular values have been forced to zero.
assert_allclose(np.max(s), np.sqrt(n*m))
assert_array_equal(sorted(s)[:-1], 0)
def test_svd_LM_zeros_matrix():
# Check that svds can deal with matrices containing only zeros.
k = 1
for n, m in (3, 4), (4, 4), (4, 3):
for t in float, complex:
A = np.zeros((n, m), dtype=t)
for solver in [None, 'arpack', 'lobpcg']:
U, s, VH = svds(A, k, solver=solver)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
def test_svd_LM_zeros_matrix_gh_3452():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/3452
# Note that for complex dtype the size of this matrix is too small for k=1.
n, m, k = 4, 2, 1
A = np.zeros((n, m))
for solver in [None, 'arpack', 'lobpcg']:
U, s, VH = svds(A, k, solver=solver)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
class CheckingLinearOperator(LinearOperator):
def __init__(self, A):
self.A = A
self.dtype = A.dtype
self.shape = A.shape
def _matvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.dot(x)
def _rmatvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.T.conjugate().dot(x)
def test_svd_linop():
nmks = [(6, 7, 3),
(9, 5, 4),
(10, 8, 5)]
def reorder(args):
U, s, VH = args
j = np.argsort(s)
return U[:, j], s[j], VH[j, :]
for n, m, k in nmks:
# Test svds on a LinearOperator.
A = np.random.RandomState(52).randn(n, m)
L = CheckingLinearOperator(A)
v0 = np.ones(min(A.shape))
for solver in [None, 'arpack', 'lobpcg']:
U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver))
U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
# Try again with which="SM".
A = np.random.RandomState(1909).randn(n, m)
L = CheckingLinearOperator(A)
for solver in [None, 'arpack', 'lobpcg']:
U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver))
U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
if k < min(n, m) - 1:
# Complex input and explicit which="LM".
for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]:
rng = np.random.RandomState(1648)
A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
L = CheckingLinearOperator(A)
for solver in [None, 'arpack', 'lobpcg']:
U1, s1, VH1 = reorder(svds(A, k, which="LM", solver=solver))
U2, s2, VH2 = reorder(svds(L, k, which="LM", solver=solver))
assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
assert_allclose(s1, s2, rtol=eps)
assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)),
rtol=eps)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
# Check that the linear operators used by the Arpack wrappers are
# deallocatable by reference counting -- they are big objects, so
# Python's cyclic GC may not collect them fast enough before
# running out of memory if eigs/eigsh are called in a tight loop.
M_d = np.eye(10)
M_s = csc_matrix(M_d)
M_o = aslinearoperator(M_d)
with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
pass
with assert_deallocated(lambda: arpack.LuInv(M_d)):
pass
with assert_deallocated(lambda: arpack.IterInv(M_s)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
pass
def test_svds_partial_return():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
# test vertical matrix
z = csr_matrix(x)
vh_full = svds(z, 2)[-1]
vh_partial = svds(z, 2, return_singular_vectors='vh')[-1]
dvh = np.linalg.norm(np.abs(vh_full) - np.abs(vh_partial))
if dvh > 1e-10:
raise AssertionError('right eigenvector matrices differ when using return_singular_vectors parameter')
if svds(z, 2, return_singular_vectors='vh')[0] is not None:
raise AssertionError('left eigenvector matrix was computed when it should not have been')
# test horizontal matrix
z = csr_matrix(x.T)
u_full = svds(z, 2)[0]
u_partial = svds(z, 2, return_singular_vectors='vh')[0]
du = np.linalg.norm(np.abs(u_full) - np.abs(u_partial))
if du > 1e-10:
raise AssertionError('left eigenvector matrices differ when using return_singular_vectors parameter')
if svds(z, 2, return_singular_vectors='u')[-1] is not None:
raise AssertionError('right eigenvector matrix was computed when it should not have been')
def test_svds_wrong_eigen_type():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/4590
# Function was not checking for eigenvalue type and unintended
# values could be returned.
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
assert_raises(ValueError, svds, x, 1, which='LA')
def test_parallel_threads():
results = []
v0 = np.random.rand(50)
def worker():
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=3, v0=v0)
results.append(w)
w, v = eigsh(x, k=3, v0=v0)
results.append(w)
threads = [threading.Thread(target=worker) for k in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
worker()
for r in results:
assert_allclose(r, results[-1])
def test_reentering():
# Just some linear operator that calls eigs recursively
def A_matvec(x):
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=1)
return v / w[0]
A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
# The Fortran code is not reentrant, so this fails (gracefully, not crashing)
assert_raises(RuntimeError, eigs, A, k=1)
assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
# Check that issue arpack-ng/#1315 is not present.
# Adapted from arpack-ng/TESTS/bug_1315_single.c
# If this fails, then the installed ARPACK library is faulty.
for dtype in [np.float32, np.float64]:
np.random.seed(1234)
w0 = np.arange(1, 1000+1).astype(dtype)
A = diags([w0], [0], shape=(1000, 1000))
v0 = np.random.rand(1000).astype(dtype)
w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
assert_allclose(np.sort(w), np.sort(w0[-9:]),
rtol=1e-4)
def test_eigs_for_k_greater():
# Test eigs() for k beyond limits.
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False)
M_dense = np.random.random((4, 4))
M_sparse = generate_matrix(4, sparse=True)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eig(A, b=M_dense)
eig_tuple2 = eig(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigs, A, M=M_linop, k=3)
# Test 'A' for different types
assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
# Test eigsh() for k beyond limits.
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False)
M_dense = generate_matrix_symmetric(4, pos_definite=True)
M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eigh(A, b=M_dense)
eig_tuple2 = eigh(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigsh, A, M=M_linop, k=4)
# Test 'A' for different types
assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
def test_real_eigs_real_k_subset():
np.random.seed(1)
n = 10
A = rand(n, n, density=0.5)
A.data *= 2
A.data -= 1
v0 = np.ones(n)
whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
dtypes = [np.float32, np.float64]
for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
prev_w = np.array([], dtype=dtype)
eps = np.finfo(dtype).eps
for k in range(1, 9):
w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
v0=v0.astype(dtype), tol=0)
assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))
# Check that the set of eigenvalues for `k` is a subset of that for `k+1`
dist = abs(prev_w[:,None] - w).min(axis=1)
assert_allclose(dist, 0, atol=np.sqrt(eps))
prev_w = w
# Check sort order
if sigma is None:
d = w
else:
d = 1 / (w - sigma)
if which == 'LM':
# ARPACK is systematic for 'LM', but sort order
# appears not well defined for other modes
assert np.all(np.diff(abs(d)) <= 1e-6)
the-stack_0_1042
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class locationparameter(base_resource) :
"""Configuration for location parameter resource."""
def __init__(self) :
self._context = ""
self._q1label = ""
self._q2label = ""
self._q3label = ""
self._q4label = ""
self._q5label = ""
self._q6label = ""
self._Locationfile = ""
self._format = ""
self._custom = 0
self._Static = 0
self._lines = 0
self._errors = 0
self._warnings = 0
self._entries = 0
self._locationfile6 = ""
self._format6 = ""
self._custom6 = 0
self._static6 = 0
self._lines6 = 0
self._errors6 = 0
self._warnings6 = 0
self._entries6 = 0
self._flags = 0
self._status = 0
self._databasemode = ""
self._flushing = ""
self._loading = ""
@property
def context(self) :
"""Context for describing locations. In geographic context, qualifier labels are assigned by default in the following sequence: Continent.Country.Region.City.ISP.Organization. In custom context, the qualifier labels can have any meaning that you designate.<br/>Possible values = geographic, custom."""
try :
return self._context
except Exception as e:
raise e
@context.setter
def context(self, context) :
"""Context for describing locations. In geographic context, qualifier labels are assigned by default in the following sequence: Continent.Country.Region.City.ISP.Organization. In custom context, the qualifier labels can have any meaning that you designate.<br/>Possible values = geographic, custom
:param context:
"""
try :
self._context = context
except Exception as e:
raise e
@property
def q1label(self) :
"""Label specifying the meaning of the first qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q1label
except Exception as e:
raise e
@q1label.setter
def q1label(self, q1label) :
"""Label specifying the meaning of the first qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q1label:
"""
try :
self._q1label = q1label
except Exception as e:
raise e
@property
def q2label(self) :
"""Label specifying the meaning of the second qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q2label
except Exception as e:
raise e
@q2label.setter
def q2label(self, q2label) :
"""Label specifying the meaning of the second qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q2label:
"""
try :
self._q2label = q2label
except Exception as e:
raise e
@property
def q3label(self) :
"""Label specifying the meaning of the third qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q3label
except Exception as e:
raise e
@q3label.setter
def q3label(self, q3label) :
"""Label specifying the meaning of the third qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q3label:
"""
try :
self._q3label = q3label
except Exception as e:
raise e
@property
def q4label(self) :
"""Label specifying the meaning of the fourth qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q4label
except Exception as e:
raise e
@q4label.setter
def q4label(self, q4label) :
"""Label specifying the meaning of the fourth qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q4label:
"""
try :
self._q4label = q4label
except Exception as e:
raise e
@property
def q5label(self) :
"""Label specifying the meaning of the fifth qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q5label
except Exception as e:
raise e
@q5label.setter
def q5label(self, q5label) :
"""Label specifying the meaning of the fifth qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q5label:
"""
try :
self._q5label = q5label
except Exception as e:
raise e
@property
def q6label(self) :
"""Label specifying the meaning of the sixth qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q6label
except Exception as e:
raise e
@q6label.setter
def q6label(self, q6label) :
"""Label specifying the meaning of the sixth qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q6label:
"""
try :
self._q6label = q6label
except Exception as e:
raise e
@property
def Locationfile(self) :
"""Currently loaded location database file."""
try :
return self._Locationfile
except Exception as e:
raise e
@property
def format(self) :
"""Location file format.<br/>Possible values = netscaler, ip-country, ip-country-isp, ip-country-region-city, ip-country-region-city-isp, geoip-country, geoip-region, geoip-city, geoip-country-org, geoip-country-isp, geoip-city-isp-org."""
try :
return self._format
except Exception as e:
raise e
@property
def custom(self) :
"""Number of configured custom locations."""
try :
return self._custom
except Exception as e:
raise e
@property
def Static(self) :
"""Number of configured locations in the database file (static locations)."""
try :
return self._Static
except Exception as e:
raise e
@property
def lines(self) :
"""Number of lines in the database files."""
try :
return self._lines
except Exception as e:
raise e
@property
def errors(self) :
"""Number of errors encountered while reading the database file."""
try :
return self._errors
except Exception as e:
raise e
@property
def warnings(self) :
"""Number of warnings encountered while reading the database file."""
try :
return self._warnings
except Exception as e:
raise e
@property
def entries(self) :
"""Number of successfully added entries."""
try :
return self._entries
except Exception as e:
raise e
@property
def locationfile6(self) :
"""Currently loaded location database file."""
try :
return self._locationfile6
except Exception as e:
raise e
@property
def format6(self) :
"""Location file format.<br/>Possible values = netscaler6, geoip-country6."""
try :
return self._format6
except Exception as e:
raise e
@property
def custom6(self) :
"""Number of configured custom locations."""
try :
return self._custom6
except Exception as e:
raise e
@property
def static6(self) :
"""Number of configured locations in the database file (static locations)."""
try :
return self._static6
except Exception as e:
raise e
@property
def lines6(self) :
"""Number of lines in the databse files."""
try :
return self._lines6
except Exception as e:
raise e
@property
def errors6(self) :
"""Number of errros encountered while reading the database file."""
try :
return self._errors6
except Exception as e:
raise e
@property
def warnings6(self) :
"""Number of warnings encountered while reading the database file."""
try :
return self._warnings6
except Exception as e:
raise e
@property
def entries6(self) :
"""Number of successfully added entries."""
try :
return self._entries6
except Exception as e:
raise e
@property
def flags(self) :
"""Information needed for display. This argument passes information from the kernel to the user space."""
try :
return self._flags
except Exception as e:
raise e
@property
def status(self) :
"""This argument displays when the status (success or failure) of database loading."""
try :
return self._status
except Exception as e:
raise e
@property
def databasemode(self) :
"""This argument displays the database mode.<br/>Possible values = File, Internal, Not applicable."""
try :
return self._databasemode
except Exception as e:
raise e
@property
def flushing(self) :
"""This argument displays the state of flushing.<br/>Possible values = In progress, Idle."""
try :
return self._flushing
except Exception as e:
raise e
@property
def loading(self) :
"""This argument displays the state of loading.<br/>Possible values = In progress, Idle."""
try :
return self._loading
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(locationparameter_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.locationparameter
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
"""Use this API to update locationparameter.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
updateresource = locationparameter()
updateresource.context = resource.context
updateresource.q1label = resource.q1label
updateresource.q2label = resource.q2label
updateresource.q3label = resource.q3label
updateresource.q4label = resource.q4label
updateresource.q5label = resource.q5label
updateresource.q6label = resource.q6label
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
"""Use this API to unset the properties of locationparameter resource.
Properties that need to be unset are specified in args array.
:param client:
:param resource:
:param args:
"""
try :
if type(resource) is not list :
unsetresource = locationparameter()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the locationparameter resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = locationparameter()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class Loading:
""" """
In_progress = "In progress"
Idle = "Idle"
class Databasemode:
""" """
File = "File"
Internal = "Internal"
Not_applicable = "Not applicable"
class Format:
""" """
netscaler = "netscaler"
ip_country = "ip-country"
ip_country_isp = "ip-country-isp"
ip_country_region_city = "ip-country-region-city"
ip_country_region_city_isp = "ip-country-region-city-isp"
geoip_country = "geoip-country"
geoip_region = "geoip-region"
geoip_city = "geoip-city"
geoip_country_org = "geoip-country-org"
geoip_country_isp = "geoip-country-isp"
geoip_city_isp_org = "geoip-city-isp-org"
class Context:
""" """
geographic = "geographic"
custom = "custom"
class Flushing:
""" """
In_progress = "In progress"
Idle = "Idle"
class Format6:
""" """
netscaler6 = "netscaler6"
geoip_country6 = "geoip-country6"
class locationparameter_response(base_response) :
""" """
def __init__(self, length=1) :
self.locationparameter = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.locationparameter = [locationparameter() for _ in range(length)]
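# Illustrative usage sketch (an assumption, not part of the generated SDK): given an
# already authenticated nitro_service object named "client", the classmethods above
# can be driven as follows:
#
#     params = locationparameter.get(client)                 # fetch current settings
#     resource = locationparameter()
#     resource.context = locationparameter.Context.custom    # switch to custom context
#     resource.q1label = "building"                          # hypothetical qualifier labels
#     resource.q2label = "floor"
#     locationparameter.update(client, resource)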
|
the-stack_0_1044 | # -*- coding: utf-8 -*-
'''
Outputter for displaying results of state runs
==============================================
The return data from the Highstate command is a standard data structure
which is parsed by the highstate outputter to deliver a clean and readable
set of information about the HighState run on minions.
Two configurations can be set to modify the highstate outputter. These values
can be set in the master config to change the output of the ``salt`` command or
set in the minion config to change the output of the ``salt-call`` command.
state_verbose:
By default `state_verbose` is set to `True`, setting this to `False` will
    instruct the highstate outputter to omit displaying anything in green; this
    means that anything with a result of True and no changes will not be printed
state_output:
The highstate outputter has six output modes, ``full``, ``terse``,
``mixed``, ``mixed_id``, ``changes`` and ``filter``.
* The default is set to ``full``, which will display many lines of detailed
information for each executed chunk.
* If ``terse`` is used, then the output is greatly simplified and shown in
only one line.
* If ``mixed`` is used, then terse output will be used unless a state
failed, in which case full output will be used.
* If ``mixed_id`` is used, then the mixed form will be used, but the value for ``name``
will be drawn from the state ID. This is useful for cases where the name
value might be very long and hard to read.
* If ``changes`` is used, then terse output will be used if there was no
error and no changes, otherwise full output will be used.
* If ``filter`` is used, then either or both of two different filters can be
used: ``exclude`` or ``terse``.
* for ``exclude``, state.highstate expects a list of states to be excluded
(or ``None``)
followed by ``True`` for terse output or ``False`` for regular output.
Because of parsing nuances, if only one of these is used, it must still
contain a comma. For instance: `exclude=True,`.
* for ``terse``, state.highstate expects simply ``True`` or ``False``.
These can be set as such from the command line, or in the Salt config as
`state_output_exclude` or `state_output_terse`, respectively.
state_tabular:
If `state_output` uses the terse output, set this to `True` for an aligned
output format. If you wish to use a custom format, this can be set to a
string.
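Example configuration (illustrative values) in the master or minion config file:
.. code-block:: yaml
    state_verbose: False
    state_output: mixed
    state_tabular: True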
Example usage:
If ``state_output: filter`` is set in the configuration file:
.. code-block:: bash
salt '*' state.highstate exclude=None,True
means to exclude no states from the highstate and turn on terse output.
.. code-block:: bash
salt twd state.highstate exclude=problemstate1,problemstate2,False
means to exclude states ``problemstate1`` and ``problemstate2``
from the highstate, and use regular output.
Example output for the above highstate call when ``top.sls`` defines only
one other state to apply to minion ``twd``:
.. code-block:: text
twd:
Summary for twd
------------
Succeeded: 1 (changed=1)
Failed: 0
------------
Total states run: 1
Example output with no special settings in configuration files:
.. code-block:: text
myminion:
----------
ID: test.ping
Function: module.run
Result: True
Comment: Module function test.ping executed
Changes:
----------
ret:
True
Summary for myminion
------------
Succeeded: 1
Failed: 0
------------
Total: 0
'''
# Import python libs
from __future__ import absolute_import
import pprint
import textwrap
# Import salt libs
import salt.utils
import salt.output
from salt.utils.locales import sdecode
# Import 3rd-party libs
import salt.ext.six as six
import logging
log = logging.getLogger(__name__)
def output(data, **kwargs): # pylint: disable=unused-argument
'''
The HighState Outputter is only meant to be used with the state.highstate
function, or a function that returns highstate return data.
'''
# Discard retcode in dictionary as present in orchestrate data
local_masters = [key for key in data.keys() if key.endswith('.local_master')]
orchestrator_output = 'retcode' in data.keys() and len(local_masters) == 1
if orchestrator_output:
del data['retcode']
# If additional information is passed through via the "data" dictionary to
# the highstate outputter, such as "outputter" or "retcode", discard it.
# We only want the state data that was passed through, if it is wrapped up
# in the "data" key, as the orchestrate runner does. See Issue #31330,
# pull request #27838, and pull request #27175 for more information.
if 'data' in data:
data = data.pop('data')
ret = [
_format_host(host, hostdata)[0]
for host, hostdata in six.iteritems(data)
]
if ret:
return "\n".join(ret)
log.error(
'Data passed to highstate outputter is not a valid highstate return: %s',
data
)
# We should not reach here, but if we do return empty string
return ''
def _format_host(host, data):
host = sdecode(host)
colors = salt.utils.get_colors(
__opts__.get('color'),
__opts__.get('color_theme'))
tabular = __opts__.get('state_tabular', False)
rcounts = {}
rdurations = []
hcolor = colors['GREEN']
hstrs = []
nchanges = 0
strip_colors = __opts__.get('strip_colors', True)
if isinstance(data, int) or isinstance(data, str):
# Data in this format is from saltmod.function,
# so it is always a 'change'
nchanges = 1
hstrs.append((u'{0} {1}{2[ENDC]}'
.format(hcolor, data, colors)))
hcolor = colors['CYAN'] # Print the minion name in cyan
if isinstance(data, list):
# Errors have been detected, list them in RED!
hcolor = colors['LIGHT_RED']
hstrs.append((u' {0}Data failed to compile:{1[ENDC]}'
.format(hcolor, colors)))
for err in data:
if strip_colors:
err = salt.output.strip_esc_sequence(sdecode(err))
hstrs.append((u'{0}----------\n {1}{2[ENDC]}'
.format(hcolor, err, colors)))
if isinstance(data, dict):
# Verify that the needed data is present
data_tmp = {}
for tname, info in six.iteritems(data):
            if isinstance(info, dict) and tname != 'changes' and info and '__run_num__' not in info:
err = (u'The State execution failed to record the order '
'in which all states were executed. The state '
'return missing data is:')
hstrs.insert(0, pprint.pformat(info))
hstrs.insert(0, err)
if isinstance(info, dict) and 'result' in info:
data_tmp[tname] = info
data = data_tmp
# Everything rendered as it should display the output
for tname in sorted(
data,
key=lambda k: data[k].get('__run_num__', 0)):
ret = data[tname]
# Increment result counts
rcounts.setdefault(ret['result'], 0)
rcounts[ret['result']] += 1
rduration = ret.get('duration', 0)
try:
float(rduration)
rdurations.append(rduration)
except ValueError:
rduration, _, _ = rduration.partition(' ms')
try:
float(rduration)
rdurations.append(rduration)
except ValueError:
log.error('Cannot parse a float from duration {0}'
.format(ret.get('duration', 0)))
tcolor = colors['GREEN']
orchestration = ret.get('__orchestration__', False)
schanged, ctext = _format_changes(ret['changes'], orchestration)
nchanges += 1 if schanged else 0
# Skip this state if it was successful & diff output was requested
if __opts__.get('state_output_diff', False) and \
ret['result'] and not schanged:
continue
# Skip this state if state_verbose is False, the result is True and
# there were no changes made
if not __opts__.get('state_verbose', False) and \
ret['result'] and not schanged:
continue
if schanged:
tcolor = colors['CYAN']
if ret['result'] is False:
hcolor = colors['RED']
tcolor = colors['RED']
if ret['result'] is None:
hcolor = colors['LIGHT_YELLOW']
tcolor = colors['LIGHT_YELLOW']
comps = [sdecode(comp) for comp in tname.split('_|-')]
if __opts__.get('state_output', 'full').lower() == 'filter':
# By default, full data is shown for all types. However, return
# data may be excluded by setting state_output_exclude to a
# comma-separated list of True, False or None, or including the
# same list with the exclude option on the command line. For
# now, this option must include a comma. For example:
# exclude=True,
# The same functionality is also available for making return
# data terse, instead of excluding it.
cliargs = __opts__.get('arg', [])
clikwargs = {}
for item in cliargs:
if isinstance(item, dict) and '__kwarg__' in item:
clikwargs = item.copy()
exclude = clikwargs.get(
'exclude', __opts__.get('state_output_exclude', [])
)
if isinstance(exclude, six.string_types):
exclude = str(exclude).split(',')
terse = clikwargs.get(
'terse', __opts__.get('state_output_terse', [])
)
if isinstance(terse, six.string_types):
terse = str(terse).split(',')
if str(ret['result']) in terse:
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
if str(ret['result']) in exclude:
continue
elif __opts__.get('state_output', 'full').lower() == 'terse':
# Print this chunk in a terse way and continue in the
# loop
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
elif __opts__.get('state_output', 'full').lower().startswith('mixed'):
if __opts__['state_output'] == 'mixed_id':
# Swap in the ID for the name. Refs #35137
comps[2] = comps[1]
# Print terse unless it failed
if ret['result'] is not False:
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
elif __opts__.get('state_output', 'full').lower() == 'changes':
# Print terse if no error and no changes, otherwise, be
# verbose
if ret['result'] and not schanged:
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
state_lines = [
u'{tcolor}----------{colors[ENDC]}',
u' {tcolor} ID: {comps[1]}{colors[ENDC]}',
u' {tcolor}Function: {comps[0]}.{comps[3]}{colors[ENDC]}',
u' {tcolor} Result: {ret[result]!s}{colors[ENDC]}',
u' {tcolor} Comment: {comment}{colors[ENDC]}',
]
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
state_lines.extend([
u' {tcolor} Started: {ret[start_time]!s}{colors[ENDC]}',
u' {tcolor}Duration: {ret[duration]!s}{colors[ENDC]}',
])
# This isn't the prettiest way of doing this, but it's readable.
if comps[1] != comps[2]:
state_lines.insert(
3, u' {tcolor} Name: {comps[2]}{colors[ENDC]}')
# be sure that ret['comment'] is utf-8 friendly
try:
if not isinstance(ret['comment'], six.text_type):
ret['comment'] = str(ret['comment']).decode('utf-8')
except UnicodeDecodeError:
# but try to continue on errors
pass
try:
comment = sdecode(ret['comment'])
comment = comment.strip().replace(
u'\n',
u'\n' + u' ' * 14)
except AttributeError: # Assume comment is a list
try:
                    comment = u' '.join(ret['comment']).replace(
                        u'\n',
                        u'\n' + u' ' * 14)
except AttributeError:
# Comment isn't a list either, just convert to string
comment = str(ret['comment'])
comment = comment.strip().replace(
u'\n',
u'\n' + u' ' * 14)
# If there is a data attribute, append it to the comment
if 'data' in ret:
if isinstance(ret['data'], list):
for item in ret['data']:
comment = '{0} {1}'.format(comment, item)
elif isinstance(ret['data'], dict):
for key, value in ret['data'].items():
comment = '{0}\n\t\t{1}: {2}'.format(comment, key, value)
else:
comment = '{0} {1}'.format(comment, ret['data'])
for detail in ['start_time', 'duration']:
ret.setdefault(detail, u'')
if ret['duration'] != '':
ret['duration'] = u'{0} ms'.format(ret['duration'])
svars = {
'tcolor': tcolor,
'comps': comps,
'ret': ret,
'comment': sdecode(comment),
# This nukes any trailing \n and indents the others.
'colors': colors
}
hstrs.extend([sline.format(**svars) for sline in state_lines])
changes = u' Changes: ' + ctext
hstrs.append((u'{0}{1}{2[ENDC]}'
.format(tcolor, changes, colors)))
if 'warnings' in ret:
rcounts.setdefault('warnings', 0)
rcounts['warnings'] += 1
wrapper = textwrap.TextWrapper(
width=80,
initial_indent=u' ' * 14,
subsequent_indent=u' ' * 14
)
hstrs.append(
u' {colors[LIGHT_RED]} Warnings: {0}{colors[ENDC]}'.format(
wrapper.fill('\n'.join(ret['warnings'])).lstrip(),
colors=colors
)
)
# Append result counts to end of output
colorfmt = u'{0}{1}{2[ENDC]}'
rlabel = {True: u'Succeeded', False: u'Failed', None: u'Not Run', 'warnings': u'Warnings'}
count_max_len = max([len(str(x)) for x in six.itervalues(rcounts)] or [0])
label_max_len = max([len(x) for x in six.itervalues(rlabel)] or [0])
line_max_len = label_max_len + count_max_len + 2 # +2 for ': '
hstrs.append(
colorfmt.format(
colors['CYAN'],
u'\nSummary for {0}\n{1}'.format(host, '-' * line_max_len),
colors
)
)
def _counts(label, count):
return u'{0}: {1:>{2}}'.format(
label,
count,
line_max_len - (len(label) + 2)
)
# Successful states
changestats = []
if None in rcounts and rcounts.get(None, 0) > 0:
# test=True states
changestats.append(
colorfmt.format(
colors['LIGHT_YELLOW'],
u'unchanged={0}'.format(rcounts.get(None, 0)),
colors
)
)
if nchanges > 0:
changestats.append(
colorfmt.format(
colors['GREEN'],
u'changed={0}'.format(nchanges),
colors
)
)
if changestats:
changestats = u' ({0})'.format(', '.join(changestats))
else:
changestats = u''
hstrs.append(
colorfmt.format(
colors['GREEN'],
_counts(
rlabel[True],
rcounts.get(True, 0) + rcounts.get(None, 0)
),
colors
) + changestats
)
# Failed states
num_failed = rcounts.get(False, 0)
hstrs.append(
colorfmt.format(
colors['RED'] if num_failed else colors['CYAN'],
_counts(rlabel[False], num_failed),
colors
)
)
num_warnings = rcounts.get('warnings', 0)
if num_warnings:
hstrs.append(
colorfmt.format(
colors['LIGHT_RED'],
_counts(rlabel['warnings'], num_warnings),
colors
)
)
totals = u'{0}\nTotal states run: {1:>{2}}'.format('-' * line_max_len,
sum(six.itervalues(rcounts)) - rcounts.get('warnings', 0),
line_max_len - 7)
hstrs.append(colorfmt.format(colors['CYAN'], totals, colors))
if __opts__.get('state_output_profile', True):
sum_duration = sum(rdurations)
duration_unit = 'ms'
# convert to seconds if duration is 1000ms or more
if sum_duration > 999:
sum_duration /= 1000
duration_unit = 's'
total_duration = u'Total run time: {0} {1}'.format(
'{0:.3f}'.format(sum_duration).rjust(line_max_len - 5),
duration_unit)
hstrs.append(colorfmt.format(colors['CYAN'], total_duration, colors))
if strip_colors:
host = salt.output.strip_esc_sequence(host)
hstrs.insert(0, (u'{0}{1}:{2[ENDC]}'.format(hcolor, host, colors)))
return u'\n'.join(hstrs), nchanges > 0
def _nested_changes(changes):
'''
Print the changes data using the nested outputter
'''
global __opts__ # pylint: disable=W0601
opts = __opts__.copy()
# Pass the __opts__ dict. The loader will splat this modules __opts__ dict
# anyway so have to restore it after the other outputter is done
if __opts__['color']:
__opts__['color'] = u'CYAN'
ret = u'\n'
ret += salt.output.out_format(
changes,
'nested',
__opts__,
nested_indent=14)
__opts__ = opts
return ret
def _format_changes(changes, orchestration=False):
'''
Format the changes dict based on what the data is
'''
if not changes:
return False, u''
if orchestration:
return True, _nested_changes(changes)
if not isinstance(changes, dict):
return True, u'Invalid Changes data: {0}'.format(changes)
ret = changes.get('ret')
if ret is not None and changes.get('out') == 'highstate':
ctext = u''
changed = False
for host, hostdata in six.iteritems(ret):
s, c = _format_host(host, hostdata)
ctext += u'\n' + u'\n'.join((u' ' * 14 + l) for l in s.splitlines())
changed = changed or c
else:
changed = True
ctext = _nested_changes(changes)
return changed, ctext
def _format_terse(tcolor, comps, ret, colors, tabular):
'''
Terse formatting of a message.
'''
result = u'Clean'
if ret['changes']:
result = u'Changed'
if ret['result'] is False:
result = u'Failed'
elif ret['result'] is None:
result = u'Differs'
if tabular is True:
fmt_string = ''
if 'warnings' in ret:
fmt_string += u'{c[LIGHT_RED]}Warnings:\n{w}{c[ENDC]}\n'.format(
c=colors, w='\n'.join(ret['warnings'])
)
fmt_string += u'{0}'
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
fmt_string += u'{6[start_time]!s} [{6[duration]!s:>7} ms] '
fmt_string += u'{2:>10}.{3:<10} {4:7} Name: {1}{5}'
elif isinstance(tabular, str):
fmt_string = tabular
else:
fmt_string = ''
if 'warnings' in ret:
fmt_string += u'{c[LIGHT_RED]}Warnings:\n{w}{c[ENDC]}'.format(
c=colors, w='\n'.join(ret['warnings'])
)
fmt_string += u' {0} Name: {1} - Function: {2}.{3} - Result: {4}'
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
fmt_string += u' Started: - {6[start_time]!s} Duration: {6[duration]!s} ms'
fmt_string += u'{5}'
msg = fmt_string.format(tcolor,
comps[2],
comps[0],
comps[-1],
result,
colors['ENDC'],
ret)
return msg
|
the-stack_0_1046 | import logging
import os
from unittest.mock import create_autospec, patch
import boto3
import botocore
import botocore.client
import botocore.config
import pytest
import awswrangler as wr
from awswrangler._config import apply_configs
from awswrangler.s3._fs import open_s3_object
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
def _urls_test(glue_database):
original = botocore.client.ClientCreator.create_client
def wrapper(self, **kwarg):
name = kwarg["service_name"]
url = kwarg["endpoint_url"]
if name == "sts":
assert url == wr.config.sts_endpoint_url
elif name == "athena":
assert url == wr.config.athena_endpoint_url
elif name == "s3":
assert url == wr.config.s3_endpoint_url
elif name == "glue":
assert url == wr.config.glue_endpoint_url
return original(self, **kwarg)
with patch("botocore.client.ClientCreator.create_client", new=wrapper):
wr.athena.read_sql_query(sql="SELECT 1 as col0", database=glue_database)
def test_basics(path, glue_database, glue_table, workgroup0, workgroup1):
args = {"table": glue_table, "path": "", "columns_types": {"col0": "bigint"}}
# Missing database argument
with pytest.raises(TypeError):
wr.catalog.create_parquet_table(**args)
# Configuring default database value
wr.config.database = glue_database
# Testing configured database
wr.catalog.create_parquet_table(**args)
# Configuring default database with wrong value
wr.config.database = "missing_database"
with pytest.raises(boto3.client("glue").exceptions.EntityNotFoundException):
wr.catalog.create_parquet_table(**args)
# Overwriting configured database
wr.catalog.create_parquet_table(database=glue_database, **args)
# Testing configured s3 block size
size = 1 * 2 ** 20 # 1 MB
wr.config.s3_block_size = size
with open_s3_object(path, mode="wb") as s3obj:
s3obj.write(b"foo")
with open_s3_object(path, mode="rb") as s3obj:
assert s3obj._s3_block_size == size
# Resetting all configs
wr.config.reset()
# Missing database argument
with pytest.raises(TypeError):
wr.catalog.does_table_exist(table=glue_table)
# Configuring default database value again
wr.config.database = glue_database
# Testing configured database again
assert wr.catalog.does_table_exist(table=glue_table) is True
# Resetting this specific config
wr.config.reset("database")
# Missing database argument
with pytest.raises(TypeError):
wr.catalog.does_table_exist(table=glue_table)
# exporting environment variable
os.environ["WR_DATABASE"] = glue_database
wr.config.reset("database")
assert wr.catalog.does_table_exist(table=glue_table) is True
del os.environ["WR_DATABASE"]
wr.config.reset("database")
# Missing database argument
with pytest.raises(TypeError):
wr.catalog.does_table_exist(table=glue_table)
assert wr.config.to_pandas().shape == (len(wr._config._CONFIG_ARGS), 7)
# Workgroup
wr.config.workgroup = workgroup0
df = wr.athena.read_sql_query(sql="SELECT 1 as col0", database=glue_database)
assert df.query_metadata["WorkGroup"] == workgroup0
os.environ["WR_WORKGROUP"] = workgroup1
wr.config.reset()
df = wr.athena.read_sql_query(sql="SELECT 1 as col0", database=glue_database)
assert df.query_metadata["WorkGroup"] == workgroup1
# Endpoints URLs
region = boto3.Session().region_name
wr.config.sts_endpoint_url = f"https://sts.{region}.amazonaws.com"
wr.config.s3_endpoint_url = f"https://s3.{region}.amazonaws.com"
wr.config.athena_endpoint_url = f"https://athena.{region}.amazonaws.com"
wr.config.glue_endpoint_url = f"https://glue.{region}.amazonaws.com"
_urls_test(glue_database)
os.environ["WR_STS_ENDPOINT_URL"] = f"https://sts.{region}.amazonaws.com"
os.environ["WR_S3_ENDPOINT_URL"] = f"https://s3.{region}.amazonaws.com"
os.environ["WR_ATHENA_ENDPOINT_URL"] = f"https://athena.{region}.amazonaws.com"
os.environ["WR_GLUE_ENDPOINT_URL"] = f"https://glue.{region}.amazonaws.com"
wr.config.reset()
_urls_test(glue_database)
def test_athena_cache_configuration():
wr.config.max_local_cache_entries = 20
assert wr.config.max_remote_cache_entries == 20
def test_botocore_config(path):
original = botocore.client.ClientCreator.create_client
# Default values for botocore.config.Config
expected_max_retries_attempt = 5
expected_connect_timeout = 10
expected_max_pool_connections = 10
expected_retry_mode = None
def wrapper(self, **kwarg):
assert kwarg["client_config"].retries["max_attempts"] == expected_max_retries_attempt
assert kwarg["client_config"].connect_timeout == expected_connect_timeout
assert kwarg["client_config"].max_pool_connections == expected_max_pool_connections
assert kwarg["client_config"].retries.get("mode") == expected_retry_mode
return original(self, **kwarg)
# Check for default values
with patch("botocore.client.ClientCreator.create_client", new=wrapper):
with open_s3_object(path, mode="wb") as s3obj:
s3obj.write(b"foo")
# Update default config with environment variables
expected_max_retries_attempt = 20
expected_connect_timeout = 10
expected_max_pool_connections = 10
expected_retry_mode = "adaptive"
os.environ["AWS_MAX_ATTEMPTS"] = str(expected_max_retries_attempt)
os.environ["AWS_RETRY_MODE"] = expected_retry_mode
with patch("botocore.client.ClientCreator.create_client", new=wrapper):
with open_s3_object(path, mode="wb") as s3obj:
s3obj.write(b"foo")
del os.environ["AWS_MAX_ATTEMPTS"]
del os.environ["AWS_RETRY_MODE"]
# Update botocore.config.Config
expected_max_retries_attempt = 30
expected_connect_timeout = 40
expected_max_pool_connections = 50
expected_retry_mode = "legacy"
botocore_config = botocore.config.Config(
retries={"max_attempts": expected_max_retries_attempt, "mode": expected_retry_mode},
connect_timeout=expected_connect_timeout,
max_pool_connections=expected_max_pool_connections,
)
wr.config.botocore_config = botocore_config
with patch("botocore.client.ClientCreator.create_client", new=wrapper):
with open_s3_object(path, mode="wb") as s3obj:
s3obj.write(b"foo")
wr.config.reset()
def test_chunk_size():
expected_chunksize = 123
wr.config.chunksize = expected_chunksize
for function_to_mock in [wr.postgresql.to_sql, wr.mysql.to_sql, wr.sqlserver.to_sql, wr.redshift.to_sql]:
mock = create_autospec(function_to_mock)
apply_configs(mock)(df=None, con=None, table=None, schema=None)
mock.assert_called_with(df=None, con=None, table=None, schema=None, chunksize=expected_chunksize)
expected_chunksize = 456
os.environ["WR_CHUNKSIZE"] = str(expected_chunksize)
wr.config.reset()
for function_to_mock in [wr.postgresql.to_sql, wr.mysql.to_sql, wr.sqlserver.to_sql, wr.redshift.to_sql]:
mock = create_autospec(function_to_mock)
apply_configs(mock)(df=None, con=None, table=None, schema=None)
mock.assert_called_with(df=None, con=None, table=None, schema=None, chunksize=expected_chunksize)
|
the-stack_0_1047 | import unittest
from cloudrail.dev_tools.rule_test_utils import create_empty_entity
from cloudrail.knowledge.context.aws.cloudfront.cloud_front_distribution_list import CloudFrontDistribution, ViewerCertificate
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.non_context_aware.protocol_enforcments.ensure_cloudfront_protocol_version_is_good import \
CloudFrontEnsureVersionRule
from cloudrail.knowledge.rules.base_rule import RuleResultType
class TestCloudFrontEnsureVersionRule(unittest.TestCase):
def setUp(self):
self.rule = CloudFrontEnsureVersionRule()
def test_non_car_cloudfront_protocol_version_fail(self):
# Arrange
cloudfront_dist_list: CloudFrontDistribution = create_empty_entity(CloudFrontDistribution)
viewer_cert: ViewerCertificate = create_empty_entity(ViewerCertificate)
viewer_cert.minimum_protocol_version = 'TLSv1.2_2018'
cloudfront_dist_list.viewer_cert = viewer_cert
context = AwsEnvironmentContext(cloudfront_distribution_list=[cloudfront_dist_list])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.FAILED, result.status)
self.assertEqual(1, len(result.issues))
def test_non_car_cloudfront_protocol_version_pass(self):
# Arrange
cloudfront_dist_list: CloudFrontDistribution = create_empty_entity(CloudFrontDistribution)
viewer_cert: ViewerCertificate = create_empty_entity(ViewerCertificate)
viewer_cert.minimum_protocol_version = 'TLSv1.2_2019'
cloudfront_dist_list.viewer_cert = viewer_cert
context = AwsEnvironmentContext(cloudfront_distribution_list=[cloudfront_dist_list])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.SUCCESS, result.status)
self.assertEqual(0, len(result.issues))
|
the-stack_0_1049 | # coding: utf-8
"""
Contains solutions for the python/numpy training
"""
__authors__ = ["Pierre Knobel", "Jerome Kieffer", "Henri Payno",
"Armando Sole", "Valentin Valls", "Thomas Vincent"]
__date__ = "18/09/2018"
__license__ = "MIT"
import inspect
import numpy
def show(exercice_name):
function = globals()[exercice_name]
print(inspect.getsource(function))
return function()
def ex3_1():
""" Simple example of an element wise comparaison"""
x = numpy.arange(10)
y = numpy.arange(1, 11)
difference = x - y
return difference
def ex3_2():
""" Simple way to compute the difference x[i+1]-x[i] for all the elements
of the 1D array"""
x = numpy.arange(10)
difference = x[1:] - x[:-1]
return difference
def ex4_1():
"""Generate a 1D array of [1..99] then operate a binning 1 2 3 4 -> 1+2 3+4
"""
data = numpy.arange(100) + 1
binned = data[::2] + data[1::2]
return data, binned
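# Illustrative note (added): in ex4_1, data starts with [1, 2, 3, 4, ...] so the
# pairwise binning starts with [3, 7, 11, ...] (1+2, 3+4, 5+6, ...).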
def ex4_2():
"""Generate a 2D array of [1..9999] then operate a 2x2 binning
"""
data = numpy.arange(10000).reshape(100, 100)
data = data + 1
binned = data[::2, ::2] + data[::2, 1::2] + data[1::2, ::2] + data[1::2, 1::2]
return data, binned
def ex4_2_alt():
"""Generate a 2D array of [1..9999] then operate a 2x2 binning using numpy
sum and moving the array to 4D
"""
height = 100
width = 100
data = numpy.arange(10000).reshape(height, width)
data = data + 1
reshaped_data = data.reshape(height // 2, 2, width // 2, 2)
binned = reshaped_data.sum(axis=3).sum(axis=1)
return data, binned
def ex5_inefficient_fill(height=1000, width=1000):
"""Inefficient fill using 2 for loops"""
data = numpy.zeros((height, width), dtype=numpy.float)
for row in range(int(height)):
for col in range(int(width)):
data[row, col] = numpy.cos(row) * numpy.sin(col)
return data
def ex5_naive_fill(height=1000, width=1000):
"""Fill using 2 for loops but pre-computing sin and cos"""
width_sin = numpy.sin(numpy.arange(width))
height_cos = numpy.cos(numpy.arange(height))
data = numpy.zeros((height, width), numpy.float)
for row in range(int(height)):
for col in range(int(width)):
data[row, col] = height_cos[row] * width_sin[col]
return data
def ex5_clever_fill(height=1000, width=1000):
"""Fill using 2 outer products"""
width_sin = numpy.sin(numpy.arange(width))
height_cos = numpy.cos(numpy.arange(height))
cos_loop = numpy.outer(height_cos, numpy.ones(width))
sin_loop = numpy.outer(numpy.ones(height), width_sin)
return cos_loop * sin_loop
def ex5_practical_fill(height=1000, width=1000):
"""Fill using meshgrid"""
width_sin = numpy.sin(numpy.arange(width))
height_cos = numpy.cos(numpy.arange(height))
sin_loop, cos_loop = numpy.meshgrid(width_sin, height_cos)
return sin_loop * cos_loop
def ex5_optimized_fill(height=1000, width=1000):
"""Fill using outer product"""
width_sin = numpy.sin(numpy.arange(width))
height_cos = numpy.cos(numpy.arange(height))
return numpy.outer(height_cos, width_sin)
def ex5_atleast_2d_fill(height=1000, width=1000):
"""Fill using atleast_2d and transpose"""
sine = numpy.sin(numpy.arange(width))
cosine = numpy.cos(numpy.arange(height))
return numpy.atleast_2d(sine) * numpy.atleast_2d(cosine).T
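# Illustrative addition (not part of the original training material): a quick sanity
# check that the vectorised ex5 variants all compute cos(row) * sin(col).
def ex5_check_consistency(height=100, width=100):
    """Verify that the vectorised ex5 fill implementations produce the same array"""
    reference = ex5_optimized_fill(height, width)
    for fill in (ex5_clever_fill, ex5_practical_fill, ex5_atleast_2d_fill):
        assert numpy.allclose(reference, fill(height, width))
    return True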
|
the-stack_0_1053 | import torch
from torch.utils.data import DataLoader
import isao
def main():
train_dataset = isao.Isao('./data/preprocessed', use_label=True, resize=(64,64))
train_dataloader = DataLoader(train_dataset, batch_size=1000, shuffle=True)
for batch in train_dataloader:
print(batch['img'].shape)
if __name__ == '__main__':
main() |
the-stack_0_1055 | #!/usr/bin/env python
"""This module provides utility classes and functions for threading/multiprocessing"""
from __future__ import print_function
from .logutil import GetLogger
from . import sfdefaults as _sfdefaults
from . import SolidFireError, SFTimeoutError
import atexit
import fcntl as _fcntl
import functools as _functools
import multiprocessing as _multiprocessing
import multiprocessing.pool as _multiprocessing_pool
import sys as _sys
import threading as _threading
import traceback as _traceback
from io import open
# Helpful multiprocessing debug for threadpools
# from logging import DEBUG as _DEBUG_LEVEL
# import multiprocessing.util as _multiprocessing_util
# _multiprocessing_util.log_to_stderr(_DEBUG_LEVEL)
CPU_THREADS = _multiprocessing.cpu_count()
_globalPool = None
_globalPoolLock = _multiprocessing.Lock()
def GlobalPool():
""" Get the global thread pool """
#pylint: disable=global-statement
global _globalPool
#pylint: enable=global-statement
with _globalPoolLock:
if not _globalPool:
_globalPool = ThreadPool()
return _globalPool
def ShutdownGlobalPool():
with _globalPoolLock:
if _globalPool:
_globalPool.Shutdown()
def IsMainThread():
"""
Check if the current thread is the main thread
Returns:
Boolean true if this is the main thread, false otherwise
"""
return _threading.current_thread().name == "MainThread"
def IsMainProcess():
"""
Check if the current process is the main process
Returns:
Boolean true if this is the main process, false otherwise
"""
return _multiprocessing.current_process().name == "MainProcess"
class AsyncResult(object):
"""Result object from posting to a ThreadPool"""
def __init__(self, result):
self.result = result
def Get(self):
"""
Wait for and return the result of the thread
Returns:
The return value of the thread
"""
return self.result.get(0xFFFF)
def GetWithTimeout(self, timeout):
try:
return self.result.get(timeout)
except _multiprocessing.TimeoutError as e:
SFTimeoutError("Timeout waiting for thread to complete", innerException=e)
def Wait(self, timeout):
"""
Wait for the thread to complete
Args:
timeout: how long to wait before giving up, in seconds (float)
Returns:
Boolean true if the thread is ready or false if the timeout expired (bool)
"""
        self.result.wait(timeout)
        return self.result.ready()
def _initworkerprocess():
"""
Initialization function for workers in a process pool.
This turns off SIGINT handling in sub-processes
"""
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
class ThreadPool(object):
"""Helper to manage status and lifetime of threads/processes"""
def __init__(self, maxThreads=CPU_THREADS, useMultiprocessing=_sfdefaults.use_multiprocessing):
if useMultiprocessing:
self.threadPool = _multiprocessing.Pool(processes=maxThreads, initializer=_initworkerprocess)
else:
self.threadPool = _multiprocessing_pool.ThreadPool(processes=maxThreads)
self.results = []
atexit.register(self.threadPool.close)
def Post(self, threadFunc, *args, **kwargs):
"""
Add a new work item
Args:
threadFunc: the function to be run as a thread
args: args to pass to the thread function
kwargs: keyword args to pass to the thread function
Returns:
AsyncResult object
"""
async_res = self.threadPool.apply_async(threadFunc, args, kwargs)
res = AsyncResult(async_res)
self.results.append(res)
return res
def Wait(self):
"""
Wait for all threads to finish and collect the results
Returns:
Boolean true if all threads succeeded, False if one or more failed
"""
return WaitForThreads(self.results)
def Shutdown(self):
"""
Abort any running processes and shut down the pool
"""
self.threadPool.close()
self.threadPool.terminate()
def WaitForThreads(asyncResults):
"""
Wait for a list of threads to finish and collect the results
Args:
asyncResults: a list of async results to wait for (multiprocessing.pool.AsyncResult)
Returns:
Boolean true if all threads succeeded, False if one or more failed
"""
log = GetLogger()
allgood = True
for async_res in asyncResults:
# If the result is not True, or if there is an exception, this thread failed
try:
result = async_res.Get()
if result is False:
allgood = False
except SolidFireError as e:
log.error(e)
allgood = False
return allgood
def threadwrapper(func):
"""Decorator for functions to be run as threads"""
@_functools.wraps(func)
def wrapper(*args, **kwargs):
orig_name = _threading.current_thread().name
try:
return func(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
print("KeyboardInterrupt/SystemExit in thread {}".format(_threading.current_thread().name))
raise
except:
# For exceptions from child threads/processes, we want to extract and store the original traceback, otherwise it may
# be lost to multiprocessing/pickling and inaccessible when the exception gets rethrown in the parent process
# For convenience, we also convert all exceptions into our rooted exception hierarchy
ex_type, ex_val, ex_tb = _sys.exc_info()
str_tb = "".join(_traceback.format_tb(ex_tb))
if isinstance(ex_val, SolidFireError):
ex_val.originalTraceback = str_tb
raise
log = GetLogger()
log.debug(str_tb)
raise SolidFireError("{}: {}".format(ex_type.__name__, ex_val), str_tb)
finally:
_threading.current_thread().name = orig_name
return wrapper
class LockFile(object):
def __init__(self, lockname):
self.lockFile = "/var/tmp/{}.lockfile".format(lockname)
self.fd = open(self.lockFile, "w")
def __enter__(self):
self.Lock()
def __exit__(self, extype, exval, tb):
self.Unlock()
def __del__(self):
"""Make sure the lock gets unlocked when we exit"""
self.Unlock()
self.fd.close()
def Lock(self):
"""Lock"""
_fcntl.flock(self.fd, _fcntl.LOCK_EX | _fcntl.LOCK_NB)
def Unlock(self):
"""Unlock"""
_fcntl.flock(self.fd, _fcntl.LOCK_UN)
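# Illustrative usage sketch (an assumption, not part of this module): posting work to a
# ThreadPool, waiting for the combined result, and serialising a critical section.
#
#     @threadwrapper
#     def _work(item):
#         return item * 2
#
#     pool = ThreadPool(maxThreads=4)
#     results = [pool.Post(_work, item) for item in range(8)]
#     success = WaitForThreads(results)   # or pool.Wait() for everything posted so far
#     pool.Shutdown()
#
#     with LockFile("example"):           # inter-process critical section via flock
#         pass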
|
the-stack_0_1057 | from typing import Sequence
import editdistance
import pytorch_lightning as pl
import torch
class CharacterErrorRate(pl.metrics.Metric):
"""Character error rate metric, computed using Levenshtein distance."""
def __init__(self, ignore_tokens: Sequence[int], *args):
super().__init__(*args)
self.ignore_tokens = set(ignore_tokens)
self.add_state("error", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None:
N = preds.shape[0]
for ind in range(N):
pred = [_ for _ in preds[ind].tolist() if _ not in self.ignore_tokens]
target = [_ for _ in targets[ind].tolist() if _ not in self.ignore_tokens]
distance = editdistance.distance(pred, target)
error = distance / max(len(pred), len(target))
self.error = self.error + error
self.total = self.total + N
def compute(self) -> torch.Tensor:
return self.error / self.total
def test_character_error_rate():
metric = CharacterErrorRate([0, 1])
X = torch.tensor(
[
[0, 2, 2, 3, 3, 1], # error will be 0
[0, 2, 1, 1, 1, 1], # error will be .75
[0, 2, 2, 4, 4, 1], # error will be .5
]
)
Y = torch.tensor([[0, 2, 2, 3, 3, 1], [0, 2, 2, 3, 3, 1], [0, 2, 2, 3, 3, 1],])
metric(X, Y)
print(metric.compute())
assert metric.compute() == sum([0, 0.75, 0.5]) / 3
if __name__ == "__main__":
test_character_error_rate()
|
the-stack_0_1059 | import sys
import time
if sys.version_info < (3, 6, 5):
sys.exit('RoboMaster Sdk requires Python 3.6.5 or later')
import logging
logger_name = "multi_robot"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.ERROR)
fmt = "%(asctime)-15s %(levelname)s %(filename)s:%(lineno)d %(message)s"
formatter = logging.Formatter(fmt)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
def enable_logging_to_file():
logger.setLevel(logging.INFO)
filename = "RoboMasterSDK_MultiRobot_{0}_log.txt".format(time.strftime("%Y%m%d%H%M%S", time.localtime()))
fh = logging.FileHandler(filename)
fh.setFormatter(formatter)
logger.addHandler(fh)
__all__ = ['multi_robot', 'multi_group', 'multi_module', 'tool']
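# Illustrative usage (an assumption, not part of the SDK): call enable_logging_to_file()
# once at start-up to mirror INFO-and-above messages into a timestamped
# RoboMasterSDK_MultiRobot_<time>_log.txt file in the working directory, e.g.
#
#     enable_logging_to_file()
#     logger.info("multi robot session started")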
|
the-stack_0_1060 | import logging
from time import time
from threading import Timer
from contextlib import contextmanager
import progressbar
import numpy as np
from pybar.analysis.analyze_raw_data import AnalyzeRawData
from pybar.fei4.register_utils import invert_pixel_mask, make_box_pixel_mask_from_col_row
from pybar.fei4_run_base import Fei4RunBase
from pybar.run_manager import RunManager
class ExtTriggerScan(Fei4RunBase):
'''External trigger scan with FE-I4
For use with external scintillator (user RX0), TLU (use RJ45), FE-I4 HitOR (USBpix self-trigger).
Note:
Set up trigger in DUT configuration file (e.g. dut_configuration_mio.yaml).
'''
_default_run_conf = {
"broadcast_commands": True,
"threaded_scan": True,
"trig_count": 0, # FE-I4 trigger count, number of consecutive BCs, 0 means 16, from 0 to 15
"trigger_latency": 232, # FE-I4 trigger latency, in BCs, external scintillator / TLU / HitOR: 232, USBpix self-trigger: 220
"trigger_delay": 8, # trigger delay, in BCs
"trigger_rate_limit": 500, # artificially limiting the trigger rate, in BCs (25ns)
"col_span": [1, 80], # defining active column interval, 2-tuple, from 1 to 80
"row_span": [1, 336], # defining active row interval, 2-tuple, from 1 to 336
"overwrite_enable_mask": False, # if True, use col_span and row_span to define an active region regardless of the Enable pixel register. If False, use col_span and row_span to define active region by also taking Enable pixel register into account.
"use_enable_mask_for_imon": True, # if True, apply inverted Enable pixel mask to Imon pixel mask
"no_data_timeout": 10, # no data timeout after which the scan will be aborted, in seconds
"scan_timeout": 60, # timeout for scan after which the scan will be stopped, in seconds
"max_triggers": 10000, # maximum triggers after which the scan will be stopped, if 0, no maximum triggers are set
"enable_tdc": False, # if True, enables TDC
"reset_rx_on_error": False # long scans have a high propability for ESD related data transmission errors; recover and continue here
}
def configure(self):
commands = []
commands.extend(self.register.get_commands("ConfMode"))
# Enable
enable_pixel_mask = make_box_pixel_mask_from_col_row(column=self.col_span, row=self.row_span)
if not self.overwrite_enable_mask:
enable_pixel_mask = np.logical_and(enable_pixel_mask, self.register.get_pixel_register_value('Enable'))
self.register.set_pixel_register_value('Enable', enable_pixel_mask)
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name='Enable'))
# Imon
if self.use_enable_mask_for_imon:
imon_pixel_mask = invert_pixel_mask(enable_pixel_mask)
else:
imon_pixel_mask = make_box_pixel_mask_from_col_row(column=self.col_span, row=self.row_span, default=1, value=0) # 0 for selected columns, else 1
imon_pixel_mask = np.logical_or(imon_pixel_mask, self.register.get_pixel_register_value('Imon'))
self.register.set_pixel_register_value('Imon', imon_pixel_mask)
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name='Imon'))
# C_High
self.register.set_pixel_register_value('C_High', 0)
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_High'))
# C_Low
self.register.set_pixel_register_value('C_Low', 0)
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_Low'))
# Registers
self.register.set_global_register_value("Trig_Lat", self.trigger_latency) # set trigger latency
self.register.set_global_register_value("Trig_Count", self.trig_count) # set number of consecutive triggers
commands.extend(self.register.get_commands("WrRegister", name=["Trig_Lat", "Trig_Count"]))
commands.extend(self.register.get_commands("RunMode"))
self.register_utils.send_commands(commands)
def scan(self):
# preload command
lvl1_command = self.register.get_commands("zeros", length=self.trigger_delay)[0] + self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", length=self.trigger_rate_limit)[0]
self.register_utils.set_command(lvl1_command)
with self.readout(no_data_timeout=self.no_data_timeout, **self.scan_parameters._asdict()):
with self.trigger():
got_data = False
start = time()
while not self.stop_run.wait(1.0):
if not got_data:
if self.data_words_per_second() > 0:
got_data = True
logging.info('Taking data...')
if self.max_triggers:
self.progressbar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=self.max_triggers, poll=10, term_width=80).start()
else:
self.progressbar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.Timer()], maxval=self.scan_timeout, poll=10, term_width=80).start()
else:
triggers = self.dut['TLU']['TRIGGER_COUNTER']
try:
if self.max_triggers:
self.progressbar.update(triggers)
else:
self.progressbar.update(time() - start)
except ValueError:
pass
if self.max_triggers and triggers >= self.max_triggers:
self.progressbar.finish()
self.stop(msg='Trigger limit was reached: %i' % self.max_triggers)
logging.info('Total amount of triggers collected: %d', self.dut['TLU']['TRIGGER_COUNTER'])
def analyze(self):
with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
analyze_raw_data.trigger_data_format = self.dut['TLU']['DATA_FORMAT']
analyze_raw_data.create_source_scan_hist = True
analyze_raw_data.create_cluster_size_hist = True
analyze_raw_data.create_cluster_tot_hist = True
analyze_raw_data.align_at_trigger = True
if self.enable_tdc:
analyze_raw_data.create_tdc_counter_hist = True # histogram all TDC words
analyze_raw_data.create_tdc_hist = True # histogram the hit TDC information
analyze_raw_data.align_at_tdc = False # align events at the TDC word
analyze_raw_data.interpreter.set_warning_output(False)
analyze_raw_data.interpret_word_table()
analyze_raw_data.interpreter.print_summary()
analyze_raw_data.plot_histograms()
@contextmanager
def trigger(self):
self.start_trigger()
try:
yield
finally:
try:
self.stop_trigger()
except Exception:
# in case something fails, call this on last resort
self.scan_timeout_timer.cancel()
self.connect_cancel(["abort"])
def start_trigger(self, *args, **kwargs):
self.connect_cancel(["stop"])
self.dut['TDC']['ENABLE'] = self.enable_tdc
self.dut['TLU']['TRIGGER_COUNTER'] = 0
if self.max_triggers:
self.dut['TLU']['MAX_TRIGGERS'] = self.max_triggers
else:
self.dut['TLU']['MAX_TRIGGERS'] = 0 # infinity triggers
self.dut['TX']['EN_EXT_TRIGGER'] = True
with self.synchronized():
self.dut['TLU']['TRIGGER_ENABLE'] = True
def timeout():
try:
self.progressbar.finish()
except AttributeError:
pass
self.stop(msg='Scan timeout was reached')
self.scan_timeout_timer = Timer(self.scan_timeout, timeout)
if self.scan_timeout:
self.scan_timeout_timer.start()
def stop_trigger(self):
self.scan_timeout_timer.cancel()
with self.synchronized():
self.dut['TLU']['TRIGGER_ENABLE'] = False
self.dut['TX']['EN_EXT_TRIGGER'] = False
self.dut['TDC']['ENABLE'] = False
self.connect_cancel(["abort"])
if __name__ == "__main__":
with RunManager('configuration.yaml') as runmngr:
runmngr.run_run(ExtTriggerScan)
|
the-stack_0_1061 | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import pprint
from config.config import config
from core.loader import TestDataLoader
from core.tester import Predictor, pred_eval
from dataset import *
from symbols import *
from utils.load_model import load_param
def test_deeplab(network, dataset, image_set, root_path, dataset_path,
ctx, prefix, epoch,
vis, logger=None, output_path=None):
if not logger:
assert False, 'require a logger'
# print config
pprint.pprint(config)
logger.info('testing config:{}\n'.format(pprint.pformat(config)))
# load symbol and testing data
sym = eval('get_' + network + '_test')(num_classes=config.dataset.NUM_CLASSES)
imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
segdb = imdb.gt_segdb()
# get test data iter
test_data = TestDataLoader(segdb, batch_size=len(ctx))
# load model
# arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
arg_params, aux_params = load_param(prefix, epoch, process=True)
# infer shape
data_shape_dict = dict(test_data.provide_data_single)
arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
# check parameters
for k in sym.list_arguments():
if k in data_shape_dict or k in ['softmax_label']:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(
arg_params[k].shape)
for k in sym.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(
aux_params[k].shape)
# decide maximum shape
data_names = [k[0] for k in test_data.provide_data_single]
label_names = ['softmax_label']
max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
# create predictor
predictor = Predictor(sym, data_names, label_names,
context=ctx, max_data_shapes=max_data_shape,
provide_data=test_data.provide_data, provide_label=test_data.provide_label,
arg_params=arg_params, aux_params=aux_params)
# start detection
pred_eval(predictor, test_data, imdb, vis=vis, logger=logger)
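# Illustrative call sketch (all argument values below are hypothetical placeholders,
# not taken from this repository's experiment scripts):
#
#     import logging
#     import mxnet as mx
#     logger = logging.getLogger(__name__)
#     test_deeplab(network='resnet_v1_101_deeplab', dataset='CityScape', image_set='val',
#                  root_path='./data', dataset_path='./data/cityscape',
#                  ctx=[mx.gpu(0)], prefix='./output/deeplab', epoch=0,
#                  vis=False, logger=logger, output_path='./output')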
|
the-stack_0_1062 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Copyright (c) 2020 The KringleProjectCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Testing asset use cases
"""
from test_framework.test_framework import KringleProjectCoinTestFramework
from test_framework.util import *
import string
class AssetTest(KringleProjectCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-assetindex'], ['-assetindex'], ['-assetindex']]
def activate_assets(self):
self.log.info("Generating KPC for node[0] and activating assets...")
n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2]
n0.generate(1)
self.sync_all()
n0.generate(431)
self.sync_all()
assert_equal("active", n0.getblockchaininfo()['bip9_softforks']['assets']['status'])
def big_test(self):
self.log.info("Running big test!")
n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2]
self.log.info("Calling issue()...")
address0 = n0.getnewaddress()
ipfs_hash = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8"
n0.issue(asset_name="MY_ASSET", qty=1000, to_address=address0, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
self.log.info("Waiting for ten confirmations after issue...")
n0.generate(10)
self.sync_all()
self.log.info("Checkout getassetdata()...")
assetdata = n0.getassetdata("MY_ASSET")
assert_equal(assetdata["name"], "MY_ASSET")
assert_equal(assetdata["amount"], 1000)
assert_equal(assetdata["units"], 4)
assert_equal(assetdata["reissuable"], 1)
assert_equal(assetdata["has_ipfs"], 1)
assert_equal(assetdata["ipfs_hash"], ipfs_hash)
self.log.info("Checking listmyassets()...")
myassets = n0.listmyassets(asset="MY_ASSET*", verbose=True)
assert_equal(len(myassets), 2)
asset_names = list(myassets.keys())
assert_equal(asset_names.count("MY_ASSET"), 1)
assert_equal(asset_names.count("MY_ASSET!"), 1)
assert_equal(myassets["MY_ASSET"]["balance"], 1000)
assert_equal(myassets["MY_ASSET!"]["balance"], 1)
assert_equal(len(myassets["MY_ASSET"]["outpoints"]), 1)
assert_equal(len(myassets["MY_ASSET!"]["outpoints"]), 1)
assert_is_hash_string(myassets["MY_ASSET"]["outpoints"][0]["txid"])
assert_equal(myassets["MY_ASSET"]["outpoints"][0]["txid"], \
myassets["MY_ASSET!"]["outpoints"][0]["txid"])
assert(int(myassets["MY_ASSET"]["outpoints"][0]["vout"]) >= 0)
assert(int(myassets["MY_ASSET!"]["outpoints"][0]["vout"]) >= 0)
assert_equal(myassets["MY_ASSET"]["outpoints"][0]["amount"], 1000)
assert_equal(myassets["MY_ASSET!"]["outpoints"][0]["amount"], 1)
self.log.info("Checking listassetbalancesbyaddress()...")
assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET"], 1000)
assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET!"], 1)
self.log.info("Checking listassetbalancesbyaddress()...")
assert_equal(n0.listaddressesbyasset("MY_ASSET"), n1.listaddressesbyasset("MY_ASSET"))
self.log.info("Calling transfer()...")
address1 = n1.getnewaddress()
n0.transfer(asset_name="MY_ASSET", qty=200, to_address=address1)
self.log.info("Waiting for ten confirmations after transfer...")
n0.generate(10)
self.sync_all()
self.log.info("Checking listmyassets()...")
myassets = n1.listmyassets(asset="MY_ASSET*", verbose=True)
assert_equal(len(myassets), 1)
asset_names = list(myassets.keys())
assert_equal(asset_names.count("MY_ASSET"), 1)
assert_equal(asset_names.count("MY_ASSET!"), 0)
assert_equal(myassets["MY_ASSET"]["balance"], 200)
assert_equal(len(myassets["MY_ASSET"]["outpoints"]), 1)
assert_is_hash_string(myassets["MY_ASSET"]["outpoints"][0]["txid"])
assert(int(myassets["MY_ASSET"]["outpoints"][0]["vout"]) >= 0)
assert_equal(n0.listmyassets(asset="MY_ASSET")["MY_ASSET"], 800)
self.log.info("Checking listassetbalancesbyaddress()...")
assert_equal(n1.listassetbalancesbyaddress(address1)["MY_ASSET"], 200)
changeaddress = None
assert_equal(n0.listaddressesbyasset("MY_ASSET"), n1.listaddressesbyasset("MY_ASSET"))
assert_equal(sum(n0.listaddressesbyasset("MY_ASSET").values()), 1000)
assert_equal(sum(n1.listaddressesbyasset("MY_ASSET").values()), 1000)
for assaddr in n0.listaddressesbyasset("MY_ASSET").keys():
if n0.validateaddress(assaddr)["ismine"] == True:
changeaddress = assaddr
assert_equal(n0.listassetbalancesbyaddress(changeaddress)["MY_ASSET"], 800)
assert(changeaddress != None)
assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET!"], 1)
self.log.info("Burning all units to test reissue on zero units...")
n0.transfer(asset_name="MY_ASSET", qty=800, to_address="n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP")
n0.generate(1)
assert_does_not_contain_key("MY_ASSET", n0.listmyassets(asset="MY_ASSET", verbose=True))
self.log.info("Calling reissue()...")
address1 = n0.getnewaddress()
ipfs_hash2 = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8"
n0.reissue(asset_name="MY_ASSET", qty=2000, to_address=address0, change_address=address1, \
reissuable=False, new_unit=-1, new_ipfs=ipfs_hash2)
self.log.info("Waiting for ten confirmations after reissue...")
self.sync_all()
n0.generate(10)
self.sync_all()
self.log.info("Checkout getassetdata()...")
assetdata = n0.getassetdata("MY_ASSET")
assert_equal(assetdata["name"], "MY_ASSET")
assert_equal(assetdata["amount"], 3000)
assert_equal(assetdata["units"], 4)
assert_equal(assetdata["reissuable"], 0)
assert_equal(assetdata["has_ipfs"], 1)
assert_equal(assetdata["ipfs_hash"], ipfs_hash2)
self.log.info("Checking listassetbalancesbyaddress()...")
assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET"], 2000)
self.log.info("Checking listassets()...")
n0.issue("KPC1", 1000)
n0.issue("KPC2", 1000)
n0.issue("KPC3", 1000)
n0.generate(1)
self.sync_all()
n0.listassets(asset="KPC*", verbose=False, count=2, start=-2)
self.log.info("Creating some sub-assets...")
n0.issue(asset_name="MY_ASSET/SUB1", qty=1000, to_address=address0, change_address=address0,\
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
self.sync_all()
self.log.info("Waiting for ten confirmations after issuesubasset...")
n0.generate(10)
self.sync_all()
self.log.info("Checkout getassetdata()...")
assetdata = n0.getassetdata("MY_ASSET/SUB1")
assert_equal(assetdata["name"], "MY_ASSET/SUB1")
assert_equal(assetdata["amount"], 1000)
assert_equal(assetdata["units"], 4)
assert_equal(assetdata["reissuable"], 1)
assert_equal(assetdata["has_ipfs"], 1)
assert_equal(assetdata["ipfs_hash"], ipfs_hash)
kringleprojectcoin_assets = n0.listassets(asset="KPC*", verbose=False, count=2, start=-2)
assert_equal(len(kringleprojectcoin_assets), 2)
assert_equal(kringleprojectcoin_assets[0], "KPC2")
assert_equal(kringleprojectcoin_assets[1], "KPC3")
self.sync_all()
def issue_param_checks(self):
self.log.info("Checking bad parameter handling!")
n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2]
# just plain bad asset name
assert_raises_rpc_error(-8, "Invalid asset name: bad-asset-name", \
n0.issue, "bad-asset-name");
# trying to issue things that can't be issued
assert_raises_rpc_error(-8, "Unsupported asset type: OWNER", \
n0.issue, "AN_OWNER!");
assert_raises_rpc_error(-8, "Unsupported asset type: MSGCHANNEL", \
n0.issue, "A_MSGCHANNEL~CHANNEL_4");
assert_raises_rpc_error(-8, "Unsupported asset type: VOTE", \
n0.issue, "A_VOTE^PEDRO");
# check bad unique params
assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \
n0.issue, "A_UNIQUE#ASSET", 2)
assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \
n0.issue, "A_UNIQUE#ASSET", 1, "", "", 1)
assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \
n0.issue, "A_UNIQUE#ASSET", 1, "", "", 0, True)
def chain_assets(self):
self.log.info("Issuing chained assets in depth issue()...")
n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2]
chain_address = n0.getnewaddress()
ipfs_hash = "QmacSRmrkVmvJfbCpmU6pK72furJ8E8fbKHindrLxmYMQo"
chain_string = "CHAIN1"
n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
for c in string.ascii_uppercase:
chain_string += '/' + c
if len(chain_string) > 30:
break
n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
n0.generate(1)
self.sync_all()
chain_assets = n1.listassets(asset="CHAIN1*", verbose=False)
assert_equal(len(chain_assets), 13)
self.log.info("Issuing chained assets in width issue()...")
chain_address = n0.getnewaddress()
chain_string = "CHAIN2"
n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
for c in string.ascii_uppercase:
asset_name = chain_string + '/' + c
n0.issue(asset_name=asset_name, qty=1000, to_address=chain_address, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
n0.generate(1)
self.sync_all()
chain_assets = n1.listassets(asset="CHAIN2/*", verbose=False)
assert_equal(len(chain_assets), 26)
self.log.info("Chaining reissue transactions...")
address0 = n0.getnewaddress()
n0.issue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \
units=4, reissuable=True, has_ipfs=False)
n0.generate(1)
self.sync_all()
n0.reissue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \
reissuable=True)
assert_raises_rpc_error(-4, "Error: The transaction was rejected! Reason given: bad-tx-reissue-chaining-not-allowed", n0.reissue, "CHAIN_REISSUE", 1000, address0, "", True)
n0.generate(1)
self.sync_all()
n0.reissue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \
reissuable=True)
n0.generate(1)
self.sync_all()
assetdata = n0.getassetdata("CHAIN_REISSUE")
assert_equal(assetdata["name"], "CHAIN_REISSUE")
assert_equal(assetdata["amount"], 3000)
assert_equal(assetdata["units"], 4)
assert_equal(assetdata["reissuable"], 1)
assert_equal(assetdata["has_ipfs"], 0)
def ipfs_state(self):
self.log.info("Checking ipfs hash state changes...")
n0 = self.nodes[0]
asset_name1 = "ASSET111"
asset_name2 = "ASSET222"
address1 = n0.getnewaddress()
address2 = n0.getnewaddress()
ipfs_hash = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8"
bad_hash = "RncvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8"
########################################
# bad hash (isn't a valid multihash sha2-256)
try:
n0.issue(asset_name=asset_name1, qty=1000, to_address=address1, change_address=address2, \
units=0, reissuable=True, has_ipfs=True, ipfs_hash=bad_hash)
except JSONRPCException as e:
if "Invalid IPFS hash (doesn't start with 'Qm')" not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
########################################
# no hash
n0.issue(asset_name=asset_name2, qty=1000, to_address=address1, change_address=address2, \
units=0, reissuable=True, has_ipfs=False)
n0.generate(1)
ad = n0.getassetdata(asset_name2)
assert_equal(0, ad['has_ipfs'])
assert_does_not_contain_key('ipfs_hash', ad)
########################################
# reissue w/ bad hash
try:
n0.reissue(asset_name=asset_name2, qty=2000, to_address=address1, change_address=address2, \
reissuable=True, new_unit=-1, new_ipfs=bad_hash)
except JSONRPCException as e:
if "Invalid IPFS hash (doesn't start with 'Qm')" not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
########################################
# reissue w/ hash
n0.reissue(asset_name=asset_name2, qty=2000, to_address=address1, change_address=address2, \
reissuable=True, new_unit=-1, new_ipfs=ipfs_hash)
n0.generate(1)
ad = n0.getassetdata(asset_name2)
assert_equal(1, ad['has_ipfs'])
assert_equal(ipfs_hash, ad['ipfs_hash'])
########################################
# invalidate and reconsider
best = n0.getbestblockhash()
n0.invalidateblock(n0.getbestblockhash())
ad = n0.getassetdata(asset_name2)
assert_equal(0, ad['has_ipfs'])
assert_does_not_contain_key('ipfs_hash', ad)
n0.reconsiderblock(best)
ad = n0.getassetdata(asset_name2)
assert_equal(1, ad['has_ipfs'])
assert_equal(ipfs_hash, ad['ipfs_hash'])
def db_corruption_regression(self):
self.log.info("Checking db corruption invalidate block...")
n0 = self.nodes[0]
asset_name = "DATA_CORRUPT"
# Test to make sure that undoing a reissue and an issue during a reorg doesn't screw up the database/cache
n0.issue(asset_name)
a = n0.generate(1)[0]
n0.reissue(asset_name, 500, n0.getnewaddress())
b = n0.generate(1)[0]
self.log.info(f"Invalidating {a}...")
n0.invalidateblock(a)
assert_equal(0, len(n0.listassets(asset_name, True)))
def reissue_prec_change(self):
self.log.info("Testing precision change on reissue...")
n0 = self.nodes[0]
asset_name = "PREC_CHANGES"
address = n0.getnewaddress()
n0.issue(asset_name, 10, "", "", 0, True, False)
n0.generate(1)
assert_equal(0, n0.listassets("*", True)[asset_name]["units"])
for i in range(0, 8):
n0.reissue(asset_name, 10.0**(-i), address, "", True, i+1)
n0.generate(1)
assert_equal(i+1, n0.listassets("*", True)[asset_name]["units"])
assert_raises_rpc_error(-25, "Error: Unable to reissue asset: unit must be larger than current unit selection", \
n0.reissue, asset_name, 10.0**(-i), address, "", True, i)
n0.reissue(asset_name, 0.00000001, address)
n0.generate(1)
assert_equal(Decimal('11.11111111'), n0.listassets("*", True)[asset_name]["amount"])
def run_test(self):
self.activate_assets()
self.big_test()
self.issue_param_checks()
self.chain_assets()
self.ipfs_state()
self.db_corruption_regression()
self.reissue_prec_change()
if __name__ == '__main__':
AssetTest().main()
|
the-stack_0_1063 | import sys
import colorsys
try:
import pygame
import pygame.gfxdraw  # keep gfxdraw inside the try so a missing pygame reaches the hint below
except ImportError:
print("To simulate a unicorn HAT on your computer, please pip install pygame")
class UnicornHatSim(object):
def __init__(self, width, height, rotation_offset = 0):
# Compat with old library
self.AUTO = None
self.PHAT = None
# Set some defaults
self.rotation_offset = rotation_offset
self.rotation(0)
self.pixels = [(0, 0, 0)] * width * height
self.pixel_size = 15
self.width = width
self.height = height
self.window_width = width * self.pixel_size
self.window_height = height * self.pixel_size
# Init pygame and off we go
pygame.init()
pygame.display.set_caption("Unicorn HAT simulator")
self.screen = pygame.display.set_mode([self.window_width, self.window_height])
self.clear()
def set_pixel(self, x, y, r, g, b):
i = (x * self.width) + y
self.pixels[i] = [int(r), int(g), int(b)]
def draw(self):
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT:
print("Exiting...")
sys.exit()
for x in range(self.width):
for y in range(self.height):
self.draw_led(x, y)
def show(self):
self.clear()
self.draw()
pygame.display.flip()
def draw_led(self, x, y):
self.draw_gfxcircle(x,y)
def draw_gfxcircle(self, x, y):
p = self.pixel_size
w_x = int(x * p + self.pixel_size / 2)
w_y = int((self.height - 1 - y) * p + self.pixel_size / 2)
r = int(self.pixel_size / 4)
color = self.pixels[self.index(x, y)]
pygame.gfxdraw.aacircle(self.screen, w_x, w_y, r, color)
pygame.gfxdraw.filled_circle(self.screen, w_x, w_y, r, color)
def get_shape(self):
return (self.width, self.height)
def brightness(self, *args):
pass
def rotation(self, r):
self._rotation = int(round(r/90.0)) % 4  # four valid orientations: 0, 90, 180 and 270 degrees
def clear(self):
self.screen.fill((0, 0, 0))
def get_rotation(self):
return self._rotation * 90
def set_layout(self, *args):
pass
def set_pixel_hsv(self, x, y, h, s=1.0, v=1.0):
r, g, b = [int(n*255) for n in colorsys.hsv_to_rgb(h, s, v)]
self.set_pixel(x, y, r, g, b)
def off(self):
print("Closing window")
pygame.quit()
def index(self, x, y):
# Offset to match device rotation
rot = (self.get_rotation() + self.rotation_offset) % 360
if rot == 0:
xx = x
yy = y
elif rot == 90:
xx = self.height - 1 - y
yy = x
elif rot == 180:
xx = self.width - 1 - x
yy = self.height - 1 - y
elif rot == 270:
xx = y
yy = self.width - 1 - x
return (xx * self.width) + yy
# SD hats works as expected
unicornhat = UnicornHatSim(8,8)
unicornphat = UnicornHatSim(8, 4)
# Unicornhat HD seems to be the other way around (not that there's anything wrong with that), so we rotate it 180°
unicornhathd = UnicornHatSim(16, 16, 180)
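# Minimal usage sketch (an illustrative addition, not part of the original simulator):
# drives the 8x8 simulator instance defined above the same way the real unicornhat
# library would be driven. Assumes this file is executed directly with pygame installed.
if __name__ == "__main__":
    import time

    hat = unicornhat  # the 8x8 simulator created above
    for y in range(8):
        for x in range(8):
            # simple diagonal hue sweep across the virtual LEDs
            hat.set_pixel_hsv(x, y, (x + y) / 16.0)
    hat.show()
    time.sleep(2)
    hat.off()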
|
the-stack_0_1065 | #!/usr/bin/env python
"""These are standard aff4 objects."""
import hashlib
import re
import StringIO
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import utils
class VFSDirectory(aff4.AFF4Volume):
"""This represents a directory from the client."""
default_container = "VFSDirectory"
# We contain other objects within the tree.
_behaviours = frozenset(["Container"])
def Update(self, attribute=None, priority=None):
"""Refresh an old attribute.
Note that refreshing the attribute is asynchronous. It does not change
anything about the current object - you need to reopen the same URN some
time later to get fresh data.
Attributes:
CONTAINS - Refresh the content of the directory listing.
Args:
attribute: An attribute object as listed above.
priority: Priority to set for updating flow, None for default.
Returns:
The Flow ID that is pending
Raises:
IOError: If there has been an error starting the flow.
"""
# client id is the first path element
client_id = self.urn.Split()[0]
if attribute == "CONTAINS":
# Get the pathspec for this object
flow_id = flow.GRRFlow.StartFlow(client_id=client_id,
flow_name="ListDirectory",
pathspec=self.real_pathspec,
priority=priority,
notify_to_user=False,
token=self.token)
return flow_id
class SchemaCls(aff4.AFF4Volume.SchemaCls):
"""Attributes specific to VFSDirectory."""
STAT = aff4.Attribute("aff4:stat", rdfvalue.StatEntry,
"A StatResponse protobuf describing this file.",
"stat")
PATHSPEC = aff4.Attribute(
"aff4:pathspec", rdfvalue.PathSpec,
"The pathspec used to retrieve this object from the client.",
"pathspec")
class HashList(rdfvalue.RDFBytes):
"""A list of hashes."""
HASH_SIZE = 32
def __len__(self):
return len(self._value) / self.HASH_SIZE
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __getitem__(self, idx):
return rdfvalue.HashDigest(
self._value[idx * self.HASH_SIZE: (idx + 1) * self.HASH_SIZE])
class BlobImage(aff4.AFF4Image):
"""An AFF4 stream which stores chunks by hashes.
The hash stream is kept within an AFF4 Attribute, instead of another stream
making it more efficient for smaller files.
"""
# Size of a sha256 hash
_HASH_SIZE = 32
# How many chunks we read ahead
_READAHEAD = 5
def Initialize(self):
super(BlobImage, self).Initialize()
self.content_dirty = False
if self.mode == "w":
self.index = StringIO.StringIO("")
self.finalized = False
else:
self.index = StringIO.StringIO(self.Get(self.Schema.HASHES, ""))
self.finalized = self.Get(self.Schema.FINALIZED, False)
def Truncate(self, offset=0):
if offset != 0:
raise IOError("Non-zero truncation not supported for BlobImage")
super(BlobImage, self).Truncate(0)
self.index = StringIO.StringIO("")
self.finalized = False
def _GetChunkForWriting(self, chunk):
"""Chunks must be added using the AddBlob() method."""
raise NotImplementedError("Direct writing of HashImage not allowed.")
def _GetChunkForReading(self, chunk):
"""Retrieve the relevant blob from the AFF4 data store or cache."""
result = None
offset = chunk * self._HASH_SIZE
self.index.seek(offset)
chunk_name = self.index.read(self._HASH_SIZE)
try:
result = self.chunk_cache.Get(chunk_name)
except KeyError:
# Read ahead a few chunks.
self.index.seek(offset)
readahead = {}
for _ in range(self._READAHEAD):
name = self.index.read(self._HASH_SIZE)
if name and name not in self.chunk_cache:
urn = aff4.ROOT_URN.Add("blobs").Add(name.encode("hex"))
readahead[urn] = name
fds = aff4.FACTORY.MultiOpen(readahead, mode="r", token=self.token)
for fd in fds:
name = readahead[fd.urn]
# Remember the right fd
if name == chunk_name:
result = fd
# Put back into the cache
self.chunk_cache.Put(readahead[fd.urn], fd)
if result is None:
raise IOError("Chunk '%s' not found for reading!" % chunk)
return result
def FromBlobImage(self, fd):
"""Copy this file cheaply from another BlobImage."""
self.content_dirty = True
self.SetChunksize(fd.chunksize)
self.index = StringIO.StringIO(fd.index.getvalue())
self.size = fd.size
def Flush(self, sync=True):
if self.content_dirty:
self.Set(self.Schema.SIZE(self.size))
self.Set(self.Schema.HASHES(self.index.getvalue()))
self.Set(self.Schema.FINALIZED(self.finalized))
super(BlobImage, self).Flush(sync)
def AppendContent(self, src_fd):
"""Create new blob hashes and append to BlobImage.
We don't support writing at arbitrary file offsets, but this method provides
a convenient way to add blobs for a new file, or append content to an
existing one.
Args:
src_fd: source file handle open for read
Raises:
IOError: if blob has already been finalized.
"""
while 1:
blob = src_fd.read(self.chunksize)
if not blob:
break
blob_hash = hashlib.sha256(blob).digest()
blob_urn = rdfvalue.RDFURN("aff4:/blobs").Add(blob_hash.encode("hex"))
try:
fd = aff4.FACTORY.Open(blob_urn, "AFF4MemoryStream", mode="r",
token=self.token)
except IOError:
fd = aff4.FACTORY.Create(blob_urn, "AFF4MemoryStream", mode="w",
token=self.token)
fd.Write(blob)
fd.Close(sync=True)
self.AddBlob(blob_hash, len(blob))
self.Flush()
def AddBlob(self, blob_hash, length):
"""Add another blob to this image using its hash.
Once a blob is added that is smaller than the chunksize we finalize the
file, since handling adding more blobs makes the code much more complex.
Args:
blob_hash: sha256 binary digest
length: int length of blob
Raises:
IOError: if blob has been finalized.
"""
if self.finalized and length > 0:
raise IOError("Can't add blobs to finalized BlobImage")
self.content_dirty = True
self.index.seek(0, 2)
self.index.write(blob_hash)
self.size += length
if length < self.chunksize:
self.finalized = True
class SchemaCls(aff4.AFF4Image.SchemaCls):
"""The schema for Blob Images."""
STAT = aff4.AFF4Object.VFSDirectory.SchemaCls.STAT
HASHES = aff4.Attribute("aff4:hashes", rdfvalue.HashList,
"List of hashes of each chunk in this file.")
FINGERPRINT = aff4.Attribute("aff4:fingerprint",
rdfvalue.FingerprintResponse,
"DEPRECATED protodict containing arrays of "
" hashes. Use AFF4Stream.HASH instead.")
FINALIZED = aff4.Attribute("aff4:finalized",
rdfvalue.RDFBool,
"Once a blobimage is finalized, further writes"
" will raise exceptions.")
class HashImage(aff4.AFF4Image):
"""An AFF4 Image which refers to chunks by their hash.
This object stores a large image in chunks. Each chunk is stored using its
hash in the AFF4 data store. We have an index with a series of hashes stored
back to back. When we need to read a chunk, we seek the index for the hash,
and then open the data blob indexed by this hash. Chunks are cached as per the
AFF4Image implementation.
Assumptions:
Hashes do not collide.
All data blobs have the same size (the chunk size), except possibly the last
one in the file.
"""
# Size of a sha256 hash
_HASH_SIZE = 32
# How many chunks we read ahead
_READAHEAD = 5
_data_dirty = False
def Initialize(self):
super(HashImage, self).Initialize()
self.index = None
def _OpenIndex(self):
if self.index is None:
index_urn = self.urn.Add("index")
self.index = aff4.FACTORY.Create(index_urn, "AFF4Image", mode=self.mode,
token=self.token)
def _GetChunkForWriting(self, chunk):
"""Chunks must be added using the AddBlob() method."""
raise NotImplementedError("Direct writing of HashImage not allowed.")
def _GetChunkForReading(self, chunk):
"""Retrieve the relevant blob from the AFF4 data store or cache."""
result = None
self._OpenIndex()
self.index.Seek(chunk * self._HASH_SIZE)
chunk_name = self.index.Read(self._HASH_SIZE)
try:
result = self.chunk_cache.Get(chunk_name)
except KeyError:
# Read ahead a few chunks.
self.index.Seek(-self._HASH_SIZE, whence=1)
readahead = {}
for _ in range(self._READAHEAD):
name = self.index.Read(self._HASH_SIZE)
if name and name not in self.chunk_cache:
urn = aff4.ROOT_URN.Add("blobs").Add(name.encode("hex"))
readahead[urn] = name
fds = aff4.FACTORY.MultiOpen(readahead, mode="r", token=self.token)
for fd in fds:
name = readahead[fd.urn]
# Remember the right fd
if name == chunk_name:
result = fd
# Put back into the cache
self.chunk_cache.Put(readahead[fd.urn], fd)
return result
def Close(self, sync=True):
if self._data_dirty:
self.Set(self.Schema.SIZE(self.size))
if self.index:
self.index.Close(sync)
super(HashImage, self).Close(sync)
def AddBlob(self, blob_hash, length):
"""Add another blob to this image using its hash."""
self._OpenIndex()
self._data_dirty = True
self.index.Seek(0, 2)
self.index.Write(blob_hash)
self.size += length
class SchemaCls(aff4.AFF4Image.SchemaCls):
"""The schema for AFF4 files in the GRR VFS."""
STAT = aff4.AFF4Object.VFSDirectory.SchemaCls.STAT
CONTENT_LOCK = aff4.Attribute(
"aff4:content_lock", rdfvalue.RDFURN,
"This lock contains a URN pointing to the flow that is currently "
"updating this object.")
FINGERPRINT = aff4.Attribute("aff4:fingerprint",
rdfvalue.FingerprintResponse,
"DEPRECATED protodict containing arrays of "
" hashes. Use AFF4Stream.HASH instead.")
class AFF4SparseImage(BlobImage):
"""A class to store partial files."""
class SchemaCls(aff4.BlobImage.SchemaCls):
PATHSPEC = VFSDirectory.SchemaCls.PATHSPEC
def Initialize(self):
super(AFF4SparseImage, self).Initialize()
self._OpenIndex()
def _OpenIndex(self):
"""Create the index if it doesn't exist, otherwise open it."""
index_urn = self.urn.Add("index")
self.index = aff4.FACTORY.Create(index_urn, "AFF4SparseIndex", mode="rw",
token=self.token)
def Truncate(self, offset=0):
if offset != 0:
raise IOError("Non-zero truncation not supported for AFF4SparseImage")
super(AFF4SparseImage, self).Truncate(0)
self._OpenIndex()
self.finalized = False
def Read(self, length):
result = []
while length > 0:
data = self._ReadPartial(length)
if not data:
break
length -= len(data)
result.append(data)
return "".join(result)
def _GetChunkForReading(self, chunk):
"""Retrieve the relevant blob from the AFF4 data store or cache."""
result = None
offset = chunk * self._HASH_SIZE
self.index.seek(offset)
chunk_name = self.index.read(self._HASH_SIZE)
try:
result = self.chunk_cache.Get(chunk_name)
# Cache hit, so we're done.
return result
except KeyError:
# Read ahead a few chunks.
self.index.seek(offset)
readahead = {}
# Read all the hashes in one go, then split up the result.
chunks = self.index.read(self._HASH_SIZE * self._READAHEAD)
chunk_names = [chunks[i:i + self._HASH_SIZE]
for i in xrange(0, len(chunks), self._HASH_SIZE)]
for name in chunk_names:
# Try and read ahead a few chunks from the datastore and add them to the
# cache. If the chunks ahead aren't there, that's okay, we just can't
# cache them. We still keep reading to see if chunks after them are
# there, since the image is sparse.
try:
if name not in self.chunk_cache:
urn = aff4.ROOT_URN.Add("blobs").Add(name.encode("hex"))
readahead[urn] = name
except aff4.ChunkNotFoundError:
pass
fds = aff4.FACTORY.MultiOpen(readahead, mode="r", token=self.token)
for fd in fds:
name = readahead[fd.urn]
# Remember the right fd
if name == chunk_name:
result = fd
# Put back into the cache
self.chunk_cache.Put(readahead[fd.urn], fd)
if result is None:
raise aff4.ChunkNotFoundError("Chunk '%s' (urn: %s) not "
"found for reading!"
% (chunk, chunk_name))
return result
def _ReadPartial(self, length):
"""Read as much as possible, but not more than length."""
chunk = self.offset / self.chunksize
chunk_offset = self.offset % self.chunksize
# If we're past the end of the file, we don't have a chunk to read from, so
# we can't read anymore. We return the empty string here so we can read off
# the end of a file without raising, and get as much data as is there.
if chunk > self.index.last_chunk:
return ""
available_to_read = min(length, self.chunksize - chunk_offset)
fd = self._GetChunkForReading(chunk)
fd.Seek(chunk_offset)
result = fd.Read(available_to_read)
self.offset += len(result)
return result
def AddBlob(self, blob_hash, length, chunk_number):
"""Add another blob to this image using its hash."""
# TODO(user) Allow the index's chunksize to be > self._HASH_SIZE.
# This will reduce the number of rows we need to store in the datastore.
# We'll fill chunks with 0s when we don't have enough information to write
# to them fully, and ignore 0s when we're reading chunks.
# There's one hash in the index for each chunk in the file.
offset = chunk_number * self.index.chunksize
self.index.Seek(offset)
# If we're adding a new blob, we should increase the size. If we're just
# updating an existing blob, the size should stay the same.
# That is, if we read the index at the right offset and no hash is there, we
# must not have seen this blob before, so we say we're adding a new one and
# increase in size.
if not self.index.ChunkExists(chunk_number):
# We say that we've increased in size by the size of the blob,
# but really we only store its hash in the AFF4SparseImage.
self.size += length
# Seek back in case we've read past the offset we're meant to write to.
self.index.Seek(offset)
self.index.Write(blob_hash)
self._dirty = True
def Flush(self, sync=True):
if self._dirty:
self.index.Flush(sync=sync)
super(AFF4SparseImage, self).Flush(sync=sync)
class AFF4SparseIndex(aff4.AFF4Image):
"""A sparse index for AFF4SparseImage."""
# TODO(user) Allow for a bigger chunk size. At the moment, the
# chunksize must be exactly the hash size.
chunksize = 32
class SchemaCls(aff4.AFF4Image.SchemaCls):
_CHUNKSIZE = aff4.Attribute("aff4:chunksize", rdfvalue.RDFInteger,
"Total size of each chunk.", default=32)
LAST_CHUNK = aff4.Attribute("aff4:lastchunk", rdfvalue.RDFInteger,
"The highest numbered chunk in this object.",
default=-1)
def Initialize(self):
# The rightmost chunk we've seen so far. We'll use this to keep track of
# what the biggest possible size this file could be is.
self.last_chunk = self.Get(self.Schema.LAST_CHUNK)
super(AFF4SparseIndex, self).Initialize()
def _GetChunkForWriting(self, chunk):
"""Look in the datastore for a chunk, and create it if it isn't there."""
chunk_name = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk)
try:
fd = self.chunk_cache.Get(chunk_name)
except KeyError:
# Try and get a lock on the chunk.
fd = aff4.FACTORY.OpenWithLock(chunk_name, token=self.token)
# If the chunk didn't exist in the datastore, create it.
if fd.Get(fd.Schema.LAST) is None:
# Each time we create a new chunk, we grow in size.
self.size += self.chunksize
self._dirty = True
fd = aff4.FACTORY.Create(chunk_name, "AFF4MemoryStream", mode="rw",
token=self.token)
self.chunk_cache.Put(chunk_name, fd)
# Keep track of the biggest chunk_number we've seen so far.
if chunk > self.last_chunk:
self.last_chunk = chunk
self._dirty = True
return fd
def ChunkExists(self, chunk_number):
"""Do we have this chunk in the index?"""
try:
self._GetChunkForReading(chunk_number)
return True
except aff4.ChunkNotFoundError:
return False
def Write(self, data):
"""Write data to the file."""
self._dirty = True
if not isinstance(data, bytes):
raise IOError("Cannot write unencoded string.")
while data:
data = self._WritePartial(data)
def Read(self, length):
"""Read a block of data from the file."""
result = ""
# The total available size in the file
length = int(length)
# Make sure we don't read past the "end" of the file. We say the end is the
# end of the last chunk. If we do try and read past the end, we should
# return an empty string.
# The end of the file is the *end* of the last chunk, so we add one here.
length = min(length,
((self.last_chunk + 1) * self.chunksize) - self.offset)
while length > 0:
data = self._ReadPartial(length)
if not data:
break
length -= len(data)
result += data
return result
def Flush(self, sync=True):
if self._dirty:
self.Set(self.Schema.LAST_CHUNK, rdfvalue.RDFInteger(self.last_chunk))
super(AFF4SparseIndex, self).Flush(sync=sync)
class AFF4Index(aff4.AFF4Object):
"""An aff4 object which manages access to an index.
This object has no actual attributes, it simply manages the index.
"""
# Value to put in the cell for index hits.
PLACEHOLDER_VALUE = "X"
def __init__(self, urn, **kwargs):
# Never read anything directly from the table by forcing an empty clone.
kwargs["clone"] = {}
super(AFF4Index, self).__init__(urn, **kwargs)
# We collect index data here until we flush.
self.to_set = set()
self.to_delete = set()
def Flush(self, sync=False):
"""Flush the data to the index."""
super(AFF4Index, self).Flush(sync=sync)
# Remove entries from deletion set that are going to be added anyway.
self.to_delete = self.to_delete.difference(self.to_set)
# Convert sets into dicts that MultiSet handles.
to_set = dict(zip(self.to_set, self.PLACEHOLDER_VALUE * len(self.to_set)))
data_store.DB.MultiSet(self.urn, to_set, to_delete=list(self.to_delete),
token=self.token, replace=True, sync=sync)
self.to_set = set()
self.to_delete = set()
def Close(self, sync=False):
self.Flush(sync=sync)
super(AFF4Index, self).Close(sync=sync)
def Add(self, urn, attribute, value):
"""Add the attribute of an AFF4 object to the index.
Args:
urn: The URN of the AFF4 object this attribute belongs to.
attribute: The attribute to add to the index.
value: The value of the attribute to index.
Raises:
RuntimeError: If a bad URN is passed in.
"""
if not isinstance(urn, rdfvalue.RDFURN):
raise RuntimeError("Bad urn parameter for index addition.")
column_name = "index:%s:%s:%s" % (
attribute.predicate, value.lower(), urn)
self.to_set.add(column_name)
def Query(self, attributes, regex, limit=100):
"""Query the index for the attribute.
Args:
attributes: A list of attributes to query for.
regex: The regex to search this attribute.
limit: A (start, length) tuple of integers representing subjects to
return. Useful for paging. If its a single integer we take it as the
length limit (start=0).
Returns:
A list of RDFURNs which match the index search.
"""
# Make the regular expressions.
regex = regex.lstrip("^") # Begin and end string matches work because
regex = regex.rstrip("$") # they are explicit in the storage.
regexes = ["index:%s:%s:.*" % (a.predicate, regex.lower())
for a in attributes]
start = 0
try:
start, length = limit # pylint: disable=unpacking-non-sequence
except TypeError:
length = limit
# Get all the hits
index_hits = set()
for col, _, _ in data_store.DB.ResolveRegex(
self.urn, regexes, token=self.token,
timestamp=data_store.DB.ALL_TIMESTAMPS):
# Extract URN from the column_name.
index_hits.add(rdfvalue.RDFURN(col.rsplit("aff4:/", 1)[1]))
hits = []
for i, hit in enumerate(index_hits):
if i < start: continue
hits.append(hit)
if i >= start + length - 1:
break
return hits
def _QueryRaw(self, regex):
return set([(x, y) for (y, x, _) in data_store.DB.ResolveRegex(
self.urn, regex, token=self.token,
timestamp=data_store.DB.ALL_TIMESTAMPS)])
def MultiQuery(self, attributes, regexes):
"""Query the index for the attribute, matching multiple regexes at a time.
Args:
attributes: A list of attributes to query for.
regexes: A list of regexes to search the attributes for.
Returns:
A dict mapping each matched attribute name to a list of RDFURNs.
"""
# Make the regular expressions.
combined_regexes = []
# Begin and end string matches work because they are explicit in storage.
regexes = [r.lstrip("^").rstrip("$").lower() for r in regexes]
for attribute in attributes:
combined_regexes.append("index:%s:(%s):.*" % (
attribute.predicate, "|".join(regexes)))
# Get all the hits
result = {}
for col, _, _ in data_store.DB.ResolveRegex(
self.urn, combined_regexes, token=self.token,
timestamp=data_store.DB.ALL_TIMESTAMPS):
# Extract the attribute name.
attribute_name = col.split(":")[3]
# Extract URN from the column_name.
urn = rdfvalue.RDFURN(col.rsplit("aff4:/", 1)[1])
result.setdefault(attribute_name, []).append(urn)
return result
def DeleteAttributeIndexesForURN(self, attribute, value, urn):
"""Remove all entries for a given attribute referring to a specific urn."""
if not isinstance(urn, rdfvalue.RDFURN):
raise RuntimeError("Bad urn parameter for index deletion.")
column_name = "index:%s:%s:%s" % (
attribute.predicate, value.lower(), urn)
self.to_delete.add(column_name)
class AFF4IndexSet(aff4.AFF4Object):
"""Index that behaves as a set of strings."""
PLACEHOLDER_VALUE = "X"
INDEX_PREFIX = "index:"
INDEX_PREFIX_LEN = len(INDEX_PREFIX)
def Initialize(self):
super(AFF4IndexSet, self).Initialize()
self.to_set = {}
self.to_delete = set()
def Add(self, value):
column_name = self.INDEX_PREFIX + utils.SmartStr(value)
self.to_set[column_name] = self.PLACEHOLDER_VALUE
def Remove(self, value):
column_name = self.INDEX_PREFIX + utils.SmartStr(value)
self.to_delete.add(column_name)
def ListValues(self, regex=".*", limit=10000):
values = data_store.DB.ResolveRegex(self.urn, self.INDEX_PREFIX + regex,
token=self.token, limit=limit)
result = set()
for v in values:
column_name = v[0]
if column_name in self.to_delete:
continue
result.add(column_name[self.INDEX_PREFIX_LEN:])
for column_name in self.to_set:
if column_name in self.to_delete:
continue
result.add(column_name[self.INDEX_PREFIX_LEN:])
return result
def Flush(self, sync=False):
super(AFF4IndexSet, self).Flush(sync=sync)
data_store.DB.MultiSet(self.urn, self.to_set, token=self.token,
to_delete=list(self.to_delete), replace=True,
sync=sync)
self.to_set = {}
self.to_delete = set()
def Close(self, sync=False):
self.Flush(sync=sync)
super(AFF4IndexSet, self).Close(sync=sync)
class AFF4LabelsIndex(aff4.AFF4Volume):
"""Index for objects' labels with vaiorus querying capabilities."""
# Separator is a character that's not allowed in labels names.
SEPARATOR = "|"
ESCAPED_SEPARATOR = re.escape("|")
def Initialize(self):
super(AFF4LabelsIndex, self).Initialize()
self._urns_index = None
self._used_labels_index = None
@property
def urns_index(self):
if self._urns_index is None:
self._urns_index = aff4.FACTORY.Create(
self.urn.Add("urns_index"), "AFF4Index", mode=self.mode,
token=self.token)
return self._urns_index
@property
def used_labels_index(self):
if self._used_labels_index is None:
self._used_labels_index = aff4.FACTORY.Create(
self.urn.Add("used_labels_index"), "AFF4IndexSet", mode=self.mode,
token=self.token)
return self._used_labels_index
def IndexNameForLabel(self, label_name, label_owner):
return label_owner + self.SEPARATOR + label_name
def LabelForIndexName(self, index_name):
label_owner, label_name = utils.SmartStr(index_name).split(
self.SEPARATOR, 1)
return rdfvalue.AFF4ObjectLabel(name=label_name, owner=label_owner)
def AddLabel(self, urn, label_name, owner=None):
if owner is None:
raise ValueError("owner can't be None")
index_name = self.IndexNameForLabel(label_name, owner)
self.urns_index.Add(urn, aff4.AFF4Object.SchemaCls.LABELS, index_name)
self.used_labels_index.Add(index_name)
def RemoveLabel(self, urn, label_name, owner=None):
if owner is None:
raise ValueError("owner can't be None")
self.urns_index.DeleteAttributeIndexesForURN(
aff4.AFF4Object.SchemaCls.LABELS,
self.IndexNameForLabel(label_name, owner), urn)
def ListUsedLabels(self):
index_results = self.used_labels_index.ListValues()
return [self.LabelForIndexName(name) for name in index_results]
def FindUrnsByLabel(self, label, owner=None):
results = self.MultiFindUrnsByLabel([label], owner=owner).values()
if not results:
return []
else:
return results[0]
def MultiFindUrnsByLabel(self, labels, owner=None):
if owner is None:
owner = ".+"
else:
owner = re.escape(owner)
query_results = self.urns_index.MultiQuery(
[aff4.AFF4Object.SchemaCls.LABELS],
[owner + self.ESCAPED_SEPARATOR + re.escape(label) for label in labels])
results = {}
for key, value in query_results.iteritems():
results[self.LabelForIndexName(key)] = value
return results
def FindUrnsByLabelNameRegex(self, label_name_regex, owner=None):
return self.MultiFindUrnsByLabelNameRegex([label_name_regex], owner=owner)
def MultiFindUrnsByLabelNameRegex(self, label_name_regexes, owner=None):
if owner is None:
owner = ".+"
else:
owner = re.escape(owner)
query_results = self.urns_index.MultiQuery(
[aff4.AFF4Object.SchemaCls.LABELS],
[owner + self.ESCAPED_SEPARATOR + regex
for regex in label_name_regexes])
results = {}
for key, value in query_results.iteritems():
results[self.LabelForIndexName(key)] = value
return results
def CleanUpUsedLabelsIndex(self):
raise NotImplementedError()
def Flush(self, sync=False):
super(AFF4LabelsIndex, self).Flush(sync=sync)
self.urns_index.Flush(sync=sync)
self.used_labels_index.Flush(sync=sync)
def Close(self, sync=False):
self.Flush(sync=sync)
super(AFF4LabelsIndex, self).Close(sync=sync)
class TempMemoryFile(aff4.AFF4MemoryStream):
"""A temporary AFF4MemoryStream-based file with a random URN."""
def __init__(self, urn, **kwargs):
if urn is None:
urn = rdfvalue.RDFURN("aff4:/tmp").Add("%X" % utils.PRNG.GetULong())
super(TempMemoryFile, self).__init__(urn, **kwargs)
class TempImageFile(aff4.AFF4Image):
"""A temporary file AFF4Image-based file with a random URN."""
def __init__(self, urn, **kwargs):
if urn is None:
urn = rdfvalue.RDFURN("aff4:/tmp").Add("%X" % utils.PRNG.GetULong())
super(TempImageFile, self).__init__(urn, **kwargs)
|
the-stack_0_1066 | import warnings
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets import to_tensor
from mmdet.datasets.transforms import ImageTransform
from mmdet.models import build_detector
from imantics import Polygons, Mask
import cv2
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
'but got {}'.format(type(config)))
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['classes']
else:
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def inference_detector(model, imgs):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
If imgs is a str, a generator will be returned, otherwise return the
detection results directly.
"""
cfg = model.cfg
img_transform = ImageTransform(
size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
device = next(model.parameters()).device # model device
if not isinstance(imgs, list):
return _inference_single(model, imgs, img_transform, device)
else:
return _inference_generator(model, imgs, img_transform, device)
def _prepare_data(img, img_transform, cfg, device):
ori_shape = img.shape
img, img_shape, pad_shape, scale_factor = img_transform(
img,
scale=cfg.data.test.img_scale,
keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
img = to_tensor(img).to(device).unsqueeze(0)
img_meta = [
dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=False)
]
return dict(img=[img], img_meta=[img_meta])
def _inference_single(model, img, img_transform, device):
img = mmcv.imread(img)
data = _prepare_data(img, img_transform, model.cfg, device)
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
def _inference_generator(model, imgs, img_transform, device):
for img in imgs:
yield _inference_single(model, img, img_transform, device)
# TODO: merge this method with the one in BaseDetector
def show_result(img, result, class_names, score_thr=0.3, out_file=None):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
# Accumulate the colour components already used so each new mask colour
# differs from the previous ones by at least 25 per channel.
colors = {k: [] for k in 'rgb'}
for i in inds:
temp = {k: np.random.randint(0, 255) for k in 'rgb'}
for k in temp:
while 1:
c = temp[k]
t = set(j for j in range(c - 25, c + 25) if 0 <= j <= 255)
if t.intersection(colors[k]):
temp[k] = np.random.randint(0, 255)
else:
break
colors[k].append(temp[k])
color_mask = np.array([colors['r'][0], colors['g'][0], colors['b'][0]])
# color_mask = np.random.randint(
# 0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
thickness_t = 2 if img.shape[0] < 3000 else 6
font_scale_t = 0.65 if img.shape[0] < 3000 else 2.5
mmcv.imshow_det_bboxes(
img.copy(),
bboxes,
labels,
class_names=class_names,
score_thr=score_thr,
text_color='yellow',
thickness=thickness_t,
font_scale=font_scale_t,
show=out_file is None,
out_file=out_file,
win_name='demo')
def result2dict(img, result, class_names, score_thr=0.3, out_file=None):
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
arr_poly = []
arr_masks = []
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
# img[mask] = img[mask] * 0.5 + color_mask * 0.5
polygons = Mask(mask).polygons()
if polygons.points:
arr_poly.append(polygons.points[0])
arr_masks.append(mask)
else:
arr_poly.append(np.empty([]))
arr_masks.append(np.empty([]))
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
if not arr_poly:
return None
# Keep only instances that produced a real polygon; the placeholders appended
# above are 0-d arrays created with np.empty([]).
keep = np.array([i for i, val in enumerate(arr_poly) if val.ndim > 0])
labels = np.concatenate(labels)
ret_poly = [i.tolist() for i in np.array(arr_poly)[keep]]
ret_mask = [i.tolist() for i in np.array(arr_masks)[keep]]
return bboxes[keep].tolist(), ret_poly, np.array([class_names[i] for i in labels])[keep].tolist(), ret_mask
# mmcv.imshow_det_bboxes(
# img.copy(),
# bboxes,
# labels,
# class_names=class_names,
# score_thr=score_thr,
# show=out_file is None,
# out_file=out_file)
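# Typical end-to-end usage (a sketch; the config/checkpoint/image paths are placeholders
# and must point to a real mmdetection config, trained weights and an image):
#
#   model = init_detector('configs/mask_rcnn_r50_fpn_1x.py',
#                         'checkpoints/mask_rcnn_r50_fpn_1x.pth', device='cuda:0')
#   result = inference_detector(model, 'demo/demo.jpg')
#   show_result('demo/demo.jpg', result, model.CLASSES, score_thr=0.3,
#               out_file='demo/out.jpg')
#   # result2dict() returns (bboxes, polygons, labels, masks), or None if no
#   # instance passes the score threshold.
#   parsed = result2dict('demo/demo.jpg', result, model.CLASSES)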
|
the-stack_0_1067 | from os import path
from pandas.api.types import CategoricalDtype
from numpy import mean, concatenate, ones, sqrt, zeros, arange
from scipy.stats import norm
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from attack_models.attack_model import PrivacyAttack
from utils.constants import *
from utils.logging import LOGGER
class AttributeInferenceAttack(PrivacyAttack):
"""A privacy attack that aims to reconstruct a sensitive attribute c given a partial target record T"""
def __init__(self, PredictionModel, sensitiveAttribute, metadata, quids=None):
"""
Parent class for simple regression attribute inference attack
:param PredictionModel: object: sklearn-type prediction model
:param sensitiveAttribute: string: name of a column in a DataFrame that is considered the unknown, sensitive attribute
:param metadata: dict: schema for the data to be attacked
:param quids: list, optional: names of numerical attributes to treat as categorical quasi-identifiers, binned using the 'bins' given in the metadata
"""
self.PredictionModel = PredictionModel
self.sensitiveAttribute = sensitiveAttribute
self.metadata, self.knownAttributes, self.categoricalAttributes, self.nfeatures = self._read_meta(metadata, quids)
self.ImputerCat = SimpleImputer(strategy='most_frequent')
self.ImputerNum = SimpleImputer(strategy='median')
self.trained = False
self.__name__ = f'{self.PredictionModel.__class__.__name__}'
def attack(self, targetAux, attemptLinkage=False, data=None):
"""Makes a guess about the target's secret attribute"""
assert self.trained, 'Attack must first be trained on some data before can predict sensitive target value'
if attemptLinkage:
assert data is not None, "Need a dataset for linkage attack."
try:
groups = data.groupby(self.categoricalAttributes)
targetCats = targetAux[self.categoricalAttributes].values
groupSize = groups.size()[targetCats]
if all(groupSize == 1):
guess = groups.get_group(tuple(targetCats[0]))[self.sensitiveAttribute].values[0]
else:
guess = self._make_guess(targetAux)
except:
guess = self._make_guess(targetAux)
else:
guess = self._make_guess(targetAux)
return guess
def _make_guess(self, targetAux):
raise NotImplementedError('Method must be overriden by a subclass')
def _read_meta(self, metadata, quids):
if quids is None:
quids = []
meta_dict = {}
knownAttributes = []
categoricalAttributes = []
nfeatures = 0
for cdict in metadata['columns']:
attr_name = cdict['name']
data_type = cdict['type']
if data_type == FLOAT or data_type == INTEGER:
if attr_name in quids:
cat_bins = cdict['bins']
cat_labels = [f'({cat_bins[i]},{cat_bins[i+1]}]' for i in range(len(cat_bins)-1)]
meta_dict[attr_name] = {
'type': CATEGORICAL,
'categories': cat_labels,
'size': len(cat_labels)
}
nfeatures += len(cat_labels)
if attr_name != self.sensitiveAttribute:
categoricalAttributes.append(attr_name)
else:
meta_dict[attr_name] = {
'type': data_type,
'min': cdict['min'],
'max': cdict['max']
}
nfeatures += 1
elif data_type == CATEGORICAL or data_type == ORDINAL:
meta_dict[attr_name] = {
'type': data_type,
'categories': cdict['i2s'],
'size': len(cdict['i2s'])
}
nfeatures += len(cdict['i2s'])
if attr_name != self.sensitiveAttribute:
categoricalAttributes.append(attr_name)
else:
raise ValueError(f'Unknown data type {data_type} for attribute {attr_name}')
if attr_name != self.sensitiveAttribute:
knownAttributes.append(attr_name)
return meta_dict, knownAttributes, categoricalAttributes, nfeatures
def _encode_data(self, data):
dfcopy = data.copy()
for col, cdict in self.metadata.items():
if col in list(dfcopy):
col_data = dfcopy[col]
if cdict['type'] in [CATEGORICAL, ORDINAL]:
if len(col_data) > len(col_data.dropna()):
col_data = col_data.fillna(FILLNA_VALUE_CAT)
if FILLNA_VALUE_CAT not in cdict['categories']:
cdict['categories'].append(FILLNA_VALUE_CAT)
cdict['size'] += 1
cat = CategoricalDtype(categories=cdict['categories'], ordered=True)
col_data = col_data.astype(cat)
dfcopy[col] = col_data.cat.codes
return dfcopy.values
def _impute_missing_values(self, df):
dfImpute = df.copy()
catCols = []
numCols = []
for attr, col in self.metadata.items():
if attr in list(dfImpute):
if col['type'] in [CATEGORICAL, ORDINAL]:
catCols.append(attr)
elif col['type'] in NUMERICAL:
numCols.append(attr)
self.ImputerCat.fit(df[catCols])
dfImpute[catCols] = self.ImputerCat.transform(df[catCols])
self.ImputerNum.fit(df[numCols])
dfImpute[numCols] = self.ImputerNum.transform(df[numCols])
return dfImpute
def _one_hot(self, col_data, categories):
col_data_onehot = zeros((len(col_data), len(categories)))
cidx = [categories.index(c) for c in col_data]
col_data_onehot[arange(len(col_data)), cidx] = 1
return col_data_onehot
class LinRegAttack(AttributeInferenceAttack):
"""An AttributeInferenceAttack based on a simple Linear Regression model"""
def __init__(self, sensitiveAttribute, metadata, quids=None):
super().__init__(LinearRegression(fit_intercept=False), sensitiveAttribute, metadata, quids)
self.scaleFactor = None
self.coefficients = None
self.sigma = None
def train(self, data):
"""
Train a MLE attack to reconstruct an unknown sensitive value from a vector of known attributes
:param data: type(DataFrame) A dataset of shape (n, k)
"""
features = self._encode_data(data.drop(self.sensitiveAttribute, axis=1))
labels = data[self.sensitiveAttribute].values
n, k = features.shape
# Center independent variables for better regression performance
self.scaleFactor = mean(features, axis=0)
featuresScaled = features - self.scaleFactor
featuresScaled = concatenate([ones((n, 1)), featuresScaled], axis=1)  # append a column of ones so the intercept is included in the beta vector
# Get MLE for linear coefficients
self.PredictionModel.fit(featuresScaled, labels)
self.coefficients = self.PredictionModel.coef_
self.sigma = sum((labels - featuresScaled.dot(self.coefficients))**2)/(n-k)
LOGGER.debug('Finished training regression model')
self.trained = True
def _make_guess(self, targetAux):
targetFeatures = self._encode_data(targetAux)
targetFeaturesScaled = targetFeatures - self.scaleFactor
targetFeaturesScaled = concatenate([ones((len(targetFeaturesScaled), 1)), targetFeaturesScaled], axis=1)  # use the centred features, matching training
guess = targetFeaturesScaled.dot(self.coefficients)[0]
return guess
def get_likelihood(self, targetAux, targetSensitive, attemptLinkage=False, data=None):
assert self.trained, 'Attack must first be trained on some data before can predict sensitive target value'
targetFeatures = self._encode_data(targetAux)
targetFeaturesScaled = targetFeatures - self.scaleFactor
targetFeaturesScaled = concatenate([ones((len(targetFeaturesScaled), 1)), targetFeaturesScaled], axis=1)  # use the centred features, matching training
if attemptLinkage:
assert data is not None, "Need a dataset for linkage attack."
try:
groups = data.groupby(self.categoricalAttributes)
targetCats = targetAux[self.categoricalAttributes].values
groupSize = groups.size()[targetCats]
if all(groupSize == 1):
pCorrect = 1.
else:
pdfLikelihood = norm(loc=targetFeaturesScaled.dot(self.coefficients), scale=sqrt(self.sigma))
pCorrect = pdfLikelihood.pdf(targetSensitive)[0]
except:
pdfLikelihood = norm(loc=targetFeaturesScaled.dot(self.coefficients), scale=sqrt(self.sigma))
pCorrect = pdfLikelihood.pdf(targetSensitive)[0]
else:
pdfLikelihood = norm(loc=targetFeaturesScaled.dot(self.coefficients), scale=sqrt(self.sigma))
pCorrect = pdfLikelihood.pdf(targetSensitive)[0]
return pCorrect
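# Illustrative sketch (not part of the original module): training a LinRegAttack on the
# adversary's background data and querying it for one partially-known target record.
# `meta`, `aux_df`, `target_row` and `true_value` are hypothetical placeholders for the
# metadata dict, a background DataFrame, a one-row DataFrame without the sensitive
# column, and the target's true secret value.
#
#   attack = LinRegAttack(sensitiveAttribute='income', metadata=meta)
#   attack.train(aux_df)
#   guess = attack.attack(target_row)
#   likelihood = attack.get_likelihood(target_row, true_value)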
class RandForestAttack(AttributeInferenceAttack):
"""An AttributeInferenceAttack based on a simple Linear Regression model"""
def __init__(self, sensitiveAttribute, metadata, quids=None):
super().__init__(RandomForestClassifier(), sensitiveAttribute, metadata, quids)
self.labels = {l:i for i, l in enumerate(self.metadata[self.sensitiveAttribute]['categories'])}
self.labelsInv = {i:l for l, i in self.labels.items()}
self.scaleFactor = None
def train(self, data):
"""
Train a Classifier to reconstruct an unknown sensitive label from a vector of known attributes
:param data: type(DataFrame) A dataset of shape (n, k)
"""
features = self._encode_data(data.drop(self.sensitiveAttribute, axis=1))
labels = data[self.sensitiveAttribute].apply(lambda x: self.labels[x]).values
# Feature normalisation
self.scaleFactor = mean(features, axis=0)
featuresScaled = features - self.scaleFactor
# Get MLE for linear coefficients
self.PredictionModel.fit(featuresScaled, labels)
LOGGER.debug('Finished training regression model')
self.trained = True
def _make_guess(self, targetAux):
targetFeatures = self._encode_data(targetAux)
targetFeaturesScaled = targetFeatures - self.scaleFactor
guess = self.PredictionModel.predict(targetFeaturesScaled)
return self.labelsInv[guess[0]]
def get_likelihood(self, targetAux, targetSensitive, attemptLinkage=False, data=None):
assert self.trained, 'Attack must first be trained on some data before can predict sensitive target value'
targetFeatures = self._encode_data(targetAux)
targetFeaturesScaled = targetFeatures - self.scaleFactor
if attemptLinkage:
assert data is not None, "Need a dataset for linkage attack."
try:
groups = data.groupby(self.categoricalAttributes)
targetCats = targetAux[self.categoricalAttributes].values
groupSize = groups.size()[targetCats]
if all(groupSize == 1):
pCorrect = 1.
else:
probs = self.PredictionModel.predict_proba(targetFeaturesScaled).flatten()
pCorrect = probs[self.labels[targetSensitive]]
except:
probs = self.PredictionModel.predict_proba(targetFeaturesScaled).flatten()
pCorrect = probs[self.labels[targetSensitive]]
else:
probs = self.PredictionModel.predict_proba(targetFeaturesScaled).flatten()
pCorrect = probs[self.labels[targetSensitive]]
return pCorrect |
the-stack_0_1068 | from django.db import IntegrityError
from django.shortcuts import render,redirect, get_object_or_404
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.utils import timezone
from .forms import TodoForm
from .models import *
# Create your views here.
@login_required
def kaamkaj(request):
return render(request,'kaamkaj/kaamkaj_home.html',locals())
@login_required
def kaam_list(request):
todos = KaamKaj.objects.filter(user=request.user, complete_date__isnull=True).order_by('-created')
return render(request,'kaamkaj/current_kaam.html',locals())
@login_required
def create_kaam(request):
if request.method == 'GET':
return render(request,'kaamkaj/create_kaam.html', {'form':TodoForm})
else:
try:
form = TodoForm(request.POST)
new_todo = form.save(commit=False)
new_todo.user = request.user
new_todo.save()
return redirect('kaamkaj_list')
except ValueError:
return render(request,'kaamkaj/create_kaam.html', {'form':TodoForm, 'error':'Bad data passed in. Try again.'} )
@login_required
def kaamkaj_details(request,todo_id):
todo = get_object_or_404(KaamKaj, id=todo_id, user=request.user)
if request.method == "GET":
form = TodoForm(instance=todo)
return render(request,'kaamkaj/kaamkaj_details.html', {'todo':todo,'form':form})
else:
try:
form = TodoForm(request.POST, instance=todo)
form.save()
return redirect('kaamkaj_list')
except ValueError:
return render(request,'kaamkaj/kaamkaj_details.html', {'todo':todo, 'form':form, 'error':'Bad Info'} )
@login_required
def kaamkaj_complete(request,todo_pk):
todo = get_object_or_404(KaamKaj, id=todo_pk, user=request.user)
if request.method == 'POST':
todo.complete_date = timezone.now()
todo.save()
return redirect('kaamkaj_list')
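# Sketch of matching URL patterns (hypothetical urls.py; only the 'kaamkaj_list' name is
# actually required by the redirect() calls above, the other names are illustrative):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.kaamkaj, name='kaamkaj_home'),
#       path('list/', views.kaam_list, name='kaamkaj_list'),
#       path('create/', views.create_kaam, name='kaamkaj_create'),
#       path('<int:todo_id>/', views.kaamkaj_details, name='kaamkaj_details'),
#       path('<int:todo_pk>/complete/', views.kaamkaj_complete, name='kaamkaj_complete'),
#   ]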
|
the-stack_0_1069 | import sc2, sys
from __init__ import run_ladder_game
from sc2 import Race, Difficulty
from sc2.player import Bot, Computer
# Load bot
from example_bot import ExampleBot
bot = Bot(Race.Terran, ExampleBot())
# Start game
if __name__ == '__main__':
if "--LadderServer" in sys.argv:
# Ladder game started by LadderManager
print("Starting ladder game...")
result, opponentid = run_ladder_game(bot)
print(result," against opponent ", opponentid)
else:
# Local game
print("Starting local game...")
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
bot,
Computer(Race.Protoss, Difficulty.VeryHard)
], realtime=True)
|
the-stack_0_1070 | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from itertools import chain, permutations
from functools import partial
import cuml
import cuml.common.logger as logger
import cupy as cp
import numpy as np
import pytest
import cudf
from cuml.ensemble import RandomForestClassifier as curfc
from cuml.metrics.cluster import adjusted_rand_score as cu_ars
from cuml.metrics import accuracy_score as cu_acc_score
from cuml.metrics.cluster import silhouette_score as cu_silhouette_score
from cuml.metrics.cluster import silhouette_samples as cu_silhouette_samples
from cuml.test.utils import get_handle, get_pattern, array_equal, \
unit_param, quality_param, stress_param, generate_random_labels, \
score_labeling_with_handle
from numba import cuda
from numpy.testing import assert_almost_equal
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import accuracy_score as sk_acc_score
from sklearn.metrics import log_loss as sklearn_log_loss
from sklearn.metrics.cluster import adjusted_rand_score as sk_ars
from sklearn.metrics.cluster import homogeneity_score as sk_homogeneity_score
from sklearn.metrics.cluster import completeness_score as sk_completeness_score
from sklearn.metrics.cluster import mutual_info_score as sk_mutual_info_score
from sklearn.metrics.cluster import silhouette_score as sk_silhouette_score
from sklearn.metrics.cluster import silhouette_samples as sk_silhouette_samples
from sklearn.preprocessing import StandardScaler
from cuml.metrics.cluster import entropy
from cuml.metrics.regression import mean_squared_error, \
mean_squared_log_error, mean_absolute_error
from sklearn.metrics import mean_squared_error as sklearn_mse
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from cuml.metrics import confusion_matrix
from sklearn.metrics import mean_absolute_error as sklearn_mae
from sklearn.metrics import mean_squared_log_error as sklearn_msle
from cuml.common import has_scipy
from cuml.metrics import roc_auc_score
from cuml.metrics import precision_recall_curve
from cuml.metrics import log_loss
from sklearn.metrics import roc_auc_score as sklearn_roc_auc_score
from sklearn.metrics import precision_recall_curve \
as sklearn_precision_recall_curve
from cuml.metrics import pairwise_distances, PAIRWISE_DISTANCE_METRICS
from sklearn.metrics import pairwise_distances as sklearn_pairwise_distances
@pytest.fixture(scope='module')
def random_state():
random_state = random.randint(0, 1e6)
with logger.set_level(logger.level_debug):
logger.debug("Random seed: {}".format(random_state))
return random_state
@pytest.fixture(
scope='module',
params=(
{'n_clusters': 2, 'n_features': 2, 'label_type': 'int64',
'data_type': 'float32'},
{'n_clusters': 5, 'n_features': 1000, 'label_type': 'int32',
'data_type': 'float64'}
)
)
def labeled_clusters(request, random_state):
data, labels = make_blobs(
n_samples=1000,
n_features=request.param['n_features'],
random_state=random_state,
centers=request.param['n_clusters'],
center_box=(-1, 1),
cluster_std=1.5 # Allow some cluster overlap
)
return (
data.astype(request.param['data_type']),
labels.astype(request.param['label_type'])
)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('use_handle', [True, False])
def test_r2_score(datatype, use_handle):
a = np.array([0.1, 0.2, 0.3, 0.4, 0.5], dtype=datatype)
b = np.array([0.12, 0.22, 0.32, 0.42, 0.52], dtype=datatype)
a_dev = cuda.to_device(a)
b_dev = cuda.to_device(b)
handle, stream = get_handle(use_handle)
score = cuml.metrics.r2_score(a_dev, b_dev, handle=handle)
np.testing.assert_almost_equal(score, 0.98, decimal=7)
def test_sklearn_search():
"""Test ensures scoring function works with sklearn machinery
"""
import numpy as np
from cuml import Ridge as cumlRidge
import cudf
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
diabetes = datasets.load_diabetes()
X_train, X_test, y_train, y_test = train_test_split(diabetes.data,
diabetes.target,
test_size=0.2,
shuffle=False,
random_state=1)
alpha = np.array([1.0])
fit_intercept = True
normalize = False
params = {'alpha': np.logspace(-3, -1, 10)}
cu_clf = cumlRidge(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, solver="eig")
assert getattr(cu_clf, 'score', False)
sk_cu_grid = GridSearchCV(cu_clf, params, cv=5, iid=False)
gdf_data = cudf.DataFrame(X_train)
gdf_train = cudf.DataFrame(dict(train=y_train))
sk_cu_grid.fit(gdf_data, gdf_train.train)
assert sk_cu_grid.best_params_ == {'alpha': 0.1}
@pytest.mark.parametrize('nrows', [unit_param(30), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('ncols', [unit_param(10), quality_param(100),
stress_param(200)])
@pytest.mark.parametrize('n_info', [unit_param(7), quality_param(50),
stress_param(100)])
@pytest.mark.parametrize('datatype', [np.float32])
def test_accuracy(nrows, ncols, n_info, datatype):
use_handle = True
train_rows = np.int32(nrows*0.8)
X, y = make_classification(n_samples=nrows, n_features=ncols,
n_clusters_per_class=1, n_informative=n_info,
random_state=123, n_classes=5)
X_test = np.asarray(X[train_rows:, 0:]).astype(datatype)
y_test = np.asarray(y[train_rows:, ]).astype(np.int32)
X_train = np.asarray(X[0:train_rows, :]).astype(datatype)
y_train = np.asarray(y[0:train_rows, ]).astype(np.int32)
# Create a handle for the cuml model
handle, stream = get_handle(use_handle, n_streams=8)
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfc(max_features=1.0,
n_bins=8, split_algo=0, split_criterion=0,
min_samples_leaf=2,
n_estimators=40, handle=handle, max_leaves=-1,
max_depth=16)
cuml_model.fit(X_train, y_train)
cu_predict = cuml_model.predict(X_test)
cu_acc = cu_acc_score(y_test, cu_predict)
cu_acc_using_sk = sk_acc_score(y_test, cu_predict)
# compare the accuracy of the two models
assert array_equal(cu_acc, cu_acc_using_sk)
dataset_names = ['noisy_circles', 'noisy_moons', 'aniso'] + \
[pytest.param(ds, marks=pytest.mark.xfail)
for ds in ['blobs', 'varied']]
@pytest.mark.parametrize('name', dataset_names)
@pytest.mark.parametrize('nrows', [unit_param(20), quality_param(5000),
stress_param(500000)])
def test_rand_index_score(name, nrows):
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
pat = get_pattern(name, nrows)
params = default_base.copy()
params.update(pat[1])
cuml_kmeans = cuml.KMeans(n_clusters=params['n_clusters'])
X, y = pat[0]
X = StandardScaler().fit_transform(X)
cu_y_pred = cuml_kmeans.fit_predict(X)
cu_score = cu_ars(y, cu_y_pred)
cu_score_using_sk = sk_ars(y, cp.asnumpy(cu_y_pred))
assert array_equal(cu_score, cu_score_using_sk)
@pytest.mark.parametrize('metric', (
'cityblock', 'cosine', 'euclidean', 'l1', 'sqeuclidean'
))
@pytest.mark.parametrize('chunk_divider', [1, 3, 5])
def test_silhouette_score_batched(metric, chunk_divider, labeled_clusters):
X, labels = labeled_clusters
cuml_score = cu_silhouette_score(X, labels, metric=metric,
chunksize=int(X.shape[0]/chunk_divider))
sk_score = sk_silhouette_score(X, labels, metric=metric)
assert_almost_equal(cuml_score, sk_score, decimal=2)
@pytest.mark.parametrize('metric', (
'cityblock', 'cosine', 'euclidean', 'l1', 'sqeuclidean'
))
@pytest.mark.parametrize('chunk_divider', [1, 3, 5])
def test_silhouette_samples_batched(metric, chunk_divider, labeled_clusters):
X, labels = labeled_clusters
cuml_scores = cu_silhouette_samples(X, labels, metric=metric,
chunksize=int(X.shape[0] /
chunk_divider))
sk_scores = sk_silhouette_samples(X, labels, metric=metric)
cu_trunc = cp.around(cuml_scores, decimals=3)
sk_trunc = cp.around(sk_scores, decimals=3)
diff = cp.absolute(cu_trunc - sk_trunc) > 0
    # 0.5% of elements are allowed to differ after rounding
    n_diff = int(cp.count_nonzero(diff))
    assert n_diff <= 0.005 * X.shape[0]
    # differing elements should not differ by more than 1e-1
    tolerance_diff = cp.absolute(cu_trunc[diff] - sk_trunc[diff]) > 1e-1
    assert not bool(cp.any(tolerance_diff))
def score_homogeneity(ground_truth, predictions, use_handle):
return score_labeling_with_handle(cuml.metrics.homogeneity_score,
ground_truth,
predictions,
use_handle,
dtype=np.int32)
def score_completeness(ground_truth, predictions, use_handle):
return score_labeling_with_handle(cuml.metrics.completeness_score,
ground_truth,
predictions,
use_handle,
dtype=np.int32)
def score_mutual_info(ground_truth, predictions, use_handle):
return score_labeling_with_handle(cuml.metrics.mutual_info_score,
ground_truth,
predictions,
use_handle,
dtype=np.int32)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [1, 1, 0, 0]),
([0, 0, 1, 1], [0, 0, 1, 1])])
def test_homogeneity_perfect_labeling(use_handle, data):
# Perfect labelings are homogeneous
hom = score_homogeneity(*data, use_handle)
assert_almost_equal(hom, 1.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [0, 0, 1, 2]),
([0, 0, 1, 1], [0, 1, 2, 3])])
def test_homogeneity_non_perfect_labeling(use_handle, data):
# Non-perfect labelings that further split classes into more clusters can
# be perfectly homogeneous
hom = score_homogeneity(*data, use_handle)
assert_almost_equal(hom, 1.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [0, 1, 0, 1]),
([0, 0, 1, 1], [0, 0, 0, 0])])
def test_homogeneity_non_homogeneous_labeling(use_handle, data):
    # Clusters that include samples from different classes do not make for
    # a homogeneous labeling
hom = score_homogeneity(*data, use_handle)
assert_almost_equal(hom, 0.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 1000],
[-1000, 1000]])
def test_homogeneity_score_big_array(use_handle, input_range):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
int(10e4),
dtype=np.int32))
score = score_homogeneity(a, b, use_handle)
ref = sk_homogeneity_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 2],
[-5, 20],
[int(-10e2), int(10e2)]])
def test_homogeneity_completeness_symmetry(use_handle, input_range):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
int(10e3),
dtype=np.int32))
hom = score_homogeneity(a, b, use_handle)
com = score_completeness(b, a, use_handle)
np.testing.assert_almost_equal(hom, com, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_labels', [([0, 0, 1, 1], [1, 1, 0, 0]),
([0, 0, 1, 1], [0, 0, 1, 1]),
([0, 0, 1, 1], [0, 0, 1, 2]),
([0, 0, 1, 1], [0, 1, 2, 3]),
([0, 0, 1, 1], [0, 1, 0, 1]),
([0, 0, 1, 1], [0, 0, 0, 0])])
def test_mutual_info_score(use_handle, input_labels):
score = score_mutual_info(*input_labels, use_handle)
ref = sk_mutual_info_score(*input_labels)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 1000],
[-1000, 1000]])
def test_mutual_info_score_big_array(use_handle, input_range):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
int(10e4),
dtype=np.int32))
score = score_mutual_info(a, b, use_handle)
ref = sk_mutual_info_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('n', [14])
def test_mutual_info_score_range_equal_samples(use_handle, n):
input_range = (-n, n)
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
n,
dtype=np.int32))
score = score_mutual_info(a, b, use_handle)
ref = sk_mutual_info_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 19],
[0, 2],
[-5, 20]])
@pytest.mark.parametrize('n_samples', [129, 258])
def test_mutual_info_score_many_blocks(use_handle, input_range, n_samples):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
n_samples,
dtype=np.int32))
score = score_mutual_info(a, b, use_handle)
ref = sk_mutual_info_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [1, 1, 0, 0]),
([0, 0, 1, 1], [0, 0, 1, 1])])
def test_completeness_perfect_labeling(use_handle, data):
# Perfect labelings are complete
com = score_completeness(*data, use_handle)
np.testing.assert_almost_equal(com, 1.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [0, 0, 0, 0]),
([0, 1, 2, 3], [0, 0, 1, 1])])
def test_completeness_non_perfect_labeling(use_handle, data):
    # Non-perfect labelings that assign all class members to the same
    # clusters are still complete
com = score_completeness(*data, use_handle)
np.testing.assert_almost_equal(com, 1.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [0, 1, 0, 1]),
([0, 0, 0, 0], [0, 1, 2, 3])])
def test_completeness_non_complete_labeling(use_handle, data):
    # If class members are split across different clusters, the assignment
# cannot be complete
com = score_completeness(*data, use_handle)
np.testing.assert_almost_equal(com, 0.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 1000],
[-1000, 1000]])
def test_completeness_score_big_array(use_handle, input_range):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
int(10e4),
dtype=np.int32))
score = score_completeness(a, b, use_handle)
ref = sk_completeness_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
def test_regression_metrics():
y_true = np.arange(50, dtype=np.int)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_squared_log_error(y_true, y_pred),
mean_squared_error(np.log(1 + y_true),
np.log(1 + y_pred)))
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
@pytest.mark.parametrize('n_samples', [50, stress_param(500000)])
@pytest.mark.parametrize('dtype', [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize('function', ['mse', 'mae', 'msle'])
def test_regression_metrics_random(n_samples, dtype, function):
if dtype == np.float32 and n_samples == 500000:
# stress test for float32 fails because of floating point precision
pytest.xfail()
y_true, y_pred, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 1000, n_samples).astype(dtype))
cuml_reg, sklearn_reg = {
'mse': (mean_squared_error, sklearn_mse),
'mae': (mean_absolute_error, sklearn_mae),
'msle': (mean_squared_log_error, sklearn_msle)
}[function]
res = cuml_reg(y_true, y_pred, multioutput='raw_values')
ref = sklearn_reg(y_true, y_pred, multioutput='raw_values')
cp.testing.assert_array_almost_equal(res, ref, decimal=2)
@pytest.mark.parametrize('function', ['mse', 'mse_not_squared', 'mae', 'msle'])
def test_regression_metrics_at_limits(function):
y_true = np.array([0.], dtype=np.float)
y_pred = np.array([0.], dtype=np.float)
cuml_reg = {
'mse': mean_squared_error,
'mse_not_squared': partial(mean_squared_error, squared=False),
'mae': mean_absolute_error,
'msle': mean_squared_log_error,
}[function]
assert_almost_equal(cuml_reg(y_true, y_pred), 0.00, decimal=2)
@pytest.mark.parametrize('inputs', [([-1.], [-1.]),
([1., 2., 3.], [1., -2., 3.]),
([1., -2., 3.], [1., 2., 3.])])
def test_mean_squared_log_error_exceptions(inputs):
with pytest.raises(ValueError):
mean_squared_log_error(np.array(inputs[0]), np.array(inputs[1]))
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. + 2. / 3) / 4.)
error = mean_squared_error(y_true, y_pred, squared=False)
assert_almost_equal(error, 0.645, decimal=2)
error = mean_squared_log_error(y_true, y_pred)
assert_almost_equal(error, 0.200, decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
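    # (with these 0/1 arrays every per-element error is 0 or 1, so |e| == e**2
    #  element-wise and the two metrics coincide)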
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. + 2. / 3) / 4.)
def test_regression_metrics_multioutput_array():
y_true = np.array([[1, 2], [2.5, -1], [4.5, 3], [5, 7]], dtype=np.float)
y_pred = np.array([[1, 1], [2, -1], [5, 4], [5, 6.5]], dtype=np.float)
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
cp.testing.assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
cp.testing.assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
weights = np.array([0.4, 0.6], dtype=np.float)
msew = mean_squared_error(y_true, y_pred, multioutput=weights)
rmsew = mean_squared_error(y_true, y_pred, multioutput=weights,
squared=False)
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(rmsew, 0.62, decimal=2)
y_true = np.array([[0, 0]] * 4, dtype=np.int)
y_pred = np.array([[1, 1]] * 4, dtype=np.int)
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
cp.testing.assert_array_almost_equal(mse, [1., 1.], decimal=2)
cp.testing.assert_array_almost_equal(mae, [1., 1.], decimal=2)
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput='raw_values')
cp.testing.assert_array_almost_equal(msle, msle2, decimal=2)
@pytest.mark.parametrize('function', ['mse', 'mae'])
def test_regression_metrics_custom_weights(function):
y_true = np.array([1, 2, 2.5, -1], dtype=np.float)
y_pred = np.array([1, 1, 2, -1], dtype=np.float)
weights = np.array([0.2, 0.25, 0.4, 0.15], dtype=np.float)
cuml_reg, sklearn_reg = {
'mse': (mean_squared_error, sklearn_mse),
'mae': (mean_absolute_error, sklearn_mae)
}[function]
score = cuml_reg(y_true, y_pred, sample_weight=weights)
ref = sklearn_reg(y_true, y_pred, sample_weight=weights)
assert_almost_equal(score, ref, decimal=2)
def test_mse_vs_msle_custom_weights():
y_true = np.array([0.5, 2, 7, 6], dtype=np.float)
y_pred = np.array([0.5, 1, 8, 8], dtype=np.float)
weights = np.array([0.2, 0.25, 0.4, 0.15], dtype=np.float)
msle = mean_squared_log_error(y_true, y_pred, sample_weight=weights)
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
sample_weight=weights)
assert_almost_equal(msle, msle2, decimal=2)
@pytest.mark.parametrize('use_handle', [True, False])
def test_entropy(use_handle):
handle, stream = get_handle(use_handle)
# The outcome of a fair coin is the most uncertain:
# in base 2 the result is 1 (One bit of entropy).
cluster = np.array([0, 1], dtype=np.int32)
assert_almost_equal(entropy(cluster, base=2., handle=handle), 1.)
# The outcome of a biased coin is less uncertain:
cluster = np.array(([0] * 9) + [1], dtype=np.int32)
assert_almost_equal(entropy(cluster, base=2., handle=handle), 0.468995593)
# base e
assert_almost_equal(entropy(cluster, handle=handle), 0.32508297339144826)
@pytest.mark.parametrize('n_samples', [50, stress_param(500000)])
@pytest.mark.parametrize('base', [None, 2, 10, 50])
@pytest.mark.parametrize('use_handle', [True, False])
def test_entropy_random(n_samples, base, use_handle):
if has_scipy():
from scipy.stats import entropy as sp_entropy
else:
pytest.skip('Skipping test_entropy_random because Scipy is missing')
handle, stream = get_handle(use_handle)
clustering, _, _, _ = \
generate_random_labels(lambda rng: rng.randint(0, 1000, n_samples))
    # generate unnormalized probabilities from clustering
pk = np.bincount(clustering)
# scipy's entropy uses probabilities
sp_S = sp_entropy(pk, base=base)
# we use a clustering
S = entropy(np.array(clustering, dtype=np.int32), base, handle=handle)
assert_almost_equal(S, sp_S, decimal=2)
def test_confusion_matrix():
y_true = cp.array([2, 0, 2, 2, 0, 1])
y_pred = cp.array([0, 0, 2, 2, 0, 2])
cm = confusion_matrix(y_true, y_pred)
ref = cp.array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
cp.testing.assert_array_equal(cm, ref)
def test_confusion_matrix_binary():
y_true = cp.array([0, 1, 0, 1])
y_pred = cp.array([1, 1, 1, 0])
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
ref = cp.array([0, 2, 1, 1])
cp.testing.assert_array_equal(ref, cp.array([tn, fp, fn, tp]))
@pytest.mark.parametrize('n_samples', [50, 3000, stress_param(500000)])
@pytest.mark.parametrize('dtype', [np.int32, np.int64])
@pytest.mark.parametrize('problem_type', ['binary', 'multiclass'])
def test_confusion_matrix_random(n_samples, dtype, problem_type):
upper_range = 2 if problem_type == 'binary' else 1000
y_true, y_pred, _, _ = generate_random_labels(
lambda rng: rng.randint(0, upper_range, n_samples).astype(dtype))
cm = confusion_matrix(y_true, y_pred)
ref = sk_confusion_matrix(y_true, y_pred)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.parametrize(
"normalize, expected_results",
[('true', 0.333333333),
('pred', 0.333333333),
('all', 0.1111111111),
(None, 2)]
)
def test_confusion_matrix_normalize(normalize, expected_results):
y_test = cp.array([0, 1, 2] * 6)
y_pred = cp.array(list(chain(*permutations([0, 1, 2]))))
cm = confusion_matrix(y_test, y_pred, normalize=normalize)
cp.testing.assert_allclose(cm, cp.array(expected_results))
@pytest.mark.parametrize('labels', [(0, 1),
(2, 1),
(2, 1, 4, 7),
(2, 20)])
def test_confusion_matrix_multiclass_subset_labels(labels):
y_true, y_pred, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 3, 10).astype(np.int32))
ref = sk_confusion_matrix(y_true, y_pred, labels=labels)
labels = cp.array(labels, dtype=np.int32)
cm = confusion_matrix(y_true, y_pred, labels=labels)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.parametrize('n_samples', [50, 3000, stress_param(500000)])
@pytest.mark.parametrize('dtype', [np.int32, np.int64])
@pytest.mark.parametrize('weights_dtype', ['int', 'float'])
def test_confusion_matrix_random_weights(n_samples, dtype, weights_dtype):
y_true, y_pred, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 10, n_samples).astype(dtype))
if weights_dtype == 'int':
sample_weight = np.random.RandomState(0).randint(0, 10, n_samples)
else:
sample_weight = np.random.RandomState(0).rand(n_samples)
cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
ref = sk_confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
def test_roc_auc_score():
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0.1, 0.4, 0.35, 0.8])
assert_almost_equal(roc_auc_score(y_true, y_pred),
sklearn_roc_auc_score(y_true, y_pred))
y_true = np.array([0, 0, 1, 1, 0])
y_pred = np.array([0.8, 0.4, 0.4, 0.8, 0.8])
assert_almost_equal(roc_auc_score(y_true, y_pred),
sklearn_roc_auc_score(y_true, y_pred))
@pytest.mark.parametrize('n_samples', [50, 500000])
@pytest.mark.parametrize('dtype', [np.int32, np.int64, np.float32, np.float64])
def test_roc_auc_score_random(n_samples, dtype):
y_true, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 2, n_samples).astype(dtype))
y_pred, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 1000, n_samples).astype(dtype))
auc = roc_auc_score(y_true, y_pred)
skl_auc = sklearn_roc_auc_score(y_true, y_pred)
assert_almost_equal(auc, skl_auc)
def test_roc_auc_score_at_limits():
y_true = np.array([0., 0., 0.], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("roc_auc_score cannot be used when "
"only one class present in y_true. ROC AUC score "
"is not defined in that case.")
with pytest.raises(ValueError, match=err_msg):
roc_auc_score(y_true, y_pred)
y_true = np.array([0., 0.5, 1.0], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("Continuous format of y_true "
"is not supported.")
with pytest.raises(ValueError, match=err_msg):
roc_auc_score(y_true, y_pred)
def test_precision_recall_curve():
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
precision_using_sk, recall_using_sk, thresholds_using_sk = \
sklearn_precision_recall_curve(
y_true, y_score)
precision, recall, thresholds = precision_recall_curve(
y_true, y_score)
assert array_equal(precision, precision_using_sk)
assert array_equal(recall, recall_using_sk)
assert array_equal(thresholds, thresholds_using_sk)
def test_precision_recall_curve_at_limits():
y_true = np.array([0., 0., 0.], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("precision_recall_curve cannot be used when "
"y_true is all zero.")
with pytest.raises(ValueError, match=err_msg):
precision_recall_curve(y_true, y_pred)
y_true = np.array([0., 0.5, 1.0], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("Continuous format of y_true "
"is not supported.")
with pytest.raises(ValueError, match=err_msg):
precision_recall_curve(y_true, y_pred)
@pytest.mark.parametrize('n_samples', [50, 500000])
@pytest.mark.parametrize('dtype', [np.int32, np.int64, np.float32, np.float64])
def test_precision_recall_curve_random(n_samples, dtype):
y_true, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 2, n_samples).astype(dtype))
y_score, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 1000, n_samples).astype(dtype))
precision_using_sk, recall_using_sk, thresholds_using_sk = \
sklearn_precision_recall_curve(
y_true, y_score)
precision, recall, thresholds = precision_recall_curve(
y_true, y_score)
assert array_equal(precision, precision_using_sk)
assert array_equal(recall, recall_using_sk)
assert array_equal(thresholds, thresholds_using_sk)
def test_log_loss():
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0.1, 0.4, 0.35, 0.8])
assert_almost_equal(log_loss(y_true, y_pred),
sklearn_log_loss(y_true, y_pred))
y_true = np.array([0, 0, 1, 1, 0])
y_pred = np.array([0.8, 0.4, 0.4, 0.8, 0.8])
assert_almost_equal(log_loss(y_true, y_pred),
sklearn_log_loss(y_true, y_pred))
@pytest.mark.parametrize('n_samples', [500, 500000])
@pytest.mark.parametrize('dtype', [np.int32, np.int64, np.float32, np.float64])
def test_log_loss_random(n_samples, dtype):
y_true, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 10, n_samples).astype(dtype))
y_pred, _, _, _ = generate_random_labels(
lambda rng: rng.rand(n_samples, 10))
assert_almost_equal(log_loss(y_true, y_pred),
sklearn_log_loss(y_true, y_pred))
def test_log_loss_at_limits():
y_true = np.array([0., 1., 2.], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("The shape of y_pred doesn't "
"match the number of classes")
with pytest.raises(ValueError, match=err_msg):
log_loss(y_true, y_pred)
y_true = np.array([0., 0.5, 1.0], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("'y_true' can only have integer values")
with pytest.raises(ValueError, match=err_msg):
log_loss(y_true, y_pred)
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS)
@pytest.mark.parametrize("matrix_size", [(5, 4), (1000, 3), (2, 10),
(500, 400)])
@pytest.mark.parametrize("is_col_major", [True, False])
def test_pairwise_distances(metric: str, matrix_size, is_col_major):
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
def prep_array(array):
return np.asfortranarray(array) if is_col_major else array
    # For fp64, compare at 10 decimals, (5 places less than the ~15 max)
compare_precision = 10
# Compare to sklearn, single input
X = prep_array(rng.random_sample(matrix_size))
S = pairwise_distances(X, metric=metric)
S2 = sklearn_pairwise_distances(X, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, double input with same dimensions
Y = X
S = pairwise_distances(X, Y, metric=metric)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # Compare single and double inputs to each other
S = pairwise_distances(X, metric=metric)
S2 = pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, with Y dim != X dim
Y = prep_array(rng.random_sample((2, matrix_size[1])))
S = pairwise_distances(X, Y, metric=metric)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Change precision of one parameter
Y = np.asfarray(Y, dtype=np.float32)
S = pairwise_distances(X, Y, metric=metric)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # For fp32, compare at 2 decimals, (5 places less than the ~7 max)
compare_precision = 2
# Change precision of both parameters to float
X = np.asfarray(X, dtype=np.float32)
Y = np.asfarray(Y, dtype=np.float32)
S = pairwise_distances(X, Y, metric=metric)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Test sending an int type with convert_dtype=True
Y = prep_array(rng.randint(10, size=Y.shape))
S = pairwise_distances(X, Y, metric=metric, convert_dtype=True)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Test that uppercase on the metric name throws an error.
with pytest.raises(ValueError):
pairwise_distances(X, Y, metric=metric.capitalize())
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS)
@pytest.mark.parametrize("matrix_size", [
unit_param((1000, 100)),
quality_param((2000, 1000)),
stress_param((10000, 10000))])
def test_pairwise_distances_sklearn_comparison(metric: str, matrix_size):
# Test larger sizes to sklearn
rng = np.random.RandomState(1)
element_count = matrix_size[0] * matrix_size[1]
X = rng.random_sample(matrix_size)
Y = rng.random_sample(matrix_size)
# For fp64, compare at 10 decimals, (5 places less than the ~15 max)
compare_precision = 10
# Compare to sklearn, fp64
S = pairwise_distances(X, Y, metric=metric)
if (element_count <= 2000000):
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# For fp32, compare at 4 decimals, (3 places less than the ~7 max)
compare_precision = 4
X = np.asfarray(X, dtype=np.float32)
Y = np.asfarray(Y, dtype=np.float32)
# Compare to sklearn, fp32
S = pairwise_distances(X, Y, metric=metric)
if (element_count <= 2000000):
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS)
def test_pairwise_distances_one_dimension_order(metric: str):
    # Test the pairwise_distances helper function for 1-dimensional cases which
# can break down when using a size of 1 for either dimension
rng = np.random.RandomState(2)
Xc = rng.random_sample((1, 4))
Yc = rng.random_sample((10, 4))
Xf = np.asfortranarray(Xc)
Yf = np.asfortranarray(Yc)
# For fp64, compare at 13 decimals, (2 places less than the ~15 max)
compare_precision = 13
# Compare to sklearn, C/C order
S = pairwise_distances(Xc, Yc, metric=metric)
S2 = sklearn_pairwise_distances(Xc, Yc, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, C/F order
S = pairwise_distances(Xc, Yf, metric=metric)
S2 = sklearn_pairwise_distances(Xc, Yf, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, F/C order
S = pairwise_distances(Xf, Yc, metric=metric)
S2 = sklearn_pairwise_distances(Xf, Yc, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, F/F order
S = pairwise_distances(Xf, Yf, metric=metric)
S2 = sklearn_pairwise_distances(Xf, Yf, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
    # Switch which input has the single-sample dimension
    Xc = rng.random_sample((10, 4))
    Yc = rng.random_sample((1, 4))
Xf = np.asfortranarray(Xc)
Yf = np.asfortranarray(Yc)
# Compare to sklearn, C/C order
S = pairwise_distances(Xc, Yc, metric=metric)
S2 = sklearn_pairwise_distances(Xc, Yc, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, C/F order
S = pairwise_distances(Xc, Yf, metric=metric)
S2 = sklearn_pairwise_distances(Xc, Yf, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, F/C order
S = pairwise_distances(Xf, Yc, metric=metric)
S2 = sklearn_pairwise_distances(Xf, Yc, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, F/F order
S = pairwise_distances(Xf, Yf, metric=metric)
S2 = sklearn_pairwise_distances(Xf, Yf, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
@pytest.mark.parametrize("metric", ["haversine", "nan_euclidean"])
def test_pairwise_distances_unsupported_metrics(metric):
rng = np.random.RandomState(3)
X = rng.random_sample((5, 4))
with pytest.raises(ValueError):
pairwise_distances(X, metric=metric)
def test_pairwise_distances_exceptions():
rng = np.random.RandomState(4)
X_int = rng.randint(10, size=(5, 4))
X_double = rng.random_sample((5, 4))
X_float = np.asfarray(X_double, dtype=np.float32)
X_bool = rng.choice([True, False], size=(5, 4))
# Test int inputs (only float/double accepted at this time)
with pytest.raises(TypeError):
pairwise_distances(X_int, metric="euclidean")
# Test second int inputs (should not have an exception with
# convert_dtype=True)
pairwise_distances(X_double, X_int, metric="euclidean")
# Test bool inputs (only float/double accepted at this time)
with pytest.raises(TypeError):
pairwise_distances(X_bool, metric="euclidean")
# Test sending different types with convert_dtype=False
with pytest.raises(TypeError):
pairwise_distances(X_double, X_float, metric="euclidean",
convert_dtype=False)
# Invalid metric name
with pytest.raises(ValueError):
pairwise_distances(X_double, metric="Not a metric")
# Invalid dimensions
X = rng.random_sample((5, 4))
Y = rng.random_sample((5, 7))
with pytest.raises(ValueError):
pairwise_distances(X, Y, metric="euclidean")
@pytest.mark.parametrize("input_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("output_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("use_global", [True, False])
def test_pairwise_distances_output_types(input_type, output_type, use_global):
# Test larger sizes to sklearn
rng = np.random.RandomState(5)
X = rng.random_sample((100, 100))
Y = rng.random_sample((100, 100))
if input_type == "cudf":
X = cudf.DataFrame(X)
Y = cudf.DataFrame(Y)
elif input_type == "cupy":
X = cp.asarray(X)
Y = cp.asarray(Y)
# Set to None if we are using the global object
output_type_param = None if use_global else output_type
# Use the global manager object. Should do nothing unless use_global is set
with cuml.using_output_type(output_type):
# Compare to sklearn, fp64
S = pairwise_distances(X, Y, metric="euclidean",
output_type=output_type_param)
if output_type == "input":
assert isinstance(S, type(X))
elif output_type == "cudf":
assert isinstance(S, cudf.DataFrame)
elif output_type == "numpy":
assert isinstance(S, np.ndarray)
elif output_type == "cupy":
assert isinstance(S, cp.core.core.ndarray)
|
the-stack_0_1071 | #
# Collective Knowledge: CK-powered Caffe crowdbenchmarking (very early prototyping)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, [email protected], http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
line='================================================================'
ck_url='http://cknowledge.org/repo/web.php?native_action=show&native_module_uoa=program.optimization&scenario=1eb2f50d4620903e'
ck_url1='http://cknowledge.org/repo/web.php?wcid=experiment.bench.dnn:'
ffstat='ck-stat-flat-characteristics.json'
ffmin='ck-stat-flat-min.json'
form_name='wa_web_form'
onchange='document.'+form_name+'.submit();'
hextra='<i><center>\n'
hextra+=' [ <a href="http://cKnowledge.org">CK project website</a> ], '
hextra+=' [ <a href="https://github.com/mlcommons/ck-mlops">CK automation recipes for portable MLOps</a> ], '
hextra+=' [ <a href="https://en.wikipedia.org/wiki/Collective_Knowledge_(software)">Wikipedia</a> ] \n'
hextra+='</center></i>\n'
hextra+='<br>\n'
selector=[{'name':'Type', 'key':'dnn_type'},
{'name':'DNN engine', 'key':'dnn_engine_name'},
{'name':'Model', 'key':'nn_type'},
{'name':'Platform', 'key':'plat_name', 'new_line':'yes'},
{'name':'CPU', 'key':'cpu_name'},
{'name':'OS', 'key':'os_name', 'new_line':'yes'},
{'name':'GPGPU', 'key':'gpgpu_name'}]
replay_clean_vars=['no_compile','host_os','device_id']
replay_clean_env_vars=['CK_CAFFE_MODEL','CK_CAFFE_MODEL_FILE','CK_ENV_MODEL_CAFFE_WEIGHTS']
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# crowdsource these experiments
def crowdsource(i):
"""
Input: {
(local) - if 'yes', local crowd-benchmarking, instead of public
(user) - force different user ID/email for demos
(choices) - force different choices to program pipeline
              (repetitions) - statistical repetitions (default=3), for now statistical analysis is not used (TBD)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import copy
import os
# Setting output
o=i.get('out','')
oo=''
if o=='con': oo='con'
quiet=i.get('quiet','')
er=i.get('exchange_repo','')
if er=='': er=ck.cfg['default_exchange_repo_uoa']
esr=i.get('exchange_subrepo','')
if esr=='': esr=ck.cfg['default_exchange_subrepo_uoa']
if i.get('local','')=='yes':
er='local'
esr=''
la=i.get('local_autotuning','')
repetitions=i.get('repetitions','')
if repetitions=='': repetitions=3
repetitions=int(repetitions)
record='no'
# Check if any input has . and convert to dict
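    # (illustrative: a dotted CLI key such as 'env.CK_CAFFE_BATCH_SIZE' becomes
    #  the flat key '##env#CK_CAFFE_BATCH_SIZE' and is merged back into 'i',
    #  i.e. i['env']['CK_CAFFE_BATCH_SIZE'] is set to the original value)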
for k in list(i.keys()):
if k.find('.')>0:
v=i[k]
kk='##'+k.replace('.','#')
del(i[k])
r=ck.set_by_flat_key({'dict':i, 'key':kk, 'value':v})
if r['return']>0: return r
choices=i.get('choices',{})
env=i.get('env',{})
if 'env' not in choices: choices['env']={}
r=ck.merge_dicts({'dict1':choices['env'], 'dict2':copy.deepcopy(env)})
env={}
xchoices=copy.deepcopy(choices)
# Get user
user=''
mcfg={}
ii={'action':'load',
'module_uoa':'module',
'data_uoa':cfg['module_deps']['program.optimization']}
r=ck.access(ii)
if r['return']==0:
mcfg=r['dict']
dcfg={}
ii={'action':'load',
'module_uoa':mcfg['module_deps']['cfg'],
'data_uoa':mcfg['cfg_uoa']}
r=ck.access(ii)
if r['return']>0 and r['return']!=16: return r
if r['return']!=16:
dcfg=r['dict']
user=dcfg.get('user_email','')
# Initialize local environment for program optimization ***********************************************************
pi=i.get('platform_info',{})
if len(pi)==0:
ii=copy.deepcopy(i)
ii['action']='initialize'
ii['module_uoa']=cfg['module_deps']['program.optimization']
ii['data_uoa']='caffe'
ii['exchange_repo']=er
ii['exchange_subrepo']=esr
ii['skip_welcome']='yes'
ii['skip_log_wait']='yes'
ii['crowdtuning_type']='caffe-crowd-benchmarking'
r=ck.access(ii)
if r['return']>0: return r
pi=r['platform_info']
user=r.get('user','')
hos=pi['host_os_uoa']
hosd=pi['host_os_dict']
tos=pi['os_uoa']
tosd=pi['os_dict']
tbits=tosd.get('bits','')
remote=tosd.get('remote','')
tdid=pi['device_id']
features=pi.get('features',{})
fplat=features.get('platform',{})
fos=features.get('os',{})
fcpu=features.get('cpu',{})
fgpu=features.get('gpu',{})
plat_name=fplat.get('name','')
plat_uid=features.get('platform_uid','')
os_name=fos.get('name','')
os_uid=features.get('os_uid','')
cpu_name=fcpu.get('name','')
if cpu_name=='': cpu_name='unknown-'+fcpu.get('cpu_abi','')
cpu_uid=features.get('cpu_uid','')
gpu_name=fgpu.get('name','')
gpgpu_name=''
sn=fos.get('serial_number','')
# Ask for cmd
tp=['cpu', 'cuda', 'cuda_fp16', 'opencl']
ck.out(line)
ck.out('Select Caffe library type:')
ck.out('')
r=ck.access({'action':'select_list',
'module_uoa':cfg['module_deps']['choice'],
'choices':tp})
if r['return']>0: return r
xtp=r['choice']
xtp16=''
if xtp=='cuda_fp16':
xtp='cuda'
xtp16='yes'
android=False
if 'android' in tos: android=True
# Get extra platform features if "cuda" or "opencl"
if android:
run_cmd='default'
prog_uoa='caffe-time'
else:
run_cmd='time_cpu'
prog_uoa='caffe'
gpgpu_uid=''
if xtp=='cuda' or xtp=='opencl':
r=ck.access({'action':'detect',
'module_uoa':cfg['module_deps']['platform.gpgpu'],
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'type':xtp,
'share':'yes',
'exchange_repo':er,
'exchange_subrepo':esr})
if r['return']>0: return r
gfeat=r.get('features',{})
gpgpus=gfeat.get('gpgpu',[])
if len(gpgpus)>0:
gpgpu_name=gpgpus[0].get('gpgpu',{}).get('name','')
gpgpu_uid=gpgpus[0].get('gpgpu_uoa','')
if android:
if xtp!='opencl':
return {'return':1, 'error':'can\'t crowdbenchmark this type of DNN engine on selected target platform'}
run_cmd='default'
prog_uoa='caffe-time-opencl'
else:
run_cmd='time_gpu'
if xtp16=='yes': run_cmd='time_gpu_fp16'
# Get deps from caffe program
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['program'],
'data_uoa':prog_uoa})
if r['return']>0: return r
pp=r['path']
# lib_dep=r['dict']['run_deps']['lib-caffe']
# deps={'lib-caffe':lib_dep}
# Check environment for selected type
# r=ck.access({'action':'resolve',
# 'module_uoa':cfg['module_deps']['env'],
# 'deps':deps,
# 'host_os':hos,
# 'target_os':tos,
# 'device_id':tdid,
# 'out':o})
# if r['return']>0: return r
# deps=r['deps']
# Prepare CK pipeline for a given workload
ii={'action':'pipeline',
'module_uoa':cfg['module_deps']['program'],
'data_uoa':prog_uoa,
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'skip_target':'yes',
'prepare':'yes',
'env':env,
'choices':choices,
# 'dependencies':deps,
'cmd_key':run_cmd,
'no_state_check':'yes',
'no_compiler_description':'yes',
'skip_info_collection':'yes',
'skip_calibration':'yes',
'cpu_freq':'max',
'gpu_freq':'max',
'env_speed':'yes',
'energy':'no',
'skip_print_timers':'yes',
'generate_rnd_tmp_dir':'no',
'out':oo}
rr=ck.access(ii)
if rr['return']>0: return rr
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
ready=rr.get('ready','')
if ready!='yes':
return {'return':11, 'error':'couldn\'t prepare universal CK program workflow'}
state=rr['state']
tmp_dir=state.get('tmp_dir','')
if tmp_dir=='': tmp_dir='tmp' # usually when no_compile
deps=rr['dependencies'] # resolved deps
# Clean pipeline
if 'ready' in rr: del(rr['ready'])
if 'fail' in rr: del(rr['fail'])
if 'return' in rr: del(rr['return'])
# Prepare high-level experiment meta
meta={'cpu_name':cpu_name,
'os_name':os_name,
'plat_name':plat_name,
'gpu_name':gpu_name,
'dnn_type':xtp,
'gpgpu_name':gpgpu_name,
'cmd_key':run_cmd}
# Process deps
xdeps={}
xnn=''
xblas=''
for k in deps:
dp=deps[k]
ptags=dp.get('tags',[])
puoa=dp.get('package_uoa','')
if puoa=='':
puoa=dp.get('cus',{}).get('used_package_uoa','')
dname=dp.get('dict',{}).get('data_name','')
if k=='caffemodel':
xnn=dname
j1=xnn.rfind('(')
if j1>0:
xnn=xnn[j1+1:-1]
xdeps[k]={'name':dp.get('name',''), 'data_name':dname, 'ver':dp.get('ver',''), 'package_uoa':puoa, 'package_tags':ptags}
# versions of engine sub deps
dvers={}
mdep=deps['lib-caffe']
mdeps=mdep.get('dict',{}).get('deps',{})
for k in mdeps:
dvers[k]=mdeps[k].get('ver','')
meta['xversions']=dvers
meta['xdeps']=xdeps
meta['nn_type']=xnn
meta['choices']=xchoices
mmeta=copy.deepcopy(meta)
# Extra meta which is not used to search similar case ...
mmeta['platform_uid']=plat_uid
mmeta['os_uid']=os_uid
mmeta['cpu_uid']=cpu_uid
mmeta['gpgpu_uid']=gpgpu_uid
mmeta['user']=user
# Check if already exists (to aggregate stats)
aggregated_stats={}
rduid=''
found=False
if o=='con':
ck.out('')
ck.out('Checking if results already exists in a public repo (to aggregate statistics) ...')
# Find remote entry
ii={'action':'search',
'module_uoa':work['self_module_uid'],
'repo_uoa':er,
'remote_repo_uoa':esr,
'search_dict':{'meta':meta}}
rx=ck.access(ii)
if rx['return']>0: return rx
lst=rx['lst']
if len(lst)==1:
rduid=lst[0]['data_uid']
found=True
if o=='con':
ck.out('')
ck.out('Results found. Pre-loading aggregated stats from '+rduid+' ...')
# Load stats
rx=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'load_extra_json_files':[ffstat]})
if rx['return']==0:
aggregated_stats=rx.get('extra_json_files',{}).get(ffstat,{})
else:
ck.out('')
ck.out('WARNING: couldn\'t load data ('+rx['error']+')')
else:
rx=ck.gen_uid({})
if rx['return']>0: return rx
rduid=rx['data_uid']
# Run CK pipeline *****************************************************
pipeline=copy.deepcopy(rr)
if len(choices)>0:
r=ck.merge_dicts({'dict1':pipeline['choices'], 'dict2':xchoices})
if r['return']>0: return r
ii={'action':'autotune',
'module_uoa':cfg['module_deps']['pipeline'],
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'iterations':1,
'repetitions':repetitions,
'collect_all':'yes',
'process_multi_keys':['##characteristics#*'],
'tmp_dir':tmp_dir,
'pipeline':pipeline,
'stat_flat_dict':aggregated_stats,
"features_keys_to_process":["##choices#*"],
"record_params": {
"search_point_by_features":"yes"
},
'out':oo}
rrr=ck.access(ii)
if rrr['return']>0: return rrr
ls=rrr.get('last_iteration_output',{})
state=ls.get('state',{})
xchoices=copy.deepcopy(ls.get('choices',{}))
lsaf=rrr.get('last_stat_analysis',{}).get('dict_flat',{})
real_proto=xchoices.get('env',{}).get('CK_CAFFE_MODEL','') # to push to server
ddd={'meta':mmeta}
ddd['choices']=xchoices
features=ls.get('features',{})
deps=ls.get('dependencies',{})
fail=ls.get('fail','')
fail_reason=ls.get('fail_reason','')
ch=ls.get('characteristics',{})
# Save pipeline
ddd['state']={'fail':fail, 'fail_reason':fail_reason}
ddd['characteristics']=ch
ddd['user']=user
# Add files
ddd['file_stat']=ffstat
if real_proto!='':
ddd['file_model_topology']=os.path.basename(real_proto)
if not found:
if o=='con':
ck.out('')
ck.out('Saving results to the remote public repo ('+rduid+') ...')
# Update meta
rx=ck.access({'action':'add',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'dict':ddd,
'sort_keys':'yes'})
if rx['return']>0: return rx
# Push real proto
if real_proto!='':
if o=='con':
ck.out('')
ck.out('Pushing prototxt to the remote public repo ...')
rx=ck.access({'action':'push',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'filename':real_proto,
'overwrite':'yes'})
if rx['return']>0: return rx
# Push statistical characteristics
if o=='con':
ck.out('')
ck.out('Pushing file with statistics to server ...')
fstat=os.path.join(pp,tmp_dir,ffstat)
r=ck.save_json_to_file({'json_file':fstat, 'dict':lsaf, 'sort_keys':'yes'})
if r['return']>0: return r
rx=ck.access({'action':'push',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'filename':fstat,
'overwrite':'yes'})
if rx['return']>0: return rx
os.remove(fstat)
# Info
if o=='con':
ck.out('')
        ck.out('Successfully recorded results in remote repo (Entry UID='+rduid+')')
# Check host URL prefix and default module/action
url=ck_url+'&highlight_uid='+rduid
ck.out('')
ck.out('You can see your results at the following URL:')
ck.out('')
ck.out(url)
return {'return':0}
##############################################################################
# show results
def show(i):
"""
Input: {
(crowd_module_uoa) - if rendered from experiment crowdsourcing
(crowd_key) - add extra name to Web keys to avoid overlapping with original crowdsourcing HTML
(crowd_on_change) - reuse onchange doc from original crowdsourcing HTML
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
st=''
cmuoa=i.get('crowd_module_uoa','')
ckey=i.get('crowd_key','')
conc=i.get('crowd_on_change','')
if conc=='':
conc=onchange
hi_uid=i.get('highlight_uid','')
h=''
# h='<hr>\n'
h+='<center>\n'
h+='\n\n<script language="JavaScript">function copyToClipboard (text) {window.prompt ("Copy to clipboard: Ctrl+C, Enter", text);}</script>\n\n'
# h+='<h2>Aggregated results from Caffe crowd-benchmarking (time, accuracy, energy, cost, ...)</h2>\n'
h+=hextra
# Check host URL prefix and default module/action
rx=ck.access({'action':'form_url_prefix',
'module_uoa':'wfe',
'host':i.get('host',''),
'port':i.get('port',''),
'template':i.get('template','')})
if rx['return']>0: return rx
url0=rx['url']
template=rx['template']
url=url0
action=i.get('action','')
muoa=i.get('module_uoa','')
st=''
url+='action=index&module_uoa=wfe&native_action='+action+'&'+'native_module_uoa='+muoa
url1=url
# List entries
ii={'action':'search',
'module_uoa':work['self_module_uid'],
'add_meta':'yes'}
if cmuoa!='':
ii['module_uoa']=cmuoa
r=ck.access(ii)
if r['return']>0: return r
lst=r['lst']
# Check unique entries
choices={}
wchoices={}
for q in lst:
d=q['meta']
meta=d.get('meta',{})
for kk in selector:
kx=kk['key']
k=ckey+kx
if k not in choices:
choices[k]=[]
wchoices[k]=[{'name':'','value':''}]
kflat=kk.get('flat_key','')
if kflat=='': kflat='##'+kx
rx=ck.get_by_flat_key({'dict':meta, 'key':kflat})
if rx['return']>0: return rx
v=rx['value']
if v==None: v=''
if v!='':
if v not in choices[k]:
choices[k].append(v)
wchoices[k].append({'name':v, 'value':v})
# Prepare query div ***************************************************************
if cmuoa=='':
# Start form + URL (even when viewing entry)
r=ck.access({'action':'start_form',
'module_uoa':cfg['module_deps']['wfe'],
'url':url1,
'name':form_name})
if r['return']>0: return r
h+=r['html']
for kk in selector:
kx=kk['key']
k=ckey+kx
n=kk['name']
nl=kk.get('new_line','')
if nl=='yes':
h+='<br>\n<div id="ck_entries_space8"></div>\n'
v=''
if i.get(k,'')!='':
v=i[k]
kk['value']=v
# Show hardware
ii={'action':'create_selector',
'module_uoa':cfg['module_deps']['wfe'],
'data':wchoices.get(k,[]),
'name':k,
'onchange':conc,
'skip_sort':'no',
'selected_value':v}
r=ck.access(ii)
if r['return']>0: return r
h+='<b>'+n+':</b> '+r['html'].strip()+'\n'
# Check hidden
if hi_uid!='':
h+='<input type="hidden" name="highlight_uid" value="'+hi_uid+'">\n'
h+='<br><br>'
# Prune list
plst=[]
for q in lst:
d=q['meta']
meta=d.get('meta',{})
# Check selector
skip=False
for kk in selector:
k=kk['key']
n=kk['name']
v=kk.get('value','')
kflat=kk.get('flat_key','')
if kflat=='': kflat='##'+k
rx=ck.get_by_flat_key({'dict':meta, 'key':kflat})
if rx['return']>0: return rx
vxx=rx['value']
if vxx==None: vxx=''
if v!='' and vxx!=v:
skip=True
if not skip:
plst.append(q)
# Check if too many
lplst=len(plst)
if lplst==0:
h+='<b>No results found!</b>'
return {'return':0, 'html':h, 'style':st}
elif lplst>300:
h+='<b>Too many entries to show ('+str(lplst)+') - please, prune list further!</b>'
return {'return':0, 'html':h, 'style':st}
# Prepare table
h+='<table border="1" cellpadding="7" cellspacing="0">\n'
ha='align="center" valign="top"'
hb='align="left" valign="top"'
h+=' <tr style="background-color:#dddddd">\n'
h+=' <td '+ha+'><b>#</b></td>\n'
h+=' <td '+ha+'><b>Platform</b></td>\n'
h+=' <td '+ha+'><b>OS</b></td>\n'
h+=' <td '+ha+'><b>CPU</b></td>\n'
h+=' <td '+ha+'><b>GPGPU</b></td>\n'
h+=' <td '+ha+'><b>Type</b></td>\n'
h+=' <td '+ha+'><b>DNN engine</b></td>\n'
h+=' <td '+ha+'><b>Model</b></td>\n'
h+=' <td '+ha+'><b>Choices (env)</b></td>\n'
h+=' <td '+ha+'><b>FWBW<br>min time</b><br><br>(exp time)<br>stat. repetitions</td>\n'
h+=' <td '+ha+'><b>FW</b></td>\n'
h+=' <td '+ha+'><b>BW</b></td>\n'
h+=' <td '+ha+'><b>Per layer</b></td>\n'
h+=' <td '+ha+'><b>HW costs</td>\n'
h+=' <td '+ha+'><b>All usage costs (preparation, training, inference, errors, etc)</td>\n'
h+=' <td '+ha+'><b>Model size</b></td>\n'
h+=' <td '+ha+'><b><a href="https://github.com/dividiti/ck-caffe/blob/master/script/explore-accuracy/explore_accuracy.20160808.ipynb">Model accuracy on ImageNet</a></td>\n'
h+=' <td '+ha+'><b>Model topology and parameters</td>\n'
h+=' <td '+ha+'><b>Power consumption (W)<br>min / max</td>\n'
h+=' <td '+ha+'><b>Acoustic noise (dB)<br>min / max</td>\n'
h+=' <td '+ha+'><b>Memory usage (MB)</td>\n'
h+=' <td '+ha+'><b>Bug detected?</b></td>\n'
h+=' <td '+ha+'><b>User</b></td>\n'
h+=' <td '+ha+'><b>Replay</b></td>\n'
h+=' <tr>\n'
# Dictionary to hold target meta
tm={}
ix=0
bgraph={'0':[]} # Just for graph demo
if hi_uid!='':
bgraph['1']=[]
# Load min stat
for q in plst:
pmin=os.path.join(q['path'],ffmin)
dx={'##characteristics#run#time_fwbw_ms#min':1e99}
if os.path.isfile(pmin):
rx=ck.load_json_file({'json_file':pmin})
if rx['return']==0:
dx=rx['dict']
# Fix
x=dx.get('##characteristics#run#time_fwbw_ms#min','')
if x==None or x=='' or x>50000:
dx['##characteristics#run#time_fwbw_ms#min']=1e99
if q.get('meta',{}).get('state',{}).get('fail_reason','')=='':
q['meta']['state']['fail']='yes'
q['meta']['state']['fail_reason']='strange timing'
q['min_stat']=dx
# Sort
splst=sorted(plst, key=lambda x: x.get('min_stat',{}).get('##characteristics#run#time_fwbw_ms#min',0))
# splst=sorted(plst, key=lambda x: x.get('meta',{}).get('characteristics',{}).get('run',{}).get('time_fwbw_ms',0))
for q in splst:
ix+=1
duid=q['data_uid']
path=q['path']
d=q['meta']
# Characteristics
# Check if has statistics
dstat={}
fstat=os.path.join(path,'ck-stat-flat-characteristics.json')
if os.path.isfile(fstat):
r=ck.load_json_file({'json_file':fstat, 'dict':dstat})
if r['return']>0: return r
dstat=r['dict']
x0=dstat.get("##characteristics#run#time_fwbw_ms#min",None)
meta=d.get('meta',{})
choices=d.get('choices',{})
env=choices.get('env',{})
params=choices.get('params',{}).get('params',{})
xdeps=meta.get('xdeps',{})
d_model=xdeps.get('caffemodel',{})
d_model_name=d_model.get('data_name','')
d_model_package_uoa=d_model.get('package_uoa','')
d_model_ver=d_model.get('ver','')
d_engine=xdeps.get('lib-caffe',{})
d_engine_name=d_engine.get('data_name','')
d_engine_package_uoa=d_engine.get('package_uoa','')
d_engine_ver=d_engine.get('ver','')
tp=meta.get('dnn_type','')
nn=meta.get('nn_type','')
plat_name=meta.get('plat_name','')
cpu_name=meta.get('cpu_name','')
os_name=meta.get('os_name','')
gpgpu_name=meta.get('gpgpu_name','')
plat_uid=meta.get('platform_uid','')
cpu_uid=meta.get('cpu_uid','')
os_uid=meta.get('os_uid','')
gpu_uid=meta.get('gpu_uid','')
gpgpu_uid=meta.get('gpgpu_uid','')
user=meta.get('user','')
te=d.get('characteristics',{}).get('run',{})
# bgc='afffaf'
bgc='dfffdf'
fail=d.get('state',{}).get('fail','')
fail_reason=d.get('state',{}).get('fail_reason','')
if fail=='yes':
if fail_reason=='': fail_reason='yes'
bgc='ffafaf'
elif hi_uid!='' and duid==hi_uid:
bgc='9fff9f'
# bgraph['0'].append([ix,None])
# bgraph['1'].append([ix,x0])
bg=' style="background-color:#'+bgc+';"'
h+=' <tr'+bg+'>\n'
# Number
h+=' <td '+ha+'><a name="'+duid+'">'+str(ix)+'</a></td>\n'
# Platform, etc ...
x=plat_name
if plat_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform']+':'+plat_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=os_name
if os_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform']+':'+os_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=cpu_name
if cpu_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform.cpu']+':'+cpu_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=gpgpu_name
if gpgpu_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform.gpgpu']+':'+gpgpu_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
# All files
uu1=work['self_module_uid']
if cmuoa!='': uu1=cmuoa
uu2=str(ix)+') <a href="'+url0+'&wcid='+uu1+':'+duid+'">'+duid+'</a>'
uu3='[ <a href="'+url0+'&wcid='+uu1+':'+duid+'">See raw files</a> ]<br><br>('+duid+')'
uu4=uu1+':'+duid
# Type
h+=' <td '+ha+'>'+tp+'</a></td>\n'
# Engine
x=d_engine_name
if d_engine_package_uoa!='':
x='<a href="'+url0+'&wcid=package:'+d_engine_package_uoa+'">'+x+'</a>'
if x!='' and d_engine_ver!='':
x+='\n<br><br>Version <b>'+d_engine_ver+'</b>'
# Versions
ver=''
dver=meta.get('xversions',{})
for dx in sorted(dver):
vx=dver[dx]
if vx!=None and vx!='':
ver+=dx+': '+str(dver[dx])+'\n'
ver=ver.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
if ver!='':
ver='<input type="button" class="ck_small_button" onClick="alert(\''+ver+'\');" value="See versions of all deps">'
h+=' <td '+ha+'>'+x+'<br><br>'+ver+'</td>\n'
# Model
x=nn
msize=''
mtop=''
mtop5=''
if d_model_package_uoa!='':
x='<a href="'+url0+'&wcid=package:'+d_model_package_uoa+'">'+x+'</a>'
# Load features
rx=ck.access({'action':'load',
'module_uoa':'package',
'data_uoa':d_model_package_uoa})
if rx['return']==0:
mft=rx['dict'].get('features',{})
msize=str(mft.get('model_size_mb',''))+' MB'
mtop=str(mft.get('accuracy',''))
mtop5=str(mft.get('accuracy_top5',''))
# if x!='' and d_model_ver!='':
# x+='\n<br><br>Version <b>'+d_model_ver+'</b>'
h+=' <td '+ha+'>'+x+'</td>\n'
# Choices (for now env)
# x='<table border="0" cellpadding="0" cellspacing="2">\n'
x=''
for k in sorted(env):
v=env[k]
x+=str(k)+'='+str(v)+'\n'
# x+='<tr><td>'+str(k)+'=</td><td>'+str(v)+'</td></tr>\n'
# x+='</table>\n'
# x=x.replace("'","\'").replace('"',"\\'").replace('\n','\\n')
x=x.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x1=''
if x!='':
if env.get('CK_CAFFE_BATCH_SIZE','')!='':
x1+='Batch size='+env['CK_CAFFE_BATCH_SIZE']+'<br><br>\n'
x1+='<input type="button" class="ck_small_button" onClick="alert(\''+x+'\');" value="View all">'
h+=' <td '+ha+'>'+x1+'</td>\n'
x=''
# Check if has stats
x0=dstat.get("##characteristics#run#time_fwbw_ms#min",None)
x0e=dstat.get("##characteristics#run#time_fwbw_ms#exp",None)
x1=dstat.get("##characteristics#run#time_fwbw_ms#center",None)
xr=dstat.get("##characteristics#run#time_fwbw_ms#repeats",None)
x2=dstat.get("##characteristics#run#time_fwbw_ms#halfrange",None)
x=''
if x0!=None:
x='<b>'+('%.0f'%x0)+' ms.</b>\n'
# x+='('+('%.0f'%x1)+' ± '+('%.0f'%x2)+' ms.)'
if x0e!=None and x2!=None:
x+='<br><br>('+('%.0f'%x0e)+' ± '+('%.0f'%x2)+' ms.)\n'
if xr!=None:
x+='<br><i>'+str(xr)+' repetitions</i>\n'
h+=' <td '+ha+' style="background-color:#afffaf">'+x+'</td>\n'
if fail=='yes': x0=0
bgraph['0'].append([ix,x0])
if fail!='yes' and x0!=None and duid!=hi_uid:
if hi_uid!='': bgraph['1'].append([ix,None])
x1=dstat.get("##characteristics#run#time_fw_ms#center",None)
x2=dstat.get("##characteristics#run#time_fw_ms#halfrange",None)
if x1!=None and x2!=None:
x=('%.0f'%x1)+' ± '+('%.0f'%x2)+' ms.'
h+=' <td '+ha+'>'+x+'</td>\n'
x1=dstat.get("##characteristics#run#time_bw_ms#center",None)
x2=dstat.get("##characteristics#run#time_bw_ms#halfrange",None)
if x1!=None and x2!=None:
x=('%.0f'%x1)+' ± '+('%.0f'%x2)+' ms.'
h+=' <td '+ha+'>'+x+'</td>\n'
# Check all characteristics
x=''
x5=''
for k in sorted(te):
v=te[k]
kx="##characteristics#run#"+k
kx1=dstat.get(kx+'#center',None)
kx2=dstat.get(kx+'#halfrange',None)
x6=''
if type(v)==int:
if kx1!=None and kx2!=None:
x6=str(kx1)+' +- '+str(kx2)
else:
x6=str(v)
elif type(v)==float:
if kx1!=None and kx2!=None:
x6=('%.1f'%kx1)+' +- '+('%.1f'%kx2)
else:
x6=('%.1f'%v)
if x6!='':
x5+=str(k)+'='+x6+'\n'
# Also layers
y5=''
for j in range(0,1000):
k1='##characteristics#run#per_layer_info@'+str(j)+'#direction#min'
k2='##characteristics#run#per_layer_info@'+str(j)+'#label#min'
k3='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#min'
k4='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#max'
k5='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#exp_allx'
v1=dstat.get(k1,'')
v2=dstat.get(k2,'')
v3=dstat.get(k3,'')
v4=dstat.get(k4,'')
v5=dstat.get(k5,[])
if v1!='' and v2!='' and v3!='' and v4!='':
v6=0
if len(v5)>0:
v6=v5[0]
xv3=''
xv4=''
xv5=''
if v3!='': xv3=('%.1f'%v3)
if v4!='': xv4=('%.1f'%v4)
if v6!='': xv6=('%.1f'%v6)
if y5=='': y5='Layers:\nName (direction): min time (ms.) ; expected time (ms.) ; max time (ms.)\n'
y5+='\n'+v2+' ('+v1+'): '+xv3+';'+xv6+';'+xv4
else:
break
y5=y5.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
if y5!='':
x+='<a href="'+ck_url1+duid+'">Stats per layer</a><br><br>\n'
x+='<input type="button" class="ck_small_button" onClick="alert(\''+y5+'\');" value="All layers as pop-up">'
# x5=x5.replace("'","\'").replace('"',"\\'").replace('\n','\\n')
x5=x5.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
if x5!='':
x+='<br><br><input type="button" class="ck_small_button" onClick="alert(\''+x5+'\');" value="CK vars">'
h+=' <td '+ha+'>'+x+'</td>\n'
# Get info about platform
hd={}
if plat_uid!='':
rh=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['platform'],
'data_uoa':plat_uid})
if rh['return']==0:
hd=rh['dict']
# Cost (take from platform meta)
hc='-'
if len(hd)>0:
costs=hd.get('features',{}).get('cost',[])
hc=''
for c in costs:
if hc!='': hc+='<br>\n'
hc+='<b>'+str(c.get('price',''))+' '+c.get('currency','')+ '</b> - '+c.get('desc','')+' ('+c.get('date','')+')'
h+=' <td '+ha+'>'+hc+'</a></td>\n'
# TBD: all other costs
h+=' <td '+ha+'></a></td>\n'
# Model size
h+=' <td '+ha+'>'+msize+'</td>\n'
# Accuracy
x=''
if mtop!='' and mtop5!='':
x=mtop+' / '+mtop5
# if nn=='bvlc, alexnet':
# x='0.568279 / 0.799501'
# elif nn=='bvlc, googlenet':
# x='0.689299 / 0.891441'
# elif nn=='deepscale, squeezenet, 1.1':
# x='0.583880 / 0.810123'
# elif nn=='deepscale, squeezenet, 1.0':
# x='0.576801 / 0.803903'
h+=' <td '+ha+'>'+x+'</td>\n'
# Model topology
x=''
fmt=d.get('file_model_topology','')
if fmt!='':
pfmt=os.path.join(path,fmt)
if os.path.isfile(pfmt):
x='<a href="'+url0+'&action=pull&common_action=yes&cid='+work['self_module_uid']+':'+duid+'&filename='+fmt+'">deploy.prototxt</a>\n'
h+=' <td '+ha+'>'+x+'</td>\n'
# Power consumption (TBD - real measurements)
x='-'
if len(hd)>0:
power=hd.get('features',{}).get('power_consumption',{})
if len(power)>0:
pmin=power.get('min','')
pmax=power.get('max','')
x=str(pmin)+' / '+str(pmax)
h+=' <td '+ha+'>'+x+'</a></td>\n'
# Acoustic noise (TBD - real measurements)
x='-'
if len(hd)>0:
power=hd.get('features',{}).get('acoustic_noise',{})
if len(power)>0:
pmin=power.get('min','')
pmax=power.get('max','')
x=str(pmin)+' / '+str(pmax)
h+=' <td '+ha+'>'+x+'</a></td>\n'
# Memory usage
x=''
mem=dstat.get("##characteristics#run#memory_mbytes#max",None)
if mem!=None:
x=str(int(mem))+' MB'
h+=' <td '+ha+'>'+x+'</td>\n'
# Crowdsourcing bug detection
x=fail_reason
if x=='':
x=''
else:
fail_reason=fail_reason.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x='Yes <input type="button" class="ck_small_button" onClick="alert(\''+fail_reason+'\');" value="Log">'
h+=' <td '+ha+'>'+x+'</td>\n'
h+=' <td '+ha+'><a href="'+url0+'&action=index&module_uoa=wfe&native_action=show&native_module_uoa=experiment.user">'+user+'</a></td>\n'
h+=' <td '+ha+'><input type="button" class="ck_small_button" onClick="copyToClipboard(\'ck replay '+uu4+' '+ck.cfg.get('add_extra_to_replay','')+'\');" value="Replay"><br><br>\n'
h+=' '+uu3+'</td>\n'
h+=' <tr>\n'
h+='</table>\n'
h+='</center>\n'
if cmuoa=='':
h+='</form>\n'
if len(bgraph['0'])>0:
ii={'action':'plot',
'module_uoa':cfg['module_deps']['graph'],
"table":bgraph,
"h_lines":[1.0],
"ymin":0,
"ignore_point_if_none":"yes",
"plot_type":"d3_2d_bars",
"display_y_error_bar":"no",
"title":"Powered by Collective Knowledge",
"x_ticks_period":10,
"axis_x_desc":"Experiment",
"axis_y_desc":"Neural network total time (ms.)",
"plot_grid":"yes",
"d3_div":"ck_interactive",
"image_width":"900",
"image_height":"400",
"wfe_url":url0}
r=ck.access(ii)
if r['return']==0:
x=r.get('html','')
if x!='':
st+=r.get('style','')
h+='<br>\n'
h+='<center>\n'
h+='<div id="ck_box_with_shadow" style="width:920px;">\n'
h+=' <div id="ck_interactive" style="text-align:center">\n'
h+=x+'\n'
h+=' </div>\n'
h+='</div>\n'
h+='</center>\n'
return {'return':0, 'html':h, 'style':st}
##############################################################################
# browse public results
def browse(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import webbrowser
ck.out('Opening web page '+ck_url+' ...')
webbrowser.open(ck_url)
return {'return':0}
##############################################################################
# show info for all layers
def html_viewer(i):
"""
Input: {
data_uoa - CK entry UOA to view
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
duoa=i.get('data_uoa','')
# Load entry
r=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
d=r['dict']
dchars=d.get('characteristics',{})
dchoices=d.get('choices',{})
dmeta=d.get('meta',{})
# Load stats
dstat={}
fstat=os.path.join(p,'ck-stat-flat-characteristics.json')
if os.path.isfile(fstat):
r=ck.load_json_file({'json_file':fstat, 'dict':dstat})
if r['return']>0: return r
dstat=r['dict']
# Prepare table
h=''
# h+='<hr>\n'
h+='<br>\n'
h+='<center>\n'
h+='<h2>DNN engine and model evaluation statistics per layer (crowd-tuning)</h2><br>\n'
h+='</center>\n'
xdeps=dmeta.get('xdeps',{})
lcaffe=xdeps.get('lib-caffe',{})
lmodel=xdeps.get('caffemodel',{})
# Prepare extra info
h+='<p>\n'
h+='<table border="1" cellpadding="8" cellspacing="0">\n'
h+=' <tr>\n'
h+=' <td><b>DNN engine name:</b></td>\n'
h+=' <td>'+lcaffe.get('data_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>DNN engine version:</b></td>\n'
h+=' <td>'+lcaffe.get('ver','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>DNN engine type:</b></td>\n'
h+=' <td>'+dmeta.get('dnn_type','')+'</td>\n'
h+=' </tr>\n'
x=''
dx=dmeta.get('xversions',{})
for k in sorted(dx):
v=dx[k]
if v!='':
if x!='': x+='<br>\n'
x+=k+'='+str(v)+'\n'
h+=' <tr>\n'
h+=' <td><b>DNN engine dependencies:</b></td>\n'
h+=' <td>'+x+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>DNN model name:</b></td>\n'
h+=' <td>'+lmodel.get('data_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>DNN model version:</b></td>\n'
h+=' <td>'+lmodel.get('ver','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>Batch size:</b></td>\n'
h+=' <td>'+dchars.get('run',{}).get('REAL_ENV_CK_CAFFE_BATCH_SIZE','')+'</td>\n'
h+=' </tr>\n'
# TBD: Need to show min,exp,max!
# h+=' <tr>\n'
# h+=' <td><b>FWBW time (ms.):</b></td>\n'
# h+=' <td>'+str(dchars.get('run',{}).get('time_bw_ms',''))+'</td>\n'
# h+=' </tr>\n'
# h+=' <tr>\n'
# h+=' <td><b>FW time (ms.):</b></td>\n'
# h+=' <td>'+str(dchars.get('run',{}).get('time_fw_ms',''))+'</td>\n'
# h+=' </tr>\n'
# h+=' <tr>\n'
# h+=' <td><b>BW time (ms.):</b></td>\n'
# h+=' <td>'+str(dchars.get('run',{}).get('time_bw_ms',''))+'</td>\n'
# h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>Platform:</b></td>\n'
h+=' <td>'+dmeta.get('plat_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>OS:</b></td>\n'
h+=' <td>'+dmeta.get('os_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>CPU:</b></td>\n'
h+=' <td>'+dmeta.get('cpu_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>GPU:</b></td>\n'
h+=' <td>'+dmeta.get('gpu_name','')+'</td>\n'
h+=' </tr>\n'
h+=' </tr>\n'
h+='</table>\n'
h+='<center>\n'
h+='<p>\n'
h+='<table border="0" cellpadding="10" cellspacing="0">\n'
h+=' <tr>\n'
h+=' <td><b>Name</b></td>\n'
h+=' <td><b>Direction</b></td>\n'
h+=' <td align="right"><b>Min time (ms.):</b></td>\n'
h+=' <td align="right"><b>Expected time (ms.):</b></td>\n'
h+=' <td align="right"><b>Max time (ms.):</b></td>\n'
h+=' <td align="right"><b>Repetitions:</b></td>\n'
h+=' </tr>\n'
# Detecting number of layers
jj={}
for j in range(0,1000):
k3='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#min'
v3=dstat.get(k3,'')
if v3=='': break
jj[j]=v3
# Sorting by min time
if i.get('all_params',{}).get('skip_sort','')!='yes':
jj=sorted(jj, key=lambda x: jj[x], reverse=True)
# Also layers
for j in jj:
k1='##characteristics#run#per_layer_info@'+str(j)+'#direction#min'
k2='##characteristics#run#per_layer_info@'+str(j)+'#label#min'
k3='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#min'
k4='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#max'
k5='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#exp_allx'
k7='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#repeats'
v1=dstat.get(k1,'')
v2=dstat.get(k2,'')
v3=dstat.get(k3,'')
v4=dstat.get(k4,'')
v5=dstat.get(k5,[])
v7=dstat.get(k7,'')
if v1!='' and v2!='' and v3!='' and v4!='':
v6=0
if len(v5)>0:
v6=v5[0]
xv3=''
xv4=''
xv6=''
if v3!='':
if v3<0.1: xv3='0'
else: xv3='<b>'+('%.1f'%v3)+'</b>'
if v4!='':
if v4<0.1: xv4='0'
else: xv4='<b>'+('%.1f'%v4)+'</b>'
if v6!='':
if v6<0.1: xv6='0'
else: xv6='<b>'+('%.1f'%v6)+'</b>'
h+=' <tr>\n'
h+=' <td>'+v2+'</td>\n'
h+=' <td>'+v1+'</td>\n'
h+=' <td align="right">'+xv3+'</td>\n'
h+=' <td align="right">'+xv6+'</td>\n'
h+=' <td align="right">'+xv4+'</td>\n'
h+=' <td align="right">'+str(v7)+'</td>\n'
h+=' </tr>\n'
h+='</table>\n'
h+='</center>\n'
return {'return':0, 'html':h, 'show_top':'yes'}
##############################################################################
# replay experiment (TBD)
def replay(i):
"""
Input: {
(data_uoa)
(remote)
(host_os)
(target_os)
(device_id)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import copy
import os
# Setting output
o=i.get('out','')
oo=''
if o=='con': oo='con'
duoa=i.get('data_uoa','')
remote=i.get('remote','')
er=''
esr=''
if remote=='yes':
er=i.get('exchange_repo','')
if er=='': er=ck.cfg['default_exchange_repo_uoa']
esr=i.get('exchange_subrepo','')
if esr=='': esr=ck.cfg['default_exchange_subrepo_uoa']
# Try to load info
if o=='con':
ck.out('Loading experiment entry ...')
ck.out('')
r=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'data_uoa':duoa,
'repo_uoa':er,
'remote_repo_uoa':esr})
if r['return']>0: return r
d=r['dict']
hos=i.get('host_os','')
tos=i.get('target_os','')
tdid=i.get('device_id','')
# Check two main deps (engine and model)
meta=d.get('meta',{})
xdeps=meta.get('xdeps',{})
# TBD: rebuild env by tags!
#
# dnn=xdeps.get('lib-caffe',{})
# model=xdeps.get('caffemodel',{})
#
# pdnn=dnn.get('package_uoa','')
# pmodel=model.get('package_uoa','')
#
# preset_env={}
# penv=[pdnn,pmodel]
#
# for j in range(0, len(penv)):
# px=''
# py=penv[j]
#
# if py!='':
# # Search by package
# r=ck.access({'action':'search',
# 'module_uoa':cfg['module_deps']['env'],
# 'search_dict':{'package_uoa':py}})
# if r['return']>0: return r
#
# l=r['lst']
#
# if j==0: preset_env['lib-caffe']=px
# elif j==1: preset_env['caffemodel']=px
# Run pipeline
choices=d.get('choices',{})
# Clean various vars
for k in replay_clean_vars:
if k in choices:
del(choices[k])
if i.get('target_os','')!='' and not i['target_os'].startswith('android'):
del(i['target_os'])
env=choices.get('env',{})
for k in replay_clean_env_vars:
if k in env:
del(env[k])
choices['env']=env
if hos!='': choices['host_os']=hos
if tos!='': choices['target_os']=tos
if tdid!='': choices['device_id']=tdid
pipeline_data_uoa=choices['module_uoa']
# Prepare pipeline
ii={'action':'pipeline',
'module_uoa':cfg['module_deps']['program'],
'prepare':'yes',
'choices':choices,
'out':o}
rr=ck.access(ii)
if rr['return']>0: return rr
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
ready=rr.get('ready','')
if ready!='yes':
return {'return':11, 'error':'couldn\'t prepare universal CK program workflow'}
# Run pipeline
ii={'action':'run',
'module_uoa':cfg['module_deps']['pipeline'],
'data_uoa':pipeline_data_uoa,
'pipeline':rr,
'out':o}
rr=ck.access(ii)
if rr['return']>0: return rr
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
if o=='con':
ck.out('')
ck.out('Your results:')
ck.out('')
dstat=rr.get('last_stat_analysis',{}).get('dict_flat',{})
x0=dstat.get("##characteristics#run#time_fwbw_ms#min",None)
x0e=dstat.get("##characteristics#run#time_fwbw_ms#exp",None)
if x0!=None:
ck.out('* FWBW min: '+('%.0f'%x0)+' ms.')
if x0e!=None:
ck.out('* FWBW exp: '+('%.0f'%x0e)+' ms.')
x1=dstat.get("##characteristics#run#time_fw_ms#min",None)
x1e=dstat.get("##characteristics#run#time_fw_ms#exp",None)
if x1!=None:
ck.out('* FW min: '+('%.0f'%x1)+' ms.')
if x1e!=None:
ck.out('* FW exp: '+('%.0f'%x1e)+' ms.')
x2=dstat.get("##characteristics#run#time_bw_ms#min",None)
x2e=dstat.get("##characteristics#run#time_bw_ms#exp",None)
if x2!=None:
ck.out('* BW min: '+('%.0f'%x2)+' ms.')
if x2e!=None:
ck.out('* BW exp: '+('%.0f'%x2e)+' ms.')
return {'return':0}
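# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): the replay action above is
# normally reached through the CK kernel rather than called directly, because
# this file relies on the ck/cfg/work globals injected by CK. The module name
# and data UOA below are placeholders, not real entries.
#
#   import ck.kernel as ck
#
#   r = ck.access({'action': 'replay',
#                  'module_uoa': 'experiment.bench.caffe',   # assumed module name
#                  'data_uoa': '<experiment-entry-uid>',
#                  'out': 'con'})
#   if r['return'] > 0:
#       ck.out('Error: ' + r.get('error', ''))
#
# Equivalent command line (as generated by the "Replay" button above):
#   ck replay <module>:<experiment-entry-uid>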
|
the-stack_0_1072 | from collections import namedtuple
from enum import Enum
from string import ascii_lowercase
import numpy as np
# ABC for the Decision class
class Decision(object):
ENUM = None
FIELDS = ('decision_id')
# suggestion for subclassing
# FIELDS = super().FIELDS + ('target_idxs',)
# etc.
# An Ellipsis instead of fields indicates there is a variable
# number of fields.
SHAPES = ((1,),)
DTYPES = (np.int,)
@classmethod
def enum_dict_by_name(cls):
if cls.ENUM is None:
raise NotImplementedError
d = {}
for enum in cls.ENUM:
d[enum.name] = enum.value
return d
@classmethod
def enum_dict_by_value(cls):
if cls.ENUM is None:
raise NotImplementedError
d = {}
for enum in cls.ENUM:
d[enum.value] = enum
return d
@classmethod
def enum_by_value(cls, enum_value):
d = cls.enum_dict_by_value()
return d[enum_value]
@classmethod
def enum_by_name(cls, enum_name):
d = cls.enum_dict_by_name()
return d[enum_name]
@classmethod
def record(cls, enum_value):
# TODO check to make sure the enum value is valid
return {'decision_id' : enum_value}
@classmethod
def action(cls, walkers, decisions):
"""Perform the instructions for a set of resampling records on
walkers."""
raise NotImplementedError
@classmethod
def parents(cls, step):
"""Given a row of resampling records (for a single resampling step)
returns the parents of the children of this step."""
# initialize a list for the parents of this stage's walkers
step_parents = [None for i in range(len(step))]
# the rest of the stage's parents are based on the previous stage
for parent_idx, parent_rec in enumerate(step):
# if the decision is an ancestor then the instruction
# values will be the children
if parent_rec[0] in cls.ANCESTOR_DECISION_IDS:
# the first value of the parent record is the target
# idxs
child_idxs = parent_rec[1]
for child_idx in child_idxs:
step_parents[child_idx] = parent_idx
return step_parents
class NothingDecisionEnum(Enum):
NOTHING = 0
# an example of a Decision class that has the enumeration, instruction
# record namedtuple, and the instruction dtypes
class NoDecision(Decision):
ENUM = NothingDecisionEnum
INSTRUCTION_NAMES = (
(ENUM.NOTHING, "NothingInstructionRecord"),
)
INSTRUCTION_FIELDS = (
(ENUM.NOTHING, ('pos',)),)
INSTRUCTION_FIELD_DTYPES = (
(ENUM.NOTHING, (np.int,)),
)
# the decision types that pass on their state
ANCESTOR_DECISION_IDS = (ENUM.NOTHING.value,)
@classmethod
def action(cls, walkers, decisions):
# list for the modified walkers
mod_walkers = [None for i in range(len(walkers))]
# go through each decision and perform the decision
# instructions
for walker_idx, decision in enumerate(decisions):
decision_value, instruction = decision
if decision_value == cls.ENUM.NOTHING.value:
# check to make sure a walker doesn't already exist
# where you are going to put it
if mod_walkers[instruction[0]] is not None:
raise ValueError(
"Multiple walkers assigned to position {}".format(instruction[0]))
# put the walker in the position specified by the
# instruction
mod_walkers[instruction[0]] = walkers[walker_idx]
return mod_walkers
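# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): the walker objects are
# plain strings standing in for real walker instances, and each decision is a
# (decision_value, instruction) pair as expected by NoDecision.action above.
if __name__ == "__main__":
    walkers = ["walker_A", "walker_B"]
    decisions = [(NoDecision.ENUM.NOTHING.value, (0,)),
                 (NoDecision.ENUM.NOTHING.value, (1,))]
    # record() builds the resampling record for a single decision value
    print(NoDecision.record(NoDecision.ENUM.NOTHING.value))  # {'decision_id': 0}
    # action() applies the instructions, leaving both walkers in place here
    print(NoDecision.action(walkers, decisions))  # ['walker_A', 'walker_B']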
|
the-stack_0_1073 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint:disable=line-too-long
r"""Beam job to map to tf.Examples of embeddings.
This file has two modes:
1) Map from tf.Examples of audio to tf.Examples of embeddings.
2) Map from TFDS dataset to tf.Examples of embeddings.
"""
# pylint:enable=line-too-long
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import tensorflow as tf
from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_utils
flags.DEFINE_string('input_glob', None,
'Glob for input dir. XOR with `tfds_data`.')
flags.DEFINE_string(
'tfds_dataset', None, 'Name of TFDS dataset. '
'XOR with `input_glob`. Should be of the form ex "cifar".'
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_string('output_filename', None, 'Output filename.')
flags.DEFINE_list(
'embedding_names', None,
'List of embedding module names. Used for logging, and as '
'in the features key of the results tf.Example feature list.')
flags.DEFINE_list(
'embedding_modules', None,
'List of embedding modules to compute. Should be accepted '
'by `hub.load`.`')
flags.DEFINE_list(
'module_output_keys', None,
'List of module output key. Must be the same length as '
'`embedding_modules`.')
flags.DEFINE_string('audio_key', None, 'Key of audio.')
flags.DEFINE_string(
'sample_rate_key', None, 'Key of sample rate. '
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_integer(
'sample_rate', None, 'Sample rate.'
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_string(
'label_key', None, 'Key for labels. If the feature value is an integer, '
'convert to bytes.')
flags.DEFINE_string(
'speaker_id_key', None,
'Key for speaker_id, or `None`. If this flag is present, '
'check that the key exists and is of type `bytes`.')
flags.DEFINE_bool('average_over_time', False,
'If true, return embeddings that are averaged over time.')
flags.DEFINE_bool(
'delete_audio_from_output', True,
'If true, remove audio from the output table. Can be '
'helpful in keeping output tables small.')
flags.DEFINE_bool('debug', False, 'If True, run in debug mode.')
FLAGS = flags.FLAGS
def main(unused_argv):
# Get input data location from flags. If we're reading a TFDS dataset, get
# train, validation, and test.
input_filenames_list, output_filenames, sample_rate = audio_to_embeddings_beam_utils.read_input_glob_and_sample_rate_from_flags(
FLAGS.input_glob, FLAGS.sample_rate, FLAGS.tfds_dataset,
FLAGS.output_filename)
# Check that inputs and flags are formatted correctly.
audio_to_embeddings_beam_utils.validate_inputs(input_filenames_list,
output_filenames,
FLAGS.embedding_modules,
FLAGS.embedding_names,
FLAGS.module_output_keys)
input_format = 'tfrecord'
output_format = 'tfrecord'
# If you have custom beam options, add them here.
beam_options = None
logging.info('Starting to create flume pipeline...')
with beam.Pipeline(beam_options) as root:
for i, (input_filenames_or_glob, output_filename) in enumerate(
zip(input_filenames_list, output_filenames)):
audio_to_embeddings_beam_utils.make_beam_pipeline(
root,
input_filenames_or_glob,
sample_rate,
FLAGS.debug,
FLAGS.embedding_names,
FLAGS.embedding_modules,
FLAGS.module_output_keys,
FLAGS.audio_key,
FLAGS.sample_rate_key,
FLAGS.label_key,
FLAGS.speaker_id_key,
FLAGS.average_over_time,
FLAGS.delete_audio_from_output,
output_filename,
input_format=input_format,
output_format=output_format,
suffix=i)
if __name__ == '__main__':
flags.mark_flags_as_required([
'output_filename', 'embedding_names', 'embedding_modules',
'module_output_keys', 'audio_key', 'label_key'
])
flags.mark_flags_as_mutual_exclusive(['input_glob', 'tfds_dataset'],
required=True)
flags.mark_flags_as_mutual_exclusive(
['tfds_dataset', 'sample_rate_key', 'sample_rate'], required=True)
tf.compat.v2.enable_v2_behavior()
assert tf.executing_eagerly()
app.run(main)
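# ---------------------------------------------------------------------------
# Example invocation (illustrative only; the script path, file locations and
# the TF-Hub module handle are placeholders, not values from this repository):
#
#   python this_script.py \
#     --input_glob="/tmp/audio/*.tfrecord" \
#     --output_filename=/tmp/embeddings/output \
#     --embedding_names=my_embedding \
#     --embedding_modules=https://tfhub.dev/some/embedding/module \
#     --module_output_keys=embedding \
#     --audio_key=audio \
#     --label_key=label \
#     --sample_rate=16000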
|
the-stack_0_1074 | from pprint import pformat
stack=[]
def print_stack_before_operation(f):
def method(*args, **kwargs):
print("Current stack:", pformat(stack))
f(*args, **kwargs)
print("After operation:", pformat(stack))
return method
def print_operation_name_and_parameter(f):
def method(*args, **kwargs):
print("Operation name:{0} {1}".format( f.__name__, (", parameter: {0}".format( pformat(*args, **kwargs)) if args or kwargs else "")))
f(*args, **kwargs)
return method
def print_spaceline(linecount):
def wrapper(f):
def method(*args, **kwargs):
f(*args, **kwargs)
for _ in range(linecount): print()
return method
return wrapper
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def push(v):
stack.append(v)
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def multiply():
a = stack.pop(-1)
b = stack.pop(-1)
stack.append(a*b)
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def add():
a = stack.pop(-1)
b = stack.pop(-1)
stack.append(a+b)
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def pop():
print("Poped ------------------->", stack.pop(-1))
opcodes={
1: {
"func": push,
"pcount": 1
},
2: {
"func": multiply,
"pcount": 0
},
3: {
"func": add,
"pcount": 0
},
4: {
"func": pop,
"pcount": 0
}
}
# 1+2*3+4 = 11
push(1)
push(2)
push(3)
multiply()
add()
push(4)
add()
pop()
print("=*"* 30)
codes=[1, 1,
1, 2,
1, 3,
2,
3,
1, 4,
3,
4
]
while codes:
code=codes.pop(0)
params = []
for _ in range(opcodes[code]['pcount']):
params.append(codes.pop(0))
opcodes[code]['func'](*params)
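# ---------------------------------------------------------------------------
# Extension sketch (not in the original snippet): any new operation can reuse
# the same decorator stack and be registered in `opcodes`; opcode number 5 is
# an arbitrary choice for this example.
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def subtract():
    a = stack.pop(-1)
    b = stack.pop(-1)
    stack.append(b - a)

opcodes[5] = {"func": subtract, "pcount": 0}

# 10 - 4 = 6
codes = [1, 10,
         1, 4,
         5,
         4]
while codes:
    code = codes.pop(0)
    params = [codes.pop(0) for _ in range(opcodes[code]["pcount"])]
    opcodes[code]["func"](*params)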
|
the-stack_0_1075 | #!/usr/bin/env python
import os
import time
import json
import argparse
import pprint as pp
import numpy as np
import pandas as pd
import pickle
from tqdm import tqdm
from datetime import timedelta
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from nets.attention_model import AttentionModel
from nets.nar_model import NARModel
from nets.encoders.gat_encoder import GraphAttentionEncoder
from nets.encoders.gnn_encoder import GNNEncoder
from nets.encoders.mlp_encoder import MLPEncoder
from reinforce_baselines import *
from problems.tsp.problem_tsp import TSP
from utils import *
from train import *
from tensorboard_logger import Logger as TbLogger
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
import warnings
warnings.filterwarnings("ignore", message="indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead.")
def train_batch_ft(model, optimizer, baseline, epoch,
batch_id, step, batch, tb_logger, opts):
# Unwrap baseline
bat, bl_val = baseline.unwrap_batch(batch)
# Optionally move Tensors to GPU
x = move_to(bat['nodes'], opts.device)
graph = move_to(bat['graph'], opts.device)
bl_val = move_to(bl_val, opts.device) if bl_val is not None else None
# Evaluate model, get costs and log probabilities
cost, log_likelihood = model(x, graph)
# Evaluate baseline, get baseline loss if any (only for critic)
bl_val, bl_loss = baseline.eval(x, graph, cost) if bl_val is None else (bl_val, 0)
# Calculate loss
reinforce_loss = ((cost - bl_val) * log_likelihood).mean()
loss = reinforce_loss + bl_loss
# Normalize loss for gradient accumulation
loss = loss / opts.accumulation_steps
# Perform backward pass
loss.backward()
# Clip gradient norms and get (clipped) gradient norms for logging
grad_norms = clip_grad_norms(optimizer.param_groups, opts.max_grad_norm)
# Perform optimization step after accumulating gradients
if step % opts.accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
# Logging
if step % int(opts.log_step) == 0:
log_values_ft(cost, grad_norms, epoch, batch_id, step, log_likelihood,
reinforce_loss, bl_loss, tb_logger, opts)
def log_values_ft(cost, grad_norms, epoch, batch_id, step, log_likelihood,
reinforce_loss, bl_loss, tb_logger, opts):
avg_cost = cost.mean().item()
grad_norms, grad_norms_clipped = grad_norms
# Log values to screen
print('\nepoch: {}, train_batch_id: {}, avg_cost: {}'.format(epoch, batch_id, avg_cost))
print('grad_norm: {}, clipped: {}'.format(grad_norms[0], grad_norms_clipped[0]))
# Log values to tensorboard
if not opts.no_tensorboard:
tb_logger.log_value('avg_cost/ft', avg_cost, step)
tb_logger.log_value('actor_loss/ft', reinforce_loss.item(), step)
tb_logger.log_value('nll/ft', -log_likelihood.mean().item(), step)
tb_logger.log_value('grad_norm/ft', grad_norms[0], step)
tb_logger.log_value('grad_norm_clipped/ft', grad_norms_clipped[0], step)
if opts.baseline == 'critic':
tb_logger.log_value('critic_loss/ft', bl_loss.item(), step)
tb_logger.log_value('critic_grad_norm/ft', grad_norms[1], step)
tb_logger.log_value('critic_grad_norm_clipped/ft', grad_norms_clipped[1], step)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ft_run_name", type=str, default="debug",
help="Run name to create logging sub-directory")
parser.add_argument("--ft_strategy", type=str, default="active",
help="Finetuning strategy: active/fixed/random")
parser.add_argument("--problem", type=str, default="tsp")
parser.add_argument("--min_size", type=int, default=200)
parser.add_argument("--max_size", type=int, default=200)
parser.add_argument("--neighbors", type=float, default=0.20)
parser.add_argument("--knn_strat", type=str, default="percentage")
parser.add_argument("--data_distribution", type=str, default="random")
parser.add_argument("--val_dataset", type=str, default="data/tsp/tsp200_test_concorde.txt",
help="Dataset to evaluate finetuned model on")
parser.add_argument("--epoch_size", type=int, default=128000)
parser.add_argument("--val_size", type=int, default=1280)
parser.add_argument("--rollout_size", type=int, default=1280)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--accumulation_steps", type=int, default=8)
parser.add_argument("--n_epochs", type=int, default=100)
parser.add_argument('--model', type=str,
help="Path to model checkpoints directory")
parser.add_argument('--baseline', type=str, default="exponential",
help="Baseline for finetuning model: none/exponential/rollout")
parser.add_argument('--bl_alpha', type=float, default=0.05,
help='Significance in the t-test for updating rollout baseline')
parser.add_argument("--lr_ft", type=float, default=0.00001)
parser.add_argument("--max_grad_norm", type=float, default=1)
parser.add_argument('--seed', type=int, default=1234, help='Random seed to use')
parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA')
parser.add_argument('--num_workers', type=int, default=0,
help='Number of workers for DataLoaders')
parser.add_argument('--no_tensorboard', action='store_true',
help='Disable logging TensorBoard files')
parser.add_argument('--no_progress_bar', action='store_true',
help='Disable progress bar')
parser.add_argument('--log_step', type=int, default=100,
help='Log info every log_step steps')
parser.add_argument('--val_every', type=int, default=1,
help='Validate every val_every epochs')
opts = parser.parse_args()
opts.use_cuda = torch.cuda.is_available() and not opts.no_cuda
opts.ft_run_name = "{}_{}".format(opts.ft_run_name, time.strftime("%Y%m%dT%H%M%S"))
# Pretty print the run args
pp.pprint(vars(opts))
# Opts from checkpoint
args = load_args(os.path.join(opts.model, 'args.json'))
os.makedirs(os.path.join(args["save_dir"], opts.ft_run_name))
# Save arguments so exact configuration can always be found
with open(os.path.join(args["save_dir"], opts.ft_run_name, "args-ft.json"), 'w') as f:
json.dump(vars(opts), f, indent=True)
# Set the device
opts.device = torch.device("cuda:0" if opts.use_cuda else "cpu")
# Find model file
if os.path.isfile(opts.model):
model_filename = opts.model
path = os.path.dirname(model_filename)
elif os.path.isdir(opts.model):
epoch = max(
int(os.path.splitext(filename)[0].split("-")[1])
for filename in os.listdir(opts.model)
if os.path.splitext(filename)[1] == '.pt'
)
model_filename = os.path.join(opts.model, 'epoch-{}.pt'.format(epoch))
else:
assert False, "{} is not a valid directory or file".format(opts.model)
# Set the random seed
torch.manual_seed(opts.seed)
np.random.seed(opts.seed)
# Configure tensorboard
tb_logger = TbLogger(os.path.join(
args["log_dir"], "{}_{}-{}".format(args["problem"], args["min_size"], args["max_size"]), args["run_name"], opts.ft_run_name))
# Figure out what's the problem
problem = load_problem(args["problem"])
# Load data from load_path
load_data = {}
print('\nLoading data from {}'.format(opts.model))
load_data = torch_load_cpu(model_filename)
# Initialize model
model_class = {
'attention': AttentionModel,
'nar': NARModel,
}.get(args.get('model', 'attention'), None)
assert model_class is not None, "Unknown model: {}".format(model_class)
encoder_class = {
'gnn': GNNEncoder,
'gat': GraphAttentionEncoder,
'mlp': MLPEncoder
}.get(args.get('encoder', 'gnn'), None)
assert encoder_class is not None, "Unknown encoder: {}".format(encoder_class)
model = model_class(
problem=problem,
embedding_dim=args['embedding_dim'],
encoder_class=encoder_class,
n_encode_layers=args['n_encode_layers'],
aggregation=args['aggregation'],
aggregation_graph=args['aggregation_graph'],
normalization=args['normalization'],
learn_norm=args['learn_norm'],
track_norm=args['track_norm'],
gated=args['gated'],
n_heads=args['n_heads'],
tanh_clipping=args['tanh_clipping'],
mask_inner=True,
mask_logits=True,
mask_graph=False,
checkpoint_encoder=args['checkpoint_encoder'],
shrink_size=args['shrink_size']
).to(opts.device)
# Compute number of network parameters
print(model)
nb_param = 0
for param in model.parameters():
nb_param += np.prod(list(param.data.size()))
print('Number of parameters: ', nb_param)
# Overwrite model parameters by parameters to load
print('\nOverwriting model parameters from checkpoint')
model_ = get_inner_model(model)
model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})
# Initialize baseline
if opts.baseline == 'exponential':
baseline = ExponentialBaseline(args["exp_beta"])
elif opts.baseline == 'critic':
assert problem.NAME == 'tsp', "Critic only supported for TSP"
baseline = CriticBaseline(
(
CriticNetwork(
embedding_dim=args["embedding_dim"],
encoder_class=encoder_class,
n_encode_layers=args["n_encode_layers"],
aggregation=args["aggregation"],
normalization=args["normalization"],
learn_norm=args["learn_norm"],
track_norm=args["track_norm"],
gated=args["gated"],
n_heads=args["n_heads"]
)
).to(opts.device)
)
print(baseline.critic)
nb_param = 0
for param in baseline.get_learnable_parameters():
nb_param += np.prod(list(param.data.size()))
print('Number of parameters (BL): ', nb_param)
elif opts.baseline == 'rollout':
baseline = RolloutBaseline(model, problem, opts)
else:
# assert opts.baseline is None, "Unknown baseline: {}".format(opts.baseline)
baseline = NoBaseline()
# Load baseline from data, make sure script is called with same type of baseline
if 'baseline' in load_data and opts.baseline == args["baseline"]:
print('\nOverwriting baseline from checkpoint')
baseline.load_state_dict(load_data['baseline'])
# Initialize optimizer
optimizer = optim.Adam(
[{'params': model.parameters(), 'lr': args["lr_model"]}]
+ (
[{'params': baseline.get_learnable_parameters(), 'lr': args["lr_critic"]}]
if len(baseline.get_learnable_parameters()) > 0
else []
)
)
# Load optimizer state
if 'optimizer' in load_data:
print('\nOverwriting optimizer from checkpoint')
optimizer.load_state_dict(load_data['optimizer'])
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.to(opts.device)
# Set finetuning learning rate
for param_group in optimizer.param_groups:
param_group['lr'] = opts.lr_ft
# Load random state
torch.set_rng_state(load_data['rng_state'])
if opts.use_cuda:
torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])
# Dumping of state was done before epoch callback, so do that now (model is loaded)
baseline.epoch_callback(model, epoch)
print("Resuming after epoch {}".format(epoch))
epoch_start = epoch + 1
step = 0
# Evaluate on held-out set
val_dataset = TSP.make_dataset(
filename=opts.val_dataset, batch_size=opts.batch_size, num_samples=opts.val_size,
neighbors=opts.neighbors, knn_strat=opts.knn_strat, supervised=True
)
avg_reward, avg_opt_gap = validate(model, val_dataset, problem, opts)
tb_logger.log_value('val_ft/avg_reward', avg_reward, step)
tb_logger.log_value('val_ft/opt_gap', avg_opt_gap, step)
if opts.ft_strategy == "active":
# Active search: finetune on the test set
train_dataset = baseline.wrap_dataset(val_dataset)
train_dataloader = DataLoader(
train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
elif opts.ft_strategy == "fixed":
# Fixed finetuning: finetune on a fixed training set
train_dataset = baseline.wrap_dataset(
problem.make_dataset(
min_size=opts.min_size, max_size=opts.max_size, batch_size=opts.batch_size,
num_samples=opts.epoch_size, distribution=opts.data_distribution,
neighbors=opts.neighbors, knn_strat=opts.knn_strat
))
train_dataloader = DataLoader(
train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
# Start finetuning loop
for epoch in range(epoch_start, epoch_start + opts.n_epochs):
print("\nStart finetuning epoch {}, lr={} for run {}".format(epoch, optimizer.param_groups[0]['lr'], args["run_name"]))
start_time = time.time()
# Put model in train mode!
model.train()
optimizer.zero_grad()
set_decode_type(model, "sampling")
if opts.ft_strategy == "random":
# Random finetuning: finetune on new/random samples each epoch
train_dataset = baseline.wrap_dataset(
problem.make_dataset(
min_size=opts.min_size, max_size=opts.max_size, batch_size=opts.batch_size,
num_samples=opts.epoch_size, distribution=opts.data_distribution,
neighbors=opts.neighbors, knn_strat=opts.knn_strat
))
train_dataloader = DataLoader(
train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
for batch_id, batch in enumerate(tqdm(train_dataloader, disable=opts.no_progress_bar, ascii=True)):
train_batch_ft(
model,
optimizer,
baseline,
epoch,
batch_id,
step,
batch,
tb_logger,
opts
)
step += 1
epoch_duration = time.time() - start_time
print("Finished epoch {}, took {} s".format(epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))
if epoch % opts.val_every == 0:
# Evaluate on held-out set
avg_reward, avg_opt_gap = validate(model, val_dataset, problem, opts)
tb_logger.log_value('val_ft/avg_reward', avg_reward, step)
tb_logger.log_value('val_ft/opt_gap', avg_opt_gap, step)
baseline.epoch_callback(model, epoch)
print('\nSaving model and state...')
torch.save(
{
'model': get_inner_model(model).state_dict(),
'optimizer': optimizer.state_dict(),
'rng_state': torch.get_rng_state(),
'cuda_rng_state': torch.cuda.get_rng_state_all()
},
os.path.join(args["save_dir"], opts.ft_run_name, 'epoch-{}-ft.pt'.format(epoch))
)
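# ---------------------------------------------------------------------------
# Example invocation (illustrative only; the script name and the checkpoint
# directory are placeholders):
#
#   python finetune_tsp.py \
#     --model path/to/pretrained/checkpoint_dir \
#     --ft_strategy active \
#     --baseline exponential \
#     --val_dataset data/tsp/tsp200_test_concorde.txt \
#     --min_size 200 --max_size 200 \
#     --n_epochs 100 --batch_size 16 --accumulation_steps 8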
|
the-stack_0_1076 | # Copyright 2019. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bluepy import __path__ as bluepy_path
from pprint import pformat
from bluepy.btle import DefaultDelegate, Peripheral, Scanner, UUID, capitaliseName
from bluepy.btle import BTLEDisconnectError, BTLEManagementError, BTLEGattError
from random import choice
from string import ascii_lowercase
import time
from threading import Thread
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.ble.bytes_ble_uplink_converter import BytesBLEUplinkConverter
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
class BLEConnector(Connector, Thread):
def __init__(self, gateway, config, connector_type):
super().__init__()
self.__connector_type = connector_type
self.__default_services = [x for x in range(0x1800, 0x183A)]
self.statistics = {'MessagesReceived': 0,
'MessagesSent': 0}
self.__gateway = gateway
self.__config = config
self.setName(self.__config.get("name",
'BLE Connector ' + ''.join(choice(ascii_lowercase) for _ in range(5))))
self._connected = False
self.__stopped = False
self.__previous_scan_time = time.time()-10000
self.__previous_read_time = time.time()-10000
self.__check_interval_seconds = self.__config['checkIntervalSeconds'] if self.__config.get(
'checkIntervalSeconds') is not None else 10
self.__rescan_time = self.__config['rescanIntervalSeconds'] if self.__config.get(
'rescanIntervalSeconds') is not None else 10
self.__scanner = Scanner().withDelegate(ScanDelegate(self))
self.__devices_around = {}
self.__available_converters = []
self.__notify_delegators = {}
self.__fill_interest_devices()
self.daemon = True
def run(self):
while True:
if time.time() - self.__previous_scan_time >= self.__rescan_time != 0:
self.__scan_ble()
self.__previous_scan_time = time.time()
if time.time() - self.__previous_read_time >= self.__check_interval_seconds:
self.__get_services_and_chars()
self.__previous_read_time = time.time()
time.sleep(.1)
if self.__stopped:
log.debug('STOPPED')
break
def close(self):
self.__stopped = True
for device in self.__devices_around:
try:
self.__devices_around[device]['peripheral'].disconnect()
except Exception as e:
log.exception(e)
raise e
def get_name(self):
return self.name
def on_attributes_update(self, content):
log.debug(content)
try:
for device in self.__devices_around:
if self.__devices_around[device]['device_config'].get('name') == content['device']:
for requests in self.__devices_around[device]['device_config']["attributeUpdates"]:
for service in self.__devices_around[device]['services']:
if requests['characteristicUUID'] in self.__devices_around[device]['services'][service]:
characteristic = self.__devices_around[device]['services'][service][requests['characteristicUUID']]['characteristic']
if 'WRITE' in characteristic.propertiesToString():
if content['data'].get(requests['attributeOnThingsBoard']) is not None:
try:
self.__check_and_reconnect(device)
characteristic.write(content['data'][requests['attributeOnThingsBoard']].encode('UTF-8'))
except BTLEDisconnectError:
self.__check_and_reconnect(device)
characteristic.write(content['data'][requests['attributeOnThingsBoard']].encode('UTF-8'))
except Exception as e:
log.exception(e)
else:
log.error('Cannot process attribute update request for device: %s with data: %s and config: %s',
device,
content,
self.__devices_around[device]['device_config']["attributeUpdates"])
except Exception as e:
log.exception(e)
def server_side_rpc_handler(self, content):
log.debug(content)
try:
for device in self.__devices_around:
if self.__devices_around[device]['device_config'].get('name') == content['device']:
for requests in self.__devices_around[device]['device_config']["serverSideRpc"]:
for service in self.__devices_around[device]['services']:
if requests['characteristicUUID'] in self.__devices_around[device]['services'][service]:
characteristic = self.__devices_around[device]['services'][service][requests['characteristicUUID']]['characteristic']
if requests.get('methodProcessing') and requests['methodProcessing'].upper() in characteristic.propertiesToString():
if content['data']['method'] == requests['methodRPC']:
response = None
if requests['methodProcessing'].upper() == 'WRITE':
try:
self.__check_and_reconnect(device)
response = characteristic.write(content['data'].get('params', '').encode('UTF-8'), requests.get('withResponse', False))
except BTLEDisconnectError:
self.__check_and_reconnect(device)
response = characteristic.write(content['data'].get('params', '').encode('UTF-8'), requests.get('withResponse', False))
except Exception as e:
log.exception(e)
elif requests['methodProcessing'].upper() == 'READ':
try:
self.__check_and_reconnect(device)
response = characteristic.read()
except BTLEDisconnectError:
self.__check_and_reconnect(device)
response = characteristic.read()
except Exception as e:
log.exception(e)
elif requests['methodProcessing'].upper() == 'NOTIFY':
try:
self.__check_and_reconnect(device)
delegate = self.__notify_handler(self.__devices_around[device], characteristic.handle)
response = delegate.data
except BTLEDisconnectError:
self.__check_and_reconnect(device)
delegate = self.__notify_handler(self.__devices_around[device], characteristic.handle)
response = delegate.data
except Exception as e:
log.exception(e)
if response is not None:
log.debug('Response from device: %s', response)
if requests['withResponse']:
response = 'success'
self.__gateway.send_rpc_reply(content['device'], content['data']['id'], str(response))
else:
log.error('Method for rpc request - not supported by characteristic or not found in the config.\nDevice: %s with data: %s and config: %s',
device,
content,
self.__devices_around[device]['device_config']["serverSideRpc"])
except Exception as e:
log.exception(e)
def is_connected(self):
return self._connected
def open(self):
self.__stopped = False
self.start()
def device_add(self, device):
for interested_device in self.__devices_around:
if device.addr.upper() == interested_device and self.__devices_around[interested_device].get('scanned_device') is None:
self.__devices_around[interested_device]['scanned_device'] = device
self.__devices_around[interested_device]['is_new_device'] = True
log.debug('Device with address: %s - found.', device.addr.upper())
def __get_services_and_chars(self):
for device in self.__devices_around:
try:
if self.__devices_around.get(device) is not None and self.__devices_around[device].get('scanned_device') is not None:
log.debug('Connecting to device with address: %s', self.__devices_around[device]['scanned_device'].addr.upper())
if self.__devices_around[device].get('peripheral') is None:
peripheral = Peripheral(self.__devices_around[device]['scanned_device'])
self.__devices_around[device]['peripheral'] = peripheral
else:
peripheral = self.__devices_around[device]['peripheral']
peripheral.connect(self.__devices_around[device]['scanned_device'])
services = peripheral.getServices()
for service in services:
if self.__devices_around[device].get('services') is None:
log.debug('Building device %s map, it may take a time, please wait...', device)
self.__devices_around[device]['services'] = {}
service_uuid = str(service.uuid).upper()
if self.__devices_around[device]['services'].get(service_uuid) is None:
self.__devices_around[device]['services'][service_uuid] = {}
try:
characteristics = service.getCharacteristics()
except BTLEDisconnectError:
self.__check_and_reconnect(device)
characteristics = service.getCharacteristics()
if self.__config.get('buildDevicesMap', False):
for characteristic in characteristics:
descriptors = []
try:
self.__check_and_reconnect(device)
try:
descriptors = characteristic.getDescriptors()
except BTLEDisconnectError:
self.__check_and_reconnect(device)
descriptors = characteristic.getDescriptors()
except BTLEGattError as e:
log.debug(e)
except Exception as e:
log.exception(e)
characteristic_uuid = str(characteristic.uuid).upper()
if self.__devices_around[device]['services'][service_uuid].get(characteristic_uuid) is None:
self.__devices_around[device]['services'][service_uuid][characteristic_uuid] = {
'characteristic': characteristic,
'handle': characteristic.handle,
'descriptors': {}}
for descriptor in descriptors:
log.debug(descriptor.handle)
log.debug(str(descriptor.uuid))
log.debug(str(descriptor))
self.__devices_around[device]['services'][service_uuid][characteristic_uuid]['descriptors'][descriptor.handle] = descriptor
except BTLEDisconnectError:
self.__check_and_reconnect(device)
else:
for characteristic in characteristics:
characteristic_uuid = str(characteristic.uuid).upper()
self.__devices_around[device]['services'][service_uuid][characteristic_uuid] = {
'characteristic': characteristic,
'handle': characteristic.handle}
if self.__devices_around[device]['is_new_device']:
log.debug('New device %s - processing.', device)
self.__devices_around[device]['is_new_device'] = False
self.__new_device_processing(device)
for interest_char in self.__devices_around[device]['interest_uuid']:
for section in self.__devices_around[device]['interest_uuid'][interest_char]:
data = self.__service_processing(device, section['section_config'])
converter = section['converter']
converted_data = converter.convert(section, data)
log.debug(data)
log.debug(converted_data)
self.__gateway.send_to_storage(self.get_name(), converted_data)
except BTLEDisconnectError:
log.debug('Cannot connect to device %s', device)
continue
except Exception as e:
log.exception(e)
def __new_device_processing(self, device):
default_services_on_device = [service for service in self.__devices_around[device]['services'].keys() if int(service.split('-')[0], 16) in self.__default_services]
log.debug('Default services found on device %s :%s', device, default_services_on_device)
converter = BytesBLEUplinkConverter(self.__devices_around[device]['device_config'])
converted_data = None
for service in default_services_on_device:
characteristics = [char for char in self.__devices_around[device]['services'][service].keys() if self.__devices_around[device]['services'][service][char]['characteristic'].supportsRead()]
for char in characteristics:
read_config = {'characteristicUUID': char,
'method': 'READ',
}
try:
self.__check_and_reconnect(device)
data = self.__service_processing(device, read_config)
attribute = capitaliseName(UUID(char).getCommonName())
read_config['key'] = attribute
read_config['byteFrom'] = 0
read_config['byteTo'] = -1
converter_config = [{"type": "attributes",
"clean": False,
"section_config": read_config}]
for interest_information in converter_config:
try:
converted_data = converter.convert(interest_information, data)
log.debug(converted_data)
except Exception as e:
log.debug(e)
except Exception as e:
log.debug('Cannot process %s', e)
continue
if converted_data is not None:
self.__gateway.add_device(converted_data["deviceName"], {"connector": self})
self.__gateway.send_to_storage(self.get_name(), converted_data)
def __check_and_reconnect(self, device):
while self.__devices_around[device]['peripheral']._helper is None:
self.__devices_around[device]['peripheral'].connect(self.__devices_around[device]['scanned_device'])
def __notify_handler(self, device, notify_handle, delegate=None):
class NotifyDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
self.device = device
self.data = {}
def handleNotification(self, handle, data):
self.data = data
log.debug('Notification received from device %s handle: %i, data: %s', self.device, handle, data)
if delegate is None:
delegate = NotifyDelegate()
device['peripheral'].withDelegate(delegate)
device['peripheral'].writeCharacteristic(notify_handle, b'\x01\x00', True)
if device['peripheral'].waitForNotifications(1):
log.debug("Data received: %s", delegate.data)
return delegate
def __service_processing(self, device, characteristic_processing_conf):
for service in self.__devices_around[device]['services']:
characteristic_uuid_from_config = characteristic_processing_conf.get('characteristicUUID')
if characteristic_uuid_from_config is None:
log.error('Characteristic not found in config: %s', pformat(characteristic_processing_conf))
return
if self.__devices_around[device]['services'][service].get(characteristic_uuid_from_config) is None:
continue
characteristic = self.__devices_around[device]['services'][service][characteristic_uuid_from_config]['characteristic']
self.__check_and_reconnect(device)
data = None
if characteristic_processing_conf.get('method', '_').upper().split()[0] == "READ":
if characteristic.supportsRead():
self.__check_and_reconnect(device)
data = characteristic.read()
log.debug(data)
else:
log.error('This characteristic doesn\'t support "READ" method.')
if characteristic_processing_conf.get('method', '_').upper().split()[0] == "NOTIFY":
self.__check_and_reconnect(device)
descriptor = characteristic.getDescriptors(forUUID=0x2902)[0]
handle = descriptor.handle
if self.__notify_delegators.get(device) is None:
self.__notify_delegators[device] = {}
if self.__notify_delegators[device].get(handle) is None:
self.__notify_delegators[device][handle] = {'function': self.__notify_handler,
'args': (
self.__devices_around[device],
handle,
self.__notify_delegators[device].get(handle)),
'delegate': None,
}
self.__notify_delegators[device][handle]['delegate'] = self.__notify_delegators[device][handle]['function'](*self.__notify_delegators[device][handle]['args'])
data = self.__notify_delegators[device][handle]['delegate'].data
else:
self.__notify_delegators[device][handle]['args'] = (self.__devices_around[device], handle, self.__notify_delegators[device][handle]['delegate'])
self.__notify_delegators[device][handle]['delegate'] = self.__notify_delegators[device][handle]['function'](*self.__notify_delegators[device][handle]['args'])
data = self.__notify_delegators[device][handle]['delegate'].data
if data is None:
log.error('Cannot process characteristic: %s with config:\n%s', str(characteristic.uuid).upper(), pformat(characteristic_processing_conf))
else:
log.debug('data: %s', data)
return data
def __scan_ble(self):
log.debug("Scanning for devices...")
try:
self.__scanner.scan(self.__config.get('scanTimeSeconds', 5), passive=self.__config.get('passiveScanMode', False))
except BTLEManagementError as e:
log.error('BLE working only with root user.')
log.error('Or you can try this command:\nsudo setcap '
'\'cap_net_raw,cap_net_admin+eip\' %s'
'\n====== Attention! ====== '
'\nCommand above - provided access to ble devices to any user.'
'\n========================', str(bluepy_path[0] + '/bluepy-helper'))
self._connected = False
raise e
except Exception as e:
log.exception(e)
time.sleep(10)
def __fill_interest_devices(self):
if self.__config.get('devices') is None:
log.error('Devices not found in configuration file. BLE Connector stopped.')
self._connected = False
return
for interest_device in self.__config.get('devices'):
keys_in_config = ['attributes', 'telemetry']
if interest_device.get('MACAddress') is not None:
default_converter = BytesBLEUplinkConverter(interest_device)
interest_uuid = {}
for key_type in keys_in_config:
for type_section in interest_device.get(key_type):
if type_section.get("characteristicUUID") is not None:
converter = None
if type_section.get('converter') is not None:
try:
module = TBUtility.check_and_import(self.__connector_type, type_section['converter'])
if module is not None:
log.debug('Custom converter for device %s - found!', interest_device['MACAddress'])
converter = module(interest_device)
else:
log.error("\n\nCannot find extension module for device %s .\nPlease check your configuration.\n", interest_device['MACAddress'])
except Exception as e:
log.exception(e)
else:
converter = default_converter
if converter is not None:
if interest_uuid.get(type_section["characteristicUUID"].upper()) is None:
interest_uuid[type_section["characteristicUUID"].upper()] = [{'section_config': type_section,
'type': key_type,
'converter': converter}]
else:
interest_uuid[type_section["characteristicUUID"].upper()].append({'section_config': type_section,
'type': key_type,
'converter': converter})
else:
log.error("No characteristicUUID found in configuration section for %s:\n%s\n", key_type, pformat(type_section))
if self.__devices_around.get(interest_device['MACAddress'].upper()) is None:
self.__devices_around[interest_device['MACAddress'].upper()] = {}
self.__devices_around[interest_device['MACAddress'].upper()]['device_config'] = interest_device
self.__devices_around[interest_device['MACAddress'].upper()]['interest_uuid'] = interest_uuid
else:
log.error("Device address not found, please check your settings.")
class ScanDelegate(DefaultDelegate):
def __init__(self, ble_connector):
DefaultDelegate.__init__(self)
self.__connector = ble_connector
def handleDiscovery(self, dev, is_new_device, is_new_data):
if is_new_device:
self.__connector.device_add(dev)
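# ---------------------------------------------------------------------------
# Sketch of the connector configuration this class reads (key names are taken
# from the code above; the MAC address, UUIDs and intervals are placeholders):
#
# {
#   "name": "BLE Connector",
#   "rescanIntervalSeconds": 100,
#   "checkIntervalSeconds": 100,
#   "scanTimeSeconds": 5,
#   "passiveScanMode": true,
#   "devices": [
#     {
#       "name": "My BLE sensor",
#       "MACAddress": "AA:BB:CC:DD:EE:FF",
#       "telemetry": [
#         {
#           "key": "temperature",
#           "method": "read",
#           "characteristicUUID": "00002A6E-0000-1000-8000-00805F9B34FB",
#           "byteFrom": 0,
#           "byteTo": -1
#         }
#       ],
#       "attributes": [],
#       "attributeUpdates": [
#         {
#           "attributeOnThingsBoard": "sharedName",
#           "characteristicUUID": "00002A00-0000-1000-8000-00805F9B34FB"
#         }
#       ],
#       "serverSideRpc": [
#         {
#           "methodRPC": "rpcMethod1",
#           "withResponse": true,
#           "characteristicUUID": "00002A00-0000-1000-8000-00805F9B34FB",
#           "methodProcessing": "read"
#         }
#       ]
#     }
#   ]
# }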
|
the-stack_0_1077 | """The error checking chain is a list of status word
(sw1, sw2) error check strategies.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:[email protected]
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from sys import exc_info
class ErrorCheckingChain(object):
"""The error checking chain is a list of response apdu status word
(sw1, sw2) error check strategies. Each strategy in the chain is
called until an error is detected. A L{smartcard.sw.SWException}
exception is raised when an error is detected. No exception is
raised if no error is detected.
Implementation derived from Bruce Eckel, Thinking in Python. The
L{ErrorCheckingChain} implements the Chain Of Responsibility design
pattern.
"""
def __init__(self, chain, strategy):
"""constructor. Appends a strategy to the L{ErrorCheckingChain}
chain."""
self.strategy = strategy
self.chain = chain
self.chain.append(self)
self.excludes = []
def next(self):
"""Returns next error checking strategy."""
# Where this link is in the chain:
location = self.chain.index(self)
if not self.end():
return self.chain[location + 1]
def addFilterException(self, exClass):
"""Add an exception filter to the error checking chain.
@param exClass: the exception to exclude, e.g.
L{smartcard.sw.SWExceptions.WarningProcessingException} A filtered
exception will not be raised when the sw1,sw2 conditions that
would raise the exception are met.
"""
self.excludes.append(exClass)
if self.end():
return
self.next().addFilterException(exClass)
def end(self):
"""Returns True if this is the end of the error checking
strategy chain."""
return (self.chain.index(self) + 1 >= len(self.chain))
def __call__(self, data, sw1, sw2):
"""Called to test data, sw1 and sw2 for error on the chain."""
try:
self.strategy(data, sw1, sw2)
except tuple(self.excludes) as exc:
# The following additional filter may look redundant, but it isn't.
# It checks that type(exc) is *equal* to any of self.excludes,
# rather than equal-or-subclass to any of self.excludes.
# This maintains backward compatibility with the behaviour of
# pyscard <= 1.6.16.
# if exception is filtered, return
for exception in self.excludes:
if exception == exc_info()[0]:
return
# otherwise reraise exception
raise
# if not done, call next strategy
if self.end():
return
return self.next()(data, sw1, sw2)
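# A minimal usage sketch, assuming only the ErrorCheckingChain defined above;
# the two toy strategies are hypothetical stand-ins for real status-word
# checkers such as those in smartcard.sw. Any callable accepting
# (data, sw1, sw2) and raising on error can serve as a strategy.
if __name__ == '__main__':
    class _WarningStrategy(object):
        def __call__(self, data, sw1, sw2):
            if sw1 == 0x62:
                raise RuntimeError('warning status word 0x62')
    class _ErrorStrategy(object):
        def __call__(self, data, sw1, sw2):
            if sw1 == 0x6A:
                raise RuntimeError('checked error status word 0x6A')
    chain = []
    first = ErrorCheckingChain(chain, _WarningStrategy())
    ErrorCheckingChain(chain, _ErrorStrategy())
    first([], 0x90, 0x00)                 # success: no strategy raises
    first.addFilterException(RuntimeError)
    first([], 0x62, 0x00)                 # would raise, but RuntimeError is filtered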
|
the-stack_0_1078 | # -*- coding: utf-8 -*-
"""The `Common Sense Knowledge Graph <https://github.com/usc-isi-i2/cskg>`_ dataset.
- GitHub Repository: https://github.com/usc-isi-i2/cskg
- Paper: https://arxiv.org/pdf/2012.11490.pdf
- Data download: https://zenodo.org/record/4331372/files/cskg.tsv.gz
"""
import logging
from .base import SingleTabbedDataset
from ..typing import TorchRandomHint
__all__ = [
'CSKG',
]
URL = 'https://zenodo.org/record/4331372/files/cskg.tsv.gz'
class CSKG(SingleTabbedDataset):
"""The CSKG dataset.
The CSKG combines several knowledge graphs with "common sense" knowledge. It contains
2,087,833 entities, 58 relations, and 5,748,411 triples.
.. [ilievski2020] Ilievski, F., Szekely, P., & Zhang, B. (2020). `CSKG: The CommonSense Knowledge
Graph <http://arxiv.org/abs/2012.11490>`_. *arxiv*, 2012.11490.
"""
def __init__(self, create_inverse_triples: bool = False, random_state: TorchRandomHint = 0, **kwargs):
"""Initialize the `CSKG <https://github.com/usc-isi-i2/cskg>`_ dataset from [ilievski2020]_.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param random_state: The random seed to use in splitting the dataset. Defaults to 0.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.SingleTabbedDataset`.
"""
super().__init__(
url=URL,
create_inverse_triples=create_inverse_triples,
random_state=random_state,
read_csv_kwargs=dict(
usecols=['node1', 'relation', 'node2'],
),
**kwargs,
)
def _main():
ds = CSKG(eager=True)
ds.summarize()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
_main()
|
the-stack_0_1080 | """tasks_2_2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "TASKS-2"
admin.site.site_title = "TASKS-2 Admin Portal"
admin.site.index_title = "TASKS-2 Admin"
# swagger
api_info = openapi.Info(
title="TASKS-2 API",
default_version="v1",
description="API documentation for TASKS-2 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
the-stack_0_1081 | # Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
CONF = cfg.CONF
pool_manager_group = cfg.OptGroup(
name='service:pool_manager', title="Configuration for Pool Manager Service"
)
OPTS = [
cfg.IntOpt('workers',
help='Number of Pool Manager worker processes to spawn'),
cfg.IntOpt('threads', default=1000,
help='Number of Pool Manager greenthreads to spawn'),
cfg.StrOpt('pool-id', default='794ccc2c-d751-44fe-b57f-8894c9f5c842',
help='The ID of the pool managed by this instance of the '
'Pool Manager'),
cfg.IntOpt('threshold-percentage', default=100,
help='The percentage of servers requiring a successful update '
'for a zone change to be considered active',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll-timeout', default=30,
help='The time to wait for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll-retry-interval', default=15,
help='The time between retrying to send a request and '
'waiting for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll-max-retries', default=10,
help='The maximum number of times to retry sending a request '
'and wait for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll-delay', default=5,
help='The time to wait before sending the first request '
'to a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.BoolOpt('enable-recovery-timer', default=True,
help='The flag for the recovery timer'),
cfg.IntOpt('periodic-recovery-interval', default=120,
help='The time between recovering from failures'),
cfg.BoolOpt('enable-sync-timer', default=True,
help='The flag for the sync timer'),
cfg.IntOpt('periodic-sync-interval', default=1800,
help='The time between synchronizing the servers with storage'),
cfg.IntOpt('periodic-sync-seconds', default=21600,
               help='Zones updated within last N seconds will be synced. '
                    'Use an empty value to sync all zones.'),
cfg.IntOpt('periodic-sync-max-attempts', default=3,
help='Number of attempts to update a zone during sync'),
cfg.IntOpt('periodic-sync-retry-interval', default=30,
help='Interval between zone update attempts during sync'),
cfg.StrOpt('cache-driver', default='memcache',
help='The cache driver to use'),
cfg.StrOpt('pool_manager_topic', default='pool_manager',
help='RPC topic name for pool-manager')
]
def register_dynamic_pool_options():
# Pool Options Registration Pass One
# Find the Current Pool ID
pool_id = CONF['service:pool_manager'].pool_id
# Build the [pool:<id>] config section
pool_group = cfg.OptGroup('pool:%s' % pool_id)
pool_opts = [
cfg.ListOpt('targets', default=[]),
cfg.ListOpt('nameservers', default=[]),
cfg.ListOpt('also_notifies', default=[]),
]
CONF.register_group(pool_group)
CONF.register_opts(pool_opts, group=pool_group)
# Pool Options Registration Pass Two
# Find the Current Pools Target ID's
pool_target_ids = CONF['pool:%s' % pool_id].targets
# Build the [pool_target:<id>] config sections
pool_target_opts = [
cfg.StrOpt('type'),
cfg.ListOpt('masters', default=[]),
cfg.DictOpt('options', default={}, secret=True),
]
for pool_target_id in pool_target_ids:
pool_target_group = cfg.OptGroup('pool_target:%s' % pool_target_id)
CONF.register_group(pool_target_group)
CONF.register_opts(pool_target_opts, group=pool_target_group)
# Find the Current Pools Nameserver ID's
pool_nameserver_ids = CONF['pool:%s' % pool_id].nameservers
# Build the [pool_nameserver:<id>] config sections
pool_nameserver_opts = [
cfg.StrOpt('host'),
cfg.IntOpt('port'),
]
for pool_nameserver_id in pool_nameserver_ids:
pool_nameserver_group = cfg.OptGroup(
'pool_nameserver:%s' % pool_nameserver_id)
CONF.register_group(pool_nameserver_group)
CONF.register_opts(pool_nameserver_opts, group=pool_nameserver_group)
cfg.CONF.register_group(pool_manager_group)
cfg.CONF.register_opts(OPTS, group=pool_manager_group)
def list_opts():
yield pool_manager_group, OPTS
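# Illustrative sketch only (not taken from the designate sources): after
# register_dynamic_pool_options() runs, designate.conf is expected to carry
# sections shaped like the following; the target/nameserver UUIDs and the
# backend values are hypothetical placeholders.
#
#   [pool:794ccc2c-d751-44fe-b57f-8894c9f5c842]
#   targets = f26e0b32-736f-4f0a-831b-039a415c481e
#   nameservers = c278f22a-4f14-4937-8c7b-8e4a35078e4d
#
#   [pool_target:f26e0b32-736f-4f0a-831b-039a415c481e]
#   type = powerdns
#   masters = 192.0.2.1:5354
#   options = connection: mysql://user:password@192.0.2.3/designate_pdns
#
#   [pool_nameserver:c278f22a-4f14-4937-8c7b-8e4a35078e4d]
#   host = 192.0.2.2
#   port = 53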
|
the-stack_0_1083 | # 1: +train 2: -train 3: +test 4:-test
# http://pongor.itk.ppke.hu/benchmark/#/Benchmark_data_formats
import numpy as np
import os
os.system('mkdir Index')
mat = np.empty([1357,55], int)
infile = open('./CAST.txt')
lines = infile.read().splitlines()
for i in range(len(lines)):
line = lines[i]
a = line[7:].split()
for j in range(55):
mat[i,j] = int(a[j])
for i in range(55):
print(i+1)
TrainIndex = []
TestIndex = []
TrainLabel = []
TestLabel = []
for j in range(1357):
if mat[j,i] == 1 or mat[j,i] == 2:
TrainIndex.append(j)
if mat[j,i] == 1:
TrainLabel.append(1)
elif mat[j,i] == 2:
TrainLabel.append(-1)
if mat[j,i] == 3 or mat[j,i] == 4:
TestIndex.append(j)
if mat[j,i] == 3:
TestLabel.append(1)
elif mat[j,i] == 4:
TestLabel.append(-1)
TrainIndex = np.asarray(TrainIndex, int)
TestIndex = np.asarray(TestIndex, int)
TrainLabel = np.asarray(TrainLabel, int)
TestLabel = np.asarray(TestLabel, int)
print(len(TrainIndex), np.sum(TrainLabel), len(TestIndex), np.sum(TestLabel), len(TrainIndex)+len(TestIndex))
outfile = open('./Index/TrainIndex'+str(i+1)+'.npy','wb')
np.save(outfile, TrainIndex)
outfile.close()
outfile = open('./Index/TrainLabel'+str(i+1)+'.npy','wb')
np.save(outfile, TrainLabel)
outfile.close()
outfile = open('./Index/TestIndex'+str(i+1)+'.npy','wb')
np.save(outfile, TestIndex)
outfile.close()
outfile = open('./Index/TestLabel'+str(i+1)+'.npy','wb')
np.save(outfile, TestLabel)
outfile.close()
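# A small loader sketch, assuming the directory layout written above; it is
# not part of the original script and only mirrors the np.save calls in the
# loop (fold numbers are 1-based, matching the filenames).
def load_fold(k):
    train_idx = np.load('./Index/TrainIndex' + str(k) + '.npy')
    train_lbl = np.load('./Index/TrainLabel' + str(k) + '.npy')
    test_idx = np.load('./Index/TestIndex' + str(k) + '.npy')
    test_lbl = np.load('./Index/TestLabel' + str(k) + '.npy')
    return train_idx, train_lbl, test_idx, test_lbl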
|
the-stack_0_1084 | from datetime import datetime
from datetime import timedelta
import json
import requests
from requests_toolbelt import MultipartEncoder
import traceback
import types
import modules.botconfig as config
import modules.botlog as botlog
agentSession = requests.Session()
agentSession.cert = config.BotCertificate
agentV2Session = requests.Session()
agentV2Session.cert = config.BotCertificate
v2LastAuth: datetime = None
v2SessionToken = None
v2KeyAuthToken = None
def GetSessionToken():
#botlog.LogConsoleInfo(config.SessionAuthEP)
return GetSymphonyAuthToken(config.SessionAuthEP)
def GetKeyManagerToken():
#botlog.LogConsoleInfo(config.KeyManagerEP)
return GetSymphonyAuthToken(config.KeyManagerEP)
def GetSymphonyAuthToken(authEndpoint):
response = SymphonyREST('AUTH', authEndpoint, None)
return response.ResponseData.token
def BuildHeaders(sessionToken, keyAuthToken, contentType="application/json"):
RESTheaders = {
"sessionToken": sessionToken,
"keyManagerToken": keyAuthToken,
"Content-Type": contentType,
"User-Agent": "SymphonyZendeskBot (Alex Nalin - API Engineer - [email protected])"
}
return RESTheaders
def SymphonyReAuth():
global agentSession
sessionToken = GetSessionToken()
keyAuthToken = GetKeyManagerToken()
# RESTHeaders = {"sessionToken": sessionToken, "keyManagerToken": keyAuthToken,
# "Content-Type": "application/json"}
RESTHeaders = BuildHeaders(sessionToken, keyAuthToken)
# Attempting to use requests.Session
agentSession.headers.update(RESTHeaders)
def SymphonyGET(endpoint):
return SymphonyREST('GET', endpoint, None)
def SymphonyPOST(endpoint, body):
return SymphonyREST('POST', endpoint, body)
def SymphonyPOSTV2(endpoint, body):
return SymphonyREST('POSTV2', endpoint, body)
def SymphonyPOSTV2_1(endpoint, body):
return SymphonyREST('POSTV2_1', endpoint, body)
def SymphonyREST(method, endpoint, body):
retVal = SymphonyAgentResponse()
# Allowing for reauth from the async process
if method != 'AUTH' and 'sessionToken' not in agentSession.headers:
SymphonyReAuth()
try:
if method == 'GET':
response = agentSession.get(endpoint)
elif method == 'POST':
response = agentSession.post(endpoint, data=body)
elif method == 'POSTV2':
response = PostV2(endpoint, body)
elif method == 'POSTV2_1':
response = PostV2_1(endpoint, body)
elif method == 'AUTH':
response = agentSession.post(endpoint)
else:
raise MethodNotImplementedException(method + ' is not yet implemented.')
retVal.ResponseText = response.text
retVal.ResponseCode = response.status_code
if response.status_code == 200:
retVal.Success = True
retVal.ParseResponseJSON()
elif response.status_code // 100 == 2: # Any other 200 code, not success but don't throw exception
retVal.Success = True
else:
response.raise_for_status()
except requests.exceptions.HTTPError as httpex:
errorStr = "Symphony REST Exception (http): " + str(httpex)
botlog.LogConsoleInfo("Response Code: " + str(response.status_code))
botlog.LogConsoleInfo("Response Message: " + response.text)
retVal.ErrorMessage = errorStr
stackTrace = 'Stack Trace: ' + ''.join(traceback.format_stack())
botlog.LogSymphonyError(errorStr)
botlog.LogSymphonyError(stackTrace)
except requests.exceptions.RequestException as connex:
errorStr = "Symphony REST Exception (connection - Status Code " + str(response.status_code) + \
"): " + str(connex)
retVal.ErrorMessage = errorStr
stackTrace = 'Stack Trace: ' + ''.join(traceback.format_stack())
botlog.LogSymphonyError(errorStr)
botlog.LogSymphonyError(stackTrace)
except Exception as ex:
errorStr = "Symphony REST Exception (system): " + str(ex)
retVal.ErrorMessage = errorStr
stackTrace = 'Stack Trace: ' + ''.join(traceback.format_stack())
botlog.LogSystemError(errorStr)
botlog.LogSystemError(stackTrace)
finally:
return retVal
# def PostV2(endpoint, body):
# encoder = MultipartEncoder(fields=body)
#
# v2SessionToken = GetSessionToken()
# v2KeyAuthToken = GetKeyManagerToken()
#
# v2Headers = {"sessionToken": v2SessionToken, "keyManagerToken": v2KeyAuthToken,
# "Content-Type": encoder.content_type}
#
# agentV2Session.headers.update(v2Headers)
#
# return agentV2Session.post(endpoint, data=encoder)
def PostV2(endpoint, body):
global v2LastAuth
global v2SessionToken
global v2KeyAuthToken
global agentV2Session
if v2SessionToken is None or v2LastAuth is None or datetime.now() > v2LastAuth + timedelta(days=2):
v2SessionToken = GetSessionToken()
v2KeyAuthToken = GetKeyManagerToken()
v2LastAuth = datetime.now()
encoder = MultipartEncoder(fields=body)
v2Headers = BuildHeaders(v2SessionToken, v2KeyAuthToken, encoder.content_type)
agentV2Session.headers.update(v2Headers)
return agentV2Session.post(endpoint, data=encoder)
# Does not work
# I believe the problem is the Content-Type header, which does not include the boundary
# statement. If I am prepared to build the boundary myself, I might be able to get this
# to work without the requests_toolbelt package
def PostV2_1(endpoint, body):
import io
ph = io.StringIO("")
tempSession = requests.Session()
tempSession.cert = config.BotCertificate
tempSessionToken = GetSessionToken()
tempKeyAuthToken = GetKeyManagerToken()
tempHeaders = {"sessionToken": tempSessionToken, "keyManagerToken": tempKeyAuthToken,
"Content-Type": "multipart/form-data"}
tempSession.headers.update(tempHeaders)
return tempSession.post(endpoint, data=body, files=ph)
class SymphonyAgentResponse:
def __init__(self):
self.Success = False
self.ResponseText = ''
self.ResponseCode = 0
self.ErrorMessage = ''
self.ResponseData = {}
def ParseResponseJSON(self):
self.ResponseData = json.loads(self.ResponseText, object_hook=lambda d: types.SimpleNamespace(**d))
class JSONData:
def __init__(self, jsonStr):
self.__dict__ = json.loads(jsonStr)
class MethodNotImplementedException(Exception):
pass
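# Hedged usage sketch, kept commented out because it needs live credentials
# and a reachable Agent; the URL below is a hypothetical placeholder, not a
# documented endpoint taken from this module's config:
#
#   response = SymphonyGET("https://agent.example.com/agent/v2/sessioninfo")
#   if response.Success:
#       print(response.ResponseData)
#   else:
#       print(response.ErrorMessage)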
|
the-stack_0_1086 | # -*- coding: UTF-8 -*-
import unittest
from ytcc.download import Download
from unittest.mock import patch, mock_open, Mock
from test.fixtures.webvtt import FIXTURE_WEBVTT
from colorama import Fore, Style
from ytcc.download import NoCaptionsException
def red(input):
return Fore.RED + input + Style.RESET_ALL
class TestCaptions(unittest.TestCase):
def test_caption(self):
tests = [{'name': '1 video, caption found',
'urls': ['https://www.swag.com/'],
'pattern': 'vision',
'regex': False,
'links': False,
'expected': '[00:00:17.350 --> 00:00:18.752] we have this ' + red('vision') + ' of einstein'},
{'name': '1 video, caption not found',
'urls': ['https://www.swag.com/'],
'pattern': 'iwontbefound',
'regex': False,
'links': False,
'expected': '',
},
{'name': '1 video, caption found more than once',
'urls': ['https://www.swag.com/'],
'pattern': 'light',
'regex': False,
'links': False,
'expected': '[00:00:33.666 --> 00:00:38.138] actor as einstein: what ' + red('light') + ' would i see if i rode on a beam of ' + red('light') + '?',
},
{'name': '1 video, regular expression',
'urls': ['https://www.swag.com/'],
'pattern': 'actor|light',
'regex': True,
'links': False,
'expected': '[00:00:33.666 --> 00:00:38.138] ' + red('actor') + ' as einstein: what ' + red('light') + ' would i see if i rode on a beam of ' + red('light') + '?',
},
{'name': '1 video, 1 link',
'urls': ['https://www.swag.com/'],
'pattern': 'actor|light',
'regex': True,
'links': True,
'expected': '[00:00:33.666 --> 00:00:38.138] ' + red('actor') + ' as einstein: what ' + red('light') + ' would i see if i rode on a beam of ' + red('light') + '? (https://www.swag.com/&t=33s)',
},
]
for test in tests:
download = Download({'urls': test['urls'],
'pattern': test['pattern'],
'e': test['regex'],
'v': False,
'links': test['links']})
m = mock_open(read_data=FIXTURE_WEBVTT)
with patch('ytcc.download.open', m, create=True):
with patch('ytcc.storage.Storage.remove_file', Mock()):
download.get_result = Mock(return_value=0)
actual = download.get_captions()
expected = test['expected']
self.assertEqual(actual, expected)
def test_caption_captions_do_not_exist(self):
test = {
'name': 'captions do not exist',
'urls': ['https://www.swag.com/'],
'pattern': 'my pattern',
'regex': False,
'links': False,
}
download = Download({'urls': test['urls'],
'pattern': test['pattern'],
'e': test['regex'],
'v': False,
'links': test['links']})
m = mock_open(read_data=FIXTURE_WEBVTT)
m.side_effect = FileNotFoundError
with patch('ytcc.download.open', m, create=True):
with patch('ytcc.storage.Storage.remove_file', Mock()):
download.get_result = Mock(return_value=0)
with self.assertRaises(NoCaptionsException):
download.get_captions()
|
the-stack_0_1088 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from .base import BaseHandler
from ..dao.notice_dao import NoticeDao
logger = logging.getLogger('nebula.api.batch_notice')
class BatchBWListHandler(BaseHandler):
def get(self):
"""
        Batch-query the set of values on the currently unexpired black/white/grey lists.
        @API
        summary: Batch-query the set of values on the currently unexpired black/white/grey lists.
        notes: Batch-query the currently unexpired black/white/grey list values; returns the values as a comma-separated string.
tags:
- platform
parameters:
-
name: strategy
in: query
required: false
type: string
description: filter by strategy name
-
name: scene_type
in: query
required: false
type: string
description: scene type filter statement, ex. login, visit
-
name: check_type
in: query
required: false
type: string
default: IP
description: check Type filter statement, ex. IP, MOBILE
-
name: decision
in: query
required: false
default: reject
type: string
description: decision filter statement, ex. accept, reject
-
name: test
in: query
required: false
type: string
default: false
description: test notice is test or production
produces:
- text/plain
"""
strategy = self.get_argument('strategy', None)
scene_type = self.get_argument('scene_type', None)
check_type = self.get_argument('check_type', 'IP')
decision = self.get_argument('decision', "reject")
test = self.get_argument('test', 'false')
if test == "true":
test = 1
elif test == "false":
test = 0
else:
test = None
result = ''
try:
ND = NoticeDao()
data = ND.get_unexpired_notice_data(strategy=strategy, check_type=check_type, decision=decision, test=test,
scene_type=scene_type)
result = ",".join(data)
except Exception as err:
logger.error(err)
self.set_header('content-type', 'text/plain')
self.write(result)
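# A hypothetical request against this handler (the URL prefix under which it
# is mounted is defined elsewhere and is only a guess here):
#   GET /platform/batch_bw_list?check_type=IP&decision=reject&test=false
# would answer with a comma-separated, plain-text list of the unexpired
# rejected IP values.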
|
the-stack_0_1089 | """This component provides Switches for Unifi Protect."""
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from .const import (
ATTR_DEVICE_MODEL,
CONF_IR_OFF,
CONF_IR_ON,
DEFAULT_ATTRIBUTION,
DOMAIN,
TYPE_HIGH_FPS_ON,
TYPE_RECORD_ALWAYS,
TYPE_RECORD_MOTION,
TYPE_RECORD_NEVER,
TYPE_RECORD_OFF,
TYPE_RECORD_SMARTDETECT,
)
from .entity import UnifiProtectEntity
_LOGGER = logging.getLogger(__name__)
_SWITCH_NAME = 0
_SWITCH_ICON = 1
_SWITCH_TYPE = 2
_SWITCH_REQUIRES = 3
SWITCH_TYPES = {
"record_motion": [
"Record Motion",
"video-outline",
"record_motion",
"recording_mode",
],
"record_always": ["Record Always", "video", "record_always", "recording_mode"],
"record_smart": ["Record Smart", "video", "record_smart", "has_smartdetect"],
"ir_mode": ["IR Active", "brightness-4", "ir_mode", "ir_mode"],
"status_light": ["Status Light On", "led-on", "status_light", None],
"hdr_mode": ["HDR Mode", "brightness-7", "hdr_mode", "has_hdr"],
"high_fps": ["High FPS", "video-high-definition", "high_fps", "has_highfps"],
"light_motion": [
"Light when Motion",
"motion-sensor",
"light_motion",
"motion_mode",
],
"light_dark": ["Light when Dark", "motion-sensor", "light_dark", "motion_mode"],
}
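# Each SWITCH_TYPES value is unpacked with the _SWITCH_* index constants above;
# for example "record_motion" maps to display name "Record Motion", icon
# "video-outline", internal type "record_motion", and required device field
# "recording_mode".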
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up switches for UniFi Protect integration."""
entry_data = hass.data[DOMAIN][entry.entry_id]
upv_object = entry_data["upv"]
protect_data = entry_data["protect_data"]
server_info = entry_data["server_info"]
if not protect_data.data:
return
ir_on = entry.data[CONF_IR_ON]
if ir_on == "always_on":
ir_on = "on"
ir_off = entry.data[CONF_IR_OFF]
if ir_off == "led_off":
ir_off = "autoFilterOnly"
elif ir_off == "always_off":
ir_off = "off"
switches = []
for switch, switch_type in SWITCH_TYPES.items():
required_field = switch_type[_SWITCH_REQUIRES]
for device_id in protect_data.data:
# Only Add Switches if Device supports it.
if required_field and not protect_data.data[device_id].get(required_field):
continue
switches.append(
UnifiProtectSwitch(
upv_object,
protect_data,
server_info,
device_id,
switch,
ir_on,
ir_off,
)
)
_LOGGER.debug("UNIFIPROTECT SWITCH CREATED: %s", switch)
async_add_entities(switches)
class UnifiProtectSwitch(UnifiProtectEntity, SwitchEntity):
"""A Unifi Protect Switch."""
def __init__(
self, upv_object, protect_data, server_info, device_id, switch, ir_on, ir_off
):
"""Initialize an Unifi Protect Switch."""
super().__init__(upv_object, protect_data, server_info, device_id, switch)
self.upv = upv_object
switch_type = SWITCH_TYPES[switch]
self._name = f"{switch_type[_SWITCH_NAME]} {self._device_data['name']}"
self._icon = f"mdi:{switch_type[_SWITCH_ICON]}"
self._ir_on_cmd = ir_on
self._ir_off_cmd = ir_off
self._switch_type = switch_type[_SWITCH_TYPE]
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
if self._switch_type == "record_motion":
return self._device_data["recording_mode"] == TYPE_RECORD_MOTION
if self._switch_type == "record_always":
return self._device_data["recording_mode"] == TYPE_RECORD_ALWAYS
if self._switch_type == "record_smart":
return self._device_data["recording_mode"] == TYPE_RECORD_SMARTDETECT
if self._switch_type == "ir_mode":
return self._device_data["ir_mode"] == self._ir_on_cmd
if self._switch_type == "hdr_mode":
return self._device_data["hdr_mode"] is True
if self._switch_type == "high_fps":
return self._device_data["video_mode"] == TYPE_HIGH_FPS_ON
if self._switch_type == "light_motion":
return self._device_data["motion_mode"] == TYPE_RECORD_MOTION
if self._switch_type == "light_dark":
return self._device_data["motion_mode"] == TYPE_RECORD_ALWAYS
return self._device_data["status_light"] is True
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return {
ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
ATTR_DEVICE_MODEL: self._model,
}
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
if self._switch_type == "record_motion":
_LOGGER.debug("Turning on Motion Detection for %s", self._name)
await self.upv.set_camera_recording(self._device_id, TYPE_RECORD_MOTION)
elif self._switch_type == "record_always":
_LOGGER.debug("Turning on Constant Recording")
await self.upv.set_camera_recording(self._device_id, TYPE_RECORD_ALWAYS)
elif self._switch_type == "record_smart":
_LOGGER.debug("Turning on SmartDetect Recording")
await self.upv.set_camera_recording(
self._device_id, TYPE_RECORD_SMARTDETECT
)
elif self._switch_type == "ir_mode":
_LOGGER.debug("Turning on IR")
await self.upv.set_camera_ir(self._device_id, self._ir_on_cmd)
elif self._switch_type == "hdr_mode":
_LOGGER.debug("Turning on HDR mode")
await self.upv.set_camera_hdr_mode(self._device_id, True)
elif self._switch_type == "high_fps":
_LOGGER.debug("Turning on High FPS mode")
await self.upv.set_camera_video_mode_highfps(self._device_id, True)
elif self._switch_type == "light_motion":
_LOGGER.debug("Turning on Light Motion detection")
await self.upv.light_settings(
self._device_id, TYPE_RECORD_MOTION, enable_at="fulltime"
)
elif self._switch_type == "light_dark":
_LOGGER.debug("Turning on Light Motion when Dark")
await self.upv.light_settings(
self._device_id, TYPE_RECORD_ALWAYS, enable_at="dark"
)
else:
_LOGGER.debug("Changing Status Light to On")
await self.upv.set_device_status_light(
self._device_id, True, self._device_type
)
await self.protect_data.async_refresh(force_camera_update=True)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
if self._switch_type == "ir_mode":
_LOGGER.debug("Turning off IR")
await self.upv.set_camera_ir(self._device_id, self._ir_off_cmd)
elif self._switch_type == "status_light":
_LOGGER.debug("Changing Status Light to Off")
await self.upv.set_device_status_light(
self._device_id, False, self._device_type
)
elif self._switch_type == "hdr_mode":
_LOGGER.debug("Turning off HDR mode")
await self.upv.set_camera_hdr_mode(self._device_id, False)
elif self._switch_type == "high_fps":
_LOGGER.debug("Turning off High FPS mode")
await self.upv.set_camera_video_mode_highfps(self._device_id, False)
elif self._switch_type == "light_motion":
_LOGGER.debug("Turning off Light Motion detection")
await self.upv.light_settings(self._device_id, TYPE_RECORD_OFF)
elif self._switch_type == "light_dark":
_LOGGER.debug("Turning off Light Motion when Dark")
await self.upv.light_settings(self._device_id, TYPE_RECORD_OFF)
else:
_LOGGER.debug("Turning off Recording")
await self.upv.set_camera_recording(self._device_id, TYPE_RECORD_NEVER)
await self.protect_data.async_refresh(force_camera_update=True)
|
the-stack_0_1090 | import urllib
from calcrepo import info
from calcrepo import repo
name = "ticalc"
url = "http://www.ticalc.org/"
enabled = True
class TicalcRepository(repo.CalcRepository):
def formatDownloadUrl(self, url):
return "http://www.ticalc.org" + url
def updateRepoIndexes(self, verbose=False):
self.printd("Reading ticalc.org master index (this will take some time).")
# First read in the text (the only network process involved)
masterIndex = urllib.urlopen('http://www.ticalc.org/pub/master.index').read()
self.printd(" Read in ticalc.org master index.")
# Delete and open new indices
files = self.openIndex(self.index.fileIndex, "files index")
names = self.openIndex(self.index.nameIndex, "names index")
if files is None or names is None:
            try:
                files.close()
            except:
                pass
            return
# Now, parse the enormous data and write index files
self.printd(" ")
masterIndex = masterIndex[39:]
directory = ""
while len(masterIndex) > 2:
line = masterIndex[:masterIndex.find('\n')]
masterIndex = masterIndex[masterIndex.find('\n') + 1:]
if line == "":
continue
if line[:9] == "Index of ":
dirData = line[9:]
directory = dirData[:dirData.find(" ")]
if verbose:
self.printd(" Caching " + line[9:])
else:
fileData = line[:line.find(" ")]
files.write(directory + '/' + fileData + '\n')
nameData = line[len(fileData)+1:].lstrip()
names.write(nameData + '\n')
# Close the indexes now
files.close()
names.close()
self.printd("Finished updating ticalc.org repo.\n")
def getFileInfo(self, fileUrl, fileName):
#Get the category path for the file
categoryPath = "http://www.ticalc.org/"
splitUrls = fileUrl.split('/')
for splitUrl in splitUrls:
if splitUrl != "" and (not "." in splitUrl):
categoryPath += splitUrl + '/'
#Now open the category page and extract the URL for the file info page
categoryPage = urllib.urlopen(categoryPath, "")
categoryData = categoryPage.read()
categoryPage.close()
index = categoryData.find(fileUrl) - 7
rIndex = categoryData.rfind('A HREF="', 0, index)
infoUrl = categoryData[rIndex + 9:]
infoUrl = "http://www.ticalc.org/" + infoUrl[:infoUrl.find('">')]
#Create a file info object
fileInfo = info.FileInfo(fileUrl, fileName, infoUrl, self.output)
infoPage = urllib.urlopen(infoUrl)
infoText = infoPage.read()
infoPage.close()
#Fill in all the data bits
fileInfo.description = self.getBaseFileData(infoText, "Description")
fileInfo.fileSize = self.getBaseFileData(infoText, "File Size")
fileInfo.fileDate = self.getBaseFileData(infoText, "File Date and Time", 47, 2)
fileInfo.documentation = self.getBaseFileData(infoText, "Documentation Included?")
fileInfo.sourceCode = self.getBaseFileData(infoText, "Source Code")
fileInfo.category = self.getFileCategory(infoText)
fileInfo.author = self.getFileAuthor(infoText)
fileInfo.downloads = self.getNumDownloads(infoText)
fileInfo.repository = self.name
#Print the file info object
fileInfo.printData(self.output)
return fileInfo
def getBaseFileData(self, fileInfo, data, index1 = 47, index2 = 1):
"""Function to initialize the simple data for file info"""
result = fileInfo[fileInfo.find(data):]
result = result[result.find("<FONT ") + index1:]
result = result[:result.find("</FONT>") - index2]
return result
def getFileCategory(self, fileInfo):
"""Function to get the file category for file info"""
category = fileInfo[fileInfo.find("Category"):]
category = category[category.find("<FONT ") + 47:]
category = category[category.find('">') + 2:]
category = category[:category.find("</A></B>") - 0]
return category
def getFileAuthor(self, fileInfo):
"""Function to get the file's author for file info, note that we are pretending that multiple authors do not exist here"""
author = fileInfo[fileInfo.find("Author"):]
author = author[author.find("<FONT ") + 47:]
author = author[author.find('<B>') + 3:]
authormail = author[author.find("mailto:") + 7:]
authormail = authormail[:authormail.find('"')]
author = author[:author.find("</B></A>") - 0]
author = author + " (" + authormail + ")"
return author
def getNumDownloads(self, fileInfo):
"""Function to get the number of times a file has been downloaded"""
downloads = fileInfo[fileInfo.find("FILE INFORMATION"):]
if -1 != fileInfo.find("not included in ranking"):
return "0"
downloads = downloads[:downloads.find(".<BR>")]
downloads = downloads[downloads.find("</A> with ") + len("</A> with "):]
return downloads
def getRepository():
"""Returns the relevant CalcRepository object for this repo file"""
global name, url
return TicalcRepository(name, url)
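# For orientation only: judging from the parsing in updateRepoIndexes above
# (and not from any ticalc.org documentation), master.index is expected to
# interleave "Index of <directory> ..." header lines with
# "<filename> <description>" entries, roughly like
#   Index of /pub/83plus/asm/games ...
#   phoenix.zip      Phoenix
# where the directory and file shown are hypothetical examples.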
|
the-stack_0_1092 | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
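# The walk cost is sum(|a[i+1] - a[i]|) over the zero-padded sequence
# 0, a[1], ..., a[n], 0; skipping stop i replaces the two adjacent hops
# diff[i-1] + diff[i] by the single hop |a[i+1] - a[i-1]|.
# Worked example: padded a = [0, 3, 5, -1, 0] gives diffs 3, 2, 6, 1 and
# total 12; skipping a[2] = 5 yields 12 - (2 + 6) + |-1 - 3| = 8.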
n = int(input())
a = [0] + list(map(int, input().split())) + [0]
diff = [0] * (n + 1)
for i in range(n + 1):
diff[i] = abs(a[i+1] - a[i])
ans = sum(diff)
for i in range(1, n + 1):
print(ans - (diff[i-1] + diff[i]) + (abs(a[i+1] - a[i-1])))
|
the-stack_0_1094 | import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(
self,
plotly_name="dtickrange",
parent_name="scatter3d.marker.colorbar.tickformatstop",
**kwargs
):
super(DtickrangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
items=kwargs.pop(
"items",
[
{"valType": "any", "editType": "calc"},
{"valType": "any", "editType": "calc"},
],
),
role=kwargs.pop("role", "info"),
**kwargs
)
|
the-stack_0_1096 | # -*- coding: utf-8 -*-
#########################################################################
# Copyright (C) 2011 Cameron Franc and Marc Masdeu
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#########################################################################
r"""
Spaces of `p`-adic automorphic forms
Compute with harmonic cocycles and `p`-adic automorphic forms, including
overconvergent `p`-adic automorphic forms.
For a discussion of nearly rigid analytic modular forms and
the rigid analytic Shimura-Maass operator, see [F]_. It is worth also
looking at [FM]_ for information on how these are implemented in this code.
EXAMPLES:
Create a quotient of the Bruhat-Tits tree::
sage: X = BruhatTitsQuotient(13,11)
Declare the corresponding space of harmonic cocycles::
sage: H = X.harmonic_cocycles(2,prec=5)
And the space of `p`-adic automorphic forms::
sage: A = X.padic_automorphic_forms(2,prec=5,overconvergent=True)
Harmonic cocycles, unlike `p`-adic automorphic forms, can be used to compute a basis::
sage: a = H.gen(0)
This can then be lifted to an overconvergent `p`-adic modular form::
sage: A.lift(a) # long time
p-adic automorphic form of cohomological weight 0
REFERENCES:
.. [F] Nearly rigid analytic modular forms and their values at CM points
Cameron Franc
Ph.D. thesis, McGill University, 2011.
"""
from __future__ import print_function
from builtins import zip
from sage.modular.btquotients.btquotient import DoubleCosetReduction
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.richcmp import op_EQ, op_NE
from sage.matrix.matrix_space import MatrixSpace
from sage.structure.element import ModuleElement
from sage.modules.module import Module
from sage.rings.all import Integer
from sage.matrix.constructor import Matrix, zero_matrix
from sage.rings.all import Qp, QQ, ZZ
from copy import copy
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.modular.hecke.all import (AmbientHeckeModule, HeckeModuleElement)
from sage.rings.infinity import Infinity
import sage.modular.hecke.hecke_operator
from sage.misc.misc import verbose
from sage.rings.real_mpfr import RR
from sage.modular.pollack_stevens.sigma0 import Sigma0ActionAdjuster
from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# Need this to be pickleable
class _btquot_adjuster(Sigma0ActionAdjuster):
"""
Callable object that turns matrices into 4-tuples.
Since the modular symbol and harmonic cocycle code use different
conventions for group actions, this function is used to make sure
that actions are correct for harmonic cocycle computations.
EXAMPLES::
sage: from sage.modular.btquotients.pautomorphicform import _btquot_adjuster
sage: adj = _btquot_adjuster()
sage: adj(matrix(ZZ,2,2,[1..4]))
(4, 2, 3, 1)
"""
def __call__(self, g):
"""
Turn matrices into 4-tuples.
INPUT:
- ``g`` - a 2x2 matrix
OUTPUT:
A 4-tuple encoding the entries of ``g``.
EXAMPLES::
sage: from sage.modular.btquotients.pautomorphicform import _btquot_adjuster
sage: adj = _btquot_adjuster()
sage: adj(matrix(ZZ,2,2,[0, 1, 2, 3]))
(3, 1, 2, 0)
"""
a, b, c, d = g.list()
return (d, b, c, a)
def eval_dist_at_powseries(phi, f):
"""
Evaluate a distribution on a powerseries.
A distribution is an element in the dual of the Tate ring. The
elements of coefficient modules of overconvergent modular symbols
and overconvergent `p`-adic automorphic forms give examples of
distributions in Sage.
INPUT:
- ``phi`` - a distribution
- ``f`` - a power series over a ring coercible into a `p`-adic field
OUTPUT:
The value of ``phi`` evaluated at ``f``, which will be an element in the
ring of definition of ``f``
EXAMPLES::
sage: from sage.modular.btquotients.pautomorphicform import eval_dist_at_powseries
sage: R.<X> = PowerSeriesRing(ZZ,10)
sage: f = (1 - 7*X)^(-1)
sage: D = OverconvergentDistributions(0,7,10)
sage: phi = D(list(range(1,11)))
sage: eval_dist_at_powseries(phi,f)
1 + 2*7 + 3*7^2 + 4*7^3 + 5*7^4 + 6*7^5 + 2*7^7 + 3*7^8 + 4*7^9 + O(7^10)
"""
nmoments = phi.parent().precision_cap()
K = f.parent().base_ring()
if K.is_exact():
K = phi.parent().base_ring()
return sum(a * K(phi.moment(i))
for a, i in zip(f.coefficients(), f.exponents())
if i >= 0 and i < nmoments)
class BruhatTitsHarmonicCocycleElement(HeckeModuleElement):
r"""
`\Gamma`-invariant harmonic cocycles on the Bruhat-Tits
tree. `\Gamma`-invariance is necessary so that the cocycle can be
stored in terms of a finite amount of data.
More precisely, given a ``BruhatTitsQuotient`` `T`, harmonic cocycles are stored as
a list of values in some coefficient module (e.g. for weight 2 forms
can take `\CC_p`) indexed by edges of a fundamental domain for `T` in the
Bruhat-Tits tree. Evaluate the cocycle at other edges using Gamma
invariance (although the values may not be equal over an orbit of
edges as the coefficient module action may be nontrivial).
EXAMPLES:
Harmonic cocycles form a vector space, so they can be added and/or
subtracted from each other::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]; v2 = H.basis()[1] # indirect doctest
sage: v3 = v1+v2
sage: v1 == v3-v2
True
and rescaled::
sage: v4 = 2*v1
sage: v1 == v4 - v1
True
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu
"""
def __init__(self, _parent, vec):
"""
Create a harmonic cocycle element.
INPUT:
- ``_parent`` : the parent space of harmonic cocycles.
- ``vec`` : a list of elements in the coefficient module.
EXAMPLES::
sage: X = BruhatTitsQuotient(31,7)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v = H.basis()[0] # indirect doctest
sage: TestSuite(v).run()
"""
HeckeModuleElement.__init__(self, _parent, None)
self._parent = _parent
assert type(vec) is list
assert all(v.parent() is _parent._U for v in vec)
self._R = _parent._U.base_ring()
self._wt = _parent._k
self._nE = len(_parent._E)
self._F = copy(vec)
def _add_(self, g):
r"""
Add two cocycles componentwise.
INPUT:
- ``g`` - a harmonic cocycle
OUTPUT:
A harmonic cocycle
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]; v2 = H.basis()[1]
sage: v3 = v1+v2 # indirect doctest
sage: v1 == v3-v2
True
"""
return self.parent()(self.element() + g.element())
def _sub_(self, g):
r"""
Compute the difference of two cocycles.
INPUT:
- ``g`` - a harmonic cocycle
OUTPUT:
A harmonic cocycle
EXAMPLES::
sage: X = BruhatTitsQuotient(5,11)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]; v2 = H.basis()[1]
sage: v3 = v1-v2 # indirect doctest
sage: v1 == v3+v2
True
"""
# Should ensure that self and g are modular forms of the same
# weight and on the same curve
return self.parent()(self.element() - g.element())
def _lmul_(self, a):
r"""
Multiply a cocycle by a scalar.
INPUT:
- ``a`` - a ring element
OUTPUT:
A harmonic cocycle
EXAMPLES::
sage: X = BruhatTitsQuotient(3,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]
sage: v2 = 2*v1 # indirect doctest
sage: v1 == v2-v1
True
"""
# Should ensure that 'a' is a scalar
return self.parent()(a * self.element())
def _richcmp_(self, other, op):
r"""
General comparison method for ``HarmonicCocycles``
INPUT:
- ``other`` - Another harmonic cocycle
EXAMPLES::
sage: X = BruhatTitsQuotient(11,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]
sage: v2 = 3*v1 # indirect doctest
sage: 2*v1 == v2-v1
True
"""
if op not in [op_EQ, op_NE]:
return NotImplemented
b = all(self._F[e] == other._F[e] for e in range(self._nE))
if op == op_EQ:
return b
return not b
def _repr_(self):
r"""
Return a string describing the cocycle.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,13)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.basis()[0] # indirect doctest
Harmonic cocycle with values in Sym^0 Q_5^2
"""
return 'Harmonic cocycle with values in %s' % self.parent()._U
def monomial_coefficients(self):
r"""
Void method to comply with pickling.
EXAMPLES::
sage: M = BruhatTitsQuotient(3,5).harmonic_cocycles(2,prec=10)
sage: M.monomial_coefficients()
{}
"""
return {}
def print_values(self):
r"""
Print the values of the cocycle on all of the edges.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.basis()[0].print_values()
0 |1 + O(5^10)
1 |0
2 |0
3 |4 + 4*5 + 4*5^2 + 4*5^3 + 4*5^4 + 4*5^5 + 4*5^6 + 4*5^7 + 4*5^8 + 4*5^9 + O(5^10)
4 |0
5 |0
6 |0
7 |0
8 |0
9 |0
10 |0
11 |0
"""
tmp = ''
for e in range(self._nE):
tmp += str(e) + '\t|'+ str(self._F[e]) + '\n'
        print(tmp[:-1])
def valuation(self):
r"""
Return the valuation of the cocycle, defined as the
minimum of the values it takes on a set of representatives.
OUTPUT:
An integer.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: b1 = H.basis()[0]
sage: b2 = 3*b1
sage: b1.valuation()
0
sage: b2.valuation()
1
sage: H(0).valuation()
+Infinity
"""
if self == 0:
return Infinity
else:
return min(self._F[e].valuation() for e in range(self._nE))
def _compute_element(self):
r"""
Express a harmonic cocycle in a coordinate vector.
OUTPUT:
A coordinate vector encoding ``self`` in terms of the ambient
basis in ``self.parent``
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.basis()[0]._compute_element()
(1 + O(3^9), O(3^9), 0)
sage: H.basis()[1]._compute_element()
(0, 1 + O(3^9), 0)
sage: H.basis()[2]._compute_element()
(0, O(3^9), 1 + O(3^10))
"""
R = self._R
A = self.parent().basis_matrix().transpose()
B = Matrix(R, self._nE * (self.parent()._k - 1), 1,
[self._F[e].moment(ii) for e in range(self._nE)
for ii in range(self.parent()._k - 1)])
try:
res = (A.solve_right(B)).transpose()
except ValueError:
rest = (A.transpose() * A).solve_right(A.transpose() * B)
err = A * rest - B
if err != 0:
try:
if hasattr(err.parent().base_ring().an_element(),
'valuation'):
minval = min([o.valuation() for o in err.list()
if o != 0])
else:
minval = sum([RR(o.norm() ** 2) for o in err.list()])
verbose('Error = %s' % minval)
except AttributeError:
verbose('Warning: something did not work in the '
'computation')
res = rest.transpose()
return self.parent().free_module()(res.row(0))
#In BruhatTitsHarmonicCocycle
def evaluate(self, e1):
r"""
Evaluate a harmonic cocycle on an edge of the Bruhat-Tits tree.
INPUT:
- ``e1`` - a matrix corresponding to an edge of the
Bruhat-Tits tree
OUTPUT:
- An element of the coefficient module of the cocycle which
describes the value of the cocycle on ``e1``
EXAMPLES::
sage: X = BruhatTitsQuotient(5,17)
sage: e0 = X.get_edge_list()[0]
sage: e1 = X.get_edge_list()[1]
sage: H = X.harmonic_cocycles(2,prec=10)
sage: b = H.basis()[0]
sage: b.evaluate(e0.rep)
1 + O(5^10)
sage: b.evaluate(e1.rep)
4 + 4*5 + 4*5^2 + 4*5^3 + 4*5^4 + 4*5^5 + 4*5^6 + 4*5^7 + 4*5^8 + 4*5^9 + O(5^10)
"""
X = self.parent()._X
p = X._p
u = DoubleCosetReduction(X, e1)
if u.label < self._nE:
val = self._F[u.label]
else:
val = -self._F[u.label - self._nE]
return u.igamma(self.parent().embed_quaternion, scale=p ** (-u.power)) * val
#In BruhatTitsHarmonicCocycle
def riemann_sum(self, f, center=1, level=0, E=None):
r"""
Evaluate the integral of the function ``f`` with respect
to the measure determined by ``self`` over `\mathbf{P}^1(\QQ_p)`.
INPUT:
- ``f`` - a function on `\mathbf{P}^1(\QQ_p)`.
- ``center`` - An integer (default = 1). Center of integration.
- ``level`` - An integer (default = 0). Determines the size of
the covering when computing the Riemann sum. Runtime is
exponential in the level.
- ``E`` - A list of edges (default = None). They should describe
a covering of `\mathbf{P}^1(\QQ_p)`.
OUTPUT:
A `p`-adic number.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: b = H.basis()[0]
sage: R.<z> = PolynomialRing(QQ,1)
sage: f = z^2
Note that `f` has a pole at infinity, so that the result will
be meaningless::
sage: b.riemann_sum(f,level=0)
1 + 5 + 2*5^3 + 4*5^4 + 2*5^5 + 3*5^6 + 3*5^7 + 2*5^8 + 4*5^9 + O(5^10)
"""
R1 = LaurentSeriesRing(f.base_ring(), 'r1')
if E is None:
E = self.parent()._X._BT.get_balls(center, level)
else:
E = self.parent()._X._BT.subdivide(E, level)
value = 0
ii = 0
for e in E:
ii += 1
expansion = ((R1([e[1, 1], e[1, 0]]) ** (self.parent()._k - 2) * e.determinant() ** (-(self.parent()._k - 2) / 2)) * f(R1([e[0, 1], e[0, 0]]) / R1([e[1, 1], e[1, 0]]))).truncate(self.parent()._k - 1)
dist = self.parent()._Sigma0(e.inverse(), check=False) * self.evaluate(e)
value += eval_dist_at_powseries(dist, expansion)
return value
def modular_form(self, z=None, level=0):
r"""
Integrate Teitelbaum's `p`-adic Poisson kernel against
the measure corresponding to ``self`` to evaluate the associated
modular form at ``z``.
If ``z`` = None, a function is returned that encodes the modular form.
.. NOTE::
This function uses the integration method of Riemann
summation and is incredibly slow! It should only be used for
testing and bug-finding. Overconvergent methods are quicker.
INPUT:
- ``z`` - an element in the quadratic unramified extension of
`\QQ_p` that is not contained in `\QQ_p` (default = None).
- ``level`` - an integer. How fine of a mesh should the Riemann
sum use.
OUTPUT:
An element of the quadratic unramified extension of `\QQ_p`.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,23)
sage: H = X.harmonic_cocycles(2,prec = 8)
sage: b = H.basis()[0]
sage: R.<a> = Qq(9,prec=10)
sage: x1 = b.modular_form(a,level = 0); x1
a + (2*a + 1)*3 + (a + 1)*3^2 + (a + 1)*3^3 + 3^4 + (a + 2)*3^5 + O(3^7)
sage: x2 = b.modular_form(a,level = 1); x2
a + (a + 2)*3 + (2*a + 1)*3^3 + (2*a + 1)*3^4 + 3^5 + (a + 2)*3^6 + O(3^7)
sage: x3 = b.modular_form(a,level = 2); x3
a + (a + 2)*3 + (2*a + 2)*3^2 + 2*a*3^4 + (a + 1)*3^5 + 3^6 + O(3^7)
sage: x4 = b.modular_form(a,level = 3);x4
a + (a + 2)*3 + (2*a + 2)*3^2 + (2*a + 2)*3^3 + 2*a*3^5 + a*3^6 + O(3^7)
sage: (x4-x3).valuation()
3
TESTS:
Check that :trac:`22634` is fixed::
sage: X = BruhatTitsQuotient(7,2)
sage: H = X.harmonic_cocycles(4,20)
sage: f0, g0 = H.basis()
sage: A = X.padic_automorphic_forms(4,20,overconvergent=True)
sage: f = A.lift(f0).modular_form(method='moments')
sage: T.<x> = Qq(7^2,20)
sage: a,b,c,d = X.embed_quaternion(X.get_units_of_order()[1]).change_ring(Qp(7,20)).list()
sage: (c*x + d)^4 * f(x) == f((a*x + b)/(c*x + d))
True
sage: g = A.lift(g0).modular_form(method='moments')
sage: (c*x + d)^4 * f(x) == f((a*x + b)/(c*x + d))
True
"""
return self.derivative(z, level, order=0)
# In BruhatTitsHarmonicCocycle
def derivative(self, z=None, level=0, order=1):
r"""
Integrate Teitelbaum's `p`-adic Poisson kernel against
the measure corresponding to ``self`` to evaluate the rigid
analytic Shimura-Maass derivatives of the associated modular
form at `z`.
If ``z = None``, a function is returned that encodes the
derivative of the modular form.
.. NOTE::
This function uses the integration method of Riemann
summation and is incredibly slow! It should only be used for
testing and bug-finding. Overconvergent methods are quicker.
INPUT:
- ``z`` - an element in the quadratic unramified extension of
`\QQ_p` that is not contained in `\QQ_p` (default = None). If ``z
= None`` then a function encoding the derivative is returned.
- ``level`` - an integer. How fine of a mesh should the Riemann
sum use.
- ``order`` - an integer. How many derivatives to take.
OUTPUT:
An element of the quadratic unramified extension of `\QQ_p`, or
a function encoding the derivative.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,23)
sage: H = X.harmonic_cocycles(2,prec=5)
sage: b = H.basis()[0]
sage: R.<a> = Qq(9,prec=10)
sage: b.modular_form(a,level=0) == b.derivative(a,level=0,order=0)
True
sage: b.derivative(a,level=1,order=1)
(2*a + 2)*3 + (a + 2)*3^2 + 2*a*3^3 + O(3^4)
sage: b.derivative(a,level=2,order=1)
(2*a + 2)*3 + 2*a*3^2 + 3^3 + O(3^4)
"""
def F(z):
R = PolynomialRing(z.parent(), 'x,y').fraction_field()
Rx = PolynomialRing(z.parent(), 'x1').fraction_field()
x1 = Rx.gen()
subst = R.hom([x1, z], codomain=Rx)
x, y = R.gens()
center = self.parent()._X._BT.find_containing_affinoid(z)
zbar = z.trace() - z
f = R(1) / (x - y)
k = self.parent()._k
V = [f]
for ii in range(order):
V = [v.derivative(y) for v in V] + [k / (y - zbar) * v
for v in V]
k += 2
return sum([self.riemann_sum(subst(v), center, level) for v in V])
if z is None:
return F
else:
return F(z)
class BruhatTitsHarmonicCocycles(AmbientHeckeModule, UniqueRepresentation):
r"""
Ensure unique representation
EXAMPLES::
sage: X = BruhatTitsQuotient(3,5)
sage: M1 = X.harmonic_cocycles( 2, prec = 10)
sage: M2 = X.harmonic_cocycles( 2, 10)
sage: M1 is M2
True
"""
Element = BruhatTitsHarmonicCocycleElement
@staticmethod
def __classcall__(cls, X, k, prec=None, basis_matrix=None, base_field=None):
r"""
Represent a space of Gamma invariant harmonic
cocycles valued in a coefficient module.
INPUT:
- ``X`` - A BruhatTitsQuotient object
- ``k`` - integer - The weight. It must be even.
- ``prec`` - integer (default: None). If specified, the
precision for the coefficient module
- ``basis_matrix`` - a matrix (default: None).
- ``base_field`` - a ring (default: None)
EXAMPLES::
sage: X = BruhatTitsQuotient(3,23)
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: H.dimension()
3
sage: X.genus()
3
Higher even weights are implemented::
sage: H = X.harmonic_cocycles(8, prec = 10)
sage: H.dimension()
26
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu
"""
return super(BruhatTitsHarmonicCocycles, cls).__classcall__(cls, X, k, prec,
basis_matrix,
base_field)
def __init__(self, X, k, prec=None, basis_matrix=None, base_field=None):
"""
Compute the space of harmonic cocycles.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,37)
sage: H = X.harmonic_cocycles(4,prec=10)
sage: TestSuite(H).run()
"""
self._k = k
self._X = X
self._E = self._X.get_edge_list()
self._V = self._X.get_vertex_list()
if base_field is not None and not base_field.is_exact():
prec = base_field.precision_cap()
if prec is None:
self._prec = None # Be careful!
if base_field is None:
try:
self._R = X.get_splitting_field()
except AttributeError:
raise ValueError("It looks like you are not using Magma as"
" backend...and still we don't know how "
"to compute splittings in that case!")
else:
pol = X.get_splitting_field().defining_polynomial().factor()[0][0]
self._R = base_field.extension(pol, pol.variable_name()).absolute_field(name='r')
else:
self._prec = prec
if base_field is None:
self._R = Qp(self._X._p, prec=prec)
else:
self._R = base_field
self._U = Symk(self._k - 2, base=self._R, act_on_left=True,
adjuster=_btquot_adjuster(),
dettwist=-ZZ((self._k - 2) / 2), act_padic=True)
if basis_matrix is None:
self.__rank = self._X.dimension_harmonic_cocycles(self._k)
else:
self.__rank = basis_matrix.nrows()
if basis_matrix is not None:
self.__matrix = basis_matrix
self.__matrix.set_immutable()
assert self.__rank == self.__matrix.nrows()
self._Sigma0 = self._U._act._Sigma0
AmbientHeckeModule.__init__(self, self._R, self.__rank,
self._X.prime() * self._X.Nplus() * self._X.Nminus(), weight=self._k)
self._populate_coercion_lists_()
def monomial_coefficients(self):
r"""
Void method to comply with pickling.
EXAMPLES::
sage: M = BruhatTitsQuotient(3,5).harmonic_cocycles(2,prec=10)
sage: M.monomial_coefficients()
{}
"""
return {}
def base_extend(self, base_ring):
r"""
Extend the base ring of the coefficient module.
INPUT:
- ``base_ring`` - a ring that has a coerce map from the
current base ring
OUTPUT:
A new space of HarmonicCocycles with the base extended.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,19)
sage: H = X.harmonic_cocycles(2,10)
sage: H.base_ring()
3-adic Field with capped relative precision 10
sage: H1 = H.base_extend(Qp(3,prec=15))
sage: H1.base_ring()
3-adic Field with capped relative precision 15
"""
if not base_ring.has_coerce_map_from(self.base_ring()):
raise ValueError("No coercion defined")
else:
return self.change_ring(base_ring)
def change_ring(self, new_base_ring):
r"""
Change the base ring of the coefficient module.
INPUT:
- ``new_base_ring`` - a ring that has a coerce map from the
current base ring
OUTPUT:
New space of HarmonicCocycles with different base ring
EXAMPLES::
sage: X = BruhatTitsQuotient(5,17)
sage: H = X.harmonic_cocycles(2,10)
sage: H.base_ring()
5-adic Field with capped relative precision 10
sage: H1 = H.base_extend(Qp(5,prec=15)) # indirect doctest
sage: H1.base_ring()
5-adic Field with capped relative precision 15
"""
if not new_base_ring.has_coerce_map_from(self.base_ring()):
raise ValueError("No coercion defined")
basis_matrix = self.basis_matrix().change_ring(new_base_ring)
basis_matrix.set_immutable()
return self.__class__(self._X, self._k, prec=None,
basis_matrix=basis_matrix,
base_field=new_base_ring)
def rank(self):
r"""
Return the rank (dimension) of ``self``.
OUTPUT:
An integer.
EXAMPLES::
sage: X = BruhatTitsQuotient(7,11)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: X.genus() == H.rank()
True
sage: H1 = X.harmonic_cocycles(4,prec = 10)
sage: H1.rank()
16
"""
return self.__rank
def submodule(self, v, check=False):
r"""
Return the submodule of ``self`` spanned by ``v``.
INPUT:
- ``v`` - Submodule of self.free_module().
- ``check`` - Boolean (default = False).
OUTPUT:
Subspace of harmonic cocycles.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.rank()
3
sage: v = H.gen(0)
sage: N = H.free_module().span([v.element()])
sage: H1 = H.submodule(N)
Traceback (most recent call last):
...
NotImplementedError
"""
# return BruhatTitsHarmonicCocyclesSubmodule(self, v)
raise NotImplementedError
def is_simple(self):
r"""
Whether ``self`` is irreducible.
OUTPUT:
Boolean. True if and only if ``self`` is irreducible.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,29)
sage: H = X.harmonic_cocycles(4,prec =10)
sage: H.rank()
14
sage: H.is_simple()
False
sage: X = BruhatTitsQuotient(7,2)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.rank()
1
sage: H.is_simple()
True
"""
return self.rank() == 1
def _repr_(self):
r"""
This returns the representation of self as a string.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H
Space of harmonic cocycles of weight 2 on Quotient of the Bruhat
Tits tree of GL_2(QQ_5) with discriminant 23 and level 1
"""
return 'Space of harmonic cocycles of weight %s on %s' % (self._k,
self._X)
def _latex_(self):
r"""
A LaTeX representation of ``self``.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: latex(H) # indirect doctest
\text{Space of harmonic cocycles of weight } 2 \text{ on } X(5 \cdot 23,1)\otimes_{\mathbb{Z}} \mathbb{F}_{5}
"""
s = '\\text{Space of harmonic cocycles of weight } '
s += (self._k)._latex_() + ' \\text{ on } ' + self._X._latex_()
return s
def _an_element_(self):
r"""
        Return an element of the ambient space.
        OUTPUT:
        A harmonic cocycle in self.
        EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.an_element() # indirect doctest
Harmonic cocycle with values in Sym^0 Q_5^2
"""
return self.basis()[0]
def _coerce_map_from_(self, S):
r"""
Can coerce from other BruhatTitsHarmonicCocycles or from
pAdicAutomorphicForms, also from 0
OUTPUT:
Boolean. True if and only if ``self`` is a space of
BruhatTitsHarmonicCocycles or pAdicAutomorphicForms.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A(H.basis()[0]) # indirect doctest
p-adic automorphic form of cohomological weight 0
"""
if isinstance(S, (BruhatTitsHarmonicCocycles, pAdicAutomorphicForms)):
if S._k != self._k:
return False
if S._X != self._X:
return False
return True
return False
def __eq__(self, other):
r"""
Test whether two BruhatTitsHarmonicCocycle spaces are equal.
INPUT:
- ``other`` -- a BruhatTitsHarmonicCocycles class.
OUTPUT:
A boolean value
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H1 = X.harmonic_cocycles(2,prec=10)
sage: H2 = X.harmonic_cocycles(2,prec=10)
sage: H1 == H2
True
"""
if not isinstance(other, BruhatTitsHarmonicCocycles):
return False
return (self.base_ring() == other.base_ring() and
self._X == other._X and
self._k == other._k)
def __ne__(self, other):
r"""
Test whether two BruhatTitsHarmonicCocycle spaces are not equal.
INPUT:
- ``other`` -- a BruhatTitsHarmonicCocycles class.
OUTPUT:
A boolean value
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H1 = X.harmonic_cocycles(2,prec=10)
sage: H2 = X.harmonic_cocycles(2,prec=10)
sage: H1 != H2
False
"""
return not self.__eq__(other)
def _element_constructor_(self, x):
r"""
Constructor for harmonic cocycles.
INPUT:
- ``x`` - an object coercible into a harmonic cocycle.
OUTPUT:
A harmonic cocycle.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H(H.an_element()) # indirect doctest
Harmonic cocycle with values in Sym^0 Q_3^2
sage: H(0)
Harmonic cocycle with values in Sym^0 Q_3^2
"""
if type(x) is sage.modules.free_module_element.FreeModuleElement_generic_dense:
vmat = MatrixSpace(self._R, 1, self.dimension())(x)
tmp = (vmat * self.ambient_module().basis_matrix()).row(0)
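            # Split the ambient coordinate vector into consecutive blocks of
            # length k - 1, one block of coefficients for each edge.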
vec = [self._U(tmp[e * (self._k - 1):(e + 1) * (self._k - 1)])
for e in range(len(self._E))]
return self.element_class(self, vec)
if type(x) is list:
return self.element_class(self, [self._U(o) for o in x])
if hasattr(x, 'parent'):
parent = x.parent()
if isinstance(parent, BruhatTitsHarmonicCocycles):
return self.element_class(self, [self._U(o) for o in x._F])
elif isinstance(parent, pAdicAutomorphicForms):
tmp = [self._E[ii].rep * self._U(x._F[ii]) for ii in range(self._nE)]
return self.element_class(self, tmp)
if x == 0:
tmp = [self._U([0] * (self.weight() - 1))] * self._X._num_edges
return self.element_class(self, tmp)
else:
raise TypeError
def free_module(self):
r"""
Return the underlying free module
OUTPUT:
A free module.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,7)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.free_module()
Vector space of dimension 1 over 3-adic Field with
capped relative precision 10
"""
try:
return self.__free_module
except AttributeError:
pass
V = self.base_ring() ** self.dimension()
self.__free_module = V
return V
def character(self):
r"""
The trivial character.
OUTPUT:
The identity map.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,7)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: f = H.character()
sage: f(1)
1
sage: f(2)
2
"""
return lambda x: x
def embed_quaternion(self, g, scale=1, exact=None):
r"""
Embed the quaternion element ``g`` into the matrix algebra.
INPUT:
        - ``g`` - A quaternion, expressed as a 4x1 matrix.
        - ``scale`` - an element by which to multiply the embedded matrix (default: 1).
        - ``exact`` - boolean (default: None). Whether to embed into an exact ring;
          if None, this is decided by whether the base ring of ``self`` is exact.
OUTPUT:
A 2x2 matrix with `p`-adic entries.
EXAMPLES::
sage: X = BruhatTitsQuotient(7,2)
sage: q = X.get_stabilizers()[0][1][0]
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: Hmat = H.embed_quaternion(q)
sage: Hmat.matrix().trace() == X._conv(q).reduced_trace() and Hmat.matrix().determinant() == 1
True
"""
if exact is None:
exact = self._R.is_exact()
return self._Sigma0(scale * self._X.embed_quaternion(g, exact=exact,
prec=self._prec),
check=False)
def basis_matrix(self):
r"""
Return a basis of ``self`` in matrix form.
If the coefficient module `M` is of finite rank then the space
of Gamma invariant `M` valued harmonic cocycles can be
represented as a subspace of the finite rank space of all
functions from the finitely many edges in the corresponding
BruhatTitsQuotient into `M`. This function computes this
representation of the space of cocycles.
OUTPUT:
        - A basis matrix describing the cocycles in the space of all
`M` valued Gamma invariant functions on the tree.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,3)
sage: M = X.harmonic_cocycles(4,prec = 20)
sage: B = M.basis() # indirect doctest
sage: len(B) == X.dimension_harmonic_cocycles(4)
True
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu (2012-02-20)
"""
try:
return self.__matrix
except AttributeError:
pass
nV = len(self._V)
nE = len(self._E)
stab_conds = []
S = self._X.get_edge_stabilizers()
p = self._X._p
d = self._k - 1
for e in self._E:
try:
                g = [gg for gg in S[e.label] if gg[2]][0]
C = self._U.acting_matrix(self._Sigma0(self.embed_quaternion(g[0])), d).transpose() # Warning - Need to allow the check = True
C -= self._U.acting_matrix(self._Sigma0(Matrix(QQ, 2, 2, p ** g[1])), d).transpose() # Warning - Need to allow the check = True
stab_conds.append([e.label, C])
except IndexError:
pass
n_stab_conds = len(stab_conds)
self._M = Matrix(self._R, (nV + n_stab_conds) * d, nE * d, 0,
sparse=True)
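        # The linear system has one block of d rows for each vertex (the
        # harmonicity condition at that vertex) plus one block of d rows for
        # each nontrivial edge stabilizer, and one block of d columns per
        # edge; the space of cocycles is the right kernel of this matrix.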
for v in self._V:
for e in filter(lambda e: e.parity == 0, v.leaving_edges):
C = sum([self._U.acting_matrix(self.embed_quaternion(x[0]), d)
for x in e.links],
Matrix(self._R, d, d, 0)).transpose()
self._M.set_block(v.label * d, e.label * d, C)
for e in filter(lambda e: e.parity == 0, v.entering_edges):
C = sum([self._U.acting_matrix(self.embed_quaternion(x[0]), d)
for x in e.opposite.links],
Matrix(self._R, d, d, 0)).transpose()
self._M.set_block(v.label * d, e.opposite.label * d, C)
for kk in range(n_stab_conds):
v = stab_conds[kk]
self._M.set_block((nV + kk) * d, v[0] * d, v[1])
x1 = self._M.right_kernel().matrix()
if x1.nrows() != self.rank():
raise RuntimeError('The computed dimension does not agree with '
'the expectation. Consider increasing '
'precision!')
K = [c.list() for c in x1.rows()]
if not self._R.is_exact():
for ii in range(len(K)):
s = min([t.valuation() for t in K[ii]])
for jj in range(len(K[ii])):
K[ii][jj] = (p ** (-s)) * K[ii][jj]
self.__matrix = Matrix(self._R, len(K), nE * d, K)
self.__matrix.set_immutable()
return self.__matrix
def __apply_atkin_lehner(self, q, f):
r"""
Apply an Atkin-Lehner involution to a harmonic cocycle
INPUT:
- ``q`` - an integer dividing the full level p*Nminus*Nplus
- ``f`` - a harmonic cocycle
OUTPUT:
- The harmonic cocycle obtained by hitting ``f`` with the
Atkin-Lehner at ``q``
EXAMPLES::
sage: X = BruhatTitsQuotient(5,17)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: A = H.atkin_lehner_operator(5).matrix() # indirect doctest
sage: A**2 == 1
True
"""
Data = self._X._get_atkin_lehner_data(q)
p = self._X._p
tmp = [self._U(0) for jj in range(len(self._E))]
d1 = Data[1]
mga = self.embed_quaternion(Data[0])
nE = len(self._E)
for jj in range(nE):
t = d1[jj]
if t.label < nE:
tmp[jj] += mga * t.igamma(self.embed_quaternion, scale=p ** -t.power) * f._F[t.label]
else:
tmp[jj] += mga * t.igamma(self.embed_quaternion, scale=p ** -t.power) * (-f._F[t.label - nE])
return self(tmp)
def __apply_hecke_operator(self, l, f):
r"""
This function applies a Hecke operator to a harmonic cocycle.
INPUT:
- ``l`` - an integer
- ``f`` - a harmonic cocycle
OUTPUT:
- A harmonic cocycle which is the result of applying the lth
Hecke operator to ``f``
EXAMPLES::
sage: X = BruhatTitsQuotient(5,17)
sage: H = X.harmonic_cocycles(2,prec=50)
sage: A = H.hecke_operator(7).matrix() # indirect doctest
sage: [o.rational_reconstruction() for o in A.charpoly().coefficients()]
[-8, -12, 12, 20, 8, 1]
"""
HeckeData, alpha = self._X._get_hecke_data(l)
if self.level() % l == 0:
factor = QQ(l ** (Integer((self._k - 2) / 2)) / (l + 1))
else:
factor = QQ(l ** (Integer((self._k - 2) / 2)))
p = self._X._p
alphamat = self.embed_quaternion(alpha)
tmp = [self._U(0) for jj in range(len(self._E))]
for d0, d1 in HeckeData:
mga = self.embed_quaternion(d0) * alphamat
nE = len(self._E)
for jj in range(nE):
t = d1[jj]
if t.label < nE:
tmp[jj] += mga * t.igamma(self.embed_quaternion, scale=p ** -t.power) * f._F[t.label]
else:
tmp[jj] += mga * t.igamma(self.embed_quaternion, scale=p ** -t.power) * (-f._F[t.label - nE])
return self([factor * x for x in tmp])
def _compute_atkin_lehner_matrix(self, d):
r"""
When the underlying coefficient module is finite, this
function computes the matrix of an Atkin-Lehner involution in
the basis provided by the function basis_matrix
INPUT:
- ``d`` - an integer dividing p*Nminus*Nplus, where these
quantities are associated to the BruhatTitsQuotient self._X
OUTPUT:
- The matrix of the Atkin-Lehner involution at ``d`` in the basis given by
self.basis_matrix
EXAMPLES::
sage: X = BruhatTitsQuotient(5,13)
sage: H = X.harmonic_cocycles(2,prec=5)
sage: A = H.atkin_lehner_operator(5).matrix() # indirect doctest
sage: A**2 == 1
True
"""
return self.__compute_operator_matrix(lambda f: self.__apply_atkin_lehner(d, f))
def _compute_hecke_matrix_prime(self, l):
r"""
When the underlying coefficient module is finite, this
function computes the matrix of a (prime) Hecke operator in
the basis provided by the function basis_matrix
INPUT:
- ``l`` - a prime integer
OUTPUT:
- The matrix of `T_l` acting on the cocycles in the basis given by
self.basis_matrix
EXAMPLES::
sage: X = BruhatTitsQuotient(3,11)
sage: H = X.harmonic_cocycles(4,prec=60)
sage: A = H.hecke_operator(7).matrix() # long time, indirect doctest
sage: [o.rational_reconstruction() for o in A.charpoly().coefficients()] # long time
[6496256, 1497856, -109040, -33600, -904, 32, 1]
"""
return self.__compute_operator_matrix(lambda f: self.__apply_hecke_operator(l, f))
def __compute_operator_matrix(self, T):
r"""
Compute the matrix of the operator `T`.
Used primarily to compute matrices of Hecke operators
in a streamlined way.
INPUT:
- ``T`` - A linear function on the space of harmonic cocycles.
OUTPUT:
The matrix of ``T`` acting on the space of harmonic cocycles.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = H.hecke_operator(11).matrix() # indirect doctest
sage: [o.rational_reconstruction() for o in A.charpoly().coefficients()]
[-12, -1, 4, 1]
"""
R = self._R
A = self.basis_matrix().transpose()
basis = self.basis()
B = zero_matrix(R, len(self._E) * (self._k - 1), self.dimension())
for rr in range(len(basis)):
g = T(basis[rr])
B.set_block(0, rr, Matrix(R, len(self._E) * (self._k - 1), 1, [g._F[e].moment(ii) for e in range(len(self._E)) for ii in range(self._k - 1)]))
try:
res = (A.solve_right(B)).transpose()
except ValueError:
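            # Fall back to least squares via the normal equations
            # (A^T A) X = A^T B when an exact solution cannot be found.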
rest = (A.transpose() * A).solve_right(A.transpose() * B)
err = A * rest - B
if err != 0:
try:
if hasattr(err.parent().base_ring().an_element(),
'valuation'):
minval = min([o.valuation() for o in err.list()
if o != 0])
else:
minval = sum([RR(o.norm() ** 2) for o in err.list()])
verbose('Error = %s' % minval)
except AttributeError:
verbose('Warning: something did not work in the computation')
res = rest.transpose()
res.set_immutable()
return res
# class BruhatTitsHarmonicCocyclesSubmodule(BruhatTitsHarmonicCocycles,sage.modular.hecke.submodule.HeckeSubmodule):
# r"""
# Submodule of a space of BruhatTitsHarmonicCocycles.
#
# INPUT:
#
# - ``x`` - integer (default: 1) the description of the
# argument x goes here. If it contains multiple lines, all
# the lines after the first need to be indented.
#
# - ``y`` - integer (default: 2) the ...
#
# EXAMPLES::
#
# sage: X = BruhatTitsQuotient(3,17)
# sage: H = X.harmonic_cocycles(2,prec=10)
# sage: N = H.free_module().span([H.an_element().element()])
# sage: H1 = H.submodule(N) # indirect doctest
# sage: H1
# Subspace of Space of harmonic cocycles of weight 2 on Quotient of the Bruhat Tits tree of GL_2(QQ_3) with discriminant 17 and level 1 of dimension 1
#
# AUTHOR:
#
# - Marc Masdeu (2012-02-20)
# """
# def __init__(self, ambient_module, submodule, check):
# """
# Submodule of harmonic cocycles.
#
# INPUT:
#
# - ``ambient_module`` - BruhatTitsHarmonicCocycles
#
# - ``submodule`` - submodule of the ambient space.
#
# - ``check`` - (default: False) whether to check that the
# submodule is Hecke equivariant
#
# EXAMPLES::
#
# sage: X = BruhatTitsQuotient(3,17)
# sage: H = X.harmonic_cocycles(2,prec=10)
# sage: N = H.free_module().span([H.an_element().element()])
# sage: H1 = H.submodule(N)
# sage: TestSuite(H1).run()
# """
# A = ambient_module
# self.__rank = submodule.dimension()
# basis_matrix = submodule.basis_matrix()*A.basis_matrix()
# basis_matrix.set_immutable()
# BruhatTitsHarmonicCocycles.__init__(self,A._X,A._k,A._prec,basis_matrix,A.base_ring())
#
# def rank(self):
# r"""
# Returns the rank (dimension) of the submodule.
#
# OUTPUT:
#
# Integer - The rank of ``self``.
#
# EXAMPLES::
#
# sage: X = BruhatTitsQuotient(3,17)
# sage: H = X.harmonic_cocycles(2,prec=10)
# sage: N = H.free_module().span([H.an_element().element()])
# sage: H1 = H.submodule(basis = [H.an_element()])
# sage: H1.rank()
# 1
# """
# return self.__rank
#
# def _repr_(self):
# r"""
# Returns the representation of self as a string.
#
# OUTPUT:
#
# String representation of self.
#
# EXAMPLES::
#
# sage: X = BruhatTitsQuotient(3,17)
# sage: H = X.harmonic_cocycles(2,prec=10)
# sage: N = H.free_module().span([H.an_element().element()])
# sage: H1=H.submodule(N)
# sage: H1
# Subspace of Space of harmonic cocycles of weight 2 on Quotient of the Bruhat Tits tree of GL_2(QQ_3) with discriminant 17 and level 1 of dimension 1
# """
# return "Subspace of %s of dimension %s"%(self.ambient(),self.dimension())
class pAdicAutomorphicFormElement(ModuleElement):
r"""
Rudimentary implementation of a class for a `p`-adic
automorphic form on a definite quaternion algebra over `\QQ`. These
are required in order to compute moments of measures associated to
harmonic cocycles on the Bruhat-Tits tree using the overconvergent modules
of Darmon-Pollack and Matt Greenberg. See Greenberg's thesis [G]_ for
more details.
INPUT:
- ``vec`` - A preformatted list of data
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: h = H.an_element()
sage: HH = X.padic_automorphic_forms(2,10)
sage: a = HH(h)
sage: a
p-adic automorphic form of cohomological weight 0
REFERENCES:
.. [G] Heegner points and rigid analytic modular forms
Matthew Greenberg
Ph.D. Thesis, McGill University, 2006.
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu
"""
def __init__(self, parent, vec):
"""
Create a pAdicAutomorphicFormElement
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: TestSuite(A.an_element()).run()
"""
self._num_generators = len(parent._list)
self._cached_values = {}
self._R = Qp(parent.prime(), prec=parent._prec)
self._value = [parent._U(v) for v in vec]
ModuleElement.__init__(self, parent)
def _add_(self, g):
r"""
This function adds two `p`-adic automorphic forms.
INPUT:
- ``g`` - a `p`-adic automorphic form
OUTPUT:
- the result of adding ``g`` to self
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: a = A.an_element()
sage: b = a + a # indirect doctest
"""
# Should ensure that self and g are of the same weight and on
# the same curve
vec = [self._value[e] + g._value[e]
for e in range(self._num_generators)]
return self.parent()(vec)
def _sub_(self, g):
r"""
This function subtracts a `p`-adic automorphic form from another.
INPUT:
- ``g`` - a `p`-adic automorphic form
OUTPUT:
- the result of subtracting ``g`` from self
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: a = A.an_element()
sage: b = a - a # indirect doctest
sage: b == 0
True
"""
# Should ensure that self and g are of the same weight and on
# the same curve
vec = [self._value[e] - g._value[e]
for e in range(self._num_generators)]
return self.parent()(vec)
def _richcmp_(self, other, op):
r"""
Test for equality of pAdicAutomorphicForm elements
INPUT:
- ``other`` - Another `p`-automorphic form
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: v1 = A(H.basis()[0])
sage: v2 = 3*v1
sage: 2*v1 == v2-v1 # indirect doctest
True
"""
if op not in [op_EQ, op_NE]:
return NotImplemented
b = all(self._value[e] == other._value[e]
for e in range(self._num_generators))
if op == op_EQ:
return b
return not b
def __bool__(self):
"""
Tell whether the form is zero or not.
OUTPUT:
        Boolean. ``True`` if ``self`` is nonzero, ``False`` otherwise.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(4,prec = 20)
sage: A = X.padic_automorphic_forms(4,prec = 20)
sage: v1 = A(H.basis()[1])
sage: bool(v1)
True
sage: v2 = v1-v1
sage: bool(v2)
False
"""
return any(not o.is_zero() for o in self._value)
__nonzero__ = __bool__
def __getitem__(self, e1):
r"""
Evaluate a `p`-adic automorphic form on a matrix in `GL_2(\QQ_p)`.
INPUT:
- ``e1`` - a matrix in `GL_2(\QQ_p)`
OUTPUT:
- the value of self evaluated on ``e1``
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: M = X.harmonic_cocycles(2,prec=5)
sage: A = X.padic_automorphic_forms(2,prec=5)
sage: a = A(M.gen(0))
sage: a[Matrix(ZZ,2,2,[1,2,3,4])]
8 + 8*17 + 8*17^2 + 8*17^3 + 8*17^4 + O(17^5)
"""
return self.evaluate(e1)
def evaluate(self, e1):
r"""
Evaluate a `p`-adic automorphic form on a matrix in `GL_2(\QQ_p)`.
INPUT:
- ``e1`` - a matrix in `GL_2(\QQ_p)`
OUTPUT:
- the value of self evaluated on ``e1``
EXAMPLES::
sage: X = BruhatTitsQuotient(7,5)
sage: M = X.harmonic_cocycles(2,prec=5)
sage: A = X.padic_automorphic_forms(2,prec=5)
sage: a = A(M.basis()[0])
sage: a.evaluate(Matrix(ZZ,2,2,[1,2,3,1]))
4 + 6*7 + 6*7^2 + 6*7^3 + 6*7^4 + O(7^5)
sage: a.evaluate(Matrix(ZZ,2,2,[17,0,0,1]))
1 + O(7^5)
"""
X = self.parent()._source
p = self.parent().prime()
u = DoubleCosetReduction(X, e1)
tmp = ((u.t(self.parent()._U.base_ring().precision_cap())) * p ** (u.power)).adjoint()
S0 = self.parent()._Sigma0
return S0(tmp, check=False) * self._value[u.label]
# Warning! Should remove check=False...
def _lmul_(self, a):
r"""
Multiply the automorphic form by a scalar.
INPUT:
- a scalar
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: M = X.harmonic_cocycles(2,prec=5)
sage: A = X.padic_automorphic_forms(2,prec=5)
sage: a = A(M.basis()[0])
sage: a.evaluate(Matrix(ZZ,2,2,[1,2,3,4]))
8 + 8*17 + 8*17^2 + 8*17^3 + 8*17^4 + O(17^5)
sage: b = 2*a # indirect doctest
sage: b.evaluate(Matrix(ZZ,2,2,[1,2,3,4]))
16 + 16*17 + 16*17^2 + 16*17^3 + 16*17^4 + O(17^5)
"""
# Should ensure that 'a' is a scalar
return self.parent()([a * self._value[e]
for e in range(self._num_generators)])
def _repr_(self):
r"""
This returns the representation of self as a string.
If self corresponds to a modular form of weight `k`, then the
cohomological weight is `k-2`.
OUTPUT:
A string.
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: a = A.an_element()
sage: a # indirect doctest
p-adic automorphic form of cohomological weight 0
"""
return 'p-adic automorphic form of cohomological weight %s' % self.parent()._U.weight()
def valuation(self):
r"""
The valuation of ``self``, defined as the minimum of the
valuations of the values that it takes on a set of edge
representatives.
OUTPUT:
An integer.
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: M = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: a = A(M.gen(0))
sage: a.valuation()
0
sage: (17*a).valuation()
1
"""
return min(self._value[e].valuation()
for e in range(self._num_generators))
def _improve(self, hc):
r"""
Repeatedly apply the `U_p` operator to a `p`-adic
automorphic form. This is used to compute moments of a measure
associated to a rigid modular form in the following way: lift
a rigid modular form to an overconvergent `p`-adic
automorphic form in any way, and then repeatedly apply `U_p`
to project to the ordinary part. The resulting form encodes
the moments of the measure of the original rigid modular form
(assuming it is ordinary).
EXAMPLES::
sage: X = BruhatTitsQuotient(7,2)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: h = H.gen(0)
sage: A = X.padic_automorphic_forms(2,prec = 10,overconvergent=True)
sage: a = A.lift(h) # indirect doctest
REFERENCES:
For details see [G]_. Alternatively, one can look at
[DP]_ for the analogous algorithm in the case of modular symbols.
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu
"""
MMM = self.parent()
U = MMM._U
S0 = MMM._Sigma0
h1 = MMM([o.lift(M=MMM.precision_cap()) for o in self._value])
h2 = MMM._apply_Up_operator(h1, True)
verbose("Applied Up once")
ii = 0
current_val = 0
init_val = self.valuation()
old_val = init_val - 1
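        # Keep applying Up until the valuation of the difference between
        # successive iterates (relative to the initial valuation) stops
        # increasing, i.e. the moments have stabilized.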
while current_val > old_val:
old_val = current_val
ii += 1
h1._value = [U(c) for c in h2._value]
h2 = MMM._apply_Up_operator(h1, True)
current_val = (h2 - h1).valuation() - init_val
verbose('val = %s' % current_val)
if current_val is Infinity:
break
verbose('Applied Up %s times' % (ii + 1))
return h2
def integrate(self, f, center=1, level=0, method='moments'):
r"""
Calculate
.. MATH::
\int_{\mathbf{P}^1(\QQ_p)} f(x)d\mu(x)
        where `\mu` is the measure associated to ``self``.
INPUT:
- ``f`` - An analytic function.
- ``center`` - 2x2 matrix over `\QQ_p` (default: 1)
- ``level`` - integer (default: 0)
- ``method`` - string (default: 'moments'). Which method of
integration to use. Either 'moments' or 'riemann_sum'.
EXAMPLES:
Integrating the Poisson kernel against a measure yields a
value of the associated modular form. Such values can be
computed efficiently using the overconvergent method, as long
as one starts with an ordinary form::
sage: X = BruhatTitsQuotient(7,2)
sage: X.genus()
1
Since the genus is 1, the space of weight 2 forms is 1
dimensional. Hence any nonzero form will be a `U_7`
eigenvector. By Jacquet-Langlands and Cerednik-Drinfeld, in
this case the Hecke eigenvalues correspond to that of any
nonzero form on `\Gamma_0(14)` of weight `2`. Such a form is
ordinary at `7`, and so we can apply the overconvergent method
directly to this form without `p`-stabilizing::
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: h = H.gen(0)
sage: A = X.padic_automorphic_forms(2,prec = 5,overconvergent=True)
sage: a = A.lift(h)
sage: a._value[0].moment(2)
2 + 6*7 + 4*7^2 + 4*7^3 + 6*7^4 + O(7^5)
Now that we've lifted our harmonic cocycle to an
overconvergent automorphic form we simply need to define the
Teitelbaum-Poisson Kernel, and then integrate::
sage: Kp.<x> = Qq(49,prec = 5)
sage: z = Kp['z'].gen()
sage: f = 1/(z-x)
sage: a.integrate(f)
(5*x + 5) + (4*x + 4)*7 + (5*x + 5)*7^2 + (5*x + 6)*7^3 + O(7^5)
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu (2012-02-20)
"""
E = self.parent()._source._BT.get_balls(center, level)
R1 = LaurentSeriesRing(f.base_ring(), 'r1', default_prec = self.parent()._U.base_ring().precision_cap() + 1)
R2 = PolynomialRing(f.base_ring(), 'x')
x = R2.gen()
value = 0
ii = 0
if method == 'riemann_sum':
for e in E:
ii += 1
#print(ii,"/",len(E))
exp = ((R1([e[1, 1], e[1, 0]])) ** (self.parent()._U.weight()) * e.determinant() ** (-(self.parent()._U.weight()) / 2)) * f(R1([e[0, 1], e[0, 0]]) / R1([e[1, 1], e[1, 0]]))
#exp = R2([tmp[jj] for jj in range(self.parent()._k-1)])
new = eval_dist_at_powseries(self.evaluate(e), exp.truncate(self.parent()._U.weight() + 1))
value += new
elif method == 'moments':
n = self.parent()._U.weight()
for e in E:
ii += 1
#print(ii,"/",len(E))
a, b, c, d = e.list()
delta = e.determinant()
verbose('%s' % (R2([e[0, 1], e[0, 0]])
/ R2([e[1, 1], e[1, 0]])))
tmp = ((c * x + d) ** n * delta ** -ZZ(n / 2)) * f((a * x + b) / (c * x + d))
exp = R1(tmp.numerator()) / R1(tmp.denominator())
new = eval_dist_at_powseries(self.evaluate(e), exp)
value += new
else:
print('The available methods are either "moments" or "riemann_sum". The latter is only provided for consistency check, and should never be used.')
return False
return value
def modular_form(self, z=None, level=0, method='moments'):
r"""
Return the modular form corresponding to ``self``.
INPUT:
- ``z`` - (default: None). If specified, returns the value of
the form at the point ``z`` in the `p`-adic upper half
plane.
- ``level`` - integer (default: 0). If ``method`` is
'riemann_sum', will use a covering of `P^1(\QQ_p)` with
          balls of size `p^{-\mbox{level}}`.
- ``method`` - string (default: ``moments``). It must be
either ``moments`` or ``riemann_sum``.
OUTPUT:
- A function from the `p`-adic upper half plane to `\CC_p`. If
an argument ``z`` was passed, returns instead the value at
that point.
EXAMPLES:
Integrating the Poisson kernel against a measure yields a
value of the associated modular form. Such values can be
computed efficiently using the overconvergent method, as long
as one starts with an ordinary form::
sage: X = BruhatTitsQuotient(7, 2)
sage: X.genus()
1
Since the genus is 1, the space of weight 2 forms is 1
dimensional. Hence any nonzero form will be a `U_7`
eigenvector. By Jacquet-Langlands and Cerednik-Drinfeld, in
this case the Hecke eigenvalues correspond to that of any
nonzero form on `\Gamma_0(14)` of weight `2`. Such a form is
ordinary at `7`, and so we can apply the overconvergent method
directly to this form without `p`-stabilizing::
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: A = X.padic_automorphic_forms(2,prec = 5,overconvergent=True)
sage: f0 = A.lift(H.basis()[0])
Now that we've lifted our harmonic cocycle to an
overconvergent automorphic form, we extract the associated
modular form as a function and test the modular property::
sage: T.<x> = Qq(7^2,prec = 5)
sage: f = f0.modular_form(method = 'moments')
sage: a,b,c,d = X.embed_quaternion(X.get_units_of_order()[1]).change_ring(T.base_ring()).list()
sage: ((c*x + d)^2*f(x)-f((a*x + b)/(c*x + d))).valuation()
5
"""
return self.derivative(z, level, method, order=0)
def derivative(self, z=None, level=0, method='moments', order=1):
r"""
Return the derivative of the modular form corresponding to
``self``.
INPUT:
- ``z`` - (default: None). If specified, evaluates the derivative
at the point ``z`` in the `p`-adic upper half plane.
- ``level`` - integer (default: 0). If ``method`` is
'riemann_sum', will use a covering of `P^1(\QQ_p)` with
          balls of size `p^{-\mbox{level}}`.
- ``method`` - string (default: ``moments``). It must be
either ``moments`` or ``riemann_sum``.
- ``order`` - integer (default: 1). The order of the
derivative to be computed.
OUTPUT:
- A function from the `p`-adic upper half plane to `\CC_p`. If
an argument ``z`` was passed, returns instead the value of
the derivative at that point.
EXAMPLES:
Integrating the Poisson kernel against a measure yields a
value of the associated modular form. Such values can be
computed efficiently using the overconvergent method, as long
as one starts with an ordinary form::
sage: X = BruhatTitsQuotient(7, 2)
sage: X.genus()
1
Since the genus is 1, the space of weight 2 forms is 1
dimensional. Hence any nonzero form will be a `U_7`
eigenvector. By Jacquet-Langlands and Cerednik-Drinfeld, in
this case the Hecke eigenvalues correspond to that of any
nonzero form on `\Gamma_0(14)` of weight `2`. Such a form is
ordinary at `7`, and so we can apply the overconvergent method
directly to this form without `p`-stabilizing::
sage: H = X.harmonic_cocycles(2,prec=5)
sage: h = H.gen(0)
sage: A = X.padic_automorphic_forms(2,prec=5,overconvergent=True)
sage: f0 = A.lift(h)
Now that we've lifted our harmonic cocycle to an
overconvergent automorphic form, we extract the associated
modular form as a function and test the modular property::
sage: T.<x> = Qq(49,prec=10)
sage: f = f0.modular_form()
sage: g = X.get_embedding_matrix()*X.get_units_of_order()[1]
sage: a,b,c,d = g.change_ring(T).list()
sage: (c*x +d)^2*f(x)-f((a*x + b)/(c*x + d))
O(7^5)
We can also compute the Shimura-Maass derivative, which is a
        nearly rigid analytic modular form of weight 4::
sage: f = f0.derivative()
sage: (c*x + d)^4*f(x)-f((a*x + b)/(c*x + d))
O(7^5)
"""
def F(z, level=level, method=method):
R = PolynomialRing(z.parent(), 'x,y').fraction_field()
Rx = PolynomialRing(z.parent(), 'x1').fraction_field()
x1 = Rx.gen()
subst = R.hom([x1, z], codomain=Rx)
x, y = R.gens()
center = self.parent()._source._BT.find_containing_affinoid(z)
zbar = z.trace() - z
f = R(1) / (x - y)
k = self.parent()._n + 2
V = [f]
for ii in range(order):
V = [v.derivative(y) for v in V] + [k / (y - zbar) * v
for v in V]
k += 2
return sum(self.integrate(subst(v), center, level, method)
for v in V)
if z is None:
return F
return F(z, level, method)
# So far we cannot break it into two integrals because of the pole
# at infinity.
def coleman(self, t1, t2, E=None, method='moments', mult=False,
delta=-1):
r"""
If ``self`` is a `p`-adic automorphic form that
corresponds to a rigid modular form, then this computes the
Coleman integral of this form between two points on the
boundary `P^1(\QQ_p)` of the `p`-adic upper half plane.
INPUT:
- ``t1``, ``t2`` - elements of `P^1(\QQ_p)` (the endpoints
of integration)
- ``E`` - (default: None). If specified, will not compute the
covering adapted to ``t1`` and ``t2`` and instead use the
given one. In that case, ``E`` should be a list of matrices
corresponding to edges describing the open balls to be
considered.
- ``method`` - string (default: 'moments'). Tells which
algorithm to use (alternative is 'riemann_sum', which is
unsuitable for computations requiring high precision)
- ``mult`` - boolean (default: False). Whether to compute the
multiplicative version.
OUTPUT:
The result of the Coleman integral
EXAMPLES::
sage: p = 7
sage: lev = 2
sage: prec = 10
sage: X = BruhatTitsQuotient(p,lev, use_magma = True) # optional - magma
sage: k = 2 # optional - magma
sage: M = X.harmonic_cocycles(k,prec) # optional - magma
sage: B = M.basis() # optional - magma
sage: f = 3*B[0] # optional - magma
sage: MM = X.padic_automorphic_forms(k,prec,overconvergent = True) # optional - magma
sage: D = -11 # optional - magma
sage: X.is_admissible(D) # optional - magma
True
sage: K.<a> = QuadraticField(D) # optional - magma
sage: Kp.<g> = Qq(p**2,prec) # optional - magma
sage: P = Kp.gen() # optional - magma
sage: Q = 2+Kp.gen()+ p*(Kp.gen() +1) # optional - magma
sage: F = MM.lift(f) # long time, optional - magma
sage: J0 = F.coleman(P,Q,mult = True) # long time, optional - magma
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu (2012-02-20)
"""
p = self.parent().prime()
K = t1.parent()
R = PolynomialRing(K, 'x')
x = R.gen()
R1 = LaurentSeriesRing(K, 'r1', default_prec=self.parent()._U.base_ring().precision_cap())
r1 = R1.gen()
if E is None:
E = self.parent()._source._BT.find_covering(t1, t2)
# print('Got ', len(E), ' open balls.')
value = 0
ii = 0
value_exp = K(1)
if method == 'riemann_sum':
for e in E:
ii += 1
b = e[0, 1]
d = e[1, 1]
y = (b - d * t1) / (b - d * t2)
poly = R1(y.log()) # R1(our_log(y))
c_e = self.evaluate(e)
new = eval_dist_at_powseries(c_e, poly)
value += new
if mult:
value_exp *= K.teichmuller(y) ** Integer(c_e.moment(0).rational_reconstruction())
elif method == 'moments':
for e in E:
ii += 1
f = (x - t1) / (x - t2)
a, b, c, d = e.list()
y0 = f(R1([b, a]) / R1([d, c])) # f( (ax+b)/(cx+d) )
y0 = p ** (-y0(ZZ(0)).valuation()) * y0
mu = K.teichmuller(y0(ZZ(0)))
y = y0 / mu - 1
poly = R1(0)
ypow = y
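                # Build the power series of log(1 + y) = sum_{j>=1} (-1)^(j+1) y^j / j
                # term by term.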
for jj in range(1, R1.default_prec() + 10):
poly += (-1) ** (jj + 1) * ypow / jj
ypow *= y
c_e = self.evaluate(e)
new = eval_dist_at_powseries(c_e, poly)
if hasattr(new, 'degree'):
assert 0
value += new
if mult:
value_exp *= K.teichmuller(((b - d * t1) / (b - d * t2))) ** Integer(c_e.moment(0).rational_reconstruction())
else:
print('The available methods are either "moments" or "riemann_sum". The latter is only provided for consistency check, and should not be used in practice.')
return False
if mult:
return K.teichmuller(value_exp) * value.exp()
return value
class pAdicAutomorphicForms(Module, UniqueRepresentation):
Element = pAdicAutomorphicFormElement
@staticmethod
def __classcall__(cls, domain, U, prec=None, t=None, R=None,
overconvergent=False):
r"""
The module of (quaternionic) `p`-adic automorphic forms.
INPUT:
- ``domain`` - A BruhatTitsQuotient.
- ``U`` -- A distributions module or an integer. If ``U`` is a
distributions module then this creates the relevant space of
automorphic forms. If ``U`` is an integer then the coefficients
are the (`U-2`)nd power of the symmetric representation of
`GL_2(\QQ_p)`.
- ``prec`` -- A precision (default : None). If not None should
be a positive integer.
- ``t`` -- (default : None). The number of additional moments to store. If None, determine
it automatically from ``prec``, ``U`` and the ``overconvergent`` flag.
- ``R`` -- (default : None). If specified, coefficient field of the automorphic forms.
If not specified it defaults to the base ring of the distributions ``U``, or to `Q_p`
with the working precision ``prec``.
- ``overconvergent`` -- Boolean (default = False). If True, will construct overconvergent
`p`-adic automorphic forms. Otherwise it constructs the finite dimensional space of
`p`-adic automorphic forms which is isomorphic to the space of harmonic cocycles.
EXAMPLES:
The space of weight 2 p-automorphic forms is isomorphic with
the space of scalar valued invariant harmonic cocycles::
sage: X = BruhatTitsQuotient(11,5)
sage: H0 = X.padic_automorphic_forms(2,10)
sage: H1 = X.padic_automorphic_forms(2,prec = 10)
sage: H0 == H1
True
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu (2012-02-20)
"""
return super(pAdicAutomorphicForms, cls).__classcall__(cls, domain, U,
prec, t, R,
overconvergent)
def __init__(self, domain, U, prec=None, t=None, R=None,
overconvergent=False):
"""
Create a space of `p`-automorphic forms
EXAMPLES::
sage: X = BruhatTitsQuotient(11,5)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: TestSuite(A).run()
"""
if R is None:
if not isinstance(U, Integer):
self._R = U.base_ring()
else:
if prec is None:
prec = 100
self._R = Qp(domain._p, prec)
else:
self._R = R
#U is a CoefficientModuleSpace
if isinstance(U, Integer):
if t is None:
if overconvergent:
t = prec - U + 1
else:
t = 0
if overconvergent:
self._U = OverconvergentDistributions(U - 2, base=self._R,
prec_cap=U - 1 + t,
act_on_left=True,
adjuster=_btquot_adjuster(),
dettwist=-ZZ((U - 2) / 2),
act_padic=True)
else:
self._U = Symk(U - 2, base=self._R, act_on_left=True,
adjuster=_btquot_adjuster(),
dettwist=-ZZ((U - 2) / 2),
act_padic=True)
else:
self._U = U
self._source = domain
self._list = self._source.get_list() # Contains also the opposite edges
self._prec = self._R.precision_cap()
self._n = self._U.weight()
self._p = self._source._p
self._Sigma0 = self._U._act._Sigma0
Module.__init__(self, base=self._R)
self._populate_coercion_lists_()
def prime(self):
"""
Return the underlying prime.
OUTPUT:
- ``p`` - a prime integer
EXAMPLES::
sage: X = BruhatTitsQuotient(11,5)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: A = X.padic_automorphic_forms(2,prec = 10)
sage: A.prime()
11
"""
return self._p
def zero_element(self):
r"""
Return the zero element of ``self``.
EXAMPLES::
sage: X = BruhatTitsQuotient(5, 7)
sage: H1 = X.padic_automorphic_forms( 2, prec=10)
sage: H1.zero_element() == 0
True
"""
return self.element_class(self, [self._U(0) for o in self._list])
def __eq__(self, other):
r"""
Test whether two pAdicAutomorphicForm spaces are equal.
INPUT:
- ``other`` -- another space of `p`-automorphic forms.
OUTPUT:
A boolean value
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H1 = X.padic_automorphic_forms(2,prec = 10)
sage: H2 = X.padic_automorphic_forms(2,prec = 10)
sage: H1 == H2
True
"""
if not isinstance(other, pAdicAutomorphicForms):
return False
return (self.base_ring() == other.base_ring() and
self._source == other._source and
self._U == other._U)
def __ne__(self, other):
r"""
Test whether two pAdicAutomorphicForm spaces are not equal.
INPUT:
- ``other`` -- another space of `p`-automorphic forms.
OUTPUT:
A boolean value
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H1 = X.padic_automorphic_forms(2,prec = 10)
sage: H2 = X.padic_automorphic_forms(2,prec = 10)
            sage: H1 != H2
            False
"""
return not self.__eq__(other)
def _repr_(self):
r"""
Return the representation of self as a string.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,7)
sage: A = X.padic_automorphic_forms(2,prec = 10)
sage: A # indirect doctest
Space of automorphic forms on Quotient of the Bruhat Tits tree of GL_2(QQ_3) with discriminant 7 and level 1 with values in Sym^0 Q_3^2
"""
s = 'Space of automorphic forms on '
s += str(self._source)
s += ' with values in ' + str(self._U)
return s
def _coerce_map_from_(self, S):
r"""
Can coerce from other BruhatTitsHarmonicCocycles or from pAdicAutomorphicForms
INPUT:
- ``S`` - a BruhatTitsHarmonicCocycle or pAdicAutomorphicForm
OUTPUT:
        A boolean value. True if and only if ``S`` is coercible into self.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,7)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A._coerce_map_from_(H)
True
"""
if isinstance(S, BruhatTitsHarmonicCocycles):
if S.weight() - 2 != self._n:
return False
if S._X != self._source:
return False
return True
if isinstance(S, pAdicAutomorphicForms):
if S._n != self._n:
return False
if S._source != self._source:
return False
return True
return False
def _element_constructor_(self, data):
r"""
Construct a `p`-automorphic form.
INPUT:
- ``data`` - defining data. Can be either a harmonic cocycle, or a `p`-adic automorphic form,
or a list of elements coercible into the module of coefficients of ``self``.
OUTPUT:
A `p`-adic automorphic form.
EXAMPLES::
sage: X = BruhatTitsQuotient(13,5)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: h=H.an_element() # indirect doctest
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A(h)
p-adic automorphic form of cohomological weight 0
"""
# Code how to coerce x into the space
# Admissible values of x?
if type(data) is list:
return self.element_class(self, [self._U(o, normalize=False) for o in data])
if isinstance(data, pAdicAutomorphicFormElement):
vals = [self._U(o, normalize=False) for o in data._value]
return self.element_class(self, vals)
if isinstance(data, BruhatTitsHarmonicCocycleElement):
E = self._list
tmp = []
F = []
Uold = data.parent()._U
for ii in range(len(data._F)):
newtmp = data.parent()._Sigma0(E[ii].rep.inverse(), check=False) * Uold(data._F[ii],normalize=False)
tmp.append(newtmp)
F.append(newtmp)
A = data.parent()._Sigma0(Matrix(QQ,2,2,[0,1/self.prime(),1,0]),check=False)
for ii in range(len(data._F)):
F.append(-(A * tmp[ii]))
vals = self._make_invariant([self._U(o,normalize=False) for o in F])
return self.element_class(self, vals)
if data == 0:
return self.zero_element()
def _an_element_(self):
r"""
Return an element of the module.
OUTPUT:
A harmonic cocycle.
EXAMPLES::
sage: X = BruhatTitsQuotient(13,5)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A.an_element() # indirect doctest
p-adic automorphic form of cohomological weight 0
"""
return self(0)
def precision_cap(self):
"""
Return the precision of self.
OUTPUT:
An integer.
EXAMPLES::
sage: X = BruhatTitsQuotient(13,11)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A.precision_cap()
10
"""
return self._prec
def lift(self, f):
r"""
Lift the harmonic cocycle ``f`` to a p-automorphic form.
If one is using overconvergent coefficients, then this will
compute all of the moments of the measure associated to ``f``.
INPUT:
- ``f`` - a harmonic cocycle
OUTPUT:
A `p`-adic automorphic form
EXAMPLES:
If one does not work with an overconvergent form then lift
does nothing::
sage: X = BruhatTitsQuotient(13,5)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: h = H.gen(0)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A.lift(h) # long time
p-adic automorphic form of cohomological weight 0
With overconvergent forms, the input is lifted naively and its
moments are computed::
sage: X = BruhatTitsQuotient(13,11)
sage: H = X.harmonic_cocycles(2,prec=5)
sage: A2 = X.padic_automorphic_forms(2,prec=5,overconvergent=True)
sage: a = H.gen(0)
sage: A2.lift(a) # long time
p-adic automorphic form of cohomological weight 0
"""
return self(f)._improve(f)
def _make_invariant(self, F):
r"""
Naively lift a ``classical`` automorphic form to an
overconvergent form.
INPUT:
- ``F`` - a classical (nonoverconvergent) pAdicAutomorphicForm or
BruhatTitsHarmonicCocycle.
OUTPUT:
An overconvergent pAdicAutomorphicForm
EXAMPLES::
sage: X = BruhatTitsQuotient(13,11)
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: A = X.padic_automorphic_forms(2,prec = 5)
sage: h = H.basis()[0]
sage: A.lift(h) # indirect doctest long time
p-adic automorphic form of cohomological weight 0
"""
S = self._source.get_stabilizers()
M = [e.rep for e in self._list]
newF = []
for ii in range(len(S)):
Si = S[ii]
x = self._U(F[ii], normalize=False)
if any(v[2] for v in Si):
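                # Average the value over the finite edge stabilizer so that
                # the result is invariant under it.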
newFi = self._U(0)
s = QQ(0)
m = M[ii]
for v in Si:
s += 1
g = self._Sigma0(m.adjoint() * self._source.embed_quaternion(v[0], prec=self._prec).adjoint() * m,check = False)
newFi += g * x
newF.append((QQ(1) / s) * newFi)
else:
newF.append(self._U(x,normalize=False))
return newF
def _apply_Up_operator(self, f, scale=False, original_moments=None):
r"""
Apply the Up operator to ``f``.
INPUT:
- f -- a `p`-adic automorphic form.
        - scale -- (default: False) whether to scale by the appropriate power of `p`
at each iteration.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,11)
sage: M = X.harmonic_cocycles(4,10)
sage: A = X.padic_automorphic_forms(4,10, overconvergent = True)
sage: F = A.lift(M.basis()[0]); F # indirect doctest
p-adic automorphic form of cohomological weight 2
"""
HeckeData = self._source._get_Up_data()
S0 = f._value[0].parent()._act._Sigma0
prec_cap = self._U.base_ring().precision_cap()
if not scale:
factor = self._p ** (self._U.weight() // 2)
else:
factor = 1
# Save original moments
if original_moments is None:
original_moments = [[fval._moments[ii] for ii in range(self._n + 1)]
for fval in f._value]
Tf = []
for jj in range(len(self._list)):
tmp = self._U(0,normalize=False)
for gg, edge_list in HeckeData:
u = edge_list[jj]
tprec = 2 * (prec_cap + u.power) + 1
r = S0(self._p ** -u.power * (u.t(tprec) * gg).adjoint(),check=False)
tmp += r * f._value[u.label]
tmp *= factor
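            # Pin the first self._n + 1 moments to those of the original
            # (classical) form after each application of Up.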
for ii in range(self._n + 1):
tmp._moments[ii] = original_moments[jj][ii]
Tf.append(tmp)
return self(Tf)
|
the-stack_0_1097 | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.connections.requests.base_request import BaseRequest
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.interfaces.connections.entities.restaurant import Restaurant
class ScheduleFoodEstablishmentReservationRequest(BaseRequest):
"""
ScheduleFoodEstablishmentReservationRequest for booking restaurant reservation
:param version: version of the request
:type version: (optional) str
:param start_time: start time of the reservation
:type start_time: (optional) str
:param party_size: party size
:type party_size: (optional) str
:param restaurant: restaurant
:type restaurant: (optional) ask_sdk_model.interfaces.connections.entities.restaurant.Restaurant
"""
deserialized_types = {
'object_type': 'str',
'version': 'str',
'start_time': 'str',
'party_size': 'str',
'restaurant': 'ask_sdk_model.interfaces.connections.entities.restaurant.Restaurant'
} # type: Dict
attribute_map = {
'object_type': '@type',
'version': '@version',
'start_time': 'startTime',
'party_size': 'partySize',
'restaurant': 'restaurant'
} # type: Dict
def __init__(self, version=None, start_time=None, party_size=None, restaurant=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[Restaurant]) -> None
"""ScheduleFoodEstablishmentReservationRequest for booking restaurant reservation
:param version: version of the request
:type version: (optional) str
:param start_time: start time of the reservation
:type start_time: (optional) str
:param party_size: party size
:type party_size: (optional) str
:param restaurant: restaurant
:type restaurant: (optional) ask_sdk_model.interfaces.connections.entities.restaurant.Restaurant
"""
self.__discriminator_value = "ScheduleFoodEstablishmentReservationRequest" # type: str
self.object_type = self.__discriminator_value
super(ScheduleFoodEstablishmentReservationRequest, self).__init__(object_type=self.__discriminator_value, version=version)
self.start_time = start_time
self.party_size = party_size
self.restaurant = restaurant
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ScheduleFoodEstablishmentReservationRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
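# Illustrative usage sketch (not part of the generated SDK source; the
# Restaurant entity ``my_restaurant`` is assumed to be built separately):
#
#     request = ScheduleFoodEstablishmentReservationRequest(
#         version="1",
#         start_time="2019-06-21T19:00",
#         party_size="2",
#         restaurant=my_restaurant)
#     payload = request.to_dict()  # serialized keys follow attribute_map, e.g. 'startTime'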
|
the-stack_0_1099 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Matplotlib classes for pulse visualization."""
import collections
import numpy as np
try:
from matplotlib import pyplot as plt, gridspec
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization.pulse.qcstyle import PulseStyle, SchedStyle
from qiskit.visualization.pulse import interpolation
from qiskit.pulse.channels import (DriveChannel, ControlChannel,
MeasureChannel, AcquireChannel,
SnapshotChannel)
from qiskit.pulse import (SamplePulse, FrameChange, PersistentValue, Snapshot,
Acquire, PulseError)
class EventsOutputChannels:
"""Pulse dataset for channel."""
def __init__(self, t0, tf):
"""Create new channel dataset.
Args:
t0 (int): starting time of plot
tf (int): ending time of plot
"""
self.pulses = {}
self.t0 = t0
self.tf = tf
self._waveform = None
self._framechanges = None
self._conditionals = None
self._snapshots = None
self._labels = None
self.enable = False
def add_instruction(self, start_time, pulse):
"""Add new pulse instruction to channel.
Args:
start_time (int): Starting time of instruction
pulse (Instruction): Instruction object to be added
"""
if start_time in self.pulses.keys():
self.pulses[start_time].append(pulse.command)
else:
self.pulses[start_time] = [pulse.command]
@property
def waveform(self):
"""Get waveform."""
if self._waveform is None:
self._build_waveform()
return self._waveform[self.t0:self.tf]
@property
def framechanges(self):
"""Get frame changes."""
if self._framechanges is None:
self._build_waveform()
return self._trim(self._framechanges)
@property
def conditionals(self):
"""Get conditionals."""
if self._conditionals is None:
self._build_waveform()
return self._trim(self._conditionals)
@property
def snapshots(self):
"""Get snapshots."""
if self._snapshots is None:
self._build_waveform()
return self._trim(self._snapshots)
@property
def labels(self):
"""Get labels."""
if self._labels is None:
self._build_waveform()
return self._trim(self._labels)
def is_empty(self):
"""Return if pulse is empty.
Returns:
bool: if the channel has nothing to plot
"""
if any(self.waveform) or self.framechanges or self.conditionals or self.snapshots:
return False
return True
def to_table(self, name):
"""Get table contains.
Args:
name (str): name of channel
Returns:
dict: dictionary of events in the channel
"""
time_event = []
framechanges = self.framechanges
conditionals = self.conditionals
snapshots = self.snapshots
for key, val in framechanges.items():
data_str = 'framechange: %.2f' % val
time_event.append((key, name, data_str))
for key, val in conditionals.items():
data_str = 'conditional, %s' % val
time_event.append((key, name, data_str))
for key, val in snapshots.items():
data_str = 'snapshot: %s' % val
time_event.append((key, name, data_str))
return time_event
def _build_waveform(self):
"""Create waveform from stored pulses.
"""
self._framechanges = {}
self._conditionals = {}
self._snapshots = {}
self._labels = {}
fc = 0
pv = np.zeros(self.tf + 1, dtype=np.complex128)
wf = np.zeros(self.tf + 1, dtype=np.complex128)
last_pv = None
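        # fc accumulates the frame-change phase, pv tracks the running
        # PersistentValue level, and wf collects the explicit SamplePulse and
        # Acquire samples; the final waveform is wf + pv.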
for time, commands in sorted(self.pulses.items()):
if time > self.tf:
break
tmp_fc = 0
for command in commands:
if isinstance(command, FrameChange):
tmp_fc += command.phase
pv[time:] = 0
elif isinstance(command, Snapshot):
self._snapshots[time] = command.name
if tmp_fc != 0:
self._framechanges[time] = tmp_fc
fc += tmp_fc
for command in commands:
if isinstance(command, PersistentValue):
pv[time:] = np.exp(1j*fc) * command.value
last_pv = (time, command)
break
for command in commands:
duration = command.duration
tf = min(time + duration, self.tf)
if isinstance(command, SamplePulse):
wf[time:tf] = np.exp(1j*fc) * command.samples[:tf-time]
pv[time:] = 0
self._labels[time] = (tf, command)
if last_pv is not None:
pv_cmd = last_pv[1]
self._labels[last_pv[0]] = (time, pv_cmd)
last_pv = None
elif isinstance(command, Acquire):
wf[time:tf] = np.ones(tf - time)
self._labels[time] = (tf, command)
self._waveform = wf + pv
def _trim(self, events):
"""Return events during given `time_range`.
Args:
events (dict): time and operation of events
Returns:
dict: dictionary of events within the time
"""
events_in_time_range = {}
for k, v in events.items():
if self.t0 <= k <= self.tf:
events_in_time_range[k] = v
return events_in_time_range
class SamplePulseDrawer:
"""A class to create figure for sample pulse."""
def __init__(self, style):
"""Create new figure.
Args:
style (PulseStyle): style sheet
"""
self.style = style or PulseStyle()
def draw(self, pulse, dt, interp_method, scaling=1):
"""Draw figure.
Args:
pulse (SamplePulse): SamplePulse to draw
dt (float): time interval
interp_method (Callable): interpolation function
See `qiskit.visualization.interpolation` for more information
scaling (float): Relative visual scaling of waveform amplitudes
Returns:
matplotlib.figure: A matplotlib figure object of the pulse envelope
"""
figure = plt.figure()
interp_method = interp_method or interpolation.step_wise
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
samples = pulse.samples
time = np.arange(0, len(samples) + 1, dtype=float) * dt
time, re, im = interp_method(time, samples, self.style.num_points)
# plot
ax.fill_between(x=time, y1=re, y2=np.zeros_like(time),
facecolor=self.style.wave_color[0], alpha=0.3,
edgecolor=self.style.wave_color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=np.zeros_like(time),
facecolor=self.style.wave_color[1], alpha=0.3,
edgecolor=self.style.wave_color[1], linewidth=1.5,
label='imaginary part')
ax.set_xlim(0, pulse.duration * dt)
if scaling:
ax.set_ylim(-scaling, scaling)
else:
v_max = max(max(np.abs(re)), max(np.abs(im)))
ax.set_ylim(-1.2 * v_max, 1.2 * v_max)
return figure
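    # Illustrative usage sketch (assumes a qiskit.pulse.SamplePulse instance
    # ``my_pulse`` and a working matplotlib backend):
    #
    #     drawer = SamplePulseDrawer(style=PulseStyle())
    #     fig = drawer.draw(my_pulse, dt=1.0, interp_method=None, scaling=1)
    #     fig.savefig('pulse_envelope.png')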
class ScheduleDrawer:
"""A class to create figure for schedule and channel."""
def __init__(self, style):
"""Create new figure.
Args:
style (SchedStyle): style sheet
"""
self.style = style or SchedStyle()
def _build_channels(self, schedule, channels_to_plot, t0, tf):
# prepare waveform channels
drive_channels = collections.OrderedDict()
measure_channels = collections.OrderedDict()
control_channels = collections.OrderedDict()
acquire_channels = collections.OrderedDict()
snapshot_channels = collections.OrderedDict()
_channels = list(schedule.channels) + channels_to_plot
_channels = list(set(_channels))
for chan in _channels:
if isinstance(chan, DriveChannel):
try:
drive_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, MeasureChannel):
try:
measure_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, ControlChannel):
try:
control_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, AcquireChannel):
try:
acquire_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, SnapshotChannel):
try:
snapshot_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
output_channels = {**drive_channels, **measure_channels,
**control_channels, **acquire_channels}
channels = {**output_channels, **acquire_channels, **snapshot_channels}
# sort by index then name to group qubits together.
output_channels = collections.OrderedDict(sorted(output_channels.items(),
key=lambda x: (x[0].index, x[0].name)))
channels = collections.OrderedDict(sorted(channels.items(),
key=lambda x: (x[0].index, x[0].name)))
for start_time, instruction in schedule.instructions:
for channel in instruction.channels:
if channel in output_channels:
output_channels[channel].add_instruction(start_time, instruction)
elif channel in snapshot_channels:
snapshot_channels[channel].add_instruction(start_time, instruction)
return channels, output_channels, snapshot_channels
def _count_valid_waveforms(self, channels, scaling=1, channels_to_plot=None,
plot_all=False):
# count numbers of valid waveform
n_valid_waveform = 0
v_max = 0
for channel, events in channels.items():
if channels_to_plot:
if channel in channels_to_plot:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
n_valid_waveform += 1
events.enable = True
else:
if not events.is_empty() or plot_all:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
n_valid_waveform += 1
events.enable = True
# when input schedule is empty or comprises only frame changes,
# we need to overwrite maximum amplitude by a value greater than zero,
# otherwise auto axis scaling will fail with zero division.
v_max = v_max or 1
if scaling:
v_max = 0.5 * scaling
else:
v_max = 0.5 / (1.2 * v_max)
return n_valid_waveform, v_max
# pylint: disable=unused-argument
def _draw_table(self, figure, channels, dt, n_valid_waveform):
# create table
table_data = []
if self.style.use_table:
for channel, events in channels.items():
if events.enable:
table_data.extend(events.to_table(channel.name))
table_data = sorted(table_data, key=lambda x: x[0])
# plot table
if table_data:
# table area size
ncols = self.style.table_columns
nrows = int(np.ceil(len(table_data)/ncols))
# fig size
h_table = nrows * self.style.fig_unit_h_table
h_waves = (self.style.figsize[1] - h_table)
# create subplots
gs = gridspec.GridSpec(2, 1, height_ratios=[h_table, h_waves], hspace=0)
tb = plt.subplot(gs[0])
ax = plt.subplot(gs[1])
# configure each cell
tb.axis('off')
cell_value = [['' for _kk in range(ncols * 3)] for _jj in range(nrows)]
cell_color = [self.style.table_color * ncols for _jj in range(nrows)]
cell_width = [*([0.2, 0.2, 0.5] * ncols)]
for ii, data in enumerate(table_data):
# pylint: disable=unbalanced-tuple-unpacking
r, c = np.unravel_index(ii, (nrows, ncols), order='f')
# pylint: enable=unbalanced-tuple-unpacking
time, ch_name, data_str = data
# item
                cell_value[r][3 * c + 0] = 't = %s' % (time * dt)
cell_value[r][3 * c + 1] = 'ch %s' % ch_name
cell_value[r][3 * c + 2] = data_str
table = tb.table(cellText=cell_value,
cellLoc='left',
rowLoc='center',
colWidths=cell_width,
bbox=[0, 0, 1, 1],
cellColours=cell_color)
table.auto_set_font_size(False)
            table.set_fontsize(self.style.table_font_size)
else:
ax = figure.add_subplot(111)
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
return ax
def _draw_snapshots(self, ax, snapshot_channels, dt, y0):
for events in snapshot_channels.values():
snapshots = events.snapshots
if snapshots:
for time in snapshots:
ax.annotate(s=u"\u25D8", xy=(time*dt, y0), xytext=(time*dt, y0+0.08),
arrowprops={'arrowstyle': 'wedge'}, ha='center')
def _draw_framechanges(self, ax, fcs, dt, y0):
framechanges_present = True
for time in fcs.keys():
ax.text(x=time*dt, y=y0, s=r'$\circlearrowleft$',
fontsize=self.style.icon_font_size,
ha='center', va='center')
return framechanges_present
def _get_channel_color(self, channel):
# choose color
if isinstance(channel, DriveChannel):
color = self.style.d_ch_color
elif isinstance(channel, ControlChannel):
color = self.style.u_ch_color
elif isinstance(channel, MeasureChannel):
color = self.style.m_ch_color
elif isinstance(channel, AcquireChannel):
color = self.style.a_ch_color
else:
color = 'black'
return color
def _prev_label_at_time(self, prev_labels, time):
for _, labels in enumerate(prev_labels):
for t0, (tf, _) in labels.items():
if time in (t0, tf):
return True
return False
def _draw_labels(self, ax, labels, prev_labels, dt, y0):
for t0, (tf, cmd) in labels.items():
if isinstance(cmd, PersistentValue):
name = cmd.name if cmd.name else 'pv'
elif isinstance(cmd, Acquire):
name = cmd.name if cmd.name else 'acquire'
else:
name = cmd.name
ax.annotate(r'%s' % name,
xy=((t0+tf)//2*dt, y0),
xytext=((t0+tf)//2*dt, y0-0.07),
fontsize=self.style.label_font_size,
ha='center', va='center')
linestyle = self.style.label_ch_linestyle
alpha = self.style.label_ch_alpha
color = self.style.label_ch_color
if not self._prev_label_at_time(prev_labels, t0):
ax.axvline(t0*dt, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
if not (self._prev_label_at_time(prev_labels, tf) or tf in labels):
ax.axvline(tf*dt, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
def _draw_channels(self, ax, output_channels, interp_method, t0, tf, dt, v_max,
label=False, framechange=True):
y0 = 0
prev_labels = []
for channel, events in output_channels.items():
if events.enable:
# plot waveform
waveform = events.waveform
time = np.arange(t0, tf + 1, dtype=float) * dt
if waveform.any():
time, re, im = interp_method(time, waveform, self.style.num_points)
else:
                    # when the input schedule is empty or comprises only frame changes,
                    # skip interpolation (there are no data points to interpolate);
                    # instead, just return vectors of zeros.
re, im = np.zeros_like(time), np.zeros_like(time)
color = self._get_channel_color(channel)
# scaling and offset
re = v_max * re + y0
im = v_max * im + y0
offset = np.zeros_like(time) + y0
# plot
ax.fill_between(x=time, y1=re, y2=offset,
facecolor=color[0], alpha=0.3,
edgecolor=color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=offset,
facecolor=color[1], alpha=0.3,
edgecolor=color[1], linewidth=1.5,
label='imaginary part')
ax.plot((t0, tf), (y0, y0), color='#000000', linewidth=1.0)
# plot frame changes
fcs = events.framechanges
if fcs and framechange:
self._draw_framechanges(ax, fcs, dt, y0)
# plot labels
labels = events.labels
if labels and label:
self._draw_labels(ax, labels, prev_labels, dt, y0)
prev_labels.append(labels)
else:
continue
# plot label
ax.text(x=0, y=y0, s=channel.name,
fontsize=self.style.axis_font_size,
ha='right', va='center')
y0 -= 1
return y0
def draw(self, schedule, dt, interp_method, plot_range,
scaling=1, channels_to_plot=None, plot_all=True,
table=True, label=False, framechange=True):
"""Draw figure.
Args:
schedule (ScheduleComponent): Schedule to draw
dt (float): time interval
interp_method (Callable): interpolation function
See `qiskit.visualization.interpolation` for more information
plot_range (tuple[float]): plot range
scaling (float): Relative visual scaling of waveform amplitudes
channels_to_plot (list[OutputChannel]): channels to draw
            plot_all (bool): if True, plot all channels even if they are empty
table (bool): Draw event table
label (bool): Label individual instructions
framechange (bool): Add framechange indicators
Returns:
matplotlib.figure: A matplotlib figure object for the pulse schedule
Raises:
VisualizationError: when schedule cannot be drawn
"""
figure = plt.figure()
if not channels_to_plot:
channels_to_plot = []
interp_method = interp_method or interpolation.step_wise
# setup plot range
if plot_range:
t0 = int(np.floor(plot_range[0]/dt))
tf = int(np.floor(plot_range[1]/dt))
else:
t0 = 0
            # when the input schedule is empty or comprises only frame changes,
            # overwrite the pulse duration with an integer greater than zero,
            # otherwise the waveform is an empty array and matplotlib will crash.
tf = schedule.stop_time or 1
# prepare waveform channels
(channels, output_channels,
snapshot_channels) = self._build_channels(schedule, channels_to_plot, t0, tf)
        # count the number of valid waveforms
n_valid_waveform, v_max = self._count_valid_waveforms(output_channels, scaling=scaling,
channels_to_plot=channels_to_plot,
plot_all=plot_all)
if table:
ax = self._draw_table(figure, channels, dt, n_valid_waveform)
else:
ax = figure.add_subplot(111)
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
ax.set_facecolor(self.style.bg_color)
y0 = self._draw_channels(ax, output_channels, interp_method,
t0, tf, dt, v_max, label=label,
framechange=framechange)
self._draw_snapshots(ax, snapshot_channels, dt, y0)
ax.set_xlim(t0 * dt, tf * dt)
ax.set_ylim(y0, 1)
ax.set_yticklabels([])
return figure
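# Minimal usage sketch for the drawer defined above (hedged): the enclosing
# drawer class and its style object are defined earlier in this module, so the
# names `ScheduleDrawer` and `stylesheet` below are assumptions for
# illustration only; `sched` is a pulse ScheduleComponent and `dt` the backend
# sample time in seconds.
#
#     drawer = ScheduleDrawer(style=stylesheet)
#     fig = drawer.draw(sched, dt=2e-9, interp_method=None,
#                       plot_range=None, label=True)
#     fig.savefig('pulse_schedule.png')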
|
the-stack_0_1100 | import logging
from typing import Any, MutableMapping, Optional
from cloudformation_cli_python_lib import (
Action,
HandlerErrorCode,
OperationStatus,
ProgressEvent,
Resource,
SessionProxy,
)
from datadog_api_client.v1 import ApiException
from datadog_api_client.v1.api.monitors_api import MonitorsApi
from datadog_api_client.v1.model.monitor import Monitor as ApiMonitor
from datadog_api_client.v1.model.monitor_options import MonitorOptions as ApiMonitorOptions
from datadog_api_client.v1.model.monitor_threshold_window_options import \
MonitorThresholdWindowOptions as ApiMonitorThresholdWindows
from datadog_api_client.v1.model.monitor_thresholds import MonitorThresholds as ApiMonitorThresholds
from datadog_api_client.v1.model.monitor_type import MonitorType as ApiMonitorType
from datadog_api_client.v1.model.monitor_update_request import MonitorUpdateRequest as ApiMonitorUpdateRequest
from datadog_cloudformation_common.api_clients import v1_client
from datadog_cloudformation_common.utils import http_to_handler_error_code
from .models import (
Creator,
MonitorOptions,
MonitorThresholdWindows,
MonitorThresholds,
ResourceHandlerRequest,
ResourceModel,
TypeConfigurationModel,
)
from .version import __version__
# Use this logger to forward log messages to CloudWatch Logs.
LOG = logging.getLogger(__name__)
TYPE_NAME = "Datadog::Monitors::Monitor"
TELEMETRY_TYPE_NAME = "monitors-monitor"
resource = Resource(TYPE_NAME, ResourceModel, TypeConfigurationModel)
test_entrypoint = resource.test_entrypoint
@resource.handler(Action.READ)
def read_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
LOG.info("Starting %s Read Handler", TYPE_NAME)
model = request.desiredResourceState
type_configuration = request.typeConfiguration
with v1_client(
type_configuration.DatadogCredentials.ApiKey,
type_configuration.DatadogCredentials.ApplicationKey,
type_configuration.DatadogCredentials.ApiURL,
TELEMETRY_TYPE_NAME,
__version__,
) as api_client:
api_instance = MonitorsApi(api_client)
monitor_id = model.Id
if monitor_id is None:
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error getting monitor: monitor does not exist",
errorCode=HandlerErrorCode.NotFound,
)
try:
monitor = api_instance.get_monitor(monitor_id)
except ApiException as e:
LOG.exception("Exception when calling MonitorsApi->get_monitor: %s\n", e)
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error getting monitor: {e}",
errorCode=http_to_handler_error_code(e.status),
)
model.Created = monitor.created.isoformat()
model.Modified = monitor.modified.isoformat()
model.Message = monitor.message
model.Name = monitor.name
model.Tags = monitor.tags
model.Priority = monitor.priority
model.Query = monitor.query
model.Multi = monitor.multi
if monitor.deleted:
model.Deleted = monitor.deleted.isoformat()
if not (
(model.Type == "query alert" and monitor.type.value == "metric alert") or
(model.Type == "metric alert" and monitor.type.value == "query alert")
):
# metric alert and query alert are interchangeable, so don't update from one to the other
model.Type = monitor.type.value
if monitor.creator:
model.Creator = Creator(Name=monitor.creator.name, Email=monitor.creator.email, Handle=monitor.creator.handle)
# Add hasattr checks for options since not all of them are applicable to all monitor types, so some attributes
# might not always be present
options = monitor.options if hasattr(monitor, "options") else None
if options:
model.Options = MonitorOptions(
EnableLogsSample=options.enable_logs_sample if hasattr(options, "enable_logs_sample") else None,
EscalationMessage=options.escalation_message if hasattr(options, "escalation_message") else None,
EvaluationDelay=options.evaluation_delay if hasattr(options, "evaluation_delay") else None,
IncludeTags=options.include_tags if hasattr(options, "include_tags") else None,
Locked=options.locked if hasattr(options, "locked") else None,
MinLocationFailed=options.min_location_failed if hasattr(options, "min_location_failed") else None,
NewHostDelay=options.new_host_delay if hasattr(options, "new_host_delay") else None,
NoDataTimeframe=options.no_data_timeframe if hasattr(options, "no_data_timeframe") else None,
NotifyAudit=options.notify_audit if hasattr(options, "notify_audit") else None,
NotifyNoData=options.notify_no_data if hasattr(options, "notify_no_data") else None,
RenotifyInterval=options.renotify_interval if hasattr(options, "renotify_interval") else None,
RequireFullWindow=options.require_full_window if hasattr(options, "require_full_window") else None,
SyntheticsCheckID=options.synthetics_check_id if hasattr(options, "synthetics_check_id") else None,
Thresholds=None,
ThresholdWindows=None,
TimeoutH=options.timeout_h if hasattr(options, "timeout_h") else None,
)
thresholds = options.thresholds if hasattr(options, "thresholds") else None
if thresholds:
model.Options.Thresholds = MonitorThresholds(
Critical=thresholds.critical if hasattr(thresholds, "critical") else None,
CriticalRecovery=thresholds.critical_recovery if hasattr(thresholds, "critical_recovery") else None,
Warning=thresholds.warning if hasattr(thresholds, "warning") else None,
WarningRecovery=thresholds.warning_recovery if hasattr(thresholds, "warning_recovery") else None,
OK=thresholds.ok if hasattr(thresholds, "ok") else None,
)
tw = options.threshold_windows if hasattr(options, "threshold_windows") else None
if tw:
model.Options.ThresholdWindows = MonitorThresholdWindows(
TriggerWindow=tw.trigger_window if hasattr(tw, "trigger_window") else None,
RecoveryWindow=tw.recovery_window if hasattr(tw, "recovery_window") else None,
)
model.Id = monitor.id
return ProgressEvent(
status=OperationStatus.SUCCESS,
resourceModel=model,
)
@resource.handler(Action.UPDATE)
def update_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
LOG.info("Starting %s Update Handler", TYPE_NAME)
model = request.desiredResourceState
type_configuration = request.typeConfiguration
monitor = ApiMonitorUpdateRequest()
monitor.query = model.Query
monitor.type = ApiMonitorType(model.Type)
if model.Message is not None:
monitor.message = model.Message
if model.Name is not None:
monitor.name = model.Name
if model.Tags is not None:
monitor.tags = model.Tags
if model.Priority is not None:
monitor.priority = model.Priority
options = build_monitor_options_from_model(model)
if options:
monitor.options = options
with v1_client(
type_configuration.DatadogCredentials.ApiKey,
type_configuration.DatadogCredentials.ApplicationKey,
type_configuration.DatadogCredentials.ApiURL,
TELEMETRY_TYPE_NAME,
__version__,
) as api_client:
api_instance = MonitorsApi(api_client)
try:
api_instance.update_monitor(model.Id, monitor)
except ApiException as e:
LOG.exception("Exception when calling MonitorsApi->update_monitor: %s\n", e)
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error updating monitor: {e}",
errorCode=http_to_handler_error_code(e.status),
)
return read_handler(session, request, callback_context)
@resource.handler(Action.DELETE)
def delete_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
LOG.info("Starting %s Delete Handler", TYPE_NAME)
model = request.desiredResourceState
type_configuration = request.typeConfiguration
with v1_client(
type_configuration.DatadogCredentials.ApiKey,
type_configuration.DatadogCredentials.ApplicationKey,
type_configuration.DatadogCredentials.ApiURL,
TELEMETRY_TYPE_NAME,
__version__,
) as api_client:
api_instance = MonitorsApi(api_client)
try:
api_instance.delete_monitor(model.Id)
except ApiException as e:
LOG.exception("Exception when calling MonitorsApi->delete_monitor: %s\n", e)
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error deleting monitor: {e}",
errorCode=http_to_handler_error_code(e.status),
)
return ProgressEvent(
status=OperationStatus.SUCCESS,
resourceModel=None,
)
@resource.handler(Action.CREATE)
def create_handler(
session: Optional[SessionProxy],
request: ResourceHandlerRequest,
callback_context: MutableMapping[str, Any],
) -> ProgressEvent:
LOG.info("Starting %s Create Handler", TYPE_NAME)
model = request.desiredResourceState
type_configuration = request.typeConfiguration
monitor = ApiMonitor(model.Query, ApiMonitorType(model.Type))
if model.Message is not None:
monitor.message = model.Message
if model.Name is not None:
monitor.name = model.Name
if model.Tags is not None:
monitor.tags = model.Tags
if model.Priority is not None:
monitor.priority = model.Priority
options = build_monitor_options_from_model(model)
if options:
monitor.options = options
with v1_client(
type_configuration.DatadogCredentials.ApiKey,
type_configuration.DatadogCredentials.ApplicationKey,
type_configuration.DatadogCredentials.ApiURL,
TELEMETRY_TYPE_NAME,
__version__,
) as api_client:
api_instance = MonitorsApi(api_client)
try:
monitor_resp = api_instance.create_monitor(monitor)
except ApiException as e:
LOG.exception("Exception when calling MonitorsApi->create_monitor: %s\n", e)
return ProgressEvent(
status=OperationStatus.FAILED,
resourceModel=model,
message=f"Error creating monitor: {e}",
errorCode=http_to_handler_error_code(e.status),
)
model.Id = monitor_resp.id
return read_handler(session, request, callback_context)
def build_monitor_options_from_model(model: ResourceModel) -> ApiMonitorOptions:
options = None
if model.Options:
options = ApiMonitorOptions()
# Nullable attributes
options.evaluation_delay = model.Options.EvaluationDelay
options.min_location_failed = model.Options.MinLocationFailed
options.new_host_delay = model.Options.NewHostDelay
options.no_data_timeframe = model.Options.NoDataTimeframe
options.synthetics_check_id = model.Options.SyntheticsCheckID
options.timeout_h = model.Options.TimeoutH
options.renotify_interval = model.Options.RenotifyInterval
# Non nullable
if model.Options.EnableLogsSample is not None:
options.enable_logs_sample = model.Options.EnableLogsSample
if model.Options.EscalationMessage is not None:
options.escalation_message = model.Options.EscalationMessage
if model.Options.IncludeTags is not None:
options.include_tags = model.Options.IncludeTags
if model.Options.Locked is not None:
options.locked = model.Options.Locked
if model.Options.NotifyAudit is not None:
options.notify_audit = model.Options.NotifyAudit
if model.Options.NotifyNoData is not None:
options.notify_no_data = model.Options.NotifyNoData
if model.Options.RequireFullWindow is not None:
options.require_full_window = model.Options.RequireFullWindow
if model.Options.Thresholds is not None:
options.thresholds = ApiMonitorThresholds()
if model.Options.Thresholds.Critical is not None:
options.thresholds.critical = model.Options.Thresholds.Critical
if model.Options.Thresholds.CriticalRecovery is not None:
options.thresholds.critical_recovery = model.Options.Thresholds.CriticalRecovery
if model.Options.Thresholds.Warning is not None:
options.thresholds.warning = model.Options.Thresholds.Warning
if model.Options.Thresholds.WarningRecovery is not None:
options.thresholds.warning_recovery = model.Options.Thresholds.WarningRecovery
if model.Options.Thresholds.OK is not None:
options.thresholds.ok = model.Options.Thresholds.OK
if model.Options.ThresholdWindows is not None:
options.threshold_windows = ApiMonitorThresholdWindows()
options.threshold_windows.trigger_window = model.Options.ThresholdWindows.TriggerWindow
options.threshold_windows.recovery_window = model.Options.ThresholdWindows.RecoveryWindow
return options
|
the-stack_0_1102 | from floodsystem.flood import stations_highest_rel_level
from floodsystem.stationdata import build_station_list , update_water_levels
from floodsystem.datafetcher import fetch_latest_water_level_data, fetch_station_data
stations = build_station_list()
N=10
update_water_levels(stations)
stations_high_threat = stations_highest_rel_level(stations, N)
for station in stations_high_threat:
print(station[0].name, station[1])
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
|
the-stack_0_1103 | # placeholder definition for an access pattern object, can be passed as input
class PatternConfig:
def __init__(self,
exp_name="default", #name or ID
benchmark_name="test", #if this is a specific benchmark, include here
read_freq=-1, #number of reads/s
total_reads=-1, #total number of reads, can compute either way
read_size=8, #size/read in bytes
write_freq=-1, #number of writes/s
total_writes=-1, #total number of reads, can compute either way
write_size=8, #size/write in bytes
workingset=1, #total working set size in MB
total_ins=-1 #total number of ins in benchmark
):
#load all the parameters into the pattern class
#everything that defines the access pattern should be in this class
self.exp_name = exp_name
self.benchmark_name = benchmark_name
self.read_freq = read_freq
self.total_reads = total_reads
self.read_size = read_size
self.write_freq = write_freq
self.total_writes = total_writes
self.write_size = write_size
self.workingset = workingset
self.total_ins = total_ins
benchmarks = [ #collection of benchmarks from Tufts IISWC paper
PatternConfig(benchmark_name="bzip2",
total_reads=4.3e9,
read_size=4,
total_writes=1.47e9,
write_size=4,
workingset=(2505.38e3/1024./1024.),
total_ins=1404973
),
PatternConfig(benchmark_name="GemsFDTD",
total_reads=1.3e9,
read_size=4,
total_writes=0.7e9,
write_size=4,
workingset=(76576.59e3/1024./1024.),
total_ins=475257
),
PatternConfig(benchmark_name="tonto",
total_reads=1.1e9,
read_size=4,
total_writes=0.47e9,
write_size=4,
workingset=(5.59e3/1024./1024.),
total_ins=490533
),
PatternConfig(benchmark_name="leela",
total_reads=6.01e9,
read_size=4,
total_writes=2.35e9,
write_size=4,
workingset=(1.59e3/1024./1024.),
total_ins=42211
),
PatternConfig(benchmark_name="exchange2",
total_reads=62.28e9,
read_size=4,
total_writes=42.89e9,
write_size=4,
workingset=(0.64e3/1024./1024.),
total_ins=417088
),
PatternConfig(benchmark_name="deepsjeng",
total_reads=9.36e9,
read_size=4,
total_writes=4.43e9,
write_size=4,
workingset=(4.79e3/1024./1024.),
total_ins=71720506
),
PatternConfig(benchmark_name="vips",
total_reads=1.91e9,
read_size=4,
total_writes=0.68e9,
write_size=4,
workingset=(1107.19e3/1024./1024.),
total_ins=3949419070
),
PatternConfig(benchmark_name="x264",
total_reads=18.07e9,
read_size=4,
total_writes=2.84e9,
write_size=4,
workingset=(1585.49e3/1024./1024.),
total_ins=229607
),
PatternConfig(benchmark_name="cg",
total_reads=0.73e9,
read_size=4,
total_writes=0.04e9,
write_size=4,
workingset=(1015.43e3/1024./1024.),
total_ins=1942892619
),
PatternConfig(benchmark_name="ep",
total_reads=1.25e9,
read_size=4,
total_writes=0.54e9,
write_size=4,
workingset=(0.84e3/1024./1024.),
total_ins=7051878902
),
PatternConfig(benchmark_name="ft",
total_reads=0.28e9,
read_size=4,
total_writes=0.27e9,
write_size=4,
workingset=(342.64e3/1024./1024.),
total_ins=1416746823
),
PatternConfig(benchmark_name="is",
total_reads=0.12e9,
read_size=4,
total_writes=0.06e9,
write_size=4,
workingset=(1228.86e3/1024./1024.),
total_ins=298507496
),
PatternConfig(benchmark_name="lu",
total_reads=17.84e9,
read_size=4,
total_writes=3.99e9,
write_size=4,
workingset=(289.46e3/1024./1024.),
total_ins=29482003362
),
PatternConfig(benchmark_name="mg",
total_reads=0.76e9,
read_size=4,
total_writes=0.16e9,
write_size=4,
workingset=(4249.78e3/1024./1024.),
total_ins=1308033184
),
PatternConfig(benchmark_name="sp",
total_reads=9.23e9,
read_size=4,
total_writes=4.12e9,
write_size=4,
workingset=(556.75e3/1024./1024.),
total_ins=30840210911
),
PatternConfig(benchmark_name="ua",
total_reads=9.97e9,
read_size=4,
total_writes=5.85e9,
write_size=4,
workingset=(362.45e3/1024./1024.),
total_ins=19361069980
),
]
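# A small, self-contained usage sketch (added for illustration): it only uses
# the fields defined above to report the total memory traffic implied by each
# benchmark entry.
if __name__ == "__main__":
    for bench in benchmarks:
        read_gb = bench.total_reads * bench.read_size / 1e9
        write_gb = bench.total_writes * bench.write_size / 1e9
        print("%-12s %9.2f GB read %9.2f GB written %8.2f MB working set"
              % (bench.benchmark_name, read_gb, write_gb, bench.workingset))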
|
the-stack_0_1105 | import datetime
import re
from io import BytesIO
from unittest.mock import create_autospec, call, Mock
import pytest
from sap.aibus.dar.client.base_client import BaseClient
from sap.aibus.dar.client.data_manager_client import DataManagerClient
from sap.aibus.dar.client.exceptions import ModelAlreadyExists, DARHTTPException
from sap.aibus.dar.client.util.credentials import (
StaticCredentialsSource,
CredentialsSource,
)
from sap.aibus.dar.client.workflow.model import ModelCreator
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
from tests.sap.aibus.dar.client.test_data_manager_client import (
AbstractDARClientConstruction,
)
@pytest.fixture
def csv_data_stream():
csv = """
manufacturer,description,category,subcategory
me,"simple è test, records",A,AA
me,"übrigens ein Beispiel, records",A,AA
me,"un po' di testo",A,AA
me,"какой-то текст",A,AA
me,"du texte",A,AA
me,"一些文字",A,AA
me,"कुछ पाठ",A,AA
me,"κάποιο κείμενο",A,AA
me,"кейбір мәтін",A,AA
me,"iu teksto",A,AA
"""
data_stream = BytesIO(csv.strip().encode("utf-8"))
return data_stream
@pytest.fixture()
def create_model():
create_model = ModelCreator.construct_from_jwt("https://abcd/", token="54321")
create_model.data_manager_client = create_autospec(DataManagerClient, instance=True)
create_model.model_manager_client = create_autospec(
ModelManagerClient, instance=True
)
return create_model
@pytest.fixture()
def model_resource():
return {
"jobId": "522de4e6-2609-4972-8f75-61e9262b86de",
"name": "my-model",
"createdAt": "2018-08-31T11:45:54+00:00",
"validationResult": {
"accuracy": 0.9,
"f1Score": 0.9,
"precision": 0.9,
"recall": 0.9,
},
}
a_timestamp = datetime.datetime(
2011, 11, 4, 0, 5, 23, 283000, tzinfo=datetime.timezone.utc
)
class TestModelCreatorClientConstruction(AbstractDARClientConstruction):
# Tests are in base class
clazz = ModelCreator
def test_constructor(self):
dar_url = "https://aiservices-dar.cfapps.xxx.hana.ondemand.com/"
source = StaticCredentialsSource("1234")
client = self.clazz(dar_url, source)
for embedded_client in [
client.data_manager_client,
client.model_manager_client,
]:
assert embedded_client.credentials_source == source
def test_create_from_jwt(self):
# Override and change assertions to look into embedded clients.
jwt = "12345"
client = self.clazz.construct_from_jwt(self.dar_url, jwt)
for embedded_client in [
client.data_manager_client,
client.model_manager_client,
]:
assert isinstance(
embedded_client.credentials_source, StaticCredentialsSource
)
assert embedded_client.credentials_source.token() == jwt
assert embedded_client.session.base_url == self.dar_url[:-1]
def _assert_fields_initialized(self, client):
assert isinstance(client.data_manager_client, DataManagerClient)
assert isinstance(client.model_manager_client, ModelManagerClient)
for embedded_client in [
client.data_manager_client,
client.model_manager_client,
]:
assert (
embedded_client.session.base_url
== "https://aiservices-dar.cfapps.xxx.hana.ondemand.com"
)
assert isinstance(embedded_client.credentials_source, CredentialsSource)
class TestModelCreator:
def test_is_subclass_of_base_client(self):
# Should have all the nice construction methods
assert issubclass(ModelCreator, BaseClient)
def test_format_dataset_name(self):
formatted = ModelCreator.format_dataset_name("my-model")
assert re.match(r"my-model-(\w|-)+", formatted)
assert len(formatted) <= 255
        # returns a different name for the same input on the next call
formatted_2 = ModelCreator.format_dataset_name("my-model")
assert formatted != formatted_2
assert len(formatted_2) <= 255
def test_format_dataset_name_excessive_length_is_truncated(self):
input_str = "a" * 300
formatted = ModelCreator.format_dataset_name(input_str)
assert len(formatted) == 255
uuid_len = 37
# First part is still all a's
assert formatted[:-uuid_len] == input_str[0 : 255 - uuid_len]
def test_create_model(self, csv_data_stream, create_model, model_resource):
# inputs
# model_name: str,
model_template_id = "d7810207-ca31-4d4d-9b5a-841a644fd81f"
dataset_schema = {
"features": [
{"label": "manufacturer", "type": "CATEGORY"},
{"label": "description", "type": "TEXT"},
],
"labels": [
{"label": "category", "type": "CATEGORY"},
{"label": "subcategory", "type": "CATEGORY"},
],
"name": "test",
}
new_dataset_schema_id = "3689fc17-5394-46ba-8757-39a36b570e6e"
dataset_schema_created = dict(dataset_schema.items())
dataset_schema_created["id"] = new_dataset_schema_id
dataset_schema_created["createdAt"] = a_timestamp.isoformat()
model_name = "my-model"
dataset_name = model_name + "-123"
new_dataset_id = "915f16d7-48b0-438b-aca8-048f855ac627"
dataset_created = {
"createdAt": a_timestamp.isoformat(),
"id": new_dataset_id,
"name": dataset_name,
"status": "SUCCEEDED",
"validationMessage": "",
"datasetSchemaId": new_dataset_schema_id,
}
create_model.format_dataset_name = Mock(return_value=dataset_name)
create_model.data_manager_client.create_dataset_schema.return_value = (
dataset_schema_created
)
create_model.data_manager_client.create_dataset.return_value = dataset_created
dm = create_model.data_manager_client
mm = create_model.model_manager_client
mm.read_model_by_name.side_effect = [
DARHTTPException(url="https://abcd/", response=Mock(status_code=404)),
model_resource,
]
# act
result = create_model.create(
data_stream=csv_data_stream,
model_template_id=model_template_id,
dataset_schema=dataset_schema,
model_name=model_name,
)
assert result == model_resource
# Expected calls
expected_create_dataset_schema = call(dataset_schema)
assert dm.create_dataset_schema.call_args_list == [
expected_create_dataset_schema
]
expected_dataset_name = dataset_name
expected_create_dataset = call(
dataset_name=expected_dataset_name,
dataset_schema_id=dataset_schema_created["id"],
)
assert dm.create_dataset.call_args_list == [expected_create_dataset]
expected_call_to_upload_and_validate = call(
dataset_id=dataset_created["id"], data_stream=csv_data_stream
)
assert dm.upload_data_and_validate.call_args_list == [
expected_call_to_upload_and_validate
]
expected_call_to_create_job_and_wait = call(
model_name=model_name,
dataset_id=new_dataset_id,
model_template_id=model_template_id,
)
assert mm.create_job_and_wait.call_args_list == [
expected_call_to_create_job_and_wait
]
expected_call_to_read_model_by_name = call(model_name=model_name)
assert mm.read_model_by_name.call_args_list == [
expected_call_to_read_model_by_name,
expected_call_to_read_model_by_name,
]
def test_create_model_checks_for_existing_model(self, create_model, model_resource):
"""
If the model already exists, this should be an error.
"""
model_name = "my-model"
create_model.model_manager_client.read_model_by_name.return_value = (
model_resource
)
with pytest.raises(ModelAlreadyExists) as context:
create_model.create(
data_stream=Mock(),
model_template_id=Mock(),
dataset_schema=Mock(),
model_name=model_name,
)
assert "Model 'my-model' already exists" in str(context.value)
assert create_model.model_manager_client.read_model_by_name.call_args_list == [
call(model_name=model_name)
]
def test_create_model_forwards_exception(self, create_model, model_resource):
"""
If ModelManagerClient.read_model_by_name raises a 404 in the initial check,
        this means that the model is not there and execution can proceed. This is
        tested in test_create_model above.
        For all other status codes, the exception should be re-raised as is.
This is tested here.
"""
model_name = "my-model"
exc = DARHTTPException(url="https://abcd/", response=Mock(status_code=429))
create_model.model_manager_client.read_model_by_name.side_effect = exc
with pytest.raises(DARHTTPException) as context:
create_model.create(
data_stream=Mock(),
model_template_id=Mock(),
dataset_schema=Mock(),
model_name=model_name,
)
assert context.value == exc
assert create_model.model_manager_client.read_model_by_name.call_args_list == [
call(model_name=model_name)
]
|
the-stack_0_1107 | from os import getenv
from dotenv import load_dotenv
load_dotenv()
UNSPLASH_ACCESS_KEY = getenv('UNSPLASH_ACCESS_KEY')
FLICKR_KEY = getenv('FLICKR_KEY')
FLICKR_SECRET = getenv('FLICKR_SECRET')
ALBUM_FONTS = [
'Comforter Brush',
'Praise',
'Dancing Script',
'Estonia',
]
ARTIST_FONTS = [
'Bebas Neue',
'Road Rage',
'Comfortaa',
'Lobster',
'Patua One',
] |
the-stack_0_1109 | import asyncio
import sys
import time
from datetime import datetime
from decimal import Decimal
from typing import Callable, List, Optional, Tuple, Dict
import aiohttp
from peas.cmds.units import units
from peas.rpc.wallet_rpc_client import WalletRpcClient
from peas.server.start_wallet import SERVICE_NAME
from peas.util.bech32m import encode_puzzle_hash
from peas.util.byte_types import hexstr_to_bytes
from peas.util.config import load_config
from peas.util.default_root import DEFAULT_ROOT_PATH
from peas.util.ints import uint16, uint64
from peas.wallet.transaction_record import TransactionRecord
from peas.wallet.util.wallet_types import WalletType
def print_transaction(tx: TransactionRecord, verbose: bool, name) -> None:
if verbose:
print(tx)
else:
peas_amount = Decimal(int(tx.amount)) / units["peas"]
to_address = encode_puzzle_hash(tx.to_puzzle_hash, name)
print(f"Transaction {tx.name}")
print(f"Status: {'Confirmed' if tx.confirmed else ('In mempool' if tx.is_in_mempool() else 'Pending')}")
print(f"Amount: {peas_amount} {name}")
print(f"To address: {to_address}")
print("Created at:", datetime.fromtimestamp(tx.created_at_time).strftime("%Y-%m-%d %H:%M:%S"))
print("")
async def get_transaction(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
transaction_id = hexstr_to_bytes(args["tx_id"])
config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
tx: TransactionRecord = await wallet_client.get_transaction(wallet_id, transaction_id=transaction_id)
print_transaction(tx, verbose=(args["verbose"] > 0), name=name)
async def get_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
txs: List[TransactionRecord] = await wallet_client.get_transactions(wallet_id)
config = load_config(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
name = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
if len(txs) == 0:
print("There are no transactions to this address")
offset = args["offset"]
num_per_screen = 5
for i in range(offset, len(txs), num_per_screen):
for j in range(0, num_per_screen):
if i + j >= len(txs):
break
print_transaction(txs[i + j], verbose=(args["verbose"] > 0), name=name)
if i + num_per_screen >= len(txs):
return None
print("Press q to quit, or c to continue")
while True:
entered_key = sys.stdin.read(1)
if entered_key == "q":
return None
elif entered_key == "c":
break
def check_unusual_transaction(amount: Decimal, fee: Decimal):
return fee >= amount
async def send(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
amount = Decimal(args["amount"])
fee = Decimal(args["fee"])
address = args["address"]
override = args["override"]
if not override and check_unusual_transaction(amount, fee):
print(
f"A transaction of amount {amount} and fee {fee} is unusual.\n"
f"Pass in --override if you are sure you mean to do this."
)
return
print("Submitting transaction...")
final_amount = uint64(int(amount * units["peas"]))
final_fee = uint64(int(fee * units["peas"]))
res = await wallet_client.send_transaction(wallet_id, final_amount, address, final_fee)
tx_id = res.name
start = time.time()
while time.time() - start < 10:
await asyncio.sleep(0.1)
tx = await wallet_client.get_transaction(wallet_id, tx_id)
if len(tx.sent_to) > 0:
print(f"Transaction submitted to nodes: {tx.sent_to}")
print(f"Do peas wallet get_transaction -f {fingerprint} -tx 0x{tx_id} to get status")
return None
print("Transaction not yet submitted to nodes")
print(f"Do 'peas wallet get_transaction -f {fingerprint} -tx 0x{tx_id}' to get status")
async def get_address(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
res = await wallet_client.get_next_address(wallet_id, False)
print(res)
async def delete_unconfirmed_transactions(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args["id"]
await wallet_client.delete_unconfirmed_transactions(wallet_id)
print(f"Successfully deleted all unconfirmed transactions for wallet id {wallet_id} on key {fingerprint}")
def wallet_coin_unit(typ: WalletType, address_prefix: str) -> Tuple[str, int]:
if typ == WalletType.COLOURED_COIN:
return "", units["colouredcoin"]
if typ in [WalletType.STANDARD_WALLET, WalletType.POOLING_WALLET, WalletType.MULTI_SIG, WalletType.RATE_LIMITED]:
return address_prefix, units["peas"]
return "", units["mojo"]
def print_balance(amount: int, scale: int, address_prefix: str) -> str:
ret = f"{amount/scale} {address_prefix} "
if scale > 1:
ret += f"({amount} mojo)"
return ret
async def print_balances(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
summaries_response = await wallet_client.get_wallets()
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
print(f"Wallet height: {await wallet_client.get_height_info()}")
print(f"Sync status: {'Synced' if (await wallet_client.get_synced()) else 'Not synced'}")
print(f"Balances, fingerprint: {fingerprint}")
for summary in summaries_response:
wallet_id = summary["id"]
balances = await wallet_client.get_wallet_balance(wallet_id)
typ = WalletType(int(summary["type"]))
address_prefix, scale = wallet_coin_unit(typ, address_prefix)
print(f"Wallet ID {wallet_id} type {typ.name} {summary['name']}")
print(f" -Total Balance: {print_balance(balances['confirmed_wallet_balance'], scale, address_prefix)}")
print(
f" -Pending Total Balance: {print_balance(balances['unconfirmed_wallet_balance'], scale, address_prefix)}"
)
print(f" -Spendable: {print_balance(balances['spendable_balance'], scale, address_prefix)}")
async def get_wallet(wallet_client: WalletRpcClient, fingerprint: int = None) -> Optional[Tuple[WalletRpcClient, int]]:
if fingerprint is not None:
fingerprints = [fingerprint]
else:
fingerprints = await wallet_client.get_public_keys()
if len(fingerprints) == 0:
print("No keys loaded. Run 'peas keys generate' or import a key")
return None
if len(fingerprints) == 1:
fingerprint = fingerprints[0]
if fingerprint is not None:
log_in_response = await wallet_client.log_in(fingerprint)
else:
print("Choose wallet key:")
for i, fp in enumerate(fingerprints):
print(f"{i+1}) {fp}")
val = None
while val is None:
val = input("Enter a number to pick or q to quit: ")
if val == "q":
return None
if not val.isdigit():
val = None
else:
index = int(val) - 1
if index >= len(fingerprints):
print("Invalid value")
val = None
continue
else:
fingerprint = fingerprints[index]
assert fingerprint is not None
log_in_response = await wallet_client.log_in(fingerprint)
if log_in_response["success"] is False:
if log_in_response["error"] == "not_initialized":
use_cloud = True
if "backup_path" in log_in_response:
path = log_in_response["backup_path"]
print(f"Backup file from backup.peas.net downloaded and written to: {path}")
val = input("Do you want to use this file to restore from backup? (Y/N) ")
if val.lower() == "y":
log_in_response = await wallet_client.log_in_and_restore(fingerprint, path)
else:
use_cloud = False
if "backup_path" not in log_in_response or use_cloud is False:
if use_cloud is True:
val = input(
"No online backup file found,\n Press S to skip restore from backup"
"\n Press F to use your own backup file: "
)
else:
val = input(
"Cloud backup declined,\n Press S to skip restore from backup"
"\n Press F to use your own backup file: "
)
if val.lower() == "s":
log_in_response = await wallet_client.log_in_and_skip(fingerprint)
elif val.lower() == "f":
val = input("Please provide the full path to your backup file: ")
log_in_response = await wallet_client.log_in_and_restore(fingerprint, val)
if "success" not in log_in_response or log_in_response["success"] is False:
if "error" in log_in_response:
error = log_in_response["error"]
print(f"Error: {log_in_response[error]}")
return None
return wallet_client, fingerprint
async def execute_with_wallet(
wallet_rpc_port: Optional[int], fingerprint: int, extra_params: Dict, function: Callable
) -> None:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
wallet_client_f = await get_wallet(wallet_client, fingerprint=fingerprint)
if wallet_client_f is None:
wallet_client.close()
await wallet_client.await_closed()
return None
wallet_client, fingerprint = wallet_client_f
await function(extra_params, wallet_client, fingerprint)
except KeyboardInterrupt:
pass
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(
f"Connection error. Check if the wallet is running at {wallet_rpc_port}. "
"You can run the wallet via:\n\tchia start wallet"
)
else:
print(f"Exception from 'wallet' {e}")
wallet_client.close()
await wallet_client.await_closed()
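# Usage sketch (hedged): a CLI entry point would typically drive one of the
# coroutine handlers above through execute_with_wallet. The fingerprint and
# argument dict below are placeholders, and wrapping the call in asyncio.run
# is an assumption about the surrounding CLI code (asyncio is already
# imported at the top of this module).
#
#     asyncio.run(execute_with_wallet(
#         wallet_rpc_port=None, fingerprint=1234567890,
#         extra_params={"id": 1, "verbose": 0, "offset": 0},
#         function=get_transactions))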
|
the-stack_0_1110 | __author__ = 'marble_xu'
import os
import json
from abc import abstractmethod
import pygame as pg
from . import constants as c
class State():
def __init__(self):
self.start_time = 0.0
self.current_time = 0.0
self.done = False
self.next = None
self.persist = {}
@abstractmethod
def startup(self, current_time, persist):
'''abstract method'''
def cleanup(self):
self.done = False
return self.persist
@abstractmethod
def update(self, surface, keys, current_time):
'''abstract method'''
class Control():
def __init__(self):
self.screen = pg.display.get_surface()
self.done = False
self.clock = pg.time.Clock()
self.fps = 60
self.keys = pg.key.get_pressed()
self.mouse_pos = None
self.mouse_click = [False, False] # value:[left mouse click, right mouse click]
self.current_time = 0.0
self.state_dict = {}
self.state_name = None
self.state = None
self.game_info = {c.CURRENT_TIME:0.0,
c.LEVEL_NUM:c.START_LEVEL_NUM}
def setup_states(self, state_dict, start_state):
self.state_dict = state_dict
self.state_name = start_state
self.state = self.state_dict[self.state_name]
self.state.startup(self.current_time, self.game_info)
def update(self):
self.current_time = pg.time.get_ticks()
if self.state.done:
self.flip_state()
self.state.update(self.screen, self.current_time, self.mouse_pos, self.mouse_click)
self.mouse_pos = None
self.mouse_click[0] = False
self.mouse_click[1] = False
def flip_state(self):
previous, self.state_name = self.state_name, self.state.next
persist = self.state.cleanup()
self.state = self.state_dict[self.state_name]
self.state.startup(self.current_time, persist)
def event_loop(self):
for event in pg.event.get():
if event.type == pg.QUIT:
self.done = True
elif event.type == pg.KEYDOWN:
self.keys = pg.key.get_pressed()
elif event.type == pg.KEYUP:
self.keys = pg.key.get_pressed()
elif event.type == pg.MOUSEBUTTONDOWN:
self.mouse_pos = pg.mouse.get_pos()
self.mouse_click[0], _, self.mouse_click[1] = pg.mouse.get_pressed()
print('pos:', self.mouse_pos, ' mouse:', self.mouse_click)
def main(self):
while not self.done:
self.event_loop()
self.update()
pg.display.update()
self.clock.tick(self.fps)
print('game over')
def get_image(sheet, x, y, width, height, colorkey=c.BLACK, scale=1):
image = pg.Surface([width, height])
rect = image.get_rect()
image.blit(sheet, (0, 0), (x, y, width, height))
image.set_colorkey(colorkey)
image = pg.transform.scale(image,
(int(rect.width*scale),
int(rect.height*scale)))
return image
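# Usage sketch (hedged): crop one 64x64 tile from a loaded sprite sheet and
# scale it up 2x; the file path and tile geometry below are placeholders, not
# part of the game's actual resources.
#
#     sheet = pg.image.load('resources/graphics/sheet.png').convert()
#     frame = get_image(sheet, 0, 0, 64, 64, colorkey=c.BLACK, scale=2)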
def load_image_frames(directory, image_name, colorkey, accept):
frame_list = []
tmp = {}
# image_name is "Peashooter", pic name is 'Peashooter_1', get the index 1
index_start = len(image_name) + 1
    frame_num = 0
for pic in os.listdir(directory):
name, ext = os.path.splitext(pic)
if ext.lower() in accept:
index = int(name[index_start:])
img = pg.image.load(os.path.join(directory, pic))
if img.get_alpha():
img = img.convert_alpha()
else:
img = img.convert()
img.set_colorkey(colorkey)
            tmp[index] = img
frame_num += 1
for i in range(frame_num):
frame_list.append(tmp[i])
return frame_list
def load_all_gfx(directory, colorkey=c.WHITE, accept=('.png', '.jpg', '.bmp', '.gif')):
graphics = {}
for name1 in os.listdir(directory):
# subfolders under the folder resources\graphics
dir1 = os.path.join(directory, name1)
if os.path.isdir(dir1):
for name2 in os.listdir(dir1):
dir2 = os.path.join(dir1, name2)
if os.path.isdir(dir2):
# e.g. subfolders under the folder resources\graphics\Zombies
for name3 in os.listdir(dir2):
dir3 = os.path.join(dir2, name3)
# e.g. subfolders or pics under the folder resources\graphics\Zombies\ConeheadZombie
if os.path.isdir(dir3):
# e.g. it's the folder resources\graphics\Zombies\ConeheadZombie\ConeheadZombieAttack
image_name, _ = os.path.splitext(name3)
graphics[image_name] = load_image_frames(dir3, image_name, colorkey, accept)
else:
# e.g. pics under the folder resources\graphics\Plants\Peashooter
image_name, _ = os.path.splitext(name2)
graphics[image_name] = load_image_frames(dir2, image_name, colorkey, accept)
break
else:
# e.g. pics under the folder resources\graphics\Screen
name, ext = os.path.splitext(name2)
if ext.lower() in accept:
img = pg.image.load(dir2)
if img.get_alpha():
img = img.convert_alpha()
else:
img = img.convert()
img.set_colorkey(colorkey)
graphics[name] = img
return graphics
def loadZombieImageRect():
file_path = os.path.join('source', 'data', 'entity', 'zombie.json')
f = open(file_path)
data = json.load(f)
f.close()
return data[c.ZOMBIE_IMAGE_RECT]
def loadPlantImageRect():
file_path = os.path.join('source', 'data', 'entity', 'plant.json')
f = open(file_path)
data = json.load(f)
f.close()
return data[c.PLANT_IMAGE_RECT]
pg.init()
pg.display.set_caption(c.ORIGINAL_CAPTION)
SCREEN = pg.display.set_mode(c.SCREEN_SIZE)
GFX = load_all_gfx(os.path.join("resources","graphics"))
ZOMBIE_RECT = loadZombieImageRect()
PLANT_RECT = loadPlantImageRect()
|
the-stack_0_1111 | from datetime import date
from silverstrike.models import Account, Split, Transaction
def create_transaction(title, src, dst, amount, type, date=date.today(), category=None):
t = Transaction.objects.create(title=title, date=date, transaction_type=type,
src=src, dst=dst, amount=amount)
Split.objects.bulk_create([
Split(title=title, account=src, opposing_account=dst,
amount=-amount, transaction=t, date=date, category=category),
Split(title=title, account=dst, opposing_account=src,
amount=amount, transaction=t, date=date, category=category)])
return t
def create_account(name, account_type=Account.AccountType.PERSONAL):
return Account.objects.create(name=name, account_type=account_type)
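# Usage sketch (hedged): assumes a configured Django test environment and that
# silverstrike's Transaction model exposes a transfer-type constant (the name
# Transaction.TRANSFER is an assumption; only the helpers above are given).
#
#     checking = create_account('Checking')
#     savings = create_account('Savings')
#     create_transaction('Monthly saving', checking, savings, 100,
#                        Transaction.TRANSFER)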
|
the-stack_0_1113 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that show how to use various Dataproc
operators to manage a cluster and submit jobs.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocCreateWorkflowTemplateOperator,
DataprocDeleteClusterOperator,
DataprocInstantiateWorkflowTemplateOperator,
DataprocSubmitJobOperator,
DataprocUpdateClusterOperator,
)
from airflow.providers.google.cloud.sensors.dataproc import DataprocJobSensor
PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "an-id")
CLUSTER_NAME = os.environ.get("GCP_DATAPROC_CLUSTER_NAME", "example-cluster")
REGION = os.environ.get("GCP_LOCATION", "europe-west1")
ZONE = os.environ.get("GCP_REGION", "europe-west1-b")
BUCKET = os.environ.get("GCP_DATAPROC_BUCKET", "dataproc-system-tests")
OUTPUT_FOLDER = "wordcount"
OUTPUT_PATH = f"gs://{BUCKET}/{OUTPUT_FOLDER}/"
PYSPARK_MAIN = os.environ.get("PYSPARK_MAIN", "hello_world.py")
PYSPARK_URI = f"gs://{BUCKET}/{PYSPARK_MAIN}"
SPARKR_MAIN = os.environ.get("SPARKR_MAIN", "hello_world.R")
SPARKR_URI = f"gs://{BUCKET}/{SPARKR_MAIN}"
# Cluster definition
# [START how_to_cloud_dataproc_create_cluster]
CLUSTER_CONFIG = {
"master_config": {
"num_instances": 1,
"machine_type_uri": "n1-standard-4",
"disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
},
"worker_config": {
"num_instances": 2,
"machine_type_uri": "n1-standard-4",
"disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
},
}
# [END how_to_cloud_dataproc_create_cluster]
# Update options
# [START how_to_cloud_dataproc_updatemask_cluster_operator]
CLUSTER_UPDATE = {
"config": {"worker_config": {"num_instances": 3}, "secondary_worker_config": {"num_instances": 3}}
}
UPDATE_MASK = {
"paths": ["config.worker_config.num_instances", "config.secondary_worker_config.num_instances"]
}
# [END how_to_cloud_dataproc_updatemask_cluster_operator]
TIMEOUT = {"seconds": 1 * 24 * 60 * 60}
# Jobs definitions
# [START how_to_cloud_dataproc_pig_config]
PIG_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"pig_job": {"query_list": {"queries": ["define sin HiveUDF('sin');"]}},
}
# [END how_to_cloud_dataproc_pig_config]
# [START how_to_cloud_dataproc_sparksql_config]
SPARK_SQL_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"spark_sql_job": {"query_list": {"queries": ["SHOW DATABASES;"]}},
}
# [END how_to_cloud_dataproc_sparksql_config]
# [START how_to_cloud_dataproc_spark_config]
SPARK_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"spark_job": {
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
}
# [END how_to_cloud_dataproc_spark_config]
# [START how_to_cloud_dataproc_pyspark_config]
PYSPARK_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"pyspark_job": {"main_python_file_uri": PYSPARK_URI},
}
# [END how_to_cloud_dataproc_pyspark_config]
# [START how_to_cloud_dataproc_sparkr_config]
SPARKR_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"spark_r_job": {"main_r_file_uri": SPARKR_URI},
}
# [END how_to_cloud_dataproc_sparkr_config]
# [START how_to_cloud_dataproc_hive_config]
HIVE_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"hive_job": {"query_list": {"queries": ["SHOW DATABASES;"]}},
}
# [END how_to_cloud_dataproc_hive_config]
# [START how_to_cloud_dataproc_hadoop_config]
HADOOP_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"hadoop_job": {
"main_jar_file_uri": "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
"args": ["wordcount", "gs://pub/shakespeare/rose.txt", OUTPUT_PATH],
},
}
# [END how_to_cloud_dataproc_hadoop_config]
WORKFLOW_NAME = "airflow-dataproc-test"
WORKFLOW_TEMPLATE = {
"id": WORKFLOW_NAME,
"placement": {
"managed_cluster": {
"cluster_name": CLUSTER_NAME,
"config": CLUSTER_CONFIG,
}
},
"jobs": [{"step_id": "pig_job_1", "pig_job": PIG_JOB["pig_job"]}],
}
with models.DAG(
"example_gcp_dataproc",
schedule_interval='@once',
start_date=datetime(2021, 1, 1),
catchup=False,
) as dag:
# [START how_to_cloud_dataproc_create_cluster_operator]
create_cluster = DataprocCreateClusterOperator(
task_id="create_cluster",
project_id=PROJECT_ID,
cluster_config=CLUSTER_CONFIG,
region=REGION,
cluster_name=CLUSTER_NAME,
)
# [END how_to_cloud_dataproc_create_cluster_operator]
# [START how_to_cloud_dataproc_update_cluster_operator]
scale_cluster = DataprocUpdateClusterOperator(
task_id="scale_cluster",
cluster_name=CLUSTER_NAME,
cluster=CLUSTER_UPDATE,
update_mask=UPDATE_MASK,
graceful_decommission_timeout=TIMEOUT,
project_id=PROJECT_ID,
region=REGION,
)
# [END how_to_cloud_dataproc_update_cluster_operator]
# [START how_to_cloud_dataproc_create_workflow_template]
create_workflow_template = DataprocCreateWorkflowTemplateOperator(
task_id="create_workflow_template",
template=WORKFLOW_TEMPLATE,
project_id=PROJECT_ID,
region=REGION,
)
# [END how_to_cloud_dataproc_create_workflow_template]
# [START how_to_cloud_dataproc_trigger_workflow_template]
trigger_workflow = DataprocInstantiateWorkflowTemplateOperator(
task_id="trigger_workflow", region=REGION, project_id=PROJECT_ID, template_id=WORKFLOW_NAME
)
# [END how_to_cloud_dataproc_trigger_workflow_template]
pig_task = DataprocSubmitJobOperator(
task_id="pig_task", job=PIG_JOB, region=REGION, project_id=PROJECT_ID
)
spark_sql_task = DataprocSubmitJobOperator(
task_id="spark_sql_task", job=SPARK_SQL_JOB, region=REGION, project_id=PROJECT_ID
)
spark_task = DataprocSubmitJobOperator(
task_id="spark_task", job=SPARK_JOB, region=REGION, project_id=PROJECT_ID
)
# [START cloud_dataproc_async_submit_sensor]
spark_task_async = DataprocSubmitJobOperator(
task_id="spark_task_async", job=SPARK_JOB, region=REGION, project_id=PROJECT_ID, asynchronous=True
)
spark_task_async_sensor = DataprocJobSensor(
task_id='spark_task_async_sensor_task',
region=REGION,
project_id=PROJECT_ID,
dataproc_job_id=spark_task_async.output,
poke_interval=10,
)
# [END cloud_dataproc_async_submit_sensor]
# [START how_to_cloud_dataproc_submit_job_to_cluster_operator]
pyspark_task = DataprocSubmitJobOperator(
task_id="pyspark_task", job=PYSPARK_JOB, region=REGION, project_id=PROJECT_ID
)
# [END how_to_cloud_dataproc_submit_job_to_cluster_operator]
sparkr_task = DataprocSubmitJobOperator(
task_id="sparkr_task", job=SPARKR_JOB, region=REGION, project_id=PROJECT_ID
)
hive_task = DataprocSubmitJobOperator(
task_id="hive_task", job=HIVE_JOB, region=REGION, project_id=PROJECT_ID
)
hadoop_task = DataprocSubmitJobOperator(
task_id="hadoop_task", job=HADOOP_JOB, region=REGION, project_id=PROJECT_ID
)
# [START how_to_cloud_dataproc_delete_cluster_operator]
delete_cluster = DataprocDeleteClusterOperator(
task_id="delete_cluster", project_id=PROJECT_ID, cluster_name=CLUSTER_NAME, region=REGION
)
# [END how_to_cloud_dataproc_delete_cluster_operator]
create_cluster >> scale_cluster
scale_cluster >> create_workflow_template >> trigger_workflow >> delete_cluster
scale_cluster >> hive_task >> delete_cluster
scale_cluster >> pig_task >> delete_cluster
scale_cluster >> spark_sql_task >> delete_cluster
scale_cluster >> spark_task >> delete_cluster
scale_cluster >> spark_task_async
spark_task_async_sensor >> delete_cluster
scale_cluster >> pyspark_task >> delete_cluster
scale_cluster >> sparkr_task >> delete_cluster
scale_cluster >> hadoop_task >> delete_cluster
# Task dependency created via `XComArgs`:
# spark_task_async >> spark_task_async_sensor
|
the-stack_0_1115 | # -*- coding: utf-8 -*-
'''
Provide external pillar data from RethinkDB
.. versionadded:: 2018.3.0
:depends: rethinkdb (on the salt-master)
salt master rethinkdb configuration
===================================
These variables must be configured in your master configuration file.
* ``rethinkdb.host`` - The RethinkDB server. Defaults to ``'salt'``
* ``rethinkdb.port`` - The port the RethinkDB server listens on.
Defaults to ``'28015'``
* ``rethinkdb.database`` - The database to connect to.
Defaults to ``'salt'``
* ``rethinkdb.username`` - The username for connecting to RethinkDB.
Defaults to ``''``
* ``rethinkdb.password`` - The password for connecting to RethinkDB.
Defaults to ``''``
salt-master ext_pillar configuration
====================================
The ext_pillar function arguments are given in single line dictionary notation.
.. code-block:: yaml
ext_pillar:
- rethinkdb: {table: ext_pillar, id_field: minion_id, field: pillar_root, pillar_key: external_pillar}
In the example above the following happens.
* The salt-master will look for external pillars in the 'ext_pillar' table
on the RethinkDB host
* The minion id will be matched against the 'minion_id' field
* Pillars will be retrieved from the nested field 'pillar_root'
* Found pillars will be merged inside a key called 'external_pillar'
Module Documentation
====================
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libraries
import logging
# Import 3rd party libraries
try:
import rethinkdb
HAS_RETHINKDB = True
except ImportError:
HAS_RETHINKDB = False
__virtualname__ = 'rethinkdb'
__opts__ = {
'rethinkdb.host': 'salt',
'rethinkdb.port': '28015',
'rethinkdb.database': 'salt',
'rethinkdb.username': None,
'rethinkdb.password': None
}
def __virtual__():
if not HAS_RETHINKDB:
return False
return True
# Configure logging
log = logging.getLogger(__name__)
def ext_pillar(minion_id,
pillar,
table='pillar',
id_field=None,
field=None,
pillar_key=None):
'''
Collect minion external pillars from a RethinkDB database
Arguments:
* `table`: The RethinkDB table containing external pillar information.
Defaults to ``'pillar'``
* `id_field`: Field in document containing the minion id.
If blank then we assume the table index matches minion ids
* `field`: Specific field in the document used for pillar data, if blank
then the entire document will be used
* `pillar_key`: The salt-master will nest found external pillars under
this key before merging into the minion pillars. If blank, external
pillars will be merged at top level
'''
host = __opts__['rethinkdb.host']
port = __opts__['rethinkdb.port']
database = __opts__['rethinkdb.database']
username = __opts__['rethinkdb.username']
password = __opts__['rethinkdb.password']
log.debug('Connecting to %s:%s as user \'%s\' for RethinkDB ext_pillar',
host, port, username)
# Connect to the database
conn = rethinkdb.connect(host=host,
port=port,
db=database,
user=username,
password=password)
data = None
try:
if id_field:
log.debug('ext_pillar.rethinkdb: looking up pillar. '
'table: %s, field: %s, minion: %s',
table, id_field, minion_id)
if field:
data = rethinkdb.table(table).filter(
{id_field: minion_id}).pluck(field).run(conn)
else:
data = rethinkdb.table(table).filter(
{id_field: minion_id}).run(conn)
else:
log.debug('ext_pillar.rethinkdb: looking up pillar. '
'table: %s, field: id, minion: %s',
table, minion_id)
if field:
data = rethinkdb.table(table).get(minion_id).pluck(field).run(
conn)
else:
data = rethinkdb.table(table).get(minion_id).run(conn)
finally:
if conn.is_open():
conn.close()
if data.items:
# Return nothing if multiple documents are found for a minion
if len(data.items) > 1:
log.error('ext_pillar.rethinkdb: ambiguous documents found for '
'minion %s', minion_id)
return {}
else:
result = data.items.pop()
if pillar_key:
return {pillar_key: result}
return result
else:
# No document found in the database
log.debug('ext_pillar.rethinkdb: no document found')
return {}
|
the-stack_0_1116 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
CONF = cfg.CONF
class FilterTests(object):
# Provide support for checking if a batch of list items all
# exist within a contiguous range in a total list
def _match_with_list(self, this_batch, total_list,
batch_size=None,
list_start=None, list_end=None):
if batch_size is None:
batch_size = len(this_batch)
if list_start is None:
list_start = 0
if list_end is None:
list_end = len(total_list)
for batch_item in range(0, batch_size):
found = False
for list_item in range(list_start, list_end):
if this_batch[batch_item]['id'] == total_list[list_item]['id']:
found = True
self.assertTrue(found)
def _create_entity(self, entity_type):
f = getattr(self.identity_api, 'create_%s' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'create_%s' % entity_type)
return f
def _delete_entity(self, entity_type):
f = getattr(self.identity_api, 'delete_%s' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'delete_%s' % entity_type)
return f
def _list_entities(self, entity_type):
f = getattr(self.identity_api, 'list_%ss' % entity_type, None)
if f is None:
f = getattr(self.assignment_api, 'list_%ss' % entity_type)
return f
def _create_one_entity(self, entity_type, domain_id, name):
new_entity = {'name': name,
'domain_id': domain_id}
if entity_type in ['user', 'group']:
# The manager layer creates the ID for users and groups
new_entity = self._create_entity(entity_type)(new_entity)
else:
new_entity['id'] = '0000' + uuid.uuid4().hex
self._create_entity(entity_type)(new_entity['id'], new_entity)
return new_entity
def _create_test_data(self, entity_type, number, domain_id=None,
name_dict=None):
"""Create entity test data
:param entity_type: type of entity to create, e.g. 'user', group' etc.
:param number: number of entities to create,
:param domain_id: if not defined, all users will be created in the
default domain.
:param name_dict: optional dict containing entity number and name pairs
"""
entity_list = []
if domain_id is None:
domain_id = CONF.identity.default_domain_id
name_dict = name_dict or {}
for x in range(number):
# If this index has a name defined in the name_dict, then use it
name = name_dict.get(x, uuid.uuid4().hex)
new_entity = self._create_one_entity(entity_type, domain_id, name)
entity_list.append(new_entity)
return entity_list
def _delete_test_data(self, entity_type, entity_list):
for entity in entity_list:
self._delete_entity(entity_type)(entity['id'])
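# Illustrative use from a test case that mixes in FilterTests (assumes the
# identity/assignment API fixtures are provided by the test base class;
# names below are placeholders):
# users = self._create_test_data('user', 10, name_dict={0: 'alice'})
# ... exercise the filtered list call under test ...
# self._delete_test_data('user', users)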
|
the-stack_0_1117 | import numpy as np
import pickle
import os
import time
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from PIL import Image
class ImageWriter(object):
def __init__(self, data_dir, dataset, unnormalizer):
self.data_dir = data_dir
self.dataset = dataset
self.unnormalizer = unnormalizer
self.init_data()
def init_data(self):
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
self.output_dir = os.path.join(self.data_dir, "{}_by_id".format(self.dataset))
print(self.output_dir)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
def write_partition(self, partition):
to_pil = torchvision.transforms.ToPILImage()
for elem in partition:
img_tensor = elem[0].cpu()
unnormalized = self.unnormalizer(img_tensor)
img = to_pil(unnormalized)
img_id = elem[2]
img_file = os.path.join(self.output_dir, "image-{}.png".format(img_id))
img.save(img_file, 'PNG')
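# Illustrative sketch (assumes `unnormalize` is an inverse-normalization
# transform and `partition` yields (image_tensor, label, image_id) tuples,
# matching what write_partition indexes above; names are placeholders):
# writer = ImageWriter("./data", "cifar10", unnormalize)
# writer.write_partition(partition)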
class ProbabilityByImageLogger(object):
def __init__(self, pickle_dir, pickle_prefix, max_num_images=None):
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.init_data()
self.max_num_images = max_num_images
self.probabilities = {}
self.backward_selects = {}
self.forward_selects = {}
self.losses = {}
def next_epoch(self):
self.write()
def init_data(self):
        # Store per-image histories of selection probabilities, selections and losses
data_pickle_dir = os.path.join(self.pickle_dir, "probabilities_by_image")
self.probabilities_pickle_file = os.path.join(data_pickle_dir,
"{}_probabilities".format(self.pickle_prefix))
self.backward_selects_pickle_file = os.path.join(data_pickle_dir,
"{}_selects".format(self.pickle_prefix))
self.forward_selects_pickle_file = os.path.join(data_pickle_dir,
"{}_forwardselects".format(self.pickle_prefix))
self.losses_pickle_file = os.path.join(data_pickle_dir,
"{}_losses".format(self.pickle_prefix))
# Make images hist pickle path
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids, probabilities, backward_selects, forward_selects, losses):
for image_id, probability in zip(image_ids, probabilities):
if image_id not in self.probabilities.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.probabilities[image_id] = []
self.probabilities[image_id].append(probability)
for image_id, is_selected in zip(image_ids, backward_selects):
if image_id not in self.backward_selects.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.backward_selects[image_id] = []
self.backward_selects[image_id].append(int(is_selected))
for image_id, is_selected in zip(image_ids, forward_selects):
if image_id not in self.forward_selects.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.forward_selects[image_id] = []
self.forward_selects[image_id].append(int(is_selected))
for image_id, loss in zip(image_ids, losses):
if image_id not in self.losses.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.losses[image_id] = []
self.losses[image_id].append(loss)
def handle_backward_batch(self, batch):
ids = [em.example.image_id for em in batch]
probabilities = [em.example.get_sp(False) for em in batch]
backward_selects = [em.example.get_select(False) for em in batch]
forward_selects = [em.example.get_select(True) for em in batch]
losses = [em.example.loss for em in batch]
self.update_data(ids, probabilities, backward_selects, forward_selects, losses)
def write(self):
latest_file = "{}.pickle".format(self.probabilities_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.probabilities, handle, protocol=pickle.HIGHEST_PROTOCOL)
latest_file = "{}.pickle".format(self.backward_selects_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.backward_selects, handle, protocol=pickle.HIGHEST_PROTOCOL)
latest_file = "{}.pickle".format(self.forward_selects_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.forward_selects, handle, protocol=pickle.HIGHEST_PROTOCOL)
class ImageIdHistLogger(object):
def __init__(self, pickle_dir, pickle_prefix, num_images, log_interval):
self.current_epoch = 0
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.log_interval = log_interval
self.init_data(num_images)
def next_epoch(self):
self.write()
self.current_epoch += 1
def init_data(self, num_images):
# Store frequency of each image getting backpropped
keys = range(num_images)
self.data = dict(zip(keys, [0] * len(keys)))
data_pickle_dir = os.path.join(self.pickle_dir, "image_id_hist")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_images_hist".format(self.pickle_prefix))
# Make images hist pickle path
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids):
for chosen_id in image_ids:
self.data[chosen_id] += 1
def handle_backward_batch(self, batch):
ids = [em.example.image_id.item() for em in batch if em.example.select]
self.update_data(ids)
def write(self):
latest_file = "{}.pickle".format(self.data_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
epoch_file = "{}.epoch_{}.pickle".format(self.data_pickle_file,
self.current_epoch)
if self.current_epoch % self.log_interval == 0:
with open(epoch_file, "wb") as handle:
print(epoch_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
class LossesByEpochLogger(object):
def __init__(self, pickle_dir, pickle_prefix, log_frequency):
self.current_epoch = 0
self.pickle_dir = pickle_dir
self.log_frequency = log_frequency
self.pickle_prefix = pickle_prefix
self.init_data()
def next_epoch(self):
self.write()
self.current_epoch += 1
self.data = []
def init_data(self):
        # Store the losses seen during the current epoch
self.data = []
data_pickle_dir = os.path.join(self.pickle_dir, "losses")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_losses".format(self.pickle_prefix))
# Make images hist pickle path
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, losses):
self.data += losses
def handle_backward_batch(self, batch):
losses = [em.example.loss.item() for em in batch]
self.update_data(losses)
def write(self):
epoch_file = "{}.epoch_{}.pickle".format(self.data_pickle_file,
self.current_epoch)
if self.current_epoch % self.log_frequency == 0:
with open(epoch_file, "wb") as handle:
print(epoch_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
class LossesByImageLogger(object):
def __init__(self, pickle_dir, pickle_prefix, max_num_images=None):
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.init_data()
self.max_num_images = max_num_images
self.data = {}
def next_epoch(self):
self.write()
def init_data(self):
        # Store per-image loss histories
data_pickle_dir = os.path.join(self.pickle_dir, "losses_by_image")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_losses".format(self.pickle_prefix))
# Make images hist pickle path
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids, losses):
for image_id, loss in zip(image_ids, losses):
if image_id not in self.data.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.data[image_id] = []
self.data[image_id].append(loss)
def handle_backward_batch(self, batch):
ids = [em.example.image_id for em in batch]
losses = [em.example.loss for em in batch]
self.update_data(ids, losses)
def write(self):
latest_file = "{}.pickle".format(self.data_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
class VariancesByImageLogger(object):
def __init__(self, pickle_dir, pickle_prefix, max_num_images=None):
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.init_data()
self.max_num_images = max_num_images
self.data = {}
def next_epoch(self):
self.write()
def init_data(self):
        # Store per-image loss histories (variance is computed at write time)
data_pickle_dir = os.path.join(self.pickle_dir, "variance_by_image")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_variances".format(self.pickle_prefix))
# Make images hist pickle path
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids, losses):
for image_id, loss in zip(image_ids, losses):
if image_id not in self.data.keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.data[image_id] = []
self.data[image_id].append(loss)
def handle_backward_batch(self, batch):
ids = [em.example.image_id for em in batch]
losses = [em.example.loss for em in batch]
self.update_data(ids, losses)
def write(self):
variance = {}
for image_id in self.data.keys():
variance[image_id] = np.var(self.data[image_id])
latest_file = "{}.pickle".format(self.data_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(variance, handle, protocol=pickle.HIGHEST_PROTOCOL)
class VariancesByEpochLogger(object):
def __init__(self, pickle_dir, pickle_prefix, log_frequency):
self.current_epoch = 0
self.pickle_dir = pickle_dir
self.log_frequency = log_frequency
self.pickle_prefix = pickle_prefix
self.init_data()
def next_epoch(self):
self.write()
self.current_epoch += 1
self.data = []
def init_data(self):
        # Store the per-batch loss variances seen during the current epoch
self.data = []
data_pickle_dir = os.path.join(self.pickle_dir, "variance_by_epoch")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_variances".format(self.pickle_prefix))
# Make images hist pickle path
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, variance):
self.data += [variance]
def handle_backward_batch(self, batch):
losses = [em.example.loss.item() for em in batch]
variance = np.var(losses)
self.update_data(variance)
def write(self):
epoch_file = "{}.epoch_{}.pickle".format(self.data_pickle_file,
self.current_epoch)
if self.current_epoch % self.log_frequency == 0:
with open(epoch_file, "wb") as handle:
print(epoch_file)
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
class VariancesByAverageProbabilityByImageLogger(object):
def __init__(self, pickle_dir, pickle_prefix, max_num_images=None):
self.pickle_dir = pickle_dir
self.pickle_prefix = pickle_prefix
self.init_data()
self.max_num_images = max_num_images
self.data = {"losses": {}, "probabilities": {}}
def next_epoch(self):
self.write()
def init_data(self):
        # Store per-image losses and selection probabilities
data_pickle_dir = os.path.join(self.pickle_dir, "variance_by_avg_prob")
self.data_pickle_file = os.path.join(data_pickle_dir,
"{}_variances".format(self.pickle_prefix))
# Make images hist pickle path
if not os.path.exists(data_pickle_dir):
os.mkdir(data_pickle_dir)
def update_data(self, image_ids, probabilities, losses):
for image_id, prob, loss in zip(image_ids, probabilities, losses):
if image_id not in self.data["losses"].keys():
if self.max_num_images:
if image_id >= self.max_num_images:
continue
self.data["losses"][image_id] = []
self.data["probabilities"][image_id] = []
self.data["losses"][image_id].append(loss)
self.data["probabilities"][image_id].append(prob)
def handle_backward_batch(self, batch):
ids = [em.example.image_id for em in batch]
losses = [em.example.loss for em in batch]
probabilities = [em.example.select_probability for em in batch]
self.update_data(ids, probabilities, losses)
def write(self):
out = {}
for image_id in self.data["losses"].keys():
var = np.var(self.data["losses"][image_id])
avg_prob = np.average(self.data["probabilities"][image_id])
out[image_id] = (avg_prob, var)
latest_file = "{}.pickle".format(self.data_pickle_file)
with open(latest_file, "wb") as handle:
print(latest_file)
pickle.dump(out, handle, protocol=pickle.HIGHEST_PROTOCOL)
class Logger(object):
def __init__(self, log_interval=1, epoch=0, num_backpropped=0, num_skipped=0, num_skipped_fp=0, num_forwards=0, start_time_seconds=None):
self.current_epoch = epoch
self.current_batch = 0
self.log_interval = log_interval
self.global_num_backpropped = num_backpropped
self.global_num_skipped = num_skipped
self.global_num_skipped_fp = num_skipped_fp
self.global_num_forwards= num_forwards
self.partition_loss = 0
self.partition_backpropped_loss = 0
self.partition_num_backpropped = 0
self.partition_num_skipped = 0
self.partition_num_correct = 0
self.debug = False
if start_time_seconds is None:
self.start_time_seconds = time.time()
else:
self.start_time_seconds = start_time_seconds
def next_epoch(self):
self.current_epoch += 1
@property
def partition_seen(self):
return self.partition_num_backpropped + self.partition_num_skipped
@property
def average_partition_loss(self):
return self.partition_loss / float(self.partition_seen)
@property
def average_partition_backpropped_loss(self):
return self.partition_backpropped_loss / float(self.partition_num_backpropped)
@property
def partition_accuracy(self):
return 100. * self.partition_num_correct / self.partition_seen
@property
def train_debug(self):
        return 'train_debug,{},{},{},{},{:.6f},{},{:.6f},{:.4f}'.format(
self.current_epoch,
self.global_num_backpropped,
self.global_num_skipped,
self.global_num_skipped_fp,
self.average_partition_backpropped_loss,
self.global_num_forwards,
self.partition_accuracy,
time.time() - self.start_time_seconds)
def next_partition(self):
self.partition_loss = 0
self.partition_backpropped_loss = 0
self.partition_num_backpropped = 0
self.partition_num_skipped = 0
self.partition_num_correct = 0
def handle_forward_batch(self, batch):
# Populate batch_stats
# self.partition_loss += sum([example.loss for em in batch])
num_skipped_fp = sum([int(not em.example.forward_select) for em in batch])
self.global_num_skipped_fp += num_skipped_fp
self.global_num_forwards += sum([int(em.example.forward_select) for em in batch])
def handle_backward_batch(self, batch):
self.current_batch += 1
num_backpropped = sum([int(em.example.select) for em in batch])
num_skipped = sum([int(not em.example.select) for em in batch])
self.global_num_backpropped += num_backpropped
self.global_num_skipped += num_skipped
if self.debug:
self.partition_num_backpropped += num_backpropped
self.partition_num_skipped += num_skipped
self.partition_backpropped_loss += sum([em.example.backpropped_loss
for em in batch
if em.example.backpropped_loss])
chosen = [em for em in batch if em.example.select]
self.partition_num_correct += sum([1 for em in chosen if em.example.correct])
self.write()
def write(self):
if self.current_batch % self.log_interval == 0:
print(self.train_debug)
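# For reference, each train_debug line printed above has the form:
# train_debug,<epoch>,<num_backpropped>,<num_skipped>,<num_skipped_fp>,
# <avg_backpropped_loss>,<num_forwards>,<partition_accuracy>,<elapsed_seconds>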
|
the-stack_0_1118 | from bolinette import types, data
from bolinette.data import ext, mapping
from bolinette.data.defaults.entities import Role
@ext.model("role")
class RoleModel(data.Model[Role]):
id = types.defs.Column(types.db.Integer, primary_key=True)
name = types.defs.Column(
types.db.String, unique=True, nullable=False, entity_key=True
)
def payloads(self):
yield [mapping.Column(self.name, required=True)]
def responses(self):
yield [mapping.Column(self.name)]
yield "complete", [
mapping.Column(self.name),
mapping.List(mapping.Definition("user"), key="users"),
]
|
the-stack_0_1119 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class FetchOptions(Package):
"""Mock package with fetch_options."""
homepage = "http://www.fetch-options-example.com"
url = 'https://example.com/some/tarball-1.0.tar.gz'
fetch_options = {'timeout': 42, 'cookie': 'foobar'}
timeout = {'timeout': 65}
cookie = {'cookie': 'baz'}
version('1.2', 'abc12', fetch_options=cookie)
version('1.1', 'abc11', fetch_options=timeout)
version('1.0', 'abc10')
|
the-stack_0_1121 | #
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from functools import partial
import attr
from plugincode.scan import ScanPlugin
from plugincode.scan import scan_impl
from scancode import CommandLineOption
from scancode import MISC_GROUP
from scancode import SCAN_OPTIONS_GROUP
from scancode import SCAN_GROUP
from scancode.api import DEJACODE_LICENSE_URL
def reindex_licenses(ctx, param, value):
if not value or ctx.resilient_parsing:
return
# TODO: check for temp file configuration and use that for the cache!!!
from licensedcode.cache import get_cached_index
import click
click.echo('Checking and rebuilding the license index...')
get_cached_index(check_consistency=True,)
click.echo('Done.')
ctx.exit(0)
@scan_impl
class LicenseScanner(ScanPlugin):
"""
Scan a Resource for licenses.
"""
resource_attributes = OrderedDict([
('licenses', attr.ib(default=attr.Factory(list))),
('license_expressions', attr.ib(default=attr.Factory(list))),
])
sort_order = 2
options = [
CommandLineOption(('-l', '--license'),
is_flag=True,
help='Scan <input> for licenses.',
help_group=SCAN_GROUP,
sort_order=10),
CommandLineOption(('--license-score',),
type=int, default=0, show_default=True,
required_options=['license'],
help='Do not return license matches with a score lower than this score. '
'A number between 0 and 100.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(('--license-text',),
is_flag=True,
required_options=['license'],
help='Include the detected licenses matched text.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(('--license-text-diagnostics',),
is_flag=True,
required_options=['license_text'],
help='In the matched license text, include diagnostic highlights '
'surrounding with square brackets [] words that are not matched.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(('--license-url-template',),
default=DEJACODE_LICENSE_URL, show_default=True,
required_options=['license'],
help='Set the template URL used for the license reference URLs. '
'Curly braces ({}) are replaced by the license key.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(('--license-diag',),
# not yet supported in Click 6.7 but added in CommandLineOption
hidden=True,
is_flag=True,
required_options=['license'],
help='(DEPRECATED: this is always included by default now). '
'Include diagnostic information in license scan results.',
help_group=SCAN_OPTIONS_GROUP),
CommandLineOption(
('--reindex-licenses',),
is_flag=True, is_eager=True,
callback=reindex_licenses,
help='Check the license index cache and reindex if needed and exit.',
help_group=MISC_GROUP)
]
def is_enabled(self, license, **kwargs): # NOQA
return license
def setup(self, **kwargs):
"""
This is a cache warmup such that child process inherit from this.
"""
from licensedcode.cache import get_index
get_index(return_value=False)
def get_scanner(self, license_score=0, license_text=False,
license_text_diagnostics=False,
license_url_template=DEJACODE_LICENSE_URL,
**kwargs):
from scancode.api import get_licenses
return partial(get_licenses,
min_score=license_score,
include_text=license_text,
license_text_diagnostics=license_text_diagnostics,
license_url_template=license_url_template
)
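# Illustrative command-line sketch (flag names follow the options declared
# above; the output path and scan target are placeholders):
# scancode --license --license-score 50 --license-text --json-pp results.json samples/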
|
the-stack_0_1122 | import os
import sys
# Try to mute and then load TensorFlow and Keras
# Muting seems to not work lately on Linux in any way
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
stdin = sys.stdin
sys.stdin = open(os.devnull, 'w')
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from keras.callbacks import Callback
sys.stdin = stdin
sys.stderr = stderr
# Custom TensorBoard class that lets us reuse a single writer across multiple .fit() calls.
# It also allows us to easily log additional data.
# This dramatically decreases the amount of data saved into TensorBoard logs and the write time (everything is appended to one file).
class TensorBoard(Callback):
# Set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, log_dir):
self.step = 1
self.log_dir = log_dir
self.writer = tf.summary.FileWriter(self.log_dir)
# Saves logs with our step number (otherwise every .fit() will start writing from 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(self.step, **logs)
# Custom method for saving own (and also internal) metrics (can be called externally)
def update_stats(self, step, **stats):
self._write_logs(stats, step)
# More or less the same writer as in Keras' Tensorboard callback
# Physically writes to the log files
def _write_logs(self, logs, index):
for name, value in logs.items():
if name in ['batch', 'size']:
continue
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, index)
self.writer.flush()
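# Illustrative usage sketch (assumes a compiled Keras `model` and training
# arrays `X`, `y` defined elsewhere; those names are placeholders):
# tensorboard = TensorBoard(log_dir="logs/my-run")
# for episode in range(10):
#     model.fit(X, y, verbose=0, callbacks=[tensorboard])
#     tensorboard.step += 1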
|
the-stack_0_1124 | def longestPalindrome(s: str) -> str:
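    # Brute-force scan over substrings; the inner loop starts at i + max_length
    # so only candidates longer than the best palindrome found so far are ever
    # tested with isPalin (defined below).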
max_length = 0
start = 0
for i in range(len(s)):
for j in range(i + max_length, len(s)):
length = j - i + 1
if i + length > len(s): break
if length > max_length and isPalin(s, i, j + 1):
start = i
max_length = length
return s[start:start+max_length] if max_length < len(s) else s
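# Illustrative check (not part of the original snippet; either answer is a
# valid longest palindrome of "babad"):
# assert longestPalindrome("babad") in ("bab", "aba")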
def isPalin(s: str, start: int, end: int) -> bool:
for i in range(int((end - start) / 2)):
if s[start + i] != s[end-i-1]:
return False
return True |
the-stack_0_1125 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-import
from unittest import TestCase
class TestImport(TestCase):
def test_import_init(self):
"""
Test that the metrics root module has the right symbols
"""
try:
from opentelemetry.sdk._metrics import ( # noqa: F401
Counter,
Histogram,
Meter,
MeterProvider,
ObservableCounter,
ObservableGauge,
ObservableUpDownCounter,
UpDownCounter,
)
except Exception as error:
self.fail(f"Unexpected error {error} was raised")
def test_import_export(self):
"""
Test that the metrics export module has the right symbols
"""
try:
from opentelemetry.sdk._metrics.export import ( # noqa: F401
AggregationTemporality,
ConsoleMetricExporter,
Gauge,
Histogram,
InMemoryMetricReader,
Metric,
MetricExporter,
MetricExportResult,
MetricReader,
PeriodicExportingMetricReader,
PointT,
Sum,
)
except Exception as error:
self.fail(f"Unexpected error {error} was raised")
def test_import_view(self):
"""
Test that the metrics view module has the right symbols
"""
try:
from opentelemetry.sdk._metrics.view import ( # noqa: F401
Aggregation,
DefaultAggregation,
DropAggregation,
ExplicitBucketHistogramAggregation,
LastValueAggregation,
SumAggregation,
View,
)
except Exception as error:
self.fail(f"Unexpected error {error} was raised")
|
the-stack_0_1128 | from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='monitor',
version='0.1.0',
description='Monitor component of BIGSEA Asperathos framework',
url='',
author='Igor Natanael, Roberto Nascimento Jr.',
author_email='',
license='Apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: Apache 2.0',
'Programming Language :: Python :: 2.7',
],
keywords='webservice monitoring monasca asperathos bigsea',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['flask'],
entry_points={
'console_scripts': [
'monitor=monitor.cli.main:main',
],
},
)
|
the-stack_0_1130 | import unittest
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
import autosar
### BEGIN TEST DATA
def apply_test_data(ws):
package=ws.createPackage("DataType", role="DataType")
package.createSubPackage("DataTypeSemantics", role="CompuMethod")
package.createSubPackage("DataTypeUnits", role="Unit")
package.createBooleanDataType('Boolean')
package.createIntegerDataType('SInt8', -128, 127)
package.createIntegerDataType('SInt16', -32768, 32767)
package.createIntegerDataType('SInt32', -2147483648, 2147483647)
package.createIntegerDataType('UInt8', 0, 255)
package.createIntegerDataType('UInt16', 0, 65535)
package.createIntegerDataType('UInt32', 0, 4294967295)
package.createRealDataType('Float', None, None, minValType='INFINITE', maxValType='INFINITE')
package.createRealDataType('Double', None, None, minValType='INFINITE', maxValType='INFINITE', hasNaN=True, encoding='DOUBLE')
package.createIntegerDataType('ButtonStatus_T', valueTable=['ButtonStatus_Released','ButtonStatus_Pressed','ButtonStatus_Error','ButtonStatus_NotAvailable'])
valueTableList = [
'VehicleModeInternal_Off',
'VehicleModeInternal_Accessory',
'VehicleModeInternal_Run',
'VehicleModeInternal_Crank',
'VehicleModeInternal_Spare1',
'VehicleModeInternal_Spare2',
'VehicleModeInternal_Error',
'VehicleModeInternal_NotAvailable'
]
package.createIntegerDataType('VehicleModeInternal_T', valueTable=valueTableList)
package.createIntegerDataType('BspApi_DigitalId_T', 0, 255, offset=0, scaling=1/1, forceFloatScaling=True, unit='Id')
package.createIntegerDataType('BspApi_DigitalState_T', valueTable=['BspApi_DigitalState_Inactive','BspApi_DigitalState_Active','BspApi_DigitalState_Error','BspApi_DigitalState_NotAvailable'])
package=ws.createPackage("Constant", role="Constant")
package.createConstant('ButtonStatus_IV', 'ButtonStatus_T', 3)
package.createConstant('VehicleModeInternal_IV', 'VehicleModeInternal_T', 7)
package=ws.createPackage("PortInterface", role="PortInterface")
package.createSenderReceiverInterface("EcuM_CurrentMode", modeGroups=autosar.ModeGroup("currentMode", "/ModeDclrGroup/EcuM_Mode"), isService=True, adminData={"SDG_GID": "edve:BSWM", "SD": "EcuM"})
package.createSenderReceiverInterface("ButtonStatus_I", autosar.DataElement('ButtonStatus', 'ButtonStatus_T'))
package.createSenderReceiverInterface("VehicleModeInternal_I", autosar.DataElement('VehicleModeInternal', 'VehicleModeInternal_T'))
portInterface=package.createClientServerInterface("BspApi_I", ["GetDiscreteInput", "SetDiscreteOutput"], autosar.ApplicationError("E_NOT_OK", 1), isService=True)
portInterface["GetDiscreteInput"].createInArgument("inputId", "BspApi_DigitalId_T")
portInterface["GetDiscreteInput"].createOutArgument("inputValue", "BspApi_DigitalState_T")
portInterface["SetDiscreteOutput"].createInArgument("outputId", "BspApi_DigitalId_T")
portInterface["SetDiscreteOutput"].createInArgument("outputValue", "BspApi_DigitalState_T")
portInterface["SetDiscreteOutput"].possibleErrors = "E_NOT_OK"
package=ws.createPackage("ModeDclrGroup", role="ModeDclrGroup")
package.createModeDeclarationGroup("EcuM_Mode", ["POST_RUN", "RUN", "SHUTDOWN", "SLEEP", "STARTUP", "WAKE_SLEEP"], "STARTUP", adminData={"SDG_GID": "edve:BSWM", "SD": "EcuM"})
package=ws.createPackage("ComponentType", role="ComponentType")
swc = package.createApplicationSoftwareComponent('SteeringWheelButtonReader')
swc.createProvidePort('SWS_PushButtonStatus_Back', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Down', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Enter', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Home', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Left', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Right', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createProvidePort('SWS_PushButtonStatus_Up', 'ButtonStatus_I', initValueRef='ButtonStatus_IV')
swc.createRequirePort('EcuM_CurrentMode', 'EcuM_CurrentMode')
swc.createRequirePort('VehicleModeInternal', 'VehicleModeInternal_I', initValueRef='VehicleModeInternal_IV')
swc.createRequirePort('BspApi', 'BspApi_I')
portAccessList = [
'SWS_PushButtonStatus_Back',
'SWS_PushButtonStatus_Down',
'SWS_PushButtonStatus_Enter',
'SWS_PushButtonStatus_Home',
'SWS_PushButtonStatus_Left',
'SWS_PushButtonStatus_Right',
'SWS_PushButtonStatus_Up'
]
swc.behavior.createRunnable('SteeringWheelButtonReader_Init', portAccess=portAccessList)
portAccessList = [
'SWS_PushButtonStatus_Back',
'SWS_PushButtonStatus_Down',
'SWS_PushButtonStatus_Enter',
'SWS_PushButtonStatus_Home',
'SWS_PushButtonStatus_Left',
'SWS_PushButtonStatus_Right',
'SWS_PushButtonStatus_Up'
]
swc.behavior.createRunnable('SteeringWheelButtonReader_Exit', portAccess=portAccessList)
portAccessList = [
'VehicleModeInternal',
'SWS_PushButtonStatus_Back',
'SWS_PushButtonStatus_Down',
'SWS_PushButtonStatus_Enter',
'SWS_PushButtonStatus_Home',
'SWS_PushButtonStatus_Left',
'SWS_PushButtonStatus_Right',
'SWS_PushButtonStatus_Up',
'BspApi/GetDiscreteInput'
]
swc.behavior.createRunnable('SteeringWheelButtonReader_Run', portAccess=portAccessList)
swc.behavior.createTimingEvent('SteeringWheelButtonReader_Run', period=10)
swc.behavior.createModeSwitchEvent('SteeringWheelButtonReader_Init', 'EcuM_CurrentMode/RUN')
### END TEST DATA
class TestPartition(unittest.TestCase):
def test_addComponent(self):
ws = autosar.workspace()
apply_test_data(ws)
partition = autosar.rte.Partition()
partition.addComponent(ws.find('/ComponentType/SteeringWheelButtonReader'))
self.assertEqual(len(partition.components), 1)
def test_unconnected(self):
ws = autosar.workspace()
apply_test_data(ws)
partition = autosar.rte.Partition()
partition.addComponent(ws.find('/ComponentType/SteeringWheelButtonReader'))
self.assertEqual(len(partition.components), 1)
unconnected = list(partition.unconnectedPorts())
self.assertEqual(len(unconnected), 10)
if __name__ == '__main__':
unittest.main() |
the-stack_0_1131 | # API v1
import logging
from django.conf import settings
from django.contrib import auth, messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import FormView
from oauth2_provider.contrib.rest_framework import OAuth2Authentication, TokenHasScope
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.authentication.models import Email
from apps.authentication.models import OnlineUser as User
from apps.inventory.models import Item
from apps.payment.models import PaymentTransaction
from apps.payment.transaction_constants import TransactionSource
from apps.shop.forms import SetRFIDForm
from apps.shop.models import MagicToken, OrderLine
from apps.shop.serializers import (
ItemSerializer,
OrderLineSerializer,
TransactionSerializer,
UserOrderLineSerializer,
UserSerializer,
)
from apps.shop.utils import create_magic_token
class OrderLineViewSet(viewsets.GenericViewSet, mixins.CreateModelMixin):
queryset = OrderLine.objects.all()
serializer_class = OrderLineSerializer
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ["shop.readwrite"]
@action(detail=False, url_path="userorders")
def user_orders(self, request):
"""
        Endpoint for fetching a user's order history.
Intended for the nibble kiosk
"""
pk = self.request.query_params.get("pk")
if not pk:
return Response(
"Request must include a 'pk' query parameter where 'pk' is the users user id",
status=status.HTTP_400_BAD_REQUEST,
)
user = get_object_or_404(User, pk=pk)
# Only include the latest purchases
amount = 50
orders = OrderLine.objects.filter(user=user).order_by("-datetime")[:amount]
serializer = UserOrderLineSerializer(orders, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
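# Illustrative request for the endpoint above (the URL prefix depends on how
# the router registers this viewset, so the path here is a placeholder):
# GET /orderline/userorders/?pk=42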
class TransactionViewSet(viewsets.GenericViewSet, mixins.CreateModelMixin):
queryset = PaymentTransaction.objects.all()
serializer_class = TransactionSerializer
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ["shop.readwrite"]
def perform_create(self, serializer):
"""
Transactions created by this view are strictly allowed to handle cash additions.
"""
serializer.save(source=TransactionSource.CASH)
class UserOrderViewSet(
viewsets.GenericViewSet, mixins.ListModelMixin, mixins.RetrieveModelMixin
):
serializer_class = UserOrderLineSerializer
permission_classes = (IsAuthenticated,)
def get_queryset(self):
return OrderLine.objects.filter(user=self.request.user)
class UserViewSet(
viewsets.GenericViewSet, mixins.RetrieveModelMixin, mixins.ListModelMixin, APIView
):
queryset = User.objects.all()
serializer_class = UserSerializer
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ["shop.readwrite"]
filterset_fields = ("rfid",)
class InventoryViewSet(
viewsets.GenericViewSet, mixins.RetrieveModelMixin, mixins.ListModelMixin
):
queryset = Item.objects.filter(available=True).order_by("pk")
serializer_class = ItemSerializer
permission_classes = (AllowAny,)
pagination_class = None
class SetRFIDView(APIView):
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ["shop.readwrite"]
def post(self, request):
username = request.data.get("username", "").lower()
password = request.data.get("password", "")
request_magic_link = request.data.get("magic_link", False)
send_magic_link_email = request.data.get("send_email", True)
if not username:
return Response(
"Missing authentication details", status=status.HTTP_400_BAD_REQUEST
)
if "@" in username:
email = Email.objects.filter(email=username)
if email:
username = email[0].user.username
user = auth.authenticate(username=username, password=password)
rfid = request.data.get("rfid", "")
if not rfid:
return Response(
"Missing RFID from request payload", status=status.HTTP_400_BAD_REQUEST
)
if user and rfid:
if user.rfid == rfid:
return Response("OK", status=status.HTTP_200_OK)
user.rfid = rfid
user.save()
return Response("OK", status=status.HTTP_200_OK)
if not user and username and rfid and request_magic_link:
onlineuser = None
try:
onlineuser = User.objects.get(username=username)
except User.DoesNotExist:
return Response(
"User does not exist", status=status.HTTP_400_BAD_REQUEST
)
magic_token = create_magic_token(
onlineuser, rfid, send_token_by_email=send_magic_link_email
)
data = {
"token": str(magic_token.token),
"url": "{}{}".format(
settings.BASE_URL,
reverse("shop_set_rfid", args=[str(magic_token.token)]),
),
}
return Response(data=data, status=status.HTTP_201_CREATED)
return Response("Invalid user credentials", status=status.HTTP_400_BAD_REQUEST)
@method_decorator(login_required, name="dispatch")
class SetRFIDWebView(FormView):
form_class = SetRFIDForm
template_name = "shop/set_rfid.html"
success_url = reverse_lazy("home")
def get(self, request, token="", *args, **kwargs):
get_object_or_404(MagicToken, token=token)
return super().get(request, token, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs["current_rfid"] = self.request.user.rfid
kwargs["token"] = self.kwargs.get("token")
return super().get_context_data(**kwargs)
def get_initial(self):
initial = super().get_initial()
initial["rfid"] = MagicToken.objects.get(token=self.kwargs.get("token")).data
return initial
def post(self, request, token="", *args, **kwargs):
logger = logging.getLogger(__name__)
form = self.get_form()
if not form.is_valid():
return self.form_invalid(form)
if not token:
form.add_error("Det finnes ingen token i denne forespørselen.")
return self.form_invalid(form)
magictoken = None
try:
magictoken = MagicToken.objects.get(token=token)
except MagicToken.DoesNotExist:
form.add_error("Tokenet du prøver å bruke eksisterer ikke.")
return self.form_invalid(form)
old_rfid = magictoken.user.rfid
magictoken.user.rfid = magictoken.data
magictoken.user.save()
logger.debug(
'{authed_user} updated RFID for {user} (from "{old}" to "{new}").'.format(
authed_user=self.request.user,
user=magictoken.user,
old=old_rfid,
new=magictoken.data,
)
)
magictoken.delete()
messages.success(request, "Oppdaterte RFID for {}".format(magictoken.user))
return self.form_valid(form)
|
the-stack_0_1133 | # from https://github.com/ajbrock/BigGAN-PyTorch (MIT license)
# some modifications in class Generator and G_D
# new class "Unet_Discriminator" based on original class "Discriminator"
import numpy as np
import math
import functools
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
from unetgan import layers
from unetgan import utils
import copy
from matplotlib import pyplot as plt
# Architectures for G
# Attention is passed in in the format '32_64' to mean applying an attention
# block at both resolution 32x32 and 64x64.
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
arch = {}
arch[256] = {'in_channels': [ch * item for item in [16, 16, 8, 8, 4, 2]],
'out_channels': [ch * item for item in [16, 8, 8, 4, 2, 1]],
'upsample': [True] * 6,
'resolution': [8, 16, 32, 64, 128, 256],
'attention': {2 ** i: (2 ** i in [int(item) for item in attention.split('_')])
for i in range(3, 9)}}
arch[128] = {'in_channels': [ch * item for item in [16, 16, 8, 4, 2]],
'out_channels': [ch * item for item in [16, 8, 4, 2, 1]],
'upsample': [True] * 5,
'resolution': [8, 16, 32, 64, 128],
'attention': {2 ** i: (2 ** i in [int(item) for item in attention.split('_')])
for i in range(3, 8)}}
return arch
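# For example (illustrative): G_arch(ch=64, attention='64')[128] describes a
# 128x128 generator with out_channels [1024, 512, 256, 128, 64] and a single
# attention block enabled at resolution 64.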
class Generator(nn.Module):
def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,
G_kernel_size=3, G_attn='64', n_classes=1000,
num_G_SVs=1, num_G_SV_itrs=1,
G_shared=True, shared_dim=0, hier=False,
cross_replica=False, mybn=False,
G_activation=nn.ReLU(inplace=False),
G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
G_init='ortho', skip_init=False, no_optim=False,
G_param='SN', norm_style='bn',
**kwargs):
super(Generator, self).__init__()
# Channel width mulitplier
self.ch = G_ch
# Dimensionality of the latent space
self.dim_z = dim_z
# The initial spatial dimensions
self.bottom_width = bottom_width
# Resolution of the output
self.resolution = resolution
# Kernel size?
self.kernel_size = G_kernel_size
# Attention?
self.attention = G_attn
# number of classes, for use in categorical conditional generation
self.n_classes = n_classes
# Use shared embeddings?
self.G_shared = G_shared
# Dimensionality of the shared embedding? Unused if not using G_shared
self.shared_dim = shared_dim if shared_dim > 0 else dim_z
# Hierarchical latent space?
self.hier = hier
# Cross replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
# nonlinearity for residual blocks
self.activation = G_activation
# Initialization style
self.init = G_init
# Parameterization style
self.G_param = G_param
# Normalization style
self.norm_style = norm_style
# Epsilon for BatchNorm?
self.BN_eps = BN_eps
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# fp16?
self.fp16 = G_fp16
# Architecture dict
self.arch = G_arch(self.ch, self.attention)[resolution]
self.unconditional = kwargs["unconditional"]
# If using hierarchical latents, adjust z
if self.hier:
# Number of places z slots into
self.num_slots = len(self.arch['in_channels']) + 1
self.z_chunk_size = (self.dim_z // self.num_slots)
if not self.unconditional:
self.dim_z = self.z_chunk_size * self.num_slots
else:
self.num_slots = 1
self.z_chunk_size = 0
# Which convs, batchnorms, and linear layers to use
if self.G_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
else:
self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
self.which_linear = nn.Linear
# We use a non-spectral-normed embedding here regardless;
# For some reason applying SN to G's embedding seems to randomly cripple G
self.which_embedding = nn.Embedding
if self.unconditional:
bn_linear = nn.Linear
input_size = self.dim_z + (self.shared_dim if self.G_shared else 0)
else:
bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
else self.which_embedding)
input_size = (self.shared_dim + self.z_chunk_size if self.G_shared
else self.n_classes)
self.which_bn = functools.partial(layers.ccbn,
which_linear=bn_linear,
cross_replica=self.cross_replica,
mybn=self.mybn,
input_size=input_size,
norm_style=self.norm_style,
eps=self.BN_eps,
self_modulation=self.unconditional)
# Prepare model
# If not using shared embeddings, self.shared is just a passthrough
self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared
else layers.identity())
# First linear layer
if self.unconditional:
self.linear = self.which_linear(self.dim_z, self.arch['in_channels'][0] * (self.bottom_width ** 2))
else:
self.linear = self.which_linear(self.dim_z // self.num_slots,
self.arch['in_channels'][0] * (self.bottom_width ** 2))
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
# while the inner loop is over a given block
self.blocks = []
for index in range(len(self.arch['out_channels'])):
self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
which_bn=self.which_bn,
activation=self.activation,
upsample=(functools.partial(F.interpolate, scale_factor=2)
if self.arch['upsample'][index] else None))]]
# If attention on this block, attach it to the end
if self.arch['attention'][self.arch['resolution'][index]]:
print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
# output layer: batchnorm-relu-conv.
# Consider using a non-spectral conv here
self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
cross_replica=self.cross_replica,
mybn=self.mybn),
self.activation,
self.which_conv(self.arch['out_channels'][-1], 3))
# Initialize weights. Optionally skip init for testing.
if not skip_init:
self.init_weights()
# Set up optimizer
# If this is an EMA copy, no need for an optim, so just return now
if no_optim:
return
self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
if G_mixed_precision:
print('Using fp16 adam in G...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print("Param count for G's initialized parameters: %d" % self.param_count)
# Note on this forward function: we pass in a y vector which has
# already been passed through G.shared to enable easy class-wise
# interpolation later. If we passed in the one-hot and then ran it through
# G.shared in this forward function, it would be harder to handle.
def forward(self, z, y):
# If hierarchical, concatenate zs and ys
if self.hier:
# faces
if self.unconditional:
ys = [z for _ in range(self.num_slots)]
else:
zs = torch.split(z, self.z_chunk_size, 1)
z = zs[0]
ys = [torch.cat([y, item], 1) for item in zs[1:]]
else:
if self.unconditional:
ys = [None] * len(self.blocks)
else:
ys = [y] * len(self.blocks)
# First linear layer
h = self.linear(z)
# Reshape
h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
# Loop over blocks
for index, blocklist in enumerate(self.blocks):
# Second inner loop in case block has multiple layers
for block in blocklist:
h = block(h, ys[index])
# Apply batchnorm-relu-conv-tanh at output
return torch.tanh(self.output_layer(h))
# Discriminator architecture, same paradigm as G's above
def D_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
arch = {}
arch[256] = {'in_channels': [3] + [ch * item for item in [1, 2, 4, 8, 8, 16]],
'out_channels': [item * ch for item in [1, 2, 4, 8, 8, 16, 16]],
'downsample': [True] * 6 + [False],
'resolution': [128, 64, 32, 16, 8, 4, 4],
'attention': {2 ** i: 2 ** i in [int(item) for item in attention.split('_')]
for i in range(2, 8)}}
arch[128] = {'in_channels': [3] + [ch * item for item in [1, 2, 4, 8, 16]],
'out_channels': [item * ch for item in [1, 2, 4, 8, 16, 16]],
'downsample': [True] * 5 + [False],
'resolution': [64, 32, 16, 8, 4, 4],
'attention': {2 ** i: 2 ** i in [int(item) for item in attention.split('_')]
for i in range(2, 8)}}
return arch
def D_unet_arch(ch=64, attention='64', ksize='333333', dilation='111111', out_channel_multiplier=1):
arch = {}
n = 2
ocm = out_channel_multiplier
# covers bigger perceptual fields
arch[128] = {'in_channels': [3] + [ch * item for item in [1, 2, 4, 8, 16, 8 * n, 4 * 2, 2 * 2, 1 * 2, 1]],
'out_channels': [item * ch for item in [1, 2, 4, 8, 16, 8, 4, 2, 1, 1]],
'downsample': [True] * 5 + [False] * 5,
'upsample': [False] * 5 + [True] * 5,
'resolution': [64, 32, 16, 8, 4, 8, 16, 32, 64, 128],
'attention': {2 ** i: 2 ** i in [int(item) for item in attention.split('_')]
for i in range(2, 11)}}
arch[256] = {'in_channels': [3] + [ch * item for item in [1, 2, 4, 8, 8, 16, 8 * 2, 8 * 2, 4 * 2, 2 * 2, 1 * 2, 1]],
'out_channels': [item * ch for item in [1, 2, 4, 8, 8, 16, 8, 8, 4, 2, 1, 1]],
'downsample': [True] * 6 + [False] * 6,
'upsample': [False] * 6 + [True] * 6,
'resolution': [128, 64, 32, 16, 8, 4, 8, 16, 32, 64, 128, 256],
'attention': {2 ** i: 2 ** i in [int(item) for item in attention.split('_')]
for i in range(2, 13)}}
return arch
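# For example (illustrative): D_unet_arch(ch=64)[128] stacks 5 downsampling
# encoder blocks and 5 upsampling decoder blocks; in the forward pass the
# later decoder blocks concatenate mirrored encoder features (U-Net skips),
# so the per-pixel output returns to the 128x128 input resolution.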
class Unet_Discriminator(nn.Module):
def __init__(self, D_ch=64, D_wide=True, resolution=128,
D_kernel_size=3, D_attn='64', n_classes=1000,
num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
D_init='ortho', skip_init=False, D_param='SN', decoder_skip_connection=True, **kwargs):
super(Unet_Discriminator, self).__init__()
# Width multiplier
self.ch = D_ch
# Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?
self.D_wide = D_wide
# Resolution
self.resolution = resolution
# Kernel size
self.kernel_size = D_kernel_size
# Attention?
self.attention = D_attn
# Number of classes
self.n_classes = n_classes
# Activation
self.activation = D_activation
# Initialization style
self.init = D_init
# Parameterization style
self.D_param = D_param
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# Fp16?
self.fp16 = D_fp16
if self.resolution == 128:
self.save_features = [0, 1, 2, 3, 4]
elif self.resolution == 256:
self.save_features = [0, 1, 2, 3, 4, 5]
self.out_channel_multiplier = 1 # 4
# Architecture
self.arch = D_unet_arch(self.ch, self.attention, out_channel_multiplier=self.out_channel_multiplier)[resolution]
self.unconditional = kwargs["unconditional"]
# Which convs, batchnorms, and linear layers to use
# No option to turn off SN in D right now
if self.D_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
self.which_embedding = functools.partial(layers.SNEmbedding,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
# Prepare model
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
self.blocks = []
for index in range(len(self.arch['out_channels'])):
if self.arch["downsample"][index]:
self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
wide=self.D_wide,
activation=self.activation,
preactivation=(index > 0),
downsample=(
nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
elif self.arch["upsample"][index]:
upsample_function = (
functools.partial(F.interpolate, scale_factor=2, mode="nearest") # mode=nearest is default
if self.arch['upsample'][index] else None)
self.blocks += [[layers.GBlock2(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
# which_bn=self.which_bn,
activation=self.activation,
upsample=upsample_function, skip_connection=True)]]
# If attention on this block, attach it to the end
attention_condition = index < 5
if self.arch['attention'][self.arch['resolution'][index]] and attention_condition: # index < 5
print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
print("index = ", index)
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
last_layer = nn.Conv2d(self.ch * self.out_channel_multiplier, 1, kernel_size=1)
self.blocks.append(last_layer)
#
# Linear output layer. The output dimension is typically 1, but may be
# larger if we're e.g. turning this into a VAE with an inference output
self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)
self.linear_middle = self.which_linear(16 * self.ch, output_dim)
# Embedding for projection discrimination
# if not kwargs["agnostic_unet"] and not kwargs["unconditional"]:
# self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1]+extra)
if not kwargs["unconditional"]:
self.embed_middle = self.which_embedding(self.n_classes, 16 * self.ch)
self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])
# Initialize weights
if not skip_init:
self.init_weights()
###
print("_____params______")
for name, param in self.named_parameters():
print(name, param.size())
# Set up optimizer
self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
if D_mixed_precision:
print('Using fp16 adam in D...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print("Param count for D's initialized parameters: %d" % self.param_count)
def forward(self, x, y=None):
# Stick x into h for cleaner for loops without flow control
h = x
residual_features = []
residual_features.append(x)
# Loop over blocks
for index, blocklist in enumerate(self.blocks[:-1]):
if self.resolution == 128:
if index == 6:
h = torch.cat((h, residual_features[4]), dim=1)
elif index == 7:
h = torch.cat((h, residual_features[3]), dim=1)
elif index == 8: #
h = torch.cat((h, residual_features[2]), dim=1)
elif index == 9: #
h = torch.cat((h, residual_features[1]), dim=1)
if self.resolution == 256:
if index == 7:
h = torch.cat((h, residual_features[5]), dim=1)
elif index == 8:
h = torch.cat((h, residual_features[4]), dim=1)
elif index == 9: #
h = torch.cat((h, residual_features[3]), dim=1)
elif index == 10: #
h = torch.cat((h, residual_features[2]), dim=1)
elif index == 11:
h = torch.cat((h, residual_features[1]), dim=1)
for block in blocklist:
h = block(h)
if index in self.save_features[:-1]:
residual_features.append(h)
if index == self.save_features[-1]:
# Apply global sum pooling as in SN-GAN
h_ = torch.sum(self.activation(h), [2, 3])
# Get initial class-unconditional output
bottleneck_out = self.linear_middle(h_)
# Get projection of final featureset onto class vectors and add to evidence
if self.unconditional:
projection = 0
else:
# this is the bottleneck classifier c
emb_mid = self.embed_middle(y)
projection = torch.sum(emb_mid * h_, 1, keepdim=True)
bottleneck_out = bottleneck_out + projection
out = self.blocks[-1](h)
if self.unconditional:
proj = 0
else:
emb = self.embed(y)
emb = emb.view(emb.size(0), emb.size(1), 1, 1).expand_as(h)
proj = torch.sum(emb * h, 1, keepdim=True)
################
out = out + proj
out = out.view(out.size(0), 1, self.resolution, self.resolution)
return out, bottleneck_out
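# Editor's note (illustrative summary, not from the original authors): the
# forward pass above produces two signals in the style of a projection
# discriminator. Unless the model is unconditional,
#   bottleneck_out = linear_middle(sum_pool(h_mid)) + <embed_middle(y), sum_pool(h_mid)>
# gives one per-image score from the global sum-pooled encoder bottleneck, while
#   out[b, :, i, j] = conv1x1(h)[b, :, i, j] + <embed(y), h[b, :, i, j]>
# gives a per-pixel real/fake decision map of size resolution x resolution
# computed from the decoder output h.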
# Parallelized G_D to minimize cross-gpu communication
# Without this, Generator outputs would get all-gathered and then rebroadcast.
class G_D(nn.Module):
def __init__(self, G, D, config):
super(G_D, self).__init__()
self.G = G
self.D = D
self.config = config
def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
split_D=False, dw1=[], dw2=[], reference_x=None, mixup=False, mixup_only=False, target_map=None):
if mixup:
gy = dy
      # gy is set to dy so that the fake and real samples being mixed share the same class
# If training G, enable grad tape
with torch.set_grad_enabled(train_G):
G_z = self.G(z, self.G.shared(gy))
# Cast as necessary
if self.G.fp16 and not self.D.fp16:
G_z = G_z.float()
if self.D.fp16 and not self.G.fp16:
G_z = G_z.half()
if mixup:
initial_x_size = x.size(0)
mixed = target_map * x + (1 - target_map) * G_z
mixed_y = dy
if not mixup_only:
# we get here in the cutmix cons extra case
D_input = torch.cat([G_z, x], 0) if x is not None else G_z
D_class = torch.cat([gy, dy], 0) if dy is not None else gy
dmap = torch.tensor([])
if mixup:
# we get here in the cutmix "consistency loss and augmentation" case, if "mixup" is true for the current round (depends on p mixup)
D_input = torch.cat([D_input, mixed], 0)
if self.config["dataset"] != "coco_animals":
D_class = torch.cat([D_class.float(), mixed_y.float()], 0)
else:
D_class = torch.cat([D_class.long(), mixed_y.long()], 0)
else:
# not reached in cutmix "consistency loss and augmentation"
D_input = mixed
D_class = mixed_y
dmap = torch.tensor([])
del G_z
del x
G_z = None
x = None
D_out, D_middle = self.D(D_input, D_class)
del D_input
del D_class
if x is not None:
if not mixup:
out = torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real
else:
out = torch.split(D_out, [G_z.shape[0], x.shape[0], mixed.shape[0]]) # D_fake, D_real, D_mixed
out = out + (G_z,)
if mixup:
out = out + (mixed,)
if not mixup:
D_middle = torch.split(D_middle, [G_z.shape[0], x.shape[0]]) # D_middle_fake, D_middle_real
else:
D_middle = torch.split(D_middle, [G_z.shape[0], x.shape[0], mixed.shape[0]])
out = out + D_middle
###return target map as well
if mixup:
out = out + (target_map,)
return out
else:
      # reached when x is None, i.e. in the mixup_only case or when only fake samples are scored
out = (D_out,)
if return_G_z:
out = out + (G_z,)
if mixup_only:
out = out + (mixed,)
out = out + (D_middle,)
##return target map as well
if mixup:
out = out + (target_map,)
return out
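# Editor's sketch (hypothetical usage, not part of the original file): G_D is
# typically wrapped in nn.DataParallel and called once per discriminator step
# so that G and D run inside a single parallelized graph, e.g.
#
#   GD = nn.DataParallel(G_D(G, D, config))
#   D_fake, D_real, G_z, D_middle_fake, D_middle_real = GD(
#       z_, y_, x=real_images, dy=real_labels, train_G=False)
#
# The unpacking above assumes x is given and mixup is disabled; the variable
# names z_, y_, real_images and real_labels are placeholders.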
|
the-stack_0_1135 | import agate
import decimal
import unittest
from unittest import mock
import dbt.flags as flags
from dbt.task.debug import DebugTask
from dbt.adapters.base.query_headers import MacroQueryStringSetter
from dbt.adapters.postgres import PostgresAdapter
from dbt.adapters.postgres import Plugin as PostgresPlugin
from dbt.contracts.files import FileHash
from dbt.contracts.graph.manifest import ManifestStateCheck
from dbt.clients import agate_helper
from dbt.exceptions import ValidationException, DbtConfigError
from dbt.logger import GLOBAL_LOGGER as logger # noqa
from psycopg2 import extensions as psycopg2_extensions
from psycopg2 import DatabaseError
from .utils import config_from_parts_or_dicts, inject_adapter, mock_connection, TestAdapterConversions, load_internal_manifest_macros, clear_plugin
class TestPostgresAdapter(unittest.TestCase):
def setUp(self):
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'config-version': 2,
}
profile_cfg = {
'outputs': {
'test': {
'type': 'postgres',
'dbname': 'postgres',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5432,
'schema': 'public',
}
},
'target': 'test'
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self._adapter = None
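    # Editor's note (illustrative): profile_cfg above mirrors what a user would
    # normally put in profiles.yml, e.g.
    #
    #   test:
    #     target: test
    #     outputs:
    #       test:
    #         type: postgres
    #         dbname: postgres
    #         user: root
    #         host: thishostshouldnotexist
    #         pass: password
    #         port: 5432
    #         schema: public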
@property
def adapter(self):
if self._adapter is None:
self._adapter = PostgresAdapter(self.config)
inject_adapter(self._adapter, PostgresPlugin)
return self._adapter
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_acquire_connection_validations(self, psycopg2):
try:
connection = self.adapter.acquire_connection('dummy')
except ValidationException as e:
self.fail('got ValidationException: {}'.format(str(e)))
except BaseException as e:
self.fail('acquiring connection failed with unknown exception: {}'
.format(str(e)))
self.assertEqual(connection.type, 'postgres')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once()
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_acquire_connection(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
self.assertEqual(connection.state, 'open')
self.assertNotEqual(connection.handle, None)
psycopg2.connect.assert_called_once()
def test_cancel_open_connections_empty(self):
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_master(self):
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections[key] = mock_connection('master')
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_single(self):
master = mock_connection('master')
model = mock_connection('model')
key = self.adapter.connections.get_thread_identifier()
model.handle.get_backend_pid.return_value = 42
self.adapter.connections.thread_connections.update({
key: master,
1: model,
})
with mock.patch.object(self.adapter.connections, 'add_query') as add_query:
query_result = mock.MagicMock()
add_query.return_value = (None, query_result)
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 1)
add_query.assert_called_once_with('select pg_terminate_backend(42)')
master.handle.get_backend_pid.assert_not_called()
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_default_connect_timeout(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_changed_connect_timeout(self, psycopg2):
self.config.credentials = self.config.credentials.replace(connect_timeout=30)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=30,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_default_keepalive(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_changed_keepalive(self, psycopg2):
self.config.credentials = self.config.credentials.replace(keepalives_idle=256)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
keepalives_idle=256,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_default_application_name(self, psycopg2):
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_changed_application_name(self, psycopg2):
self.config.credentials = self.config.credentials.replace(application_name='myapp')
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='myapp')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_role(self, psycopg2):
self.config.credentials = self.config.credentials.replace(role='somerole')
connection = self.adapter.acquire_connection('dummy')
cursor = connection.handle.cursor()
cursor.execute.assert_called_once_with('set role somerole')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_search_path(self, psycopg2):
self.config.credentials = self.config.credentials.replace(search_path="test")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt',
options="-c search_path=test")
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_sslmode(self, psycopg2):
self.config.credentials = self.config.credentials.replace(sslmode="require")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
sslmode="require",
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_ssl_parameters(self, psycopg2):
self.config.credentials = self.config.credentials.replace(sslmode="verify-ca")
self.config.credentials = self.config.credentials.replace(sslcert="service.crt")
self.config.credentials = self.config.credentials.replace(sslkey="service.key")
self.config.credentials = self.config.credentials.replace(sslrootcert="ca.crt")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
sslmode="verify-ca",
sslcert="service.crt",
sslkey="service.key",
sslrootcert="ca.crt",
application_name='dbt')
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_schema_with_space(self, psycopg2):
self.config.credentials = self.config.credentials.replace(search_path="test test")
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt',
options="-c search_path=test\ test")
@mock.patch('dbt.adapters.postgres.connections.psycopg2')
def test_set_zero_keepalive(self, psycopg2):
self.config.credentials = self.config.credentials.replace(keepalives_idle=0)
connection = self.adapter.acquire_connection('dummy')
psycopg2.connect.assert_not_called()
connection.handle
psycopg2.connect.assert_called_once_with(
dbname='postgres',
user='root',
host='thishostshouldnotexist',
password='password',
port=5432,
connect_timeout=10,
application_name='dbt')
@mock.patch.object(PostgresAdapter, 'execute_macro')
@mock.patch.object(PostgresAdapter, '_get_catalog_schemas')
def test_get_catalog_various_schemas(self, mock_get_schemas, mock_execute):
column_names = ['table_database', 'table_schema', 'table_name']
rows = [
('dbt', 'foo', 'bar'),
('dbt', 'FOO', 'baz'),
('dbt', None, 'bar'),
('dbt', 'quux', 'bar'),
('dbt', 'skip', 'bar'),
]
mock_execute.return_value = agate.Table(rows=rows,
column_names=column_names)
mock_get_schemas.return_value.items.return_value = [(mock.MagicMock(database='dbt'), {'foo', 'FOO', 'quux'})]
mock_manifest = mock.MagicMock()
mock_manifest.get_used_schemas.return_value = {('dbt', 'foo'),
('dbt', 'quux')}
catalog, exceptions = self.adapter.get_catalog(mock_manifest)
self.assertEqual(
set(map(tuple, catalog)),
{('dbt', 'foo', 'bar'), ('dbt', 'FOO', 'baz'), ('dbt', 'quux', 'bar')}
)
self.assertEqual(exceptions, [])
class TestConnectingPostgresAdapter(unittest.TestCase):
def setUp(self):
self.target_dict = {
'type': 'postgres',
'dbname': 'postgres',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5432,
'schema': 'public'
}
profile_cfg = {
'outputs': {
'test': self.target_dict,
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2,
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self.handle = mock.MagicMock(spec=psycopg2_extensions.connection)
self.cursor = self.handle.cursor.return_value
self.mock_execute = self.cursor.execute
self.patcher = mock.patch('dbt.adapters.postgres.connections.psycopg2')
self.psycopg2 = self.patcher.start()
# Create the Manifest.state_check patcher
@mock.patch('dbt.parser.manifest.ManifestLoader.build_manifest_state_check')
def _mock_state_check(self):
config = self.root_project
all_projects = self.all_projects
return ManifestStateCheck(
vars_hash=FileHash.from_contents('vars'),
project_hashes={name: FileHash.from_contents(name) for name in all_projects},
profile_hash=FileHash.from_contents('profile'),
)
self.load_state_check = mock.patch('dbt.parser.manifest.ManifestLoader.build_manifest_state_check')
self.mock_state_check = self.load_state_check.start()
self.mock_state_check.side_effect = _mock_state_check
self.psycopg2.connect.return_value = self.handle
self.adapter = PostgresAdapter(self.config)
self.adapter._macro_manifest_lazy = load_internal_manifest_macros(self.config)
self.adapter.connections.query_header = MacroQueryStringSetter(self.config, self.adapter._macro_manifest_lazy)
self.qh_patch = mock.patch.object(self.adapter.connections.query_header, 'add')
self.mock_query_header_add = self.qh_patch.start()
self.mock_query_header_add.side_effect = lambda q: '/* dbt */\n{}'.format(q)
self.adapter.acquire_connection()
inject_adapter(self.adapter, PostgresPlugin)
def tearDown(self):
# we want a unique self.handle every time.
self.adapter.cleanup_connections()
self.qh_patch.stop()
self.patcher.stop()
self.load_state_check.stop()
clear_plugin(PostgresPlugin)
def test_quoting_on_drop_schema(self):
relation = self.adapter.Relation.create(
database='postgres', schema='test_schema',
quote_policy=self.adapter.config.quoting,
)
self.adapter.drop_schema(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ndrop schema if exists "test_schema" cascade', None)
])
def test_quoting_on_drop(self):
relation = self.adapter.Relation.create(
database='postgres',
schema='test_schema',
identifier='test_table',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.drop_relation(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ndrop table if exists "postgres"."test_schema".test_table cascade', None)
])
def test_quoting_on_truncate(self):
relation = self.adapter.Relation.create(
database='postgres',
schema='test_schema',
identifier='test_table',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.truncate_relation(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ntruncate table "postgres"."test_schema".test_table', None)
])
def test_quoting_on_rename(self):
from_relation = self.adapter.Relation.create(
database='postgres',
schema='test_schema',
identifier='table_a',
type='table',
quote_policy=self.adapter.config.quoting,
)
to_relation = self.adapter.Relation.create(
database='postgres',
schema='test_schema',
identifier='table_b',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.rename_relation(
from_relation=from_relation,
to_relation=to_relation
)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\nalter table "postgres"."test_schema".table_a rename to table_b', None)
])
def test_debug_connection_ok(self):
DebugTask.validate_connection(self.target_dict)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\nselect 1 as id', None)
])
def test_debug_connection_fail_nopass(self):
del self.target_dict['pass']
with self.assertRaises(DbtConfigError):
DebugTask.validate_connection(self.target_dict)
def test_connection_fail_select(self):
self.mock_execute.side_effect = DatabaseError()
with self.assertRaises(DbtConfigError):
DebugTask.validate_connection(self.target_dict)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\nselect 1 as id', None)
])
def test_dbname_verification_is_case_insensitive(self):
# Override adapter settings from setUp()
self.target_dict['dbname'] = 'Postgres'
profile_cfg = {
'outputs': {
'test': self.target_dict,
},
'target': 'test'
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'config-version': 2,
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self.adapter.cleanup_connections()
self._adapter = PostgresAdapter(self.config)
self.adapter.verify_database('postgres')
class TestPostgresFilterCatalog(unittest.TestCase):
def test__catalog_filter_table(self):
manifest = mock.MagicMock()
manifest.get_used_schemas.return_value = [['a', 'B'], ['a', '1234']]
column_names = ['table_name', 'table_database', 'table_schema', 'something']
rows = [
['foo', 'a', 'b', '1234'], # include
['foo', 'a', '1234', '1234'], # include, w/ table schema as str
['foo', 'c', 'B', '1234'], # skip
['1234', 'A', 'B', '1234'], # include, w/ table name as str
]
table = agate.Table(
rows, column_names, agate_helper.DEFAULT_TYPE_TESTER
)
result = PostgresAdapter._catalog_filter_table(table, manifest)
assert len(result) == 3
for row in result.rows:
assert isinstance(row['table_schema'], str)
assert isinstance(row['table_database'], str)
assert isinstance(row['table_name'], str)
assert isinstance(row['something'], decimal.Decimal)
class TestPostgresAdapterConversions(TestAdapterConversions):
def test_convert_text_type(self):
rows = [
['', 'a1', 'stringval1'],
['', 'a2', 'stringvalasdfasdfasdfa'],
['', 'a3', 'stringval3'],
]
agate_table = self._make_table_of(rows, agate.Text)
expected = ['text', 'text', 'text']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_text_type(agate_table, col_idx) == expect
def test_convert_number_type(self):
rows = [
['', '23.98', '-1'],
['', '12.78', '-2'],
['', '79.41', '-3'],
]
agate_table = self._make_table_of(rows, agate.Number)
expected = ['integer', 'float8', 'integer']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_number_type(agate_table, col_idx) == expect
def test_convert_boolean_type(self):
rows = [
['', 'false', 'true'],
['', 'false', 'false'],
['', 'false', 'true'],
]
agate_table = self._make_table_of(rows, agate.Boolean)
expected = ['boolean', 'boolean', 'boolean']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_boolean_type(agate_table, col_idx) == expect
def test_convert_datetime_type(self):
rows = [
['', '20190101T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190102T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190103T01:01:01Z', '2019-01-01 01:01:01'],
]
agate_table = self._make_table_of(rows, [agate.DateTime, agate_helper.ISODateTime, agate.DateTime])
expected = ['timestamp without time zone', 'timestamp without time zone', 'timestamp without time zone']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_datetime_type(agate_table, col_idx) == expect
def test_convert_date_type(self):
rows = [
['', '2019-01-01', '2019-01-04'],
['', '2019-01-02', '2019-01-04'],
['', '2019-01-03', '2019-01-04'],
]
agate_table = self._make_table_of(rows, agate.Date)
expected = ['date', 'date', 'date']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_date_type(agate_table, col_idx) == expect
def test_convert_time_type(self):
# dbt's default type testers actually don't have a TimeDelta at all.
agate.TimeDelta
rows = [
['', '120s', '10s'],
['', '3m', '11s'],
['', '1h', '12s'],
]
agate_table = self._make_table_of(rows, agate.TimeDelta)
expected = ['time', 'time', 'time']
for col_idx, expect in enumerate(expected):
assert PostgresAdapter.convert_time_type(agate_table, col_idx) == expect
|
the-stack_0_1138 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=8,
lag=0,
reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
loss_function=partial(scaled_cost3, ignore_inactive=False, loss_func=mdn_nll),
updates_func=momentum,
learning_rate=5e-3,
learning_rate_changes_by_iteration={
50: 1e-3,
200: 5e-4,
400: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 1024
NUM_FILTERS = 50
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 10,
'stride': 2,
'nonlinearity': rectify,
'W': Normal(std=1/sqrt(source.seq_length))
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N * NUM_FILTERS)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.output_shape()[1] * source.output_shape()[2],
'W': Normal(std=1/sqrt(N)),
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (16 * 256, 5)
},
# {
# 'type': DenseLayer,
# 'num_units': source.output_shape()[1] * source.output_shape()[2],
# 'W': Normal(std=1/sqrt(N)),
# 'nonlinearity': T.nnet.softplus
# }
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 1,
'nonlinearity_mu': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
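# Editor's note (approximate, derived from the settings above and not verified
# against a run): with seq_length=512 plus input_padding=8 and a length-10,
# stride-2 convolution, the time axis comes out at roughly (520 - 10) / 2 + 1
# = 256 steps, matching the target that is subsampled by 2. The dense stack
# then emits 256 * 5 values per sequence, which the ReshapeLayer flattens to
# (16 * 256, 5) rows so that the MixtureDensityLayer models the five appliance
# targets per time step with a single Gaussian component each.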
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
the-stack_0_1142 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import numbers
from botocore.utils import parse_timestamp
from botocore.docs.utils import escape_controls
from botocore.compat import six
class SharedExampleDocumenter(object):
def document_shared_example(self, example, prefix, section,
operation_model):
"""Documents a single shared example based on its definition.
:param example: The model of the example
:param prefix: The prefix to use in the method example.
:param section: The section to write to.
:param operation_model: The model of the operation used in the example
"""
section.style.new_paragraph()
section.write(example.get('description'))
section.style.new_line()
self.document_input(section, example, prefix,
operation_model.input_shape)
self.document_output(section, example, operation_model.output_shape)
def document_input(self, section, example, prefix, shape):
input_section = section.add_new_section('input')
input_section.style.start_codeblock()
if prefix is not None:
input_section.write(prefix)
params = example.get('input', {})
comments = example.get('comments')
if comments:
comments = comments.get('input')
param_section = input_section.add_new_section('parameters')
self._document_params(param_section, params, comments, [], shape)
closing_section = input_section.add_new_section('input-close')
closing_section.style.new_line()
closing_section.style.new_line()
closing_section.write('print(response)')
closing_section.style.end_codeblock()
def document_output(self, section, example, shape):
output_section = section.add_new_section('output')
output_section.style.new_line()
output_section.write('Expected Output:')
output_section.style.new_line()
output_section.style.start_codeblock()
params = example.get('output', {})
# There might not be an output, but we will return metadata anyway
params['ResponseMetadata'] = {"...": "..."}
comments = example.get('comments')
if comments:
comments = comments.get('output')
self._document_dict(output_section, params, comments, [], shape, True)
closing_section = output_section.add_new_section('output-close')
closing_section.style.end_codeblock()
def _document(self, section, value, comments, path, shape):
"""
:param section: The section to add the docs to.
:param value: The input / output values representing the parameters that
are included in the example.
:param comments: The dictionary containing all the comments to be
applied to the example.
:param path: A list describing where the documenter is in traversing the
parameters. This is used to find the equivalent location
in the comments dictionary.
"""
if isinstance(value, dict):
self._document_dict(section, value, comments, path, shape)
elif isinstance(value, list):
self._document_list(section, value, comments, path, shape)
elif isinstance(value, numbers.Number):
self._document_number(section, value, path)
elif shape and shape.type_name == 'timestamp':
self._document_datetime(section, value, path)
else:
self._document_str(section, value, path)
def _document_dict(self, section, value, comments, path, shape,
top_level=False):
dict_section = section.add_new_section('dict-value')
self._start_nested_value(dict_section, '{')
for key, val in value.items():
path.append('.%s' % key)
item_section = dict_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write("'%s': " % key)
# Shape could be none if there is no output besides ResponseMetadata
item_shape = None
if shape:
if shape.type_name == 'structure':
item_shape = shape.members.get(key)
elif shape.type_name == 'map':
item_shape = shape.value
self._document(item_section, val, comments, path, item_shape)
path.pop()
dict_section_end = dict_section.add_new_section('ending-brace')
self._end_nested_value(dict_section_end, '}')
if not top_level:
dict_section_end.write(',')
def _document_params(self, section, value, comments, path, shape):
param_section = section.add_new_section('param-values')
self._start_nested_value(param_section, '(')
for key, val in value.items():
path.append('.%s' % key)
item_section = param_section.add_new_section(key)
item_section.style.new_line()
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
item_section.write(key + '=')
# Shape could be none if there are no input parameters
item_shape = None
if shape:
item_shape = shape.members.get(key)
self._document(item_section, val, comments, path, item_shape)
path.pop()
param_section_end = param_section.add_new_section('ending-parenthesis')
self._end_nested_value(param_section_end, ')')
def _document_list(self, section, value, comments, path, shape):
list_section = section.add_new_section('list-section')
self._start_nested_value(list_section, '[')
item_shape = shape.member
for index, val in enumerate(value):
item_section = list_section.add_new_section(index)
item_section.style.new_line()
path.append('[%s]' % index)
item_comment = self._get_comment(path, comments)
if item_comment:
item_section.write(item_comment)
item_section.style.new_line()
self._document(item_section, val, comments, path, item_shape)
path.pop()
list_section_end = list_section.add_new_section('ending-bracket')
self._end_nested_value(list_section_end, '],')
def _document_str(self, section, value, path):
# We do the string conversion because this might accept a type that
# we don't specifically address.
safe_value = escape_controls(value)
section.write(u"'%s'," % six.text_type(safe_value))
def _document_number(self, section, value, path):
section.write("%s," % str(value))
def _document_datetime(self, section, value, path):
datetime_tuple = parse_timestamp(value).timetuple()
datetime_str = str(datetime_tuple[0])
for i in range(1, len(datetime_tuple)):
datetime_str += ", " + str(datetime_tuple[i])
section.write("datetime(%s)," % datetime_str)
def _get_comment(self, path, comments):
key = re.sub(r'^\.', '', ''.join(path))
if comments and key in comments:
return '# ' + comments[key]
else:
return ''
def _start_nested_value(self, section, start):
section.write(start)
section.style.indent()
section.style.indent()
def _end_nested_value(self, section, end):
section.style.dedent()
section.style.dedent()
section.style.new_line()
section.write(end)
def document_shared_examples(section, operation_model, example_prefix,
shared_examples):
"""Documents the shared examples
:param section: The section to write to.
:param operation_model: The model of the operation.
:param example_prefix: The prefix to use in the method example.
:param shared_examples: The shared JSON examples from the model.
"""
container_section = section.add_new_section('shared-examples')
container_section.style.new_paragraph()
container_section.style.bold('Examples')
documenter = SharedExampleDocumenter()
for example in shared_examples:
documenter.document_shared_example(
example=example,
section=container_section.add_new_section(example['id']),
prefix=example_prefix,
operation_model=operation_model
)
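# Editor's sketch (illustrative only; service, operation and values are made
# up): for one shared example the documenter above emits output roughly like
#
#   response = client.describe_instances(
#       InstanceIds=[
#           'i-1234567890abcdef0',
#       ],
#   )
#
#   print(response)
#
#   Expected Output:
#
#       {
#           'Reservations': [...],
#           'ResponseMetadata': {
#               '...': '...',
#           },
#       }
#
# where the first block comes from document_input() (the call prefix is the
# ``example_prefix`` passed in) and the second from document_output().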
|
the-stack_0_1143 | # coding: utf-8
"""
CRM cards
Allows an app to extend the CRM UI by surfacing custom cards in the sidebar of record pages. These cards are defined up-front as part of app configuration, then populated by external data fetch requests when the record page is accessed by a user. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.cards.configuration import Configuration
class ObjectToken(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"name": "str", "label": "str", "data_type": "str", "value": "str"}
attribute_map = {"name": "name", "label": "label", "data_type": "dataType", "value": "value"}
def __init__(self, name=None, label=None, data_type=None, value=None, local_vars_configuration=None): # noqa: E501
"""ObjectToken - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._label = None
self._data_type = None
self._value = None
self.discriminator = None
if name is not None:
self.name = name
if label is not None:
self.label = label
if data_type is not None:
self.data_type = data_type
self.value = value
@property
def name(self):
"""Gets the name of this ObjectToken. # noqa: E501
:return: The name of this ObjectToken. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ObjectToken.
:param name: The name of this ObjectToken. # noqa: E501
:type: str
"""
self._name = name
@property
def label(self):
"""Gets the label of this ObjectToken. # noqa: E501
:return: The label of this ObjectToken. # noqa: E501
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this ObjectToken.
:param label: The label of this ObjectToken. # noqa: E501
:type: str
"""
self._label = label
@property
def data_type(self):
"""Gets the data_type of this ObjectToken. # noqa: E501
:return: The data_type of this ObjectToken. # noqa: E501
:rtype: str
"""
return self._data_type
@data_type.setter
def data_type(self, data_type):
"""Sets the data_type of this ObjectToken.
:param data_type: The data_type of this ObjectToken. # noqa: E501
:type: str
"""
allowed_values = ["BOOLEAN", "CURRENCY", "DATE", "DATETIME", "EMAIL", "LINK", "NUMERIC", "STRING", "STATUS"] # noqa: E501
if self.local_vars_configuration.client_side_validation and data_type not in allowed_values: # noqa: E501
raise ValueError("Invalid value for `data_type` ({0}), must be one of {1}".format(data_type, allowed_values)) # noqa: E501
self._data_type = data_type
@property
def value(self):
"""Gets the value of this ObjectToken. # noqa: E501
:return: The value of this ObjectToken. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ObjectToken.
:param value: The value of this ObjectToken. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObjectToken):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ObjectToken):
return True
return self.to_dict() != other.to_dict()
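# Editor's sketch (hand-written, not generator output): minimal usage of the
# model defined above; the field values are invented for illustration.
def _example_object_token():
    token = ObjectToken(
        name="shipping_status",
        label="Shipping status",
        data_type="STRING",
        value="Shipped",
    )
    # to_dict() keys use the Python attribute names (e.g. 'data_type'), while
    # attribute_map records the wire name 'dataType'.
    return token.to_dict()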
|
the-stack_0_1144 | """Utilities for including Python state in TensorFlow checkpoints."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import python_state as core_python_state
# pylint: disable=g-import-not-at-top
try:
# In Python 2.x, use the faster string buffering option.
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# pylint: enable=g-import-not-at-top
class NumpyState(base.Trackable):
"""A trackable object whose NumPy array attributes are saved/restored.
Example usage:
```python
arrays = tf.contrib.checkpoint.NumpyState()
checkpoint = tf.train.Checkpoint(numpy_arrays=arrays)
arrays.x = numpy.zeros([3, 4])
save_path = checkpoint.save("/tmp/ckpt")
arrays.x[1, 1] = 4.
checkpoint.restore(save_path)
assert (arrays.x == numpy.zeros([3, 4])).all()
second_checkpoint = tf.train.Checkpoint(
numpy_arrays=tf.contrib.checkpoint.NumpyState())
# Attributes of NumpyState objects are created automatically by restore()
second_checkpoint.restore(save_path)
assert (second_checkpoint.numpy_arrays.x == numpy.zeros([3, 4])).all()
```
Note that `NumpyState` objects re-create the attributes of the previously
saved object on `restore()`. This is in contrast to TensorFlow variables, for
which a `Variable` object must be created and assigned to an attribute.
This snippet works both when graph building and when executing eagerly. On
save, the NumPy array(s) are fed as strings to be saved in the checkpoint (via
a placeholder when graph building, or as a string constant when executing
eagerly). When restoring they skip the TensorFlow graph entirely, and so no
restore ops need be run. This means that restoration always happens eagerly,
rather than waiting for `checkpoint.restore(...).run_restore_ops()` like
TensorFlow variables when graph building.
"""
def _lookup_dependency(self, name):
"""Create placeholder NumPy arrays for to-be-restored attributes.
Typically `_lookup_dependency` is used to check by name whether a dependency
exists. We cheat slightly by creating a trackable object for `name` if
we don't already have one, giving us attribute re-creation behavior when
loading a checkpoint.
Args:
name: The name of the dependency being checked.
Returns:
An existing dependency if one exists, or a new `_NumpyWrapper` placeholder
dependency (which will generally be restored immediately).
"""
value = super(NumpyState, self)._lookup_dependency(name)
if value is None:
value = _NumpyWrapper(numpy.array([]))
new_reference = base.TrackableReference(name=name, ref=value)
self._unconditional_checkpoint_dependencies.append(new_reference)
self._unconditional_dependency_names[name] = value
super(NumpyState, self).__setattr__(name, value)
return value
def __getattribute__(self, name):
"""Un-wrap `_NumpyWrapper` objects when accessing attributes."""
value = super(NumpyState, self).__getattribute__(name)
if isinstance(value, _NumpyWrapper):
return value.array
return value
def __setattr__(self, name, value):
"""Automatically wrap NumPy arrays assigned to attributes."""
# TODO(allenl): Consider supporting lists/tuples, either ad-hoc or by making
# ndarrays trackable natively and using standard trackable list
# tracking.
if isinstance(value, (numpy.ndarray, numpy.generic)):
try:
existing = super(NumpyState, self).__getattribute__(name)
existing.array = value
return
except AttributeError:
value = _NumpyWrapper(value)
self._track_trackable(value, name=name, overwrite=True)
elif (name not in ("_self_setattr_tracking", "_self_update_uid")
and getattr(self, "_self_setattr_tracking", True)):
# Mixing restore()-created attributes with user-added trackable
# objects is tricky, since we can't use the `_lookup_dependency` trick to
# re-create attributes (we might accidentally steal the restoration for
# another trackable object). For now `NumpyState` objects must be
# leaf nodes. Theoretically we could add some extra arguments to
# `_lookup_dependency` to figure out whether we should create a NumPy
# array for the attribute or not.
raise NotImplementedError(
("Assigned %s to the %s property of %s, which is not a NumPy array. "
"Currently mixing NumPy arrays and other trackable objects is "
"not supported. File a feature request if this limitation bothers "
"you.")
% (value, name, self))
super(NumpyState, self).__setattr__(name, value)
class _NumpyWrapper(core_python_state.PythonState):
"""Wraps a NumPy array for storage in an object-based checkpoint."""
def __init__(self, array):
"""Specify a NumPy array to wrap.
Args:
array: The NumPy array to save and restore (may be overwritten).
"""
self.array = array
def serialize(self):
"""Callback to serialize the array."""
string_file = BytesIO()
try:
numpy.save(string_file, self.array, allow_pickle=False)
serialized = string_file.getvalue()
finally:
string_file.close()
return serialized
def deserialize(self, string_value):
"""Callback to deserialize the array."""
string_file = BytesIO(string_value)
try:
self.array = numpy.load(string_file, allow_pickle=False)
finally:
string_file.close()
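# Editor's sketch (illustrative, not part of the public TensorFlow API): a
# manual serialize/deserialize round trip for the wrapper above. In normal use
# this happens implicitly through tf.train.Checkpoint, as shown in the
# NumpyState docstring.
def _example_numpy_wrapper_roundtrip():
  wrapper = _NumpyWrapper(numpy.arange(6).reshape(2, 3))
  blob = wrapper.serialize()    # bytes in NumPy's .npy format
  wrapper.deserialize(blob)     # restores wrapper.array from the bytes
  return wrapper.array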
|
the-stack_0_1145 | from typing import List, Optional
import aiosqlite
from chiadoge.types.blockchain_format.coin import Coin
from chiadoge.types.blockchain_format.sized_bytes import bytes32
from chiadoge.types.coin_record import CoinRecord
from chiadoge.types.full_block import FullBlock
from chiadoge.util.db_wrapper import DBWrapper
from chiadoge.util.ints import uint32, uint64
from chiadoge.util.lru_cache import LRUCache
class CoinStore:
"""
This object handles CoinRecords in DB.
A cache is maintained for quicker access to recent coins.
"""
coin_record_db: aiosqlite.Connection
coin_record_cache: LRUCache
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
await self.coin_record_db.execute("pragma journal_mode=wal")
await self.coin_record_db.execute("pragma synchronous=2")
await self.coin_record_db.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_index bigint,"
" spent_index bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" timestamp bigint)"
)
)
# Useful for reorg lookups
await self.coin_record_db.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
await self.coin_record_db.commit()
self.coin_record_cache = LRUCache(cache_size)
return self
async def new_block(self, block: FullBlock, tx_additions: List[Coin], tx_removals: List[bytes32]):
"""
Only called for blocks which are blocks (and thus have rewards and transactions)
"""
if block.is_transaction_block() is False:
return None
assert block.foliage_transaction_block is not None
for coin in tx_additions:
record: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
False,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(record, False)
included_reward_coins = block.get_included_reward_coins()
if block.height == 0:
assert len(included_reward_coins) == 0
else:
assert len(included_reward_coins) >= 2
for coin in included_reward_coins:
reward_coin_r: CoinRecord = CoinRecord(
coin,
block.height,
uint32(0),
False,
True,
block.foliage_transaction_block.timestamp,
)
await self._add_coin_record(reward_coin_r, False)
total_amount_spent: int = 0
for coin_name in tx_removals:
total_amount_spent += await self._set_spent(coin_name, block.height)
# Sanity check, already checked in block_body_validation
assert sum([a.amount for a in tx_additions]) <= total_amount_spent
# Checks DB and DiffStores for CoinRecord with coin_name and returns it
async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
cached = self.coin_record_cache.get(coin_name)
if cached is not None:
return cached
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])
self.coin_record_cache.put(record.coin.name(), record)
return record
return None
async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return coins
async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
spent: bool = bool(row[3])
if spent:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8])
coins.append(coin_record)
return coins
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(
self,
include_spent_coins: bool,
puzzle_hash: bytes32,
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
coins = set()
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record WHERE puzzle_hash=? AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
(puzzle_hash.hex(), start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE puzzle_hash in ({"?," * (len(puzzle_hashes_db) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def rollback_to_block(self, block_index: int):
"""
Note that block_index can be negative, in which case everything is rolled back
"""
# Update memory cache
        delete_queue: List[bytes32] = []
for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
if int(coin_record.spent_block_index) > block_index:
new_record = CoinRecord(
coin_record.coin,
coin_record.confirmed_block_index,
uint32(0),
False,
coin_record.coinbase,
coin_record.timestamp,
)
self.coin_record_cache.put(coin_record.coin.name(), new_record)
if int(coin_record.confirmed_block_index) > block_index:
delete_queue.append(coin_name)
for coin_name in delete_queue:
self.coin_record_cache.remove(coin_name)
# Delete from storage
c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))
await c1.close()
c2 = await self.coin_record_db.execute(
"UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?",
(block_index,),
)
await c2.close()
# Store CoinRecord in DB and ram cache
async def _add_coin_record(self, record: CoinRecord, allow_replace: bool) -> None:
if self.coin_record_cache.get(record.coin.name()) is not None:
self.coin_record_cache.remove(record.coin.name())
cursor = await self.coin_record_db.execute(
f"INSERT {'OR REPLACE ' if allow_replace else ''}INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
record.coin.name().hex(),
record.confirmed_block_index,
record.spent_block_index,
int(record.spent),
int(record.coinbase),
str(record.coin.puzzle_hash.hex()),
str(record.coin.parent_coin_info.hex()),
bytes(record.coin.amount),
record.timestamp,
),
)
await cursor.close()
# Update coin_record to be spent in DB
async def _set_spent(self, coin_name: bytes32, index: uint32) -> uint64:
current: Optional[CoinRecord] = await self.get_coin_record(coin_name)
if current is None:
raise ValueError(f"Cannot spend a coin that does not exist in db: {coin_name}")
assert not current.spent # Redundant sanity check, already checked in block_body_validation
spent: CoinRecord = CoinRecord(
current.coin,
current.confirmed_block_index,
index,
True,
current.coinbase,
current.timestamp,
) # type: ignore # noqa
await self._add_coin_record(spent, True)
return current.coin.amount
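# Editor's sketch (hypothetical, not part of the original module): typical use
# of the store above from async code. DBWrapper construction is simplified and
# its exact signature may differ in this code base.
#
#   connection = await aiosqlite.connect("blockchain.sqlite")
#   coin_store = await CoinStore.create(DBWrapper(connection))
#   record = await coin_store.get_coin_record(coin_name)     # CoinRecord or None
#   unspent = await coin_store.get_coin_records_by_puzzle_hash(
#       include_spent_coins=False, puzzle_hash=puzzle_hash)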
|
the-stack_0_1148 | from __future__ import absolute_import
import os, itertools, json, numpy, pickle
from ann_benchmarks.plotting.metrics import all_metrics as metrics
import matplotlib.pyplot as plt
def create_pointset(data, xn, yn):
xm, ym = (metrics[xn], metrics[yn])
rev = ym["worst"] < 0
data.sort(key=lambda t: t[-1], reverse=rev) # sort by y coordinate
axs, ays, als = [], [], []
# Generate Pareto frontier
xs, ys, ls = [], [], []
last_x = xm["worst"]
comparator = \
(lambda xv, lx: xv > lx) if last_x < 0 else (lambda xv, lx: xv < lx)
for algo, algo_name, xv, yv in data:
if not xv or not yv:
continue
axs.append(xv)
ays.append(yv)
als.append(algo_name)
if comparator(xv, last_x):
last_x = xv
xs.append(xv)
ys.append(yv)
ls.append(algo_name)
return xs, ys, ls, axs, ays, als
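# Editor's note (illustrative): create_pointset() takes tuples of
# (algo, algo_name, x_value, y_value), sorts them by the y metric, and keeps
# in xs/ys/ls only the points whose x metric improves on everything seen so
# far, i.e. the Pareto frontier; axs/ays/als always contain every point. For a
# recall (x) vs. queries-per-second (y) plot this drops any run that is both
# slower and less accurate than some other run.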
def compute_metrics(true_nn_distances, res, metric_1, metric_2):
all_results = {}
for i, (definition, run) in enumerate(res):
algo = definition.algorithm
algo_name = run.attrs['name']
# cache distances to avoid access to hdf5 file
run_distances = list(run['distances'])
metric_1_value = metrics[metric_1]['function'](true_nn_distances, run_distances, run.attrs)
metric_2_value = metrics[metric_2]['function'](true_nn_distances, run_distances, run.attrs)
print('%3d: %80s %12.3f %12.3f' % (i, algo_name, metric_1_value, metric_2_value))
all_results.setdefault(algo, []).append((algo, algo_name, metric_1_value, metric_2_value))
return all_results
def compute_all_metrics(true_nn_distances, run, algo):
algo_name = run.attrs["name"]
print('--')
print(algo_name)
results = {}
# cache distances to avoid access to hdf5 file
run_distances = list(run["distances"])
run_attrs = dict(run.attrs)
for name, metric in metrics.items():
v = metric["function"](true_nn_distances, run_distances, run_attrs)
results[name] = v
if v:
print('%s: %g' % (name, v))
return (algo, algo_name, results)
def generate_n_colors(n):
vs = numpy.linspace(0.4, 1.0, 7)
colors = [(.9, .4, .4, 1.)]
def euclidean(a, b):
return sum((x-y)**2 for x, y in zip(a, b))
while len(colors) < n:
new_color = max(itertools.product(vs, vs, vs), key=lambda a: min(euclidean(a, b) for b in colors))
colors.append(new_color + (1.,))
return colors
def create_linestyles(unique_algorithms):
colors = dict(zip(unique_algorithms, generate_n_colors(len(unique_algorithms))))
linestyles = dict((algo, ['--', '-.', '-', ':'][i%4]) for i, algo in enumerate(unique_algorithms))
markerstyles = dict((algo, ['+', '<', 'o', '*', 'x'][i%5]) for i, algo in enumerate(unique_algorithms))
faded = dict((algo, (r, g, b, 0.3)) for algo, (r, g, b, a) in colors.items())
return dict((algo, (colors[algo], faded[algo], linestyles[algo], markerstyles[algo])) for algo in unique_algorithms)
def get_up_down(metric):
if metric["worst"] == float("inf"):
return "down"
return "up"
def get_left_right(metric):
if metric["worst"] == float("inf"):
return "left"
return "right"
def get_plot_label(xm, ym):
return "%(xlabel)s-%(ylabel)s tradeoff - %(updown)s and to the %(leftright)s is better" % {
"xlabel" : xm["description"], "ylabel" : ym["description"], "updown" : get_up_down(ym), "leftright" : get_left_right(xm) }
|
the-stack_0_1151 | from starflyer import Handler, redirect, asjson, AttributeMapper
from camper import BaseForm, db, BaseHandler, is_admin, logged_in, ensure_barcamp
from wtforms import *
from sfext.babel import T
from .base import BarcampBaseHandler, LocationNotFound
import uuid
class ParticipantDataEditForm(BaseForm):
"""form for defining a pareticipant data form"""
# base data
title = TextField(T("Name of field"), [validators.Length(max=50), validators.Required()],
description = T('the name of the field to be shown in the form, e.g. "t-shirt size"'),
)
description = TextAreaField(T("Description"),
description = T('please describe what the user should enter in this field.'),
)
fieldtype = RadioField(T("field type"), [validators.Required()],
choices=[
('checkbox',T('a yes/no field')),
('textfield',T('1 line of text')),
('textarea',T('multiple lines of text')),
('select',T('select one choice out of many'))],
description = T('please chose between a one-line text field or multi-line text area'),
)
choices = TextAreaField(T("Choices"),
description = T('please put each choice on a separate line.'),
)
required = BooleanField(T("field required?"),
description = T('If you enable this then the user cannot register before this field has been filled in.'),
)
class ParticipantsDataEditView(BarcampBaseHandler):
"""let the user define the participant data form fields"""
template = "admin/participants_data_edit.html"
@ensure_barcamp()
@logged_in()
@is_admin()
def get(self, slug = None):
"""render the view"""
form = ParticipantDataEditForm(self.request.form, config = self.config)
registration_form = self.barcamp.registration_form
if self.request.method == 'POST' and form.validate():
f = form.data
f['name'] = unicode(uuid.uuid4())
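            # give the field a random internal identifier, independent of its visible title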
# clean up choices
new_choices = []
for c in f['choices'].split("\n"):
choice = c.strip()
if choice:
new_choices.append((choice, choice)) # value and name are the same
f['choices'] = new_choices
self.barcamp.registration_form.append(f)
self.barcamp.save()
return redirect(self.url_for("barcamps.registration_form_editor", slug = self.barcamp.slug))
return self.render(
view = self.barcamp_view,
barcamp = self.barcamp,
title = self.barcamp.name,
form = form,
fields = self.barcamp.registration_form,
**self.barcamp
)
post = get
@ensure_barcamp()
@logged_in()
@is_admin()
def delete(self, slug = None):
"""delete a form entry"""
idx = self.request.args.get("idx", None)
rf = self.barcamp.registration_form
if idx is not None and int(idx) < len(rf) and int(idx) >= 0:
del self.barcamp.registration_form[int(idx)]
self.barcamp.save()
return redirect(self.url_for("barcamps.registration_form_editor", slug = self.barcamp.slug))
|
the-stack_0_1152 | # -*- coding: utf-8 -*-
"""Compound ZIP file plugin related functions and classes for testing."""
import zipfile
from plaso.containers import sessions
from plaso.storage.fake import writer as fake_writer
from tests.parsers import test_lib
class CompoundZIPPluginTestCase(test_lib.ParserTestCase):
"""Compound ZIP file plugin test case."""
def _ParseZIPFileWithPlugin(
self, path_segments, plugin, knowledge_base_values=None):
"""Parses a file as a ZIP file and returns an event generator.
This method will first test if a ZIP file contains the required paths
using plugin.CheckRequiredPaths() and then extracts events using
plugin.Process().
Args:
path_segments (list[str]): path segments inside the test data directory.
plugin (CompoundZIPPlugin): compound ZIP file plugin.
knowledge_base_values (Optional[dict[str, object]]): knowledge base
values.
Returns:
FakeStorageWriter: storage writer.
"""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
storage_writer.Open()
file_entry = self._GetTestFileEntry(path_segments)
parser_mediator = self._CreateParserMediator(
storage_writer, file_entry=file_entry,
knowledge_base_values=knowledge_base_values)
file_object = file_entry.GetFileObject()
try:
zip_file = zipfile.ZipFile(file_object, 'r', allowZip64=True)
required_paths_exist = plugin.CheckRequiredPaths(zip_file)
self.assertTrue(required_paths_exist)
plugin.Process(parser_mediator, zip_file=zip_file)
zip_file.close()
finally:
file_object.close()
return storage_writer
|
the-stack_0_1153 | #predicting-house-prices.py
#Day 6: Multiple Linear Regression: Predicting House Prices
#Intro to Statistics
#By derekhh
#Apr 2, 2016
from sklearn import linear_model
f, n = input().split()
f = int(f)
n = int(n)
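# f: number of features per observation; n: number of training rows that follow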
clf = linear_model.LinearRegression()
x_train = []
y_train = []
for i in range(n):
tmp = [float(n) for n in input().split()]
x_train.append(tmp[0: len(tmp) - 1])
y_train.append(tmp[len(tmp) - 1])
clf.fit(x_train, y_train)
x_test = []
n = int(input())
for i in range(n):
tmp = [float(n) for n in input().split()]
x_test.append(tmp)
y_test = clf.predict(x_test)
for y in y_test:
print(y) |
the-stack_0_1157 | address_resolver_abi = [
{
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"payable": False,
"stateMutability": "nonpayable",
"type": "constructor",
"signature": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "address",
"name": "oldOwner",
"type": "address"
},
{
"indexed": False,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnerChanged",
"type": "event",
"signature": "0xb532073b38c83145e3e5135377a08bf9aab55bc0fd7c1179cd4fb995d2a5159c"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"internalType": "address",
"name": "newOwner",
"type": "address"
}
],
"name": "OwnerNominated",
"type": "event",
"signature": "0x906a1c6bd7e3091ea86693dd029a831c19049ce77f1dce2ce0bab1cacbabce22"
},
{
"constant": False,
"inputs": [],
"name": "acceptOwnership",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
"signature": "0x79ba5097"
},
{
"constant": True,
"inputs": [
{
"internalType": "bytes32",
"name": "name",
"type": "bytes32"
}
],
"name": "getAddress",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x21f8a721"
},
{
"constant": True,
"inputs": [
{
"internalType": "bytes32",
"name": "key",
"type": "bytes32"
}
],
"name": "getSynth",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x51456061"
},
{
"constant": False,
"inputs": [
{
"internalType": "bytes32[]",
"name": "names",
"type": "bytes32[]"
},
{
"internalType": "address[]",
"name": "destinations",
"type": "address[]"
}
],
"name": "importAddresses",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
"signature": "0xab0b8f77"
},
{
"constant": False,
"inputs": [
{
"internalType": "address",
"name": "_owner",
"type": "address"
}
],
"name": "nominateNewOwner",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
"signature": "0x1627540c"
},
{
"constant": True,
"inputs": [],
"name": "nominatedOwner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x53a47bb7"
},
{
"constant": True,
"inputs": [],
"name": "owner",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x8da5cb5b"
},
{
"constant": True,
"inputs": [
{
"internalType": "bytes32",
"name": "",
"type": "bytes32"
}
],
"name": "repository",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0x187f7935"
},
{
"constant": True,
"inputs": [
{
"internalType": "bytes32",
"name": "name",
"type": "bytes32"
},
{
"internalType": "string",
"name": "reason",
"type": "string"
}
],
"name": "requireAndGetAddress",
"outputs": [
{
"internalType": "address",
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function",
"signature": "0xdacb2d01"
}
]
|
the-stack_0_1159 | """Time series estimator that predicts using the naive forecasting approach."""
import numpy as np
from rayml.model_family import ModelFamily
from rayml.pipelines.components.estimators import Estimator
from rayml.pipelines.components.transformers import TimeSeriesFeaturizer
from rayml.problem_types import ProblemTypes
from rayml.utils import infer_feature_types
class TimeSeriesBaselineEstimator(Estimator):
"""Time series estimator that predicts using the naive forecasting approach.
This is useful as a simple baseline estimator for time series problems.
Args:
gap (int): Gap between prediction date and target date and must be a positive integer. If gap is 0, target date will be shifted ahead by 1 time period. Defaults to 1.
forecast_horizon (int): Number of time steps the model is expected to predict.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Time Series Baseline Estimator"
hyperparameter_ranges = {}
"""{}"""
model_family = ModelFamily.BASELINE
"""ModelFamily.BASELINE"""
supported_problem_types = [
ProblemTypes.TIME_SERIES_REGRESSION,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
"""[
ProblemTypes.TIME_SERIES_REGRESSION,
ProblemTypes.TIME_SERIES_BINARY,
ProblemTypes.TIME_SERIES_MULTICLASS,
]"""
def __init__(self, gap=1, forecast_horizon=1, random_seed=0, **kwargs):
self._prediction_value = None
self.start_delay = forecast_horizon + gap
self._classes = None
self._num_features = None
self._delay_index = None
if gap < 0:
raise ValueError(
f"gap value must be a positive integer. {gap} was provided."
)
parameters = {"gap": gap, "forecast_horizon": forecast_horizon}
parameters.update(kwargs)
super().__init__(
parameters=parameters, component_obj=None, random_seed=random_seed
)
def fit(self, X, y=None):
"""Fits time series baseline estimator to data.
Args:
X (pd.DataFrame): The input training data of shape [n_samples, n_features].
y (pd.Series): The target training data of length [n_samples].
Returns:
self
Raises:
ValueError: If input y is None.
"""
X = infer_feature_types(X)
if y is None:
raise ValueError("Cannot fit Time Series Baseline Classifier if y is None")
vals, _ = np.unique(y, return_counts=True)
self._classes = list(vals)
return self
def predict(self, X):
"""Make predictions using fitted time series baseline estimator.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.Series: Predicted values.
Raises:
            ValueError: If the input does not contain the delayed target column created by the Time Series Featurizer.
"""
X = infer_feature_types(X)
feature_name = TimeSeriesFeaturizer.target_colname_prefix.format(
self.start_delay
)
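        # The featurizer stores the target delayed by gap + forecast_horizon in this
        # column; the baseline simply echoes that column as its prediction.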
if feature_name not in X.columns:
raise ValueError(
"Time Series Baseline Estimator is meant to be used in a pipeline with "
"a Time Series Featurizer"
)
self._num_features = X.shape[1]
self._delay_index = X.columns.tolist().index(feature_name)
return X.ww[feature_name]
def predict_proba(self, X):
"""Make prediction probabilities using fitted time series baseline estimator.
Args:
X (pd.DataFrame): Data of shape [n_samples, n_features].
Returns:
pd.DataFrame: Predicted probability values.
Raises:
            ValueError: If the underlying predict call cannot find the delayed target column created by the Time Series Featurizer.
"""
preds = self.predict(X).astype("int")
proba_arr = np.zeros((len(preds), len(self._classes)))
proba_arr[np.arange(len(preds)), preds] = 1
return infer_feature_types(proba_arr)
@property
def feature_importance(self):
"""Returns importance associated with each feature.
Since baseline estimators do not use input features to calculate predictions, returns an array of zeroes.
Returns:
np.ndarray (float): An array of zeroes.
"""
importance = np.array([0] * self._num_features)
importance[self._delay_index] = 1
return importance
|
the-stack_0_1160 | # Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import oslo_serialization
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
from cinder.volume.drivers.coprhd.helpers import consistencygroup
from cinder.volume.drivers.coprhd.helpers import volume
class Snapshot(common.CoprHDResource):
# Commonly used URIs for the 'Snapshot' module
URI_SNAPSHOTS = '/{0}/snapshots/{1}'
URI_BLOCK_SNAPSHOTS = '/block/snapshots/{0}'
URI_SEARCH_SNAPSHOT_BY_TAG = '/block/snapshots/search?tag={0}'
URI_SNAPSHOT_LIST = '/{0}/{1}/{2}/protection/snapshots'
URI_SNAPSHOT_TASKS_BY_OPID = '/vdc/tasks/{0}'
URI_RESOURCE_DEACTIVATE = '{0}/deactivate'
URI_CONSISTENCY_GROUP = "/block/consistency-groups"
URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE = (
URI_CONSISTENCY_GROUP + "/{0}/protection/snapshots/{1}")
URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE = (
URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE + "/deactivate")
URI_BLOCK_SNAPSHOTS_TAG = URI_BLOCK_SNAPSHOTS + '/tags'
VOLUMES = 'volumes'
CG = 'consistency-groups'
BLOCK = 'block'
is_timeout = False
timeout = 300
def snapshot_list_uri(self, otype, otypename, ouri):
"""Makes REST API call to list snapshots under a volume.
:param otype : block
:param otypename : either volume or consistency-group should be
provided
:param ouri : uri of volume or consistency-group
:returns: list of snapshots
"""
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_SNAPSHOT_LIST.format(otype, otypename, ouri), None)
o = common.json_decode(s)
return o['snapshot']
def snapshot_show_uri(self, otype, resource_uri, suri):
"""Retrieves snapshot details based on snapshot Name or Label.
:param otype : block
:param suri : uri of the Snapshot.
:param resource_uri: uri of the source resource
:returns: Snapshot details in JSON response payload
"""
if(resource_uri is not None and
resource_uri.find('BlockConsistencyGroup') > 0):
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_INSTANCE.format(
resource_uri,
suri),
None)
else:
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_SNAPSHOTS.format(otype, suri), None)
return common.json_decode(s)
def snapshot_query(self, storageres_type,
storageres_typename, resuri, snapshot_name):
if resuri is not None:
uris = self.snapshot_list_uri(
storageres_type,
storageres_typename,
resuri)
for uri in uris:
snapshot = self.snapshot_show_uri(
storageres_type,
resuri,
uri['id'])
if (False == common.get_node_value(snapshot, 'inactive') and
snapshot['name'] == snapshot_name):
return snapshot['id']
raise common.CoprHdError(
common.CoprHdError.SOS_FAILURE_ERR,
(_("snapshot with the name: "
"%s Not Found") % snapshot_name))
def snapshot_show_task_opid(self, otype, snap, taskid):
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"GET",
Snapshot.URI_SNAPSHOT_TASKS_BY_OPID.format(taskid),
None)
if (not s):
return None
o = common.json_decode(s)
return o
# Blocks the operation until the task is complete/error out/timeout
def block_until_complete(self, storageres_type, resuri,
task_id, synctimeout=0):
if synctimeout:
t = threading.Timer(synctimeout, common.timeout_handler)
else:
synctimeout = self.timeout
t = threading.Timer(synctimeout, common.timeout_handler)
t.start()
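        # Poll the task status until it reports ready, fails, or the timeout handler fires.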
while True:
out = self.snapshot_show_task_opid(
storageres_type, resuri, task_id)
if out:
if out["state"] == "ready":
# cancel the timer and return
t.cancel()
break
# if the status of the task is 'error' then cancel the timer
# and raise exception
if out["state"] == "error":
# cancel the timer
t.cancel()
error_message = "Please see logs for more details"
if("service_error" in out and
"details" in out["service_error"]):
error_message = out["service_error"]["details"]
raise common.CoprHdError(
common.CoprHdError.VALUE_ERR,
(_("Task: %(task_id)s is failed with error: "
"%(error_message)s") %
{'task_id': task_id,
'error_message': error_message}))
if self.is_timeout:
self.is_timeout = False
raise common.CoprHdError(common.CoprHdError.TIME_OUT,
(_("Task did not complete in %d secs."
" Operation timed out. Task in"
" CoprHD will continue") %
synctimeout))
return
def storage_resource_query(self,
storageres_type,
volume_name,
cg_name,
project,
tenant):
resourcepath = "/" + project
if tenant is not None:
resourcepath = tenant + resourcepath
resUri = None
resourceObj = None
if Snapshot.BLOCK == storageres_type and volume_name is not None:
resourceObj = volume.Volume(self.ipaddr, self.port)
resUri = resourceObj.volume_query(resourcepath, volume_name)
elif Snapshot.BLOCK == storageres_type and cg_name is not None:
resourceObj = consistencygroup.ConsistencyGroup(
self.ipaddr,
self.port)
resUri = resourceObj.consistencygroup_query(
cg_name,
project,
tenant)
else:
resourceObj = None
return resUri
def snapshot_create(self, otype, typename, ouri,
snaplabel, inactive, sync,
readonly=False, synctimeout=0):
"""New snapshot is created, for a given volume.
:param otype : block type should be provided
:param typename : either volume or consistency-groups should
be provided
:param ouri : uri of volume
:param snaplabel : name of the snapshot
:param inactive : if true, the snapshot will not activate the
synchronization between source and target volumes
:param sync : synchronous request
:param synctimeout : Query for task status for "synctimeout" secs.
If the task doesn't complete in synctimeout
secs, an exception is thrown
"""
# check snapshot is already exist
is_snapshot_exist = True
try:
self.snapshot_query(otype, typename, ouri, snaplabel)
except common.CoprHdError as e:
if e.err_code == common.CoprHdError.NOT_FOUND_ERR:
is_snapshot_exist = False
else:
raise
if is_snapshot_exist:
raise common.CoprHdError(
common.CoprHdError.ENTRY_ALREADY_EXISTS_ERR,
(_("Snapshot with name %(snaplabel)s"
" already exists under %(typename)s") %
{'snaplabel': snaplabel,
'typename': typename
}))
parms = {
'name': snaplabel,
# if true, the snapshot will not activate the synchronization
# between source and target volumes
'create_inactive': inactive
}
if readonly is True:
parms['read_only'] = readonly
body = oslo_serialization.jsonutils.dumps(parms)
# REST api call
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"POST",
Snapshot.URI_SNAPSHOT_LIST.format(otype, typename, ouri), body)
o = common.json_decode(s)
task = o["task"][0]
if sync:
return (
self.block_until_complete(
otype,
task['resource']['id'],
task["id"], synctimeout)
)
else:
return o
def snapshot_delete_uri(self, otype, resource_uri,
suri, sync, synctimeout=0):
"""Delete a snapshot by uri.
:param otype : block
:param resource_uri: uri of the source resource
:param suri : Uri of the Snapshot
:param sync : To perform operation synchronously
:param synctimeout : Query for task status for "synctimeout" secs. If
the task doesn't complete in synctimeout secs, an
exception is thrown
"""
s = None
if resource_uri.find("Volume") > 0:
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"POST",
Snapshot.URI_RESOURCE_DEACTIVATE.format(
Snapshot.URI_BLOCK_SNAPSHOTS.format(suri)),
None)
elif resource_uri.find("BlockConsistencyGroup") > 0:
(s, h) = common.service_json_request(
self.ipaddr, self.port,
"POST",
Snapshot.URI_CONSISTENCY_GROUPS_SNAPSHOT_DEACTIVATE.format(
resource_uri,
suri),
None)
o = common.json_decode(s)
task = o["task"][0]
if sync:
return (
self.block_until_complete(
otype,
task['resource']['id'],
task["id"], synctimeout)
)
else:
return o
def snapshot_delete(self, storageres_type,
storageres_typename, resource_uri,
name, sync, synctimeout=0):
snapshotUri = self.snapshot_query(
storageres_type,
storageres_typename,
resource_uri,
name)
self.snapshot_delete_uri(
storageres_type,
resource_uri,
snapshotUri,
sync, synctimeout)
|
the-stack_0_1161 | import random
import string
import cherrypy
@cherrypy.expose
class StringGeneratorWebService(object):
@cherrypy.tools.accept(media='text/plain')
def GET(self):
return cherrypy.session['mystring']
def POST(self, length=8):
some_string = ''.join(random.sample(string.hexdigits, int(length)))
cherrypy.session['mystring'] = some_string
return some_string
def PUT(self, another_string):
cherrypy.session['mystring'] = another_string
def DELETE(self):
cherrypy.session.pop('mystring', None)
if __name__ == '__main__':
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
}
}
cherrypy.quickstart(StringGeneratorWebService(), '/', conf)
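    # A hypothetical command-line session against this service (default CherryPy
    # host/port assumed; shown only as an illustration, not part of the app):
    #   curl -c cookies.txt -X POST http://127.0.0.1:8080/ -d length=10
    #   curl -b cookies.txt http://127.0.0.1:8080/
    #   curl -b cookies.txt -X PUT http://127.0.0.1:8080/ -d another_string=hello
    #   curl -b cookies.txt -X DELETE http://127.0.0.1:8080/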
|
the-stack_0_1163 | from unittest import TestCase
from tests import get_data
from pytezos.michelson.converter import build_schema, decode_micheline, encode_micheline, micheline_to_michelson
class StorageTestKT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
cls.contract = get_data('storage/zeronet/KT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU.json')
def test_storage_encoding_KT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU(self):
type_expr = self.contract['script']['code'][1]
val_expr = self.contract['script']['storage']
schema = build_schema(type_expr)
decoded = decode_micheline(val_expr, type_expr, schema)
actual = encode_micheline(decoded, schema)
self.assertEqual(val_expr, actual)
def test_storage_schema_KT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU(self):
_ = build_schema(self.contract['script']['code'][0])
def test_storage_format_KT1MqrUiQg3GyZ3wTEGJ3LqFn5Xz4jy4bLZU(self):
_ = micheline_to_michelson(self.contract['script']['code'])
_ = micheline_to_michelson(self.contract['script']['storage'])
|
the-stack_0_1164 | # Time: O(n)
# Space: O(1)
# inplace solution
class Solution(object):
def addSpaces(self, s, spaces):
"""
:type s: str
:type spaces: List[int]
:rtype: str
"""
prev = len(s)
s = list(s)
s.extend([None]*len(spaces))
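        # Work right-to-left: every character at index >= spaces[i] shifts right by
        # i + 1 slots, then a space is written at the insertion point spaces[i] + i.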
for i in reversed(xrange(len(spaces))):
for j in reversed(xrange(spaces[i], prev)):
s[j+1+i] = s[j]
s[spaces[i]+i] = ' '
prev = spaces[i]
return "".join(s)
|
the-stack_0_1165 | #!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=4
MIN_BLOCKS = 1530000
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(
r"^/DeviantCore:("
r"4.0.(0|1|2|99|99.1|99.2)|"
r"4.1.(0|99)"
r")")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if int(sline[1]) == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def dedup(ips):
'''deduplicate by address,port'''
d = {}
for ip in ips:
d[ip['ip'],ip['port']] = ip
return list(d.values())
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
def lookup_asn(net, ip):
'''
Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
if it could not be found.
'''
try:
if net == 'ipv4':
ipaddr = ip
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
for nb in ip.split(':')[:4]: # pick the first 4 nibbles
for c in nb.zfill(4): # right padded with '0'
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.query('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
net_count = collections.defaultdict(int)
asn_count = collections.defaultdict(int)
for ip in ips_ipv46:
if net_count[ip['net']] == max_per_net:
continue
asn = lookup_asn(ip['net'], ip['ip'])
if asn is None or asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)
# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
def ip_stats(ips):
hist = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1
return '%6d %6d %6d' % (hist['ipv4'], hist['ipv6'], hist['onion'])
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print('%s Initial' % (ip_stats(ips)), file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print('%s Skip entries with invalid address' % (ip_stats(ips)), file=sys.stderr)
# Skip duplicates (in case multiple seeds files were concatenated)
ips = dedup(ips)
print('%s After removing duplicates' % (ip_stats(ips)), file=sys.stderr)
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
print('%s Skip entries from suspicious hosts' % (ip_stats(ips)), file=sys.stderr)
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
print('%s Enforce minimal number of blocks' % (ip_stats(ips)), file=sys.stderr)
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
print('%s Require service bit 1' % (ip_stats(ips)), file=sys.stderr)
# Require at least 50% 30-day uptime for clearnet, 10% for onion.
req_uptime = {
'ipv4': 50,
'ipv6': 50,
'onion': 10,
}
ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]]
print('%s Require minimum uptime' % (ip_stats(ips)), file=sys.stderr)
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
print('%s Require a known and recent user agent' % (ip_stats(ips)), file=sys.stderr)
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple deviant ports, these are likely abusive
ips = filtermultiport(ips)
print('%s Filter out hosts with multiple deviant ports' % (ip_stats(ips)), file=sys.stderr)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print('%s Look up ASNs and limit results per ASN and per net' % (ip_stats(ips)), file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
the-stack_0_1166 | # -*- coding: utf-8 -*-
# File: parallel.py
import atexit
import pickle
import errno
import traceback
import itertools
import multiprocessing as mp
import os
import sys
import uuid
import weakref
from contextlib import contextmanager
import zmq
from six.moves import queue, range
from ..utils import logger
from ..utils.concurrency import (
StoppableThread, enable_death_signal, ensure_proc_terminate, start_proc_mask_signal)
from ..utils.serialize import dumps_once as dumps, loads_once as loads
from .base import DataFlow, DataFlowReentrantGuard, DataFlowTerminated, ProxyDataFlow
__all__ = ['PrefetchData', 'MultiProcessPrefetchData',
'MultiProcessRunner', 'MultiProcessRunnerZMQ', 'MultiThreadRunner',
'PrefetchDataZMQ', 'MultiThreadPrefetchData']
# from https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/__init__.py
class _ExceptionWrapper:
MAGIC = b"EXC_MAGIC"
"""Wraps an exception plus traceback to communicate across threads"""
def __init__(self, exc_info):
# It is important that we don't store exc_info, see
# NOTE [ Python Traceback Reference Cycle Problem ]
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
def pack(self):
return self.MAGIC + pickle.dumps(self)
@staticmethod
def unpack(dp):
if isinstance(dp, bytes) and dp.startswith(_ExceptionWrapper.MAGIC):
return pickle.loads(dp[len(_ExceptionWrapper.MAGIC):])
def _repeat_iter(get_itr):
while True:
yield from get_itr()
def _bind_guard(sock, name):
try:
sock.bind(name)
except zmq.ZMQError:
logger.error(
"ZMQError in socket.bind('{}'). Perhaps you're \
using pipes on a non-local file system. See documentation of MultiProcessRunnerZMQ \
for more information.".format(name))
raise
def _get_pipe_name(name):
if sys.platform.startswith('linux'):
# linux supports abstract sockets: http://api.zeromq.org/4-1:zmq-ipc
pipename = "ipc://@{}-pipe-{}".format(name, str(uuid.uuid1())[:8])
pipedir = os.environ.get('TENSORPACK_PIPEDIR', None)
if pipedir is not None:
logger.warn("TENSORPACK_PIPEDIR is not used on Linux any more! Abstract sockets will be used.")
else:
pipedir = os.environ.get('TENSORPACK_PIPEDIR', None)
if pipedir is not None:
logger.info("ZMQ uses TENSORPACK_PIPEDIR={}".format(pipedir))
else:
pipedir = '.'
assert os.path.isdir(pipedir), pipedir
filename = '{}/{}-pipe-{}'.format(pipedir.rstrip('/'), name, str(uuid.uuid1())[:6])
assert not os.path.exists(filename), "Pipe {} exists! You may be unlucky.".format(filename)
pipename = "ipc://{}".format(filename)
return pipename
def del_weakref(x):
o = x()
if o is not None:
o.__del__()
@contextmanager
def _zmq_catch_error(name):
try:
yield
except zmq.ContextTerminated:
logger.info("[{}] Context terminated.".format(name))
raise DataFlowTerminated()
except zmq.ZMQError as e:
if e.errno == errno.ENOTSOCK: # socket closed
logger.info("[{}] Socket closed.".format(name))
raise DataFlowTerminated()
else:
raise
except Exception:
raise
class _MultiProcessZMQDataFlow(DataFlow):
def __init__(self):
assert os.name != 'nt', "ZMQ IPC doesn't support windows!"
self._reset_done = False
self._procs = []
def reset_state(self):
"""
All forked dataflows should only be reset **once and only once** in spawned processes.
Subclasses should call this method with super.
"""
assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
self._reset_done = True
# __del__ not guaranteed to get called at exit
atexit.register(del_weakref, weakref.ref(self))
def _start_processes(self):
start_proc_mask_signal(self._procs)
def __del__(self):
try:
if not self._reset_done:
return
if not self.context.closed:
self.socket.close(0)
self.context.destroy(0)
for x in self._procs:
x.terminate()
x.join(5)
print("{} successfully cleaned-up.".format(type(self).__name__))
except Exception:
pass
class MultiProcessRunner(ProxyDataFlow):
"""
Running a DataFlow in >=1 processes using Python multiprocessing utilities.
It will fork the process that calls :meth:`__init__`, collect datapoints from `ds` in each
process by a Python :class:`multiprocessing.Queue`.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that the process will be forked ``num_proc`` times.
There will be ``num_proc`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_proc=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_proc>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_proc`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
2. This has more serialization overhead than :class:`MultiProcessRunnerZMQ` when data is large.
3. You can nest like this: ``MultiProcessRunnerZMQ(MultiProcessRunner(df, num_proc=a), num_proc=b)``.
A total of ``a`` instances of ``df`` worker processes will be created.
4. Fork happens in `__init__`. `reset_state()` is a no-op.
DataFlow in the worker processes will be reset at the time of fork.
5. This DataFlow does support windows. However, Windows requires more strict picklability on processes,
which means that some code that's forkable on Linux may not be forkable on Windows. If that happens you'll
need to re-organize some part of code that's not forkable.
"""
class _Worker(mp.Process):
def __init__(self, ds, queue, idx):
super(MultiProcessRunner._Worker, self).__init__()
self.ds = ds
self.queue = queue
self.idx = idx
def run(self):
enable_death_signal(_warn=self.idx == 0)
# reset all ds so each process will produce different data
self.ds.reset_state()
while True:
for dp in self.ds:
self.queue.put(dp)
def __init__(self, ds, num_prefetch, num_proc):
"""
Args:
ds (DataFlow): input DataFlow.
num_prefetch (int): size of the queue to hold prefetched datapoints.
Required.
num_proc (int): number of processes to use. Required.
"""
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#the-spawn-and-forkserver-start-methods
if os.name == 'nt':
logger.warn("MultiProcessRunner does support Windows. \
However, Windows requires more strict picklability on processes, which may \
lead to failure on some of the code.")
super(MultiProcessRunner, self).__init__(ds)
try:
self._size = len(ds)
except NotImplementedError:
self._size = -1
assert num_proc > 0, num_proc
assert num_prefetch > 0, num_prefetch
self.num_proc = num_proc
self.num_prefetch = num_prefetch
if num_proc > 1:
logger.info("[MultiProcessRunner] Will fork a dataflow more than one times. "
"This assumes the datapoints are i.i.d.")
self.queue = mp.Queue(self.num_prefetch)
self.procs = [MultiProcessRunner._Worker(self.ds, self.queue, idx)
for idx in range(self.num_proc)]
ensure_proc_terminate(self.procs)
self._reset_done = False
def __iter__(self):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
dp = self.queue.get()
yield dp
def reset_state(self):
assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
self._reset_done = True
start_proc_mask_signal(self.procs)
class MultiProcessRunnerZMQ(_MultiProcessZMQDataFlow):
"""
Run a DataFlow in >=1 processes, with ZeroMQ for communication.
It will fork the calling process of :meth:`reset_state()`,
and collect datapoints from the given dataflow in each process by ZeroMQ IPC pipe.
This is typically faster than :class:`MultiProcessRunner`.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that the process will be forked ``num_proc`` times.
There will be ``num_proc`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_proc=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_proc>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_proc`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
2. `reset_state()` of the given dataflow will be called **once and only once** in the worker processes.
3. The fork of processes happened in this dataflow's `reset_state()` method.
Please note that forking a TensorFlow GPU session may be unsafe.
If you're managing this dataflow on your own,
it's better to fork before creating the session.
4. (Fork-safety) After the fork has happened, this dataflow becomes not fork-safe.
i.e., if you fork an already reset instance of this dataflow,
it won't be usable in the forked process. Therefore, do not nest two `MultiProcessRunnerZMQ`.
5. (Thread-safety) ZMQ is not thread safe. Therefore, do not call :meth:`get_data` of the same dataflow in
more than 1 threads.
6. This dataflow does not support windows. Use `MultiProcessRunner` which works on windows.
7. (For Mac only) A UNIX named pipe will be created in the current directory.
However, certain non-local filesystem such as NFS/GlusterFS/AFS doesn't always support pipes.
You can change the directory by ``export TENSORPACK_PIPEDIR=/other/dir``.
In particular, you can use somewhere under '/tmp' which is usually local.
Note that some non-local FS may appear to support pipes and code
may appear to run but crash with bizarre error.
Also note that ZMQ limits the maximum length of pipe path.
If you hit the limit, you can set the directory to a softlink
which points to a local directory.
"""
class _Worker(mp.Process):
def __init__(self, ds, conn_name, hwm, idx):
super(MultiProcessRunnerZMQ._Worker, self).__init__()
self.ds = ds
self.conn_name = conn_name
self.hwm = hwm
self.idx = idx
def run(self):
enable_death_signal(_warn=self.idx == 0)
self.ds.reset_state()
itr = _repeat_iter(lambda: self.ds)
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.set_hwm(self.hwm)
socket.connect(self.conn_name)
try:
while True:
try:
dp = next(itr)
socket.send(dumps(dp), copy=False)
except Exception:
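                        # Ship the exception to the parent through the pipe so it can be
                        # re-raised there, then re-raise locally to stop this worker.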
dp = _ExceptionWrapper(sys.exc_info()).pack()
socket.send(dumps(dp), copy=False)
raise
# sigint could still propagate here, e.g. when nested
except KeyboardInterrupt:
pass
finally:
socket.close(0)
context.destroy(0)
def __init__(self, ds, num_proc=1, hwm=50):
"""
Args:
ds (DataFlow): input DataFlow.
num_proc (int): number of processes to use.
hwm (int): the zmq "high-water mark" (queue size) for both sender and receiver.
"""
super(MultiProcessRunnerZMQ, self).__init__()
self.ds = ds
self.num_proc = num_proc
self._hwm = hwm
if num_proc > 1:
logger.info("[MultiProcessRunnerZMQ] Will fork a dataflow more than one times. "
"This assumes the datapoints are i.i.d.")
try:
self._size = ds.__len__()
except NotImplementedError:
self._size = -1
def _recv(self):
ret = loads(self.socket.recv(copy=False))
exc = _ExceptionWrapper.unpack(ret)
if exc is not None:
logger.error("Exception '{}' in worker:".format(str(exc.exc_type)))
raise exc.exc_type(exc.exc_msg)
return ret
def __len__(self):
return self.ds.__len__()
def __iter__(self):
with self._guard, _zmq_catch_error('MultiProcessRunnerZMQ'):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
yield self._recv()
def reset_state(self):
super(MultiProcessRunnerZMQ, self).reset_state()
self._guard = DataFlowReentrantGuard()
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PULL)
self.socket.set_hwm(self._hwm)
pipename = _get_pipe_name('dataflow')
_bind_guard(self.socket, pipename)
self._procs = [MultiProcessRunnerZMQ._Worker(self.ds, pipename, self._hwm, idx)
for idx in range(self.num_proc)]
self._start_processes()
class MultiThreadRunner(DataFlow):
"""
Create multiple dataflow instances and run them each in one thread.
Collect outputs from them with a queue.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that each thread will create a dataflow iterator.
There will be ``num_thread`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_thread=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_thread>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_thread`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
"""
class _Worker(StoppableThread):
def __init__(self, get_df, queue):
super(MultiThreadRunner._Worker, self).__init__()
self.df = get_df()
assert isinstance(self.df, DataFlow), self.df
self.queue = queue
self.daemon = True
def run(self):
self.df.reset_state()
try:
while True:
for dp in self.df:
if self.stopped():
return
self.queue_put_stoppable(self.queue, dp)
except Exception:
if self.stopped():
pass # skip duplicated error messages
else:
raise
finally:
self.stop()
def __init__(self, get_df, num_prefetch, num_thread):
"""
Args:
get_df ( -> DataFlow): a callable which returns a DataFlow.
Each thread will call this function to get the DataFlow to use.
Therefore do not return the same DataFlow object for each call,
unless your dataflow is stateless.
num_prefetch (int): size of the queue
num_thread (int): number of threads
"""
assert num_thread > 0, num_thread
assert num_prefetch > 0, num_prefetch
self.num_thread = num_thread
self.queue = queue.Queue(maxsize=num_prefetch)
self.threads = [
MultiThreadRunner._Worker(get_df, self.queue)
for _ in range(num_thread)]
try:
self._size = self.__len__()
except NotImplementedError:
self._size = -1
def reset_state(self):
for th in self.threads:
th.df.reset_state()
th.start()
def __len__(self):
return self.threads[0].df.__len__()
def __iter__(self):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
yield self.queue.get()
def __del__(self):
for p in self.threads:
if p.is_alive():
p.stop()
p.join()
class PlasmaPutData(ProxyDataFlow):
"""
Put each data point to plasma shared memory object store, and yield the object id instead.
Experimental.
"""
def __init__(self, ds, socket="/tmp/plasma"):
self._socket = socket
super(PlasmaPutData, self).__init__(ds)
def reset_state(self):
super(PlasmaPutData, self).reset_state()
self.client = plasma.connect(self._socket, "", 0)
def __iter__(self):
for dp in self.ds:
oid = self.client.put(dp)
yield [oid.binary()]
class PlasmaGetData(ProxyDataFlow):
"""
Take plasma object id from a DataFlow, and retrieve it from plasma shared
memory object store.
Experimental.
"""
def __init__(self, ds, socket="/tmp/plasma"):
self._socket = socket
super(PlasmaGetData, self).__init__(ds)
def reset_state(self):
super(PlasmaGetData, self).reset_state()
self.client = plasma.connect(self._socket, "", 0)
def __iter__(self):
for dp in self.ds:
oid = plasma.ObjectID(dp[0])
dp = self.client.get(oid)
yield dp
plasma = None
# These plasma code is only experimental
# try:
# import pyarrow.plasma as plasma
# except ImportError:
# from ..utils.develop import create_dummy_class
# PlasmaPutData = create_dummy_class('PlasmaPutData', 'pyarrow') # noqa
# PlasmaGetData = create_dummy_class('PlasmaGetData', 'pyarrow') # noqa
# The old inappropriate names:
PrefetchData = MultiProcessRunner
MultiProcessPrefetchData = MultiProcessRunner
PrefetchDataZMQ = MultiProcessRunnerZMQ
MultiThreadPrefetchData = MultiThreadRunner
if __name__ == '__main__':
import time
from .raw import DataFromGenerator
from .common import FixedSizeData
x = DataFromGenerator(itertools.count())
x = FixedSizeData(x, 100)
x = MultiProcessRunnerZMQ(x, 2)
x.reset_state()
for idx, dp in enumerate(x):
print(dp)
time.sleep(0.1)
|
the-stack_0_1167 | from core.himesis import Himesis
import uuid
class HSon2Man(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule Son2Man.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HSon2Man, self).__init__(name='HSon2Man', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """Son2Man"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Son2Man')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
self.vs[2]["attr1"] = """Son2Man"""
# match class Child() node
self.add_node()
self.vs[3]["mm__"] = """Child"""
self.vs[3]["attr1"] = """+"""
# match class Family() node
self.add_node()
self.vs[4]["mm__"] = """Family"""
self.vs[4]["attr1"] = """1"""
# apply class Man() node
self.add_node()
self.vs[5]["mm__"] = """Man"""
self.vs[5]["attr1"] = """1"""
# match association Child--family-->Family node
self.add_node()
self.vs[6]["attr1"] = """family"""
self.vs[6]["mm__"] = """directLink_S"""
# match association Family--sons-->Child node
self.add_node()
self.vs[7]["attr1"] = """sons"""
self.vs[7]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,3), # matchmodel -> match_class Child()
(0,4), # matchmodel -> match_class Family()
(1,5), # applymodel -> -> apply_class Man()
(3,6), # match_class Child() -> association family
(6,4), # association family -> match_class Family()
(4,7), # match_class Family() -> association sons
(7,3), # association sons -> match_class Child()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((5,'fullName'),('concat',((3,'firstName'),(4,'lastName')))), ]
|
the-stack_0_1168 | """
This module lets you practice one form of the ACCUMULATOR pattern,
namely, the "IN GRAPHICS" form which features:
-- DRAWING OBJECTS via ACCUMULATING positions and/or sizes,
as in: x = x + pixels
Additionally, it emphasizes that you must
** DO A CONCRETE EXAMPLE BY HAND **
before you can implement a solution to the problem in Python.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Mitch Lugsch.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
# ----------------------------------------------------------------------
# Students: As you work each of these problems, ask yourself:
# 1. Do I need a loop?
# If so, HOW MANY LOOPS?
#
# 2. Where I need a loop, what needs to happen:
# -- BEFORE the loop?
# -- IN the loop?
# -- AFTER the loop?
# ----------------------------------------------------------------------
def main():
""" Calls the TEST functions in this module. """
run_test_draw_squares_from_circle()
run_test_draw_circles_from_rectangle()
run_test_draw_lines_from_rectangles()
def run_test_draw_squares_from_circle():
""" Tests the draw_squares_from_circle function. """
print()
print('--------------------------------------------------')
print('Testing the draw_squares_from_circle function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# ------------------------------------------------------------------
# TWO tests on ONE window.
# ------------------------------------------------------------------
title = 'Tests 1 and 2 of DRAW_SQUARES_FROM_CIRCLE: '
title = title + ' 7 little squares from green circle, 4 big squares'
window1 = rg.RoseWindow(650, 350, title)
# Test 1:
circle = rg.Circle(rg.Point(100, 100), 20)
circle.fill_color = 'green'
draw_squares_from_circle(7, circle, window1)
# Test 2:
circle = rg.Circle(rg.Point(350, 70), 50)
draw_squares_from_circle(4, circle, window1)
window1.close_on_mouse_click()
# ------------------------------------------------------------------
# A third test on ANOTHER window.
# ------------------------------------------------------------------
title = 'Test 3 of DRAW_SQUARES_FROM_CIRCLE: '
title += ' 20 teeny squares from blue circle!'
window2 = rg.RoseWindow(525, 300, title)
# Test 3:
circle = rg.Circle(rg.Point(50, 50), 10)
circle.fill_color = 'blue'
draw_squares_from_circle(20, circle, window2)
window2.close_on_mouse_click()
def draw_squares_from_circle(n, circle, window):
"""
What comes in: Three arguments:
-- A positive integer n.
-- An rg.Circle.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
See draw_squares_from_circle.pdf in this project for pictures
that may help you better understand the following specification:
First draws the given rg.Circle on the given rg.RoseWindow.
Then draws n rg.Squares on the given rg.RoseWindow, such that:
-- The first rg.Square circumscribes the given rg.Circle.
-- Each subsequent rg.Square has its upper-left quarter
on top of the lower-right quarter of the previous rg.Square,
so that the squares form an overlapping sequence
that goes down and to the right.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type n: int
:type circle: rg.Circle
:type window: rg.RoseWindow
"""
# ------------------------------------------------------------------
# DONE: 2. Implement and test this function.
# Tests have been written for you (above).
#
# CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,
# as in draw_row_of_circles in m1e,
# instead of directly using the loop variable.
#
####################################################################
# HINT: To figure out the code that computes the necessary
# positions of each square,
# ** FIRST DO A CONCRETE EXAMPLE BY HAND! **
####################################################################
# ------------------------------------------------------------------
point = circle.center
x = point.x
y = point.y
circle.attach_to(window)
for _ in range(n): # Loop that does NOT use its index variable
point = rg.Point(x, y)
square = rg.Square(point, circle.radius * 2)
# Attach the object(s) to the window.
square.attach_to(window)
# Increment x and y
x = x + (circle.radius)
y = y + (circle.radius)
window.render()
def run_test_draw_circles_from_rectangle():
""" Tests the draw_circles_from_rectangle function. """
print()
print('--------------------------------------------------')
print('Testing the draw_circles_from_rectangle function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# ------------------------------------------------------------------
# DONE: 3. Implement this TEST function.
# It TESTS the draw_circles_from_rectangle function
# defined below. Include at least ** 3 ** tests, of which
# *** at least TWO tests are on ONE window and
# *** at least ONE test is on a DIFFERENT window.
#
####################################################################
# HINT: Consider using the same test cases as suggested by the
# pictures in draw_circles_from_rectangle.pdf in this project.
# Follow the same form as the example in a previous problem.
####################################################################
# ------------------------------------------------------------------
title = 'Tests 1 and 2 of DRAW_CIRCLES_FROM_RECTANGLE: '
window1 = rg.RoseWindow(720, 500, title)
# Test 1:
rectangle = rg.Rectangle(rg.Point(400, 250), rg.Point(440, 325))
rectangle.fill_color = 'green'
rectangle.outline_color = 'black'
rectangle.outline_thickness = 5
draw_circles_from_rectangle(4, 5, rectangle, window1)
# Test 2:
rectangle = rg.Rectangle(rg.Point(600, 400), rg.Point(500, 450))
rectangle.fill_color = 'blue'
rectangle.outline_color = 'red'
rectangle.outline_thickness = 3
draw_circles_from_rectangle(8, 3, rectangle, window1)
window1.close_on_mouse_click()
title = 'Test 3 of DRAW_CIRCLES_FROM_RECTANGLE: '
window2 = rg.RoseWindow(620, 380, title)
# Test 3:
rectangle = rg.Rectangle(rg.Point(350, 280), rg.Point(375, 330))
rectangle.fill_color = 'yellow'
rectangle.outline_color = 'brown'
rectangle.outline_thickness = 5
draw_circles_from_rectangle(6, 10, rectangle, window2)
window2.close_on_mouse_click()
def draw_circles_from_rectangle(m, n, rectangle, window):
"""
What comes in: Four arguments:
-- Positive integers m and n.
-- An rg.Rectangle.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
See draw_circles_from_rectangle.pdf in this project for pictures
that may help you better understand the following specification:
First draws the given rg.Rectangle on the given rg.RoseWindow.
Then draws m rg.Circles on the given rg.RoseWindow, such that:
-- The diameter of each rg.Circle is the same as the height
of the given rg.Rectangle.
-- The first rg.Circle is immediately to the left of the
given rg.Rectangle
-- Each subsequent rg.Circle is immediately to the left
of the previous rg.Circle, so that the circles form a row
that goes to the left.
-- Each rg. Circle has the same fill_color as the given
rg.Rectangle (and has no outline_color).
Then draws n rg.Circles on the given RoseWindow, such that:
-- The diameter of each rg.Circle is the same as the width
of the given rg.Rectangle.
-- The first rg.Circle is immediately above the
given rg.Rectangle
-- Each subsequent rg.Circle is immediately above the previous
rg.Circle, so that the circles form a column that goes up.
-- Each rg.Circle has the same outline_color as the given
rg.Rectangle (and has no fill_color).
Must ** render ** but ** NOT close ** the window.
Type hints:
:type m: int
:type n: int
:type rectangle: rg.Rectangle
:type window: rg.RoseWindow
"""
# ------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# Tests have been written for you (above).
#
# CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,
# as in draw_row_of_circles in m1e,
# instead of directly using the loop variable.
#
####################################################################
# HINT: To figure out the code that computes the necessary
# positions of each circle,
# ** FIRST DO A CONCRETE EXAMPLE BY HAND! **
####################################################################
# ------------------------------------------------------------------
rectangle_center = rectangle.get_center()
x = rectangle_center.x
y = rectangle_center.y
width = rectangle.get_width()
height = rectangle.get_height()
rectangle.attach_to(window)
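    # Row of m circles going left: each diameter equals the rectangle's height
    # and each circle copies the rectangle's fill color.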
for _ in range(m): # Loop that does NOT use its index variable
center1 = rg.Point(x - ((width / 2) + (height / 2)), rectangle_center.y)
circle1 = rg.Circle(center1, height / 2)
circle1.fill_color = rectangle.fill_color
# Attach the object(s) to the window.
circle1.attach_to(window)
# Increment x
x = x - height
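    # Column of n circles going up: each diameter equals the rectangle's width
    # and each circle copies the rectangle's outline color.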
for _ in range(n):
center2 = rg.Point(rectangle_center.x, y - ((height / 2) + (width / 2)))
circle2 = rg.Circle(center2, width / 2)
circle2.outline_color = rectangle.outline_color
# Attach the object(s) to the window.
circle2.attach_to(window)
# Increment y
y = y - width
window.render()
def run_test_draw_lines_from_rectangles():
""" Tests the draw_lines_from_rectangles function. """
print()
print('--------------------------------------------------')
print('Testing the draw_lines_from_rectangles function:')
print(' See the graphics windows that pop up.')
print('--------------------------------------------------')
# TWO tests on ONE window.
title = 'Tests 1 & 2 of DRAW_LINES_FROM_RECTANGLES:'
title += ' 5 lines, 8 lines!'
window1 = rg.RoseWindow(900, 400, title)
rectangle1 = rg.Rectangle(rg.Point(100, 25), rg.Point(150, 125))
rectangle2 = rg.Rectangle(rg.Point(300, 150), rg.Point(400, 175))
rectangle1.outline_color = 'red'
rectangle2.outline_color = 'blue'
draw_lines_from_rectangles(rectangle1, rectangle2, 5, window1)
rectangle1 = rg.Rectangle(rg.Point(870, 30), rg.Point(750, 100))
rectangle2 = rg.Rectangle(rg.Point(700, 90), rg.Point(650, 60))
rectangle2.outline_color = 'green'
draw_lines_from_rectangles(rectangle1, rectangle2, 8, window1)
window1.close_on_mouse_click()
# A third test on ANOTHER window.
title = 'Test 3 of DRAW_LINES_FROM_RECTANGLES: 11 lines!'
window2 = rg.RoseWindow(700, 700, title)
rectangle1 = rg.Rectangle(rg.Point(550, 200), rg.Point(650, 100))
rectangle2 = rg.Rectangle(rg.Point(600, 50), rg.Point(650, 75))
rectangle1.outline_color = 'brown'
rectangle2.outline_color = 'cyan'
rectangle2.outline_thickness = 10
draw_lines_from_rectangles(rectangle1, rectangle2, 11, window2)
window2.close_on_mouse_click()
def draw_lines_from_rectangles(rectangle1, rectangle2, n, window):
"""
What comes in: Four arguments:
-- Two rg.Rectangles.
-- A positive integer n.
-- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
See draw_lines_from_rectangles.pdf in this project
for pictures that may help you better understand
the following specification:
First draws the given rg.Rectangles on the given rg.RoseWindow.
Then draws n rg.Lines on the given rg.RoseWindow, such that:
          -- The 1st rg.Line goes from the center of the
             1st rg.Rectangle to the center of the 2nd rg.Rectangle.
-- The 2nd rg.Line goes from the lower-left corner of the
1st rg.Rectangle and is parallel to the 1st rg.Line,
with the same length and direction as the 1st rg.Line.
-- Subsequent rg.Lines are shifted from the previous rg.Line in
the same way that the 2nd rg.Line is shifted from the 1st.
-- Each of the rg.Lines has thickness 5.
-- The colors of the rg.Lines alternate, as follows:
- The 1st, 3rd, 5th, ... rg.Line has color R1_color
- The 2nd, 4th, 6th, ... rg.Line has color R2_color
where
- R1_color is the outline color of the 1st rg.Rectangle
- R2_color is the outline color of the 2nd rg.Rectangle
Must ** render ** but ** NOT close ** the window.
Type hints:
:type rectangle1: rg.Rectangle
:type rectangle2: rg.Rectangle
:type n: int
:type window: rg.RoseWindow
"""
# ------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Tests have been written for you (above).
#
# CONSIDER using the ACCUMULATOR IN GRAPHICS pattern,
# as in draw_row_of_circles in m1e,
# instead of directly using the loop variable.
#
####################################################################
# HINT: To figure out the code that computes the necessary
# endpoints for each line,
# ** FIRST DO A CONCRETE EXAMPLE BY HAND! **
####################################################################
# ------------------------------------------------------------------
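    # Worked example by hand (illustrative numbers, not taken from the tests
    # above): if rectangle1 is 100 wide and 50 tall with center (200, 100),
    # its lower-left corner is (200 - 50, 100 + 25) = (150, 125), so each
    # successive line's endpoints shift by (-50, +25) from the previous
    # line's. That per-step shift is exactly what the loop below applies
    # to (x1, y1) and (x2, y2).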
center_R1 = rectangle1.get_center()
center_R2 = rectangle2.get_center()
width_R1 = rectangle1.get_width()
height_R1 = rectangle1.get_height()
x1 = center_R1.x
y1 = center_R1.y
x2 = center_R2.x
y2 = center_R2.y
rectangle1.attach_to(window)
rectangle2.attach_to(window)
    for k in range(n):
        # The 1st, 3rd, 5th, ... lines (k even) use rectangle1's outline
        # color; the 2nd, 4th, 6th, ... lines use rectangle2's.
        line = rg.Line(rg.Point(x1, y1), rg.Point(x2, y2))
        line.thickness = 5
        if k % 2 == 0:
            line.color = rectangle1.outline_color
        else:
            line.color = rectangle2.outline_color
        # Attach the object(s) to the window.
        line.attach_to(window)
        # Shift both endpoints by the offset from rectangle1's center
        # to its lower-left corner.
        x1 = x1 - (width_R1 / 2)
        y1 = y1 + (height_R1 / 2)
        x2 = x2 - (width_R1 / 2)
        y2 = y2 + (height_R1 / 2)
window.render()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
the-stack_0_1169 | # -*- coding: utf-8 -*-
import datetime
import os
from pyvirtualdisplay import Display
from selenium import webdriver
import constants
# Choose and configure the browser of your choice
def get_browser():
# # These work on Mac
# return webdriver.Chrome()
# return webdriver.Firefox()
# On Linux you need to initialize a display
global display
display = Display(visible=0, size=(1024, 768))
display.start()
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--ignore-certificate-errors")
    options.add_experimental_option("useAutomationExtension", False)
return webdriver.Chrome(options=options)
# If present and callable, it will be called at the end of the whole test suite
def teardown():
global display
try:
display.stop()
except NameError:
pass
# A failed login by a provider will be retried so many times as set here
MAX_LOGIN_ATTEMPTS = 3
# Multiplies the wait times set in expected values
WAIT_MULTIPLIER = 1
# Minimum wait time
MIN_WAIT = 0
# The host and port where the tested ap should listen.
HOST = '127.0.0.1'
PORT = 443
# The host alias set in the /etc/hosts file.
# The actual tests will navigate selenium browser to this host.
# This is necessary because some providers don't support localhost as the
# callback url.
HOST_ALIAS = 'authomatic.org'
# Only frameworks included here will be tested.
INCLUDE_FRAMEWORKS = [
# 'django',
'flask', # Runs with https
'pyramid', # Runs with https
]
# Only providers included here will be tested.
# Leave commented-out entries (with explanation) to prevent trying to re-add tests for services
# Which aren't testable in an automated environment.
INCLUDE_PROVIDERS = [
# OAuth 1.0a - This mostly deprecated as a service 'in the wild' - we should drop support.
# 'bitbucket',
# 'flickr',
# 'plurk',
'twitter',
# 'tumblr',
# 'ubuntuone', # UbuntuOne service is no longer available
# 'vimeo',
# Xero requires creation of a new trial project every month which makes
# the setup of the automated test too laborious to support it.
# 'xero',
# 'xing',
# 'yahoo',
# OAuth 2.0
# 'amazon', # Asks for a captcha (cannot be automated)
# 'behance', # doesn't support third party authorization anymore.
# 'bitly', # deprecated for test suite refactoring - consider re-enabling
# 'deviantart', # deprecated for test suite refactoring - consider re-enabling
'facebook',
# 'foursquare', # deprecated for test suite refactoring - consider re-enabling
# 'google', # deprecated for test suite refactoring - consider re-enabling
# 'github', # Asks for 2FA/one-time-pass verification in Travis CI environment.
# 'linkedin', # # Asks for verification (captcha) in the login form in Travis CI environment.
# 'paypal', # deprecated for test suite refactoring - consider re-enabling
# 'reddit', # deprecated for test suite refactoring - consider re-enabling
# 'vk', # deprecated for test suite refactoring - consider re-enabling
# 'windowslive', # Asks for verification (captcha) in the login form in Travis CI environment.
# 'yammer', # deprecated for test suite refactoring - consider re-enabling
# 'yandex', # deprecated for test suite refactoring - consider re-enabling
# OpenID
# 'openid_livejournal', # Login and password elements are not visible.
# 'openid_verisignlabs', # deprecated for test suite refactoring - consider re-enabling
# 'openid_wordpress', # deprecated for test suite refactoring - consider re-enabling
# 'openid_yahoo', # deprecated for test suite refactoring - consider re-enabling
]
# Recommended setup for Travis CI environment.
if os.environ.get('TRAVIS'):
MAX_LOGIN_ATTEMPTS = 20
WAIT_MULTIPLIER = 2
MIN_WAIT = 2
# Use these constants if you have the same user info by all tested providers.
EMAIL = '[email protected]'
FIRST_NAME = 'Authomatic'
LAST_NAME = 'Testuser'
NAME = FIRST_NAME + ' ' + LAST_NAME
USERNAME = 'authomaticproject'
USERNAME_REVERSE = 'projectauthomatic'
NICKNAME = 'Mr. AP'
BIRTH_YEAR = 2000
BIRTH_MONTH = 5
BIRTH_DAY = 5
BIRTH_DATE = datetime.datetime(BIRTH_YEAR, BIRTH_MONTH, BIRTH_DAY)
CITY = 'London'
COUNTRY = 'Great Britain'
COUNTRY_ISO2 = 'gb'
POSTAL_CODE = 'EC1A1DH'
PHONE = '??????????'
PHONE_INTERNATIONAL = '0044??????????'
GENDER = constants.GENDER_MALE
LOCALE = 'en_UK'
LOCATION = CITY + ', ' + COUNTRY
# Common values for all providers
COMMON = {
# Could be same if the user sets it so
'user_birth_date': BIRTH_DATE,
'user_birth_day': BIRTH_DAY,
'user_birth_month': BIRTH_MONTH,
'user_birth_year': BIRTH_YEAR,
'user_login': EMAIL,
'user_email': EMAIL,
'user_first_name': FIRST_NAME,
'user_last_name': LAST_NAME,
'user_name': NAME,
'user_username': USERNAME,
'user_username_reverse': USERNAME_REVERSE,
'user_nickname': NICKNAME,
'user_city': CITY,
'user_country': COUNTRY,
'user_gender': GENDER,
'user_phone': PHONE,
'user_postal_code': POSTAL_CODE,
'user_locale': LOCALE,
'user_location': LOCATION,
# It is not a good idea to have the same password for all providers
# 'user_password': '##########',
# Provider and user specific value
# 'user_id': '',
# 'user_locale': None,
# 'user_timezone': None,
# Provider specific format
# 'user_picture': '',
# 'user_link': '',
# Provider specific value
# 'consumer_key': '',
# 'consumer_secret': '',
}
# Values from COMMON will be overridden by values from PROVIDERS[provider_name]
# if set.
# Since this file is public, only put providers in here if they aren't secret.
# Otherwise, secret providers should be added to config_secret.py[.enc]
PROVIDERS = {
# # OAuth 2.0
# 'facebook': {
# 'consumer_key': '##########',
# 'consumer_secret': '##########',
# 'user_password': '##########',
# 'user_id': '??????????',
# },
}
|
the-stack_0_1171 | from enum import Enum
from typing import TYPE_CHECKING, Callable, Dict, Optional
from prompt_toolkit.clipboard import ClipboardData
if TYPE_CHECKING:
from .key_processor import KeyPressEvent
from .key_bindings.vi import TextObject
__all__ = [
'InputMode',
'CharacterFind',
'ViState',
]
class InputMode(str, Enum):
value: str
INSERT = 'vi-insert'
INSERT_MULTIPLE = 'vi-insert-multiple'
NAVIGATION = 'vi-navigation' # Normal mode.
REPLACE = 'vi-replace'
class CharacterFind:
def __init__(self, character: str, backwards: bool = False) -> None:
self.character = character
self.backwards = backwards
class ViState:
"""
Mutable class to hold the state of the Vi navigation.
"""
def __init__(self) -> None:
#: None or CharacterFind instance. (This is used to repeat the last
#: search in Vi mode, by pressing the 'n' or 'N' in navigation mode.)
self.last_character_find = None
# When an operator is given and we are waiting for text object,
# -- e.g. in the case of 'dw', after the 'd' --, an operator callback
# is set here.
self.operator_func: Optional[Callable[['KeyPressEvent', 'TextObject'], None]] = None
self.operator_arg: Optional[int] = None
#: Named registers. Maps register name (e.g. 'a') to
#: :class:`ClipboardData` instances.
self.named_registers: Dict[str, ClipboardData] = {}
#: The Vi mode we're currently in to.
self.__input_mode = InputMode.INSERT
#: Waiting for digraph.
self.waiting_for_digraph = False
self.digraph_symbol1: Optional[str] = None # (None or a symbol.)
#: When true, make ~ act as an operator.
self.tilde_operator = False
#: Register in which we are recording a macro.
#: `None` when not recording anything.
# Note that the recording is only stored in the register after the
# recording is stopped. So we record in a separate `current_recording`
# variable.
self.recording_register: Optional[str] = None
self.current_recording = ''
# Temporary navigation (normal) mode.
# This happens when control-o has been pressed in insert or replace
# mode. The user can now do one navigation action and we'll return back
# to insert/replace.
self.temporary_navigation_mode = False
@property
def input_mode(self) -> InputMode:
" Get `InputMode`. "
return self.__input_mode
@input_mode.setter
def input_mode(self, value: InputMode) -> None:
" Set `InputMode`. "
if value == InputMode.NAVIGATION:
self.waiting_for_digraph = False
self.operator_func = None
self.operator_arg = None
self.__input_mode = value
def reset(self) -> None:
"""
Reset state, go back to the given mode. INSERT by default.
"""
# Go back to insert mode.
self.input_mode = InputMode.INSERT
self.waiting_for_digraph = False
self.operator_func = None
self.operator_arg = None
# Reset recording state.
self.recording_register = None
self.current_recording = ''
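
# Minimal usage sketch (an illustrative addition, not part of the original
# prompt_toolkit module): assigning NAVIGATION mode discards any pending
# operator, exactly as the `input_mode` setter above guarantees.
if __name__ == '__main__':
    _state = ViState()
    _state.operator_func = lambda event, text_object: None  # hypothetical pending operator
    _state.input_mode = InputMode.NAVIGATION
    assert _state.operator_func is None
    assert _state.input_mode == InputMode.NAVIGATION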
|
the-stack_0_1176 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
#pylint: disable-msg=W0122,R0914,R0912
"""
File : pkg.py
Author : Valentin Kuznetsov <[email protected]>
Description: AbstractPkg class provides basic functionality
             to generate a CMSSW class from a given template
"""
from __future__ import print_function
# system modules
import os
import sys
import time
import pprint
# package modules
from FWCore.Skeletons.utils import parse_word, functor, user_info, tree, template_directory
class AbstractPkg(object):
"""
AbstractPkg takes care how to generate code from template/PKG
package area. The PKG can be any directory which may include
any types of files, e.g. C++ (.cc), python (.py), etc.
This class relies on specific logic which we outline here:
- each template may use tags defined with double underscores
enclosure, e.g. __class__, __record__, etc.
- each template may have example tags, such tags should
start with @example_. While processing template user may
choose to strip them off or keep the code behind those tags
- in addition user may specify pure python code which can
operate with user defined tags. This code snipped should
be enclosed with #python_begin and #python_end lines
which declares start and end of python block
"""
def __init__(self, config=None):
super(AbstractPkg, self).__init__()
if not config:
self.config = {}
else:
self.config = config
self.pname = self.config.get('pname', None)
self.tmpl = self.config.get('tmpl', None)
self.debug = self.config.get('debug', 0)
self.tdir = template_directory()
self.author = user_info(self.config.get('author', None))
self.date = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
self.not_in_dir = self.config.get('not_in_dir', [])
self.working_dir = self.config.get('working_dir')
def tmpl_etags(self):
"Scan template files and return example tags"
keys = []
sdir = '%s/%s' % (self.tdir, self.tmpl)
for name in os.listdir(sdir):
if name[-1] == '~':
continue
if name == 'CVS':
continue
fname = os.path.join(sdir, name)
with open(fname, 'r') as stream:
for line in stream.readlines():
if line.find('@example_') != -1: # possible tag
keys += [k for k in line.split() if \
k.find('@example_') != -1]
return set(keys)
def print_etags(self):
"Print out template example tags"
for key in self.tmpl_etags():
print(key)
def tmpl_tags(self):
"Scan template files and return template tags"
keys = []
sdir = '%s/%s' % (self.tdir, self.tmpl)
for name in os.listdir(sdir):
if name[-1] == '~':
continue
if name == 'CVS':
continue
fname = os.path.join(sdir, name)
with open(fname, 'r') as stream:
for line in stream.readlines():
if line.find('__') != -1: # possible key
keys += [k for k in parse_word(line)]
return set(keys)
def print_tags(self):
"Print out template keys"
for key in self.tmpl_tags():
print(key)
def parse_etags(self, line):
"""
Determine either skip or keep given line based on class tags
meta-strings
"""
tmpl_etags = self.tmpl_etags()
keep_etags = self.config.get('tmpl_etags', [])
for tag in tmpl_etags:
for valid_tag in keep_etags:
if line.find(valid_tag) != -1:
line = line.replace(valid_tag, '')
return line
if line.find(tag) != -1:
line = ''
return line
if len(keep_etags) == 0:
return line.replace('@default', '')
if '@default' in line:
return ''
return line
def write(self, fname, tmpl_name, kwds):
"Create new file from given template name and set of arguments"
code = ""
read_code = False
if os.path.exists(fname):
return
with open(fname, 'w') as stream:
for line in open(tmpl_name, 'r').readlines():
line = self.parse_etags(line)
if not line:
continue
if line.find('#python_begin') != -1:
read_code = True
continue
if line.find('#python_end') != -1:
read_code = False
if read_code:
code += line
if code and not read_code:
res = functor(code, kwds, self.debug)
stream.write(res)
code = ""
continue
if not read_code:
for key, val in kwds.items():
if isinstance(val, str):
line = line.replace(key, val)
stream.write(line)
def get_kwds(self):
"Return keyword arguments to be used in methods"
kwds = {'__pkgname__': self.config.get('pkgname', 'Package'),
'__author__': self.author,
'__date__': self.date,
'__class__': self.pname,
'__class_lowercase__': self.pname.lower(),
'__class_space__': " "*len(self.pname),
'__name__': self.pname,
'__subsys__': self.config.get('subsystem', 'Subsystem')}
args = self.config.get('args', None)
kwds.update(args)
if self.debug:
print("Template tags:")
pprint.pprint(kwds)
return kwds
def generate(self):
"Generate package templates in a given directory"
# keep current location, since generate will switch directories
cdir = os.getcwd()
# read from configutation which template files to create
tmpl_files = self.config.get('tmpl_files', 'all')
# setup keyword arguments which we'll pass to write method
kwds = self.get_kwds()
# create template package dir and cd into it
if tmpl_files == 'all' and self.tmpl not in self.not_in_dir:
if os.path.isdir(self.pname):
msg = "Can't create package '%s'\n" % self.pname
msg += "Directory %s is already exists" % self.pname
print(msg)
sys.exit(1)
os.makedirs(self.pname)
os.chdir(self.pname)
# read directory driver information and create file list to generate
sdir = os.path.join(self.tdir, self.tmpl)
sources = [s for s in os.listdir(sdir) \
if s != 'Driver.dir' and s.find('~') == -1]
driver = os.path.join(sdir, 'Driver.dir')
if os.path.isfile(driver):
sources = [s.replace('\n', '') for s in open(driver, 'r').readlines()]
if 'CVS' in sources:
sources.remove('CVS')
# special case of Skeleton, which requires to generate only given
# file type if self.pname has extension of that type
names = set([s.split('.')[0] for s in sources])
if names == set(['Skeleton']):
if self.pname.find('.') != -1:
_, ext = os.path.splitext(self.pname)
sources = [s for s in sources if s.rfind(ext) != -1]
self.pname = self.pname.replace(ext, '')
kwds = self.get_kwds()
if not sources:
msg = 'Unable to find skeleton for extension "%s"' % ext
print(msg)
sys.exit(1)
bdir = os.environ.get('CMSSW_BASE', '')
dirs = os.getcwd().replace(bdir, '').split('/')
ldir = os.getcwd().split('/')[-1]
idir = ''
subsys = kwds['__subsys__']
pkgname = kwds['__pkgname__']
if sources == ['Skeleton.cc', 'Skeleton.h']:
if ldir == 'interface' and os.getcwd().find(bdir) != -1:
idir = '%s/%s/interface/' % (subsys, pkgname)
# run within some directory of the Sybsystem/Pkg area
# and only for mkskel <file>.cc
elif sources == ['Skeleton.cc'] and \
len(dirs) == 5 and dirs[0] == '' and dirs[1] == 'src':
idir = '%s/%s/interface/' % (subsys, pkgname)
elif sources == ['Skeleton.h'] and ldir == 'interface' and \
len(dirs) == 5 and dirs[0] == '' and dirs[1] == 'src':
idir = '%s/%s/interface/' % (subsys, pkgname)
kwds.update({'__incdir__': idir})
# loop over source files, create dirs as necessary and generate files
# names for writing templates
gen_files = []
for src in sources:
if tmpl_files != 'all':
fname, ext = os.path.splitext(src)
if tmpl_files != ext:
continue
#also reject if this is the wrong directory
if self.working_dir and src.split('/')[-2] != self.working_dir:
continue
src = src.split('/')[-1]
if self.debug:
print("Read", src)
items = src.split('/')
if items[-1] == '/':
items = items[:-1]
tname = items[-1] # template file name
tmpl_name = os.path.join(sdir, items[-1]) # full tmpl file name
if os.path.isfile(tmpl_name):
ftype = 'file'
else:
ftype = 'dir'
name2gen = src # new file we'll create
if items[-1] == 'testBuildFile.xml':
name2gen = '/'.join(src.split('/')[:-1])+'/BuildFile.xml'
            if -1 != tname.split('.')[0].find(self.tmpl):  # need to substitute
name2gen = name2gen.replace(self.tmpl, self.pname)
name2gen = os.path.join(os.getcwd(), name2gen)
if self.debug:
print("Create", name2gen)
if ftype == 'dir':
if not os.path.isdir(name2gen):
os.makedirs(name2gen)
continue # we're done with dir
fdir = os.path.dirname(name2gen)
if not os.path.isdir(fdir):
os.makedirs(fdir)
self.write(name2gen, tmpl_name, kwds)
gen_files.append(name2gen.split('/')[-1])
if tmpl_files == 'all' and self.tmpl not in self.not_in_dir:
msg = 'New package "%s" of %s type is successfully generated' \
% (self.pname, self.tmpl)
else:
msg = 'Generated %s file' % ', '.join(gen_files)
if len(gen_files) > 1:
msg += 's'
print(msg)
# return back where we started
os.chdir(cdir)
if msg.find('New package') != -1:
tree(self.pname)
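
# Minimal sketch (an illustrative addition, not part of the original CMSSW
# module): how the double-underscore tags returned by get_kwds() are
# substituted into a template line by the plain str.replace() loop in
# write() above. The sample line and tag values below are hypothetical.
if __name__ == '__main__':
    sample_kwds = {'__class__': 'MyAnalyzer', '__author__': 'Jane Doe'}
    sample_line = 'class __class__ : public edm::EDAnalyzer {  // by __author__'
    for tag, value in sample_kwds.items():
        sample_line = sample_line.replace(tag, value)
    print(sample_line)
    # -> class MyAnalyzer : public edm::EDAnalyzer {  // by Jane Doe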
|
the-stack_0_1177 | # vim: set fenc=utf8 ts=4 sw=4 et :
import os
import io
import json
import unittest
from shlex import split
from .testcase import TestCase
from pdml2flow.conf import Conf
import pdml2flow
TEST_DIR_PDML2FLOW="test/pdml2flow_tests/"
TEST_DIR_PDML2FRAME="test/pdml2frame_tests/"
class TestSystem(TestCase):
def read_json(self, f):
objs = []
data = ''
for line in f:
data += line
try:
objs.append(json.loads(data))
data = ''
except ValueError:
# Not yet a complete JSON value
pass
return objs
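
# Note (illustrative): TestSystem.read_json above accumulates input lines and
# retries json.loads() until they form a complete value, so a stream such as
#   '{"a":\n1}\n{"b": 2}\n'
# yields two objects even though the first one spans several lines.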
def get_test(run, directory, test):
def system_test(self):
if os.path.isfile('{}/{}/skip'.format(directory, test)):
self.skipTest('Skipfile found')
with open('{}/{}/stdin'.format(directory, test)) as f_stdin, \
io.StringIO() as f_stdout, \
io.StringIO() as f_stderr:
# wire up io
Conf.IN = f_stdin
Conf.OUT = f_stdout
Conf.OUT_DEBUG = f_stderr
Conf.OUT_WARNING = f_stderr
Conf.OUT_ERROR = f_stderr
try:
# try to load arguments
with open('{}/{}/args'.format(directory, test)) as f:
Conf.ARGS = split(f.read())
except FileNotFoundError:
Conf.ARGS = ''
# run
run()
# compare stdout
stdout_raw = f_stdout.getvalue()
stderr_raw = f_stderr.getvalue()
with open('{}/{}/stdout'.format(directory, test)) as f:
expected_raw = f.read()
# Try parsing as json, and compare objects
run_objs = self.read_json(stdout_raw)
expected_objs = self.read_json(expected_raw)
self.assertEqual(
len(run_objs),
len(expected_objs)
)
for e in expected_objs:
self.assertIn(
e,
                        run_objs
)
for o in run_objs:
self.assertIn(
o,
expected_objs
)
# if no object loaded: do a raw comparison, line by line
if len(run_objs) == 0 or len(expected_objs) == 0:
self.assertEqual(
sorted(
stdout_raw.splitlines()
),
sorted(
expected_raw.splitlines()
)
)
try:
# try compare stderr
with open('{}/{}/stderr'.format(directory, test)) as f:
expected_raw = f.read()
self.assertEqual(
expected_raw,
stderr_raw
)
except FileNotFoundError:
self.assertEqual(
'',
stderr_raw
)
return system_test
def add_tests(run, directory):
for test in os.listdir(directory):
# append test
setattr(
TestSystem,
'test_{}_{}'.format(run.__name__, test),
get_test(run, directory, test)
)
# Add tests
add_tests(pdml2flow.pdml2flow, TEST_DIR_PDML2FLOW)
add_tests(pdml2flow.pdml2frame, TEST_DIR_PDML2FRAME)
|
the-stack_0_1178 | from models.image_classification import alexnet, vgg16, resnet
class ModelSelector:
@staticmethod
def get_model(model_name):
model_mux = {
"alexnet": alexnet.AlexNet,
"vgg16": vgg16.VGG16,
"resnet": resnet.ResNet,
}
return model_mux.get(model_name, "Invalid model name")
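
# Minimal usage sketch (an illustrative addition, not part of the original
# module; it assumes the imported model modules expose the classes named in
# model_mux above). Note that the fallback is a plain string, so callers
# should check what they got back before instantiating it.
if __name__ == '__main__':
    model_cls = ModelSelector.get_model('vgg16')   # -> the vgg16.VGG16 class
    unknown = ModelSelector.get_model('lenet')     # -> "Invalid model name"
    print(model_cls, unknown)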
|
the-stack_0_1179 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
# auth: clsn
# by: covid-19
# date 20200328
#**********************
import requests
import json
import pymysql
import datetime
import sys
# Work around Python 2 errors with Chinese (UTF-8) text
reload(sys)
sys.setdefaultencoding('utf8')
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Database configuration
db_config = {
'host': '61.149.146.136',
'port': 63306,
'user': 'clsn',
'password': '123456',
'db': 'clsn',
'charset': 'utf8'
}
def to_dingtalk():
    # Request URL: the DingTalk WebHook address
webhook = "https://oapi.dingtalk.com/robot/send?access_token=9f54eaaa734cdab863149bfff2b2fa1be86ea2ec5eb89cad6bf93e7c6b771066"
    # Build the request headers
header = {
"Content-Type": "application/json",
"Charset": "UTF-8"
}
    # Build the request payload
message = {
"actionCard": {
"title": title,
"text": text,
"hideAvatar": "0",
"btnOrientation": "0"
},
"msgtype": "actionCard"
}
    # JSON-encode the request payload
message_json = json.dumps(message)
# print message_json
    # Send the request
info = requests.post(url=webhook,data=message_json,headers=header)
    # Print the returned result
print(info.text)
def get_now_info():
with pymysql.connect(**db_config) as curr:
sql1 = "select max(monitor_time) from covid_19_overseas"
curr.execute(sql1)
date_time = list(curr.fetchone())
# print date_time[0]
img = "\n"
head = "## <font color=#FFBF00>[风险提示]</font> <font color=#000000>全球新型冠状病毒肺炎疫情</font>\n"
msg_from = '\n >数据更新至 {}'.format(date_time[0])
sql2 = "select sum(confirm) as '累计确诊',sum(heal) as '治愈',sum(confirmCompare) as '新增确诊',sum(dead) as '死亡' from " \
"covid_19_overseas where monitor_time = '{}' ".format(date_time[0])
curr.execute(sql2)
data = list(curr.fetchone())
bf_dead = round(data[3]/data[0]*100,2)
bf_heal = round(data[1]/data[0]*100,2)
info = """\n ## **确诊病例:** <font color=#FF0000>{}</font>
\n ## **死亡病例:** <font color=#404040>{} ({}%)</font>
\n ## **治愈病例:** <font color=#9DEA15> {} ({}%)</font>
\n ## **新增病例:** <font color=#FFBF00> {}</font>\n""" .format(format(data[0],','),
format(data[3],','),bf_dead,
format(data[1],','),bf_heal,
format(data[2],','))
sql3 = "select confirm as '累计确诊', heal as '治愈',confirmCompare as '新增确诊',dead as '死亡',country as '国家' from " \
"covid_19_overseas where monitor_time = '{}' limit 5;".format(date_time[0])
curr.execute(sql3)
top_data = list(curr.fetchall())
country_info = ''
for data in top_data:
# print data
info_ = """ \n -国家:{}
\n ## **确诊病例:** <font color=#FF0000>{}</font>
\n ## **死亡病例:** <font color=#404040>{}</font>
\n ## **治愈病例:** <font color=#9DEA15> {}</font>
\n ## **新增病例:** <font color=#FFBF00> {}</font>\n *** \n """.format(data[4],
format(data[0], ','),
format(data[3], ','),
format(data[1], ','),
format(data[2], ','))
country_info = country_info + info_
talk_all = '\n# *风险等级TOP5*\n'
to_dingtalk_data = img + head + "***" + info + "***" + talk_all + "***" + country_info + msg_from
return to_dingtalk_data
if __name__=="__main__":
title = "新型冠状病毒疫情(国际)实时追踪"
text = get_now_info()
to_dingtalk()
|
the-stack_0_1181 | def encrypt(text,key):
output = ""
for i in range(len(text)):
char = text[i]
if (char.isupper()):
output += chr((ord(char) + key - 65) % 26 + 65)
elif (char.islower()):
output += chr((ord(char) + key - 97) % 26 + 97)
else:
output += char
return output
def decrypt(text,key):
output = ""
for i in range(len(text)):
char = text[i]
if (char.isupper()):
output += chr((ord(char) - 65 - key) % 26 + 65)
elif (char.islower()):
output += chr((ord(char) - 97 - key) % 26 + 97)
else:
output += char
return output
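# Example (illustrative): with a key of 3, encrypt('Hello, World!', 3) returns
# 'Khoor, Zruog!' and decrypt('Khoor, Zruog!', 3) restores 'Hello, World!';
# non-letter characters such as ',', ' ' and '!' pass through unchanged.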
message = input('Enter your message:')
choice = input('What do you want to do?\nType 1 for encrypt:\nType 2 for decrypt:\n')
if (int(choice) == 1):
shift = input('Enter your key to encrypt (numbers to be shift):')
message = encrypt(message, int(shift))
print('Your encrypt message is: ', message)
exit()
elif (int(choice) == 2):
key = input('Enter your key for decryping (number that has been shifted):')
message = decrypt(message, int(key))
print('Your decrypt message is:', message)
exit()
else:
print('Error, Terminated.')
exit()
|
the-stack_0_1182 | import json
from base64 import b64decode, b64encode
import numpy as np
from numpy.lib.format import dtype_to_descr, descr_to_dtype
def default(obj):
if isinstance(obj, (np.ndarray, np.generic)):
return {
'__numpy__': b64encode(obj.data if obj.flags.c_contiguous else obj.tobytes()).decode('ascii'),
'dtype': dtype_to_descr(obj.dtype),
'shape': obj.shape
}
raise TypeError(f'Object of type {type(obj)} is not JSON serializable')
def object_hook(dct):
if '__numpy__' in dct:
np_obj = np.frombuffer(b64decode(dct['__numpy__']), descr_to_dtype(dct['dtype']))
shape = dct['shape']
return np_obj.reshape(shape) if shape else np_obj[0] # Scalar test
return dct
_dumps = json.dumps
_loads = json.loads
_dump = json.dump
_load = json.load
def dumps(*args, **kwargs):
kwargs.setdefault('default', default)
return _dumps(*args, **kwargs)
def loads(*args, **kwargs):
kwargs.setdefault('object_hook', object_hook)
return _loads(*args, **kwargs)
def dump(*args, **kwargs):
kwargs.setdefault('default', default)
return _dump(*args, **kwargs)
def load(*args, **kwargs):
kwargs.setdefault('object_hook', object_hook)
return _load(*args, **kwargs)
def patch():
"""Monkey patches the json module in order to support serialization/deserialization of Numpy arrays and scalars."""
json.dumps = dumps
json.loads = loads
json.dump = dump
json.load = load
|
the-stack_0_1183 | from django.core.management import BaseCommand
from wagtail.images import get_image_model
from cms.tagging import TAG_ENRICHMENT, I18N_TAGS
from core.utils import overwrite_media_domain
class Command(BaseCommand):
def handle(self, *args, **options):
get_image_model().objects.filter(tags__name='delete', collection__name='services').delete()
for k, v in TAG_ENRICHMENT.items():
for v1 in v:
for i in get_image_model().objects.filter(tags__name=k).exclude(tags__name=v1):
i.tags.add(v1)
for tid, text in I18N_TAGS:
for i in get_image_model().objects.filter(tags__id=tid).exclude(tags__name=text):
i.tags.add(text)
for image in get_image_model().objects.all().order_by('id'):
if image.collection.name == 'services':
image.make_semantic_tags()
if image.all_tags_str:
image.title = image.all_tags_str[:254]
image.url_800x800 = overwrite_media_domain(image.get_rendition('max-800x800|format-jpeg').url)
image.url_400x400 = overwrite_media_domain(image.get_rendition('max-400x400|format-jpeg').url)
image.url_200x200 = overwrite_media_domain(image.get_rendition('max-200x200|format-jpeg').url)
# wagtail admin interface
image.get_rendition('max-165x165|format-jpeg')
image.save()
|
the-stack_0_1184 | import json
from datacite import schema40
from osf.metadata import utils
from website.settings import DOMAIN
serializer_registry = {}
def register(schema_id):
"""Register classes into serializer_registry"""
def decorator(cls):
serializer_registry[schema_id] = cls
return cls
return decorator
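
# Example (illustrative): once DataciteMetadataRecordSerializer below is
# registered under 'datacite', a caller can look it up and serialize a record
# with, e.g., serializer_registry['datacite'].serialize(record, format='xml').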
class MetadataRecordSerializer(object):
def serialize_json(self, metadata_record):
raise NotImplementedError
def serialize_xml(self, metadata_record):
raise NotImplementedError
@classmethod
def serialize(cls, metadata_record, format='json'):
if format == 'json':
return cls.serialize_json(metadata_record)
if format == 'xml':
return cls.serialize_xml(metadata_record)
raise ValueError('Format "{}" is not supported.'.format(format))
@register(schema_id='datacite')
class DataciteMetadataRecordSerializer(MetadataRecordSerializer):
osf_schema = 'osf_datacite.json'
@classmethod
def serialize_json(cls, record):
osfstorage_file = record.file
target = osfstorage_file.target
doc = {
'creators': utils.datacite_format_creators(target.visible_contributors),
'titles': [
{
'title': osfstorage_file.name
},
{
'title': target.title,
'titleType': 'AlternativeTitle'
}
],
'publisher': 'Open Science Framework',
'dates': [
{
'date': str(osfstorage_file.created),
'dateType': 'Created'
},
{
'date': str(osfstorage_file.modified),
'dateType': 'Updated'
}
],
}
file_description = record.metadata.get('file_description')
if file_description:
doc['descriptions'] = [
{
'description': file_description,
'descriptionType': 'Abstract'
}
]
subject_list = []
if target.subjects.all().exists():
subject_list = utils.datacite_format_subjects(target)
tags_on_file = osfstorage_file.tags.values_list('name', flat=True)
for tag_name in tags_on_file:
subject_list.append({'subject': tag_name})
if subject_list:
doc['subjects'] = subject_list
resource_type = record.metadata.get('resource_type', '(:unas)')
doc['resourceType'] = {
'resourceType': resource_type,
'resourceTypeGeneral': utils.DATACITE_RESOURCE_TYPE_MAP.get(resource_type)
}
doc['publicationYear'] = str(osfstorage_file.created.year)
related_publication_doi = record.metadata.get('related_publication_doi')
if related_publication_doi:
doc['relatedIdentifiers'] = [
{
'relatedIdentifier': related_publication_doi,
'relatedIdentifierType': 'DOI',
'relationType': 'IsSupplementTo'
}
]
if osfstorage_file.guids.exists():
doc['alternateIdentifiers'] = [
{
'alternateIdentifier': DOMAIN + osfstorage_file.guids.first()._id,
'alternateIdentifierType': 'URL'
}
]
funders = record.metadata.get('funders')
if funders:
doc['fundingReferences'] = []
for funder in funders:
funder_info = {}
if funder.get('funding_agency'):
funder_info['funderName'] = funder['funding_agency']
if funder.get('grant_number'):
funder_info['awardNumber'] = {'awardNumber': funder['grant_number']}
doc['fundingReferences'].append(funder_info)
if getattr(target, 'node_license', None):
doc['rightsList'] = [utils.datacite_format_rights(target.node_license)]
latest_version_identifier = osfstorage_file.versions.all().order_by('-created').values_list('identifier', flat=True)
if latest_version_identifier:
doc['version'] = latest_version_identifier[0]
return json.dumps(doc)
@classmethod
def serialize_xml(cls, record):
data = json.loads(cls.serialize_json(record))
return schema40.tostring(data)
|
the-stack_0_1185 | # -*- coding: utf-8 -*-
"""
Data conversion utility for numpy
=====================================
Convert cytoscape.js style graphs from numpy object.
http://www.numpy.org
"""
import numpy as np
def from_ndarray(data, name=None, labels=None, directed=False, weighted=False):
"""
This method is converter to change ndarray to cytoscape.js style JSON.
:param data: ndarray object.
:param name: This is the network name.
:param labels: This is the list of nodes' names
:param directed: If this parapeter is True, the graph will be directed. On the other hand, the graph will be undirected.
:param weighted: If this parapeter is True, the graph will have weighted edges. On the other hand, the graph will have unweighted edges.
:return : The cytoscape.js object.
"""
mat_dim = data.shape
if mat_dim[0] != mat_dim[1]:
raise ValueError('Data should be square matrix.')
data_size = mat_dim[0]
if labels is not None:
label_len = len(labels)
if label_len != data_size:
raise ValueError('Label length is not equal to the size of data.')
network_name = name
if network_name is None:
network_name = 'from ndarray'
g = {
'data': {
'name': network_name
},
'elements': {
'nodes': [],
'edges': []
}
}
g['elements']['nodes'] = __get_nodes(labels, data_size)
if weighted:
g['elements']['edges'] = __get_weighted_edges(matrix=data)
else:
g['elements']['edges'] = __get_unweighted_edges(matrix=data)
return g
def __get_nodes(labels, size):
nodes = []
if labels is None:
node_labels = np.arange(size)
else:
node_labels = labels
for idx, label in enumerate(node_labels):
nodes.append(__get_node(idx, label))
return nodes
def __get_node(node_id, name):
n = {
'data': {
'id': str(node_id),
'name': str(name)
}
}
return n
def __get_egdes(matrix, labels):
pass
def __get_edge(source, target, weight=None):
e = {
'data': {
'id': source + '-' + target,
'source': source,
'target': target
}
}
if weight is not None:
e['data']['weight'] = weight
return e
def __get_unweighted_edges(matrix):
size = matrix.shape[0]
edges = []
row_idx = 0
for row in matrix:
idx = row_idx
while idx < size:
if row[idx] == 1:
e = __get_edge(str(row_idx), str(idx))
edges.append(e)
idx += 1
row_idx += 1
return edges
def __get_weighted_edges(matrix):
size = matrix.shape[0]
edges = []
row_idx = 0
for row in matrix:
idx = row_idx
while idx < size:
if not np.isnan(row[idx]):
e = __get_edge(str(row_idx), str(idx), weight=row[idx])
edges.append(e)
idx += 1
row_idx += 1
return edges
pass
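
# Minimal usage sketch (an illustrative addition, not part of the original
# module): an unweighted, undirected 3-node adjacency matrix.
if __name__ == '__main__':
    adjacency = np.array([[0, 1, 1],
                          [1, 0, 0],
                          [1, 0, 0]])
    graph = from_ndarray(adjacency, name='toy network', labels=['a', 'b', 'c'])
    print(len(graph['elements']['nodes']), len(graph['elements']['edges']))  # 3 2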
|
the-stack_0_1186 | """
CLI command for "deploy" command
"""
import logging
import os
import click
from samcli.cli.cli_config_file import TomlProvider, configuration_option
from samcli.cli.main import aws_creds_options, common_options, pass_context, print_cmdline_args
from samcli.commands._utils.cdk_support_decorators import unsupported_command_cdk
from samcli.commands._utils.options import (
capabilities_option,
guided_deploy_stack_name,
metadata_option,
notification_arns_option,
parameter_override_option,
no_progressbar_option,
tags_option,
template_click_option,
signing_profiles_option,
stack_name_option,
s3_bucket_option,
image_repository_option,
image_repositories_option,
s3_prefix_option,
kms_key_id_option,
use_json_option,
force_upload_option,
resolve_s3_option,
role_arn_option,
resolve_image_repos_option,
)
from samcli.commands.deploy.utils import sanitize_parameter_overrides
from samcli.lib.telemetry.metric import track_command
from samcli.lib.cli_validation.image_repository_validation import image_repository_validation
from samcli.lib.utils import osutils
from samcli.lib.bootstrap.bootstrap import manage_stack
from samcli.lib.utils.version_checker import check_newer_version
from samcli.lib.bootstrap.companion_stack.companion_stack_manager import sync_ecr_stack
SHORT_HELP = "Deploy an AWS SAM application."
HELP_TEXT = """The sam deploy command creates a Cloudformation Stack and deploys your resources.
\b
Set the SAM_CLI_POLL_DELAY environment variable to a value in seconds in your shell to configure
how often SAM CLI checks the Stack state, which is useful when seeing throttling from CloudFormation.
\b
e.g. sam deploy --template-file packaged.yaml --stack-name sam-app --capabilities CAPABILITY_IAM
\b
"""
CONFIG_SECTION = "parameters"
LOG = logging.getLogger(__name__)
@click.command(
"deploy",
short_help=SHORT_HELP,
context_settings={"ignore_unknown_options": False, "allow_interspersed_args": True, "allow_extra_args": True},
help=HELP_TEXT,
)
@configuration_option(provider=TomlProvider(section=CONFIG_SECTION))
@click.option(
"--guided",
"-g",
required=False,
is_flag=True,
is_eager=True,
help="Specify this flag to allow SAM CLI to guide you through the deployment using guided prompts.",
)
@template_click_option(include_build=True)
@click.option(
"--no-execute-changeset",
required=False,
is_flag=True,
help="Indicates whether to execute the change set. "
"Specify this flag if you want to view your stack changes "
"before executing the change set. The command creates an AWS CloudFormation "
"change set and then exits without executing the change set. if "
"the changeset looks satisfactory, the stack changes can be made by "
"running the same command without specifying `--no-execute-changeset`",
)
@click.option(
"--fail-on-empty-changeset/--no-fail-on-empty-changeset",
default=True,
required=False,
is_flag=True,
help="Specify if the CLI should return a non-zero exit code if there are no "
"changes to be made to the stack. The default behavior is to return a "
"non-zero exit code.",
)
@click.option(
"--confirm-changeset/--no-confirm-changeset",
default=False,
required=False,
is_flag=True,
help="Prompt to confirm if the computed changeset is to be deployed by SAM CLI.",
)
@click.option(
"--disable-rollback/--no-disable-rollback",
default=False,
required=False,
is_flag=True,
help="Preserves the state of previously provisioned resources when an operation fails.",
)
@stack_name_option(callback=guided_deploy_stack_name) # pylint: disable=E1120
@s3_bucket_option(guided=True) # pylint: disable=E1120
@image_repository_option
@image_repositories_option
@force_upload_option
@s3_prefix_option
@kms_key_id_option
@role_arn_option
@use_json_option
@resolve_s3_option(guided=True) # pylint: disable=E1120
@resolve_image_repos_option
@metadata_option
@notification_arns_option
@tags_option
@parameter_override_option
@signing_profiles_option
@no_progressbar_option
@capabilities_option
@aws_creds_options
@common_options
@image_repository_validation
@pass_context
@track_command
@check_newer_version
@print_cmdline_args
@unsupported_command_cdk(alternative_command="cdk deploy")
def cli(
ctx,
template_file,
stack_name,
s3_bucket,
image_repository,
image_repositories,
force_upload,
no_progressbar,
s3_prefix,
kms_key_id,
parameter_overrides,
capabilities,
no_execute_changeset,
role_arn,
notification_arns,
fail_on_empty_changeset,
use_json,
tags,
metadata,
guided,
confirm_changeset,
signing_profiles,
resolve_s3,
resolve_image_repos,
config_file,
config_env,
disable_rollback,
):
"""
`sam deploy` command entry point
"""
# All logic must be implemented in the ``do_cli`` method. This helps with easy unit testing
do_cli(
template_file,
stack_name,
s3_bucket,
image_repository,
image_repositories,
force_upload,
no_progressbar,
s3_prefix,
kms_key_id,
parameter_overrides,
capabilities,
no_execute_changeset,
role_arn,
notification_arns,
fail_on_empty_changeset,
use_json,
tags,
metadata,
guided,
confirm_changeset,
ctx.region,
ctx.profile,
signing_profiles,
resolve_s3,
config_file,
config_env,
resolve_image_repos,
disable_rollback,
) # pragma: no cover
def do_cli(
template_file,
stack_name,
s3_bucket,
image_repository,
image_repositories,
force_upload,
no_progressbar,
s3_prefix,
kms_key_id,
parameter_overrides,
capabilities,
no_execute_changeset,
role_arn,
notification_arns,
fail_on_empty_changeset,
use_json,
tags,
metadata,
guided,
confirm_changeset,
region,
profile,
signing_profiles,
resolve_s3,
config_file,
config_env,
resolve_image_repos,
disable_rollback,
):
"""
Implementation of the ``cli`` method
"""
from samcli.commands.package.package_context import PackageContext
from samcli.commands.deploy.deploy_context import DeployContext
from samcli.commands.deploy.guided_context import GuidedContext
from samcli.commands.deploy.exceptions import DeployResolveS3AndS3SetError
if guided:
# Allow for a guided deploy to prompt and save those details.
guided_context = GuidedContext(
template_file=template_file,
stack_name=stack_name,
s3_bucket=s3_bucket,
image_repository=image_repository,
image_repositories=image_repositories,
s3_prefix=s3_prefix,
region=region,
profile=profile,
confirm_changeset=confirm_changeset,
capabilities=capabilities,
signing_profiles=signing_profiles,
parameter_overrides=parameter_overrides,
config_section=CONFIG_SECTION,
config_env=config_env,
config_file=config_file,
disable_rollback=disable_rollback,
)
guided_context.run()
else:
if resolve_s3:
if bool(s3_bucket):
raise DeployResolveS3AndS3SetError()
s3_bucket = manage_stack(profile=profile, region=region)
click.echo(f"\n\t\tManaged S3 bucket: {s3_bucket}")
click.echo("\t\tA different default S3 bucket can be set in samconfig.toml")
click.echo("\t\tOr by specifying --s3-bucket explicitly.")
# TODO Refactor resolve-s3 and resolve-image-repos into one place
# after we figure out how to enable resolve-images-repos in package
if resolve_image_repos:
image_repositories = sync_ecr_stack(
template_file, stack_name, region, s3_bucket, s3_prefix, image_repositories
)
with osutils.tempfile_platform_independent() as output_template_file:
with PackageContext(
template_file=template_file,
s3_bucket=guided_context.guided_s3_bucket if guided else s3_bucket,
s3_prefix=guided_context.guided_s3_prefix if guided else s3_prefix,
image_repository=guided_context.guided_image_repository if guided else image_repository,
image_repositories=guided_context.guided_image_repositories if guided else image_repositories,
output_template_file=output_template_file.name,
kms_key_id=kms_key_id,
use_json=use_json,
force_upload=force_upload,
no_progressbar=no_progressbar,
metadata=metadata,
on_deploy=True,
region=guided_context.guided_region if guided else region,
profile=profile,
signing_profiles=guided_context.signing_profiles if guided else signing_profiles,
) as package_context:
package_context.run()
# 500ms of sleep time between stack checks and describe stack events.
DEFAULT_POLL_DELAY = 0.5
try:
poll_delay = float(os.getenv("SAM_CLI_POLL_DELAY", str(DEFAULT_POLL_DELAY)))
except ValueError:
poll_delay = DEFAULT_POLL_DELAY
if poll_delay <= 0:
poll_delay = DEFAULT_POLL_DELAY
with DeployContext(
template_file=output_template_file.name,
stack_name=guided_context.guided_stack_name if guided else stack_name,
s3_bucket=guided_context.guided_s3_bucket if guided else s3_bucket,
image_repository=guided_context.guided_image_repository if guided else image_repository,
image_repositories=guided_context.guided_image_repositories if guided else image_repositories,
force_upload=force_upload,
no_progressbar=no_progressbar,
s3_prefix=guided_context.guided_s3_prefix if guided else s3_prefix,
kms_key_id=kms_key_id,
parameter_overrides=sanitize_parameter_overrides(guided_context.guided_parameter_overrides)
if guided
else parameter_overrides,
capabilities=guided_context.guided_capabilities if guided else capabilities,
no_execute_changeset=no_execute_changeset,
role_arn=role_arn,
notification_arns=notification_arns,
fail_on_empty_changeset=fail_on_empty_changeset,
tags=tags,
region=guided_context.guided_region if guided else region,
profile=profile,
confirm_changeset=guided_context.confirm_changeset if guided else confirm_changeset,
signing_profiles=guided_context.signing_profiles if guided else signing_profiles,
use_changeset=True,
disable_rollback=guided_context.disable_rollback if guided else disable_rollback,
poll_delay=poll_delay,
) as deploy_context:
deploy_context.run()
|